Diffstat (limited to 'clang')
-rw-r--r-- clang/bindings/python/clang/cindex.py | 21
-rw-r--r-- clang/bindings/python/tests/CMakeLists.txt | 2
-rw-r--r-- clang/bindings/python/tests/cindex/test_cursor_language.py | 27
-rw-r--r-- clang/bindings/python/tests/cindex/test_enums.py | 2
-rw-r--r-- clang/bindings/python/tests/cindex/test_type.py | 2
-rw-r--r-- clang/cmake/caches/Fuchsia.cmake | 2
-rw-r--r-- clang/cmake/caches/PGO.cmake | 2
-rw-r--r-- clang/docs/APINotes.rst | 11
-rw-r--r-- clang/docs/AddressSanitizer.rst | 2
-rw-r--r-- clang/docs/AutomaticReferenceCounting.rst | 12
-rw-r--r-- clang/docs/ClangFormatStyleOptions.rst | 51
-rw-r--r-- clang/docs/ClangLinkerWrapper.rst | 4
-rw-r--r-- clang/docs/ClangOffloadBundler.rst | 8
-rw-r--r-- clang/docs/DebuggingCoroutines.rst | 58
-rw-r--r-- clang/docs/InternalsManual.rst | 2
-rw-r--r-- clang/docs/LanguageExtensions.rst | 165
-rw-r--r-- clang/docs/OpenMPSupport.rst | 42
-rw-r--r-- clang/docs/PointerAuthentication.rst | 1195
-rw-r--r-- clang/docs/ReleaseNotes.rst | 207
-rw-r--r-- clang/docs/ReleaseNotesTemplate.txt | 246
-rw-r--r-- clang/docs/SanitizerSpecialCaseList.rst | 2
-rw-r--r-- clang/docs/SourceBasedCodeCoverage.rst | 32
-rw-r--r-- clang/docs/StandardCPlusPlusModules.rst | 44
-rw-r--r-- clang/docs/ThreadSafetyAnalysis.rst | 7
-rw-r--r-- clang/docs/UsersManual.rst | 2
-rw-r--r-- clang/docs/index.rst | 1
-rw-r--r-- clang/include/clang/APINotes/Types.h | 27
-rw-r--r-- clang/include/clang/AST/APNumericStorage.h | 3
-rw-r--r-- clang/include/clang/AST/APValue.h | 2
-rw-r--r-- clang/include/clang/AST/ASTConcept.h | 9
-rw-r--r-- clang/include/clang/AST/ASTContext.h | 217
-rw-r--r-- clang/include/clang/AST/ASTImporter.h | 2
-rw-r--r-- clang/include/clang/AST/ASTNodeTraverser.h | 40
-rw-r--r-- clang/include/clang/AST/ASTTypeTraits.h | 15
-rw-r--r-- clang/include/clang/AST/AbstractBasicReader.h | 41
-rw-r--r-- clang/include/clang/AST/AbstractBasicWriter.h | 37
-rw-r--r-- clang/include/clang/AST/CXXInheritance.h | 2
-rw-r--r-- clang/include/clang/AST/CanonicalType.h | 11
-rw-r--r-- clang/include/clang/AST/Comment.h | 21
-rw-r--r-- clang/include/clang/AST/CommentHTMLTags.td | 5
-rw-r--r-- clang/include/clang/AST/CommentSema.h | 1
-rw-r--r-- clang/include/clang/AST/Decl.h | 77
-rw-r--r-- clang/include/clang/AST/DeclBase.h | 16
-rw-r--r-- clang/include/clang/AST/DeclCXX.h | 61
-rw-r--r-- clang/include/clang/AST/DeclObjC.h | 3
-rw-r--r-- clang/include/clang/AST/DeclTemplate.h | 47
-rw-r--r-- clang/include/clang/AST/DeclarationName.h | 2
-rw-r--r-- clang/include/clang/AST/DependenceFlags.h | 2
-rw-r--r-- clang/include/clang/AST/DynamicRecursiveASTVisitor.h | 13
-rw-r--r-- clang/include/clang/AST/Expr.h | 31
-rw-r--r-- clang/include/clang/AST/ExprCXX.h | 21
-rw-r--r-- clang/include/clang/AST/JSONNodeDumper.h | 1
-rw-r--r-- clang/include/clang/AST/NestedNameSpecifier.h | 665
-rw-r--r-- clang/include/clang/AST/NestedNameSpecifierBase.h | 586
-rw-r--r-- clang/include/clang/AST/ODRHash.h | 2
-rw-r--r-- clang/include/clang/AST/OpenACCClause.h | 35
-rw-r--r-- clang/include/clang/AST/OpenMPClause.h | 59
-rw-r--r-- clang/include/clang/AST/PrettyPrinter.h | 11
-rw-r--r-- clang/include/clang/AST/PropertiesBase.td | 5
-rw-r--r-- clang/include/clang/AST/QualTypeNames.h | 10
-rw-r--r-- clang/include/clang/AST/RecursiveASTVisitor.h | 306
-rw-r--r-- clang/include/clang/AST/TemplateBase.h | 42
-rw-r--r-- clang/include/clang/AST/TemplateName.h | 30
-rw-r--r-- clang/include/clang/AST/TextNodeDumper.h | 2
-rw-r--r-- clang/include/clang/AST/Type.h | 9094
-rw-r--r-- clang/include/clang/AST/TypeBase.h | 9281
-rw-r--r-- clang/include/clang/AST/TypeLoc.h | 424
-rw-r--r-- clang/include/clang/AST/TypeProperties.td | 159
-rw-r--r-- clang/include/clang/ASTMatchers/ASTMatchFinder.h | 5
-rw-r--r-- clang/include/clang/ASTMatchers/ASTMatchers.h | 186
-rw-r--r-- clang/include/clang/ASTMatchers/ASTMatchersInternal.h | 20
-rw-r--r-- clang/include/clang/Analysis/Analyses/LifetimeSafety.h | 51
-rw-r--r-- clang/include/clang/Analysis/FlowSensitive/ASTOps.h | 10
-rw-r--r-- clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h | 34
-rw-r--r-- clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h | 14
-rw-r--r-- clang/include/clang/Analysis/FlowSensitive/Formula.h | 19
-rw-r--r-- clang/include/clang/Analysis/FlowSensitive/FormulaSerialization.h | 40
-rw-r--r-- clang/include/clang/Analysis/FlowSensitive/StorageLocation.h | 11
-rw-r--r-- clang/include/clang/Basic/ABIVersions.def | 135
-rw-r--r-- clang/include/clang/Basic/AllDiagnosticKinds.inc | 1
-rw-r--r-- clang/include/clang/Basic/AllDiagnostics.h | 11
-rw-r--r-- clang/include/clang/Basic/Attr.td | 16
-rw-r--r-- clang/include/clang/Basic/AttrDocs.td | 149
-rw-r--r-- clang/include/clang/Basic/BuiltinTemplates.td | 4
-rw-r--r-- clang/include/clang/Basic/Builtins.td | 56
-rw-r--r-- clang/include/clang/Basic/BuiltinsAMDGPU.def | 6
-rw-r--r-- clang/include/clang/Basic/BuiltinsPPC.def | 6
-rw-r--r-- clang/include/clang/Basic/BuiltinsX86.td | 504
-rw-r--r-- clang/include/clang/Basic/CMakeLists.txt | 1
-rw-r--r-- clang/include/clang/Basic/CodeGenOptions.def | 4
-rw-r--r-- clang/include/clang/Basic/CodeGenOptions.h | 10
-rw-r--r-- clang/include/clang/Basic/Diagnostic.h | 6
-rw-r--r-- clang/include/clang/Basic/Diagnostic.td | 5
-rw-r--r-- clang/include/clang/Basic/DiagnosticASTKinds.td | 3
-rw-r--r-- clang/include/clang/Basic/DiagnosticDriverKinds.td | 12
-rw-r--r-- clang/include/clang/Basic/DiagnosticFrontendKinds.td | 4
-rw-r--r-- clang/include/clang/Basic/DiagnosticGroups.td | 11
-rw-r--r-- clang/include/clang/Basic/DiagnosticIDs.h | 156
-rw-r--r-- clang/include/clang/Basic/DiagnosticLexKinds.td | 5
-rw-r--r-- clang/include/clang/Basic/DiagnosticParseKinds.td | 7
-rw-r--r-- clang/include/clang/Basic/DiagnosticSemaKinds.td | 107
-rw-r--r-- clang/include/clang/Basic/DiagnosticTrap.h | 14
-rw-r--r-- clang/include/clang/Basic/DiagnosticTrapKinds.td | 30
-rw-r--r-- clang/include/clang/Basic/Features.def | 18
-rw-r--r-- clang/include/clang/Basic/LangOptions.def | 3
-rw-r--r-- clang/include/clang/Basic/LangOptions.h | 97
-rw-r--r-- clang/include/clang/Basic/PointerAuthOptions.h | 16
-rw-r--r-- clang/include/clang/Basic/TargetInfo.h | 10
-rw-r--r-- clang/include/clang/Basic/TokenKinds.def | 4
-rw-r--r-- clang/include/clang/Basic/TokenKinds.h | 18
-rw-r--r-- clang/include/clang/Basic/TypeNodes.td | 32
-rw-r--r-- clang/include/clang/Basic/arm_sme.td | 180
-rw-r--r-- clang/include/clang/Basic/arm_sve.td | 108
-rw-r--r-- clang/include/clang/Basic/riscv_vector.td | 935
-rw-r--r-- clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h | 128
-rw-r--r-- clang/include/clang/CIR/Dialect/IR/CIRAttrs.td | 263
-rw-r--r-- clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h | 35
-rw-r--r-- clang/include/clang/CIR/Dialect/IR/CIRDialect.td | 2
-rw-r--r-- clang/include/clang/CIR/Dialect/IR/CIROps.td | 741
-rw-r--r-- clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h | 12
-rw-r--r-- clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td | 8
-rw-r--r-- clang/include/clang/CIR/Dialect/IR/CIRTypes.td | 8
-rw-r--r-- clang/include/clang/CIR/Dialect/Passes.h | 1
-rw-r--r-- clang/include/clang/CIR/Dialect/Passes.td | 10
-rw-r--r-- clang/include/clang/CIR/MissingFeatures.h | 39
-rw-r--r-- clang/include/clang/CodeGen/CGFunctionInfo.h | 47
-rw-r--r-- clang/include/clang/CodeGen/CodeGenABITypes.h | 7
-rw-r--r-- clang/include/clang/Driver/Action.h | 14
-rw-r--r-- clang/include/clang/Driver/CommonArgs.h | 2
-rw-r--r-- clang/include/clang/Driver/Driver.h | 32
-rw-r--r-- clang/include/clang/Driver/OffloadBundler.h | 2
-rw-r--r-- clang/include/clang/Driver/Options.td | 109
-rw-r--r-- clang/include/clang/Driver/ToolChain.h | 6
-rw-r--r-- clang/include/clang/ExtractAPI/DeclarationFragments.h | 5
-rw-r--r-- clang/include/clang/Format/Format.h | 47
-rw-r--r-- clang/include/clang/Frontend/ASTUnit.h | 11
-rw-r--r-- clang/include/clang/Frontend/FrontendActions.h | 12
-rw-r--r-- clang/include/clang/Index/IndexSymbol.h | 1
-rw-r--r-- clang/include/clang/Interpreter/Interpreter.h | 16
-rw-r--r-- clang/include/clang/Lex/DependencyDirectivesScanner.h | 7
-rw-r--r-- clang/include/clang/Lex/Lexer.h | 3
-rw-r--r-- clang/include/clang/Lex/NoTrivialPPDirectiveTracer.h | 310
-rw-r--r-- clang/include/clang/Lex/Preprocessor.h | 12
-rw-r--r-- clang/include/clang/Lex/Token.h | 17
-rw-r--r-- clang/include/clang/Parse/ParseHLSLRootSignature.h | 4
-rw-r--r-- clang/include/clang/Sema/CodeCompleteConsumer.h | 7
-rw-r--r-- clang/include/clang/Sema/DeclSpec.h | 36
-rw-r--r-- clang/include/clang/Sema/HeuristicResolver.h | 3
-rw-r--r-- clang/include/clang/Sema/ParsedTemplate.h | 26
-rw-r--r-- clang/include/clang/Sema/ScopeInfo.h | 2
-rw-r--r-- clang/include/clang/Sema/Sema.h | 111
-rw-r--r-- clang/include/clang/Sema/SemaHLSL.h | 15
-rw-r--r-- clang/include/clang/Sema/SemaInternal.h | 19
-rw-r--r-- clang/include/clang/Sema/SemaOpenACC.h | 21
-rw-r--r-- clang/include/clang/Sema/SemaSYCL.h | 1
-rw-r--r-- clang/include/clang/Sema/SemaWasm.h | 3
-rw-r--r-- clang/include/clang/Sema/TypoCorrection.h | 29
-rw-r--r-- clang/include/clang/Serialization/ASTReader.h | 3
-rw-r--r-- clang/include/clang/Serialization/ASTRecordReader.h | 2
-rw-r--r-- clang/include/clang/Serialization/ASTRecordWriter.h | 5
-rw-r--r-- clang/include/clang/Serialization/TypeBitCodes.def | 2
-rw-r--r-- clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h | 6
-rw-r--r-- clang/include/clang/StaticAnalyzer/Core/Checker.h | 4
-rw-r--r-- clang/include/clang/StaticAnalyzer/Core/CheckerManager.h | 11
-rw-r--r-- clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h | 23
-rw-r--r-- clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h | 13
-rw-r--r-- clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h | 4
-rw-r--r-- clang/include/clang/Tooling/Refactoring/Lookup.h | 3
-rw-r--r-- clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h | 16
-rw-r--r-- clang/include/clang/Tooling/Tooling.h | 4
-rw-r--r-- clang/include/module.modulemap | 1
-rw-r--r-- clang/lib/APINotes/APINotesFormat.h | 2
-rw-r--r-- clang/lib/APINotes/APINotesReader.cpp | 18
-rw-r--r-- clang/lib/APINotes/APINotesWriter.cpp | 17
-rw-r--r-- clang/lib/APINotes/APINotesYAMLCompiler.cpp | 12
-rw-r--r-- clang/lib/AST/APValue.cpp | 2
-rw-r--r-- clang/lib/AST/ASTConcept.cpp | 11
-rw-r--r-- clang/lib/AST/ASTContext.cpp | 1188
-rw-r--r-- clang/lib/AST/ASTDiagnostic.cpp | 49
-rw-r--r-- clang/lib/AST/ASTDumper.cpp | 4
-rw-r--r-- clang/lib/AST/ASTImporter.cpp | 310
-rw-r--r-- clang/lib/AST/ASTImporterLookupTable.cpp | 16
-rw-r--r-- clang/lib/AST/ASTStructuralEquivalence.cpp | 194
-rw-r--r-- clang/lib/AST/ASTTypeTraits.cpp | 36
-rw-r--r-- clang/lib/AST/ByteCode/Compiler.cpp | 339
-rw-r--r-- clang/lib/AST/ByteCode/Compiler.h | 23
-rw-r--r-- clang/lib/AST/ByteCode/Context.cpp | 25
-rw-r--r-- clang/lib/AST/ByteCode/Context.h | 21
-rw-r--r-- clang/lib/AST/ByteCode/Descriptor.cpp | 124
-rw-r--r-- clang/lib/AST/ByteCode/Descriptor.h | 9
-rw-r--r-- clang/lib/AST/ByteCode/Disasm.cpp | 53
-rw-r--r-- clang/lib/AST/ByteCode/DynamicAllocator.cpp | 76
-rw-r--r-- clang/lib/AST/ByteCode/DynamicAllocator.h | 11
-rw-r--r-- clang/lib/AST/ByteCode/EvalEmitter.cpp | 23
-rw-r--r-- clang/lib/AST/ByteCode/EvaluationResult.cpp | 58
-rw-r--r-- clang/lib/AST/ByteCode/EvaluationResult.h | 57
-rw-r--r-- clang/lib/AST/ByteCode/Function.h | 2
-rw-r--r-- clang/lib/AST/ByteCode/Integral.h | 5
-rw-r--r-- clang/lib/AST/ByteCode/Interp.cpp | 288
-rw-r--r-- clang/lib/AST/ByteCode/Interp.h | 110
-rw-r--r-- clang/lib/AST/ByteCode/InterpBlock.cpp | 47
-rw-r--r-- clang/lib/AST/ByteCode/InterpBlock.h | 78
-rw-r--r-- clang/lib/AST/ByteCode/InterpBuiltin.cpp | 843
-rw-r--r-- clang/lib/AST/ByteCode/InterpFrame.cpp | 12
-rw-r--r-- clang/lib/AST/ByteCode/InterpFrame.h | 3
-rw-r--r-- clang/lib/AST/ByteCode/InterpStack.cpp | 57
-rw-r--r-- clang/lib/AST/ByteCode/InterpStack.h | 24
-rw-r--r-- clang/lib/AST/ByteCode/InterpState.cpp | 40
-rw-r--r-- clang/lib/AST/ByteCode/InterpState.h | 5
-rw-r--r-- clang/lib/AST/ByteCode/MemberPointer.h | 6
-rw-r--r-- clang/lib/AST/ByteCode/Pointer.cpp | 223
-rw-r--r-- clang/lib/AST/ByteCode/Pointer.h | 263
-rw-r--r-- clang/lib/AST/ByteCode/PrimType.h | 30
-rw-r--r-- clang/lib/AST/ByteCode/Program.cpp | 61
-rw-r--r-- clang/lib/AST/ByteCode/Program.h | 9
-rw-r--r-- clang/lib/AST/ByteCode/Record.cpp | 4
-rw-r--r-- clang/lib/AST/CXXInheritance.cpp | 34
-rw-r--r-- clang/lib/AST/Comment.cpp | 2
-rw-r--r-- clang/lib/AST/CommentLexer.cpp | 2
-rw-r--r-- clang/lib/AST/CommentParser.cpp | 5
-rw-r--r-- clang/lib/AST/CommentSema.cpp | 21
-rw-r--r-- clang/lib/AST/ComparisonCategories.cpp | 2
-rw-r--r-- clang/lib/AST/ComputeDependence.cpp | 26
-rw-r--r-- clang/lib/AST/Decl.cpp | 141
-rw-r--r-- clang/lib/AST/DeclBase.cpp | 40
-rw-r--r-- clang/lib/AST/DeclCXX.cpp | 129
-rw-r--r-- clang/lib/AST/DeclPrinter.cpp | 60
-rw-r--r-- clang/lib/AST/DeclTemplate.cpp | 91
-rw-r--r-- clang/lib/AST/DeclarationName.cpp | 11
-rw-r--r-- clang/lib/AST/DynamicRecursiveASTVisitor.cpp | 53
-rw-r--r-- clang/lib/AST/Expr.cpp | 142
-rw-r--r-- clang/lib/AST/ExprCXX.cpp | 9
-rw-r--r-- clang/lib/AST/ExprConcepts.cpp | 4
-rw-r--r-- clang/lib/AST/ExprConstant.cpp | 689
-rw-r--r-- clang/lib/AST/FormatString.cpp | 18
-rw-r--r-- clang/lib/AST/InheritViz.cpp | 6
-rw-r--r-- clang/lib/AST/ItaniumCXXABI.cpp | 7
-rw-r--r-- clang/lib/AST/ItaniumMangle.cpp | 214
-rw-r--r-- clang/lib/AST/JSONNodeDumper.cpp | 36
-rw-r--r-- clang/lib/AST/MicrosoftMangle.cpp | 30
-rw-r--r-- clang/lib/AST/NestedNameSpecifier.cpp | 474
-rw-r--r-- clang/lib/AST/ODRHash.cpp | 124
-rw-r--r-- clang/lib/AST/OpenACCClause.cpp | 8
-rw-r--r-- clang/lib/AST/OpenMPClause.cpp | 35
-rw-r--r-- clang/lib/AST/ParentMapContext.cpp | 6
-rw-r--r-- clang/lib/AST/PrintfFormatString.cpp | 4
-rw-r--r-- clang/lib/AST/QualTypeNames.cpp | 368
-rw-r--r-- clang/lib/AST/RecordLayoutBuilder.cpp | 54
-rw-r--r-- clang/lib/AST/ScanfFormatString.cpp | 6
-rw-r--r-- clang/lib/AST/StmtPrinter.cpp | 33
-rw-r--r-- clang/lib/AST/StmtProfile.cpp | 73
-rw-r--r-- clang/lib/AST/TemplateBase.cpp | 34
-rw-r--r-- clang/lib/AST/TemplateName.cpp | 66
-rw-r--r-- clang/lib/AST/TextNodeDumper.cpp | 78
-rw-r--r-- clang/lib/AST/Type.cpp | 644
-rw-r--r-- clang/lib/AST/TypeLoc.cpp | 242
-rw-r--r-- clang/lib/AST/TypePrinter.cpp | 276
-rw-r--r-- clang/lib/AST/VTTBuilder.cpp | 10
-rw-r--r-- clang/lib/AST/VTableBuilder.cpp | 8
-rw-r--r-- clang/lib/ASTMatchers/ASTMatchFinder.cpp | 176
-rw-r--r-- clang/lib/ASTMatchers/ASTMatchersInternal.cpp | 5
-rw-r--r-- clang/lib/ASTMatchers/Dynamic/Registry.cpp | 6
-rw-r--r-- clang/lib/Analysis/CFG.cpp | 3
-rw-r--r-- clang/lib/Analysis/ExprMutationAnalyzer.cpp | 3
-rw-r--r-- clang/lib/Analysis/FlowSensitive/CMakeLists.txt | 1
-rw-r--r-- clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp | 79
-rw-r--r-- clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp | 6
-rw-r--r-- clang/lib/Analysis/FlowSensitive/FormulaSerialization.cpp | 153
-rw-r--r-- clang/lib/Analysis/FlowSensitive/Transfer.cpp | 71
-rw-r--r-- clang/lib/Analysis/LifetimeSafety.cpp | 308
-rw-r--r-- clang/lib/Analysis/ThreadSafety.cpp | 7
-rw-r--r-- clang/lib/Analysis/ThreadSafetyCommon.cpp | 12
-rw-r--r-- clang/lib/Analysis/UnsafeBufferUsage.cpp | 14
-rw-r--r-- clang/lib/Basic/Diagnostic.cpp | 2
-rw-r--r-- clang/lib/Basic/DiagnosticIDs.cpp | 4
-rw-r--r-- clang/lib/Basic/SourceManager.cpp | 4
-rw-r--r-- clang/lib/Basic/TargetInfo.cpp | 2
-rw-r--r-- clang/lib/Basic/Targets/AArch64.cpp | 2
-rw-r--r-- clang/lib/Basic/Targets/AMDGPU.cpp | 7
-rw-r--r-- clang/lib/Basic/Targets/ARM.cpp | 8
-rw-r--r-- clang/lib/Basic/Targets/AVR.h | 2
-rw-r--r-- clang/lib/Basic/Targets/DirectX.h | 2
-rw-r--r-- clang/lib/Basic/Targets/Hexagon.cpp | 2
-rw-r--r-- clang/lib/Basic/Targets/LoongArch.cpp | 2
-rw-r--r-- clang/lib/Basic/Targets/NVPTX.cpp | 15
-rw-r--r-- clang/lib/Basic/Targets/OSTargets.h | 3
-rw-r--r-- clang/lib/Basic/Targets/PPC.cpp | 2
-rw-r--r-- clang/lib/Basic/Targets/RISCV.cpp | 2
-rw-r--r-- clang/lib/Basic/Targets/SPIR.h | 11
-rw-r--r-- clang/lib/Basic/Targets/SystemZ.h | 2
-rw-r--r-- clang/lib/Basic/Targets/X86.cpp | 5
-rw-r--r-- clang/lib/CIR/CodeGen/Address.h | 6
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenAsm.cpp | 136
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 569
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenBuilder.cpp | 64
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 97
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 93
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 56
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp | 53
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 46
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenCall.h | 67
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 319
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 143
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenCleanup.h | 142
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenConstantEmitter.h | 6
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 30
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenException.cpp | 41
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 212
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp | 286
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 36
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 205
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp | 161
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 64
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 68
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 217
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 263
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 153
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenModule.h | 60
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp | 374
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 4
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp | 131
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 153
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 1
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 63
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenTypes.h | 7
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 244
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenVTables.h | 74
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenValue.h | 9
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 4
-rw-r--r-- clang/lib/CIR/CodeGen/CMakeLists.txt | 5
-rw-r--r-- clang/lib/CIR/CodeGen/EHScopeStack.h | 101
-rw-r--r-- clang/lib/CIR/CodeGen/TargetInfo.cpp | 14
-rw-r--r-- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 101
-rw-r--r-- clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp | 41
-rw-r--r-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 341
-rw-r--r-- clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp | 4
-rw-r--r-- clang/lib/CIR/Dialect/Transforms/CMakeLists.txt | 1
-rw-r--r-- clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp | 57
-rw-r--r-- clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp | 281
-rw-r--r-- clang/lib/CIR/Lowering/CIRPasses.cpp | 1
-rw-r--r-- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 519
-rw-r--r-- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 137
-rw-r--r-- clang/lib/CodeGen/ABIInfo.cpp | 11
-rw-r--r-- clang/lib/CodeGen/ABIInfo.h | 6
-rw-r--r-- clang/lib/CodeGen/ABIInfoImpl.cpp | 63
-rw-r--r-- clang/lib/CodeGen/Address.h | 5
-rw-r--r-- clang/lib/CodeGen/BackendUtil.cpp | 1
-rw-r--r-- clang/lib/CodeGen/CGBlocks.cpp | 107
-rw-r--r-- clang/lib/CodeGen/CGBuiltin.cpp | 143
-rw-r--r-- clang/lib/CodeGen/CGCUDANV.cpp | 2
-rw-r--r-- clang/lib/CodeGen/CGCXX.cpp | 21
-rw-r--r-- clang/lib/CodeGen/CGCXXABI.cpp | 9
-rw-r--r-- clang/lib/CodeGen/CGCall.cpp | 162
-rw-r--r-- clang/lib/CodeGen/CGCall.h | 8
-rw-r--r-- clang/lib/CodeGen/CGClass.cpp | 99
-rw-r--r-- clang/lib/CodeGen/CGDebugInfo.cpp | 133
-rw-r--r-- clang/lib/CodeGen/CGDebugInfo.h | 4
-rw-r--r-- clang/lib/CodeGen/CGDecl.cpp | 51
-rw-r--r-- clang/lib/CodeGen/CGExpr.cpp | 205
-rw-r--r-- clang/lib/CodeGen/CGExprAgg.cpp | 39
-rw-r--r-- clang/lib/CodeGen/CGExprCXX.cpp | 36
-rw-r--r-- clang/lib/CodeGen/CGExprConstant.cpp | 30
-rw-r--r-- clang/lib/CodeGen/CGExprScalar.cpp | 173
-rw-r--r-- clang/lib/CodeGen/CGHLSLBuiltins.cpp | 16
-rw-r--r-- clang/lib/CodeGen/CGHLSLRuntime.cpp | 231
-rw-r--r-- clang/lib/CodeGen/CGHLSLRuntime.h | 6
-rw-r--r-- clang/lib/CodeGen/CGNonTrivialStruct.cpp | 11
-rw-r--r-- clang/lib/CodeGen/CGObjC.cpp | 4
-rw-r--r-- clang/lib/CodeGen/CGObjCMac.cpp | 24
-rw-r--r-- clang/lib/CodeGen/CGObjCRuntime.cpp | 4
-rw-r--r-- clang/lib/CodeGen/CGOpenMPRuntime.cpp | 172
-rw-r--r-- clang/lib/CodeGen/CGOpenMPRuntime.h | 63
-rw-r--r-- clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp | 57
-rw-r--r-- clang/lib/CodeGen/CGOpenMPRuntimeGPU.h | 26
-rw-r--r-- clang/lib/CodeGen/CGPointerAuth.cpp | 10
-rw-r--r-- clang/lib/CodeGen/CGRecordLayoutBuilder.cpp | 5
-rw-r--r-- clang/lib/CodeGen/CGStmt.cpp | 4
-rw-r--r-- clang/lib/CodeGen/CGStmtOpenMP.cpp | 25
-rw-r--r-- clang/lib/CodeGen/CGVTables.cpp | 8
-rw-r--r-- clang/lib/CodeGen/CMakeLists.txt | 1
-rw-r--r-- clang/lib/CodeGen/CodeGenFunction.cpp | 72
-rw-r--r-- clang/lib/CodeGen/CodeGenFunction.h | 55
-rw-r--r-- clang/lib/CodeGen/CodeGenModule.cpp | 134
-rw-r--r-- clang/lib/CodeGen/CodeGenModule.h | 8
-rw-r--r-- clang/lib/CodeGen/CodeGenTBAA.cpp | 20
-rw-r--r-- clang/lib/CodeGen/CodeGenTypes.cpp | 35
-rw-r--r-- clang/lib/CodeGen/CoverageMappingGen.cpp | 10
-rw-r--r-- clang/lib/CodeGen/EHScopeStack.h | 2
-rw-r--r-- clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp | 27
-rw-r--r-- clang/lib/CodeGen/ItaniumCXXABI.cpp | 86
-rw-r--r-- clang/lib/CodeGen/MicrosoftCXXABI.cpp | 25
-rw-r--r-- clang/lib/CodeGen/SwiftCallingConv.cpp | 11
-rw-r--r-- clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp | 7
-rw-r--r-- clang/lib/CodeGen/TargetBuiltins/ARM.cpp | 14
-rw-r--r-- clang/lib/CodeGen/TargetBuiltins/PPC.cpp | 5
-rw-r--r-- clang/lib/CodeGen/TargetBuiltins/RISCV.cpp | 949
-rw-r--r-- clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp | 29
-rw-r--r-- clang/lib/CodeGen/TargetBuiltins/X86.cpp | 59
-rw-r--r-- clang/lib/CodeGen/TargetInfo.cpp | 7
-rw-r--r-- clang/lib/CodeGen/Targets/AArch64.cpp | 17
-rw-r--r-- clang/lib/CodeGen/Targets/AMDGPU.cpp | 19
-rw-r--r-- clang/lib/CodeGen/Targets/ARC.cpp | 9
-rw-r--r-- clang/lib/CodeGen/Targets/ARM.cpp | 25
-rw-r--r-- clang/lib/CodeGen/Targets/BPF.cpp | 8
-rw-r--r-- clang/lib/CodeGen/Targets/CSKY.cpp | 6
-rw-r--r-- clang/lib/CodeGen/Targets/DirectX.cpp | 3
-rw-r--r-- clang/lib/CodeGen/Targets/Hexagon.cpp | 8
-rw-r--r-- clang/lib/CodeGen/Targets/Lanai.cpp | 9
-rw-r--r-- clang/lib/CodeGen/Targets/LoongArch.cpp | 15
-rw-r--r-- clang/lib/CodeGen/Targets/Mips.cpp | 16
-rw-r--r-- clang/lib/CodeGen/Targets/NVPTX.cpp | 55
-rw-r--r-- clang/lib/CodeGen/Targets/PPC.cpp | 11
-rw-r--r-- clang/lib/CodeGen/Targets/RISCV.cpp | 192
-rw-r--r-- clang/lib/CodeGen/Targets/SPIR.cpp | 79
-rw-r--r-- clang/lib/CodeGen/Targets/Sparc.cpp | 5
-rw-r--r-- clang/lib/CodeGen/Targets/SystemZ.cpp | 16
-rw-r--r-- clang/lib/CodeGen/Targets/WebAssembly.cpp | 5
-rw-r--r-- clang/lib/CodeGen/Targets/X86.cpp | 91
-rw-r--r-- clang/lib/CodeGen/Targets/XCore.cpp | 12
-rw-r--r-- clang/lib/CodeGen/TrapReasonBuilder.cpp | 50
-rw-r--r-- clang/lib/CodeGen/TrapReasonBuilder.h | 112
-rw-r--r-- clang/lib/Driver/Action.cpp | 7
-rw-r--r-- clang/lib/Driver/CMakeLists.txt | 1
-rw-r--r-- clang/lib/Driver/Compilation.cpp | 7
-rw-r--r-- clang/lib/Driver/Driver.cpp | 87
-rw-r--r-- clang/lib/Driver/OffloadBundler.cpp | 3
-rw-r--r-- clang/lib/Driver/SanitizerArgs.cpp | 8
-rw-r--r-- clang/lib/Driver/ToolChain.cpp | 16
-rw-r--r-- clang/lib/Driver/ToolChains/Arch/AArch64.cpp | 16
-rw-r--r-- clang/lib/Driver/ToolChains/Arch/RISCV.cpp | 11
-rw-r--r-- clang/lib/Driver/ToolChains/BareMetal.cpp | 19
-rw-r--r-- clang/lib/Driver/ToolChains/Clang.cpp | 52
-rw-r--r-- clang/lib/Driver/ToolChains/CommonArgs.cpp | 27
-rw-r--r-- clang/lib/Driver/ToolChains/Cuda.cpp | 9
-rw-r--r-- clang/lib/Driver/ToolChains/Darwin.cpp | 38
-rw-r--r-- clang/lib/Driver/ToolChains/Flang.cpp | 16
-rw-r--r-- clang/lib/Driver/ToolChains/Gnu.cpp | 233
-rw-r--r-- clang/lib/Driver/ToolChains/Gnu.h | 101
-rw-r--r-- clang/lib/Driver/ToolChains/HLSL.cpp | 130
-rw-r--r-- clang/lib/Driver/ToolChains/HLSL.h | 22
-rw-r--r-- clang/lib/Driver/ToolChains/Hurd.cpp | 14
-rw-r--r-- clang/lib/Driver/ToolChains/Linux.cpp | 18
-rw-r--r-- clang/lib/Driver/ToolChains/Managarm.cpp | 4
-rw-r--r-- clang/lib/Driver/ToolChains/OpenBSD.h | 5
-rw-r--r-- clang/lib/Edit/RewriteObjCFoundationAPI.cpp | 2
-rw-r--r-- clang/lib/ExtractAPI/DeclarationFragments.cpp | 110
-rw-r--r-- clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp | 2
-rw-r--r-- clang/lib/Format/CMakeLists.txt | 1
-rw-r--r-- clang/lib/Format/ContinuationIndenter.cpp | 11
-rw-r--r-- clang/lib/Format/Format.cpp | 24
-rw-r--r-- clang/lib/Format/NumericLiteralInfo.cpp | 65
-rw-r--r-- clang/lib/Format/NumericLiteralInfo.h | 29
-rw-r--r-- clang/lib/Format/TokenAnnotator.cpp | 43
-rw-r--r-- clang/lib/Format/UnwrappedLineFormatter.cpp | 14
-rw-r--r-- clang/lib/Format/UnwrappedLineParser.cpp | 36
-rw-r--r-- clang/lib/Frontend/ASTConsumers.cpp | 7
-rw-r--r-- clang/lib/Frontend/ASTUnit.cpp | 2
-rw-r--r-- clang/lib/Frontend/CompilerInstance.cpp | 22
-rw-r--r-- clang/lib/Frontend/CompilerInvocation.cpp | 107
-rw-r--r-- clang/lib/Frontend/FrontendActions.cpp | 83
-rw-r--r-- clang/lib/Frontend/InitPreprocessor.cpp | 10
-rw-r--r-- clang/lib/Frontend/LayoutOverrideSource.cpp | 1
-rw-r--r-- clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp | 73
-rw-r--r-- clang/lib/Frontend/Rewrite/RewriteObjC.cpp | 21
-rw-r--r-- clang/lib/Frontend/TextDiagnostic.cpp | 2
-rw-r--r-- clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp | 3
-rw-r--r-- clang/lib/Headers/avx10_2_512bf16intrin.h | 16
-rw-r--r-- clang/lib/Headers/avx10_2bf16intrin.h | 32
-rw-r--r-- clang/lib/Headers/avx2intrin.h | 338
-rw-r--r-- clang/lib/Headers/avx512bitalgintrin.h | 56
-rw-r--r-- clang/lib/Headers/avx512bwintrin.h | 212
-rw-r--r-- clang/lib/Headers/avx512cdintrin.h | 43
-rw-r--r-- clang/lib/Headers/avx512dqintrin.h | 57
-rw-r--r-- clang/lib/Headers/avx512fintrin.h | 676
-rw-r--r-- clang/lib/Headers/avx512fp16intrin.h | 29
-rw-r--r-- clang/lib/Headers/avx512vbmi2intrin.h | 27
-rw-r--r-- clang/lib/Headers/avx512vlbitalgintrin.h | 106
-rw-r--r-- clang/lib/Headers/avx512vlbwintrin.h | 336
-rw-r--r-- clang/lib/Headers/avx512vlcdintrin.h | 82
-rw-r--r-- clang/lib/Headers/avx512vldqintrin.h | 35
-rw-r--r-- clang/lib/Headers/avx512vlfp16intrin.h | 92
-rw-r--r-- clang/lib/Headers/avx512vlintrin.h | 726
-rw-r--r-- clang/lib/Headers/avx512vlvbmi2intrin.h | 54
-rw-r--r-- clang/lib/Headers/avx512vpopcntdqintrin.h | 17
-rw-r--r-- clang/lib/Headers/avx512vpopcntdqvlintrin.h | 28
-rw-r--r-- clang/lib/Headers/avxintrin.h | 119
-rw-r--r-- clang/lib/Headers/cpuid.h | 5
-rw-r--r-- clang/lib/Headers/emmintrin.h | 203
-rw-r--r-- clang/lib/Headers/f16cintrin.h | 13
-rw-r--r-- clang/lib/Headers/fma4intrin.h | 138
-rw-r--r-- clang/lib/Headers/fmaintrin.h | 90
-rw-r--r-- clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h | 6
-rw-r--r-- clang/lib/Headers/mmintrin.h | 223
-rw-r--r-- clang/lib/Headers/ptrauth.h | 4
-rw-r--r-- clang/lib/Headers/smmintrin.h | 55
-rw-r--r-- clang/lib/Headers/tmmintrin.h | 44
-rw-r--r-- clang/lib/Headers/xmmintrin.h | 22
-rw-r--r-- clang/lib/Headers/xopintrin.h | 22
-rw-r--r-- clang/lib/Index/IndexSymbol.cpp | 3
-rw-r--r-- clang/lib/Index/IndexTypeSourceInfo.cpp | 50
-rw-r--r-- clang/lib/Index/USRGeneration.cpp | 22
-rw-r--r-- clang/lib/InstallAPI/Visitor.cpp | 8
-rw-r--r-- clang/lib/Interpreter/CMakeLists.txt | 1
-rw-r--r-- clang/lib/Interpreter/DeviceOffload.cpp | 5
-rw-r--r-- clang/lib/Interpreter/DeviceOffload.h | 5
-rw-r--r-- clang/lib/Interpreter/IncrementalAction.cpp | 152
-rw-r--r-- clang/lib/Interpreter/IncrementalAction.h | 90
-rw-r--r-- clang/lib/Interpreter/IncrementalParser.cpp | 30
-rw-r--r-- clang/lib/Interpreter/IncrementalParser.h | 19
-rw-r--r-- clang/lib/Interpreter/Interpreter.cpp | 199
-rw-r--r-- clang/lib/Interpreter/InterpreterValuePrinter.cpp | 20
-rw-r--r-- clang/lib/Interpreter/Value.cpp | 19
-rw-r--r-- clang/lib/Lex/DependencyDirectivesScanner.cpp | 50
-rw-r--r-- clang/lib/Lex/Lexer.cpp | 9
-rw-r--r-- clang/lib/Lex/ModuleMapFile.cpp | 34
-rw-r--r-- clang/lib/Lex/PPDirectives.cpp | 6
-rw-r--r-- clang/lib/Lex/Preprocessor.cpp | 40
-rw-r--r-- clang/lib/Parse/ParseCXXInlineMethods.cpp | 6
-rw-r--r-- clang/lib/Parse/ParseDecl.cpp | 7
-rw-r--r-- clang/lib/Parse/ParseDeclCXX.cpp | 54
-rw-r--r-- clang/lib/Parse/ParseExpr.cpp | 3
-rw-r--r-- clang/lib/Parse/ParseExprCXX.cpp | 13
-rw-r--r-- clang/lib/Parse/ParseHLSLRootSignature.cpp | 38
-rw-r--r-- clang/lib/Parse/ParseTemplate.cpp | 31
-rw-r--r-- clang/lib/Parse/ParseTentative.cpp | 2
-rw-r--r-- clang/lib/Parse/Parser.cpp | 25
-rw-r--r-- clang/lib/Sema/AnalysisBasedWarnings.cpp | 42
-rw-r--r-- clang/lib/Sema/DeclSpec.cpp | 29
-rw-r--r-- clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp | 23
-rw-r--r-- clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h | 2
-rw-r--r-- clang/lib/Sema/HeuristicResolver.cpp | 67
-rw-r--r-- clang/lib/Sema/Sema.cpp | 32
-rw-r--r-- clang/lib/Sema/SemaAPINotes.cpp | 11
-rw-r--r-- clang/lib/Sema/SemaAccess.cpp | 43
-rw-r--r-- clang/lib/Sema/SemaAvailability.cpp | 4
-rw-r--r-- clang/lib/Sema/SemaBPF.cpp | 16
-rw-r--r-- clang/lib/Sema/SemaCUDA.cpp | 14
-rw-r--r-- clang/lib/Sema/SemaCXXScopeSpec.cpp | 328
-rw-r--r-- clang/lib/Sema/SemaCast.cpp | 84
-rw-r--r-- clang/lib/Sema/SemaChecking.cpp | 363
-rw-r--r-- clang/lib/Sema/SemaCodeComplete.cpp | 268
-rw-r--r-- clang/lib/Sema/SemaConcept.cpp | 13
-rw-r--r-- clang/lib/Sema/SemaCoroutine.cpp | 35
-rw-r--r-- clang/lib/Sema/SemaDecl.cpp | 876
-rw-r--r-- clang/lib/Sema/SemaDeclAttr.cpp | 69
-rw-r--r-- clang/lib/Sema/SemaDeclCXX.cpp | 553
-rw-r--r-- clang/lib/Sema/SemaDeclObjC.cpp | 43
-rw-r--r-- clang/lib/Sema/SemaExceptionSpec.cpp | 4
-rw-r--r-- clang/lib/Sema/SemaExpr.cpp | 361
-rw-r--r-- clang/lib/Sema/SemaExprCXX.cpp | 200
-rw-r--r-- clang/lib/Sema/SemaExprMember.cpp | 7
-rw-r--r-- clang/lib/Sema/SemaExprObjC.cpp | 18
-rw-r--r-- clang/lib/Sema/SemaFunctionEffects.cpp | 8
-rw-r--r-- clang/lib/Sema/SemaHLSL.cpp | 255
-rw-r--r-- clang/lib/Sema/SemaInit.cpp | 286
-rw-r--r-- clang/lib/Sema/SemaLambda.cpp | 279
-rw-r--r-- clang/lib/Sema/SemaLookup.cpp | 234
-rw-r--r-- clang/lib/Sema/SemaModule.cpp | 8
-rw-r--r-- clang/lib/Sema/SemaObjC.cpp | 11
-rw-r--r-- clang/lib/Sema/SemaObjCProperty.cpp | 4
-rw-r--r-- clang/lib/Sema/SemaOpenACC.cpp | 177
-rw-r--r-- clang/lib/Sema/SemaOpenACCClause.cpp | 134
-rw-r--r-- clang/lib/Sema/SemaOpenMP.cpp | 69
-rw-r--r-- clang/lib/Sema/SemaOverload.cpp | 212
-rw-r--r-- clang/lib/Sema/SemaPPC.cpp | 4
-rw-r--r-- clang/lib/Sema/SemaRISCV.cpp | 18
-rw-r--r-- clang/lib/Sema/SemaSYCL.cpp | 19
-rw-r--r-- clang/lib/Sema/SemaStmt.cpp | 64
-rw-r--r-- clang/lib/Sema/SemaStmtAsm.cpp | 20
-rw-r--r-- clang/lib/Sema/SemaSwift.cpp | 15
-rw-r--r-- clang/lib/Sema/SemaTemplate.cpp | 660
-rw-r--r-- clang/lib/Sema/SemaTemplateDeduction.cpp | 209
-rw-r--r-- clang/lib/Sema/SemaTemplateDeductionGuide.cpp | 63
-rw-r--r-- clang/lib/Sema/SemaTemplateInstantiate.cpp | 366
-rw-r--r-- clang/lib/Sema/SemaTemplateInstantiateDecl.cpp | 140
-rw-r--r-- clang/lib/Sema/SemaTemplateVariadic.cpp | 161
-rw-r--r-- clang/lib/Sema/SemaType.cpp | 228
-rw-r--r-- clang/lib/Sema/SemaTypeTraits.cpp | 157
-rw-r--r-- clang/lib/Sema/SemaWasm.cpp | 36
-rw-r--r-- clang/lib/Sema/TreeTransform.h | 1436
-rw-r--r-- clang/lib/Sema/UsedDeclVisitor.h | 9
-rw-r--r-- clang/lib/Serialization/ASTReader.cpp | 143
-rw-r--r-- clang/lib/Serialization/ASTReaderDecl.cpp | 26
-rw-r--r-- clang/lib/Serialization/ASTWriter.cpp | 120
-rw-r--r-- clang/lib/Serialization/ASTWriterDecl.cpp | 7
-rw-r--r-- clang/lib/Serialization/ASTWriterStmt.cpp | 2
-rw-r--r-- clang/lib/Serialization/TemplateArgumentHasher.cpp | 8
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp | 3
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp | 4
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp | 6
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp | 4
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp | 5
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp | 5
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp | 5
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp | 2
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp | 10
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp | 5
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp | 21
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp | 2
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp | 2
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp | 4
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp | 5
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp | 5
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp | 11
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp | 6
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp | 20
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp | 2
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp | 76
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h | 57
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp | 83
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h | 59
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp | 4
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/StoreToImmutableChecker.cpp | 51
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp | 4
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp | 2
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp | 3
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp | 7
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp | 35
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp | 8
-rw-r--r-- clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp | 2
-rw-r--r-- clang/lib/StaticAnalyzer/Core/CallEvent.cpp | 11
-rw-r--r-- clang/lib/StaticAnalyzer/Core/CheckerManager.cpp | 16
-rw-r--r-- clang/lib/StaticAnalyzer/Core/ExprEngine.cpp | 13
-rw-r--r-- clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp | 2
-rw-r--r-- clang/lib/StaticAnalyzer/Core/MemRegion.cpp | 4
-rw-r--r-- clang/lib/StaticAnalyzer/Core/RegionStore.cpp | 7
-rw-r--r-- clang/lib/StaticAnalyzer/Core/SValBuilder.cpp | 4
-rw-r--r-- clang/lib/Tooling/ASTDiff/ASTDiff.cpp | 13
-rw-r--r-- clang/lib/Tooling/Refactoring/Lookup.cpp | 14
-rw-r--r-- clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp | 167
-rw-r--r-- clang/lib/Tooling/Syntax/BuildTree.cpp | 171
-rw-r--r-- clang/lib/Tooling/Tooling.cpp | 15
-rw-r--r-- clang/lib/Tooling/Transformer/RangeSelector.cpp | 8
-rw-r--r-- clang/test/APINotes/Inputs/Frameworks/Simple.framework/Headers/Simple.apinotes | 3
-rw-r--r-- clang/test/APINotes/Inputs/Headers/SwiftImportAs.apinotes | 7
-rw-r--r-- clang/test/APINotes/Inputs/Headers/SwiftImportAs.h | 7
-rw-r--r-- clang/test/APINotes/swift-import-as.cpp | 14
-rw-r--r-- clang/test/APINotes/yaml-roundtrip.test | 2
-rw-r--r-- clang/test/AST/ByteCode/arrays.cpp | 41
-rw-r--r-- clang/test/AST/ByteCode/builtin-functions.cpp | 60
-rw-r--r-- clang/test/AST/ByteCode/builtin-object-size-codegen.cpp | 87
-rw-r--r-- clang/test/AST/ByteCode/c.c | 39
-rw-r--r-- clang/test/AST/ByteCode/constexpr-vectors.cpp | 7
-rw-r--r-- clang/test/AST/ByteCode/cxx11.cpp | 32
-rw-r--r-- clang/test/AST/ByteCode/cxx20.cpp | 16
-rw-r--r-- clang/test/AST/ByteCode/cxx23.cpp | 5
-rw-r--r-- clang/test/AST/ByteCode/cxx2a.cpp | 28
-rw-r--r-- clang/test/AST/ByteCode/cxx98.cpp | 24
-rw-r--r-- clang/test/AST/ByteCode/functions.cpp | 29
-rw-r--r-- clang/test/AST/ByteCode/invalid.cpp | 8
-rw-r--r-- clang/test/AST/ByteCode/lifetimes.cpp | 21
-rw-r--r-- clang/test/AST/ByteCode/lifetimes26.cpp | 23
-rw-r--r-- clang/test/AST/ByteCode/literals.cpp | 21
-rw-r--r-- clang/test/AST/ByteCode/new-delete.cpp | 28
-rw-r--r-- clang/test/AST/ByteCode/records.cpp | 16
-rw-r--r-- clang/test/AST/ByteCode/typeid.cpp | 14
-rw-r--r-- clang/test/AST/ByteCode/unions.cpp | 12
-rw-r--r-- clang/test/AST/ByteCode/vectors.cpp | 31
-rw-r--r-- clang/test/AST/HLSL/RootSignatures-AST.hlsl | 2
-rw-r--r-- clang/test/AST/HLSL/StructuredBuffers-AST.hlsl | 30
-rw-r--r-- clang/test/AST/HLSL/TypedBuffers-AST.hlsl | 18
-rw-r--r-- clang/test/AST/HLSL/cbuffer_and_namespaces.hlsl | 4
-rw-r--r-- clang/test/AST/HLSL/resource_binding_attr.hlsl | 48
-rw-r--r-- clang/test/AST/HLSL/rootsignature-define-ast.hlsl | 62
-rw-r--r-- clang/test/AST/HLSL/vector-constructors.hlsl | 8
-rw-r--r-- clang/test/AST/arm-mfp8.cpp | 2
-rw-r--r-- clang/test/AST/ast-dump-color.cpp | 16
-rw-r--r-- clang/test/AST/ast-dump-comment.cpp | 48
-rw-r--r-- clang/test/AST/ast-dump-ctad-alias.cpp | 23
-rw-r--r-- clang/test/AST/ast-dump-cxx2b-deducing-this.cpp | 2
-rw-r--r-- clang/test/AST/ast-dump-decl-json.c | 3
-rw-r--r-- clang/test/AST/ast-dump-decl.cpp | 66
-rw-r--r-- clang/test/AST/ast-dump-expr-json.cpp | 14
-rw-r--r-- clang/test/AST/ast-dump-expr.cpp | 7
-rw-r--r-- clang/test/AST/ast-dump-for-range-lifetime.cpp | 318
-rw-r--r-- clang/test/AST/ast-dump-openmp-begin-declare-variant_reference.cpp | 24
-rw-r--r-- clang/test/AST/ast-dump-openmp-begin-declare-variant_template_3.cpp | 2
-rw-r--r-- clang/test/AST/ast-dump-record-definition-data-json.cpp | 5
-rw-r--r-- clang/test/AST/ast-dump-records-json.cpp | 2
-rw-r--r-- clang/test/AST/ast-dump-records.c | 8
-rw-r--r-- clang/test/AST/ast-dump-records.cpp | 8
-rw-r--r-- clang/test/AST/ast-dump-recovery.cpp | 6
-rw-r--r-- clang/test/AST/ast-dump-stmt-json.cpp | 86
-rw-r--r-- clang/test/AST/ast-dump-stmt.m | 2
-rw-r--r-- clang/test/AST/ast-dump-template-decls.cpp | 3
-rw-r--r-- clang/test/AST/ast-dump-template-json-win32-mangler-crash.cpp | 112
-rw-r--r-- clang/test/AST/ast-dump-template-name.cpp | 47
-rw-r--r-- clang/test/AST/ast-dump-templates.cpp | 1194
-rw-r--r-- clang/test/AST/ast-dump-traits.cpp | 5
-rw-r--r-- clang/test/AST/ast-dump-types-json.cpp | 77
-rw-r--r-- clang/test/AST/ast-dump-using-template.cpp | 38
-rw-r--r-- clang/test/AST/ast-dump-using.cpp | 16
-rw-r--r-- clang/test/AST/ast-print-openacc-combined-construct.cpp | 9
-rw-r--r-- clang/test/AST/ast-print-openacc-compute-construct.cpp | 9
-rw-r--r-- clang/test/AST/ast-print-openacc-loop-construct.cpp | 10
-rw-r--r-- clang/test/AST/attr-swift_attr.m | 2
-rw-r--r-- clang/test/AST/coroutine-locals-cleanup.cpp | 4
-rw-r--r-- clang/test/AST/cxx2c-variadic-friends.cpp | 4
-rw-r--r-- clang/test/AST/deduction-guides.cpp | 18
-rw-r--r-- clang/test/AST/float16.cpp | 2
-rw-r--r-- clang/test/AST/sourceranges.cpp | 6
-rw-r--r-- clang/test/ASTSYCL/ast-dump-sycl-kernel-call-stmt.cpp | 8
-rw-r--r-- clang/test/Analysis/LifetimeSafety/benchmark.py | 227
-rw-r--r-- clang/test/Analysis/Malloc+MismatchedDeallocator+NewDelete.cpp | 4
-rw-r--r-- clang/test/Analysis/Malloc+MismatchedDeallocator_intersections.cpp | 2
-rw-r--r-- clang/test/Analysis/MismatchedDeallocator-checker-test.mm | 4
-rw-r--r-- clang/test/Analysis/NewDelete-checker-test.cpp | 6
-rw-r--r-- clang/test/Analysis/NewDelete-intersections.mm | 3
-rw-r--r-- clang/test/Analysis/anonymous-decls.cpp | 4
-rw-r--r-- clang/test/Analysis/castsize.c | 2
-rw-r--r-- clang/test/Analysis/element-region-address-space.c | 18
-rw-r--r-- clang/test/Analysis/malloc-annotations.c | 1
-rw-r--r-- clang/test/Analysis/malloc-checker-arg-uaf.c | 44
-rw-r--r-- clang/test/Analysis/malloc-sizeof.c | 2
-rw-r--r-- clang/test/Analysis/malloc.c | 1
-rw-r--r-- clang/test/Analysis/unix-fns.c | 4
-rw-r--r-- clang/test/C/C11/n1285_1.c | 44
-rw-r--r-- clang/test/CIR/CodeGen/array.cpp | 199
-rw-r--r-- clang/test/CIR/CodeGen/atomic.c | 206
-rw-r--r-- clang/test/CIR/CodeGen/bitfield-union.c | 41
-rw-r--r-- clang/test/CIR/CodeGen/builtin_call.cpp | 32
-rw-r--r-- clang/test/CIR/CodeGen/builtin_printf.cpp | 8
-rw-r--r-- clang/test/CIR/CodeGen/builtins.cpp | 42
-rw-r--r-- clang/test/CIR/CodeGen/call.cpp | 16
-rw-r--r-- clang/test/CIR/CodeGen/class.cpp | 19
-rw-r--r-- clang/test/CIR/CodeGen/complex-compound-assignment.cpp | 453
-rw-r--r-- clang/test/CIR/CodeGen/complex-mul-div.cpp | 1005
-rw-r--r-- clang/test/CIR/CodeGen/complex-unary.cpp | 135
-rw-r--r-- clang/test/CIR/CodeGen/complex.cpp | 110
-rw-r--r-- clang/test/CIR/CodeGen/destructors.cpp | 99
-rw-r--r-- clang/test/CIR/CodeGen/function-to-pointer-decay.c | 47
-rw-r--r-- clang/test/CIR/CodeGen/globals.cpp | 37
-rw-r--r-- clang/test/CIR/CodeGen/goto.cpp | 305
-rw-r--r-- clang/test/CIR/CodeGen/inline-asm.c | 24
-rw-r--r-- clang/test/CIR/CodeGen/label.c | 186
-rw-r--r-- clang/test/CIR/CodeGen/lang-c-cpp.cpp | 11
-rw-r--r-- clang/test/CIR/CodeGen/local-vars.cpp | 230
-rw-r--r-- clang/test/CIR/CodeGen/module-asm.c | 6
-rw-r--r-- clang/test/CIR/CodeGen/multi-vtable.cpp | 183
-rw-r--r-- clang/test/CIR/CodeGen/statement-exprs.c | 277
-rw-r--r-- clang/test/CIR/CodeGen/static-vars.cpp | 33
-rw-r--r-- clang/test/CIR/CodeGen/stmt-expr.cpp | 90
-rw-r--r-- clang/test/CIR/CodeGen/string-literals.c | 12
-rw-r--r-- clang/test/CIR/CodeGen/string-literals.cpp | 34
-rw-r--r-- clang/test/CIR/CodeGen/throws.cpp | 85
-rw-r--r-- clang/test/CIR/CodeGen/var_arg.c | 166
-rw-r--r-- clang/test/CIR/CodeGen/vbase.cpp | 70
-rw-r--r-- clang/test/CIR/CodeGen/virtual-function-calls.cpp | 76
-rw-r--r-- clang/test/CIR/CodeGen/vtable-emission.cpp | 38
-rw-r--r-- clang/test/CIR/CodeGen/vtt.cpp | 45
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp | 779
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp | 428
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp | 366
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp | 657
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp | 363
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp | 655
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause-templates.cpp | 95
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c | 340
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp | 716
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-private-clause.c | 223
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c | 396
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp | 429
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c | 361
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp | 366
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp | 657
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c | 361
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp | 363
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp | 655
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/init.c | 4
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp | 428
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp | 366
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp | 657
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp | 363
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp | 655
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/openacc-not-implemented.cpp | 10
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/shutdown.c | 4
-rw-r--r-- clang/test/CIR/IR/inline-asm.cir | 112
-rw-r--r-- clang/test/CIR/IR/invalid-const-record.cir | 23
-rw-r--r-- clang/test/CIR/IR/invalid-goto.cir | 9
-rw-r--r-- clang/test/CIR/IR/invalid-label.cir | 12
-rw-r--r-- clang/test/CIR/IR/invalid-lang-attr.cir | 5
-rw-r--r-- clang/test/CIR/IR/invalid-throw.cir | 16
-rw-r--r-- clang/test/CIR/IR/invalid-vtable.cir | 134
-rw-r--r-- clang/test/CIR/IR/label.cir | 26
-rw-r--r-- clang/test/CIR/IR/module.cir | 12
-rw-r--r-- clang/test/CIR/IR/struct.cir | 13
-rw-r--r-- clang/test/CIR/IR/throw.cir | 63
-rw-r--r-- clang/test/CIR/IR/vtable-addrpt.cir | 23
-rw-r--r-- clang/test/CIR/IR/vtable-attr.cir | 19
-rw-r--r-- clang/test/CIR/IR/vtt-addrpoint.cir | 55
-rw-r--r-- clang/test/CIR/Lowering/array.cpp | 91
-rw-r--r-- clang/test/CIR/Lowering/goto.cir | 52
-rw-r--r-- clang/test/CIR/Lowering/inline-asm.cir | 86
-rw-r--r-- clang/test/CIR/Lowering/module-asm.cir | 11
-rw-r--r-- clang/test/CIR/Lowering/vtt-addrpoint.cir | 59
-rw-r--r-- clang/test/CMakeLists.txt | 1
-rw-r--r-- clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp | 2
-rw-r--r-- clang/test/CXX/class.access/p6.cpp | 2
-rw-r--r-- clang/test/CXX/class.derived/class.derived.general/p2.cpp | 8
-rw-r--r-- clang/test/CXX/class/class.mem/class.mem.general/p8.cpp | 8
-rw-r--r-- clang/test/CXX/class/class.mem/p13.cpp | 9
-rw-r--r-- clang/test/CXX/class/class.union/class.union.anon/p4.cpp | 25
-rw-r--r-- clang/test/CXX/dcl.dcl/dcl.attr/dcl.attr.nodiscard/p2.cpp | 2
-rw-r--r-- clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp | 2
-rw-r--r-- clang/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p5-examples.cpp | 2
-rw-r--r-- clang/test/CXX/dcl.decl/dcl.meaning/dcl.mptr/p2.cpp | 2
-rw-r--r-- clang/test/CXX/dcl/dcl.fct/p17.cpp | 2
-rw-r--r-- clang/test/CXX/drs/cwg0xx.cpp | 10
-rw-r--r-- clang/test/CXX/drs/cwg12xx.cpp | 2
-rw-r--r-- clang/test/CXX/drs/cwg13xx.cpp | 2
-rw-r--r-- clang/test/CXX/drs/cwg2149.cpp | 8
-rw-r--r-- clang/test/CXX/drs/cwg26xx.cpp | 8
-rw-r--r-- clang/test/CXX/drs/cwg279.cpp | 4
-rw-r--r-- clang/test/CXX/drs/cwg28xx.cpp | 12
-rw-r--r-- clang/test/CXX/drs/cwg2xx.cpp | 10
-rw-r--r-- clang/test/CXX/drs/cwg3xx.cpp | 10
-rw-r--r-- clang/test/CXX/drs/cwg4xx.cpp | 2
-rw-r--r-- clang/test/CXX/drs/cwg6xx.cpp | 6
-rw-r--r-- clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp | 2
-rw-r--r-- clang/test/CXX/module/cpp.pre/module_decl.cpp | 141
-rw-r--r-- clang/test/CXX/over/over.match/over.match.funcs/over.match.class.deduct/p2.cpp | 2
-rw-r--r-- clang/test/CXX/over/over.match/over.match.funcs/over.match.oper/p3.cpp | 1
-rw-r--r-- clang/test/CXX/stmt.stmt/stmt.select/stmt.if/p2.cpp | 16
-rw-r--r-- clang/test/CXX/temp/temp.arg/temp.arg.nontype/p5.cpp | 4
-rw-r--r-- clang/test/CXX/temp/temp.constr/temp.constr.decl/func-template-decl.cpp | 2
-rw-r--r-- clang/test/CXX/temp/temp.decls/temp.class.spec/p6.cpp | 4
-rw-r--r-- clang/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p3-0x.cpp | 4
-rw-r--r-- clang/test/CXX/temp/temp.param/p15-cxx0x.cpp | 4
-rw-r--r-- clang/test/CXX/temp/temp.res/temp.dep/temp.dep.type/p4.cpp | 40
-rw-r--r-- clang/test/ClangScanDeps/link-libraries-diag-dup.c | 57
-rw-r--r-- clang/test/CodeCompletion/cpp23-explicit-object.cpp | 153
-rw-r--r-- clang/test/CodeCompletion/skip-explicit-object-parameter.cpp | 48
-rw-r--r-- clang/test/CodeGen/2007-01-20-VectorICE.c | 6
-rw-r--r-- clang/test/CodeGen/2007-06-18-SextAttrAggregate.c | 2
-rw-r--r-- clang/test/CodeGen/AArch64/ABI-align-packed.c | 56
-rw-r--r-- clang/test/CodeGen/AArch64/byval-temp.c | 24
-rw-r--r-- clang/test/CodeGen/AArch64/pure-scalable-args-empty-union.c | 2
-rw-r--r-- clang/test/CodeGen/AArch64/pure-scalable-args.c | 10
-rw-r--r-- clang/test/CodeGen/AArch64/sme-intrinsics/acle_sme_ld1_vnum.c | 364
-rw-r--r-- clang/test/CodeGen/AArch64/sme-intrinsics/acle_sme_st1_vnum.c | 364
-rw-r--r-- clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c | 4
-rw-r--r-- clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp | 4
-rw-r--r-- clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesd.c | 13
-rw-r--r-- clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aese.c | 13
-rw-r--r-- clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesimc.c | 15
-rw-r--r-- clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesmc.c | 13
-rw-r--r-- clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_pmullb_128.c | 21
-rw-r--r-- clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_pmullt_128.c | 17
-rw-r--r-- clang/test/CodeGen/LoongArch/targetattr-lasx.c | 7
-rw-r--r-- clang/test/CodeGen/PowerPC/builtins-bcd-format-conversion.c | 29
-rw-r--r-- clang/test/CodeGen/PowerPC/builtins-ppc-dmf.c | 36
-rw-r--r-- clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c | 4
-rw-r--r-- clang/test/CodeGen/RISCV/riscv-inline-asm-fixed-length-vector.c | 66
-rw-r--r-- clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp | 8
-rw-r--r-- clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c | 93
-rw-r--r-- clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.cpp | 32
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfncvtbf16-out-of-range.c | 31
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfwmaccbf16-out-of-range.c | 66
-rw-r--r-- clang/test/CodeGen/WebAssembly/builtins-test-fp-sig.c | 70
-rw-r--r-- clang/test/CodeGen/X86/avx-builtins.c | 49
-rw-r--r-- clang/test/CodeGen/X86/avx2-builtins.c | 106
-rw-r--r-- clang/test/CodeGen/X86/avx512-reduceIntrin.c | 114
-rw-r--r-- clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c | 116
-rw-r--r-- clang/test/CodeGen/X86/avx512bf16-builtins.c | 69
-rw-r--r-- clang/test/CodeGen/X86/avx512bitalg-builtins.c | 36
-rw-r--r-- clang/test/CodeGen/X86/avx512bw-builtins.c | 738
-rw-r--r-- clang/test/CodeGen/X86/avx512cd-builtins.c | 93
-rw-r--r-- clang/test/CodeGen/X86/avx512dq-builtins.c | 469
-rw-r--r-- clang/test/CodeGen/X86/avx512f-builtins.c | 239
-rw-r--r-- clang/test/CodeGen/X86/avx512fp16-builtins.c | 914
-rw-r--r-- clang/test/CodeGen/X86/avx512ifma-builtins.c | 29
-rw-r--r-- clang/test/CodeGen/X86/avx512ifmavl-builtins.c | 53
-rw-r--r-- clang/test/CodeGen/X86/avx512vbmi-builtins.c | 45
-rw-r--r-- clang/test/CodeGen/X86/avx512vbmi2-builtins.c | 205
-rw-r--r-- clang/test/CodeGen/X86/avx512vbmivl-builtin.c | 85
-rw-r--r-- clang/test/CodeGen/X86/avx512vl-builtins.c | 2668
-rw-r--r-- clang/test/CodeGen/X86/avx512vlbf16-builtins.c | 148
-rw-r--r-- clang/test/CodeGen/X86/avx512vlbitalg-builtins.c | 60
-rw-r--r-- clang/test/CodeGen/X86/avx512vlbw-builtins.c | 960
-rw-r--r-- clang/test/CodeGen/X86/avx512vlbw-reduceIntrin.c | 308
-rw-r--r-- clang/test/CodeGen/X86/avx512vlcd-builtins.c | 166
-rw-r--r-- clang/test/CodeGen/X86/avx512vldq-builtins.c | 370
-rw-r--r-- clang/test/CodeGen/X86/avx512vlfp16-builtins.c | 552
-rw-r--r-- clang/test/CodeGen/X86/avx512vlvbmi2-builtins.c | 405
-rw-r--r-- clang/test/CodeGen/X86/avx512vlvnni-builtins.c | 101
-rw-r--r-- clang/test/CodeGen/X86/avx512vlvp2intersect-builtins.c | 6
-rw-r--r-- clang/test/CodeGen/X86/avx512vnni-builtins.c | 53
-rw-r--r-- clang/test/CodeGen/X86/avx512vp2intersect-builtins.c | 6
-rw-r--r-- clang/test/CodeGen/X86/avx512vpopcntdq-builtins.c | 29
-rw-r--r-- clang/test/CodeGen/X86/avx512vpopcntdqvl-builtins.c | 43
-rw-r--r-- clang/test/CodeGen/X86/avxifma-builtins.c | 38
-rw-r--r-- clang/test/CodeGen/X86/avxvnni-builtins.c | 70
-rw-r--r-- clang/test/CodeGen/X86/avxvnniint16-builtins.c | 36
-rw-r--r-- clang/test/CodeGen/X86/avxvnniint8-builtins.c | 60
-rw-r--r-- clang/test/CodeGen/X86/bitscan-builtins.c | 40
-rw-r--r-- clang/test/CodeGen/X86/builtin_test_helpers.h | 210
-rw-r--r-- clang/test/CodeGen/X86/f16c-builtins-constrained.c | 15
-rw-r--r-- clang/test/CodeGen/X86/f16c-builtins.c | 20
-rw-r--r-- clang/test/CodeGen/X86/fma-builtins.c | 30
-rw-r--r-- clang/test/CodeGen/X86/fma4-builtins.c | 22
-rw-r--r-- clang/test/CodeGen/X86/lzcnt-builtins.c | 43
-rw-r--r-- clang/test/CodeGen/X86/mmx-builtins.c | 117
-rw-r--r-- clang/test/CodeGen/X86/popcnt-builtins.c | 46
-rw-r--r-- clang/test/CodeGen/X86/rot-intrinsics.c | 66
-rw-r--r-- clang/test/CodeGen/X86/sse-builtins.c | 9
-rw-r--r-- clang/test/CodeGen/X86/sse2-builtins.c | 77
-rw-r--r-- clang/test/CodeGen/X86/sse3-builtins.c | 5
-rw-r--r-- clang/test/CodeGen/X86/sse41-builtins.c | 36
-rw-r--r-- clang/test/CodeGen/X86/sse42-builtins.c | 11
-rw-r--r-- clang/test/CodeGen/X86/ssse3-builtins.c | 17
-rw-r--r-- clang/test/CodeGen/X86/x86-bswap.c | 30
-rw-r--r-- clang/test/CodeGen/X86/x86-builtins.c | 23
-rw-r--r-- clang/test/CodeGen/X86/xop-builtins.c | 70
-rw-r--r-- clang/test/CodeGen/afn-flag-test.c | 2
-rw-r--r-- clang/test/CodeGen/aggregate-assign-call.c | 22
-rw-r--r-- clang/test/CodeGen/asan-unified-lto.ll | 1
-rw-r--r-- clang/test/CodeGen/attr-counted-by-for-pointers.c | 8
-rw-r--r-- clang/test/CodeGen/attr-counted-by.c | 208
-rw-r--r-- clang/test/CodeGen/builtin-bpf-btf-type-id.c | 2
-rw-r--r-- clang/test/CodeGen/builtin-masked.c | 131
-rw-r--r-- clang/test/CodeGen/builtins-elementwise-math.c | 189
-rw-r--r-- clang/test/CodeGen/builtins-wasm.c | 21
-rw-r--r-- clang/test/CodeGen/builtins-x86.c | 4
-rw-r--r-- clang/test/CodeGen/builtins.c | 424
-rw-r--r-- clang/test/CodeGen/c-strings.c | 6
-rw-r--r-- clang/test/CodeGen/cfi-salt.c | 188
-rw-r--r-- clang/test/CodeGen/cleanup-destslot-simple.c | 24
-rw-r--r-- clang/test/CodeGen/dominating-store-to-return.c | 8
-rw-r--r-- clang/test/CodeGen/func-attr.c | 1
-rw-r--r-- clang/test/CodeGen/issue155126.c | 17
-rw-r--r-- clang/test/CodeGen/kcfi-generalize.c | 33
-rw-r--r-- clang/test/CodeGen/lifetime-sanitizer.c | 8
-rw-r--r-- clang/test/CodeGen/lifetime.c | 8
-rw-r--r-- clang/test/CodeGen/lifetime2.c | 18
-rw-r--r-- clang/test/CodeGen/lifetime3.cpp | 16
-rw-r--r-- clang/test/CodeGen/math-libcalls-tbaa.c | 12
-rw-r--r-- clang/test/CodeGen/nofpclass.c | 8
-rw-r--r-- clang/test/CodeGen/object-size.cpp | 1
-rw-r--r-- clang/test/CodeGen/packed-arrays.c | 6
-rw-r--r-- clang/test/CodeGen/palignr.c | 12
-rw-r--r-- clang/test/CodeGen/pointer-arithmetic-align.c | 83
-rw-r--r-- clang/test/CodeGen/ptrauth-qualifier-blocks.c | 10
-rw-r--r-- clang/test/CodeGen/rounding-math.cpp | 52
-rw-r--r-- clang/test/CodeGen/target-builtin-noerror.c | 6
-rw-r--r-- clang/test/CodeGen/target-data.c | 4
-rw-r--r-- clang/test/CodeGen/target-features-error-3.c | 12
-rw-r--r-- clang/test/CodeGen/target-features-error-4.c | 12
-rw-r--r-- clang/test/CodeGen/target-features-error-5.c | 12
-rw-r--r-- clang/test/CodeGen/target-features-no-error-2.c | 12
-rw-r--r-- clang/test/CodeGen/temporary-lifetime-exceptions.cpp | 12
-rw-r--r-- clang/test/CodeGen/temporary-lifetime.cpp | 48
-rw-r--r-- clang/test/CodeGen/ubsan-trap-reason-add-overflow.c | 9
-rw-r--r-- clang/test/CodeGen/ubsan-trap-reason-flag.c | 22
-rw-r--r-- clang/test/CodeGen/ubsan-trap-reason-mul-overflow.c | 9
-rw-r--r-- clang/test/CodeGen/ubsan-trap-reason-sub-overflow.c | 9
-rw-r--r-- clang/test/CodeGen/unified-lto-pipeline.c | 6
-rw-r--r-- clang/test/CodeGen/union-tbaa1.c | 26
-rw-r--r-- clang/test/CodeGen/volatile-1.c | 45
-rw-r--r-- clang/test/CodeGenCUDA/grid-constant.cu | 16
-rw-r--r-- clang/test/CodeGenCXX/aarch64-sve-vector-conditional-op.cpp | 3
-rw-r--r-- clang/test/CodeGenCXX/amdgcn_declspec_get.cpp | 4
-rw-r--r-- clang/test/CodeGenCXX/attr-likelihood-iteration-stmt.cpp | 40
-rw-r--r-- clang/test/CodeGenCXX/cxx2b-deducing-this.cpp | 52
-rw-r--r-- clang/test/CodeGenCXX/debug-info-class-limited.test | 1
-rw-r--r-- clang/test/CodeGenCXX/destructors.cpp | 14
-rw-r--r-- clang/test/CodeGenCXX/ext-vector-type-conditional.cpp | 344
-rw-r--r-- clang/test/CodeGenCXX/int64_uint64.cpp | 8
-rw-r--r-- clang/test/CodeGenCXX/lambda-this-2.cpp (renamed from clang/test/CodeGenCXX/debug-info-lambda-this.cpp) | 0
-rw-r--r-- clang/test/CodeGenCXX/mangle-ms-cxx11.cpp | 39
-rw-r--r-- clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp | 6
-rw-r--r-- clang/test/CodeGenCXX/modules-vtable.cppm | 2
-rw-r--r-- clang/test/CodeGenCXX/pr70585.cppm | 2
-rw-r--r-- clang/test/CodeGenCXX/ptrauth-explicit-vtable-pointer-control.cpp | 37
-rw-r--r-- clang/test/CodeGenCXX/sret_cast_with_nonzero_alloca_as.cpp | 7
-rw-r--r-- clang/test/CodeGenCXX/stack-reuse-exceptions.cpp | 60
-rw-r--r-- clang/test/CodeGenCXX/stack-reuse-miscompile.cpp | 6
-rw-r--r-- clang/test/CodeGenHIP/hip-cumode.hip | 10
-rw-r--r-- clang/test/CodeGenHIP/store-addr-space.hip | 46
-rw-r--r-- clang/test/CodeGenHLSL/builtins/dot2add.hlsl | 2
-rw-r--r-- clang/test/CodeGenHLSL/builtins/hlsl_resource_t.hlsl | 21
-rw-r--r-- clang/test/CodeGenHLSL/convergence/global_array.hlsl | 10
-rw-r--r-- clang/test/CodeGenHLSL/resources/ByteAddressBuffers-constructors.hlsl | 4
-rw-r--r-- clang/test/CodeGenHLSL/resources/RWBuffer-constructor.hlsl | 6
-rw-r--r-- clang/test/CodeGenHLSL/resources/RWBuffer-elementtype.hlsl | 12
-rw-r--r-- clang/test/CodeGenHLSL/resources/RWBuffer-imageformat.hlsl | 74
-rw-r--r-- clang/test/CodeGenHLSL/resources/RWBuffer-subscript.hlsl | 2
-rw-r--r-- clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl | 4
-rw-r--r-- clang/test/CodeGenHLSL/resources/cbuffer.hlsl | 20
-rw-r--r-- clang/test/CodeGenHLSL/resources/cbuffer_with_packoffset.hlsl | 2
-rw-r--r-- clang/test/CodeGenHLSL/resources/res-array-global-dyn-index.hlsl | 29
-rw-r--r-- clang/test/CodeGenHLSL/resources/res-array-global-multi-dim.hlsl | 46
-rw-r--r-- clang/test/CodeGenHLSL/resources/res-array-global.hlsl | 75
-rw-r--r-- clang/test/CodeGenHLSL/resources/res-array-local-multi-dim.hlsl | 49
-rw-r--r-- clang/test/CodeGenHLSL/resources/res-array-local1.hlsl | 64
-rw-r--r-- clang/test/CodeGenHLSL/resources/res-array-local2.hlsl | 37
-rw-r--r-- clang/test/CodeGenHLSL/resources/res-array-local3.hlsl | 62
-rw-r--r-- clang/test/CodeGenHLSL/resources/resource-bindings.hlsl | 22
-rw-r--r-- clang/test/CodeGenHLSL/static-local-ctor.hlsl | 5
-rw-r--r-- clang/test/CodeGenHLSL/vk-features/SpirvType.hlsl | 2
-rw-r--r-- clang/test/CodeGenObjC/arc-blocks.m | 18
-rw-r--r-- clang/test/CodeGenObjC/arc-precise-lifetime.m | 68
-rw-r--r-- clang/test/CodeGenObjC/arc-ternary-op.m | 12
-rw-r--r-- clang/test/CodeGenObjC/arc.m | 102
-rw-r--r-- clang/test/CodeGenObjC/exceptions.m | 4
-rw-r--r-- clang/test/CodeGenObjC/ptrauth-block-descriptor-pointer.m | 39
-rw-r--r-- clang/test/CodeGenObjC/ptrauth-block-isa.m | 5
-rw-r--r-- clang/test/CodeGenObjCXX/arc-move.mm | 8
-rw-r--r-- clang/test/CodeGenObjCXX/arc-references.mm | 6
-rw-r--r-- clang/test/CodeGenObjCXX/arc.mm | 6
-rw-r--r-- clang/test/CodeGenObjCXX/literals.mm | 12
-rw-r--r-- clang/test/CodeGenOpenCL/addr-space-struct-arg.cl | 1
-rw-r--r-- clang/test/CodeGenOpenCL/amdgpu-abi-struct-arg-byref.cl | 1
-rw-r--r-- clang/test/CodeGenOpenCL/amdgpu-enqueue-kernel.cl | 32
-rw-r--r-- clang/test/CodeGenOpenCL/amdgpu-features-illegal.cl | 2
-rw-r--r-- clang/test/CodeGenOpenCL/amdgpu-printf.cl | 4
-rw-r--r-- clang/test/CodeGenOpenCL/builtins-amdgcn-fp8.cl | 1
-rw-r--r-- clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12-param-err.cl | 7
-rw-r--r-- clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12.cl | 44
-rw-r--r-- clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl | 191
-rw-r--r-- clang/test/CodeGenOpenCL/builtins-amdgcn-wave32.cl | 7
-rw-r--r-- clang/test/CodeGenOpenCL/builtins-amdgcn-wave64.cl | 7
-rw-r--r-- clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl | 28
-rw-r--r-- clang/test/CodeGenOpenCL/implicit-addrspacecast-function-parameter.cl | 4
-rw-r--r-- clang/test/CodeGenOpenCL/preserve_vec3.cl | 22
-rw-r--r-- clang/test/CodeGenSPIRV/Builtins/generic_cast_to_ptr_explicit.c | 23
-rw-r--r-- clang/test/CodeGenSPIRV/Builtins/ids_and_ranges.c | 50
-rw-r--r-- clang/test/CodeGenSPIRV/spirv-intel.c | 9
-rw-r--r-- clang/test/CodeGenSYCL/address-space-conversions.cpp | 170
-rw-r--r-- clang/test/CodeGenSYCL/address-space-deduction.cpp | 2
-rw-r--r-- clang/test/CodeGenSYCL/address-space-mangling.cpp | 2
-rw-r--r-- clang/test/CodeGenSYCL/amd-address-space-conversions.cpp | 160
-rw-r--r-- clang/test/CodeGenSYCL/cuda-address-space-conversions.cpp | 152
-rw-r--r-- clang/test/CodeGenSYCL/debug-info-kernel-variables.cpp | 2
-rw-r--r-- clang/test/CodeGenSYCL/field-annotate-addr-space.cpp | 2
-rw-r--r-- clang/test/CodeGenSYCL/function-attrs.cpp | 21
-rw-r--r-- clang/test/CodeGenSYCL/functionptr-addrspace.cpp | 2
-rw-r--r-- clang/test/CodeGenSYCL/kernel-caller-entry-point.cpp | 7
-rw-r--r-- clang/test/CodeGenSYCL/sycl-external-attr.cpp | 85
-rw-r--r-- clang/test/CodeGenSYCL/unique_stable_name.cpp | 126
-rw-r--r-- clang/test/CoverageMapping/logical.cpp | 22
-rw-r--r-- clang/test/DebugInfo/AArch64/sve-vector-types.c (renamed from clang/test/CodeGen/AArch64/debug-sve-vector-types.c) | 0
-rw-r--r-- clang/test/DebugInfo/AArch64/sve-vectorx2-types.c (renamed from clang/test/CodeGen/AArch64/debug-sve-vectorx2-types.c) | 0
-rw-r--r-- clang/test/DebugInfo/AArch64/sve-vectorx3-types.c (renamed from clang/test/CodeGen/AArch64/debug-sve-vectorx3-types.c) | 0
-rw-r--r-- clang/test/DebugInfo/AArch64/sve-vectorx4-types.c (renamed from clang/test/CodeGen/AArch64/debug-sve-vectorx4-types.c) | 0
-rw-r--r-- clang/test/DebugInfo/AArch64/types.c (renamed from clang/test/CodeGen/AArch64/debug-types.c) | 0
-rw-r--r-- clang/test/DebugInfo/AssignmentTracking/assignment-tracking.cpp (renamed from clang/test/CodeGen/assignment-tracking/assignment-tracking.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/AssignmentTracking/flag.cpp (renamed from clang/test/CodeGen/assignment-tracking/flag.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/AssignmentTracking/memcpy-fragment.cpp (renamed from clang/test/CodeGen/assignment-tracking/memcpy-fragment.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/AssignmentTracking/nested-scope.cpp (renamed from clang/test/CodeGen/assignment-tracking/nested-scope.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/BPF/attr-btf_tag-typedef.c (renamed from clang/test/CodeGen/attr-btf_tag-typedef.c) | 0
-rw-r--r-- clang/test/DebugInfo/BPF/attr-btf_type_tag-func.c (renamed from clang/test/CodeGen/attr-btf_type_tag-func.c) | 0
-rw-r--r-- clang/test/DebugInfo/BPF/bpf-attr-type-tag-atomic.c (renamed from clang/test/CodeGen/bpf-attr-type-tag-atomic.c) | 0
-rw-r--r-- clang/test/DebugInfo/BPF/bpf-debug-info-extern-func.c (renamed from clang/test/CodeGen/bpf-debug-info-extern-func.c) | 0
-rw-r--r-- clang/test/DebugInfo/BPF/builtin-preserve-access-index-nonptr.c (renamed from clang/test/CodeGen/builtin-preserve-access-index-nonptr.c) | 0
-rw-r--r-- clang/test/DebugInfo/BPF/builtins-bpf-preserve-field-info-1.c (renamed from clang/test/CodeGen/builtins-bpf-preserve-field-info-1.c) | 0
-rw-r--r-- clang/test/DebugInfo/BPF/builtins-bpf-preserve-field-info-2.c (renamed from clang/test/CodeGen/builtins-bpf-preserve-field-info-2.c) | 0
-rw-r--r-- clang/test/DebugInfo/BPF/builtins-bpf-preserve-field-info-3.c (renamed from clang/test/CodeGen/builtins-bpf-preserve-field-info-3.c) | 2
-rw-r--r-- clang/test/DebugInfo/BPF/builtins-bpf-preserve-field-info-4.c (renamed from clang/test/CodeGen/builtins-bpf-preserve-field-info-4.c) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/2006-11-20-GlobalSymbols.cpp (renamed from clang/test/CodeGenCXX/2006-11-20-GlobalSymbols.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/2007-01-02-UnboundedArray.cpp (renamed from clang/test/CodeGenCXX/2007-01-02-UnboundedArray.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/2009-03-17.cpp (renamed from clang/test/CodeGenCXX/2009-03-17-dbg.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/2009-06-16-Crash.cpp (renamed from clang/test/CodeGenCXX/2009-06-16-DebugInfoCrash.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/2010-05-10-Var.cpp (renamed from clang/test/CodeGenCXX/2010-05-10-Var-DbgInfo.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/2010-05-12-PtrToMember.cpp (renamed from clang/test/CodeGenCXX/2010-05-12-PtrToMember-Dbg.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/2010-06-21-LocalVarDbg.cpp (renamed from clang/test/CodeGenCXX/2010-06-21-LocalVarDbg.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/2010-07-23-DeclLoc.cpp (renamed from clang/test/CodeGenCXX/2010-07-23-DeclLoc.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/Inputs/class-limited.cpp (renamed from clang/test/CodeGenCXX/Inputs/debug-info-class-limited.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/PR20038.cpp (renamed from clang/test/CodeGenCXX/PR20038.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/access.cpp (renamed from clang/test/CodeGenCXX/debug-info-access.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/aix-static-init.cpp (renamed from clang/test/CodeGenCXX/aix-static-init-debug-info.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/alias.cpp (renamed from clang/test/CodeGenCXX/debug-info-alias.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/all-calls-described.cpp (renamed from clang/test/CodeGenCXX/dbg-info-all-calls-described.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/anon-namespace.cpp (renamed from clang/test/CodeGenCXX/debug-info-anon-namespace.cpp) | 0
-rw-r--r-- clang/test/DebugInfo/CXX/anon-union-vars.cpp (renamed from clang/test/CodeGenCXX/debug-info-anon-union-vars.cpp) | 2
-rw-r--r-- clang/test/DebugInfo/CXX/artificial-arg.cpp (renamed from clang/test/CodeGenCXX/debug-info-artificial-arg.cpp) | 0
-rw-r--r--clang/test/DebugInfo/CXX/atexit-stub.cpp (renamed from clang/test/CodeGenCXX/debug-info-atexit-stub.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/auto-return.cpp (renamed from clang/test/CodeGenCXX/debug-info-auto-return.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/block-invocation-linkage-name.cpp (renamed from clang/test/CodeGenCXX/debug-info-block-invocation-linkage-name.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/blocks.cpp (renamed from clang/test/CodeGenCXX/debug-info-blocks.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/bpf-structors.cpp (renamed from clang/test/CodeGenCXX/bpf-debug-structors.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/byval.cpp (renamed from clang/test/CodeGenCXX/debug-info-byval.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/calling-conventions.cpp (renamed from clang/test/CodeGenCXX/debug-info-calling-conventions.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/char16.cpp (renamed from clang/test/CodeGenCXX/debug-info-char16.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/class-limited-plugin.test (renamed from clang/test/CodeGenCXX/debug-info-class-limited-plugin.test)2
-rw-r--r--clang/test/DebugInfo/CXX/class-limited.test1
-rw-r--r--clang/test/DebugInfo/CXX/class-nolimit.cpp (renamed from clang/test/CodeGenCXX/debug-info-class-nolimit.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/class-optzns.cpp (renamed from clang/test/CodeGenCXX/debug-info-class-optzns.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/class.cpp (renamed from clang/test/CodeGenCXX/debug-info-class.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/codeview-display-name.cpp (renamed from clang/test/CodeGenCXX/debug-info-codeview-display-name.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/codeview-heapallocsite.cpp (renamed from clang/test/CodeGenCXX/debug-info-codeview-heapallocsite.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/codeview-injected-class.cpp (renamed from clang/test/CodeGenCXX/debug-info-codeview-injected-class.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/codeview-nested-types.cpp (renamed from clang/test/CodeGenCXX/debug-info-codeview-nested-types.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/codeview-nodebug.cpp (renamed from clang/test/CodeGenCXX/debug-info-codeview-nodebug.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/codeview-template-literal.cpp (renamed from clang/test/CodeGenCXX/debug-info-codeview-template-literal.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/codeview-template-type.cpp (renamed from clang/test/CodeGenCXX/debug-info-codeview-template-type.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/codeview-unnamed.cpp (renamed from clang/test/CodeGenCXX/debug-info-codeview-unnamed.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/codeview-var-templates.cpp (renamed from clang/test/CodeGenCXX/debug-info-codeview-var-templates.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/coff.cpp (renamed from clang/test/CodeGenCXX/debug-info-coff.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/composite-cc.cpp (renamed from clang/test/CodeGenCXX/debug-info-composite-cc.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/composite-triviality-fwd-decl.cpp (renamed from clang/test/CodeGenCXX/debug-info-composite-triviality-fwd-decl.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/composite-triviality.cpp (renamed from clang/test/CodeGenCXX/debug-info-composite-triviality.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/context.cpp (renamed from clang/test/CodeGenCXX/debug-info-context.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/cp-blocks-linetables.cpp (renamed from clang/test/CodeGenCXX/cp-blocks-linetables.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/ctor-homing-flag.cpp (renamed from clang/test/CodeGenCXX/debug-info-ctor-homing-flag.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/ctor.cpp (renamed from clang/test/CodeGenCXX/debug-info-ctor.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/ctor2.cpp (renamed from clang/test/CodeGenCXX/debug-info-ctor2.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/cxx0x.cpp (renamed from clang/test/CodeGenCXX/debug-info-cxx0x.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/cxx1y.cpp (renamed from clang/test/CodeGenCXX/debug-info-cxx1y.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/debug-info.cpp (renamed from clang/test/CodeGenCXX/debug-info.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/decl-nested.cpp (renamed from clang/test/CodeGenCXX/debug-info-decl-nested.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/defaulted-template-alias.cpp (renamed from clang/test/CodeGenCXX/defaulted-template-alias.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/deleted.cpp (renamed from clang/test/CodeGenCXX/debug-info-deleted.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/dependent-template-alias.cpp (renamed from clang/test/CodeGenCXX/dependent-template-alias.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/dependent-template-type-scope.cpp12
-rw-r--r--clang/test/DebugInfo/CXX/destroy-helper.cpp (renamed from clang/test/CodeGenCXX/debug-info-destroy-helper.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/destructor.cpp (renamed from clang/test/CodeGenCXX/destructor-debug-info.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/determinism.cpp (renamed from clang/test/CodeGenCXX/debug-info-determinism.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/difile_entry.cpp (renamed from clang/test/CodeGenCXX/difile_entry.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/dllimport-base-class.cpp (renamed from clang/test/CodeGenCXX/debug-info-dllimport-base-class.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/dtor-implicit-args.cpp (renamed from clang/test/CodeGenCXX/debug-info-dtor-implicit-args.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/dup-fwd-decl.cpp (renamed from clang/test/CodeGenCXX/debug-info-dup-fwd-decl.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/enum-class.cpp (renamed from clang/test/CodeGenCXX/debug-info-enum-class.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/enum-i128.cpp (renamed from clang/test/CodeGenCXX/debug-info-enum-i128.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/enum-metadata-collision.cpp (renamed from clang/test/CodeGenCXX/debug-info-enum-metadata-collision.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/enum.cpp (renamed from clang/test/CodeGenCXX/debug-info-enum.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/explicit-cast.cpp (renamed from clang/test/CodeGenCXX/debug-info-explicit-cast.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/explicit-this.cpp (renamed from clang/test/CodeGenCXX/debug-info-explicit-this.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/export_symbols.cpp (renamed from clang/test/CodeGenCXX/debug-info-export_symbols.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/fdebug-info-for-profiling.cpp (renamed from clang/test/CodeGenCXX/fdebug-info-for-profiling.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/field-access.cpp (renamed from clang/test/CodeGenCXX/field-access-debug-info.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/fn-template.cpp (renamed from clang/test/CodeGenCXX/debug-info-fn-template.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/friend.cpp (renamed from clang/test/CodeGenCXX/debug-info-friend.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/function-context.cpp (renamed from clang/test/CodeGenCXX/debug-info-function-context.cpp)2
-rw-r--r--clang/test/DebugInfo/CXX/fwd-ref.cpp (renamed from clang/test/CodeGenCXX/debug-info-fwd-ref.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/fwd-template-param.cpp (renamed from clang/test/CodeGenCXX/debug-info-fwd-template-param.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/gline-tables-only-codeview.cpp (renamed from clang/test/CodeGenCXX/debug-info-gline-tables-only-codeview.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/gline-tables-only.cpp (renamed from clang/test/CodeGenCXX/debug-info-gline-tables-only.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/global-ctor-dtor.cpp (renamed from clang/test/CodeGenCXX/debug-info-global-ctor-dtor.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/global.cpp (renamed from clang/test/CodeGenCXX/debug-info-global.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/globalinit-loc.cpp (renamed from clang/test/CodeGenCXX/globalinit-loc.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/globalinit.cpp (renamed from clang/test/CodeGenCXX/debug-info-globalinit.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/hotpatch-aarch64.cpp (renamed from clang/test/CodeGenCXX/debug-info-hotpatch-aarch64.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/hotpatch-arm.cpp (renamed from clang/test/CodeGenCXX/debug-info-hotpatch-arm.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/hotpatch.cpp (renamed from clang/test/CodeGenCXX/debug-info-hotpatch.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/incomplete-types.cpp (renamed from clang/test/CodeGenCXX/debug-info-incomplete-types.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/indirect-field-decl.cpp (renamed from clang/test/CodeGenCXX/debug-info-indirect-field-decl.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/inheriting-constructor.cpp (renamed from clang/test/CodeGenCXX/debug-info-inheriting-constructor.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/inline-dllexport-member.cpp (renamed from clang/test/CodeGenCXX/inline-dllexport-member.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/inlined.cpp (renamed from clang/test/CodeGenCXX/debug-info-inlined.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/lambda-expressions.cpp (renamed from clang/test/CodeGenCXX/debug-lambda-expressions.cpp)2
-rw-r--r--clang/test/DebugInfo/CXX/lambda-this.cpp (renamed from clang/test/CodeGenCXX/debug-lambda-this.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/lambda.cpp (renamed from clang/test/CodeGenCXX/debug-info-lambda.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/large-constant.cpp (renamed from clang/test/CodeGenCXX/debug-info-large-constant.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/limited-ctor.cpp (renamed from clang/test/CodeGenCXX/debug-info-limited-ctor.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/limited.cpp (renamed from clang/test/CodeGenCXX/debug-info-limited.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/line-if-2.cpp (renamed from clang/test/CodeGenCXX/debug-info-line-if-2.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/line-if.cpp (renamed from clang/test/CodeGenCXX/debug-info-line-if.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/line.cpp (renamed from clang/test/CodeGenCXX/debug-info-line.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/linetable-cleanup.cpp (renamed from clang/test/CodeGenCXX/linetable-cleanup.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/linetable-eh.cpp (renamed from clang/test/CodeGenCXX/linetable-eh.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/linetable-fnbegin.cpp (renamed from clang/test/CodeGenCXX/linetable-fnbegin.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/linetable-virtual-variadic.cpp (renamed from clang/test/CodeGenCXX/linetable-virtual-variadic.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/loops.cpp (renamed from clang/test/CodeGenCXX/debug-info-loops.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/lpad-linetable.cpp (renamed from clang/test/CodeGenCXX/lpad-linetable.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/member-call.cpp (renamed from clang/test/CodeGenCXX/debug-info-member-call.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/member.cpp (renamed from clang/test/CodeGenCXX/debug-info-member.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/method-nodebug.cpp (renamed from clang/test/CodeGenCXX/debug-info-method-nodebug.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/method-spec.cpp (renamed from clang/test/CodeGenCXX/debug-info-method-spec.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/method.cpp (renamed from clang/test/CodeGenCXX/debug-info-method.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/method2.cpp (renamed from clang/test/CodeGenCXX/debug-info-method2.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/microsoft-abi-member-pointers.cpp (renamed from clang/test/CodeGenCXX/microsoft-abi-member-pointers-debug-info.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/ms-abi.cpp (renamed from clang/test/CodeGenCXX/debug-info-ms-abi.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/ms-anonymous-tag.cpp (renamed from clang/test/CodeGenCXX/debug-info-ms-anonymous-tag.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/ms-bitfields.cpp (renamed from clang/test/CodeGenCXX/debug-info-ms-bitfields.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/ms-dtor-thunks.cpp (renamed from clang/test/CodeGenCXX/debug-info-ms-dtor-thunks.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/ms-novtable.cpp (renamed from clang/test/CodeGenCXX/debug-info-ms-novtable.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/ms-ptr-to-member.cpp (renamed from clang/test/CodeGenCXX/debug-info-ms-ptr-to-member.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/ms-vbase.cpp (renamed from clang/test/CodeGenCXX/debug-info-ms-vbase.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/namespace.cpp (renamed from clang/test/CodeGenCXX/debug-info-namespace.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/nested-exprs.cpp (renamed from clang/test/CodeGenCXX/debug-info-nested-exprs.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/nodebug.cpp (renamed from clang/test/CodeGenCXX/debug-info-nodebug.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/noreturn.cpp (renamed from clang/test/CodeGenCXX/debug-info-noreturn.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/nrvo.cpp (renamed from clang/test/CodeGenCXX/debug-info-nrvo.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/nullptr.cpp (renamed from clang/test/CodeGenCXX/debug-info-nullptr.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/object-pointer.cpp (renamed from clang/test/CodeGenCXX/debug-info-object-pointer.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/objname.cpp (renamed from clang/test/CodeGenCXX/debug-info-objname.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/prefix-map-lambda.cpp (renamed from clang/test/CodeGenCXX/debug-prefix-map-lambda.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/programming-language.cpp (renamed from clang/test/CodeGenCXX/debug-info-programming-language.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/ptr-to-member-function.cpp (renamed from clang/test/CodeGenCXX/debug-info-ptr-to-member-function.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/ptr-to-ptr.cpp (renamed from clang/test/CodeGenCXX/debug-info-ptr-to-ptr.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/qualifiers.cpp (renamed from clang/test/CodeGenCXX/debug-info-qualifiers.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/range-for-var-names.cpp (renamed from clang/test/CodeGenCXX/debug-info-range-for-var-names.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/rvalue-ref.cpp (renamed from clang/test/CodeGenCXX/debug-info-rvalue-ref.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/scope.cpp (renamed from clang/test/CodeGenCXX/debug-info-scope.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/scoped-class.cpp (renamed from clang/test/CodeGenCXX/debug-info-scoped-class.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/scoped-enums.cpp (renamed from clang/test/CodeGenCXX/scoped-enums-debug-info.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/simple-template-names.cpp (renamed from clang/test/CodeGenCXX/debug-info-simple-template-names.cpp)8
-rw-r--r--clang/test/DebugInfo/CXX/standalone-debug-attribute.cpp (renamed from clang/test/CodeGenCXX/standalone-debug-attribute.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/static-fns.cpp (renamed from clang/test/CodeGenCXX/debug-info-static-fns.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/static-member.cpp (renamed from clang/test/CodeGenCXX/debug-info-static-member.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/struct-align.cpp (renamed from clang/test/CodeGenCXX/debug-info-struct-align.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/structured-binding-bitfield.cpp (renamed from clang/test/CodeGenCXX/debug-info-structured-binding-bitfield.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/structured-binding.cpp (renamed from clang/test/CodeGenCXX/debug-info-structured-binding.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/template-alias.cpp (renamed from clang/test/CodeGenCXX/template-alias.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/template-align.cpp (renamed from clang/test/CodeGenCXX/debug-info-template-align.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/template-array.cpp (renamed from clang/test/CodeGenCXX/debug-info-template-array.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/template-deduction-guide.cpp (renamed from clang/test/CodeGenCXX/debug-info-template-deduction-guide.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/template-explicit-specialization.cpp (renamed from clang/test/CodeGenCXX/debug-info-template-explicit-specialization.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/template-fwd.cpp (renamed from clang/test/CodeGenCXX/debug-info-template-fwd.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/template-limit.cpp (renamed from clang/test/CodeGenCXX/debug-info-template-limit.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/template-member.cpp (renamed from clang/test/CodeGenCXX/debug-info-template-member.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/template-parameter.cpp (renamed from clang/test/CodeGenCXX/debug-info-template-parameter.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/template-partial-specialization.cpp (renamed from clang/test/CodeGenCXX/debug-info-template-partial-specialization.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/template-quals.cpp (renamed from clang/test/CodeGenCXX/debug-info-template-quals.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/template-recursive.cpp (renamed from clang/test/CodeGenCXX/debug-info-template-recursive.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/template.cpp (renamed from clang/test/CodeGenCXX/debug-info-template.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/this.cpp (renamed from clang/test/CodeGenCXX/debug-info-this.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/thunk-msabi.cpp (renamed from clang/test/CodeGenCXX/debug-info-thunk-msabi.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/thunk.cpp (renamed from clang/test/CodeGenCXX/debug-info-thunk.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/trivial_abi.cpp (renamed from clang/test/CodeGenCXX/trivial_abi_debuginfo.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/ubsan-check-debuglocs.cpp (renamed from clang/test/CodeGenCXX/ubsan-check-debuglocs.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/union-template.cpp (renamed from clang/test/CodeGenCXX/debug-info-union-template.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/union.cpp (renamed from clang/test/CodeGenCXX/debug-info-union.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/use-after-free.cpp (renamed from clang/test/CodeGenCXX/debug-info-use-after-free.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/uuid.cpp (renamed from clang/test/CodeGenCXX/debug-info-uuid.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/var-template-partial-spec.cpp (renamed from clang/test/CodeGenCXX/debug-info-var-template-partial-spec.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/var-template-partial.cpp (renamed from clang/test/CodeGenCXX/debug-info-var-template-partial.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/varargs.cpp (renamed from clang/test/CodeGenCXX/debug-info-varargs.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/variadic-template-alias.cpp (renamed from clang/test/CodeGenCXX/variadic-template-alias.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/verbose-trap.cpp (renamed from clang/test/CodeGenCXX/debug-info-verbose-trap.cpp)2
-rw-r--r--clang/test/DebugInfo/CXX/vla.cpp (renamed from clang/test/CodeGenCXX/debug-info-vla.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/vtable-holder-self-reference.cpp (renamed from clang/test/CodeGenCXX/vtable-holder-self-reference.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/vtable-inheritance-diamond.cpp (renamed from clang/test/CodeGenCXX/vtable-debug-info-inheritance-diamond.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/vtable-inheritance-multiple.cpp (renamed from clang/test/CodeGenCXX/vtable-debug-info-inheritance-multiple.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/vtable-inheritance-simple-main.cpp (renamed from clang/test/CodeGenCXX/vtable-debug-info-inheritance-simple-main.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/vtable-inheritance-simple.cpp (renamed from clang/test/CodeGenCXX/vtable-debug-info-inheritance-simple.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/vtable-inheritance-virtual.cpp (renamed from clang/test/CodeGenCXX/vtable-debug-info-inheritance-virtual.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/vtable-optzn.cpp (renamed from clang/test/CodeGenCXX/debug-info-vtable-optzn.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/vtable.cpp (renamed from clang/test/CodeGenCXX/vtable-debug-info.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/wchar.cpp (renamed from clang/test/CodeGenCXX/debug-info-wchar.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/windows-dtor.cpp (renamed from clang/test/CodeGenCXX/debug-info-windows-dtor.cpp)0
-rw-r--r--clang/test/DebugInfo/CXX/zero-length-arrays.cpp (renamed from clang/test/CodeGenCXX/debug-info-zero-length-arrays.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/2007-05-11-str-const.c (renamed from clang/test/CodeGen/2007-05-11-str-const.c)0
-rw-r--r--clang/test/DebugInfo/Generic/2009-03-13-dbg.c (renamed from clang/test/CodeGen/2009-03-13-dbg.c)0
-rw-r--r--clang/test/DebugInfo/Generic/2009-04-23-dbg.c (renamed from clang/test/CodeGen/2009-04-23-dbg.c)0
-rw-r--r--clang/test/DebugInfo/Generic/2009-07-31-DbgDeclare.c (renamed from clang/test/CodeGen/2009-07-31-DbgDeclare.c)0
-rw-r--r--clang/test/DebugInfo/Generic/2010-01-14-FnType-DebugInfo.c (renamed from clang/test/CodeGen/2010-01-14-FnType-DebugInfo.c)0
-rw-r--r--clang/test/DebugInfo/Generic/2010-01-18-Inlined-Debug.c (renamed from clang/test/CodeGen/2010-01-18-Inlined-Debug.c)0
-rw-r--r--clang/test/DebugInfo/Generic/2010-02-10-PointerName.c (renamed from clang/test/CodeGen/2010-02-10-PointerName.c)0
-rw-r--r--clang/test/DebugInfo/Generic/2010-02-15-DbgStaticVar.c (renamed from clang/test/CodeGen/2010-02-15-DbgStaticVar.c)0
-rw-r--r--clang/test/DebugInfo/Generic/2010-02-16-DbgScopes.c (renamed from clang/test/CodeGen/2010-02-16-DbgScopes.c)0
-rw-r--r--clang/test/DebugInfo/Generic/2010-03-5-LexicalScope.c (renamed from clang/test/CodeGen/2010-03-5-LexicalScope.c)0
-rw-r--r--clang/test/DebugInfo/Generic/2010-07-08-DeclDebugLineNo.c (renamed from clang/test/CodeGen/2010-07-08-DeclDebugLineNo.c)0
-rw-r--r--clang/test/DebugInfo/Generic/2010-08-10-DbgConstant.c (renamed from clang/test/CodeGen/2010-08-10-DbgConstant.c)0
-rw-r--r--clang/test/DebugInfo/Generic/257-args.c (renamed from clang/test/CodeGen/debug-info-257-args.c)0
-rw-r--r--clang/test/DebugInfo/Generic/Inputs/debug-info-embed-source.c (renamed from clang/test/CodeGen/Inputs/debug-info-embed-source.c)0
-rw-r--r--clang/test/DebugInfo/Generic/Inputs/debug-info-file-checksum-line.cpp (renamed from clang/test/CodeGen/Inputs/debug-info-file-checksum-line.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/Inputs/debug-info-file-checksum-pre.cpp (renamed from clang/test/CodeGen/Inputs/debug-info-file-checksum-pre.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/Inputs/debug-info-file-checksum.c (renamed from clang/test/CodeGen/Inputs/debug-info-file-checksum.c)0
-rw-r--r--clang/test/DebugInfo/Generic/Inputs/debug-info-macro.h (renamed from clang/test/CodeGen/Inputs/debug-info-macro.h)0
-rw-r--r--clang/test/DebugInfo/Generic/Inputs/debug-info-slash.cpp (renamed from clang/test/CodeGen/Inputs/debug-info-slash.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/Inputs/debug-info-slash.h (renamed from clang/test/CodeGen/Inputs/debug-info-slash.h)0
-rw-r--r--clang/test/DebugInfo/Generic/Inputs/stdio.h9
-rw-r--r--clang/test/DebugInfo/Generic/abspath.c (renamed from clang/test/CodeGen/debug-info-abspath.c)10
-rw-r--r--clang/test/DebugInfo/Generic/alias-pointer.c (renamed from clang/test/CodeGen/debug-info-alias-pointer.c)0
-rw-r--r--clang/test/DebugInfo/Generic/alias.c (renamed from clang/test/CodeGen/debug-info-alias.c)0
-rw-r--r--clang/test/DebugInfo/Generic/amdgpu-barrier-type-debug-info.c (renamed from clang/test/CodeGen/amdgpu-barrier-type-debug-info.c)0
-rw-r--r--clang/test/DebugInfo/Generic/amdgpu-buffer-rsrc-type-debug-info.c (renamed from clang/test/CodeGen/amdgpu-buffer-rsrc-type-debug-info.c)0
-rw-r--r--clang/test/DebugInfo/Generic/args.c (renamed from clang/test/CodeGen/debug-info-args.c)0
-rw-r--r--clang/test/DebugInfo/Generic/artificial.c (renamed from clang/test/CodeGen/artificial.c)0
-rw-r--r--clang/test/DebugInfo/Generic/atomic.c (renamed from clang/test/CodeGen/debug-info-atomic.c)0
-rw-r--r--clang/test/DebugInfo/Generic/attr-btf_type_tag-func-ptr.c (renamed from clang/test/CodeGen/attr-btf_type_tag-func-ptr.c)0
-rw-r--r--clang/test/DebugInfo/Generic/attr-btf_type_tag-similar-type.c (renamed from clang/test/CodeGen/attr-btf_type_tag-similar-type.c)0
-rw-r--r--clang/test/DebugInfo/Generic/attr-btf_type_tag-typedef-field.c (renamed from clang/test/CodeGen/attr-btf_type_tag-typedef-field.c)0
-rw-r--r--clang/test/DebugInfo/Generic/attr-btf_type_tag-var.c (renamed from clang/test/CodeGen/attr-btf_type_tag-var.c)0
-rw-r--r--clang/test/DebugInfo/Generic/attr-counted-by-debug-info.c (renamed from clang/test/CodeGen/attr-counted-by-debug-info.c)0
-rw-r--r--clang/test/DebugInfo/Generic/attr-nodebug.c (renamed from clang/test/CodeGen/attr-nodebug.c)0
-rw-r--r--clang/test/DebugInfo/Generic/attr-nodebug2.c (renamed from clang/test/CodeGen/attr-nodebug2.c)0
-rw-r--r--clang/test/DebugInfo/Generic/attributed-stmt.c (renamed from clang/test/CodeGen/debug-info-attributed-stmt.c)0
-rw-r--r--clang/test/DebugInfo/Generic/bitfield-0-struct.c (renamed from clang/test/CodeGen/debug-info-bitfield-0-struct.c)0
-rw-r--r--clang/test/DebugInfo/Generic/block-decl.c (renamed from clang/test/CodeGen/debug-info-block-decl.c)0
-rw-r--r--clang/test/DebugInfo/Generic/block-expr.c (renamed from clang/test/CodeGen/debug-info-block-expr.c)0
-rw-r--r--clang/test/DebugInfo/Generic/block-out-return.c (renamed from clang/test/CodeGen/debug-info-block-out-return.c)0
-rw-r--r--clang/test/DebugInfo/Generic/block-vars.c (renamed from clang/test/CodeGen/debug-info-block-vars.c)0
-rw-r--r--clang/test/DebugInfo/Generic/block.c (renamed from clang/test/CodeGen/debug-info-block.c)0
-rw-r--r--clang/test/DebugInfo/Generic/bounds-checking-debuginfo.c (renamed from clang/test/CodeGen/bounds-checking-debuginfo.c)0
-rw-r--r--clang/test/DebugInfo/Generic/bpf-debug-info-unref.c (renamed from clang/test/CodeGen/bpf-debug-info-unref.c)0
-rw-r--r--clang/test/DebugInfo/Generic/builtin-preserve-access-index-array.c (renamed from clang/test/CodeGen/builtin-preserve-access-index-array.c)0
-rw-r--r--clang/test/DebugInfo/Generic/cc.c (renamed from clang/test/CodeGen/debug-info-cc.c)0
-rw-r--r--clang/test/DebugInfo/Generic/cfi-check-fail-debuginfo.c (renamed from clang/test/CodeGen/cfi-check-fail-debuginfo.c)0
-rw-r--r--clang/test/DebugInfo/Generic/cfi-icall-generalize-debuginfo.c (renamed from clang/test/CodeGen/cfi-icall-generalize-debuginfo.c)0
-rw-r--r--clang/test/DebugInfo/Generic/cfi-icall-normalize2-debuginfo.c (renamed from clang/test/CodeGen/cfi-icall-normalize2-debuginfo.c)0
-rw-r--r--clang/test/DebugInfo/Generic/codeview-buildinfo.c (renamed from clang/test/CodeGen/debug-info-codeview-buildinfo.c)2
-rw-r--r--clang/test/DebugInfo/Generic/codeview-heapallocsite.c (renamed from clang/test/CodeGen/debug-info-codeview-heapallocsite.c)0
-rw-r--r--clang/test/DebugInfo/Generic/codeview-unnamed.c (renamed from clang/test/CodeGen/debug-info-codeview-unnamed.c)0
-rw-r--r--clang/test/DebugInfo/Generic/compilation-dir.c (renamed from clang/test/CodeGen/debug-info-compilation-dir.c)6
-rw-r--r--clang/test/DebugInfo/Generic/crash.c (renamed from clang/test/CodeGen/debug-info-crash.c)0
-rw-r--r--clang/test/DebugInfo/Generic/dbg-const-int128.c (renamed from clang/test/CodeGen/dbg-const-int128.c)0
-rw-r--r--clang/test/DebugInfo/Generic/dbg-info-all-calls-described.c (renamed from clang/test/CodeGen/dbg-info-all-calls-described.c)0
-rw-r--r--clang/test/DebugInfo/Generic/debug-info.c (renamed from clang/test/CodeGen/debug-info.c)0
-rw-r--r--clang/test/DebugInfo/Generic/debug-label-inline.c (renamed from clang/test/CodeGen/debug-label-inline.c)0
-rw-r--r--clang/test/DebugInfo/Generic/debug-line-1.c (renamed from clang/test/CodeGen/debug-line-1.c)0
-rw-r--r--clang/test/DebugInfo/Generic/debug-prefix-map.c (renamed from clang/test/CodeGen/debug-prefix-map.c)0
-rw-r--r--clang/test/DebugInfo/Generic/debug-prefix-map.cpp (renamed from clang/test/CodeGen/debug-prefix-map.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/embed-source.c (renamed from clang/test/CodeGen/debug-info-embed-source.c)0
-rw-r--r--clang/test/DebugInfo/Generic/enum-case-val.c (renamed from clang/test/CodeGen/debug-info-enum-case-val.c)0
-rw-r--r--clang/test/DebugInfo/Generic/enum-extensibility.c (renamed from clang/test/CodeGen/debug-info-enum-extensibility.c)0
-rw-r--r--clang/test/DebugInfo/Generic/enum.c (renamed from clang/test/CodeGen/debug-info-enum.c)0
-rw-r--r--clang/test/DebugInfo/Generic/enum.cpp (renamed from clang/test/CodeGen/debug-info-enum.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/enum2.c (renamed from clang/test/CodeGen/enum2.c)0
-rw-r--r--clang/test/DebugInfo/Generic/extern-basic.c (renamed from clang/test/CodeGen/debug-info-extern-basic.c)0
-rw-r--r--clang/test/DebugInfo/Generic/extern-basic.cpp (renamed from clang/test/CodeGen/debug-info-extern-basic.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/extern-call.c (renamed from clang/test/CodeGen/debug-info-extern-call.c)0
-rw-r--r--clang/test/DebugInfo/Generic/extern-callback.c (renamed from clang/test/CodeGen/debug-info-extern-callback.c)0
-rw-r--r--clang/test/DebugInfo/Generic/extern-duplicate.c (renamed from clang/test/CodeGen/debug-info-extern-duplicate.c)0
-rw-r--r--clang/test/DebugInfo/Generic/extern-multi.c (renamed from clang/test/CodeGen/debug-info-extern-multi.c)0
-rw-r--r--clang/test/DebugInfo/Generic/extern-unused.c (renamed from clang/test/CodeGen/debug-info-extern-unused.c)0
-rw-r--r--clang/test/DebugInfo/Generic/fake-use-return-line.c (renamed from clang/test/CodeGen/fake-use-return-line.c)0
-rw-r--r--clang/test/DebugInfo/Generic/file-change.c (renamed from clang/test/CodeGen/debug-info-file-change.c)0
-rw-r--r--clang/test/DebugInfo/Generic/file-checksum.c (renamed from clang/test/CodeGen/debug-info-file-checksum.c)0
-rw-r--r--clang/test/DebugInfo/Generic/gline-tables-only.c (renamed from clang/test/CodeGen/debug-info-gline-tables-only.c)0
-rw-r--r--clang/test/DebugInfo/Generic/gline-tables-only2.c (renamed from clang/test/CodeGen/debug-info-gline-tables-only2.c)0
-rw-r--r--clang/test/DebugInfo/Generic/global-blocks-lines.c (renamed from clang/test/CodeGen/global-blocks-lines.c)0
-rw-r--r--clang/test/DebugInfo/Generic/global-constant.c (renamed from clang/test/CodeGen/debug-info-global-constant.c)0
-rw-r--r--clang/test/DebugInfo/Generic/imported-entity.cpp (renamed from clang/test/CodeGen/debug-info-imported-entity.cpp)2
-rw-r--r--clang/test/DebugInfo/Generic/inline-for.c (renamed from clang/test/CodeGen/debug-info-inline-for.c)0
-rw-r--r--clang/test/DebugInfo/Generic/label.c (renamed from clang/test/CodeGen/debug-label.c)0
-rw-r--r--clang/test/DebugInfo/Generic/lifetime-debuginfo-1.c (renamed from clang/test/CodeGen/lifetime-debuginfo-1.c)0
-rw-r--r--clang/test/DebugInfo/Generic/lifetime-debuginfo-2.c (renamed from clang/test/CodeGen/lifetime-debuginfo-2.c)0
-rw-r--r--clang/test/DebugInfo/Generic/limited.c (renamed from clang/test/CodeGen/debug-info-limited.c)0
-rw-r--r--clang/test/DebugInfo/Generic/line.c (renamed from clang/test/CodeGen/debug-info-line.c)0
-rw-r--r--clang/test/DebugInfo/Generic/line2.c (renamed from clang/test/CodeGen/debug-info-line2.c)0
-rw-r--r--clang/test/DebugInfo/Generic/line3.c (renamed from clang/test/CodeGen/debug-info-line3.c)0
-rw-r--r--clang/test/DebugInfo/Generic/line4.c (renamed from clang/test/CodeGen/debug-info-line4.c)0
-rw-r--r--clang/test/DebugInfo/Generic/lineno-dbginfo.c (renamed from clang/test/CodeGen/lineno-dbginfo.c)0
-rw-r--r--clang/test/DebugInfo/Generic/linetable-endscope.c (renamed from clang/test/CodeGen/linetable-endscope.c)0
-rw-r--r--clang/test/DebugInfo/Generic/lto.c (renamed from clang/test/CodeGen/debug-info-lto.c)0
-rw-r--r--clang/test/DebugInfo/Generic/macro.c (renamed from clang/test/CodeGen/debug-info-macro.c)2
-rw-r--r--clang/test/DebugInfo/Generic/matrix-types.c (renamed from clang/test/CodeGen/debug-info-matrix-types.c)0
-rw-r--r--clang/test/DebugInfo/Generic/member.c (renamed from clang/test/CodeGen/debug-info-member.c)0
-rw-r--r--clang/test/DebugInfo/Generic/mips-debug-info-bitfield.c (renamed from clang/test/CodeGen/mips-debug-info-bitfield.c)0
-rw-r--r--clang/test/DebugInfo/Generic/names.c (renamed from clang/test/CodeGen/debug-info-names.c)0
-rw-r--r--clang/test/DebugInfo/Generic/no-inline-line-tables.c (renamed from clang/test/CodeGen/debug-info-no-inline-line-tables.c)0
-rw-r--r--clang/test/DebugInfo/Generic/nodebug-attr.c (renamed from clang/test/CodeGen/nodebug-attr.c)0
-rw-r--r--clang/test/DebugInfo/Generic/null-sanitizer-debug-info-regression.cpp (renamed from clang/test/CodeGen/null-sanitizer-debug-info-regression.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/nvptx.c (renamed from clang/test/CodeGen/debug-nvptx.c)0
-rw-r--r--clang/test/DebugInfo/Generic/openmp-prefix-map.c (renamed from clang/test/CodeGen/openmp-prefix-map.c)0
-rw-r--r--clang/test/DebugInfo/Generic/oslog.c (renamed from clang/test/CodeGen/debug-info-oslog.c)0
-rw-r--r--clang/test/DebugInfo/Generic/overloadable-debug.c (renamed from clang/test/CodeGen/overloadable-debug.c)0
-rw-r--r--clang/test/DebugInfo/Generic/packed-struct.c (renamed from clang/test/CodeGen/debug-info-packed-struct.c)0
-rw-r--r--clang/test/DebugInfo/Generic/pr52782-stdcall-func-decl.cpp (renamed from clang/test/CodeGen/pr52782-stdcall-func-decl.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/preferred-type.cpp (renamed from clang/test/CodeGen/debug-info-preferred-type.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/preferred_name-chain.cpp (renamed from clang/test/CodeGen/preferred_name-chain.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/preferred_name.cpp (renamed from clang/test/CodeGen/preferred_name.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/preprocessed-file.i (renamed from clang/test/CodeGen/debug-info-preprocessed-file.i)0
-rw-r--r--clang/test/DebugInfo/Generic/programming-language.c (renamed from clang/test/CodeGen/debug-info-programming-language.c)0
-rw-r--r--clang/test/DebugInfo/Generic/pseudo-probe.cpp (renamed from clang/test/CodeGen/debug-info-pseudo-probe.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/ptrauth-debuginfo.c (renamed from clang/test/CodeGen/ptrauth-debuginfo.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ranges-base-address.c (renamed from clang/test/CodeGen/debug-info-ranges-base-address.c)0
-rw-r--r--clang/test/DebugInfo/Generic/same-line.c (renamed from clang/test/CodeGen/debug-info-same-line.c)0
-rw-r--r--clang/test/DebugInfo/Generic/scope-file.c (renamed from clang/test/CodeGen/debug-info-scope-file.c)0
-rw-r--r--clang/test/DebugInfo/Generic/scope.c (renamed from clang/test/CodeGen/debug-info-scope.c)0
-rw-r--r--clang/test/DebugInfo/Generic/slash.c (renamed from clang/test/CodeGen/debug-info-slash.c)4
-rw-r--r--clang/test/DebugInfo/Generic/slash.test (renamed from clang/test/CodeGen/debug-info-slash.test)0
-rw-r--r--clang/test/DebugInfo/Generic/split-debug-filename.c (renamed from clang/test/CodeGen/split-debug-filename.c)0
-rw-r--r--clang/test/DebugInfo/Generic/split-debug-inlining.c (renamed from clang/test/CodeGen/split-debug-inlining.c)0
-rw-r--r--clang/test/DebugInfo/Generic/split-debug-output.c (renamed from clang/test/CodeGen/split-debug-output.c)0
-rw-r--r--clang/test/DebugInfo/Generic/split-debug-single-file.c (renamed from clang/test/CodeGen/split-debug-single-file.c)0
-rw-r--r--clang/test/DebugInfo/Generic/static-const-fp.c (renamed from clang/test/CodeGen/debug-info-static-const-fp.c)0
-rw-r--r--clang/test/DebugInfo/Generic/static.c (renamed from clang/test/CodeGen/debug-info-static.c)0
-rw-r--r--clang/test/DebugInfo/Generic/switch-fallthrough.c (renamed from clang/test/CodeGen/debug-info-switch-fallthrough.c)0
-rw-r--r--clang/test/DebugInfo/Generic/sysroot-sdk.c (renamed from clang/test/CodeGen/debug-info-sysroot-sdk.c)0
-rw-r--r--clang/test/DebugInfo/Generic/thinlto-split-dwarf.c (renamed from clang/test/CodeGen/thinlto-split-dwarf.c)0
-rw-r--r--clang/test/DebugInfo/Generic/typedef.c (renamed from clang/test/CodeGen/debug-info-typedef.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-debuglog-return.c (renamed from clang/test/CodeGen/ubsan-debuglog-return.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-function-debuginfo.c (renamed from clang/test/CodeGen/ubsan-function-debuginfo.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-debugloc.c (renamed from clang/test/CodeGen/ubsan-trap-debugloc.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-add-overflow.c32
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-alignment-assumption.c (renamed from clang/test/CodeGen/ubsan-trap-reason-alignment-assumption.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-builtin-unreachable.c (renamed from clang/test/CodeGen/ubsan-trap-reason-builtin-unreachable.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-cfi-check-fail.c (renamed from clang/test/CodeGen/ubsan-trap-reason-cfi-check-fail.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-crash.cpp (renamed from clang/test/CodeGen/ubsan-trap-reason-crash.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-div-rem-overflow.c (renamed from clang/test/CodeGen/ubsan-trap-reason-div-rem-overflow.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-dynamic-type-cache-miss.cpp (renamed from clang/test/CodeGen/ubsan-trap-reason-dynamic-type-cache-miss.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-flag.c47
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-float-cast-overflow.c (renamed from clang/test/CodeGen/ubsan-trap-reason-float-cast-overflow.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-function-type-mismatch.c (renamed from clang/test/CodeGen/ubsan-trap-reason-function-type-mismatch.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-implicit-conversion.c (renamed from clang/test/CodeGen/ubsan-trap-reason-implicit-conversion.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-invalid-builtin.c (renamed from clang/test/CodeGen/ubsan-trap-reason-invalid-builtin.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-invalid-objc-cast.m (renamed from clang/test/CodeGen/ubsan-trap-reason-invalid-objc-cast.m)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-load-invalid-value.c (renamed from clang/test/CodeGen/ubsan-trap-reason-load-invalid-value.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-missing-return.cpp (renamed from clang/test/CodeGen/ubsan-trap-reason-missing-return.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-mul-overflow.c30
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-negate-overflow.c (renamed from clang/test/CodeGen/ubsan-trap-reason-negate-overflow.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-nonnull-arg.c (renamed from clang/test/CodeGen/ubsan-trap-reason-nonnull-arg.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-nonnull-return.c (renamed from clang/test/CodeGen/ubsan-trap-reason-nonnull-return.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-nullability-arg.c (renamed from clang/test/CodeGen/ubsan-trap-reason-nullability-arg.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-nullability-return.c (renamed from clang/test/CodeGen/ubsan-trap-reason-nullability-return.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-out-of-bounds.c (renamed from clang/test/CodeGen/ubsan-trap-reason-out-of-bounds.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-pointer-overflow.c (renamed from clang/test/CodeGen/ubsan-trap-reason-pointer-overflow.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-shift-out-of-bounds.c (renamed from clang/test/CodeGen/ubsan-trap-reason-shift-out-of-bounds.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-sub-overflow.c30
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-type-mismatch.c (renamed from clang/test/CodeGen/ubsan-trap-reason-type-mismatch.c)0
-rw-r--r--clang/test/DebugInfo/Generic/ubsan-trap-reason-vla-bound-not-positive.c (renamed from clang/test/CodeGen/ubsan-trap-reason-vla-bound-not-positive.c)0
-rw-r--r--clang/test/DebugInfo/Generic/unique-internal-linkage-names-dwarf.c (renamed from clang/test/CodeGen/unique-internal-linkage-names-dwarf.c)0
-rw-r--r--clang/test/DebugInfo/Generic/unique-internal-linkage-names-dwarf.cpp (renamed from clang/test/CodeGen/unique-internal-linkage-names-dwarf.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/unsigned-promotion-debuginfo.c (renamed from clang/test/CodeGen/unsigned-promotion-debuginfo.c)0
-rw-r--r--clang/test/DebugInfo/Generic/unused-types.c (renamed from clang/test/CodeGen/debug-info-unused-types.c)0
-rw-r--r--clang/test/DebugInfo/Generic/unused-types.cpp (renamed from clang/test/CodeGen/debug-info-unused-types.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/unused_nested_enump.cpp (renamed from clang/test/CodeGen/unused_nested_enump.cpp)0
-rw-r--r--clang/test/DebugInfo/Generic/variables.c (renamed from clang/test/CodeGen/debug-info-variables.c)0
-rw-r--r--clang/test/DebugInfo/Generic/vector-bool.c (renamed from clang/test/CodeGen/debug-info-vector-bool.c)0
-rw-r--r--clang/test/DebugInfo/Generic/vector.c (renamed from clang/test/CodeGen/debug-info-vector.c)0
-rw-r--r--clang/test/DebugInfo/Generic/version-coff.c (renamed from clang/test/CodeGen/debug-info-version-coff.c)0
-rw-r--r--clang/test/DebugInfo/Generic/version.c (renamed from clang/test/CodeGen/debug-info-version.c)2
-rw-r--r--clang/test/DebugInfo/Generic/vla.c (renamed from clang/test/CodeGen/debug-info-vla.c)0
-rw-r--r--clang/test/DebugInfo/KeyInstructions/flag.cpp42
-rw-r--r--clang/test/DebugInfo/KeyInstructions/lit.local.cfg2
-rw-r--r--clang/test/DebugInfo/ObjC/2009-01-21-invalid.m (renamed from clang/test/CodeGenObjC/2009-01-21-invalid-debug-info.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/2010-02-09-Self.m (renamed from clang/test/CodeGenObjC/2010-02-09-DbgSelf.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/2010-02-15-MethodStart.m (renamed from clang/test/CodeGenObjC/2010-02-15-Dbg-MethodStart.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/2010-02-23-Inheritance.m (renamed from clang/test/CodeGenObjC/2010-02-23-DbgInheritance.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/arc-linetable-autorelease.m (renamed from clang/test/CodeGenObjC/arc-linetable-autorelease.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/arc-linetable.m (renamed from clang/test/CodeGenObjC/arc-linetable.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/block-byref.m (renamed from clang/test/CodeGenObjC/block-byref-debuginfo.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/block-helper.m (renamed from clang/test/CodeGenObjC/debug-info-block-helper.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/block-line.m (renamed from clang/test/CodeGenObjC/debug-info-block-line.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/block-type.m (renamed from clang/test/CodeGenObjC/debug-info-block-type.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/blocks-ivar.m (renamed from clang/test/CodeGenObjC/blocks-ivar-debug.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/blocks.m (renamed from clang/test/CodeGenObjC/debug-info-blocks.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/catch-lexical-block.m (renamed from clang/test/CodeGenObjC/catch-lexical-block.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/category.m (renamed from clang/test/CodeGenObjC/debug-info-category.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/class-extension.m (renamed from clang/test/CodeGenObjC/debug-info-class-extension.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/class-extension2.m (renamed from clang/test/CodeGenObjC/debug-info-class-extension2.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/class-extension3.m (renamed from clang/test/CodeGenObjC/debug-info-class-extension3.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/crash-2.m (renamed from clang/test/CodeGenObjC/debug-info-crash-2.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/crash.m (renamed from clang/test/CodeGenObjC/debug-info-crash.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/default-synth-ivar.m (renamed from clang/test/CodeGenObjC/debug-info-default-synth-ivar.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/direct-method.m (renamed from clang/test/CodeGenObjC/debug-info-direct-method.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/fwddecl.m (renamed from clang/test/CodeGenObjC/debug-info-fwddecl.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/getter-name.m (renamed from clang/test/CodeGenObjC/debug-info-getter-name.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/id-with-protocol.m (renamed from clang/test/CodeGenObjC/debug-info-id-with-protocol.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/impl.m (renamed from clang/test/CodeGenObjC/debug-info-impl.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/instancetype.m (renamed from clang/test/CodeGenObjC/debug-info-instancetype.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/ivars-extension.m (renamed from clang/test/CodeGenObjC/debug-info-ivars-extension.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/ivars-indirect.m (renamed from clang/test/CodeGenObjC/debug-info-ivars-indirect.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/ivars-private.m (renamed from clang/test/CodeGenObjC/debug-info-ivars-private.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/ivars.m (renamed from clang/test/CodeGenObjC/debug-info-ivars.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/layout-bitfield-crash.m (renamed from clang/test/CodeGenObjC/layout-bitfield-crash.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/lifetime-crash.m (renamed from clang/test/CodeGenObjC/debug-info-lifetime-crash.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/linkagename.m (renamed from clang/test/CodeGenObjC/debug-info-linkagename.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/lit.local.cfg5
-rw-r--r--clang/test/DebugInfo/ObjC/nested-blocks.m (renamed from clang/test/CodeGenObjC/debug-info-nested-blocks.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/nodebug.m (renamed from clang/test/CodeGenObjC/debug-info-nodebug.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/nontrivial-c-struct-exception.m (renamed from clang/test/CodeGenObjC/nontrivial-c-struct-exception.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/objc-fixed-enum.m (renamed from clang/test/CodeGenObjC/objc-fixed-enum.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/objc-property-dwarf5.m (renamed from clang/test/CodeGenObjC/debug-info-objc-property-dwarf5.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/objc2-weak-ivar.m (renamed from clang/test/CodeGenObjC/objc2-weak-ivar-debug.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/properties.m (renamed from clang/test/CodeGenObjC/debuginfo-properties.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/property-2.m (renamed from clang/test/CodeGenObjC/property-dbg.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/property-accessors.m (renamed from clang/test/CodeGenObjC/debug-info-property-accessors.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/property-class-extension.m (renamed from clang/test/CodeGenObjC/debug-info-property-class-extension.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/property-class-instance-same-name.m (renamed from clang/test/CodeGenObjC/debug-info-property-class-instance-same-name.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/property-synth.m (renamed from clang/test/CodeGenObjC/debug-property-synth.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/property.m (renamed from clang/test/CodeGenObjC/debug-info-property.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/property2.m (renamed from clang/test/CodeGenObjC/debug-info-property2.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/property3.m (renamed from clang/test/CodeGenObjC/debug-info-property3.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/property4.m (renamed from clang/test/CodeGenObjC/debug-info-property4.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/property5.m (renamed from clang/test/CodeGenObjC/debug-info-property5.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/pubtypes.m (renamed from clang/test/CodeGenObjC/debug-info-pubtypes.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/selector.m (renamed from clang/test/CodeGenObjC/debug-info-selector.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/self.m (renamed from clang/test/CodeGenObjC/debug-info-self.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/static-var.m (renamed from clang/test/CodeGenObjC/debug-info-static-var.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/synthesis.m (renamed from clang/test/CodeGenObjC/debug-info-synthesis.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/ubsan-check-debuglocs.m (renamed from clang/test/CodeGenObjC/ubsan-check-debuglocs.m)0
-rw-r--r--clang/test/DebugInfo/ObjC/variadic-method.m (renamed from clang/test/CodeGenObjC/debug-info-variadic-method.m)0
-rw-r--r--clang/test/DebugInfo/ObjCXX/block-capture-this.mm (renamed from clang/test/CodeGenObjCXX/debug-info-block-capture-this.mm)0
-rw-r--r--clang/test/DebugInfo/ObjCXX/cyclic.mm (renamed from clang/test/CodeGenObjCXX/debug-info-cyclic.mm)0
-rw-r--r--clang/test/DebugInfo/ObjCXX/debug-info.mm (renamed from clang/test/CodeGenObjCXX/debug-info.mm)0
-rw-r--r--clang/test/DebugInfo/ObjCXX/line.mm (renamed from clang/test/CodeGenObjCXX/debug-info-line.mm)0
-rw-r--r--clang/test/DebugInfo/ObjCXX/nested-ehlocation.mm (renamed from clang/test/CodeGenObjCXX/nested-ehlocation.mm)0
-rw-r--r--clang/test/DebugInfo/ObjCXX/pr14474-gline-tables-only.mm (renamed from clang/test/CodeGenObjCXX/pr14474-gline-tables-only.mm)0
-rw-r--r--clang/test/DebugInfo/RISCV/riscv-v-debuginfo.c (renamed from clang/test/CodeGen/RISCV/riscv-v-debuginfo.c)0
-rw-r--r--clang/test/DebugInfo/X86/i128-debuginfo.c (renamed from clang/test/CodeGen/X86/i128-debuginfo.c)0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/10/crtbegin.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/10/crtend.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/10/include/c++/.keep0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/11/crtbegin.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/11/crtend.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/11/include/.keep0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/12/crtbegin.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/12/crtend.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/12/include/.keep0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/10/crtbegin.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/10/crtend.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/10/include/.keep0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/11/crtbegin.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/11/crtend.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/11/include/c++/.keep0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/12/crtbegin.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/12/crtend.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/12/include/.keep0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/10/crtbegin.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/10/crtend.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/10/include/.keep0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/11/crtbegin.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/11/crtend.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/11/include/c++/.keep0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/12/crtbegin.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/12/crtend.o0
-rw-r--r--clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/12/include/c++/.keep0
-rw-r--r--clang/test/Driver/aarch64-cpu-defaults-appleos26.c22
-rw-r--r--clang/test/Driver/aarch64-features.c2
-rw-r--r--clang/test/Driver/aarch64-mac-cpus.c2
-rw-r--r--clang/test/Driver/aarch64-toolchain.c12
-rw-r--r--clang/test/Driver/arch-arm64e.c31
-rw-r--r--clang/test/Driver/arm-toolchain.c12
-rw-r--r--clang/test/Driver/cl-options.c1
-rw-r--r--clang/test/Driver/cl-x86-flags.c7
-rw-r--r--clang/test/Driver/clang-offload-bundler-zlib.c24
-rw-r--r--clang/test/Driver/clang-offload-bundler-zstd.c4
-rw-r--r--clang/test/Driver/cuda-bad-arch.cu2
-rw-r--r--clang/test/Driver/cuda-cross-compiling.c8
-rw-r--r--clang/test/Driver/cuda-detect-path.cu2
-rw-r--r--clang/test/Driver/dxc_fspv_extension.hlsl8
-rw-r--r--clang/test/Driver/dxc_rootsig-define.hlsl33
-rw-r--r--clang/test/Driver/dxc_strip_rootsignature.hlsl15
-rw-r--r--clang/test/Driver/fsanitize-debug-trap-reasons.c57
-rw-r--r--clang/test/Driver/fsanitize.c5
-rw-r--r--clang/test/Driver/gcc-toolchain-libstdcxx.cpp28
-rw-r--r--clang/test/Driver/hip-macros.hip14
-rw-r--r--clang/test/Driver/hip-runtime-libs-msvc.hip7
-rw-r--r--clang/test/Driver/hipspv-toolchain.hip2
-rw-r--r--clang/test/Driver/lanai-mcmodel.c10
-rw-r--r--clang/test/Driver/ld-path.c2
-rw-r--r--clang/test/Driver/linker-wrapper-libs.c191
-rw-r--r--clang/test/Driver/modules-driver-cxx20-module-usage-scanner.cpp192
-rw-r--r--clang/test/Driver/print-supported-extensions-riscv.c9
-rw-r--r--clang/test/Driver/program-path-priority.c2
-rw-r--r--clang/test/Driver/riscv-cpus.c6
-rw-r--r--clang/test/Driver/riscv32-toolchain.c21
-rw-r--r--clang/test/Driver/riscv64-toolchain.c21
-rw-r--r--clang/test/Driver/spirv-openmp-toolchain.c9
-rw-r--r--clang/test/Driver/spirv-toolchain.cl2
-rw-r--r--clang/test/Driver/x86-target-features.c4
-rw-r--r--clang/test/ExtractAPI/class_template_param_inheritance.cpp2
-rw-r--r--clang/test/FixIt/fixit-enum-scoped.cpp95
-rw-r--r--clang/test/Frontend/fsanitize-debug-trap-reasons.c6
-rw-r--r--clang/test/Frontend/skip-function-bodies.cpp6
-rw-r--r--clang/test/Headers/__clang_hip_cmath.hip16
-rw-r--r--clang/test/Headers/__clang_hip_math.hip160
-rw-r--r--clang/test/Headers/__cpuidex_conflict.c1
-rw-r--r--clang/test/Headers/mm3dnow.c3
-rw-r--r--clang/test/Headers/pmmintrin.c3
-rw-r--r--clang/test/Headers/spirv_functions.cpp2
-rw-r--r--clang/test/Headers/spirv_ids.cpp2
-rw-r--r--clang/test/Headers/x86-intrinsics-headers.c3
-rw-r--r--clang/test/Headers/x86intrin.c3
-rw-r--r--clang/test/Headers/x86intrin.cpp3
-rw-r--r--clang/test/Import/builtin-template/Inputs/S.cpp10
-rw-r--r--clang/test/Import/builtin-template/test.cpp12
-rw-r--r--clang/test/Index/Core/index-instantiated-source.cpp4
-rw-r--r--clang/test/Index/Core/index-source.cpp4
-rw-r--r--clang/test/Index/c-index-api-loadTU-test.m2
-rw-r--r--clang/test/Index/copy-assignment-operator.cpp4
-rw-r--r--clang/test/Index/index-refs.cpp4
-rw-r--r--clang/test/Index/keep-going.cpp4
-rw-r--r--clang/test/Index/move-assignment-operator.cpp2
-rw-r--r--clang/test/Index/opencl-types.cl16
-rw-r--r--clang/test/Index/paren-type.c2
-rw-r--r--clang/test/Index/print-type-size.cpp8
-rw-r--r--clang/test/Index/print-type.c14
-rw-r--r--clang/test/Index/print-type.cpp56
-rw-r--r--clang/test/Index/recursive-cxx-member-calls.cpp6
-rw-r--r--clang/test/Index/redeclarations.cpp2
-rw-r--r--clang/test/Index/skip-parsed-bodies/compile_commands.json6
-rw-r--r--clang/test/Interpreter/assignment-with-implicit-ctor.cpp1
-rw-r--r--clang/test/Interpreter/code-undo.cpp1
-rw-r--r--clang/test/Interpreter/const.cpp1
-rw-r--r--clang/test/Interpreter/cxx20-modules.cppm1
-rw-r--r--clang/test/Interpreter/execute-stmts.cpp1
-rw-r--r--clang/test/Interpreter/execute-weak.cpp2
-rw-r--r--clang/test/Interpreter/execute.c1
-rw-r--r--clang/test/Interpreter/execute.cpp2
-rw-r--r--clang/test/Interpreter/fail.cpp1
-rw-r--r--clang/test/Interpreter/global-dtor.cpp3
-rw-r--r--clang/test/Interpreter/incremental-mode.cpp2
-rw-r--r--clang/test/Interpreter/inline-asm.cpp1
-rw-r--r--clang/test/Interpreter/inline-virtual.cpp1
-rw-r--r--clang/test/Interpreter/lambda.cpp3
-rw-r--r--clang/test/Interpreter/lit.local.cfg6
-rw-r--r--clang/test/Interpreter/multiline.cpp1
-rw-r--r--clang/test/Interpreter/pretty-print.c1
-rw-r--r--clang/test/Interpreter/pretty-print.cpp2
-rw-r--r--clang/test/Interpreter/simple-exception.cpp3
-rw-r--r--clang/test/Lexer/cross-windows-on-linux.cpp2
-rw-r--r--clang/test/Lexer/has_feature_cfi.c87
-rw-r--r--clang/test/Misc/diag-template-diffing-cxx11.cpp8
-rw-r--r--clang/test/Misc/pragma-attribute-supported-attributes-list.test2
-rw-r--r--clang/test/Modules/GH153933.cpp23
-rw-r--r--clang/test/Modules/GH155028-1.cpp17
-rw-r--r--clang/test/Modules/befriend-2.cppm65
-rw-r--r--clang/test/Modules/befriend-3.cppm19
-rw-r--r--clang/test/Modules/merge-records.cppm21
-rw-r--r--clang/test/Modules/modules-merge-enum.m24
-rw-r--r--clang/test/Modules/odr_hash.cpp12
-rw-r--r--clang/test/Modules/pr138558.cppm54
-rw-r--r--clang/test/Modules/pr97313.cppm2
-rw-r--r--clang/test/Modules/redundant-template-default-arg2.cpp4
-rw-r--r--clang/test/Modules/safe_buffers_optout.cpp4
-rw-r--r--clang/test/Modules/skip-body-2.cppm58
-rw-r--r--clang/test/Modules/skip-body.cppm63
-rw-r--r--clang/test/OpenMP/allocate_modifiers_messages.cpp2
-rw-r--r--clang/test/OpenMP/amdgcn_debug_nowait.c16
-rw-r--r--clang/test/OpenMP/amdgcn_target_parallel_num_threads_codegen.cpp1095
-rw-r--r--clang/test/OpenMP/bug54082.c24
-rw-r--r--clang/test/OpenMP/bug56913.c4
-rw-r--r--clang/test/OpenMP/bug57757.cpp2
-rw-r--r--clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp2880
-rw-r--r--clang/test/OpenMP/distribute_parallel_for_simd_num_threads_strict_codegen.cpp3541
-rw-r--r--clang/test/OpenMP/error_codegen.cpp734
-rw-r--r--clang/test/OpenMP/error_message.cpp6
-rw-r--r--clang/test/OpenMP/irbuilder_unroll_partial_factor_for.c2
-rw-r--r--clang/test/OpenMP/irbuilder_unroll_partial_heuristic_constant_for.c2
-rw-r--r--clang/test/OpenMP/irbuilder_unroll_partial_heuristic_runtime_for.c2
-rw-r--r--clang/test/OpenMP/irbuilder_unroll_unroll_partial_factor.c2
-rw-r--r--clang/test/OpenMP/irbuilder_unroll_unroll_partial_heuristic.c2
-rw-r--r--clang/test/OpenMP/nvptx_target_codegen.cpp913
-rw-r--r--clang/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp760
-rw-r--r--clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp192
-rw-r--r--clang/test/OpenMP/parallel_generic_loop_codegen.cpp214
-rw-r--r--clang/test/OpenMP/parallel_message_messages.cpp28
-rw-r--r--clang/test/OpenMP/parallel_num_threads_codegen.cpp46
-rw-r--r--clang/test/OpenMP/target_default_ast.cpp81
-rw-r--r--clang/test/OpenMP/target_default_messages.cpp51
-rw-r--r--clang/test/OpenMP/target_map_array_of_structs_with_nested_mapper_ast_dump.cpp2
-rw-r--r--clang/test/OpenMP/target_map_array_section_of_structs_with_nested_mapper_ast_dump.cpp2
-rw-r--r--clang/test/OpenMP/target_parallel_generic_loop_codegen.cpp436
-rw-r--r--clang/test/OpenMP/target_parallel_num_threads_messages.cpp106
-rw-r--r--clang/test/OpenMP/target_parallel_num_threads_strict_codegen.cpp2956
-rw-r--r--clang/test/OpenMP/target_update_strided_messages.c38
-rw-r--r--clang/test/OpenMP/target_update_strided_multiple_messages.c46
-rw-r--r--clang/test/OpenMP/target_update_strided_partial_messages.c32
-rw-r--r--clang/test/OpenMP/teams_distribute_parallel_for_num_threads_strict_codegen.cpp1447
-rw-r--r--clang/test/OpenMP/teams_distribute_parallel_for_simd_num_threads_strict_codegen.cpp1911
-rw-r--r--clang/test/PCH/cxx-explicit-specifier.cpp4
-rw-r--r--clang/test/PCH/dedup_types.cpp20
-rw-r--r--clang/test/Parser/MicrosoftExtensions.cpp2
-rw-r--r--clang/test/Parser/brackets.cpp51
-rw-r--r--clang/test/Parser/cxx-variadic-func.cpp21
-rw-r--r--clang/test/Parser/cxx0x-attributes-preprocessor-tokens.cpp58
-rw-r--r--clang/test/Parser/cxx1z-class-template-argument-deduction.cpp6
-rw-r--r--clang/test/Parser/cxx2c-oxford-variadic-comma.cpp1
-rw-r--r--clang/test/Parser/diagnose_if.cpp12
-rw-r--r--clang/test/Parser/explicit-bool-pre-cxx17.cpp15
-rw-r--r--clang/test/ParserOpenACC/parse-clauses.c2
-rw-r--r--clang/test/Preprocessor/embed_constexpr.c5
-rw-r--r--clang/test/Preprocessor/embed_parsing_errors.c9
-rw-r--r--clang/test/Preprocessor/file_test.c2
-rw-r--r--clang/test/Preprocessor/init.c8
-rw-r--r--clang/test/Preprocessor/predefined-arch-macros.c2
-rw-r--r--clang/test/Preprocessor/ptrauth_extension.c30
-rw-r--r--clang/test/Preprocessor/ptrauth_feature.c10
-rw-r--r--clang/test/Preprocessor/riscv-target-features.c9
-rw-r--r--clang/test/Preprocessor/sanitizer-predefines.c8
-rw-r--r--clang/test/Preprocessor/x86_target_features.c2
-rw-r--r--clang/test/Sema/GH155794.c6
-rw-r--r--clang/test/Sema/aarch64-sve-intrinsics/acle_sve_compact.cpp18
-rw-r--r--clang/test/Sema/aarch64-sve2-intrinsics/acle_sve2_aes_bitperm_sha3_sm4.cpp32
-rw-r--r--clang/test/Sema/address-packed.c8
-rw-r--r--clang/test/Sema/attr-cfi-salt.c60
-rw-r--r--clang/test/Sema/builtin-masked.c46
-rw-r--r--clang/test/Sema/builtin-object-size.c4
-rw-r--r--clang/test/Sema/builtins-elementwise-math.c101
-rw-r--r--clang/test/Sema/builtins-wasm.c17
-rw-r--r--clang/test/Sema/c2x-nodiscard.c17
-rw-r--r--clang/test/Sema/constant-builtins-vector.cpp101
-rw-r--r--clang/test/Sema/constexpr.c10
-rw-r--r--clang/test/Sema/designated-initializers.c7
-rw-r--r--clang/test/Sema/format-strings-signedness.c60
-rw-r--r--clang/test/Sema/gh152826.c7
-rw-r--r--clang/test/Sema/implicit-void-ptr-cast.c22
-rw-r--r--clang/test/Sema/ptrauth-qualifier.c16
-rw-r--r--clang/test/Sema/warn-alloc-size.c49
-rw-r--r--clang/test/Sema/warn-lifetime-safety-dataflow.cpp153
-rw-r--r--clang/test/Sema/warn-lifetime-safety.cpp273
-rw-r--r--clang/test/SemaCXX/MicrosoftExtensions.cpp8
-rw-r--r--clang/test/SemaCXX/builtin-get-vtable-pointer.cpp2
-rw-r--r--clang/test/SemaCXX/class-base-member-init.cpp2
-rw-r--r--clang/test/SemaCXX/co_await-ast.cpp34
-rw-r--r--clang/test/SemaCXX/compound-literal.cpp34
-rw-r--r--clang/test/SemaCXX/constant-expression-cxx11.cpp16
-rw-r--r--clang/test/SemaCXX/constant-expression.cpp2
-rw-r--r--clang/test/SemaCXX/constructor.cpp2
-rw-r--r--clang/test/SemaCXX/coroutine-allocs.cpp6
-rw-r--r--clang/test/SemaCXX/coroutine-traits-undefined-template.cpp2
-rw-r--r--clang/test/SemaCXX/coroutines.cpp4
-rw-r--r--clang/test/SemaCXX/ctad.cpp4
-rw-r--r--clang/test/SemaCXX/cxx1y-variable-templates_in_class.cpp10
-rw-r--r--clang/test/SemaCXX/cxx1z-class-template-argument-deduction.cpp6
-rw-r--r--clang/test/SemaCXX/cxx20-ctad-type-alias.cpp38
-rw-r--r--clang/test/SemaCXX/cxx23-invalid-constexpr.cpp1
-rw-r--r--clang/test/SemaCXX/cxx2a-destroying-delete.cpp2
-rw-r--r--clang/test/SemaCXX/cxx2b-consteval-propagate.cpp16
-rw-r--r--clang/test/SemaCXX/cxx2b-deducing-this.cpp4
-rw-r--r--clang/test/SemaCXX/cxx2c-variadic-friends.cpp2
-rw-r--r--clang/test/SemaCXX/destructor.cpp6
-rw-r--r--clang/test/SemaCXX/elaborated-type-specifier.cpp2
-rw-r--r--clang/test/SemaCXX/enum-scoped.cpp111
-rw-r--r--clang/test/SemaCXX/err_init_conversion_failed.cpp2
-rw-r--r--clang/test/SemaCXX/gh102293.cpp2
-rw-r--r--clang/test/SemaCXX/gh113323.cpp6
-rw-r--r--clang/test/SemaCXX/incomplete-call.cpp2
-rw-r--r--clang/test/SemaCXX/lambda-expressions.cpp5
-rw-r--r--clang/test/SemaCXX/matrix-casts.cpp4
-rw-r--r--clang/test/SemaCXX/nested-name-spec.cpp8
-rw-r--r--clang/test/SemaCXX/new-delete.cpp14
-rw-r--r--clang/test/SemaCXX/opaque-enum-declaration-in-class-template.cpp2
-rw-r--r--clang/test/SemaCXX/pointer-forward-declared-class-conversion.cpp4
-rw-r--r--clang/test/SemaCXX/pr100095.cpp1
-rw-r--r--clang/test/SemaCXX/pseudo-destructors.cpp2
-rw-r--r--clang/test/SemaCXX/ptrauth-triviality.cpp6
-rw-r--r--clang/test/SemaCXX/ptrauth-type-traits.cpp401
-rw-r--r--clang/test/SemaCXX/static-assert.cpp2
-rw-r--r--clang/test/SemaCXX/sugar-common-types.cpp5
-rw-r--r--clang/test/SemaCXX/sugared-auto.cpp2
-rw-r--r--clang/test/SemaCXX/trivially-relocatable-ptrauth.cpp4
-rw-r--r--clang/test/SemaCXX/type-aware-coroutines.cpp8
-rw-r--r--clang/test/SemaCXX/type-trait-synthesises-from-spaceship.cpp212
-rw-r--r--clang/test/SemaCXX/type-traits-unsatisfied-diags-std.cpp76
-rw-r--r--clang/test/SemaCXX/type-traits-unsatisfied-diags.cpp43
-rw-r--r--clang/test/SemaCXX/undefined-partial-specialization.cpp2
-rw-r--r--clang/test/SemaCXX/using-decl-templates.cpp8
-rw-r--r--clang/test/SemaCXX/vector-bool.cpp4
-rw-r--r--clang/test/SemaCXX/warn-unused-result.cpp89
-rw-r--r--clang/test/SemaCXX/wmissing-noreturn-suggestion.cpp12
-rw-r--r--clang/test/SemaHIP/amdgcnspirv-implicit-alloc-function-calling-conv.hip32
-rw-r--r--clang/test/SemaObjC/attr-nodiscard.m9
-rw-r--r--clang/test/SemaObjC/exprs.m7
-rw-r--r--clang/test/SemaObjC/ptrauth-qualifier.m16
-rw-r--r--clang/test/SemaObjCXX/Inputs/nullability-pragmas-1.h2
-rw-r--r--clang/test/SemaObjCXX/arc-bridged-cast.mm4
-rw-r--r--clang/test/SemaObjCXX/attr-nodiscard.mm9
-rw-r--r--clang/test/SemaObjCXX/discarded-block-type-inference.mm15
-rw-r--r--clang/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm4
-rw-r--r--clang/test/SemaObjCXX/objcbridge-attribute-arc.mm178
-rw-r--r--clang/test/SemaObjCXX/objcbridge-attribute.mm180
-rw-r--r--clang/test/SemaObjCXX/objcbridge-related-attribute.mm12
-rw-r--r--clang/test/SemaObjCXX/objcbridge-static-cast.mm104
-rw-r--r--clang/test/SemaOpenACC/combined-construct-num_workers-ast.cpp2
-rw-r--r--clang/test/SemaOpenACC/combined-construct-reduction-clause.cpp25
-rw-r--r--clang/test/SemaOpenACC/compute-construct-intexpr-clause-ast.cpp2
-rw-r--r--clang/test/SemaOpenACC/compute-construct-reduction-clause.c10
-rw-r--r--clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp63
-rw-r--r--clang/test/SemaOpenACC/compute-construct-varlist-ast.cpp4
-rw-r--r--clang/test/SemaOpenACC/gh154008.cpp5
-rw-r--r--clang/test/SemaOpenACC/init-construct.cpp6
-rw-r--r--clang/test/SemaOpenACC/loop-construct-reduction-clause.cpp30
-rw-r--r--clang/test/SemaOpenACC/shutdown-construct.cpp6
-rw-r--r--clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl39
-rw-r--r--clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250.cl7
-rw-r--r--clang/test/SemaSYCL/sycl-external-attr-appertainment.cpp36
-rw-r--r--clang/test/SemaSYCL/sycl-external-attr-grammar.cpp14
-rw-r--r--clang/test/SemaSYCL/sycl-external-attr-ignored.cpp15
-rw-r--r--clang/test/SemaSYCL/sycl-external-attr.cpp154
-rw-r--r--clang/test/SemaSYCL/sycl-kernel-entry-point-attr-appertainment.cpp13
-rw-r--r--clang/test/SemaTemplate/aggregate-deduction-candidate.cpp84
-rw-r--r--clang/test/SemaTemplate/class-template-ctor-initializer.cpp13
-rw-r--r--clang/test/SemaTemplate/ctad.cpp41
-rw-r--r--clang/test/SemaTemplate/current-instantiation.cpp2
-rw-r--r--clang/test/SemaTemplate/deduction-crash.cpp4
-rw-r--r--clang/test/SemaTemplate/deduction-guide.cpp136
-rw-r--r--clang/test/SemaTemplate/dedup-types-builtin.cpp225
-rw-r--r--clang/test/SemaTemplate/dependent-base-classes.cpp10
-rw-r--r--clang/test/SemaTemplate/dependent-names.cpp2
-rw-r--r--clang/test/SemaTemplate/elaborated-type-specifier.cpp11
-rw-r--r--clang/test/SemaTemplate/instantiate-requires-expr.cpp4
-rw-r--r--clang/test/SemaTemplate/make_integer_seq.cpp162
-rw-r--r--clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp2
-rw-r--r--clang/test/SemaTemplate/ms-sizeof-missing-typename.cpp12
-rw-r--r--clang/test/SemaTemplate/nested-implicit-deduction-guides.cpp6
-rw-r--r--clang/test/SemaTemplate/nested-name-spec-template.cpp76
-rw-r--r--clang/test/SemaTemplate/nested-template.cpp4
-rw-r--r--clang/test/SemaTemplate/overload-candidates.cpp12
-rw-r--r--clang/test/SemaTemplate/temp_arg_nontype.cpp2
-rw-r--r--clang/test/SemaTemplate/temp_arg_nontype_cxx1z.cpp5
-rw-r--r--clang/test/SemaTemplate/template-id-expr.cpp2
-rw-r--r--clang/test/SemaTemplate/type_pack_element.cpp86
-rw-r--r--clang/test/SemaTemplate/typename-specifier-4.cpp2
-rw-r--r--clang/test/SemaTemplate/typename-specifier.cpp6
-rw-r--r--clang/test/SemaTemplate/using-decl.cpp12
-rw-r--r--clang/test/lit.cfg.py12
-rw-r--r--clang/test/lit.site.cfg.py.in1
-rw-r--r--clang/test/utils/update_cc_test_checks/lit.local.cfg2
-rwxr-xr-xclang/tools/clang-format/git-clang-format2
-rw-r--r--clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp216
-rw-r--r--clang/tools/clang-nvlink-wrapper/ClangNVLinkWrapper.cpp20
-rw-r--r--clang/tools/diagtool/ListWarnings.cpp3
-rw-r--r--clang/tools/libclang/CIndex.cpp173
-rw-r--r--clang/tools/libclang/CIndexCodeCompletion.cpp4
-rw-r--r--clang/tools/libclang/CMakeLists.txt6
-rw-r--r--clang/tools/libclang/CXCursor.cpp8
-rw-r--r--clang/tools/libclang/CXIndexDataConsumer.cpp26
-rw-r--r--clang/tools/libclang/CXType.cpp36
-rw-r--r--clang/tools/libclang/CursorVisitor.h4
-rw-r--r--clang/unittests/AST/ASTContextParentMapTest.cpp10
-rw-r--r--clang/unittests/AST/ASTExprTest.cpp4
-rw-r--r--clang/unittests/AST/ASTImporterFixtures.h3
-rw-r--r--clang/unittests/AST/ASTImporterTest.cpp132
-rw-r--r--clang/unittests/AST/ByteCode/Descriptor.cpp7
-rw-r--r--clang/unittests/AST/DeclPrinterTest.cpp62
-rw-r--r--clang/unittests/AST/DeclTest.cpp18
-rw-r--r--clang/unittests/AST/ProfilingTest.cpp8
-rw-r--r--clang/unittests/AST/RandstructTest.cpp3
-rw-r--r--clang/unittests/AST/RecursiveASTVisitorTest.cpp5
-rw-r--r--clang/unittests/AST/SizelessTypesTest.cpp2
-rw-r--r--clang/unittests/AST/StructuralEquivalenceTest.cpp15
-rw-r--r--clang/unittests/AST/TemplateNameTest.cpp13
-rw-r--r--clang/unittests/AST/TypePrinterTest.cpp13
-rw-r--r--clang/unittests/ASTMatchers/ASTMatchersNarrowingTest.cpp25
-rw-r--r--clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp121
-rw-r--r--clang/unittests/ASTMatchers/ASTMatchersTraversalTest.cpp260
-rw-r--r--clang/unittests/Analysis/FlowSensitive/CMakeLists.txt1
-rw-r--r--clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp6
-rw-r--r--clang/unittests/Analysis/FlowSensitive/DataflowAnalysisContextTest.cpp96
-rw-r--r--clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp2
-rw-r--r--clang/unittests/Analysis/FlowSensitive/FormulaTest.cpp199
-rw-r--r--clang/unittests/Analysis/FlowSensitive/TransferTest.cpp280
-rw-r--r--clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp74
-rw-r--r--clang/unittests/Analysis/LifetimeSafetyTest.cpp13
-rw-r--r--clang/unittests/CodeGen/CodeGenExternalTest.cpp5
-rw-r--r--clang/unittests/Format/AlignBracketsTest.cpp784
-rw-r--r--clang/unittests/Format/CMakeLists.txt2
-rw-r--r--clang/unittests/Format/ConfigParseTest.cpp12
-rw-r--r--clang/unittests/Format/FormatTest.cpp820
-rw-r--r--clang/unittests/Format/FormatTestTableGen.cpp7
-rw-r--r--clang/unittests/Format/NumericLiteralInfoTest.cpp71
-rw-r--r--clang/unittests/Format/TokenAnnotatorTest.cpp15
-rw-r--r--clang/unittests/Index/IndexTests.cpp2
-rw-r--r--clang/unittests/Interpreter/CMakeLists.txt69
-rw-r--r--clang/unittests/Interpreter/CodeCompletionTest.cpp6
-rw-r--r--clang/unittests/Interpreter/IncrementalCompilerBuilderTest.cpp8
-rw-r--r--clang/unittests/Interpreter/InterpreterExtensionsTest.cpp7
-rw-r--r--clang/unittests/Interpreter/InterpreterTest.cpp8
-rw-r--r--clang/unittests/Interpreter/InterpreterTestFixture.h6
-rw-r--r--clang/unittests/Lex/CMakeLists.txt1
-rw-r--r--clang/unittests/Lex/LexerTest.cpp4
-rw-r--r--clang/unittests/Lex/ModuleDeclStateTest.cpp128
-rw-r--r--clang/unittests/Lex/NoTrivialPPDirectiveTracerTest.cpp182
-rw-r--r--clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp46
-rw-r--r--clang/unittests/Sema/CMakeLists.txt6
-rw-r--r--clang/unittests/Sema/HeuristicResolverTest.cpp64
-rw-r--r--clang/unittests/StaticAnalyzer/ExprEngineVisitTest.cpp24
-rw-r--r--clang/unittests/StaticAnalyzer/SValTest.cpp16
-rw-r--r--clang/unittests/Tooling/LookupTest.cpp18
-rw-r--r--clang/unittests/Tooling/QualTypeNamesTest.cpp40
-rw-r--r--clang/unittests/Tooling/RangeSelectorTest.cpp6
-rw-r--r--clang/unittests/Tooling/RecursiveASTVisitorTestTypeLocVisitor.cpp12
-rw-r--r--clang/unittests/Tooling/RecursiveASTVisitorTests/MemberPointerTypeLoc.cpp2
-rw-r--r--clang/unittests/Tooling/RecursiveASTVisitorTests/NestedNameSpecifiers.cpp19
-rw-r--r--clang/unittests/Tooling/RefactoringTest.cpp9
-rw-r--r--clang/utils/TableGen/ASTTableGen.h2
-rw-r--r--clang/utils/TableGen/ClangTypeNodesEmitter.cpp24
-rw-r--r--clang/utils/TableGen/SveEmitter.cpp4
-rw-r--r--clang/www/c_status.html7
1865 files changed, 93779 insertions, 36412 deletions
diff --git a/clang/bindings/python/clang/cindex.py b/clang/bindings/python/clang/cindex.py
index 8246743..812ad2c 100644
--- a/clang/bindings/python/clang/cindex.py
+++ b/clang/bindings/python/clang/cindex.py
@@ -1909,6 +1909,15 @@ class Cursor(Structure):
@property
@cursor_null_guard
+ def language(self) -> LanguageKind:
+ """Determine the "language" of the entity referred to by a given cursor."""
+ if not hasattr(self, "_language"):
+ self._language = conf.lib.clang_getCursorLanguage(self)
+
+ return LanguageKind.from_id(self._language)
+
+ @property
+ @cursor_null_guard
def tls_kind(self) -> TLSKind:
"""Return the thread-local storage (TLS) kind of this cursor."""
if not hasattr(self, "_tls_kind"):
@@ -2584,6 +2593,17 @@ class LinkageKind(BaseEnumeration):
EXTERNAL = 4
+class LanguageKind(BaseEnumeration):
+ """
+ Describe the "language" of the entity referred to by a cursor.
+ """
+
+ INVALID = 0
+ C = 1
+ OBJ_C = 2
+ C_PLUS_PLUS = 3
+
+
class TLSKind(BaseEnumeration):
"""Describes the kind of thread-local storage (TLS) of a cursor."""
@@ -4084,6 +4104,7 @@ FUNCTION_LIST: list[LibFunc] = [
("clang_getCursorDisplayName", [Cursor], _CXString),
("clang_getCursorExceptionSpecificationType", [Cursor], c_int),
("clang_getCursorExtent", [Cursor], SourceRange),
+ ("clang_getCursorLanguage", [Cursor], c_int),
("clang_getCursorLexicalParent", [Cursor], Cursor),
("clang_getCursorLinkage", [Cursor], c_int),
("clang_getCursorLocation", [Cursor], SourceLocation),
diff --git a/clang/bindings/python/tests/CMakeLists.txt b/clang/bindings/python/tests/CMakeLists.txt
index a0ddabc..d9a6bbf 100644
--- a/clang/bindings/python/tests/CMakeLists.txt
+++ b/clang/bindings/python/tests/CMakeLists.txt
@@ -35,7 +35,7 @@ if(WIN32)
endif()
# The Python FFI interface is broken on AIX: https://bugs.python.org/issue38628.
-if(${CMAKE_SYSTEM_NAME} MATCHES "AIX")
+if("${CMAKE_SYSTEM_NAME}" MATCHES "AIX")
set(RUN_PYTHON_TESTS FALSE)
endif()
diff --git a/clang/bindings/python/tests/cindex/test_cursor_language.py b/clang/bindings/python/tests/cindex/test_cursor_language.py
new file mode 100644
index 0000000..de07a7b
--- /dev/null
+++ b/clang/bindings/python/tests/cindex/test_cursor_language.py
@@ -0,0 +1,27 @@
+import os
+
+from clang.cindex import Config, LanguageKind
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+ Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
+
+import unittest
+
+from .util import get_cursor, get_tu
+
+
+class TestCursorLanguage(unittest.TestCase):
+ def test_c(self):
+ tu = get_tu("int a;", lang="c")
+ main_func = get_cursor(tu.cursor, "a")
+ self.assertEqual(main_func.language, LanguageKind.C)
+
+ def test_cpp(self):
+ tu = get_tu("class Cls {};", lang="cpp")
+ main_func = get_cursor(tu.cursor, "Cls")
+ self.assertEqual(main_func.language, LanguageKind.C_PLUS_PLUS)
+
+ def test_obj_c(self):
+ tu = get_tu("@interface If : NSObject", lang="objc")
+ main_func = get_cursor(tu.cursor, "If")
+ self.assertEqual(main_func.language, LanguageKind.OBJ_C)
diff --git a/clang/bindings/python/tests/cindex/test_enums.py b/clang/bindings/python/tests/cindex/test_enums.py
index 9e7f44f..48452fd 100644
--- a/clang/bindings/python/tests/cindex/test_enums.py
+++ b/clang/bindings/python/tests/cindex/test_enums.py
@@ -6,6 +6,7 @@ from clang.cindex import (
BinaryOperator,
CursorKind,
ExceptionSpecificationKind,
+ LanguageKind,
LinkageKind,
RefQualifierKind,
StorageClass,
@@ -26,6 +27,7 @@ class TestEnums(unittest.TestCase):
AccessSpecifier,
TypeKind,
RefQualifierKind,
+ LanguageKind,
LinkageKind,
TLSKind,
StorageClass,
diff --git a/clang/bindings/python/tests/cindex/test_type.py b/clang/bindings/python/tests/cindex/test_type.py
index 34081bb..cc101be 100644
--- a/clang/bindings/python/tests/cindex/test_type.py
+++ b/clang/bindings/python/tests/cindex/test_type.py
@@ -63,7 +63,7 @@ class TestType(unittest.TestCase):
self.assertIsNotNone(fields[1].translation_unit)
self.assertEqual(fields[1].spelling, "b")
self.assertFalse(fields[1].type.is_const_qualified())
- self.assertEqual(fields[1].type.kind, TypeKind.ELABORATED)
+ self.assertEqual(fields[1].type.kind, TypeKind.TYPEDEF)
self.assertEqual(fields[1].type.get_canonical().kind, TypeKind.INT)
self.assertEqual(fields[1].type.get_declaration().spelling, "I")
self.assertEqual(fields[1].type.get_typedef_name(), "I")
diff --git a/clang/cmake/caches/Fuchsia.cmake b/clang/cmake/caches/Fuchsia.cmake
index a3f86f6..46ae7c6 100644
--- a/clang/cmake/caches/Fuchsia.cmake
+++ b/clang/cmake/caches/Fuchsia.cmake
@@ -179,7 +179,7 @@ set(BOOTSTRAP_LLVM_ENABLE_LLD ON CACHE BOOL "")
set(BOOTSTRAP_LLVM_ENABLE_LTO ON CACHE BOOL "")
if(FUCHSIA_ENABLE_PGO)
- set(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED ON CACHE BOOL "")
+ set(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED IR CACHE BOOL "")
set(_FUCHSIA_BOOTSTRAP_TARGETS
generate-profdata
diff --git a/clang/cmake/caches/PGO.cmake b/clang/cmake/caches/PGO.cmake
index 15bc755..d6471160 100644
--- a/clang/cmake/caches/PGO.cmake
+++ b/clang/cmake/caches/PGO.cmake
@@ -5,7 +5,7 @@ set(LLVM_ENABLE_PROJECTS "clang;lld" CACHE STRING "")
set(LLVM_ENABLE_RUNTIMES "compiler-rt;libcxx;libcxxabi;libunwind" CACHE STRING "")
set(LLVM_TARGETS_TO_BUILD Native CACHE STRING "")
-set(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED ON CACHE BOOL "")
+set(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED IR CACHE BOOL "")
set(CLANG_BOOTSTRAP_TARGETS
generate-profdata
stage2
diff --git a/clang/docs/APINotes.rst b/clang/docs/APINotes.rst
index e5ec154..dec4b18 100644
--- a/clang/docs/APINotes.rst
+++ b/clang/docs/APINotes.rst
@@ -206,6 +206,17 @@ declaration kind), all of which are optional:
- Name: tzdb
SwiftCopyable: false
+ A non-copyable type can have a "destroy" operation, specified with
+ `SwiftDestroyOp`, which will be invoked on the instance when it is no
+ longer in use to free up resources.
+
+ ::
+
+ Tags:
+ - Name: WGPUAdapterInfo
+ SwiftCopyable: false
+ SwiftDestroyOp: wgpuAdapterInfoFreeMembers
+
:SwiftConformsTo:
Allows annotating a C++ class as conforming to a Swift protocol. Equivalent
diff --git a/clang/docs/AddressSanitizer.rst b/clang/docs/AddressSanitizer.rst
index 8d9295f..21e1a36 100644
--- a/clang/docs/AddressSanitizer.rst
+++ b/clang/docs/AddressSanitizer.rst
@@ -297,7 +297,7 @@ Instrumentation code outlining
By default AddressSanitizer inlines the instrumentation code to improve the
run-time performance, which leads to increased binary size. Using the
-(clang flag ``-fsanitize-address-outline-instrumentation` default: ``false``)
+(clang flag ``-fsanitize-address-outline-instrumentation`` default: ``false``)
flag forces all code instrumentation to be outlined, which reduces the size
of the generated code, but also reduces the run-time performance.
diff --git a/clang/docs/AutomaticReferenceCounting.rst b/clang/docs/AutomaticReferenceCounting.rst
index bcac732..80bbd251 100644
--- a/clang/docs/AutomaticReferenceCounting.rst
+++ b/clang/docs/AutomaticReferenceCounting.rst
@@ -740,7 +740,7 @@ following rules apply:
* If the qualifier is so applied at a position in the declaration
where the next-innermost declarator is a function declarator, and
- there is an block declarator within that function declarator, then
+ there is a block declarator within that function declarator, then
the qualifier applies instead to that block declarator and this rule
is considered afresh beginning from the new position.
@@ -924,7 +924,7 @@ not support ``__weak`` references.
A class may indicate that it does not support weak references by providing the
``objc_arc_weak_reference_unavailable`` attribute on the class's interface declaration. A
-retainable object pointer type is **weak-unavailable** if
+retainable object pointer type is **weak-unavailable** if it
is a pointer to an (optionally protocol-qualified) Objective-C class ``T`` where
``T`` or one of its superclasses has the ``objc_arc_weak_reference_unavailable``
attribute. A program is ill-formed if it applies the ``__weak`` ownership
@@ -1129,7 +1129,7 @@ be the same for identical code.
C and C++ while still automatically managing memory. While it is
usually simpler and more idiomatic to use Objective-C objects for
secondary data structures, doing so can introduce extra allocation
- and message-send overhead, which can cause to unacceptable
+ and message-send overhead, which can cause unacceptable
performance. Using structs can resolve some of this tension.
``__autoreleasing`` is forbidden because it is treacherous to rely
@@ -1446,7 +1446,7 @@ ill-formed.
Template arguments
^^^^^^^^^^^^^^^^^^
-If a template argument for a template type parameter is an retainable object
+If a template argument for a template type parameter is a retainable object
owner type that does not have an explicit ownership qualifier, it is adjusted
to have ``__strong`` qualification. This adjustment occurs regardless of
whether the template argument was deduced or explicitly specified.
@@ -2064,7 +2064,7 @@ You can test if your compiler has support for ``objc_externally_retained`` with
``self``
--------
-The ``self`` parameter variable of an non-init Objective-C method is considered
+The ``self`` parameter variable of a non-init Objective-C method is considered
:ref:`externally-retained <arc.misc.externally_retained>` by the implementation.
It is undefined behavior, or at least dangerous, to cause an object to be
deallocated during a message send to that object. In an init method, ``self``
@@ -2334,7 +2334,7 @@ emit, which are described in the remainder of this section.
* the machine code to do so is significantly smaller,
* it is much easier to recognize the C functions in the ARC optimizer, and
- * a sufficient sophisticated runtime may be able to avoid the message send in
+ * a sufficiently sophisticated runtime may be able to avoid the message send in
common cases.
Several other of these functions are "fused" operations which can be
diff --git a/clang/docs/ClangFormatStyleOptions.rst b/clang/docs/ClangFormatStyleOptions.rst
index 02986a9..3ac9e37 100644
--- a/clang/docs/ClangFormatStyleOptions.rst
+++ b/clang/docs/ClangFormatStyleOptions.rst
@@ -126,6 +126,9 @@ clang-format is turned off or back on.
// clang-format on
void formatted_code_again;
+In addition, the ``OneLineFormatOffRegex`` option gives you a concise way to
+disable formatting for all lines that match a given regular expression.
+
Configuring Style in Code
=========================
@@ -6483,13 +6486,51 @@ the configuration (without a prefix: ``Auto``).
.. _SpaceInEmptyBlock:
**SpaceInEmptyBlock** (``Boolean``) :versionbadge:`clang-format 10` :ref:`¶ <SpaceInEmptyBlock>`
- If ``true``, spaces will be inserted into ``{}``.
+ This option is **deprecated**. See ``Block`` of ``SpaceInEmptyBraces``.
+
+.. _SpaceInEmptyBraces:
+
+**SpaceInEmptyBraces** (``SpaceInEmptyBracesStyle``) :versionbadge:`clang-format 22` :ref:`¶ <SpaceInEmptyBraces>`
+ Specifies when to insert a space in empty braces.
+
+ .. note::
+
+ This option doesn't apply to initializer braces if
+ ``Cpp11BracedListStyle`` is set to ``true``.
+
+ Possible values:
+
+ * ``SIEB_Always`` (in configuration: ``Always``)
+ Always insert a space in empty braces.
+
+ .. code-block:: c++
+
+ void f() { }
+ class Unit { };
+ auto a = [] { };
+ int x{ };
+
+ * ``SIEB_Block`` (in configuration: ``Block``)
+ Only insert a space in empty blocks.
+
+ .. code-block:: c++
+
+ void f() { }
+ class Unit { };
+ auto a = [] { };
+ int x{};
+
+ * ``SIEB_Never`` (in configuration: ``Never``)
+ Never insert a space in empty braces.
+
+ .. code-block:: c++
+
+ void f() {}
+ class Unit {};
+ auto a = [] {};
+ int x{};
- .. code-block:: c++
- true: false:
- void f() { } vs. void f() {}
- while (true) { } while (true) {}
.. _SpaceInEmptyParentheses:
diff --git a/clang/docs/ClangLinkerWrapper.rst b/clang/docs/ClangLinkerWrapper.rst
index e69cdba..eb38d2b 100644
--- a/clang/docs/ClangLinkerWrapper.rst
+++ b/clang/docs/ClangLinkerWrapper.rst
@@ -60,6 +60,10 @@ only for the linker wrapper will be forwarded to the wrapped linker job.
--v Display the version number and exit
-- The separator for the wrapped linker arguments
+The linker wrapper will generate the appropriate runtime calls to register the
+generated device binary with the offloading runtime. To do this step manually, we
+provide the ``llvm-offload-wrapper`` utility.
+
Relocatable Linking
===================
diff --git a/clang/docs/ClangOffloadBundler.rst b/clang/docs/ClangOffloadBundler.rst
index 62cf164..5570dbb 100644
--- a/clang/docs/ClangOffloadBundler.rst
+++ b/clang/docs/ClangOffloadBundler.rst
@@ -525,15 +525,15 @@ The compressed offload bundle begins with a header followed by the compressed bi
This is a unique identifier to distinguish compressed offload bundles. The value is the string 'CCOB' (Compressed Clang Offload Bundle).
- **Version Number (16-bit unsigned int)**:
- This denotes the version of the compressed offload bundle format. The current version is `2`.
+ This denotes the version of the compressed offload bundle format. The current version is `3`.
- **Compression Method (16-bit unsigned int)**:
This field indicates the compression method used. The value corresponds to either `zlib` or `zstd`, represented as a 16-bit unsigned integer cast from the LLVM compression enumeration.
-- **Total File Size (32-bit unsigned int)**:
+- **Total File Size (unsigned int, 32-bit in v2, 64-bit in v3)**:
This is the total size (in bytes) of the file, including the header. Available in version 2 and above.
-- **Uncompressed Binary Size (32-bit unsigned int)**:
+- **Uncompressed Binary Size (unsigned int, 32-bit in v2, 64-bit in v3)**:
This is the size (in bytes) of the binary data before it was compressed.
- **Hash (64-bit unsigned int)**:
@@ -542,4 +542,4 @@ The compressed offload bundle begins with a header followed by the compressed bi
- **Compressed Data**:
The actual compressed binary data follows the header. Its size can be inferred from the total size of the file minus the header size.
- > **Note**: Version 3 of the format is under development. It uses 64-bit fields for Total File Size and Uncompressed Binary Size to support files larger than 4GB. To experiment with version 3, set the environment variable `COMPRESSED_BUNDLE_FORMAT_VERSION=3`. This support is experimental and not recommended for production use.
+ > **Note**: Version 3 is now the default format. For backward compatibility with older HIP runtimes that support version 2 only, set the environment variable `COMPRESSED_BUNDLE_FORMAT_VERSION=2`.
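+
+ For illustration only, the v3 layout described above corresponds to a packed
+ record along the following lines; the struct and field names here are
+ illustrative, not part of the format specification:
+
+ .. code-block:: c++
+
+   #include <cstdint>
+
+   // Sketch of the version 3 compressed-bundle header described above.
+   // Field names are not normative.
+   struct CompressedBundleHeaderV3 {
+     char Magic[4];             // "CCOB"
+     uint16_t Version;          // 3
+     uint16_t Method;           // compression method (zlib or zstd)
+     uint64_t TotalFileSize;    // whole file, including this header
+     uint64_t UncompressedSize; // size of the binary before compression
+     uint64_t Hash;             // the 64-bit hash field described above
+   };
+   // The compressed data immediately follows the header.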
diff --git a/clang/docs/DebuggingCoroutines.rst b/clang/docs/DebuggingCoroutines.rst
index 8702152..9eaf8d4 100644
--- a/clang/docs/DebuggingCoroutines.rst
+++ b/clang/docs/DebuggingCoroutines.rst
@@ -9,14 +9,14 @@ Introduction
============
Coroutines in C++ were introduced in C++20, and the user experience for
-debugging them can still be challenging. This document guides you how to most
+debugging them can still be challenging. This document guides you on how to most
efficiently debug coroutines and how to navigate existing shortcomings in
debuggers and compilers.
Coroutines are generally used either as generators or for asynchronous
programming. In this document, we will discuss both use cases. Even if you are
using coroutines for asynchronous programming, you should still read the
-generators section, as it will introduce foundational debugging techniques also
+generators section, as it introduces foundational debugging techniques also
applicable to the debugging of asynchronous programs.
Both compilers (clang, gcc, ...) and debuggers (lldb, gdb, ...) are
@@ -34,15 +34,15 @@ scripting. This guide comes with a basic GDB script for coroutine debugging.
This guide will first showcase the more polished, bleeding-edge experience, but
will also show you how to debug coroutines with older toolchains. In general,
the older your toolchain, the deeper you will have to dive into the
-implementation details of coroutines (such as their ABI). The further down in
-this document you go, the more low-level, technical the content will become. If
+implementation details of coroutines (such as their ABI). The further down you go in
+this document, the more low-level, technical the content will become. If
you are on an up-to-date toolchain, you will hopefully be able to stop reading
earlier.
Debugging generators
====================
-One of the two major use cases for coroutines in C++ are generators, i.e.,
+One of the two major use cases for coroutines in C++ is generators, i.e.,
functions which can produce values via ``co_yield``. Values are produced
lazily, on-demand. For this purpose, every time a new value is requested, the
coroutine gets resumed. As soon as it reaches a ``co_yield`` and thereby
@@ -141,7 +141,7 @@ a regular function.
Note the two additional variables ``__promise`` and ``__coro_frame``. Those
show the internal state of the coroutine. They are not relevant for our
-generator example, but will be relevant for asynchronous programming described
+generator example but will be relevant for asynchronous programming described
in the next section.
Stepping out of a coroutine
@@ -174,7 +174,7 @@ Inspecting a suspended coroutine
--------------------------------
The ``print10Elements`` function receives an opaque ``generator`` type. Let's
-assume we are suspended at the ``++gen;`` line, and want to inspect the
+assume we are suspended at the ``++gen;`` line and want to inspect the
generator and its internal state.
To do so, we can simply look into the ``gen.hdl`` variable. LLDB comes with a
@@ -188,7 +188,7 @@ We can see two function pointers ``resume`` and ``destroy``. These pointers
point to the resume / destroy functions. By inspecting those function pointers,
we can see that our ``generator`` is actually backed by our ``fibonacci``
coroutine. When using VS Code + lldb-dap, you can Cmd+Click on the function
-address (``0x555...`` in the screenshot) to directly jump to the function
+address (``0x555...`` in the screenshot) to jump directly to the function
definition backing your coroutine handle.
Next, we see the ``promise``. In our case, this reveals the current value of
@@ -247,12 +247,12 @@ the line number of the current suspension point in the promise:
};
This stores the return address of ``await_suspend`` within the promise.
-Thereby, we can read it back from the promise of a suspended coroutine, and map
+Thereby, we can read it back from the promise of a suspended coroutine and map
it to an exact source code location. For a complete example, see the ``task``
type used below for asynchronous programming.
Alternatively, we can modify the C++ code to store the line number in the
-promise type. We can use a ``std::source_location`` to get the line number of
+promise type. We can use ``std::source_location`` to get the line number of
the await and store it inside the ``promise_type``. In the debugger, we can
then read the line number from the promise of the suspended coroutine.
@@ -270,7 +270,7 @@ then read the line number from the promise of the suspended coroutine.
};
The downside of both approaches is that they come at the price of additional
-runtime cost. In particular the second approach increases binary size, since it
+runtime cost. In particular, the second approach increases binary size, since it
requires additional ``std::source_location`` objects, and those source
locations are not stripped by split-dwarf. Whether the first approach is worth
the additional runtime cost is a trade-off you need to make yourself.
@@ -285,7 +285,7 @@ provide custom debugging support, so in addition to this guide, you might want
to check out their documentation.
When using coroutines for asynchronous programming, your library usually
-provides you some ``task`` type. This type usually looks similar to this:
+provides you with some ``task`` type. This type usually looks similar to this:
.. code-block:: c++
@@ -479,7 +479,7 @@ One such solution is to store the list of in-flight coroutines in a collection:
};
With this in place, it is possible to inspect ``inflight_coroutines`` from the
-debugger, and rely on LLDB's ``std::coroutine_handle`` pretty-printer to
+debugger and rely on LLDB's ``std::coroutine_handle`` pretty-printer to
inspect the coroutines.
This technique will track *all* coroutines, also the ones which are currently
@@ -498,8 +498,8 @@ LLDB before 21.0 did not yet show the ``__coro_frame`` inside
``coroutine_handle``. To inspect the coroutine frame, you had to use the
approach described in the :ref:`devirtualization` section.
-LLDB before 18.0 was hiding the ``__promise`` and ``__coro_frame``
-variable by default. The variables are still present, but they need to be
+LLDB before 18.0 hid the ``__promise`` and ``__coro_frame``
+variables by default. The variables are still present, but they need to be
explicitly added to the "watch" pane in VS Code or requested via
``print __promise`` and ``print __coro_frame`` from the debugger console.
@@ -511,9 +511,9 @@ section.
Toolchain Implementation Details
================================
-This section covers the ABI, as well as additional compiler-specific behavior.
+This section covers the ABI as well as additional compiler-specific behavior.
The ABI is followed by all compilers, on all major systems, including Windows,
-Linux and macOS. Different compilers emit different debug information, though.
+Linux, and macOS. Different compilers emit different debug information, though.
Ramp, resume and destroy functions
----------------------------------
@@ -595,7 +595,7 @@ functions as their first two members. As such, we can read the function
pointers from the coroutine frame and then obtain the function's name from its
address.
-The promise is guaranteed to be at a 16 byte offset from the coroutine frame.
+The promise is guaranteed to be at a 16-byte offset from the coroutine frame.
If we have a coroutine handle at address 0x416eb0, we can hence reinterpret-cast
the promise as follows:
@@ -607,8 +607,8 @@ Implementation in clang / LLVM
------------------------------
The C++ Coroutines feature in the Clang compiler is implemented in two parts of
-the compiler. Semantic analysis is performed in Clang, and Coroutine
-construction and optimization takes place in the LLVM middle-end.
+the compiler. Semantic analysis is performed in Clang, and coroutine
+construction and optimization take place in the LLVM middle-end.
For each coroutine function, the frontend generates a single corresponding
LLVM-IR function. This function uses special ``llvm.coro.suspend`` intrinsics
@@ -622,7 +622,7 @@ points into the coroutine frame. Most of the heavy lifting to preserve debugging
information is done in this pass. This pass needs to rewrite all variable
locations to point into the coroutine frame.
-Afterwards, a couple of additional optimizations are applied, before code
+Afterwards, a couple of additional optimizations are applied before code
gets emitted, but none of them are really interesting regarding debugging
information.
@@ -636,8 +636,8 @@ However, this is not possible for coroutine frames because the frames are
constructed in the LLVM middle-end.
To mitigate this problem, the LLVM middle end attempts to generate some debug
-information, which is unfortunately incomplete, since much of the language
-specific information is missing in the middle end.
+information, which is unfortunately incomplete, since much of the
+language-specific information is missing in the middle end.
.. _devirtualization:
@@ -655,7 +655,7 @@ There are two possible approaches to do so:
We can lookup their types and thereby get the types of promise
and coroutine frame.
-In gdb, one can use the following approach to devirtualize coroutine type,
-assuming we have a ``std::coroutine_handle`` is at address 0x418eb0:
+In gdb, one can use the following approach to devirtualize a coroutine type,
+assuming we have a ``std::coroutine_handle`` at address 0x418eb0:
::
@@ -679,7 +679,7 @@ LLDB comes with devirtualization support out of the box, as part of the
pretty-printer for ``std::coroutine_handle``. Internally, this pretty-printer
uses the second approach. We look up the types in the destroy function and not
the resume function because the resume function pointer will be set to a
-nullptr as soon as a coroutine reaches its final suspension point. If we used
+``nullptr`` as soon as a coroutine reaches its final suspension point. If we used
the resume function, devirtualization would hence fail for all coroutines that
have reached their final suspension point.
@@ -687,10 +687,10 @@ Interpreting the coroutine frame in optimized builds
----------------------------------------------------
The ``__coro_frame`` variable usually refers to the coroutine frame of an
-*in-flight* coroutine. This means, the coroutine is currently executing.
+*in-flight* coroutine. This means the coroutine is currently executing.
However, the compiler only guarantees the coroutine frame to be in a consistent
state while the coroutine is suspended. As such, the variables inside the
-``__coro_frame`` variable might be outdated, in particular when optimizations
+``__coro_frame`` variable might be outdated, particularly when optimizations
are enabled.
Furthermore, when optimizations are enabled, the compiler will layout the
@@ -731,7 +731,7 @@ despite ``a`` being frequently incremented.
While this might be surprising, this is a result of the optimizer recognizing
that it can eliminate most of the load/store operations.
-The above code gets optimized to the equivalent of:
+The above code is optimized to the equivalent of:
.. code-block:: c++
@@ -1180,5 +1180,5 @@ The authors of the Folly libraries wrote a blog post series on how they debug co
* `Async stack traces in folly: Improving debugging in the developer lifecycle <https://developers.facebook.com/blog/post/2021/10/21/async-stack-traces-folly-improving-debugging-developer-lifecycle/>`_
Besides some topics also covered here (stack traces from the debugger), Folly's blog post series also covers
-more additional topics, such as capturing async stack traces in performance profiles via eBPF filters
+additional topics, such as capturing async stack traces in performance profiles via eBPF filters
and printing async stack traces on crashes.
diff --git a/clang/docs/InternalsManual.rst b/clang/docs/InternalsManual.rst
index 756db85..bd74227 100644
--- a/clang/docs/InternalsManual.rst
+++ b/clang/docs/InternalsManual.rst
@@ -343,7 +343,7 @@ Class:
Description:
This is a formatter which represents the argument number in a human-readable
format: the value ``123`` stays ``123``, ``12345`` becomes ``12.34k``,
- ``6666666` becomes ``6.67M``, and so on for 'G' and 'T'.
+ ``6666666`` becomes ``6.67M``, and so on for 'G' and 'T'.
**"objcclass" format**
diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index b5bb198..9767fde 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -635,11 +635,12 @@ C and C++. For example:
return v;
}
+
Boolean vectors are a Clang extension of the ext vector type. Boolean vectors
are intended, though not guaranteed, to map to vector mask registers. The size
parameter of a boolean vector type is the number of bits in the vector. The
boolean vector is dense and each bit in the boolean vector is one vector
-element.
+element. Query for this feature with ``__has_feature(ext_vector_type_boolean)``.
The semantics of boolean vectors borrows from C bit-fields with the following
differences:
@@ -657,6 +658,16 @@ The size and alignment are both the number of bits rounded up to the next power
of two, but the alignment is at most the maximum vector alignment of the
target.
+A boolean vector can be used in a ternary ``?:`` operator to select vector
+elements of a different type.
+
+.. code-block:: c++
+
+ typedef int int4 __attribute__((ext_vector_type(4)));
+ typedef bool bool4 __attribute__((ext_vector_type(4)));
+
+ int4 blend(bool4 cond, int4 a, int4 b) { return cond ? a : b; }
+
Vector Literals
---------------
@@ -757,9 +768,12 @@ elementwise to the input.
Unless specified otherwise operation(±0) = ±0 and operation(±infinity) = ±infinity
-The integer elementwise intrinsics, including ``__builtin_elementwise_popcount``,
+The elementwise intrinsics ``__builtin_elementwise_popcount``,
``__builtin_elementwise_bitreverse``, ``__builtin_elementwise_add_sat``,
-``__builtin_elementwise_sub_sat`` can be called in a ``constexpr`` context.
+``__builtin_elementwise_sub_sat``, ``__builtin_elementwise_max``,
+``__builtin_elementwise_min``, ``__builtin_elementwise_abs``,
+``__builtin_elementwise_ctlz``, ``__builtin_elementwise_cttz``, and
+``__builtin_elementwise_fma`` can be called in a ``constexpr`` context.
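+
+For example, a minimal sketch of ``constexpr`` use (assuming a recent Clang
+with constant-evaluated vector subscripting):
+
+.. code-block:: c++
+
+  typedef int int4 __attribute__((ext_vector_type(4)));
+
+  // Evaluated entirely at compile time.
+  constexpr int4 v = {1, -2, 3, -4};
+  constexpr int4 a = __builtin_elementwise_abs(v); // {1, 2, 3, 4}
+  static_assert(a[1] == 2 && a[3] == 4, "");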
No implicit promotion of integer types takes place. The mixing of integer types
of different sizes and signs is forbidden in binary and ternary builtins.
@@ -859,6 +873,23 @@ of different sizes and signs is forbidden in binary and ternary builtins.
semantics, see `LangRef
<http://llvm.org/docs/LangRef.html#llvm-min-intrinsics-comparation>`_
for the comparison.
+T __builtin_elementwise_fshl(T x, T y, T z)    perform a funnel shift left. Concatenate x and y (x is the most        integer types
+                                               significant bits of the wide value), the combined value is shifted
+                                               left by z, and the most significant bits are extracted to produce
+                                               a result that is the same size as the original arguments.
+
+T __builtin_elementwise_fshr(T x, T y, T z)    perform a funnel shift right. Concatenate x and y (x is the most       integer types
+                                               significant bits of the wide value), the combined value is shifted
+                                               right by z, and the least significant bits are extracted to produce
+                                               a result that is the same size as the original arguments.
+
+T __builtin_elementwise_ctlz(T x[, T y])       return the number of leading 0 bits in the first argument. If         integer types
+                                               the first argument is 0 and an optional second argument is provided,
+                                               the second argument is returned. It is undefined behaviour if the
+                                               first argument is 0 and no second argument is provided.
+
+T __builtin_elementwise_cttz(T x[, T y])       return the number of trailing 0 bits in the first argument. If        integer types
+                                               the first argument is 0 and an optional second argument is provided,
+                                               the second argument is returned. It is undefined behaviour if the
+                                               first argument is 0 and no second argument is provided.
============================================== ====================================================================== =========================================
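+
+As an illustrative sketch (not normative), passing the same value for both
+data operands of ``__builtin_elementwise_fshl`` yields an elementwise rotate,
+and the optional second argument of ``__builtin_elementwise_cttz`` serves as a
+fallback for zero inputs:
+
+.. code-block:: c++
+
+  typedef unsigned uint4 __attribute__((ext_vector_type(4)));
+
+  // fshl(x, x, n) concatenates x with itself and shifts left by n,
+  // which rotates each element left by the corresponding lane of n.
+  uint4 rotl(uint4 x, uint4 n) { return __builtin_elementwise_fshl(x, x, n); }
+
+  // Trailing-zero count that returns 32 for zero elements instead of
+  // invoking undefined behaviour.
+  uint4 cttz_safe(uint4 x) {
+    uint4 fallback = {32, 32, 32, 32};
+    return __builtin_elementwise_cttz(x, fallback);
+  }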
@@ -911,6 +942,41 @@ Let ``VT`` be a vector type and ``ET`` the element type of ``VT``.
for the comparison.
======================================= ====================================================================== ==================================
+*Masked Builtins*
+
+Each builtin accesses memory according to a provided boolean mask. These are
+provided as ``__builtin_masked_load`` and ``__builtin_masked_store``. The first
+argument is always a boolean mask vector. The ``__builtin_masked_load`` builtin
+takes an optional third vector argument that will be used for the result of the
+masked-off lanes. These builtins assume the memory is always aligned.
+
+The ``__builtin_masked_expand_load`` and ``__builtin_masked_compress_store``
+builtins have the same interface but read from or write to consecutive memory
+locations. Effectively, they perform the ``if (mask[i]) val[i] = ptr[j++]`` and
+``if (mask[i]) ptr[j++] = val[i]`` patterns, respectively.
+
+Example:
+
+.. code-block:: c++
+
+ using v8b = bool [[clang::ext_vector_type(8)]];
+ using v8i = int [[clang::ext_vector_type(8)]];
+
+ v8i load(v8b mask, v8i *ptr) { return __builtin_masked_load(mask, ptr); }
+
+ v8i load_expand(v8b mask, v8i *ptr) {
+ return __builtin_masked_expand_load(mask, ptr);
+ }
+
+ void store(v8b mask, v8i val, v8i *ptr) {
+ __builtin_masked_store(mask, val, ptr);
+ }
+
+ void store_compress(v8b mask, v8i val, v8i *ptr) {
+ __builtin_masked_compress_store(mask, val, ptr);
+ }
+
+
Matrix Types
============
@@ -1704,11 +1770,41 @@ Variadic Friends __cpp_variadic_friend C
Trivial Relocatability                        __cpp_trivial_relocatability     C++26         C++03
--------------------------------------------- -------------------------------- ------------- -------------
Designated initializers (N494)                                                 C99           C89
+``_Complex`` (N693)                                                            C99           C89, C++
+``_Bool`` (N815)                                                               C99           C89
+Variable-length arrays (N683)                                                  C99           C89, C++
+Flexible array members                                                         C99           C89, C++
+static and type quals in arrays                                                C99           C89
+``long long`` (N601)                                                           C99           C89
+Hexadecimal floating constants (N308)                                          C99           C89
+Compound literals (N716)                                                       C99           C89, C++
+``//`` comments (N644)                                                         C99           C89
+Mixed declarations and code (N740)                                             C99           C89
+init-statement in for (N740)                                                   C99           C89
+Variadic macros (N707)                                                         C99           C89
+Empty macro arguments (N570)                                                   C99           C89
+Trailing comma in enum declaration                                             C99           C89
+Implicit ``return 0`` in ``main``                                              C99           C89
+``__func__`` (N611)                                                            C99           C89
+``_Generic`` (N1441)                                                           C11           C89, C++
+``_Static_assert`` (N1330)                                                     C11           C89, C++
+``_Atomic`` (N1485)                                                            C11           C89, C++
+``_Thread_local`` (N1364)                                                      C11           C89, C++
Array & element qualification (N2607)                                          C23           C89
Attributes (N2335)                                                             C23           C89
``#embed`` (N3017)                                                             C23           C89, C++
+Enum with fixed underlying type (N3030)                                        C23           C89
+``#warning`` (N2686)                                                           C23           C89
+``_BitInt`` (N3035)                                                            C23           C89, C++
+Binary literals (N2549)                                                        C23           C89
+Unnamed parameters in a function definition                                    C23           C89
+Free positioning of labels (N2508)                                             C23           C89
+``#elifdef`` (N2645)                                                           C23           C89
+``__has_include`` (N2799)                                                      C23           C89
Octal literals prefixed with ``0o`` or ``0O``                                  C2y           C89, C++
``_Countof`` (N3369, N3469)                                                    C2y           C89
+``_Generic`` with a type operand (N3260)                                       C2y           C89, C++
+``++``/``--`` on ``_Complex`` value (N3259)                                    C2y           C89, C++
============================================= ================================ ============= =============
Builtin type aliases
@@ -1751,6 +1847,37 @@ __make_integer_seq
This alias returns ``IntSeq`` instantiated with ``IntSeqT = T`` and ``Ints`` being the pack ``0, ..., N - 1``.
+__builtin_dedup_pack
+--------------------
+
+.. code-block:: c++
+
+ template <class... Ts>
+ using __builtin_dedup_pack = ...;
+
+This alias takes a template parameter pack ``Ts`` and produces a new unexpanded pack containing the unique types
+from ``Ts``, with the order of the first occurrence of each type preserved.
+It is useful in template metaprogramming to normalize type lists.
+
+The resulting pack can be expanded in contexts like template argument lists or base specifiers.
+
+**Example of Use**:
+
+.. code-block:: c++
+
+ template <typename...> struct TypeList;
+
+ // The resulting type is TypeList<int, double, char>
+ template <typename ...ExtraTypes>
+ using MyTypeList = TypeList<__builtin_dedup_pack<int, double, int, char, double, ExtraTypes...>...>;
+
+**Limitations**:
+
+* This builtin can only be used inside a template.
+* The resulting pack is currently only supported for expansion in template argument lists and base specifiers.
+* This builtin cannot be assigned to a template template parameter.
+
+
Type Trait Primitives
=====================
@@ -1922,6 +2049,9 @@ The following type trait primitives are supported by Clang. Those traits marked
Returns true if a reference ``T`` can be copy-initialized from a temporary of type
a non-cv-qualified ``U``.
* ``__underlying_type`` (C++, GNU, Microsoft)
+* ``__builtin_lt_synthesises_from_spaceship``, ``__builtin_gt_synthesises_from_spaceship``,
+ ``__builtin_le_synthesises_from_spaceship``, ``__builtin_ge_synthesises_from_spaceship`` (Clang):
+ These builtins can be used to determine whether the corresponding operator is synthesised from a spaceship operator.
In addition, the following expression traits are supported:
@@ -4072,7 +4202,7 @@ builtin, the mangler emits their usual pattern without any special treatment.
-----------------------
``__builtin_popcountg`` returns the number of 1 bits in the argument. The
-argument can be of any unsigned integer type.
+argument can be of any unsigned integer type or fixed boolean vector.
**Syntax**:
@@ -4104,7 +4234,13 @@ such as ``unsigned __int128`` and C23 ``unsigned _BitInt(N)``.
``__builtin_clzg`` (respectively ``__builtin_ctzg``) returns the number of
leading (respectively trailing) 0 bits in the first argument. The first argument
-can be of any unsigned integer type.
+can be of any unsigned integer type or fixed boolean vector.
+
+For boolean vectors, these builtins interpret the vector like a bit-field where
+the ith element of the vector is bit i of the bit-field, counting from the
+least significant end. ``__builtin_clzg`` returns the number of zero elements at
+the end of the vector, while ``__builtin_ctzg`` returns the number of zero
+elements at the start of the vector.
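+
+For example (an illustrative sketch), a boolean vector used as a lane
+predicate can be scanned with these builtins:
+
+.. code-block:: c++
+
+  typedef bool bool8 __attribute__((ext_vector_type(8)));
+
+  // Number of true elements in the mask.
+  int count_set(bool8 m) { return __builtin_popcountg(m); }
+
+  // Index of the first true element, or 8 if the mask is all false
+  // (using the optional fallback argument).
+  int first_set(bool8 m) { return __builtin_ctzg(m, 8); }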
If the first argument is 0 and an optional second argument of ``int`` type is
provided, then the second argument is returned. If the first argument is 0, but
@@ -4330,7 +4466,7 @@ fall into one of the specified floating-point classes.
if (__builtin_isfpclass(x, 448)) {
// `x` is positive finite value
- ...
+ ...
}
**Description**:
@@ -5044,6 +5180,23 @@ If no address spaces names are provided, all address spaces are fenced.
__builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup", "local")
__builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup", "local", "global")
+__builtin_amdgcn_ballot_w{32,64}
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``__builtin_amdgcn_ballot_w{32,64}`` returns a bitmask that contains its
+boolean argument as a bit for every lane of the current wave that is currently
+active (i.e., that is converged with the executing thread), and a 0 bit for
+every lane that is not active.
+
+The result is uniform, i.e. it is the same in every active thread of the wave.
+
+__builtin_amdgcn_inverse_ballot_w{32,64}
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Given a wave-uniform bitmask, ``__builtin_amdgcn_inverse_ballot_w{32,64}(mask)``
+returns the bit at the position of the current lane. It is almost equivalent to
+``(mask & (1 << lane_id)) != 0``, except that its behavior is only defined if
+the given mask has the same value for all active lanes of the current wave.
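+
+For example (an illustrative sketch for a wave32 target), a ballot can be
+combined with a population count to count the active lanes satisfying a
+predicate:
+
+.. code-block:: c++
+
+  // Wave-uniform count of active lanes for which p is true.
+  static inline int count_true_w32(bool p) {
+    unsigned mask = __builtin_amdgcn_ballot_w32(p);
+    return __builtin_popcount(mask);
+  }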
ARM/AArch64 Language Extensions
-------------------------------
diff --git a/clang/docs/OpenMPSupport.rst b/clang/docs/OpenMPSupport.rst
index 58cd10a..0fa4ba3 100644
--- a/clang/docs/OpenMPSupport.rst
+++ b/clang/docs/OpenMPSupport.rst
@@ -191,7 +191,7 @@ implementation.
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| device | teams construct on the host device | :good:`done` | r371553 |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | support non-contiguous array sections for target update | :good:`done` | |
+| device | support non-contiguous array sections for target update | :good:`done` | https://github.com/llvm/llvm-project/pull/144635 |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| device | pointer attachment | :good:`done` | |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
@@ -256,7 +256,7 @@ implementation.
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| device | device-specific environment variables | :none:`unclaimed` | |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | omp_target_is_accessible routine | :none:`unclaimed` | |
+| device | omp_target_is_accessible routine | :part:`In Progress` | https://github.com/llvm/llvm-project/pull/138294 |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| device | omp_get_mapped_ptr routine | :good:`done` | D141545 |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
@@ -338,7 +338,7 @@ implementation.
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| OMPT | new 'emi' callbacks for external monitoring interfaces | :good:`done` | |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| OMPT | device tracing interface | :none:`unclaimed` | |
+| OMPT | device tracing interface | :none:`in progress` | jplehr |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| task | 'strict' modifier for taskloop construct | :none:`unclaimed` | |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
@@ -376,7 +376,7 @@ implementation.
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
| loop stripe transformation | :good:`done` | https://github.com/llvm/llvm-project/pull/119891 |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| work distribute construct | :none:`unclaimed` | :none:`unclaimed` | |
+| workdistribute construct | | :none:`in progress` | @skc7, @mjklemm |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
| task_iteration | :none:`unclaimed` | :none:`unclaimed` | |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
@@ -449,7 +449,7 @@ implementation.
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
| Clarifications to Fortran map semantics | :none:`unclaimed` | :none:`unclaimed` | |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| default clause at target construct | :part:`In Progress` | :none:`unclaimed` | |
+| default clause at target construct | :part:`In Progress` | :none:`unclaimed` | |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
| ref count update use_device_{ptr, addr} | :none:`unclaimed` | :none:`unclaimed` | |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
@@ -470,12 +470,40 @@ implementation.
| need_device_addr modifier for adjust_args clause | :part:`partial` | :none:`unclaimed` | Parsing/Sema: https://github.com/llvm/llvm-project/pull/143442 |
| | | | https://github.com/llvm/llvm-project/pull/149586 |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Prescriptive num_threads | :part:`In Progress` | :none:`unclaimed` | |
+| Prescriptive num_threads | :part:`In Progress` | :none:`unclaimed` | ro-i |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Message and severity clauses | :part:`In Progress` | :none:`unclaimed` | |
+| Message and severity clauses | :part:`In Progress` | :none:`unclaimed` | ro-i |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
| Local clause on declare target | :part:`In Progress` | :none:`unclaimed` | |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| groupprivate directive | :part:`In Progress` | :part:`In Progress` | Flang: kparzysz, mjklemm |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| variable-category on default clause | :part:`In Progress` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Changes to omp_target_is_accessible | :part:`In Progress` | :part:`In Progress` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+
+
+.. _OpenMP 6.1 implementation details:
+
+OpenMP 6.1 Implementation Details (Experimental)
+================================================
+
+The following table provides a quick overview of various OpenMP 6.1 features
+and their implementation status. Since OpenMP 6.1 has not yet been released,
+the following features are experimental and are subject to change at any time.
+Please post on the `Discourse forums (Runtimes - OpenMP category)`_ for more
+information or if you want to help with the implementation.
+
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+|Feature | C/C++ Status | Fortran Status | Reviews |
++=============================================================+===========================+===========================+==========================================================================+
+| dyn_groupprivate clause | :part:`In Progress` | :part:`In Progress` | C/C++: kevinsala (https://github.com/llvm/llvm-project/pull/152651 |
+| | | | https://github.com/llvm/llvm-project/pull/152830 |
+| | | | https://github.com/llvm/llvm-project/pull/152831) |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+
OpenMP Extensions
=================
diff --git a/clang/docs/PointerAuthentication.rst b/clang/docs/PointerAuthentication.rst
index 913291c..96eb498 100644
--- a/clang/docs/PointerAuthentication.rst
+++ b/clang/docs/PointerAuthentication.rst
@@ -47,16 +47,16 @@ This document serves four purposes:
- It documents several language extensions that are useful on targets using
pointer authentication.
-- It will eventually present a theory of operation for the security mitigation,
- describing the basic requirements for correctness, various weaknesses in the
- mechanism, and ways in which programmers can strengthen its protections
- (including recommendations for language implementors).
+- It presents a theory of operation for the security mitigation, describing the
+ basic requirements for correctness, various weaknesses in the mechanism, and
+ ways in which programmers can strengthen its protections (including
+ recommendations for language implementors).
-- It will eventually document the language ABIs currently used for C, C++,
- Objective-C, and Swift on arm64e, although these are not yet stable on any
- target.
+- It documents the stable ABI of the C, C++, and Objective-C languages on arm64e
+ platforms.
-Basic Concepts
+
+Basic concepts
--------------
The simple address of an object or function is a **raw pointer**. A raw
@@ -125,7 +125,7 @@ independently for I and D keys.)
interfaces or as primitives in a compiler IR because they expose raw
pointers. Raw pointers require special attention in the language
implementation to avoid the accidental creation of exploitable code
- sequences.
+ sequences; see the section on `Attackable code sequences`_.
The following details are all implementation-defined:
@@ -167,10 +167,15 @@ a cryptographic signature, other implementations may be possible. See
signing key, and stores it in the high bits as the signature. ``auth``
removes the signature, computes the same hash, and compares the result with
the stored signature. ``strip`` removes the signature without
- authenticating it. While ``aut*`` instructions do not themselves trap on
- failure in Armv8.3 PAuth, they do with the later optional FPAC extension.
- An implementation can also choose to emulate this trapping behavior by
- emitting additional instructions around ``aut*``.
+ authenticating it. The ``aut`` instructions in the baseline Armv8.3 PAuth
+   feature are not guaranteed to trap on authentication failure; instead, they
+ simply corrupt the pointer so that later uses will likely trap. Unless the
+ "later use" follows immediately and cannot be recovered from (e.g. with a
+ signal handler), this does not provide adequate protection against
+ `authentication oracles`_, so implementations must emit additional
+ instructions to force an immediate trap. This is unnecessary if the
+ processor provides the optional ``FPAC`` extension, which guarantees an
+ immediate trap.
- ``sign_generic`` corresponds to the ``pacga`` instruction, which takes two
64-bit values and produces a 64-bit cryptographic hash. Implementations of
@@ -234,7 +239,7 @@ implementation-defined.
.. _Signing schemas:
-Signing Schemas
+Signing schemas
~~~~~~~~~~~~~~~
Correct use of pointer authentication requires the signing code and the
@@ -255,33 +260,172 @@ signing schema breaks down even more simply:
It is important that the signing schema be independently derived at all signing
and authentication sites. Preferably, the schema should be hard-coded
everywhere it is needed, but at the very least, it must not be derived by
-inspecting information stored along with the pointer.
+inspecting information stored along with the pointer. See the section on
+`Attacks on pointer authentication`_ for more information.
-Language Features
------------------
-There is currently one main pointer authentication language feature:
+Language features
+-----------------
-- The language provides the ``<ptrauth.h>`` intrinsic interface for manually
- signing and authenticating pointers in code. These can be used in
+There are three levels of the pointer authentication language feature:
+
+- The language implementation automatically signs and authenticates function
+  pointers (and certain data pointers) across a variety of standard situations,
+  including return addresses, function pointers, and C++ virtual functions. The
+  intent is for all pointers to code in program memory to be signed in some way
+  and for all branches to code in program text to authenticate those
+  signatures. In addition to the code pointers themselves, we also use pointer
+  authentication to protect data values that directly or indirectly influence
+  control flow or program integrity, or whose corruption would otherwise give
+  attackers a powerful means of compromising the program.
+
+- The language also provides extensions to override the default rules used by
+  the language implementation. For example, the ``__ptrauth`` type qualifier
+  can be used to change how pointers or pointer-sized integers are signed when
+  they are stored in a particular variable or field; this provides much stronger
+  protection than is guaranteed by the default rules for C function and data
+  pointers.
+
+- Finally, the language provides the ``<ptrauth.h>`` intrinsic interface for
+ manually signing and authenticating pointers in code. These can be used in
circumstances where very specific behavior is required.
+Language implementation
+~~~~~~~~~~~~~~~~~~~~~~~
+
+For the most part, pointer authentication is an unobserved detail of the
+implementation of the programming language. Any element of the language
+implementation that would perform an indirect branch to a pointer is implicitly
+altered so that the pointer is signed when first constructed and authenticated
+when the branch is performed. This includes:
+
+- indirect-call features in the programming language, such as C function
+ pointers, C++ virtual functions, C++ member function pointers, the "blocks"
+ C extension, and so on;
+
+- returning from a function, no matter how it is called; and
+
+- indirect calls introduced by the implementation, such as branches through the
+ global offset table (GOT) used to implement direct calls to functions defined
+ outside of the current shared object.
+
+For more information about this, see the `Language ABI`_ section.
+
+However, some aspects of the implementation are observable by the programmer or
+otherwise require special notice.
+
+C data pointers
+^^^^^^^^^^^^^^^
+
+The current implementation in Clang does not sign pointers to ordinary data by
+default. For a partial explanation of the reasoning behind this, see the
+`Theory of Operation`_ section.
+
+A specific data pointer which is more security-sensitive than most can be
+signed using the `__ptrauth qualifier`_ or using the ``<ptrauth.h>``
+intrinsics.
+
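+For example (a sketch; the key choice and the discriminator are illustrative):
+
+.. code-block:: c
+
+  #include <ptrauth.h>
+
+  struct Config;
+
+  struct SensitiveState {
+    // An especially security-sensitive data pointer, signed explicitly even
+    // though ordinary data pointers are not signed by default.
+    struct Config * __ptrauth(ptrauth_key_process_independent_data, 1, 0x27a1)
+        config;
+  };
+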
+C function pointers
+^^^^^^^^^^^^^^^^^^^
+
+The C standard imposes restrictions on the representation and semantics of
+function pointer types which make it difficult to achieve satisfactory
+signature diversity in the default language rules. See `Attacks on pointer
+authentication`_ for more information about signature diversity. Programmers
+should strongly consider using the ``__ptrauth`` qualifier to improve the
+protections for important function pointers, such as the components of of
+a hand-rolled "v-table"; see the section on the `__ptrauth qualifier`_ for
+details.
+
+The value of a pointer to a C function includes a signature, even when the
+value is cast to a non-function-pointer type like ``void*`` or ``intptr_t``. On
+implementations that use high bits to store the signature, this means that
+relational comparisons and hashes will vary according to the exact signature
+value, which is likely to change between executions of a program. In some
+implementations, it may also vary based on the exact function pointer type.
+
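+For example (a minimal sketch):
+
+.. code-block:: c
+
+  #include <stdint.h>
+
+  void f(void);
+
+  intptr_t address_of_f(void) {
+    // The result includes the signature: it is not the raw address of `f`,
+    // and its exact value may differ between executions of the program.
+    return (intptr_t)&f;
+  }
+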
+Null pointers
+^^^^^^^^^^^^^
+
+In principle, an implementation could derive the signed null pointer value
+simply by applying the standard signing algorithm to the raw null pointer
+value. However, for likely signing algorithms, this would mean that the signed
+null pointer value would no longer be statically known, which would have many
+negative consequences. For one, it would become substantially more expensive
+to emit null pointer values or to perform null-pointer checks. For another,
+the pervasive (even if technically unportable) assumption that null pointers
+are bitwise zero would be invalidated, making it substantially more difficult
+to adopt pointer authentication, as well as weakening common optimizations for
+zero-initialized memory such as the use of ``.bss`` sections. Therefore it is
+beneficial to treat null pointers specially by giving them their usual
+representation. On AArch64, this requires additional code when working with
+possibly-null pointers, such as when copying a pointer field that has been
+signed with address diversity.
+
+While this representation of nulls is the safest option for the general case,
+there are some situations in which a null pointer may have important semantic
+or security impact. For that purpose Clang has the concept of a pointer
+authentication schema that signs and authenticates null values.
+
+Return addresses
+^^^^^^^^^^^^^^^^
+
+The current implementation in Clang implicitly signs the return addresses in
+function calls. While the value of the return address is technically an
+implementation detail of a function, there are some important libraries and
+development tools which rely on manually walking the chain of stack frames.
+These tools must be updated to correctly account for pointer authentication,
+either by stripping signatures (if security is not important for the tool, e.g.
+if it is capturing a stack trace during a crash) or properly authenticating
+them. More information about how these values are signed is available in the
+`Language ABI`_ section.
+
+C++ virtual functions
+^^^^^^^^^^^^^^^^^^^^^
+
+The current implementation in Clang signs virtual function pointers with
+a discriminator derived from the full signature of the overridden method,
+including the method name and parameter types. It is possible to write C++
+code that relies on v-table layout remaining constant despite changes to
+a method signature; for example, a parameter might be a ``typedef`` that
+resolves to a different type based on a build setting. Such code violates
+C++'s One Definition Rule (ODR), but that violation is not normally detected;
+however, pointer authentication will detect it.
-Language Extensions
+Language extensions
~~~~~~~~~~~~~~~~~~~
-Feature Testing
+Feature testing
^^^^^^^^^^^^^^^
Whether the current target uses pointer authentication can be tested for with
a number of different tests.
-- ``__has_feature(ptrauth_intrinsics)`` is true if ``<ptrauth.h>`` provides its
- normal interface. This may be true even on targets where pointer
- authentication is not enabled by default.
+- The ``__PTRAUTH__`` macro is defined if ``<ptrauth.h>`` provides its normal
+  interface. This implies support for the pointer authentication intrinsics
+  and the ``__ptrauth`` qualifier.
-__ptrauth Qualifier
-^^^^^^^^^^^^^^^^^^^
+- ``__has_feature(ptrauth_returns)`` is true if the target uses pointer
+ authentication to protect return addresses.
+
+- ``__has_feature(ptrauth_calls)`` is true if the target uses pointer
+ authentication to protect indirect branches. On arm64e this implies
+ ``__has_feature(ptrauth_returns)``, ``__has_feature(ptrauth_intrinsics)``,
+ and the ``__PTRAUTH__`` macro.
+
+- For backwards compatibility purposes ``__has_feature(ptrauth_intrinsics)``
+ and ``__has_feature(ptrauth_qualifier)`` are available on arm64e targets.
+ These features are synonymous with each other, and are equivalent to testing
+ for the ``__PTRAUTH__`` macro definition. Use of these features should be
+ restricted to cases where backwards compatibility is required, and should be
+ paired with ``defined(__PTRAUTH__)``.
+
+
+Clang provides several other feature tests only for historical reasons; for
+current purposes they are all equivalent to ``ptrauth_calls``.
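+
+For example (a minimal sketch):
+
+.. code-block:: c
+
+  #if defined(__PTRAUTH__)
+  #include <ptrauth.h> /* intrinsics and the __ptrauth qualifier available */
+  #endif
+
+  #if __has_feature(ptrauth_calls)
+  /* indirect branches are protected by pointer authentication */
+  #endif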
+
+``__ptrauth`` qualifier
+^^^^^^^^^^^^^^^^^^^^^^^
``__ptrauth(key, address, discriminator)`` is an extended type
qualifier which causes so-qualified objects to hold pointers or pointer sized
@@ -293,6 +437,11 @@ type, either to a function or to an object, or a pointer sized integer. It
currently cannot be an Objective-C pointer type, a C++ reference type, or a
block pointer type; these restrictions may be lifted in the future.
+The current implementation in Clang is known to not provide adequate safety
+guarantees against the creation of `signing oracles`_ when assigning data
+pointers to ``__ptrauth``-qualified gl-values. See the section on `safe
+derivation`_ for more information.
+
The qualifier's operands are as follows:
- ``key`` - an expression evaluating to a key value from ``<ptrauth.h>``; must
@@ -327,6 +476,57 @@ a discriminator determined as follows:
is ``ptrauth_blend_discriminator(&x, discriminator)``; see
`ptrauth_blend_discriminator`_.
+Non-triviality from address diversity
++++++++++++++++++++++++++++++++++++++
+
+Address diversity must impose additional restrictions in order to allow the
+implementation to correctly copy values. In C++, a type qualified with address
+diversity is treated like a class type with non-trivial copy/move constructors
+and assignment operators, with the usual effect on containing classes and
+unions. C does not have a standard concept of non-triviality, and so we must
+describe the basic rules here, with the intention of imitating the emergent
+rules of C++:
+
+- A type may be **non-trivial to copy**.
+
+- A type may also be **illegal to copy**. Types that are illegal to copy are
+ always non-trivial to copy.
+
+- A type may also be **address-sensitive**. This includes types that use self
+ referencing pointers, data protected by address diversified pointer
+ authentication, or other similar concepts.
+
+- A type qualified with a ``ptrauth`` qualifier or implicit authentication
+ schema that requires address diversity is non-trivial to copy and
+ address-sensitive.
+
+- An array type is illegal to copy, non-trivial to copy, or address-sensitive
+ if its element type is illegal to copy, non-trivial to copy, or
+ address-sensitive, respectively.
+
+- A struct type is illegal to copy, non-trivial to copy, or address-sensitive
+ if it has a field whose type is illegal to copy, non-trivial to copy, or
+ address-sensitive, respectively.
+
+- A union type is both illegal and non-trivial to copy if it has a field whose
+ type is non-trivial or illegal to copy.
+
+- A union type is address-sensitive if it has a field whose type is
+ address-sensitive.
+
+- A program is ill-formed if it uses a type that is illegal to copy as
+ a function parameter, argument, or return type.
+
+- A program is ill-formed if an expression requires a type to be copied that is
+ illegal to copy.
+
+- Otherwise, copying a type that is non-trivial to copy correctly copies its
+ subobjects.
+
+- Types that are address-sensitive must always be passed and returned
+ indirectly. Thus, changing the address-sensitivity of a type may be
+ ABI-breaking even if its size and alignment do not change.
+
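+For example (a minimal sketch):
+
+.. code-block:: c
+
+  #include <ptrauth.h>
+
+  struct Entry {
+    // Address diversity (the `1`) makes this type non-trivial to copy and
+    // address-sensitive.
+    void (* __ptrauth(ptrauth_key_function_pointer, 1, 0x42) action)(void);
+  };
+
+  void copy_entry(struct Entry *dst, const struct Entry *src) {
+    // Assignment re-signs the pointer for the destination address; copying
+    // the raw bytes with memcpy would leave an invalid signature behind.
+    *dst = *src;
+  }
+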
``<ptrauth.h>``
~~~~~~~~~~~~~~~
@@ -433,7 +633,7 @@ Produce a signed pointer for the given raw pointer without applying any
authentication or extra treatment. This operation is not required to have the
same behavior on a null pointer that the language implementation would.
-This is a treacherous operation that can easily result in signing oracles.
+This is a treacherous operation that can easily result in `signing oracles`_.
Programs should use it seldom and carefully.
``ptrauth_auth_and_resign``
@@ -454,7 +654,29 @@ a null pointer that the language implementation would.
The code sequence produced for this operation must not be directly attackable.
However, if the discriminator values are not constant integers, their
computations may still be attackable. In the future, Clang should be enhanced
-to guaranteed non-attackability if these expressions are safely-derived.
+to guarantee non-attackability if these expressions are
+:ref:`safely-derived<Safe derivation>`.
+
+``ptrauth_auth_function``
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: c
+
+ ptrauth_auth_function(pointer, key, discriminator)
+
+Authenticate that ``pointer`` is signed with ``key`` and ``discriminator`` and
+re-sign it to the standard schema for a function pointer of its type.
+
+``pointer`` must have function pointer type. The result will have the same
+type as ``pointer``. This operation is not required to have the same behavior
+on a null pointer that the language implementation would.
+
+This operation makes the same attackability guarantees as
+``ptrauth_auth_and_resign``.
+
+If this operation appears syntactically as the function operand of a call,
+Clang guarantees that the call will directly authenticate the function value
+using the given schema rather than re-signing to the standard schema.
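+
+For example (a sketch; the key and discriminator are illustrative, and ``cb``
+is assumed to have been signed with that custom schema elsewhere):
+
+.. code-block:: c
+
+  #include <ptrauth.h>
+
+  typedef void (*callback_t)(void);
+
+  void invoke(callback_t cb) {
+    // As the function operand of the call, this authenticates `cb` with the
+    // custom schema and calls it directly, without re-signing.
+    ptrauth_auth_function(cb, ptrauth_key_process_independent_code, 0x1234)();
+  }
+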
``ptrauth_auth_data``
^^^^^^^^^^^^^^^^^^^^^
@@ -500,12 +722,921 @@ type. Implementations are not required to make all bits of the result equally
significant; in particular, some implementations are known to not leave
meaningful data in the low bits.
+Standard ``__ptrauth`` qualifiers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``<ptrauth.h>`` additionally provides several macros which expand to
+``__ptrauth`` qualifiers for common ABI situations.
+
+For convenience, these macros expand to nothing when pointer authentication is
+disabled.
+
+These macros can be found in the header; some details of these macros may be
+unstable or implementation-specific.
+
+
+Theory of operation
+-------------------
+
+The threat model of pointer authentication is as follows:
+
+- The attacker has the ability to read and write to a certain range of
+ addresses, possibly the entire address space. However, they are constrained
+ by the normal rules of the process: for example, they cannot write to memory
+ that is mapped read-only, and if they access unmapped memory it will trigger
+ a trap.
+
+- The attacker has no ability to add arbitrary executable code to the program.
+ For example, the program does not include malicious code to begin with, and
+ the attacker cannot alter existing instructions, load a malicious shared
+ library, or remap writable pages as executable. If the attacker wants to get
+ the process to perform a specific sequence of actions, they must somehow
+ subvert the normal control flow of the process.
+
+In both of the above paragraphs, it is merely assumed that the attacker's
+*current* capabilities are restricted; that is, their current exploit does not
+directly give them the power to do these things. The attacker's immediate goal
+may well be to leverage their exploit to gain these capabilities, e.g. to load
+a malicious dynamic library into the process, even though the process does not
+directly contain code to do so.
+
+Note that any bug that fits the above threat model can be immediately exploited
+as a denial-of-service attack by simply performing an illegal access and
+crashing the program. Pointer authentication cannot protect against this.
+While denial-of-service attacks are unfortunate, they are also unquestionably
+the best possible result of a bug this severe. Therefore, pointer authentication
+enthusiastically embraces the idea of halting the program on a pointer
+authentication failure rather than continuing in a possibly-compromised state.
+
+Pointer authentication is a form of control-flow integrity (CFI) enforcement.
+The basic security hypothesis behind CFI enforcement is that many bugs can only
+be usefully exploited (other than as a denial-of-service) by leveraging them to
+subvert the control flow of the program. If this is true, then by inhibiting or
+limiting that subversion, it may be possible to largely mitigate the security
+consequences of those bugs by rendering them impractical (or, ideally,
+impossible) to exploit.
+
+Every indirect branch in a program has a purpose. Using human intelligence, a
+programmer can describe where a particular branch *should* go according to this
+purpose: a ``return`` in ``printf`` should return to the call site, a particular
+call in ``qsort`` should call the comparator that was passed in as an argument,
+and so on. But for CFI to enforce that every branch in a program goes where it
+*should* in this sense would require CFI to perfectly enforce every semantic
+rule of the program's abstract machine; that is, it would require making the
+programming environment perfectly sound. That is out of scope. Instead, the
+goal of CFI is merely to catch attempts to make a branch go somewhere that it
+obviously *shouldn't* for its purpose: for example, to stop a call from
+branching into the middle of a function rather than its beginning. As the
+information available to CFI gets better about the purpose of the branch, CFI
+can enforce tighter and tighter restrictions on where the branch is permitted to
+go. Still, ultimately CFI cannot make the program sound. This may help explain
+why pointer authentication makes some of the choices it does: for example, to
+sign and authenticate mostly code pointers rather than every pointer in the
+program. Preventing attackers from redirecting branches is both particularly
+important and particularly approachable as a goal. Detecting corruption more
+broadly is infeasible with these techniques, and the attempt would have far
+higher cost.
+
+Attacks on pointer authentication
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Pointer authentication works as follows. Every indirect branch in a program has
+a purpose. For every purpose, the implementation chooses a
+:ref:`signing schema<Signing schemas>`. At some place where a pointer is known
+to be correct for its purpose, it is signed according to the purpose's schema.
+At every place where the pointer is needed for its purpose, it is authenticated
+according to the purpose's schema. If that authentication fails, the program is
+halted.
+
+There are a variety of ways to attack this.
+
+Attacks of interest to programmers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These attacks arise from weaknesses in the default protections offered by
+pointer authentication. They can be addressed by using attributes or intrinsics
+to opt in to stronger protection.
+
+Substitution attacks
+++++++++++++++++++++
+
+An attacker can simply overwrite a pointer intended for one purpose with a
+pointer intended for another purpose if both purposes use the same signing
+schema and that schema does not use address diversity.
+
+The most common source of this weakness is when code relies on using the default
+language rules for C function pointers. The current implementation uses the
+exact same signing schema for all C function pointers, even for functions of
+substantially different type. While efforts are ongoing to improve constant
+diversity for C function pointers of different type, there are necessary limits
+to this. The C standard requires function pointers to be copyable with
+``memcpy``, which means that function pointers can never use address diversity.
+Furthermore, even if a function pointer can only be replaced with another
+function of the exact same type, that can still be useful to an attacker, as in
+the following example of a hand-rolled "v-table":
+
+.. code-block:: c
+
+  struct ObjectOperations {
+    void (*retain)(Object *);
+    void (*release)(Object *);
+    void (*deallocate)(Object *);
+    void (*logStatus)(Object *);
+  };
+
+The weakness in this design is that, lacking any context-specific
+discriminator, an attacker can substitute any of these fields with any other
+function pointer signed with the default schema. Similarly, the lack of
+address diversity allows an attacker to replace the functions in one type's
+"v-table" with those of another. This can be mitigated by overriding the default
+authentication schema with a more specific signing schema for each purpose. For
+instance, in this example, the ``__ptrauth`` qualifier can be used with a
+different constant discriminator for each field. Since there's no particular
+reason it's important for this v-table to be copyable with ``memcpy``, the
+functions can also be signed with address diversity:
+
+.. code-block:: c
+
+  #if defined(__PTRAUTH__)
+  #define objectOperation(discriminator) \
+    __ptrauth(ptrauth_key_function_pointer, 1, discriminator)
+  #else
+  #define objectOperation(discriminator)
+  #endif
+
+  struct ObjectOperations {
+    void (*objectOperation(0xf017) retain)(Object *);
+    void (*objectOperation(0x2639) release)(Object *);
+    void (*objectOperation(0x8bb0) deallocate)(Object *);
+    void (*objectOperation(0xc5d4) logStatus)(Object *);
+  };
+
+This weakness can also sometimes be mitigated by simply keeping the signed
+pointer in constant memory, but this is less effective than using better signing
+diversity.
+
+.. _Access path attacks:
+
+Access path attacks
++++++++++++++++++++
+
+If a signed pointer is often accessed indirectly (that is, by first loading the
+address of the object where the signed pointer is stored), an attacker can
+affect uses of it by overwriting the intermediate pointer in the access path.
+
+The most common scenario exhibiting this weakness is an object with a pointer to
+a "v-table" (a structure holding many function pointers). An attacker does not
+need to replace a signed function pointer in the v-table if they can instead
+simply replace the v-table pointer in the object with their own pointer ---
+perhaps to memory where they've constructed their own v-table, or to existing
+memory that coincidentally happens to contain a signed pointer at the right
+offset that's been signed with the right signing schema.
+
+This attack arises because data pointers are not signed by default. It works
+even if the signed pointer uses address diversity: address diversity merely
+means that each pointer is signed with its own storage address,
+which (by design) is invariant to changes in the accessing pointer.
+
+Using sufficiently diverse signing schemas within the v-table can provide
+reasonably strong mitigation against this weakness. Always use address and type
+diversity in v-tables to prevent attackers from assembling their own v-table.
+Avoid re-using constant discriminators to prevent attackers from replacing a
+v-table pointer with a pointer to totally unrelated memory that just happens to
+contain a similarly-signed pointer, or reused memory containing a different
+type.
+
+Further mitigation can be attained by signing pointers to v-tables. Any
+signature at all should prevent attackers from forging v-table pointers; they
+will need to somehow harvest an existing signed pointer from elsewhere in
+memory. Using a meaningful constant discriminator will force this to be
+harvested from an object with similar structure (e.g. a different implementation
+of the same interface). Using address diversity will prevent such harvesting
+entirely. However, care must be taken when sourcing the v-table pointer
+originally; do not blindly sign a pointer that is not
+:ref:`safely derived<Safe derivation>`.
+
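+For example, building on the ``ObjectOperations`` structure above, the v-table
+pointer itself can be signed with address diversity and a meaningful constant
+discriminator (a sketch; the discriminator value is illustrative):
+
+.. code-block:: c
+
+  struct Object {
+    // A forged or harvested replacement for this pointer would have to be
+    // signed for this exact address with this exact discriminator.
+    const struct ObjectOperations *
+        __ptrauth(ptrauth_key_process_independent_data, 1, 0x4a7e) operations;
+  };
+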
+.. _Signing oracles:
+
+Signing oracles
++++++++++++++++
+
+A signing oracle is a bit of code which can be exploited by an attacker to sign
+an arbitrary pointer in a way that can later be recovered. Such oracles can be
+used by attackers to forge signatures matching the oracle's signing schema,
+which is likely to cause a total compromise of pointer authentication's
+effectiveness.
+
+This attack only affects ordinary programmers if they are using certain
+treacherous patterns of code. Currently this includes:
+
+- all uses of the ``__ptrauth_sign_unauthenticated`` intrinsic and
+- assigning values to ``__ptrauth``-qualified l-values.
+
+Care must be taken in these situations to ensure that the pointer being signed
+has been :ref:`safely derived<Safe derivation>` or is otherwise not possible to
+attack. (In some cases, this may be challenging without compiler support.)
+
+A diagnostic will be added in the future for implicitly dangerous patterns of
+code, such as assigning a non-safely-derived value to a
+``__ptrauth``-qualified l-value.
+
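+For example, the following is a potential signing oracle if ``p`` can be
+attacker-controlled (a sketch; the names and schema are illustrative):
+
+.. code-block:: c
+
+  #include <ptrauth.h>
+
+  extern void (* __ptrauth(ptrauth_key_function_pointer, 0, 0xbeef)
+                   handler)(void);
+
+  void install(void *p) {
+    // `p` is not safely derived: this signs an arbitrary attacker-supplied
+    // pointer with the field's schema.
+    handler = (void (*)(void))p;
+  }
+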
+.. _Authentication oracles:
+
+Authentication oracles
+++++++++++++++++++++++
+
+An authentication oracle is a bit of code which can be exploited by an attacker
+to leak whether a signed pointer is validly signed without halting the program
+if it isn't. Such oracles can be used to forge signatures matching the oracle's
+signing schema if the attacker can repeatedly invoke the oracle for different
+candidate signed pointers. This is likely to cause a total compromise of pointer
+authentication's effectiveness.
+
+There should be no way for an ordinary programmer to create an authentication
+oracle using the current set of operations. However, implementation flaws in the
+past have occasionally given rise to authentication oracles due to a failure to
+immediately trap on authentication failure.
+
+The likelihood of creating an authentication oracle is why there is currently no
+intrinsic which queries whether a signed pointer is validly signed.
+
+
+Attacks of interest to implementors
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These attacks are not inherent to the model; they arise from mistakes in either
+implementing or using the ``sign`` and ``auth`` operations. Avoiding these mistakes
+requires careful work throughout the system.
+
+Failure to trap on authentication failure
++++++++++++++++++++++++++++++++++++++++++
+
+Any failure to halt the program on an authentication failure is likely to be
+exploitable by attackers to create an
+:ref:`authentication oracle<Authentication oracles>`.
+
+There are several different ways to introduce this problem:
+
+- The implementation might try to halt the program in some way that can be
+ intercepted.
+
+ For example, the Armv8.3 ``aut`` instructions do not directly trap on
+ authentication failure on processors that lack the ``FPAC`` extension.
+ Instead, they corrupt their results to be invalid pointers, with the idea that
+ subsequent uses of those pointers will trigger traps as bad memory accesses.
+ However, most kernels do not immediately halt programs that trap due to bad
+ memory accesses; instead, they notify the process to give it an opportunity to
+ recover. If this happens with an ``auth`` failure, the attacker may be able to
+ exploit the recovery path in a way that creates an oracle. Kernels must
+ provide a way for a process to trap unrecoverably, and this should cover all
+ ``FPAC`` traps. Compilers must ensure that ``auth`` failures trigger an
+ unrecoverable trap, ideally by taking advantage of ``FPAC``, but if necessary
+ by emitting extra instructions.
+
+- A compiler might use an intermediate representation (IR) for ``sign`` and
+ ``auth`` operations that cannot make adequate correctness guarantees.
+
+ For example, suppose that an IR uses ARMv8.3-like semantics for ``auth``: the
+ operation merely corrupts its result on failure instead of promising to trap.
+ A frontend might emit patterns of IR that always follow an ``auth`` with a
+ memory access, thinking that this ensures correctness. But if the IR can be
+ transformed to insert code between the ``auth`` and the access, or if the
+ ``auth`` can be speculated, then this potentially creates an oracle. It is
+ better for ``auth`` to semantically guarantee to trap, potentially requiring
+ an explicit check in the generated code. An ARMv8.3-like target can avoid this
+ explicit check in the common case by recognizing the pattern of an ``auth``
+ followed immediately by an access.
+
+Attackable code sequences
++++++++++++++++++++++++++
+
+If code that is part of a pointer authentication operation is interleaved with
+code that may itself be vulnerable to attacks, an attacker may be able to use
+this to create a :ref:`signing<Signing oracles>` or
+:ref:`authentication<Authentication oracles>` oracle.
+
+For example, suppose that the compiler is generating a call to a function and
+passing two arguments: a signed constant pointer and a value derived from a
+call. In ARMv8.3, this code might look like so:
+
+.. code-block:: asm
+
+  adr x19, _callback    ; compute &_callback
+  paciza x19            ; sign it with a constant discriminator of 0
+  bl _argGenerator      ; call _argGenerator() (returns in x0)
+  mov x1, x0            ; move call result to second arg register
+  mov x0, x19           ; move signed &_callback to first arg register
+  bl _function          ; call _function
+
+This code is correct, as would be a sequencing that does *both* the ``adr`` and
+the ``paciza`` after the call to ``_argGenerator``. But a sequence that
+computes the address of ``_callback`` but leaves it as a raw pointer in a
+register during the call to ``_argGenerator`` would be vulnerable:
+
+.. code-block:: asm
+
+  adr x19, _callback    ; compute &_callback
+  bl _argGenerator      ; call _argGenerator() (returns in x0)
+  mov x1, x0            ; move call result to second arg register
+  paciza x19            ; sign &_callback
+  mov x0, x19           ; move signed &_callback to first arg register
+  bl _function          ; call _function
+
+If ``_argGenerator`` spills ``x19`` (a callee-save register), and if the
+attacker can perform a write during this call, then the attacker can overwrite
+the spill slot with an arbitrary pointer that will eventually be unconditionally
+signed after the function returns. This would be a signing oracle.
+
+The implementation can avoid this by obeying two basic rules:
+
+- The compiler's intermediate representations (IR) should not provide operations
+ that expose intermediate raw pointers. This may require providing extra
+ operations that perform useful combinations of operations.
+
+ For example, there should be an "atomic" auth-and-resign operation that should
+ be used instead of emitting an ``auth`` operation whose result is fed into a
+ ``sign``.
+
+ Similarly, if a pointer should be authenticated as part of doing a memory
+ access or a call, then the access or call should be decorated with enough
+ information to perform the authentication; there should not be a separate
+ ``auth`` whose result is used as the pointer operand for the access or call.
+ (In LLVM IR, we do this for calls, but not yet for loads or stores.)
+
+ "Operations" includes things like materializing a signed value to a known
+ function or global variable. The compiler must be able to recognize and emit
+ this as a unified operation, rather than potentially splitting it up as in
+ the example above.
+
+- The compiler backend should not be too aggressive about scheduling
+ instructions that are part of a pointer authentication operation. This may
+ require custom code-generation of these operations in some cases.
+
+Register clobbering
++++++++++++++++++++
+
+As a refinement of the section on `Attackable code sequences`_, if the attacker
+has the ability to modify arbitrary *register* state at arbitrary points in the
+program, then special care must be taken.
+
+For example, ARMv8.3 might materialize a signed function pointer like so:
+
+.. code-block:: asm
+
+  adr x0, _callback     ; compute &_callback
+  paciza x0             ; sign it with a constant discriminator of 0
+
+If an attacker has the ability to overwrite ``x0`` between these two
+instructions, this code sequence is vulnerable to becoming a signing oracle.
+
+For the most part, this sort of attack is not possible: it is a basic element of
+the design of modern computation that register state is private and inviolable.
+However, in systems that support asynchronous interrupts, this property requires
+the cooperation of the interrupt-handling code. If that code saves register
+state to memory, and that memory can be overwritten by an attacker, then
+essentially the attack can overwrite arbitrary register state at an arbitrary
+point. This could be a concern if the threat model includes attacks on the
+kernel or if the program uses user-space preemptive multitasking.
+
+(Readers might object that an attacker cannot rely on asynchronous interrupts
+triggering at an exact instruction boundary. In fact, researchers have had some
+success in doing exactly that. Even ignoring that, though, we should aim to
+protect against lucky attackers just as much as good ones.)
+
+To protect against this, saved register state must be at least partially signed
+(using something like `ptrauth_sign_generic_data`_). This is required for
+correctness anyway because saved thread states include security-critical
+registers such as SP, FP, PC, and LR (where applicable). Ideally, this
+signature would cover all the registers, but since saving and restoring
+registers can be very performance-sensitive, that may not be acceptable. It is
+sufficient to set aside a small number of scratch registers that will be
+guaranteed to be preserved correctly; the compiler can then be careful to only
+store critical values like intermediate raw pointers in those registers.
+
+``setjmp`` and ``longjmp`` should sign and authenticate the core registers (SP,
+FP, PC, and LR), but they do not need to worry about intermediate values because
+``setjmp`` can only be called synchronously, and the compiler should never
+schedule pointer-authentication operations interleaved with arbitrary calls.
+
+.. _Relative addresses:
+
+Attacks on relative addressing
+++++++++++++++++++++++++++++++
+
+Relative addressing is a technique used to compress and reduce the load-time
+cost of infrequently-used global data. The pointer authentication system is
+unlikely to support signing or authenticating a relative address, and in most
+cases it would defeat the point to do so: it would take additional storage
+space, and applying the signature would take extra work at load time.
+
+Relative addressing is not precluded by the use of pointer authentication, but
+it does take extra considerations to make it secure:
+
+- Relative addresses must only be stored in read-only memory. A writable
+ relative address can be overwritten to point nearly anywhere, making it
+ inherently insecure; this danger can only be compensated for with techniques
+ for protecting arbitrary data like `ptrauth_sign_generic_data`_.
+
+- Relative addresses must only be accessed through signed pointers with adequate
+  diversity. If an attacker can perform an
+  :ref:`access path attack<Access path attacks>` to replace the pointer through
+  which the relative address is accessed, they can easily cause the relative
+  address to point wherever they want.
+
+Signature forging
++++++++++++++++++
+
+If an attacker can exactly reproduce the behavior of the signing algorithm, and
+they know all the correct inputs to it, then they can perfectly forge a
+signature on an arbitrary pointer.
+
+There are three components to avoiding this mistake:
+
+- The abstract signing algorithm should be good: it should not have glaring
+ flaws which would allow attackers to predict its result with better than
+ random accuracy without knowing all the inputs (like the key values).
+
+- The key values should be kept secret. If at all possible, they should never
+ be stored in accessible memory, or perhaps only stored encrypted.
+
+- Contexts that are meant to be independently protected should use different
+ key values. For example, the kernel should not use the same keys as user
+ processes. Different user processes should also use different keys from each
+ other as much as possible, although this may pose its own technical
+ challenges.
+
+Remapping
++++++++++
+
+If an attacker can change the memory protections on certain pages of the
+program's memory, that can substantially weaken the protections afforded by
+pointer authentication.
+
+- If an attacker can inject their own executable code, they can also certainly
+ inject code that can be used as a :ref:`signing oracle<Signing Oracles>`.
+ The same is true if they can write to the instruction stream.
+
+- If an attacker can remap read-only program data sections to be writable, then
+ any use of :ref:`relative addresses` in global data becomes insecure.
+
+- On platforms that use them, if an attacker can remap the memory containing
+ the `global offset tables`_ as writable, then any unsigned pointers in those
+ tables are insecure.
+
+Remapping memory in this way often requires the attacker to have already
+substantively subverted the control flow of the process. Nonetheless, if the
+operating system has a mechanism for mapping pages in a way that cannot be
+remapped, this should be used wherever possible.
+
+.. _Safe Derivation:
+
+Safe derivation
+~~~~~~~~~~~~~~~
+
+Whether a data pointer is stored, even briefly, as a raw pointer can affect the
+security-correctness of a program. (Function pointers are never implicitly
+stored as raw pointers; raw pointers to functions can only be produced with the
+``<ptrauth.h>`` intrinsics.) Repeated re-signing can also impact performance.
+Clang makes a modest set of guarantees in this area:
+
+- An expression of pointer type is said to be **safely derived** if:
+
+ - it takes the address of a global variable or function, or
+
+ - it is a load from a gl-value of ``__ptrauth``-qualified type, or
+
+ - it is a load from read-only memory that has been initialized from a safely
+ derived source, such as the `data const` section of a binary or library.
+
+- If a value that is safely derived is assigned to a ``__ptrauth``-qualified
+ object, including by initialization, then the value will be directly signed as
+ appropriate for the target qualifier and will not be stored as a raw pointer.
+
+- If the function expression of a call is a gl-value of ``__ptrauth``-qualified
+ type, then the call will be authenticated directly according to the source
+ qualifier and will not be resigned to the default rule for a function pointer
+ of its type.
+
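+For example (a minimal sketch):
+
+.. code-block:: c
+
+  #include <ptrauth.h>
+
+  extern void callback_impl(void);
+
+  // &callback_impl is safely derived (the address of a function), so it is
+  // signed directly for this qualifier and never stored as a raw pointer.
+  void (* __ptrauth(ptrauth_key_function_pointer, 0, 0x42) callback)(void) =
+      &callback_impl;
+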
+These guarantees are known to be inadequate for data pointer security. In
+particular, Clang should be enhanced to make the following guarantees:
+
+- A pointer should additionally be considered safely derived if it is:
+
+ - the address of a gl-value that is safely derived,
+
+ - the result of pointer arithmetic on a pointer that is safely derived (with
+ some restrictions on the integer operand),
+
+ - the result of a comma operator where the second operand is safely derived,
+
+ - the result of a conditional operator where the selected operand is safely
+ derived, or
+
+ - the result of loading from a safely derived gl-value.
+
+- A gl-value should be considered safely derived if it is:
+
+ - a dereference of a safely derived pointer,
+
+ - a member access into a safely derived gl-value, or
+
+ - a reference to a variable.
+
+- An access to a safely derived gl-value should be guaranteed to not allow
+ replacement of any of the safely-derived component values at any point in the
+ access. "Access" should include loading a function pointer.
+
+- Assignments should include pointer-arithmetic operators like ``+=``.
+
+Making these guarantees will require further work, including significant new
+support in LLVM IR.
+
+Furthermore, Clang should implement a warning when assigning a data pointer that
+is not safely derived to a ``__ptrauth``-qualified gl-value.
+
+
+Language ABI
+------------
+
+This section describes the pointer-authentication ABI currently implemented in
+Clang for the Apple arm64e target. As other targets adopt pointer
+authentication, this section should be generalized to express their ABIs as
+well.
+
+Key assignments
+~~~~~~~~~~~~~~~
+
+ARMv8.3 provides four abstract signing keys: ``IA``, ``IB``, ``DA``, and ``DB``.
+The architecture designates ``IA`` and ``IB`` for signing code pointers and
+``DA`` and ``DB`` for signing data pointers; this is reinforced by two
+properties:
+
+- The ISA provides instructions that perform combined auth+call and auth+load
+ operations; these instructions can only use the ``I`` keys and ``D`` keys,
+ respectively.
+
+- AArch64's TBI feature can be separately enabled for code pointers (controlling
+  whether indirect-branch instructions ignore those bits) and data pointers
+  (controlling whether memory-access instructions ignore those bits). If TBI is
+  enabled for a kind of pointer, the sign and auth operations preserve the TBI
+  bits when signing with an associated key (at the cost of shrinking the number
+  of available signing bits by 8).
+
+arm64e then further subdivides the keys as follows:
+
+- The ``A`` keys are used primarily for "global" purposes like signing v-tables
+  and function pointers. These keys are sometimes called *process-independent*
+  or *cross-process* because on existing OSes they are not changed when changing
+  processes, although this is not a platform guarantee.
+
+- The ``B`` keys are used primarily for "local" purposes like signing return
+  addresses. These keys are sometimes called *process-specific* because they
+  are typically different between processes. However, they are in fact shared
+  across processes in one situation: systems which provide ``fork`` cannot
+  change these keys in the child process; they can only be changed during
+  ``exec``.
+
+Implementation-defined algorithms and quantities
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The cryptographic hash algorithm used to compute signatures in ARMv8.3 is a
+private detail of the hardware implementation.
+
+arm64e restricts constant discriminators (used in ``__ptrauth`` and
+``ptrauth_blend_discriminator``) to the range from 0 to 65535, inclusive. A 0
+discriminator generally signifies that no blending is required; see the
+documentation for ``ptrauth_blend_discriminator``. This range is somewhat
+narrow but has two advantages:
+
+- The AArch64 ISA allows an arbitrary 16-bit immediate to be written over the
+ top 16 bits of a register in a single instruction:
+
+ .. code-block:: asm
+
+ movk xN, #0x4849, LSL 48
+
+ This is ideal for the discriminator blending operation because it adds minimal
+ code-size overhead and avoids overwriting any interesting bits from the
+ pointer. Blending in a wider constant discriminator would either clobber
+ interesting bits (e.g. if it was loaded with ``movk xN, #0x4c4f, LSL 32``) or
+ require significantly more code (e.g. if the discriminator was loaded with a
+ ``mov+bfi`` sequence).
+
+- It is possible to pack a 16-bit discriminator into loader metadata with
+ minimal compromises, whereas a wider discriminator would require extra
+ metadata storage and therefore significantly impact load times.
+
+The string hash used by ``ptrauth_string_discriminator`` is a 64-bit SipHash-2-4
+using the constant seed ``b5d4c9eb79104a796fec8b1b428781d4`` (big-endian), with
+the result reduced by modulo to the range of non-zero discriminators (i.e.
+``(rawHash % 65535) + 1``).
+
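+For example (a sketch; the string is illustrative):
+
+.. code-block:: c
+
+  #include <ptrauth.h>
+
+  // A stable constant discriminator in the range [1, 65535], suitable for
+  // use as the third operand of a __ptrauth qualifier.
+  #define RETAIN_DISC ptrauth_string_discriminator("ObjectOperations::retain")
+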
+Return addresses
+~~~~~~~~~~~~~~~~
+
+The kernel must ensure that attackers cannot replace LR due to an asynchronous
+exception; see `Register clobbering`_. If this is done by generally protecting
+LR, then functions which don't spill LR to the stack can avoid signing it
+entirely. Otherwise, the return address must be signed; on arm64e it is signed
+with the ``IB`` key using the stack pointer on entry as the discriminator.
+
+Protecting return addresses is of such particular importance that the ``IB`` key
+is almost entirely reserved for this purpose.
+
+Global offset tables
+~~~~~~~~~~~~~~~~~~~~
+
+The global offset table (GOT) is not part of the language ABI, but it is a
+common implementation technique for dynamic linking which deserves special
+discussion here.
+
+Whenever possible, signed pointers should be materialized directly in code
+rather than via the GOT, e.g. using an ``adrp+add+pac`` sequence on ARMv8.3.
+This decreases the amount of work necessary at load time to initialize the GOT,
+but more importantly, it defines away the potential for several attacks:
+
+- Attackers cannot change instructions, so there is no way to cause this code
+ sequence to materialize a different pointer, whereas an access via the GOT
+ always has *at minimum* a probabilistic chance to be the target of successful
+ `substitution attacks`_.
+
+- The GOT is a dense pool of fixed pointers at a fixed offset relative to code;
+ attackers can search this pool for useful pointers that can be used in
+ `substitution attacks`_, whereas pointers that are only materialized directly
+ are not so easily available.
+
+- Similarly, attackers can use `access path attacks`_ to replace a pointer to a
+ signed pointer with a pointer to the GOT if the signing schema used within the
+ GOT happens to be the same as the original pointer. This kind of collision
+ becomes much less likely to be useful the fewer pointers are in the GOT in the
+ first place.
+
+If this can be done for a symbol, then the compiler need only ensure that it
+materializes the signed pointer using registers that are safe against
+`register clobbering`_.
+
+However, many symbols can only be accessed via the GOT, e.g. because they
+resolve to definitions outside of the current image. In this case, care must
+be taken to ensure that using the GOT does not introduce weaknesses.
+
+- If the entire GOT can be mapped read-only after loading, then no signing is
+ required within the GOT. In fact, not signing pointers in the GOT is
+ preferable in this case because it makes the GOT useless for the harvesting
+ and access-path attacks above. Storing raw pointers in this way is usually
+ extremely unsafe, but for the special case of an immutable GOT entry it's fine
+ because the GOT is always accessed via an address that is directly
+ materialized in code and thus provably unattackable. (But see `Remapping`_.)
+
+- Otherwise, GOT entries which are used for producing a signed pointer constant
+ must be signed. The signing schema used in the GOT need not match the target
+ signing schema for the signed constant. To counteract the threats of
+ substitution attacks, it's best if GOT entries can be signed with address
+ diversity. Using a good constant discriminator as well (perhaps derived from
+ the symbol name) can make it less useful to use a pointer to the GOT as the
+ replacement in an :ref:`access path attack<Access path attacks>`.
+
+In either case, the compiler must ensure that materializing the address of a GOT
+entry as part of producing a signed pointer constant is not vulnerable to
+`register clobbering`_. If the linker also generates code for this, e.g. for
+call stubs, this generated code must take the same precautions.
+
+Dynamic symbol lookup
+~~~~~~~~~~~~~~~~~~~~~
+
+Platforms that support dynamically loading or resolving symbols must define
+the pointer authentication semantics of the APIs provided to perform such
+lookups. While a platform may choose to return unsigned pointers from such
+functions and rely on the caller performing the initial signing, doing so
+creates the opportunity for caller-side errors that create
+:ref:`signing oracles<Signing Oracles>`.
+
+On arm64e the ``dlsym`` function is used to resolve a symbol at runtime. If the
+resolved symbol is a function or other code pointer, the returned pointer is
+signed using the default function signing schema described in
+:ref:`C function pointers<C function abi>`. If the resolved symbol is not a
+code pointer, it is returned as an unsigned pointer.
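+
+A minimal caller-side sketch, assuming a hypothetical ``libfoo.dylib`` that
+exports a function ``foo``; no caller-side signing is needed because ``dlsym``
+already returns a signed function pointer:
+
+.. code-block:: c
+
+   #include <dlfcn.h>
+
+   void call_foo(void) {
+     void *handle = dlopen("libfoo.dylib", RTLD_NOW);
+     void (*foo)(void) = (void (*)(void))dlsym(handle, "foo");
+     if (foo)
+       foo(); /* authenticated call using the default function schema */
+   }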
+
+.. _C function abi:
+
+C function pointers
+~~~~~~~~~~~~~~~~~~~
+
+On arm64e, C function pointers are currently signed with the ``IA`` key without
+address diversity and with a constant discriminator of 0.
+
+The C and C++ standards do not permit C function pointers to be signed with
+address diversity by default: in C++ terms, function pointer types are required
+to be trivially copyable, which means they must be copyable with ``memcpy``.
+
+The use of a uniform constant discriminator greatly simplifies the adoption of
+arm64e, but it is a significant weakness in the mitigation because it allows any
+C function pointer to be replaced with another. Clang supports
+``-fptrauth-function-pointer-type-discrimination``, which enables a variant ABI
+that uses type discrimination for function pointers. When generating the
+type-based discriminator for a function type, all primitive integer types are
+considered equivalent due to the prevalence of mismatched integer parameter
+types in real-world code. Type discrimination of function pointers is
+ABI-incompatible with the standard arm64e ABI, but it can be used in constrained
+contexts such as embedded systems or in code that does not require function
+pointer interoperation with the standard ABI (e.g. because it does not pass
+function pointers back and forth, or only does so through
+``__ptrauth``-qualified l-values).
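+
+As an illustrative sketch (the key, modes, and discriminator string here are
+hypothetical), a function pointer stored in a structure can opt into a custom
+schema with the ``__ptrauth`` qualifier instead of the default ``IA``/0 schema:
+
+.. code-block:: c
+
+   #include <ptrauth.h>
+
+   struct Callbacks {
+     /* Re-signed on store with address diversity and a string-derived
+        constant discriminator. */
+     void (*__ptrauth(ptrauth_key_function_pointer, 1,
+                      ptrauth_string_discriminator("Callbacks::fire")) fire)(void);
+   };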
+
+C++ virtual tables
+~~~~~~~~~~~~~~~~~~
+
+By default the pointer to a C++ virtual table is currently signed with the
+``DA`` key, address diversity, and a constant discriminator equal to the string
+hash (see `ptrauth_string_discriminator`_) of the mangled v-table identifier
+of the primary base class for the v-table. To support existing code or ABI
+constraints it is possible to use the ``ptrauth_vtable_pointer`` attribute to
+override the schema used for the v-table pointer of the base type of a
+polymorphic class hierarchy. This attribute permits the configuration of the
+key, address diversity mode, and any extra constant discriminator to be used.
+
+Virtual functions in a C++ virtual table are signed with the ``IA`` key, address
+diversity, and a constant discriminator equal to the string hash (see
+`ptrauth_string_discriminator`_) of the mangled name of the function which
+originally gave rise to the v-table slot.
+
+C++ dynamic_cast
+~~~~~~~~~~~~~~~~
+
+C++'s ``dynamic_cast`` presents a difficulty relative to other polymorphic
+languages that have a `top type <https://en.wikipedia.org/wiki/Any_type>`_,
+because the use of declaration diversity for v-table pointers results in
+distinct signing schemas for each isolated type hierarchy. As a result, it is
+not possible for the Itanium-ABI-defined ``__dynamic_cast`` entry point to
+directly authenticate the v-table pointer of the provided object.
+
+The current implementation uses a forced authentication of the subject object's
+v-table prior to invoking ``__dynamic_cast`` to partially verify that the
+object's v-table is valid. The ``__dynamic_cast`` implementation currently
+relies on this caller-side check to limit the substitutability of the v-table
+pointer with an incorrect or invalid v-table. The subsequent implementation of
+the dynamic cast algorithm is built on pointer-auth protected ``type_info``
+objects.
+
+In the future, a richer solution may be developed to support vending the
+correct authentication schema directly to the ``dynamic_cast`` implementation.
+
+C++ std::type_info v-table pointers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The v-table pointer of the ``std::type_info`` type is signed with the ``DA`` key
+and no additional diversity.
+
+C++ member function pointers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A member function pointer is signed with the ``IA`` key, no address diversity,
+and a constant discriminator equal to the string hash
+(see `ptrauth_string_discriminator`_) of the member pointer type. Address
+diversity is not permitted by C++ for member function pointers because they must
+be trivially-copyable types.
+
+The Itanium C++ ABI specifies that member function pointers to virtual functions
+simply store an offset to the correct v-table slot. This ABI cannot be used
+securely with pointer authentication because there is no safe place to store the
+constant discriminator for the target v-table slot: if it's stored with the
+offset, an attacker can simply overwrite it with the right discriminator for the
+offset. Even if the programmer never uses pointers to virtual functions, the
+existence of this code path makes all member function pointer dereferences
+insecure.
+
+arm64e changes this ABI so that virtual function pointers are stored using
+dispatch thunks with vague linkage. Because arm64e supports interoperation with
+``arm64`` code when pointer authentication is disabled, an arm64e member
+function pointer dereference still recognizes the virtual-function
+representation but uses a bogus discriminator on that path that should always
+trap if pointer authentication is enabled dynamically.
+
+The use of dispatch thunks means that ``==`` on member function pointers is no
+longer reliable for virtual functions, but this is acceptable because the
+standard makes no guarantees about it in the first place.
+
+The use of dispatch thunks is also required to support declaration-specific
+authentication schemas for v-table pointers.
+
+C++ mangling
+~~~~~~~~~~~~
+
+When the ``__ptrauth`` qualifier appears in a C++ mangled name,
+it is mangled as a vendor qualifier with the signature
+``U9__ptrauthILj<key>ELb<addressDiscriminated>ELj<extraDiscriminator>EE``.
+
+For example, ``int * __ptrauth(1, 0, 1234)`` is mangled as
+``U9__ptrauthILj1ELb0ELj1234EE``.
+
+If the v-table pointer authentication scheme of a polymorphic class is
+overridden, we mangle the override information with the vendor qualifier
+``__vtptrauth(int key, bool addressDiscriminated, unsigned extraDiscriminator)``,
+where the extra discriminator is the explicit value the specified discrimination
+mode evaluates to.
+
+Blocks
+~~~~~~
+
+Block pointers are data pointers which must interoperate with the ObjC ``id``
+type and therefore cannot be signed themselves. As blocks conform to the ObjC
+``id`` type, they contain an ``isa`` pointer signed as described
+:ref:`below<Objc isa and super>`.
+
+The invocation pointer in a block is signed with the ``IA`` key using address
+diversity and a constant discriminator of 0. Using a uniform discriminator is
+seen as a weakness to be potentially improved, but this is tricky due to the
+subtype polymorphism directly permitted for blocks.
+
+Block descriptors and ``__block`` variables can contain pointers to functions
+that can be used to copy or destroy the object. These functions are signed with
+the ``IA`` key, address diversity, and a constant discriminator of 0. The
+structure of block descriptors is under consideration for improvement.
+
+Objective-C runtime
+~~~~~~~~~~~~~~~~~~~
+
+In addition to the compile-time ABI design, the Objective-C runtime provides
+additional protection to methods and other metadata that have been loaded into
+the Objective-C method cache; this protection is private to the runtime.
+
+Objective-C methods
+~~~~~~~~~~~~~~~~~~~
+
+Objective-C method lists sign methods with the ``IA`` key using address
+diversity and a constant discriminator of 0. Using a uniform constant
+discriminator is believed to be acceptable because these tables are only
+accessed internally to the Objective-C runtime.
+
+Objective-C class method list pointer
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The method list pointer in Objective-C classes is signed with the ``DA`` key
+using address diversity and a constant discriminator of 0xC310.
+
+Objective-C class read-only data pointer
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The read-only data pointer in Objective-C classes is signed with the ``DA`` key
+using address diversity and a constant discriminator of 0x61F8.
+
+.. _Objc isa and super:
+
+Objective-C ``isa`` and ``super`` pointers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An Objective-C object's ``isa`` and ``super`` pointers are both signed with
+the ``DA`` key using address diversity and constant discriminators of 0x6AE1
+and 0x25DA respectively.
+
+Objective-C ``SEL`` pointers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default, the type of an Objective-C instance variable of type ``SEL``, when
+the qualifiers do not include an explicit ``__ptrauth`` qualifier, is adjusted
+to be qualified with ``__ptrauth(ptrauth_key_asdb, 1, 0x57C2)``.
+
+This provides a measure of implicit at-rest protection to Objective-C classes
+that store selectors, as in the common target-action design pattern. This
+prevents attackers from overriding the selector to invoke an arbitrary different
+method, which is a major attack vector in Objective-C. Since ``SEL`` values are
+not normally passed around as signed pointers, there is a
+:ref:`signing oracle<Signing Oracles>` associated with the initialization of the
+ivar, but the use of address and constant diversity limits the risks.
+
+The implicit qualifier means that the type of the ivar does not match its
+declaration, which can cause type errors if the address of the ivar is taken:
+
+.. code-block:: ObjC
+
+ @interface A : NSObject {
+ SEL _s;
+ }
+ @end
+
+ void f(SEL *);
+
+ @implementation A
+ -(void)g
+ {
+ f(&_s);
+ }
+ @end
+
+To fix such a mismatch, use the schema macro from ``<ptrauth.h>``:
+
+.. code-block:: ObjC
+
+ #include <ptrauth.h>
+
+ void f(SEL __ptrauth_objc_sel*);
+
+Alternatively, but less safely, and introducing the possibility of a
+:ref:`signing or authentication oracle<Signing oracles>`, an unauthenticated
+temporary may be used as intermediate storage.
-Alternative Implementations
+Alternative implementations
---------------------------
-Signature Storage
+Signature storage
~~~~~~~~~~~~~~~~~
It is not critical for the security of pointer authentication that the
@@ -536,7 +1667,7 @@ Storing the signature in the high bits, as Armv8.3 does, has several trade-offs:
return signed pointers. This means that clients of these APIs will not
require insecure code in order to correctly receive a function pointer.
-Hashing vs. Encrypting Pointers
+Hashing vs. encrypting pointers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Armv8.3 implements ``sign`` by computing a cryptographic hash and storing that
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 0e9fcaa..c0c2766 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -1,3 +1,6 @@
+.. If you want to modify sections/contents permanently, you should modify both
+ ReleaseNotes.rst and ReleaseNotesTemplate.txt.
+
===========================================
Clang |release| |ReleaseNotesTitle|
===========================================
@@ -34,6 +37,22 @@ latest release, please see the `Clang Web Site <https://clang.llvm.org>`_ or the
Potentially Breaking Changes
============================
+- Clang will now emit a warning if the auto-detected GCC installation
+ directory (i.e. the one with the largest version number) does not
+ contain libstdc++ include directories although a "complete" GCC
+ installation directory containing the include directories is
+ available. It is planned to change the auto-detection to prefer the
+ "complete" directory in the future. The warning will disappear if
+ the libstdc++ include directories are either installed or removed
+ for all GCC installation directories considered by the
+ auto-detection; see the output of ``clang -v`` for a list of those
+ directories. If the GCC installations cannot be modified and
+ maintaining the current choice of the auto-detection is desired, the
+ GCC installation directory can be selected explicitly using the
+ ``--gcc-install-dir`` command line argument. This will silence the
+ warning. It can also be disabled using the
+ ``-Wno-gcc-install-dir-libstdcxx`` command line flag.
+
C/C++ Language Potentially Breaking Changes
-------------------------------------------
@@ -70,12 +89,19 @@ ABI Changes in This Version
AST Dumping Potentially Breaking Changes
----------------------------------------
+- How nested name specifiers are dumped and printed has changed, following the
+  corresponding Clang AST changes.
Clang Frontend Potentially Breaking Changes
-------------------------------------------
+- Members of anonymous unions/structs are now injected as ``IndirectFieldDecl``
+  into the enclosing record even if their names conflict with other names in
+  the scope. Such ``IndirectFieldDecl`` nodes are marked invalid.
Clang Python Bindings Potentially Breaking Changes
--------------------------------------------------
+- TypeKind ``ELABORATED`` is no longer used, following the Clang AST changes
+  that removed ElaboratedTypes. The value becomes unused, and existing users
+  should expect the former underlying type to be reported instead.
What's New in Clang |release|?
==============================
@@ -83,6 +109,11 @@ What's New in Clang |release|?
C++ Language Changes
--------------------
+- A new family of builtins ``__builtin_*_synthesises_from_spaceship`` has been added. These can be queried to know
+ whether the ``<`` (``lt``), ``>`` (``gt``), ``<=`` (``le``), or ``>=`` (``ge``) operators are synthesised from a
+ ``<=>``. This makes it possible to optimize certain facilities by using the ``<=>`` operation directly instead of
+ doing multiple comparisons.
+
C++2c Feature Support
^^^^^^^^^^^^^^^^^^^^^
@@ -109,20 +140,78 @@ C23 Feature Support
Non-comprehensive list of changes in this release
-------------------------------------------------
+- Added ``__builtin_elementwise_fshl`` and ``__builtin_elementwise_fshr``.
+
+- ``__builtin_elementwise_abs`` can now be used in constant expressions.
+
- Added ``__builtin_elementwise_minnumnum`` and ``__builtin_elementwise_maxnumnum``.
-- Trapping UBSan (e.g. ``-fsanitize-trap=undefined``) now emits a string describing the reason for
- trapping into the generated debug info. This feature allows debuggers (e.g. LLDB) to display
- the reason for trapping if the trap is reached. The string is currently encoded in the debug
- info as an artificial frame that claims to be inlined at the trap location. The function used
- for the artificial frame is an artificial function whose name encodes the reason for trapping.
- The encoding used is currently the same as ``__builtin_verbose_trap`` but might change in the future.
- This feature is enabled by default but can be disabled by compiling with
- ``-fno-sanitize-annotate-debug-info-traps``.
+- Trapping UBSan (e.g. ``-fsanitize=undefined -fsanitize-trap=undefined``) now
+ emits a string describing the reason for trapping into the generated debug
+ info. This feature allows debuggers (e.g. LLDB) to display the reason for
+ trapping if the trap is reached. The string is currently encoded in the debug
+ info as an artificial frame that claims to be inlined at the trap location.
+ The function used for the artificial frame is an artificial function whose
+ name encodes the reason for trapping. The encoding used is currently the same
+ as ``__builtin_verbose_trap`` but might change in the future. This feature is
+ enabled by default but can be disabled by compiling with
+ ``-fno-sanitize-debug-trap-reasons``. The feature has a ``basic`` and
+ ``detailed`` mode (the default). The ``basic`` mode emits a hard-coded string
+ per trap kind (e.g. ``Integer addition overflowed``) and the ``detailed`` mode
+ emits a more descriptive string describing each individual trap (e.g. ``signed
+ integer addition overflow in 'a + b'``). The ``detailed`` mode produces larger
+ debug info than ``basic`` but is more helpful for debugging. The
+ ``-fsanitize-debug-trap-reasons=`` flag can be used to switch between the
+  different modes or disable the feature entirely. Note that due to trap
+  merging in optimized builds (i.e. in each function, all traps of the same
+  kind get merged into the same trap instruction), the trap reasons might be
+  removed. To prevent this, build without optimizations (i.e. use ``-O0`` or
+  the ``optnone`` function attribute) or use the ``-fno-sanitize-merge=`` flag
+  in optimized builds.
+
+- ``__builtin_elementwise_max`` and ``__builtin_elementwise_min`` functions for integer types can
+ now be used in constant expressions.
+
+- A vector of booleans is now a valid condition for the ternary ``?:`` operator.
+ This binds to a simple vector select operation.
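+
+  A small sketch of the new behavior (the type names are illustrative):
+
+  .. code-block:: c++
+
+     typedef int int4 __attribute__((ext_vector_type(4)));
+     typedef bool bool4 __attribute__((ext_vector_type(4)));
+
+     int4 select(bool4 mask, int4 a, int4 b) {
+       return mask ? a : b; // element-wise select
+     }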
+
+- Added ``__builtin_masked_load``, ``__builtin_masked_expand_load``,
+ ``__builtin_masked_store``, ``__builtin_masked_compress_store`` for
+ conditional memory loads from vectors. Binds to the LLVM intrinsics of the
+ same name.
+
+- The ``__builtin_popcountg``, ``__builtin_ctzg``, and ``__builtin_clzg``
+ functions now accept fixed-size boolean vectors.
+
+- Use of ``__has_feature`` to detect the ``ptrauth_qualifier`` and ``ptrauth_intrinsics``
+ features has been deprecated, and is restricted to the arm64e target only. The
+ correct method to check for these features is to test for the ``__PTRAUTH__``
+ macro.
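+
+  For example:
+
+  .. code-block:: c
+
+     #if defined(__PTRAUTH__)
+     #include <ptrauth.h> /* pointer authentication support is available */
+     #endif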
+
+- Added a new builtin, ``__builtin_dedup_pack``, to remove duplicate types from a parameter pack.
+ This feature is particularly useful in template metaprogramming for normalizing type lists.
+ The builtin produces a new, unexpanded parameter pack that can be used in contexts like template
+ argument lists or base specifiers.
+
+ .. code-block:: c++
+
+ template <typename...> struct TypeList;
+
+ // The resulting type is TypeList<int, double, char>
+ using MyTypeList = TypeList<__builtin_dedup_pack<int, double, int, char, double>...>;
+
+  Currently, the use of ``__builtin_dedup_pack`` is limited to template
+  arguments and base specifiers; it must also be used within a template
+  context.
+
New Compiler Flags
------------------
-- New option ``-fno-sanitize-annotate-debug-info-traps`` added to disable emitting trap reasons into the debug info when compiling with trapping UBSan (e.g. ``-fsanitize-trap=undefined``).
+- New option ``-fno-sanitize-debug-trap-reasons`` added to disable emitting trap reasons into the debug info when compiling with trapping UBSan (e.g. ``-fsanitize-trap=undefined``).
+- New option ``-fsanitize-debug-trap-reasons=`` added to control emitting trap reasons into the debug info when compiling with trapping UBSan (e.g. ``-fsanitize-trap=undefined``).
+
+
+Lanai Support
+^^^^^^^^^^^^^^
+- The option ``-mcmodel={small,medium,large}`` is supported again.
Deprecated Compiler Flags
-------------------------
@@ -143,8 +232,47 @@ Improvements to Clang's diagnostics
Moved the warning for a missing (though implied) attribute on a redeclaration into this group.
Added a new warning in this group for the case where the attribute is missing/implicit on
an override of a virtual method.
-- Fixed fix-it hint for fold expressions. Clang now correctly places the suggested right
+- Fixed fix-it hint for fold expressions. Clang now correctly places the suggested right
parenthesis when diagnosing malformed fold expressions. (#GH151787)
+- Added fix-it hint for when scoped enumerations require explicit conversions for binary operations. (#GH24265)
+
+- Fixed an issue where emitted format-signedness diagnostics were not associated with an appropriate
+ diagnostic id. Besides being incorrect from an API standpoint, this was user visible, e.g.:
+ "format specifies type 'unsigned int' but the argument has type 'int' [-Wformat]"
+ "signedness of format specifier 'u' is incompatible with 'c' [-Wformat]"
+ This was misleading, because even though -Wformat is required in order to emit the diagnostics,
+  the warning flag the user needs to be concerned with here is -Wformat-signedness, which is also
+ required and is not enabled by default. With the change you'll now see:
+ "format specifies type 'unsigned int' but the argument has type 'int', which differs in signedness [-Wformat-signedness]"
+ "signedness of format specifier 'u' is incompatible with 'c' [-Wformat-signedness]"
+ and the API-visible diagnostic id will be appropriate.
+
+- Fixed false positives in ``-Waddress-of-packed-member`` diagnostics when
+ potential misaligned members get processed before they can get discarded.
+ (#GH144729)
+
+- Clang now emits a diagnostic with the correct message when assigning to a
+  const reference captured in a lambda. (#GH105647)
+
+- Fixed false positive in ``-Wmissing-noreturn`` diagnostic when it was requiring the usage of
+ ``[[noreturn]]`` on lambdas before C++23 (#GH154493).
+
+- Clang now diagnoses the use of ``#`` and ``##`` preprocessor tokens in
+ attribute argument lists in C++ when ``-pedantic`` is enabled. The operators
+ can be used in macro replacement lists with the usual preprocessor semantics,
+ however, non-preprocessor use of tokens now triggers a pedantic warning in C++.
+ Compilation in C mode is unchanged, and still permits these tokens to be used. (#GH147217)
+
+- Clang now diagnoses misplaced array bounds on declarators for template
+  specializations in the same way as it already did for other declarators.
+ (#GH147333)
+
+- A new warning ``-Walloc-size`` has been added to detect calls to functions
+  decorated with the ``alloc_size`` attribute that don't allocate enough space
+  for the target pointer type.
+
+- The :doc:`ThreadSafetyAnalysis` attributes ``ACQUIRED_BEFORE(...)`` and
+ ``ACQUIRED_AFTER(...)`` have been moved to the stable feature set and no
+ longer require ``-Wthread-safety-beta`` to be used.
Improvements to Clang's time-trace
----------------------------------
@@ -156,11 +284,24 @@ Bug Fixes in This Version
-------------------------
- Fix a crash when macro name is empty in ``#pragma push_macro("")`` or
``#pragma pop_macro("")``. (#GH149762).
-- `-Wunreachable-code`` now diagnoses tautological or contradictory
+- Fix a crash when a variable length array (e.g. ``int a[*]``) function
+  parameter type is used in a ``_Countof`` expression. (#GH152826).
+- ``-Wunreachable-code`` now diagnoses tautological or contradictory
comparisons such as ``x != 0 || x != 1.0`` and ``x == 0 && x == 1.0`` on
targets that treat ``_Float16``/``__fp16`` as native scalar types. Previously
the warning was silently lost because the operands differed only by an implicit
cast chain. (#GH149967).
+- Fix crash in ``__builtin_function_start`` by checking for invalid
+ first parameter. (#GH113323).
+- Fixed a crash with incompatible pointer to integer conversions in designated
+ initializers involving string literals. (#GH154046)
+- Clang now emits a frontend error when a function marked with the ``flatten`` attribute
+ calls another function that requires target features not enabled in the caller. This
+ prevents a fatal error in the backend.
+- Fixed scope of typedefs present inside a template class. (#GH91451)
+- Builtin elementwise operators now accept vector arguments that have different
+  qualifiers on their elements; for example, a vector of 4 ``const float``
+  values and a vector of 4 ``float`` values. (#GH155405)
Bug Fixes to Compiler Builtins
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -170,8 +311,11 @@ Bug Fixes to Compiler Builtins
Bug Fixes to Attribute Support
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-- ``[[nodiscard]]`` is now respected on Objective-C and Objective-C++ methods.
- (#GH141504)
+- ``[[nodiscard]]`` is now respected on Objective-C and Objective-C++ methods
+ (#GH141504) and on types returned from indirect calls (#GH142453).
+- Fixed some late-parsed attributes, when applied to function definitions, not
+  being parsed in function try blocks, and in some situations where parsing of
+  the function body is skipped, such as error recovery and code completion.
+  (#GH153551)
- Using ``[[gnu::cleanup(some_func)]]`` where some_func is annotated with
``[[gnu::error("some error")]]`` now correctly triggers an error. (#GH146520)
@@ -180,13 +324,26 @@ Bug Fixes to C++ Support
- Diagnose binding a reference to ``*nullptr`` during constant evaluation. (#GH48665)
- Suppress ``-Wdeprecated-declarations`` in implicitly generated functions. (#GH147293)
- Fix a crash when deleting a pointer to an incomplete array (#GH150359).
+- Fixed a mismatched lambda scope bug when propagating up ``consteval`` within nested lambdas. (#GH145776)
- Fix an assertion failure when expression in assumption attribute
(``[[assume(expr)]]``) creates temporary objects.
- Fix the dynamic_cast to final class optimization to correctly handle
casts that are guaranteed to fail (#GH137518).
+- Fix bug rejecting partial specialization of variable templates with auto NTTPs (#GH118190).
+- Fix a crash if errors "member of anonymous [...] redeclares" and
+  "initializing multiple members of union" coincide (#GH149985).
+- Fix a crash when using ``explicit(bool)`` in pre-C++11 language modes. (#GH152729)
+- Fix the parsing of variadic member functions when the ellipsis immediately
+  follows a default argument. (#GH153445)
+- Fixed a bug that caused ``this`` captured by value in a lambda with a dependent explicit object parameter to not be
+ instantiated properly. (#GH154054)
Bug Fixes to AST Handling
^^^^^^^^^^^^^^^^^^^^^^^^^
+- Fix incorrect name qualifiers applied to alias CTAD. (#GH136624)
+- Fixed ElaboratedTypes appearing within NestedNameSpecifier, which was not a
+  legal representation; this is resolved because ElaboratedTypes no longer
+  exist. (#GH43179) (#GH68670) (#GH92757)
+- Fix unrecognized html tag causing undesirable comment lexing (#GH152944)
+- Fix comment lexing of special command names (#GH152943)
Miscellaneous Bug Fixes
^^^^^^^^^^^^^^^^^^^^^^^
@@ -208,6 +365,13 @@ NVPTX Support
X86 Support
^^^^^^^^^^^
+- More SSE, AVX and AVX512 intrinsics, including initializers and general
+  arithmetic, can now be used in C++ constant expressions.
+- Some SSE, AVX and AVX512 intrinsics have been converted to wrap
+  generic ``__builtin`` intrinsics.
+- NOTE: Please avoid use of the ``__builtin_ia32_*`` intrinsics; these are not
+  guaranteed to exist in future releases, or to match behaviour with previous
+  releases of clang or other compilers.
Arm and AArch64 Support
^^^^^^^^^^^^^^^^^^^^^^^
@@ -227,6 +391,9 @@ RISC-V Support
- Add support for `__attribute__((interrupt("rnmi")))` to be used with the `Smrnmi` extension.
With this the `Smrnmi` extension is fully supported.
+- Add ``-march=unset`` to clear any previous ``-march=`` value. The ISA string
+  will then be computed from ``-mcpu`` or the platform default.
+
CUDA/HIP Language Changes
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -256,11 +423,20 @@ Fixed Point Support in Clang
AST Matchers
------------
+- Removed the ``elaboratedType`` matchers, with related nested name specifier
+  changes, following the corresponding changes in the clang AST.
- Ensure ``hasBitWidth`` doesn't crash on bit widths that are dependent on template
parameters.
+- Add a boolean member ``IgnoreSystemHeaders`` to ``MatchFinderOptions``. This
+ allows it to ignore nodes in system headers when traversing the AST.
+
+- ``hasConditionVariableStatement`` now supports ``for``, ``while``, and
+  ``switch`` statements.
+
clang-format
------------
+- Add ``SpaceInEmptyBraces`` option and set it to ``Always`` for WebKit style.
libclang
--------
@@ -280,8 +456,9 @@ New features
Crash and bug fixes
^^^^^^^^^^^^^^^^^^^
-- Fixed a crash in the static analyzer that when the expression in an
+- Fixed a crash in the static analyzer that occurred when the expression in an
``[[assume(expr)]]`` attribute was enclosed in parentheses. (#GH151529)
+- Fixed a crash when parsing ``#embed`` parameters with unmatched closing brackets. (#GH152829)
Improvements
^^^^^^^^^^^^
@@ -296,12 +473,14 @@ Sanitizers
Python Binding Changes
----------------------
+- Exposed ``clang_getCursorLanguage`` via ``Cursor.language``.
OpenMP Support
--------------
- Added parsing and semantic analysis support for the ``need_device_addr``
modifier in the ``adjust_args`` clause.
- Allow array length to be omitted in array section subscript expression.
+- Fixed non-contiguous strided update in the ``omp target update`` directive with the ``from`` clause.
Improvements
^^^^^^^^^^^^
diff --git a/clang/docs/ReleaseNotesTemplate.txt b/clang/docs/ReleaseNotesTemplate.txt
new file mode 100644
index 0000000..cc04deb
--- /dev/null
+++ b/clang/docs/ReleaseNotesTemplate.txt
@@ -0,0 +1,246 @@
+.. If you want to modify sections/contents permanently, you should modify both
+ ReleaseNotes.rst and ReleaseNotesTemplate.txt.
+
+===========================================
+Clang |release| |ReleaseNotesTitle|
+===========================================
+
+.. contents::
+ :local:
+ :depth: 2
+
+Written by the `LLVM Team <https://llvm.org/>`_
+
+.. only:: PreRelease
+
+ .. warning::
+ These are in-progress notes for the upcoming Clang |version| release.
+ Release notes for previous releases can be found on
+ `the Releases Page <https://llvm.org/releases/>`_.
+
+Introduction
+============
+
+This document contains the release notes for the Clang C/C++/Objective-C
+frontend, part of the LLVM Compiler Infrastructure, release |release|. Here we
+describe the status of Clang in some detail, including major
+improvements from the previous release and new feature work. For the
+general LLVM release notes, see `the LLVM
+documentation <https://llvm.org/docs/ReleaseNotes.html>`_. For the libc++ release notes,
+see `this page <https://libcxx.llvm.org/ReleaseNotes.html>`_. All LLVM releases
+may be downloaded from the `LLVM releases web site <https://llvm.org/releases/>`_.
+
+For more information about Clang or LLVM, including information about the
+latest release, please see the `Clang Web Site <https://clang.llvm.org>`_ or the
+`LLVM Web Site <https://llvm.org>`_.
+
+Potentially Breaking Changes
+============================
+
+C/C++ Language Potentially Breaking Changes
+-------------------------------------------
+
+C++ Specific Potentially Breaking Changes
+-----------------------------------------
+
+ABI Changes in This Version
+---------------------------
+
+AST Dumping Potentially Breaking Changes
+----------------------------------------
+
+Clang Frontend Potentially Breaking Changes
+-------------------------------------------
+
+Clang Python Bindings Potentially Breaking Changes
+--------------------------------------------------
+
+What's New in Clang |release|?
+==============================
+
+C++ Language Changes
+--------------------
+
+C++2c Feature Support
+^^^^^^^^^^^^^^^^^^^^^
+
+C++23 Feature Support
+^^^^^^^^^^^^^^^^^^^^^
+
+C++20 Feature Support
+^^^^^^^^^^^^^^^^^^^^^
+
+C++17 Feature Support
+^^^^^^^^^^^^^^^^^^^^^
+
+Resolutions to C++ Defect Reports
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+C Language Changes
+------------------
+
+C2y Feature Support
+^^^^^^^^^^^^^^^^^^^
+
+C23 Feature Support
+^^^^^^^^^^^^^^^^^^^
+
+Non-comprehensive list of changes in this release
+-------------------------------------------------
+
+New Compiler Flags
+------------------
+
+Deprecated Compiler Flags
+-------------------------
+
+Modified Compiler Flags
+-----------------------
+
+Removed Compiler Flags
+----------------------
+
+Attribute Changes in Clang
+--------------------------
+
+Improvements to Clang's diagnostics
+-----------------------------------
+
+Improvements to Clang's time-trace
+----------------------------------
+
+Improvements to Coverage Mapping
+--------------------------------
+
+Bug Fixes in This Version
+-------------------------
+
+Bug Fixes to Compiler Builtins
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Bug Fixes to Attribute Support
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Bug Fixes to C++ Support
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Bug Fixes to AST Handling
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Miscellaneous Bug Fixes
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Miscellaneous Clang Crashes Fixed
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+OpenACC Specific Changes
+------------------------
+
+Target Specific Changes
+-----------------------
+
+AMDGPU Support
+^^^^^^^^^^^^^^
+
+NVPTX Support
+^^^^^^^^^^^^^^
+
+X86 Support
+^^^^^^^^^^^
+
+Arm and AArch64 Support
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Android Support
+^^^^^^^^^^^^^^^
+
+Windows Support
+^^^^^^^^^^^^^^^
+
+LoongArch Support
+^^^^^^^^^^^^^^^^^
+
+RISC-V Support
+^^^^^^^^^^^^^^
+
+CUDA/HIP Language Changes
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+CUDA Support
+^^^^^^^^^^^^
+
+AIX Support
+^^^^^^^^^^^
+
+NetBSD Support
+^^^^^^^^^^^^^^
+
+WebAssembly Support
+^^^^^^^^^^^^^^^^^^^
+
+AVR Support
+^^^^^^^^^^^
+
+DWARF Support in Clang
+----------------------
+
+Floating Point Support in Clang
+-------------------------------
+
+Fixed Point Support in Clang
+----------------------------
+
+AST Matchers
+------------
+
+clang-format
+------------
+
+libclang
+--------
+
+Code Completion
+---------------
+
+Static Analyzer
+---------------
+
+New features
+^^^^^^^^^^^^
+
+Crash and bug fixes
+^^^^^^^^^^^^^^^^^^^
+
+Improvements
+^^^^^^^^^^^^
+
+Moved checkers
+^^^^^^^^^^^^^^
+
+.. _release-notes-sanitizers:
+
+Sanitizers
+----------
+
+Python Binding Changes
+----------------------
+
+OpenMP Support
+--------------
+
+Improvements
+^^^^^^^^^^^^
+
+Additional Information
+======================
+
+A wide variety of additional information is available on the `Clang web
+page <https://clang.llvm.org/>`_. The web page contains versions of the
+API documentation which are up-to-date with the Git version of
+the source code. You can access versions of these documents specific to
+this release by going into the "``clang/docs/``" directory in the Clang
+tree.
+
+If you have any questions or comments about Clang, please feel free to
+contact us on the `Discourse forums (Clang Frontend category)
+<https://discourse.llvm.org/c/clang/6>`_.
diff --git a/clang/docs/SanitizerSpecialCaseList.rst b/clang/docs/SanitizerSpecialCaseList.rst
index 194f2fc..307c001 100644
--- a/clang/docs/SanitizerSpecialCaseList.rst
+++ b/clang/docs/SanitizerSpecialCaseList.rst
@@ -128,7 +128,7 @@ precedence. Here are a few examples.
type:T
$ cat ignorelist4.txt
- # Function `bad_bar`` will be instrumented.
+ # Function `bad_bar` will be instrumented.
# Function `good_bar` will not be instrumented.
fun:*
fun:*bar
diff --git a/clang/docs/SourceBasedCodeCoverage.rst b/clang/docs/SourceBasedCodeCoverage.rst
index 3e86424..2f11407 100644
--- a/clang/docs/SourceBasedCodeCoverage.rst
+++ b/clang/docs/SourceBasedCodeCoverage.rst
@@ -66,17 +66,17 @@ supported. Uninstrumented code simply won't be accounted for in reports.
To compile code with Modified Condition/Decision Coverage (MC/DC) enabled,
pass ``-fcoverage-mcdc`` in addition to the clang options specified above.
-MC/DC is an advanced form of code coverage most applicable in the embedded
+MC/DC is an advanced form of code coverage most applicable to the embedded
space.
Running the instrumented program
================================
-The next step is to run the instrumented program. When the program exits it
+The next step is to run the instrumented program. When the program exits, it
will write a **raw profile** to the path specified by the ``LLVM_PROFILE_FILE``
environment variable. If that variable does not exist, the profile is written
to ``default.profraw`` in the current directory of the program. If
-``LLVM_PROFILE_FILE`` contains a path to a non-existent directory, the missing
+``LLVM_PROFILE_FILE`` specifies a path to a non-existent directory, the missing
directory structure will be created. Additionally, the following special
**pattern strings** are rewritten:
@@ -97,7 +97,7 @@ directory structure will be created. Additionally, the following special
* "%b" expands out to the binary ID (build ID). It can be used with "%Nm" to
avoid binary signature collisions. To use it, the program should be compiled
with the build ID linker option (``--build-id`` for GNU ld or LLD,
- ``/build-id`` for lld-link on Windows). Linux, Windows and AIX are supported.
+ ``/build-id`` for lld-link on Windows). Linux, Windows, and AIX are supported.
* "%c" expands out to nothing, but enables a mode in which profile counter
updates are continuously synced to a file. This means that if the
@@ -128,7 +128,7 @@ and set bias to the offset between the original and the new counter location,
at which point every subsequent counter access will be to the new location,
which allows updating profile directly akin to the continuous mode.
-The advantage of this approach is that doesn't require any special OS support.
+The advantage of this approach is that it doesn't require any special OS support.
The disadvantage is the extra overhead due to additional instructions required
for each counter access (overhead both in terms of binary size and performance)
plus duplication of counters (i.e. one copy in the binary itself and another
@@ -137,7 +137,7 @@ other platforms by passing the ``-runtime-counter-relocation`` option to the
backend during compilation.
For a program such as the `Lit <https://llvm.org/docs/CommandGuide/lit.html>`_
-testing tool which invokes other programs, it may be necessary to set
+testing tool, which invokes other programs, it may be necessary to set
``LLVM_PROFILE_FILE`` for each invocation. The pattern strings "%p" or "%Nm"
may help to avoid corruption due to concurrency. Note that "%p" is also a Lit
token and needs to be escaped as "%%p".
@@ -149,7 +149,7 @@ token and needs to be escaped as "%%p".
Creating coverage reports
=========================
-Raw profiles have to be **indexed** before they can be used to generate
+Raw profiles must be **indexed** before they can be used to generate
coverage reports. This is done using the "merge" tool in ``llvm-profdata``
(which can combine multiple raw profiles and index them at the same time):
@@ -240,13 +240,13 @@ line-oriented report, try:
TOTAL 13 0 100.00% 3 0 100.00% 13 0 100.00% 12 2 83.33%
The ``llvm-cov`` tool supports specifying a custom demangler, writing out
-reports in a directory structure, and generating html reports. For the full
+reports in a directory structure, and generating HTML reports. For the full
list of options, please refer to the `command guide
<https://llvm.org/docs/CommandGuide/llvm-cov.html>`_.
A few final notes:
-* The ``-sparse`` flag is optional but can result in dramatically smaller
+* The ``-sparse`` flag is optional but can produce dramatically smaller
indexed profiles. This option should not be used if the indexed profile will
be reused for PGO.
@@ -255,7 +255,7 @@ A few final notes:
information directly into an existing raw profile on disk. The details are
out of scope.
-* The ``llvm-profdata`` tool can be used to merge together multiple raw or
+* The ``llvm-profdata`` tool can be used to merge multiple raw or
indexed profiles. To combine profiling data from multiple runs of a program,
try e.g:
@@ -299,7 +299,7 @@ There are six statistics tracked in a coverage summary:
source code that may each evaluate to either "true" or "false". These
conditions may comprise larger boolean expressions linked by boolean logical
operators. For example, "x = (y == 2) || (z < 10)" is a boolean expression
- that is comprised of two individual conditions, each of which evaluates to
+ comprised of two individual conditions, each of which evaluates to
either true or false, producing four total branch outcomes.
* Modified Condition/Decision Coverage (MC/DC) is the percentage of individual
@@ -316,7 +316,7 @@ There are six statistics tracked in a coverage summary:
``-show-mcdc-summary`` option as long as code was also compiled using the
clang option ``-fcoverage-mcdc``.
- * Boolean expressions that are only comprised of one condition (and therefore
+ * Boolean expressions comprised of only one condition (and therefore
have no logical operators) are not included in MC/DC analysis and are
trivially deducible using branch coverage.
@@ -366,7 +366,7 @@ By default the compiler runtime uses a static initializer to determine the
profile output path and to register a writer function. To collect profiles
without using static initializers, do this manually:
-* Export a ``int __llvm_profile_runtime`` symbol from each instrumented shared
+* Export an ``int __llvm_profile_runtime`` symbol from each instrumented shared
library and executable. When the linker finds a definition of this symbol, it
knows to skip loading the object which contains the profiling runtime's
static initializer.
@@ -380,7 +380,7 @@ without using static initializers, do this manually:
to ``__llvm_profile_write_file``.
* Forward-declare ``int __llvm_profile_write_file(void)`` and call it to write
- out a profile. This function returns 0 when it succeeds, and a non-zero value
+ out a profile. This function returns 0 on success, and a non-zero value
otherwise. Calling this function multiple times appends profile data to an
existing on-disk raw profile.
@@ -418,7 +418,7 @@ Collecting coverage reports for the llvm project
================================================
To prepare a coverage report for llvm (and any of its sub-projects), add
-``-DLLVM_BUILD_INSTRUMENTED_COVERAGE=On`` to the cmake configuration. Raw
+``-DLLVM_BUILD_INSTRUMENTED_COVERAGE=On`` to the CMake configuration. Raw
profiles will be written to ``$BUILD_DIR/profiles/``. To prepare an html
report, run ``llvm/utils/prepare-code-coverage-artifact.py``.
@@ -429,7 +429,7 @@ To specify an alternate directory for raw profiles, use
Drawbacks and limitations
=========================
-* Prior to version 2.26, the GNU binutils BFD linker is not able link programs
+* Prior to version 2.26, the GNU binutils BFD linker cannot link programs
compiled with ``-fcoverage-mapping`` in its ``--gc-sections`` mode. Possible
workarounds include disabling ``--gc-sections``, upgrading to a newer version
of BFD, or using the Gold linker.
diff --git a/clang/docs/StandardCPlusPlusModules.rst b/clang/docs/StandardCPlusPlusModules.rst
index 31d0a5e..7155ad6 100644
--- a/clang/docs/StandardCPlusPlusModules.rst
+++ b/clang/docs/StandardCPlusPlusModules.rst
@@ -226,8 +226,8 @@ one-phase compilation model is simpler for build systems to implement while the
two-phase compilation has the potential to compile faster due to higher
parallelism. As an example, if there are two module units ``A`` and ``B``, and
``B`` depends on ``A``, the one-phase compilation model needs to compile them
-serially, whereas the two-phase compilation model is able to be compiled as
-soon as ``A.pcm`` is available, and thus can be compiled simultaneously as the
+serially, whereas the two-phase compilation model can be compiled as
+soon as ``A.pcm`` is available, and thus can be compiled simultaneously with the
``A.pcm`` to ``A.o`` compilation step.
File name requirements
@@ -391,7 +391,7 @@ And the compilation processes for module units are like:
As the diagrams show, we need to compile the BMI from module units to object
files and then link the object files. (However, this cannot be done for the BMI
from header units. See the section on :ref:`header units <header-units>` for
-more details.
+more details.)
BMIs cannot be shipped in an archive to create a module library. Instead, the
BMIs(``*.pcm``) are compiled into object files(``*.o``) and those object files
@@ -403,7 +403,7 @@ clang-cl
``clang-cl`` supports the same options as ``clang++`` for modules as detailed above;
there is no need to prefix these options with ``/clang:``. Note that ``cl.exe``
`options to emit/consume IFC files <https://devblogs.microsoft.com/cppblog/using-cpp-modules-in-msvc-from-the-command-line-part-1/>` are *not* supported.
-The resultant precompiled modules are also not compatible for use with ``cl.exe``.
+The resulting precompiled modules are also not compatible for use with ``cl.exe``.
We recommend that build system authors use the above-mentioned ``clang++`` options with ``clang-cl`` to build modules.
@@ -411,7 +411,7 @@ Consistency Requirements
~~~~~~~~~~~~~~~~~~~~~~~~
Modules can be viewed as a kind of cache to speed up compilation. Thus, like
-other caching techniques, it is important to maintain cache consistency which
+other caching techniques, it is important to maintain cache consistency, which
is why Clang does very strict checking for consistency.
Options consistency
@@ -472,8 +472,8 @@ To overcome these requirements and simplify cases like distributed builds and sa
builds, users can use the ``-fmodules-embed-all-files`` flag to embed all input files
into the BMI so that Clang does not need to open the corresponding file on disk.
-When the ``-fmodules-embed-all-files`` flag are enabled, Clang explicitly emits the source
-code into the BMI file, the contents of the BMI file contain a sufficiently verbose
+When the ``-fmodules-embed-all-files`` flag is enabled, Clang explicitly emits the source
+code into the BMI file; the BMI file contains a sufficiently verbose
representation to reproduce the original source file.
.. [1] Input files: The source files which took part in the compilation of the BMI.
@@ -578,7 +578,7 @@ handle the dynamic initialization of non-inline variables in the module unit.
The importable module unit has to emit the initializer even if there is no
dynamic initialization; otherwise, the importer may call a nonexistent
function. The initializer function emits calls to imported modules first
-followed by calls to all to of the dynamic initializers in the current module
+followed by calls to all of the dynamic initializers in the current module
unit.
Translation units that explicitly or implicitly import a named module must call
@@ -689,9 +689,9 @@ ensure it is reachable, e.g. ``using N::g;``.
As of Clang 22.x, the Reduced BMI is enabled by default. You may still want to
use Full BMI with ``-fno-modules-reduced-bmi`` in the following case:
-1. Your build system uses two-phase compilation but it haven't adjusted the
+1. Your build system uses two-phase compilation, but it hasn't adjusted the
implementation for reduced BMI.
-2. You meet a regression with Reduced BMI that you cannot work around. Please
+2. You encounter a regression with Reduced BMI that you cannot work around. Please
report an issue for this case.
Experimental Non-Cascading Changes
@@ -699,7 +699,7 @@ Experimental Non-Cascading Changes
This section is primarily for build system vendors. For end compiler users,
if you don't want to read it all, this is helpful to reduce recompilations.
-We encourage build system vendors and end users try this out and bring feedback.
+We encourage build system vendors and end users to try this out and bring feedback.
Before Clang 19, a change in BMI of any (transitive) dependency would cause the
outputs of the BMI to change. Starting with Clang 19, changes to non-direct
@@ -786,7 +786,7 @@ We encourage build systems to add an experimental mode that
reuses the cached BMI when **direct** dependencies did not change,
even if **transitive** dependencies did change.
-Given there are potential compiler bugs, we recommend that build systems
+Given that there are potential compiler bugs, we recommend that build systems
support this feature as a configurable option so that users
can go back to the transitive change mode safely at any time.
@@ -813,7 +813,7 @@ With reduced BMI, non-cascading changes can be more powerful. For example,
$ md5sum B.pcm
6c2bd452ca32ab418bf35cd141b060b9 B.pcm
-And let's change the implementation for ``A.cppm`` into:
+And let's change the implementation for ``A.cppm`` to:
.. code-block:: c++
@@ -830,7 +830,7 @@ and recompile the example:
$ md5sum B.pcm
6c2bd452ca32ab418bf35cd141b060b9 B.pcm
-We should find the contents of ``B.pcm`` remains the same. In this case, the build system is
+We should find the contents of ``B.pcm`` remain the same. In this case, the build system is
allowed to skip recompilations of TUs which solely and directly depend on module ``B``.
This only happens with a reduced BMI. With reduced BMIs, we won't record the function body
@@ -845,7 +845,7 @@ Reduce duplications
While it is valid to have duplicated declarations in the global module fragments
of different module units, it is not free for Clang to deal with the duplicated
-declarations. A translation unit will compile more slowly if there is a lot of
+declarations. A translation unit will compile more slowly if there are a lot of
duplicated declarations between the translation unit and modules it imports.
For example:
@@ -937,7 +937,7 @@ possible. However, it may be a breaking change for existing code or libraries
to switch to modules. As a result, many existing libraries need to provide
both headers and module interfaces for a while to not break existing users.
-This section suggests some suggestions on how to ease the transition process
+This section provides some suggestions on how to ease the transition process
for existing libraries. **Note that this information is only intended as
guidance, rather than as requirements to use modules in Clang.** It presumes
the project is starting with no module-based dependencies.
@@ -1140,7 +1140,7 @@ module unit which is internal to the module itself.
Providing a header to skip parsing redundant headers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Many redeclarations shared between translation units causes Clang to have
+Many redeclarations shared between translation units cause Clang to have
slower compile-time performance. Further, there are known issues with
`include after import <https://github.com/llvm/llvm-project/issues/61465>`_.
Even when that issue is resolved, users may still get slower compilation speed
@@ -1408,8 +1408,8 @@ P1857R3 is implemented. This is tracked by
Until then, it is recommended not to mix macros with module declarations.
-In consistent filename suffix requirement for importable module units
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Inconsistent filename suffix requirement for importable module units
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Currently, Clang requires the file name of an ``importable module unit`` to
have ``.cppm`` (or ``.ccm``, ``.cxxm``, ``.c++m``) as the file extension.
@@ -1484,7 +1484,7 @@ How to build projects using header units
.. warning::
The support for header units, including related command line options, is
- experimental. There are still many unanswered question about how tools
+ experimental. There are still many unanswered questions about how tools
should interact with header units. The details described here may change in
the future.
@@ -1881,7 +1881,7 @@ Individual command line options can be specified after ``--``.
options. Note that the path to the compiler executable needs to be specified
explicitly instead of using ``clang++`` directly.
-Users may want the scanner to get the transitional dependency information for
+Users may want the scanner to get the transitive dependency information for
headers. Otherwise, the project has to be scanned twice, once for headers and
once for modules. To address this, ``clang-scan-deps`` will recognize the
specified preprocessor options in the given command line and generate the
@@ -1912,7 +1912,7 @@ Possible Issues: Failed to find system headers
If encountering an error like ``fatal error: 'stddef.h' file not found``,
the specified ``<path-to-compiler-executable>/clang++`` probably refers to a
-symlink instead a real binary. There are four potential solutions to the
+symlink instead of a real binary. There are four potential solutions to the
problem:
1. Point the specified compiler executable to the real binary instead of the
diff --git a/clang/docs/ThreadSafetyAnalysis.rst b/clang/docs/ThreadSafetyAnalysis.rst
index 4fc7ff2..853a8fa 100644
--- a/clang/docs/ThreadSafetyAnalysis.rst
+++ b/clang/docs/ThreadSafetyAnalysis.rst
@@ -825,13 +825,6 @@ doesn't know that munl.mu == mutex. The SCOPED_CAPABILITY attribute handles
aliasing for MutexLocker, but does so only for that particular pattern.
-ACQUIRED_BEFORE(...) and ACQUIRED_AFTER(...) support is still experimental.
----------------------------------------------------------------------------
-
-ACQUIRED_BEFORE(...) and ACQUIRED_AFTER(...) are currently being developed under
-the ``-Wthread-safety-beta`` flag.
-
-
.. _mutexheader:
mutex.h
diff --git a/clang/docs/UsersManual.rst b/clang/docs/UsersManual.rst
index c703929..0e85c81 100644
--- a/clang/docs/UsersManual.rst
+++ b/clang/docs/UsersManual.rst
@@ -2635,7 +2635,7 @@ violates the strict aliasing rules. For example:
Strict aliasing can be explicitly enabled with ``-fstrict-aliasing`` and
disabled with ``-fno-strict-aliasing``. ``clang-cl`` defaults to
-``-fno-strict-aliasing``; see . Otherwise, Clang defaults to ``-fstrict-aliasing``.
+``-fno-strict-aliasing``. Otherwise, Clang defaults to ``-fstrict-aliasing``.
C and C++ specify slightly different rules for strict aliasing. To improve
language interoperability, Clang allows two types to alias if either language
diff --git a/clang/docs/index.rst b/clang/docs/index.rst
index 542bfc9..be654af 100644
--- a/clang/docs/index.rst
+++ b/clang/docs/index.rst
@@ -45,6 +45,7 @@ Using Clang as a Compiler
BoundsSafetyImplPlans
ControlFlowIntegrity
LTOVisibility
+ PointerAuthentication
SafeStack
ShadowCallStack
SourceBasedCodeCoverage
diff --git a/clang/include/clang/APINotes/Types.h b/clang/include/clang/APINotes/Types.h
index 0f2e496..7162571 100644
--- a/clang/include/clang/APINotes/Types.h
+++ b/clang/include/clang/APINotes/Types.h
@@ -141,6 +141,9 @@ class CommonTypeInfo : public CommonEntityInfo {
/// The NS error domain for this type.
std::optional<std::string> NSErrorDomain;
+ /// The Swift protocol that this type should automatically conform to.
+ std::optional<std::string> SwiftConformance;
+
public:
CommonTypeInfo() {}
@@ -165,6 +168,14 @@ public:
: std::nullopt;
}
+ std::optional<std::string> getSwiftConformance() const {
+ return SwiftConformance;
+ }
+
+ void setSwiftConformance(std::optional<std::string> conformance) {
+ SwiftConformance = conformance;
+ }
+
friend bool operator==(const CommonTypeInfo &, const CommonTypeInfo &);
CommonTypeInfo &operator|=(const CommonTypeInfo &RHS) {
@@ -175,6 +186,8 @@ public:
setSwiftBridge(RHS.getSwiftBridge());
if (!NSErrorDomain)
setNSErrorDomain(RHS.getNSErrorDomain());
+ if (!SwiftConformance)
+ setSwiftConformance(RHS.getSwiftConformance());
return *this;
}
@@ -185,7 +198,8 @@ public:
inline bool operator==(const CommonTypeInfo &LHS, const CommonTypeInfo &RHS) {
return static_cast<const CommonEntityInfo &>(LHS) == RHS &&
LHS.SwiftBridge == RHS.SwiftBridge &&
- LHS.NSErrorDomain == RHS.NSErrorDomain;
+ LHS.NSErrorDomain == RHS.NSErrorDomain &&
+ LHS.SwiftConformance == RHS.SwiftConformance;
}
inline bool operator!=(const CommonTypeInfo &LHS, const CommonTypeInfo &RHS) {
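
The `operator|=` above implements a fill-in merge: each field is copied from the RHS only when it is not already set on the left-hand side (hence the `!SwiftConformance` check, matching the other fields). A minimal sketch of the resulting semantics, with illustrative values that are not part of this patch:

    // Sketch only: merging a more specific note over a baseline.
    clang::api_notes::CommonTypeInfo Base, Overlay;
    Overlay.setSwiftConformance(std::string("Sendable")); // hypothetical protocol
    Base.setNSErrorDomain(std::string("MyDomain"));
    Base |= Overlay; // Base keeps "MyDomain" and gains "Sendable"; each field
                     // merges from the RHS only when previously unset.
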
@@ -737,11 +751,9 @@ public:
std::optional<std::string> SwiftImportAs;
std::optional<std::string> SwiftRetainOp;
std::optional<std::string> SwiftReleaseOp;
+ std::optional<std::string> SwiftDestroyOp;
std::optional<std::string> SwiftDefaultOwnership;
- /// The Swift protocol that this type should be automatically conformed to.
- std::optional<std::string> SwiftConformance;
-
std::optional<EnumExtensibilityKind> EnumExtensibility;
TagInfo()
@@ -787,12 +799,11 @@ public:
SwiftRetainOp = RHS.SwiftRetainOp;
if (!SwiftReleaseOp)
SwiftReleaseOp = RHS.SwiftReleaseOp;
+ if (!SwiftDestroyOp)
+ SwiftDestroyOp = RHS.SwiftDestroyOp;
if (!SwiftDefaultOwnership)
SwiftDefaultOwnership = RHS.SwiftDefaultOwnership;
- if (!SwiftConformance)
- SwiftConformance = RHS.SwiftConformance;
-
if (!HasFlagEnum)
setFlagEnum(RHS.isFlagEnum());
@@ -818,8 +829,8 @@ inline bool operator==(const TagInfo &LHS, const TagInfo &RHS) {
LHS.SwiftImportAs == RHS.SwiftImportAs &&
LHS.SwiftRetainOp == RHS.SwiftRetainOp &&
LHS.SwiftReleaseOp == RHS.SwiftReleaseOp &&
+ LHS.SwiftDestroyOp == RHS.SwiftDestroyOp &&
LHS.SwiftDefaultOwnership == RHS.SwiftDefaultOwnership &&
- LHS.SwiftConformance == RHS.SwiftConformance &&
LHS.isFlagEnum() == RHS.isFlagEnum() &&
LHS.isSwiftCopyable() == RHS.isSwiftCopyable() &&
LHS.isSwiftEscapable() == RHS.isSwiftEscapable() &&
diff --git a/clang/include/clang/AST/APNumericStorage.h b/clang/include/clang/AST/APNumericStorage.h
index 95eddbc..e1948a5 100644
--- a/clang/include/clang/AST/APNumericStorage.h
+++ b/clang/include/clang/AST/APNumericStorage.h
@@ -28,7 +28,6 @@ class APNumericStorage {
uint64_t VAL; ///< Used to store the <= 64 bits integer value.
uint64_t *pVal; ///< Used to store the >64 bits integer value.
};
- unsigned BitWidth;
bool hasAllocation() const { return llvm::APInt::getNumWords(BitWidth) > 1; }
@@ -36,6 +35,7 @@ class APNumericStorage {
void operator=(const APNumericStorage &) = delete;
protected:
+ unsigned BitWidth;
APNumericStorage() : VAL(0), BitWidth(0) {}
llvm::APInt getIntValue() const {
@@ -51,6 +51,7 @@ protected:
class APIntStorage : private APNumericStorage {
public:
llvm::APInt getValue() const { return getIntValue(); }
+ unsigned getBitWidth() const { return BitWidth; }
void setValue(const ASTContext &C, const llvm::APInt &Val) {
setIntValue(C, Val);
}
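
With `BitWidth` moved to protected and the new accessor exposed, the width can be queried without materializing the `APInt`. A hedged usage sketch; the `dumpWidth` helper is hypothetical:

    // Hypothetical helper: query the width directly via the new accessor.
    void dumpWidth(const clang::APIntStorage &S, llvm::raw_ostream &OS) {
      OS << "stored integer uses " << S.getBitWidth() << " bits\n";
    }
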
diff --git a/clang/include/clang/AST/APValue.h b/clang/include/clang/AST/APValue.h
index 9999a30..cb942ea 100644
--- a/clang/include/clang/AST/APValue.h
+++ b/clang/include/clang/AST/APValue.h
@@ -143,7 +143,7 @@ public:
AddrLabelDiff
};
- class LValueBase {
+ class alignas(uint64_t) LValueBase {
typedef llvm::PointerUnion<const ValueDecl *, const Expr *, TypeInfoLValue,
DynamicAllocLValue>
PtrTy;
diff --git a/clang/include/clang/AST/ASTConcept.h b/clang/include/clang/AST/ASTConcept.h
index 7ccac44..72da005 100644
--- a/clang/include/clang/AST/ASTConcept.h
+++ b/clang/include/clang/AST/ASTConcept.h
@@ -15,7 +15,7 @@
#define LLVM_CLANG_AST_ASTCONCEPT_H
#include "clang/AST/DeclarationName.h"
-#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
#include "clang/AST/TemplateBase.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/UnsignedOrNone.h"
@@ -177,12 +177,7 @@ public:
SourceLocation getLocation() const { return getConceptNameLoc(); }
- SourceLocation getBeginLoc() const LLVM_READONLY {
- // Note that if the qualifier is null the template KW must also be null.
- if (auto QualifierLoc = getNestedNameSpecifierLoc())
- return QualifierLoc.getBeginLoc();
- return getConceptNameInfo().getBeginLoc();
- }
+ SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY {
return getTemplateArgsAsWritten() &&
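
The inline body becomes an out-of-line declaration, which lets this header depend only on NestedNameSpecifierBase.h. A plausible definition for ConceptReference::getBeginLoc, assuming the logic from the removed lines is kept unchanged (the patch does not show the .cpp side here):

    SourceLocation ConceptReference::getBeginLoc() const {
      // Note that if the qualifier is null the template KW must also be null.
      if (NestedNameSpecifierLoc QualifierLoc = getNestedNameSpecifierLoc())
        return QualifierLoc.getBeginLoc();
      return getConceptNameInfo().getBeginLoc();
    }
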
diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h
index 2b7ba41..1c17333 100644
--- a/clang/include/clang/AST/ASTContext.h
+++ b/clang/include/clang/AST/ASTContext.h
@@ -230,13 +230,16 @@ class ASTContext : public RefCountedBase<ASTContext> {
SubstTemplateTypeParmTypes;
mutable llvm::FoldingSet<SubstTemplateTypeParmPackType>
SubstTemplateTypeParmPackTypes;
+ mutable llvm::FoldingSet<SubstBuiltinTemplatePackType>
+ SubstBuiltinTemplatePackTypes;
mutable llvm::ContextualFoldingSet<TemplateSpecializationType, ASTContext&>
TemplateSpecializationTypes;
mutable llvm::FoldingSet<ParenType> ParenTypes{GeneralTypesLog2InitSize};
+ mutable llvm::FoldingSet<TagTypeFoldingSetPlaceholder> TagTypes;
+ mutable llvm::FoldingSet<FoldingSetPlaceholder<UnresolvedUsingType>>
+ UnresolvedUsingTypes;
mutable llvm::FoldingSet<UsingType> UsingTypes;
- mutable llvm::FoldingSet<TypedefType> TypedefTypes;
- mutable llvm::FoldingSet<ElaboratedType> ElaboratedTypes{
- GeneralTypesLog2InitSize};
+ mutable llvm::FoldingSet<FoldingSetPlaceholder<TypedefType>> TypedefTypes;
mutable llvm::FoldingSet<DependentNameType> DependentNameTypes;
mutable llvm::DenseMap<llvm::FoldingSetNodeID,
DependentTemplateSpecializationType *>
@@ -282,11 +285,11 @@ class ASTContext : public RefCountedBase<ASTContext> {
llvm::to_underlying(PredefinedSugarType::Kind::Last) + 1>
PredefinedSugarTypes{};
- /// The set of nested name specifiers.
+ /// Internal storage for NestedNameSpecifiers.
///
/// This set is managed by the NestedNameSpecifier class.
- mutable llvm::FoldingSet<NestedNameSpecifier> NestedNameSpecifiers;
- mutable NestedNameSpecifier *GlobalNestedNameSpecifier = nullptr;
+ mutable llvm::FoldingSet<NamespaceAndPrefixStorage>
+ NamespaceAndPrefixStorages;
/// A cache mapping from RecordDecls to ASTRecordLayouts.
///
@@ -639,7 +642,7 @@ public:
/// contain data that is address discriminated. This includes
/// implicitly authenticated values like vtable pointers, as well as
/// explicitly qualified fields.
- bool containsAddressDiscriminatedPointerAuth(QualType T) {
+ bool containsAddressDiscriminatedPointerAuth(QualType T) const {
if (!isPointerAuthenticationAvailable())
return false;
return findPointerAuthContent(T) != PointerAuthContent::None;
@@ -653,8 +656,7 @@ public:
bool containsNonRelocatablePointerAuth(QualType T) {
if (!isPointerAuthenticationAvailable())
return false;
- return findPointerAuthContent(T) ==
- PointerAuthContent::AddressDiscriminatedData;
+ return findPointerAuthContent(T) != PointerAuthContent::None;
}
private:
@@ -672,8 +674,8 @@ private:
bool isPointerAuthenticationAvailable() const {
return LangOpts.PointerAuthCalls || LangOpts.PointerAuthIntrinsics;
}
- PointerAuthContent findPointerAuthContent(QualType T);
- llvm::DenseMap<const RecordDecl *, PointerAuthContent>
+ PointerAuthContent findPointerAuthContent(QualType T) const;
+ mutable llvm::DenseMap<const RecordDecl *, PointerAuthContent>
RecordContainsAddressDiscriminatedPointerAuth;
ImportDecl *FirstLocalImport = nullptr;
@@ -1386,8 +1388,6 @@ private:
/// Return a type with extended qualifiers.
QualType getExtQualType(const Type *Base, Qualifiers Quals) const;
- QualType getTypeDeclTypeSlow(const TypeDecl *Decl) const;
-
QualType getPipeType(QualType T, bool ReadOnly) const;
public:
@@ -1630,7 +1630,7 @@ public:
/// Return the uniqued reference to the type for a member pointer to
/// the specified type in the specified nested name.
- QualType getMemberPointerType(QualType T, NestedNameSpecifier *Qualifier,
+ QualType getMemberPointerType(QualType T, NestedNameSpecifier Qualifier,
const CXXRecordDecl *Cls) const;
/// Return a non-unique reference to the type for a variable array of
@@ -1767,34 +1767,53 @@ private:
bool IsCanon = false) const;
public:
+ QualType getTypeDeclType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TypeDecl *Decl) const;
+
/// Return the unique reference to the type for the specified type
/// declaration.
- QualType getTypeDeclType(const TypeDecl *Decl,
- const TypeDecl *PrevDecl = nullptr) const {
- assert(Decl && "Passed null for Decl param");
- if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
-
- if (PrevDecl) {
- assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
- Decl->TypeForDecl = PrevDecl->TypeForDecl;
- return QualType(PrevDecl->TypeForDecl, 0);
- }
+ QualType getTypeDeclType(const TypeDecl *Decl) const;
- return getTypeDeclTypeSlow(Decl);
- }
+ /// Use the normal 'getFooBarType' constructors to obtain these types.
+ QualType getTypeDeclType(const TagDecl *) const = delete;
+ QualType getTypeDeclType(const TypedefDecl *) const = delete;
+ QualType getTypeDeclType(const TypeAliasDecl *) const = delete;
+ QualType getTypeDeclType(const UnresolvedUsingTypenameDecl *) const = delete;
+
+ CanQualType getCanonicalTypeDeclType(const TypeDecl *TD) const;
- QualType getUsingType(const UsingShadowDecl *Found,
- QualType Underlying) const;
+ QualType getUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const UsingShadowDecl *D,
+ QualType UnderlyingType = QualType()) const;
/// Return the unique reference to the type for the specified
/// typedef-name decl.
- QualType getTypedefType(const TypedefNameDecl *Decl,
- QualType Underlying = QualType()) const;
+ /// FIXME: TypeMatchesDeclOrNone is a workaround for a serialization issue:
+ /// The decl underlying type might still not be available.
+ QualType getTypedefType(
+ ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
+ const TypedefNameDecl *Decl, QualType UnderlyingType = QualType(),
+ std::optional<bool> TypeMatchesDeclOrNone = std::nullopt) const;
+
+ CanQualType getCanonicalTagType(const TagDecl *TD) const;
+ QualType getTagType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TagDecl *TD,
+ bool OwnsTag) const;
- QualType getRecordType(const RecordDecl *Decl) const;
+private:
+ UnresolvedUsingType *getUnresolvedUsingTypeInternal(
+ ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D, void *InsertPos,
+ const Type *CanonicalType) const;
- QualType getEnumType(const EnumDecl *Decl) const;
+ TagType *getTagTypeInternal(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TagDecl *Tag,
+ bool OwnsTag, bool IsInjected,
+ const Type *CanonicalType,
+ bool WithFoldingSetNode) const;
+public:
/// Compute BestType and BestPromotionType for an enum based on the highest
/// number of negative and positive bits of its elements.
/// Returns true if enum width is too large.
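
Call sites migrate from the removed `getRecordType`/`getEnumType`/`getTypeDeclType(Decl)` forms to the keyword-and-qualifier-aware overloads. A sketch of the new style under the signatures declared above; `Ctx`, `RD` (a RecordDecl), and `TD` (a TypedefNameDecl) are assumed:

    // Sketch: obtaining types through the elaboration-aware API.
    QualType Tag = Ctx.getTagType(ElaboratedTypeKeyword::None,
                                  /*Qualifier=*/std::nullopt, RD,
                                  /*OwnsTag=*/false);
    CanQualType Canon = Ctx.getCanonicalTagType(RD);
    QualType Alias = Ctx.getTypedefType(ElaboratedTypeKeyword::None,
                                        /*Qualifier=*/std::nullopt, TD);
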
@@ -1843,10 +1862,11 @@ public:
return MembersRepresentableByInt;
}
- QualType
- getUnresolvedUsingType(const UnresolvedUsingTypenameDecl *Decl) const;
-
- QualType getInjectedClassNameType(CXXRecordDecl *Decl, QualType TST) const;
+ CanQualType
+ getCanonicalUnresolvedUsingType(const UnresolvedUsingTypenameDecl *D) const;
+ QualType getUnresolvedUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D) const;
QualType getAttributedType(attr::Kind attrKind, QualType modifiedType,
QualType equivalentType,
@@ -1876,6 +1896,7 @@ public:
QualType getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
unsigned Index, bool Final,
const TemplateArgument &ArgPack);
+ QualType getSubstBuiltinTemplatePack(const TemplateArgument &ArgPack);
QualType
getTemplateTypeParmType(unsigned Depth, unsigned Index,
@@ -1886,18 +1907,20 @@ public:
TemplateName T, ArrayRef<TemplateArgument> CanonicalArgs) const;
QualType
- getTemplateSpecializationType(TemplateName T,
+ getTemplateSpecializationType(ElaboratedTypeKeyword Keyword, TemplateName T,
ArrayRef<TemplateArgument> SpecifiedArgs,
ArrayRef<TemplateArgument> CanonicalArgs,
QualType Underlying = QualType()) const;
QualType
- getTemplateSpecializationType(TemplateName T,
+ getTemplateSpecializationType(ElaboratedTypeKeyword Keyword, TemplateName T,
ArrayRef<TemplateArgumentLoc> SpecifiedArgs,
ArrayRef<TemplateArgument> CanonicalArgs,
QualType Canon = QualType()) const;
TypeSourceInfo *getTemplateSpecializationTypeInfo(
+ ElaboratedTypeKeyword Keyword, SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKeywordLoc,
TemplateName T, SourceLocation TLoc,
const TemplateArgumentListInfo &SpecifiedArgs,
ArrayRef<TemplateArgument> CanonicalArgs,
@@ -1908,11 +1931,8 @@ public:
QualType getMacroQualifiedType(QualType UnderlyingTy,
const IdentifierInfo *MacroII) const;
- QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS, QualType NamedType,
- TagDecl *OwnedTagDecl = nullptr) const;
QualType getDependentNameType(ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS,
+ NestedNameSpecifier NNS,
const IdentifierInfo *Name) const;
QualType getDependentTemplateSpecializationType(
@@ -1999,21 +2019,17 @@ public:
QualType getUnconstrainedType(QualType T) const;
/// C++17 deduced class template specialization type.
- QualType getDeducedTemplateSpecializationType(TemplateName Template,
+ QualType getDeducedTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
+ TemplateName Template,
QualType DeducedType,
bool IsDependent) const;
private:
- QualType getDeducedTemplateSpecializationTypeInternal(TemplateName Template,
- QualType DeducedType,
- bool IsDependent,
- QualType Canon) const;
+ QualType getDeducedTemplateSpecializationTypeInternal(
+ ElaboratedTypeKeyword Keyword, TemplateName Template,
+ QualType DeducedType, bool IsDependent, QualType Canon) const;
public:
- /// Return the unique reference to the type for the specified TagDecl
- /// (struct/union/class/enum) decl.
- QualType getTagDeclType(const TagDecl *Decl) const;
-
/// Return the unique type for "size_t" (C99 7.17), defined in
/// <stddef.h>.
///
@@ -2089,7 +2105,9 @@ public:
/// if it hasn't yet been built.
QualType getRawCFConstantStringType() const {
if (CFConstantStringTypeDecl)
- return getTypedefType(CFConstantStringTypeDecl);
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt,
+ CFConstantStringTypeDecl);
return QualType();
}
void setCFConstantStringType(QualType T);
@@ -2186,10 +2204,11 @@ public:
}
#include "clang/Basic/BuiltinTemplates.inc"
- /// Retrieve the Objective-C "instancetype" type, if already known;
- /// otherwise, returns a NULL type;
+ /// Retrieve the Objective-C "instancetype" type.
QualType getObjCInstanceType() {
- return getTypeDeclType(getObjCInstanceTypeDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt,
+ getObjCInstanceTypeDecl());
}
/// Retrieve the typedef declaration corresponding to the Objective-C
@@ -2202,7 +2221,8 @@ public:
/// Retrieve the C FILE type.
QualType getFILEType() const {
if (FILEDecl)
- return getTypeDeclType(FILEDecl);
+ return getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, FILEDecl);
return QualType();
}
@@ -2214,7 +2234,8 @@ public:
/// Retrieve the C jmp_buf type.
QualType getjmp_bufType() const {
if (jmp_bufDecl)
- return getTypeDeclType(jmp_bufDecl);
+ return getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, jmp_bufDecl);
return QualType();
}
@@ -2226,7 +2247,8 @@ public:
/// Retrieve the C sigjmp_buf type.
QualType getsigjmp_bufType() const {
if (sigjmp_bufDecl)
- return getTypeDeclType(sigjmp_bufDecl);
+ return getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, sigjmp_bufDecl);
return QualType();
}
@@ -2238,12 +2260,13 @@ public:
/// Retrieve the C ucontext_t type.
QualType getucontext_tType() const {
if (ucontext_tDecl)
- return getTypeDeclType(ucontext_tDecl);
+ return getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, ucontext_tDecl);
return QualType();
}
/// The result type of logical operations, '<', '>', '!=', etc.
- QualType getLogicalOperationType() const {
+ CanQualType getLogicalOperationType() const {
return getLangOpts().CPlusPlus ? BoolTy : IntTy;
}
@@ -2308,7 +2331,8 @@ public:
/// This is set up lazily, by Sema. \c id is always a (typedef for a)
/// pointer type, a pointer to a struct.
QualType getObjCIdType() const {
- return getTypeDeclType(getObjCIdDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, getObjCIdDecl());
}
/// Retrieve the typedef corresponding to the predefined 'SEL' type
@@ -2318,7 +2342,8 @@ public:
/// Retrieve the type that corresponds to the predefined Objective-C
/// 'SEL' type.
QualType getObjCSelType() const {
- return getTypeDeclType(getObjCSelDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, getObjCSelDecl());
}
PointerAuthQualifier getObjCMemberSelTypePtrAuth();
@@ -2332,7 +2357,8 @@ public:
/// This is set up lazily, by Sema. \c Class is always a (typedef for a)
/// pointer type, a pointer to a struct.
QualType getObjCClassType() const {
- return getTypeDeclType(getObjCClassDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, getObjCClassDecl());
}
/// Retrieve the Objective-C class declaration corresponding to
@@ -2351,7 +2377,8 @@ public:
/// type of 'BOOL' type.
QualType getBOOLType() const {
- return getTypeDeclType(getBOOLDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, getBOOLDecl());
}
/// Retrieve the type of the Objective-C \c Protocol class.
@@ -2365,7 +2392,8 @@ public:
/// Retrieve the type of the \c __builtin_va_list type.
QualType getBuiltinVaListType() const {
- return getTypeDeclType(getBuiltinVaListDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, getBuiltinVaListDecl());
}
/// Retrieve the C type declaration corresponding to the predefined
@@ -2379,16 +2407,17 @@ public:
/// Retrieve the type of the \c __builtin_ms_va_list type.
QualType getBuiltinMSVaListType() const {
- return getTypeDeclType(getBuiltinMSVaListDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, getBuiltinMSVaListDecl());
}
/// Retrieve the implicitly-predeclared 'struct _GUID' declaration.
TagDecl *getMSGuidTagDecl() const { return MSGuidTagDecl; }
/// Retrieve the implicitly-predeclared 'struct _GUID' type.
- QualType getMSGuidType() const {
+ CanQualType getMSGuidType() const {
assert(MSGuidTagDecl && "asked for GUID type but MS extensions disabled");
- return getTagDeclType(MSGuidTagDecl);
+ return getCanonicalTagType(MSGuidTagDecl);
}
/// Retrieve the implicitly-predeclared 'struct type_info' declaration.
@@ -2477,7 +2506,7 @@ public:
UnresolvedSetIterator End) const;
TemplateName getAssumedTemplateName(DeclarationName Name) const;
- TemplateName getQualifiedTemplateName(NestedNameSpecifier *NNS,
+ TemplateName getQualifiedTemplateName(NestedNameSpecifier Qualifier,
bool TemplateKeyword,
TemplateName Template) const;
TemplateName
@@ -2919,32 +2948,6 @@ public:
/// Determine if two types are similar, ignoring only CVR qualifiers.
bool hasCvrSimilarType(QualType T1, QualType T2);
- /// Retrieves the "canonical" nested name specifier for a
- /// given nested name specifier.
- ///
- /// The canonical nested name specifier is a nested name specifier
- /// that uniquely identifies a type or namespace within the type
- /// system. For example, given:
- ///
- /// \code
- /// namespace N {
- /// struct S {
- /// template<typename T> struct X { typename T* type; };
- /// };
- /// }
- ///
- /// template<typename T> struct Y {
- /// typename N::S::X<T>::type member;
- /// };
- /// \endcode
- ///
- /// Here, the nested-name-specifier for N::S::X<T>:: will be
- /// S::X<template-param-0-0>, since 'S' and 'X' are uniquely defined
- /// by declarations in the type system and the canonical type for
- /// the template type parameter 'T' is template-param-0-0.
- NestedNameSpecifier *
- getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const;
-
/// Retrieves the default calling convention for the current context.
///
/// The context's default calling convention may differ from the current
@@ -3158,7 +3161,7 @@ public:
mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
FunctionProtoType::ExceptionSpecInfo ESI2,
SmallVectorImpl<QualType> &ExceptionTypeStorage,
- bool AcceptDependent);
+ bool AcceptDependent) const;
// For two "same" types, return a type which has
// the common sugar between them. If Unqualified is true,
@@ -3166,7 +3169,7 @@ public:
// The result will drop the qualifiers which do not occur
// in both types.
QualType getCommonSugaredType(QualType X, QualType Y,
- bool Unqualified = false);
+ bool Unqualified = false) const;
private:
// Helper for integer ordering
@@ -3184,23 +3187,11 @@ public:
bool propertyTypesAreCompatible(QualType, QualType);
bool typesAreBlockPointerCompatible(QualType, QualType);
- bool isObjCIdType(QualType T) const {
- if (const auto *ET = dyn_cast<ElaboratedType>(T))
- T = ET->getNamedType();
- return T == getObjCIdType();
- }
+ bool isObjCIdType(QualType T) const { return T == getObjCIdType(); }
- bool isObjCClassType(QualType T) const {
- if (const auto *ET = dyn_cast<ElaboratedType>(T))
- T = ET->getNamedType();
- return T == getObjCClassType();
- }
+ bool isObjCClassType(QualType T) const { return T == getObjCClassType(); }
- bool isObjCSelType(QualType T) const {
- if (const auto *ET = dyn_cast<ElaboratedType>(T))
- T = ET->getNamedType();
- return T == getObjCSelType();
- }
+ bool isObjCSelType(QualType T) const { return T == getObjCSelType(); }
bool ObjCQualifiedIdTypesAreCompatible(const ObjCObjectPointerType *LHS,
const ObjCObjectPointerType *RHS,
@@ -3731,7 +3722,7 @@ public:
/// Resolve the root record to be used to derive the vtable pointer
/// authentication policy for the specified record.
const CXXRecordDecl *
- baseForVTableAuthentication(const CXXRecordDecl *ThisClass);
+ baseForVTableAuthentication(const CXXRecordDecl *ThisClass) const;
bool useAbbreviatedThunkName(GlobalDecl VirtualMethodDecl,
StringRef MangledName);
diff --git a/clang/include/clang/AST/ASTImporter.h b/clang/include/clang/AST/ASTImporter.h
index c40b926..4a0ca45 100644
--- a/clang/include/clang/AST/ASTImporter.h
+++ b/clang/include/clang/AST/ASTImporter.h
@@ -404,7 +404,7 @@ class TypeSourceInfo;
///
/// \returns The equivalent nested-name-specifier in the "to"
/// context, or the import error.
- llvm::Expected<NestedNameSpecifier *> Import(NestedNameSpecifier *FromNNS);
+ llvm::Expected<NestedNameSpecifier> Import(NestedNameSpecifier FromNNS);
/// Import the given nested-name-specifier-loc from the "from"
/// context into the "to" context.
diff --git a/clang/include/clang/AST/ASTNodeTraverser.h b/clang/include/clang/AST/ASTNodeTraverser.h
index 8ebabb2..fe08d637 100644
--- a/clang/include/clang/AST/ASTNodeTraverser.h
+++ b/clang/include/clang/AST/ASTNodeTraverser.h
@@ -394,12 +394,14 @@ public:
}
void VisitMemberPointerType(const MemberPointerType *T) {
// FIXME: Provide a NestedNameSpecifier visitor.
- NestedNameSpecifier *Qualifier = T->getQualifier();
- if (NestedNameSpecifier::SpecifierKind K = Qualifier->getKind();
- K == NestedNameSpecifier::TypeSpec)
- Visit(Qualifier->getAsType());
+ NestedNameSpecifier Qualifier = T->getQualifier();
+ if (NestedNameSpecifier::Kind K = Qualifier.getKind();
+ K == NestedNameSpecifier::Kind::Type)
+ Visit(Qualifier.getAsType());
if (T->isSugared())
- Visit(T->getMostRecentCXXRecordDecl()->getTypeForDecl());
+ Visit(cast<MemberPointerType>(T->getCanonicalTypeUnqualified())
+ ->getQualifier()
+ .getAsType());
Visit(T->getPointeeType());
}
void VisitArrayType(const ArrayType *T) { Visit(T->getElementType()); }
@@ -510,7 +512,7 @@ public:
}
void VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
// FIXME: Provide NestedNamespecifierLoc visitor.
- Visit(TL.getQualifierLoc().getTypeLoc());
+ Visit(TL.getQualifierLoc().castAsTypeLoc());
}
void VisitVariableArrayTypeLoc(VariableArrayTypeLoc TL) {
Visit(TL.getSizeExpr());
@@ -647,21 +649,8 @@ public:
template <typename SpecializationDecl>
void dumpTemplateDeclSpecialization(const SpecializationDecl *D) {
- for (const auto *RedeclWithBadType : D->redecls()) {
- // FIXME: The redecls() range sometimes has elements of a less-specific
- // type. (In particular, ClassTemplateSpecializationDecl::redecls() gives
- // us TagDecls, and should give CXXRecordDecls).
- auto *Redecl = dyn_cast<SpecializationDecl>(RedeclWithBadType);
- if (!Redecl) {
- // Found the injected-class-name for a class template. This will be
- // dumped as part of its surrounding class so we don't need to dump it
- // here.
- assert(isa<CXXRecordDecl>(RedeclWithBadType) &&
- "expected an injected-class-name");
- continue;
- }
- Visit(Redecl);
- }
+ for (const auto *Redecl : D->redecls())
+ Visit(cast<SpecializationDecl>(Redecl));
}
template <typename TemplateDecl>
@@ -772,17 +761,16 @@ public:
}
void VisitUsingShadowDecl(const UsingShadowDecl *D) {
- if (auto *TD = dyn_cast<TypeDecl>(D->getUnderlyingDecl()))
- Visit(TD->getTypeForDecl());
+ Visit(D->getTargetDecl());
}
void VisitFriendDecl(const FriendDecl *D) {
if (D->getFriendType()) {
// Traverse any CXXRecordDecl owned by this type, since
// it will not be in the parent context:
- if (auto *ET = D->getFriendType()->getType()->getAs<ElaboratedType>())
- if (auto *TD = ET->getOwnedTagDecl())
- Visit(TD);
+ if (auto *TT = D->getFriendType()->getType()->getAs<TagType>())
+ if (TT->isTagOwned())
+ Visit(TT->getOriginalDecl());
} else {
Visit(D->getFriendDecl());
}
diff --git a/clang/include/clang/AST/ASTTypeTraits.h b/clang/include/clang/AST/ASTTypeTraits.h
index 3988a15..6f40705 100644
--- a/clang/include/clang/AST/ASTTypeTraits.h
+++ b/clang/include/clang/AST/ASTTypeTraits.h
@@ -307,7 +307,7 @@ public:
/// For nodes which represent textual entities in the source code,
/// return their SourceRange. For all other nodes, return SourceRange().
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange(bool IncludeQualifier = false) const;
/// @{
/// Imposes an order on \c DynTypedNode.
@@ -336,9 +336,9 @@ public:
NodeKind)) {
auto NNSLA = getUnchecked<NestedNameSpecifierLoc>();
auto NNSLB = Other.getUnchecked<NestedNameSpecifierLoc>();
- return std::make_pair(NNSLA.getNestedNameSpecifier(),
+ return std::make_pair(NNSLA.getNestedNameSpecifier().getAsVoidPointer(),
NNSLA.getOpaqueData()) <
- std::make_pair(NNSLB.getNestedNameSpecifier(),
+ std::make_pair(NNSLB.getNestedNameSpecifier().getAsVoidPointer(),
NNSLB.getOpaqueData());
}
@@ -393,8 +393,9 @@ public:
if (ASTNodeKind::getFromNodeKind<NestedNameSpecifierLoc>().isSame(
Val.NodeKind)) {
auto NNSL = Val.getUnchecked<NestedNameSpecifierLoc>();
- return llvm::hash_combine(NNSL.getNestedNameSpecifier(),
- NNSL.getOpaqueData());
+ return llvm::hash_combine(
+ NNSL.getNestedNameSpecifier().getAsVoidPointer(),
+ NNSL.getOpaqueData());
}
assert(Val.getMemoizationData());
@@ -539,8 +540,8 @@ struct DynTypedNode::BaseConverter<
: public DynCastPtrConverter<T, Attr> {};
template <>
-struct DynTypedNode::BaseConverter<
- NestedNameSpecifier, void> : public PtrConverter<NestedNameSpecifier> {};
+struct DynTypedNode::BaseConverter<NestedNameSpecifier, void>
+ : public ValueConverter<NestedNameSpecifier> {};
template <>
struct DynTypedNode::BaseConverter<
diff --git a/clang/include/clang/AST/AbstractBasicReader.h b/clang/include/clang/AST/AbstractBasicReader.h
index 0a2db9e..0d187eb4 100644
--- a/clang/include/clang/AST/AbstractBasicReader.h
+++ b/clang/include/clang/AST/AbstractBasicReader.h
@@ -193,11 +193,11 @@ public:
auto elemTy = origTy;
unsigned pathLength = asImpl().readUInt32();
for (unsigned i = 0; i < pathLength; ++i) {
- if (elemTy->template getAs<RecordType>()) {
+ if (elemTy->isRecordType()) {
unsigned int_ = asImpl().readUInt32();
Decl *decl = asImpl().template readDeclAs<Decl>();
if (auto *recordDecl = dyn_cast<CXXRecordDecl>(decl))
- elemTy = getASTContext().getRecordType(recordDecl);
+ elemTy = getASTContext().getCanonicalTagType(recordDecl);
else
elemTy = cast<ValueDecl>(decl)->getType();
path.push_back(
@@ -252,39 +252,34 @@ public:
return EffectConditionExpr{asImpl().readExprRef()};
}
- NestedNameSpecifier *readNestedNameSpecifier() {
+ NestedNameSpecifier readNestedNameSpecifier() {
auto &ctx = getASTContext();
// We build this up iteratively.
- NestedNameSpecifier *cur = nullptr;
+ NestedNameSpecifier cur = std::nullopt;
uint32_t depth = asImpl().readUInt32();
for (uint32_t i = 0; i != depth; ++i) {
auto kind = asImpl().readNestedNameSpecifierKind();
switch (kind) {
- case NestedNameSpecifier::Identifier:
- cur = NestedNameSpecifier::Create(ctx, cur,
- asImpl().readIdentifier());
+ case NestedNameSpecifier::Kind::Namespace:
+ cur =
+ NestedNameSpecifier(ctx, asImpl().readNamespaceBaseDeclRef(), cur);
continue;
-
- case NestedNameSpecifier::Namespace:
- cur = NestedNameSpecifier::Create(ctx, cur,
- asImpl().readNamespaceBaseDeclRef());
+ case NestedNameSpecifier::Kind::Type:
+ assert(!cur);
+ cur = NestedNameSpecifier(asImpl().readQualType().getTypePtr());
continue;
-
- case NestedNameSpecifier::TypeSpec:
- cur = NestedNameSpecifier::Create(ctx, cur,
- asImpl().readQualType().getTypePtr());
+ case NestedNameSpecifier::Kind::Global:
+ assert(!cur);
+ cur = NestedNameSpecifier::getGlobal();
continue;
-
- case NestedNameSpecifier::Global:
- cur = NestedNameSpecifier::GlobalSpecifier(ctx);
- continue;
-
- case NestedNameSpecifier::Super:
- cur = NestedNameSpecifier::SuperSpecifier(ctx,
- asImpl().readCXXRecordDeclRef());
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ assert(!cur);
+ cur = NestedNameSpecifier(asImpl().readCXXRecordDeclRef());
continue;
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
llvm_unreachable("bad nested name specifier kind");
}
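
Since NestedNameSpecifier is now a value type, the reader composes specifiers by construction instead of through the old NestedNameSpecifier::Create. A sketch of building `ns1::ns2::`, assuming `NS1` and `NS2` are NamespaceDecls and following the constructor used above:

    // Sketch: a two-level namespace qualifier as a value.
    NestedNameSpecifier Inner(Ctx, NS1, /*Prefix=*/std::nullopt);
    NestedNameSpecifier Outer(Ctx, NS2, /*Prefix=*/Inner);
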
diff --git a/clang/include/clang/AST/AbstractBasicWriter.h b/clang/include/clang/AST/AbstractBasicWriter.h
index c105bbb..8ea0c29 100644
--- a/clang/include/clang/AST/AbstractBasicWriter.h
+++ b/clang/include/clang/AST/AbstractBasicWriter.h
@@ -176,12 +176,12 @@ public:
asImpl().writeUInt32(path.size());
auto &ctx = ((BasicWriterBase<Impl> *)this)->getASTContext();
for (auto elem : path) {
- if (elemTy->getAs<RecordType>()) {
+ if (elemTy->isRecordType()) {
asImpl().writeUInt32(elem.getAsBaseOrMember().getInt());
const Decl *baseOrMember = elem.getAsBaseOrMember().getPointer();
if (const auto *recordDecl = dyn_cast<CXXRecordDecl>(baseOrMember)) {
asImpl().writeDeclRef(recordDecl);
- elemTy = ctx.getRecordType(recordDecl);
+ elemTy = ctx.getCanonicalTagType(recordDecl);
} else {
const auto *valueDecl = cast<ValueDecl>(baseOrMember);
asImpl().writeDeclRef(valueDecl);
@@ -229,42 +229,43 @@ public:
asImpl().writeExprRef(CE.getCondition());
}
- void writeNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ void writeNestedNameSpecifier(NestedNameSpecifier NNS) {
// Nested name specifiers usually aren't too long. I think that 8 would
// typically accommodate the vast majority.
- SmallVector<NestedNameSpecifier *, 8> nestedNames;
+ SmallVector<NestedNameSpecifier, 8> nestedNames;
// Push each of the NNS's onto a stack for serialization in reverse order.
while (NNS) {
nestedNames.push_back(NNS);
- NNS = NNS->getPrefix();
+ NNS = NNS.getKind() == NestedNameSpecifier::Kind::Namespace
+ ? NNS.getAsNamespaceAndPrefix().Prefix
+ : std::nullopt;
}
asImpl().writeUInt32(nestedNames.size());
while (!nestedNames.empty()) {
NNS = nestedNames.pop_back_val();
- NestedNameSpecifier::SpecifierKind kind = NNS->getKind();
+ NestedNameSpecifier::Kind kind = NNS.getKind();
asImpl().writeNestedNameSpecifierKind(kind);
switch (kind) {
- case NestedNameSpecifier::Identifier:
- asImpl().writeIdentifier(NNS->getAsIdentifier());
+ case NestedNameSpecifier::Kind::Namespace:
+ asImpl().writeNamespaceBaseDeclRef(
+ NNS.getAsNamespaceAndPrefix().Namespace);
continue;
-
- case NestedNameSpecifier::Namespace:
- asImpl().writeNamespaceBaseDeclRef(NNS->getAsNamespace());
- continue;
-
- case NestedNameSpecifier::TypeSpec:
- asImpl().writeQualType(QualType(NNS->getAsType(), 0));
+ case NestedNameSpecifier::Kind::Type:
+ asImpl().writeQualType(QualType(NNS.getAsType(), 0));
continue;
- case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Kind::Global:
// Don't need to write an associated value.
continue;
- case NestedNameSpecifier::Super:
- asImpl().writeDeclRef(NNS->getAsRecordDecl());
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ asImpl().writeDeclRef(NNS.getAsMicrosoftSuper());
continue;
+
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
llvm_unreachable("bad nested name specifier kind");
}
diff --git a/clang/include/clang/AST/CXXInheritance.h b/clang/include/clang/AST/CXXInheritance.h
index bbef018..e893260 100644
--- a/clang/include/clang/AST/CXXInheritance.h
+++ b/clang/include/clang/AST/CXXInheritance.h
@@ -359,7 +359,7 @@ class CXXFinalOverriderMap
/// A set of all the primary bases for a class.
class CXXIndirectPrimaryBaseSet
- : public llvm::SmallSet<const CXXRecordDecl*, 32> {};
+ : public llvm::SmallPtrSet<const CXXRecordDecl *, 32> {};
inline bool
inheritanceModelHasVBPtrOffsetField(MSInheritanceModel Inheritance) {
diff --git a/clang/include/clang/AST/CanonicalType.h b/clang/include/clang/AST/CanonicalType.h
index 35db689..b5a4e94e13 100644
--- a/clang/include/clang/AST/CanonicalType.h
+++ b/clang/include/clang/AST/CanonicalType.h
@@ -453,7 +453,7 @@ template<>
struct CanProxyAdaptor<MemberPointerType>
: public CanProxyBase<MemberPointerType> {
LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getPointeeType)
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(NestedNameSpecifier *, getQualifier)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(NestedNameSpecifier, getQualifier)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(const CXXRecordDecl *,
getMostRecentCXXRecordDecl)
};
@@ -551,21 +551,18 @@ struct CanProxyAdaptor<UnaryTransformType>
template<>
struct CanProxyAdaptor<TagType> : public CanProxyBase<TagType> {
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(TagDecl *, getDecl)
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isBeingDefined)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(TagDecl *, getOriginalDecl)
};
template<>
struct CanProxyAdaptor<RecordType> : public CanProxyBase<RecordType> {
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(RecordDecl *, getDecl)
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isBeingDefined)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(RecordDecl *, getOriginalDecl)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasConstFields)
};
template<>
struct CanProxyAdaptor<EnumType> : public CanProxyBase<EnumType> {
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(EnumDecl *, getDecl)
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isBeingDefined)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(EnumDecl *, getOriginalDecl)
};
template<>
diff --git a/clang/include/clang/AST/Comment.h b/clang/include/clang/AST/Comment.h
index dd99067..5ba95c8 100644
--- a/clang/include/clang/AST/Comment.h
+++ b/clang/include/clang/AST/Comment.h
@@ -19,6 +19,7 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
class Decl;
@@ -119,6 +120,11 @@ protected:
LLVM_PREFERRED_TYPE(CommandTraits::KnownCommandIDs)
unsigned CommandID : CommandInfo::NumCommandIDBits;
+
+ /// Describes the syntax that was used in a documentation command.
+ /// Contains values from the CommandMarkerKind enum.
+ LLVM_PREFERRED_TYPE(CommandMarkerKind)
+ unsigned CommandMarker : 1;
};
enum { NumInlineCommandCommentBits = NumInlineContentCommentBits + 3 +
CommandInfo::NumCommandIDBits };
@@ -347,6 +353,16 @@ public:
InlineCommandCommentBits.RenderKind = llvm::to_underlying(RK);
InlineCommandCommentBits.CommandID = CommandID;
}
+ InlineCommandComment(SourceLocation LocBegin, SourceLocation LocEnd,
+ unsigned CommandID, InlineCommandRenderKind RK,
+ CommandMarkerKind CommandMarker, ArrayRef<Argument> Args)
+ : InlineContentComment(CommentKind::InlineCommandComment, LocBegin,
+ LocEnd),
+ Args(Args) {
+ InlineCommandCommentBits.RenderKind = llvm::to_underlying(RK);
+ InlineCommandCommentBits.CommandID = CommandID;
+ InlineCommandCommentBits.CommandMarker = llvm::to_underlying(CommandMarker);
+ }
static bool classof(const Comment *C) {
return C->getCommentKind() == CommentKind::InlineCommandComment;
@@ -384,6 +400,11 @@ public:
SourceRange getArgRange(unsigned Idx) const {
return Args[Idx].Range;
}
+
+ CommandMarkerKind getCommandMarker() const {
+ return static_cast<CommandMarkerKind>(
+ InlineCommandCommentBits.CommandMarker);
+ }
};
/// Abstract class for opening and closing HTML tags. HTML tags are always
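
With the marker recorded, comment consumers can distinguish `\command` from `@command` spellings. A hedged sketch using the existing CommandMarkerKind enumerators; the `report` helper is illustrative:

    // Sketch: reacting to the marker kind on an inline command comment.
    using namespace clang::comments;
    void report(const InlineCommandComment *IC) {
      if (IC->getCommandMarker() == CMK_At)
        llvm::outs() << "inline command written with '@'\n";
    }
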
diff --git a/clang/include/clang/AST/CommentHTMLTags.td b/clang/include/clang/AST/CommentHTMLTags.td
index a1ce8c6..9b89bc0 100644
--- a/clang/include/clang/AST/CommentHTMLTags.td
+++ b/clang/include/clang/AST/CommentHTMLTags.td
@@ -51,6 +51,11 @@ def Col : Tag<"col"> { let EndTagForbidden = 1; }
def Tr : Tag<"tr"> { let EndTagOptional = 1; }
def Th : Tag<"th"> { let EndTagOptional = 1; }
def Td : Tag<"td"> { let EndTagOptional = 1; }
+def Summary : Tag<"summary">;
+def Details : Tag<"details">;
+def Mark : Tag<"mark">;
+def Figure : Tag<"figure">;
+def FigCaption : Tag<"figcaption">;
// Define a list of attributes that are not safe to pass through to HTML
// output if the input is untrusted.
diff --git a/clang/include/clang/AST/CommentSema.h b/clang/include/clang/AST/CommentSema.h
index 916d794..3169e2b 100644
--- a/clang/include/clang/AST/CommentSema.h
+++ b/clang/include/clang/AST/CommentSema.h
@@ -131,6 +131,7 @@ public:
InlineCommandComment *actOnInlineCommand(SourceLocation CommandLocBegin,
SourceLocation CommandLocEnd,
unsigned CommandID,
+ CommandMarkerKind CommandMarker,
ArrayRef<Comment::Argument> Args);
InlineContentComment *actOnUnknownCommand(SourceLocation LocBegin,
diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h
index 08fe1f8..c24d412 100644
--- a/clang/include/clang/AST/Decl.h
+++ b/clang/include/clang/AST/Decl.h
@@ -20,9 +20,9 @@
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/ExternalASTSource.h"
-#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
#include "clang/AST/Redeclarable.h"
-#include "clang/AST/Type.h"
+#include "clang/AST/TypeBase.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
@@ -833,9 +833,9 @@ public:
/// Retrieve the nested-name-specifier that qualifies the name of this
/// declaration, if it was present in the source.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return hasExtInfo() ? getExtInfo()->QualifierLoc.getNestedNameSpecifier()
- : nullptr;
+ : std::nullopt;
}
/// Retrieve the nested-name-specifier (with source-location
@@ -2668,6 +2668,10 @@ public:
/// an attribute on its declaration or its type.
bool isNoReturn() const;
+ /// Determines whether this function is known to be 'noreturn' for the
+ /// analyzer, through an `analyzer_noreturn` attribute on its declaration.
+ bool isAnalyzerNoReturn() const;
+
/// True if the function was a definition but its body was skipped.
bool hasSkippedBody() const { return FunctionDeclBits.HasSkippedBody; }
void setHasSkippedBody(bool Skipped = true) {
@@ -3526,10 +3530,16 @@ protected:
public:
// Low-level accessor. If you just want the type defined by this node,
// check out ASTContext::getTypeDeclType or one of
- // ASTContext::getTypedefType, ASTContext::getRecordType, etc. if you
+ // ASTContext::getTypedefType, ASTContext::getTagType, etc. if you
// already know the specific kind of node this is.
- const Type *getTypeForDecl() const { return TypeForDecl; }
- void setTypeForDecl(const Type *TD) { TypeForDecl = TD; }
+ const Type *getTypeForDecl() const {
+ assert(!isa<TagDecl>(this));
+ return TypeForDecl;
+ }
+ void setTypeForDecl(const Type *TD) {
+ assert(!isa<TagDecl>(this));
+ TypeForDecl = TD;
+ }
SourceLocation getBeginLoc() const LLVM_READONLY { return LocStart; }
void setLocStart(SourceLocation L) { LocStart = L; }
@@ -3635,6 +3645,10 @@ public:
return isTransparentTagSlow();
}
+ // These types are created lazily, use the ASTContext methods to obtain them.
+ const Type *getTypeForDecl() const = delete;
+ void setTypeForDecl(const Type *TD) = delete;
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) {
@@ -3754,14 +3768,6 @@ protected:
/// True if this decl is currently being defined.
void setBeingDefined(bool V = true) { TagDeclBits.IsBeingDefined = V; }
- /// Indicates whether it is possible for declarations of this kind
- /// to have an out-of-date definition.
- ///
- /// This option is only enabled when modules are enabled.
- void setMayHaveOutOfDateDef(bool V = true) {
- TagDeclBits.MayHaveOutOfDateDef = V;
- }
-
public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
@@ -3842,12 +3848,6 @@ public:
TagDeclBits.IsFreeStanding = isFreeStanding;
}
- /// Indicates whether it is possible for declarations of this kind
- /// to have an out-of-date definition.
- ///
- /// This option is only enabled when modules are enabled.
- bool mayHaveOutOfDateDef() const { return TagDeclBits.MayHaveOutOfDateDef; }
-
/// Whether this declaration declares a type that is
/// dependent, i.e., a type that somehow depends on template
/// parameters.
@@ -3888,6 +3888,19 @@ public:
/// the struct/union/class/enum.
TagDecl *getDefinition() const;
+ TagDecl *getDefinitionOrSelf() const {
+ if (TagDecl *Def = getDefinition())
+ return Def;
+ return const_cast<TagDecl *>(this);
+ }
+
+ /// Determines whether this entity is in the process of being defined.
+ bool isEntityBeingDefined() const {
+ if (const TagDecl *Def = getDefinition())
+ return Def->isBeingDefined();
+ return false;
+ }
+
StringRef getKindName() const {
return TypeWithKeyword::getTagTypeKindName(getTagKind());
}
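
`getDefinitionOrSelf` and `isEntityBeingDefined` fold a pattern that callers previously wrote by hand. A minimal sketch of the intended use; the `inspect` helper is illustrative:

    // Illustrative: prefer the definition whenever one exists.
    void inspect(const clang::TagDecl *TD) {
      if (TD->isEntityBeingDefined())
        return; // the definition is still being parsed
      llvm::errs() << TD->getDefinitionOrSelf()->getKindName() << '\n';
    }
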
@@ -3906,6 +3919,10 @@ public:
bool isUnion() const { return getTagKind() == TagTypeKind::Union; }
bool isEnum() const { return getTagKind() == TagTypeKind::Enum; }
+ bool isStructureOrClass() const {
+ return isStruct() || isClass() || isInterface();
+ }
+
/// Is this tag type named, either directly or via being defined in
/// a typedef of this type?
///
@@ -3934,9 +3951,9 @@ public:
/// Retrieve the nested-name-specifier that qualifies the name of this
/// declaration, if it was present in the source.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return hasExtInfo() ? getExtInfo()->QualifierLoc.getNestedNameSpecifier()
- : nullptr;
+ : std::nullopt;
}
/// Retrieve the nested-name-specifier (with source-location
@@ -3958,6 +3975,10 @@ public:
return getExtInfo()->TemplParamLists[i];
}
+ // These types are created lazily, use the ASTContext methods to obtain them.
+ const Type *getTypeForDecl() const = delete;
+ void setTypeForDecl(const Type *TD) = delete;
+
using TypeDecl::printName;
void printName(raw_ostream &OS, const PrintingPolicy &Policy) const override;
@@ -4087,6 +4108,10 @@ public:
return cast_or_null<EnumDecl>(TagDecl::getDefinition());
}
+ EnumDecl *getDefinitionOrSelf() const {
+ return cast_or_null<EnumDecl>(TagDecl::getDefinitionOrSelf());
+ }
+
static EnumDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, EnumDecl *PrevDecl,
@@ -4469,6 +4494,10 @@ public:
return cast_or_null<RecordDecl>(TagDecl::getDefinition());
}
+ RecordDecl *getDefinitionOrSelf() const {
+ return cast_or_null<RecordDecl>(TagDecl::getDefinitionOrSelf());
+ }
+
/// Returns whether this record is a union, or contains (at any nesting level)
/// a union member. This is used by CMSE to warn about possible information
/// leaks.
@@ -5299,6 +5328,8 @@ void Redeclarable<decl_type>::setPreviousDecl(decl_type *PrevDecl) {
/// We use this function to break a cycle between the inline definitions in
/// Type.h and Decl.h.
inline bool IsEnumDeclComplete(EnumDecl *ED) {
+ if (const auto *Def = ED->getDefinition())
+ return Def->isComplete();
return ED->isComplete();
}
diff --git a/clang/include/clang/AST/DeclBase.h b/clang/include/clang/AST/DeclBase.h
index dd67ebc..c6326a8 100644
--- a/clang/include/clang/AST/DeclBase.h
+++ b/clang/include/clang/AST/DeclBase.h
@@ -410,9 +410,6 @@ protected:
virtual ~Decl();
- /// Update a potentially out-of-date declaration.
- void updateOutOfDate(IdentifierInfo &II) const;
-
Linkage getCachedLinkage() const {
return static_cast<Linkage>(CacheValidAndLinkage);
}
@@ -625,6 +622,12 @@ public:
void setReferenced(bool R = true) { Referenced = R; }
+ /// When performing manipulations that might change the computed linkage,
+ /// such as changing the DeclContext after the declaration has already been
+ /// used, invalidating the cache ensures that the linkage will be
+ /// recomputed on the next query.
+ void invalidateCachedLinkage() { setCachedLinkage(Linkage::Invalid); }
+
/// Whether this declaration is a top-level declaration (function,
/// global variable, etc.) that is lexically inside an objc container
/// definition.
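
A sketch of the scenario the new comment describes; `D` and `NewDC` are assumed to be an already-used declaration and its new context:

    // Sketch: reparenting a used declaration, then forcing linkage recomputation.
    D->setDeclContext(NewDC);     // may change the computed linkage
    D->invalidateCachedLinkage(); // recompute on the next linkage query
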
@@ -1564,13 +1567,6 @@ protected:
LLVM_PREFERRED_TYPE(bool)
uint64_t IsFreeStanding : 1;
- /// Indicates whether it is possible for declarations of this kind
- /// to have an out-of-date definition.
- ///
- /// This option is only enabled when modules are enabled.
- LLVM_PREFERRED_TYPE(bool)
- uint64_t MayHaveOutOfDateDef : 1;
-
/// Has the full definition of this type been required by a use somewhere in
/// the TU.
LLVM_PREFERRED_TYPE(bool)
diff --git a/clang/include/clang/AST/DeclCXX.h b/clang/include/clang/AST/DeclCXX.h
index 33ae3d6..8802664 100644
--- a/clang/include/clang/AST/DeclCXX.h
+++ b/clang/include/clang/AST/DeclCXX.h
@@ -22,10 +22,10 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LambdaCapture.h"
-#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
#include "clang/AST/Redeclarable.h"
#include "clang/AST/Stmt.h"
-#include "clang/AST/Type.h"
+#include "clang/AST/TypeBase.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/LLVM.h"
@@ -545,20 +545,6 @@ public:
return const_cast<CXXRecordDecl*>(this)->getMostRecentDecl();
}
- CXXRecordDecl *getMostRecentNonInjectedDecl() {
- CXXRecordDecl *Recent = getMostRecentDecl();
- while (Recent->isInjectedClassName()) {
- // FIXME: Does injected class name need to be in the redeclarations chain?
- assert(Recent->getPreviousDecl());
- Recent = Recent->getPreviousDecl();
- }
- return Recent;
- }
-
- const CXXRecordDecl *getMostRecentNonInjectedDecl() const {
- return const_cast<CXXRecordDecl*>(this)->getMostRecentNonInjectedDecl();
- }
-
CXXRecordDecl *getDefinition() const {
// We only need an update if we don't already know which
// declaration is the definition.
@@ -566,13 +552,18 @@ public:
return DD ? DD->Definition : nullptr;
}
+ CXXRecordDecl *getDefinitionOrSelf() const {
+ if (auto *Def = getDefinition())
+ return Def;
+ return const_cast<CXXRecordDecl *>(this);
+ }
+
bool hasDefinition() const { return DefinitionData || dataPtr(); }
static CXXRecordDecl *Create(const ASTContext &C, TagKind TK, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id,
- CXXRecordDecl *PrevDecl = nullptr,
- bool DelayTypeCreation = false);
+ CXXRecordDecl *PrevDecl = nullptr);
static CXXRecordDecl *CreateLambda(const ASTContext &C, DeclContext *DC,
TypeSourceInfo *Info, SourceLocation Loc,
unsigned DependencyKind, bool IsGeneric,
@@ -1903,6 +1894,20 @@ public:
/// \endcode
bool isInjectedClassName() const;
+ /// Determines whether this declaration is canonically of an injected
+ /// class type. These are non-instantiated class template patterns, which can
+ /// be used from within the class template itself. For example:
+ ///
+ /// \code
+ /// template<class T> struct C {
+ /// C *t; // Here `C *` is a pointer to an injected class type.
+ /// };
+ /// \endcode
+ bool hasInjectedClassType() const;
+
+ CanQualType
+ getCanonicalTemplateSpecializationType(const ASTContext &Ctx) const;
+
// Determine whether this type is an Interface Like type for
// __interface inheritance purposes.
bool isInterfaceLike() const;
@@ -3131,7 +3136,7 @@ public:
/// Retrieve the nested-name-specifier that qualifies the
/// name of the namespace.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -3252,7 +3257,7 @@ public:
/// Retrieve the nested-name-specifier that qualifies the
/// name of the namespace.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -3614,7 +3619,7 @@ public:
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
/// Retrieve the nested-name-specifier that qualifies the name.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -3804,13 +3809,11 @@ public:
/// The source location of the 'enum' keyword.
SourceLocation getEnumLoc() const { return EnumLocation; }
void setEnumLoc(SourceLocation L) { EnumLocation = L; }
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return getQualifierLoc().getNestedNameSpecifier();
}
NestedNameSpecifierLoc getQualifierLoc() const {
- if (auto ETL = EnumType->getTypeLoc().getAs<ElaboratedTypeLoc>())
- return ETL.getQualifierLoc();
- return NestedNameSpecifierLoc();
+ return getEnumTypeLoc().getPrefix();
}
// Returns the "qualifier::Name" part as a TypeLoc.
TypeLoc getEnumTypeLoc() const {
@@ -3822,7 +3825,9 @@ public:
void setEnumType(TypeSourceInfo *TSI) { EnumType = TSI; }
public:
- EnumDecl *getEnumDecl() const { return cast<EnumDecl>(EnumType->getType()->getAsTagDecl()); }
+ EnumDecl *getEnumDecl() const {
+ return EnumType->getType()->castAs<clang::EnumType>()->getOriginalDecl();
+ }
static UsingEnumDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation UsingL, SourceLocation EnumL,
@@ -3970,7 +3975,7 @@ public:
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
/// Retrieve the nested-name-specifier that qualifies the name.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -4060,7 +4065,7 @@ public:
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
/// Retrieve the nested-name-specifier that qualifies the name.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
diff --git a/clang/include/clang/AST/DeclObjC.h b/clang/include/clang/AST/DeclObjC.h
index 9014d76..2541edb 100644
--- a/clang/include/clang/AST/DeclObjC.h
+++ b/clang/include/clang/AST/DeclObjC.h
@@ -643,6 +643,9 @@ public:
/// from the explicitly-specified bound.
SourceLocation getColonLoc() const { return ColonLoc; }
+ using TypeDecl::getTypeForDecl;
+ using TypeDecl::setTypeForDecl;
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == ObjCTypeParam; }
diff --git a/clang/include/clang/AST/DeclTemplate.h b/clang/include/clang/AST/DeclTemplate.h
index 32de203..bba7236 100644
--- a/clang/include/clang/AST/DeclTemplate.h
+++ b/clang/include/clang/AST/DeclTemplate.h
@@ -1796,7 +1796,10 @@ public:
}
BuiltinTemplateKind getBuiltinTemplateKind() const { return BTK; }
+
+ bool isPackProducingBuiltinTemplate() const;
};
+bool isPackProducingBuiltinTemplateName(TemplateName N);
/// Provides information about an explicit instantiation of a variable or class
/// template.
@@ -1898,14 +1901,14 @@ public:
void getNameForDiagnostic(raw_ostream &OS, const PrintingPolicy &Policy,
bool Qualified) const override;
- // FIXME: This is broken. CXXRecordDecl::getMostRecentDecl() returns a
- // different "most recent" declaration from this function for the same
- // declaration, because we don't override getMostRecentDeclImpl(). But
- // it's not clear that we should override that, because the most recent
- // declaration as a CXXRecordDecl sometimes is the injected-class-name.
ClassTemplateSpecializationDecl *getMostRecentDecl() {
return cast<ClassTemplateSpecializationDecl>(
- getMostRecentNonInjectedDecl());
+ CXXRecordDecl::getMostRecentDecl());
+ }
+
+ ClassTemplateSpecializationDecl *getDefinitionOrSelf() const {
+ return cast<ClassTemplateSpecializationDecl>(
+ CXXRecordDecl::getDefinitionOrSelf());
}
/// Retrieve the template that this specialization specializes.
@@ -2123,10 +2126,13 @@ class ClassTemplatePartialSpecializationDecl
llvm::PointerIntPair<ClassTemplatePartialSpecializationDecl *, 1, bool>
InstantiatedFromMember;
+ mutable CanQualType CanonInjectedTST;
+
ClassTemplatePartialSpecializationDecl(
ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate, ArrayRef<TemplateArgument> Args,
+ CanQualType CanonInjectedTST,
ClassTemplatePartialSpecializationDecl *PrevDecl);
ClassTemplatePartialSpecializationDecl(ASTContext &C)
@@ -2143,7 +2149,7 @@ public:
Create(ASTContext &Context, TagKind TK, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
TemplateParameterList *Params, ClassTemplateDecl *SpecializedTemplate,
- ArrayRef<TemplateArgument> Args, QualType CanonInjectedType,
+ ArrayRef<TemplateArgument> Args, CanQualType CanonInjectedTST,
ClassTemplatePartialSpecializationDecl *PrevDecl);
static ClassTemplatePartialSpecializationDecl *
@@ -2160,12 +2166,6 @@ public:
return TemplateParams;
}
- /// Get the template argument list of the template parameter list.
- ArrayRef<TemplateArgument>
- getInjectedTemplateArgs(const ASTContext &Context) const {
- return getTemplateParameters()->getInjectedTemplateArgs(Context);
- }
-
/// \brief All associated constraints of this partial specialization,
/// including the requires clause and any constraints derived from
/// constrained-parameters.
@@ -2247,14 +2247,10 @@ public:
return First->InstantiatedFromMember.setInt(true);
}
- /// Retrieves the injected specialization type for this partial
- /// specialization. This is not the same as the type-decl-type for
- /// this partial specialization, which is an InjectedClassNameType.
- QualType getInjectedSpecializationType() const {
- assert(getTypeForDecl() && "partial specialization has no type set!");
- return cast<InjectedClassNameType>(getTypeForDecl())
- ->getInjectedSpecializationType();
- }
+ /// Retrieves the canonical injected specialization type for this partial
+ /// specialization.
+ CanQualType
+ getCanonicalInjectedSpecializationType(const ASTContext &Ctx) const;
SourceRange getSourceRange() const override LLVM_READONLY;
@@ -2289,8 +2285,8 @@ protected:
llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl>
PartialSpecializations;
- /// The injected-class-name type for this class template.
- QualType InjectedClassNameType;
+ /// The canonical injected template specialization type for this declaration.
+ CanQualType CanonInjectedTST;
Common() = default;
};
@@ -2427,7 +2423,7 @@ public:
findPartialSpecInstantiatedFromMember(
ClassTemplatePartialSpecializationDecl *D);
- /// Retrieve the template specialization type of the
+ /// Retrieve the canonical template specialization type of the
/// injected-class-name for this class template.
///
/// The injected-class-name for a class template \c X is \c
@@ -2441,7 +2437,8 @@ public:
/// typedef array this_type; // "array" is equivalent to "array<T, N>"
/// };
/// \endcode
- QualType getInjectedClassNameSpecialization();
+ CanQualType
+ getCanonicalInjectedSpecializationType(const ASTContext &Ctx) const;
using spec_iterator = SpecIterator<ClassTemplateSpecializationDecl>;
using spec_range = llvm::iterator_range<spec_iterator>;
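Callers migrate from the removed getInjectedClassNameSpecialization() to the canonical accessor above. A minimal sketch, assuming an ASTContext `Ctx` and a ClassTemplateDecl `CTD` in scope (both names illustrative):

    // was: QualType T = CTD->getInjectedClassNameSpecialization();
    CanQualType T = CTD->getCanonicalInjectedSpecializationType(Ctx);
    // The result is canonical, so it can be compared with == directly.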
diff --git a/clang/include/clang/AST/DeclarationName.h b/clang/include/clang/AST/DeclarationName.h
index 284228d..a7185f5 100644
--- a/clang/include/clang/AST/DeclarationName.h
+++ b/clang/include/clang/AST/DeclarationName.h
@@ -13,7 +13,7 @@
#ifndef LLVM_CLANG_AST_DECLARATIONNAME_H
#define LLVM_CLANG_AST_DECLARATIONNAME_H
-#include "clang/AST/Type.h"
+#include "clang/AST/TypeBase.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/OperatorKinds.h"
diff --git a/clang/include/clang/AST/DependenceFlags.h b/clang/include/clang/AST/DependenceFlags.h
index bdcaabc..c439525 100644
--- a/clang/include/clang/AST/DependenceFlags.h
+++ b/clang/include/clang/AST/DependenceFlags.h
@@ -293,7 +293,7 @@ inline TypeDependence toSemanticDependence(TypeDependence D) {
}
inline NestedNameSpecifierDependence
-toNestedNameSpecifierDependendence(TypeDependence D) {
+toNestedNameSpecifierDependence(TypeDependence D) {
return Dependence(D).nestedNameSpecifier();
}
diff --git a/clang/include/clang/AST/DynamicRecursiveASTVisitor.h b/clang/include/clang/AST/DynamicRecursiveASTVisitor.h
index 703cca2..7b5bdca 100644
--- a/clang/include/clang/AST/DynamicRecursiveASTVisitor.h
+++ b/clang/include/clang/AST/DynamicRecursiveASTVisitor.h
@@ -134,8 +134,7 @@ public:
/// Recursively visit a C++ nested-name-specifier.
///
/// \returns false if the visitation was terminated early, true otherwise.
- virtual bool
- TraverseNestedNameSpecifier(MaybeConst<NestedNameSpecifier> *NNS);
+ virtual bool TraverseNestedNameSpecifier(NestedNameSpecifier NNS);
/// Recursively visit a C++ nested-name-specifier with location
/// information.
@@ -181,14 +180,14 @@ public:
///
/// \returns false if the visitation was terminated early, true
/// otherwise (including when the argument is a Null type).
- virtual bool TraverseType(QualType T);
+ virtual bool TraverseType(QualType T, bool TraverseQualifier = true);
/// Recursively visit a type with location, by dispatching to
/// Traverse*TypeLoc() based on the argument type's getTypeClass() property.
///
/// \returns false if the visitation was terminated early, true
/// otherwise (including when the argument is a Null type location).
- virtual bool TraverseTypeLoc(TypeLoc TL);
+ virtual bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true);
/// Recursively visit an Objective-C protocol reference with location
/// information.
@@ -273,7 +272,8 @@ public:
#define ABSTRACT_TYPE(CLASS, BASE)
#define TYPE(CLASS, BASE) \
bool WalkUpFrom##CLASS##Type(MaybeConst<CLASS##Type> *T); \
- virtual bool Traverse##CLASS##Type(MaybeConst<CLASS##Type> *T);
+ virtual bool Traverse##CLASS##Type(MaybeConst<CLASS##Type> *T, \
+ bool TraverseQualifier = true);
#include "clang/AST/TypeNodes.inc"
#define TYPE(CLASS, BASE) \
@@ -283,7 +283,8 @@ public:
// TypeLocs.
#define ABSTRACT_TYPELOC(CLASS, BASE)
#define TYPELOC(CLASS, BASE) \
- virtual bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL);
+ virtual bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL, \
+ bool TraverseQualifier);
#include "clang/AST/TypeLocNodes.def"
#define TYPELOC(CLASS, BASE) \
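The extra TraverseQualifier parameter lets callers visit a type node while skipping its nested-name-specifier. A sketch of a conforming override, assuming only the virtuals declared above (the visitor name is ours):

    class QualifierAwareVisitor : public DynamicRecursiveASTVisitor {
      bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier) override {
        // When TraverseQualifier is false, the caller has already handled
        // (or deliberately skipped) the qualifier portion of this TypeLoc.
        return DynamicRecursiveASTVisitor::TraverseTypeLoc(TL,
                                                           TraverseQualifier);
      }
    };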
diff --git a/clang/include/clang/AST/Expr.h b/clang/include/clang/AST/Expr.h
index 237b3b2..23a0996 100644
--- a/clang/include/clang/AST/Expr.h
+++ b/clang/include/clang/AST/Expr.h
@@ -23,7 +23,7 @@
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
-#include "clang/AST/Type.h"
+#include "clang/AST/TypeBase.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SyncScope.h"
@@ -40,6 +40,7 @@
#include <optional>
namespace clang {
+ class AllocSizeAttr;
class APValue;
class ASTContext;
class BlockDecl;
@@ -1369,7 +1370,7 @@ public:
/// If the name was qualified, retrieves the nested-name-specifier
/// that precedes the name. Otherwise, returns NULL.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return getQualifierLoc().getNestedNameSpecifier();
}
@@ -3261,6 +3262,15 @@ public:
setDependence(getDependence() | ExprDependence::TypeValueInstantiation);
}
+ /// Try to get the alloc_size attribute of the callee. May return null.
+ const AllocSizeAttr *getCalleeAllocSizeAttr() const;
+
+ /// Evaluates the total size in bytes allocated by calling a function
+ /// decorated with alloc_size. Returns std::nullopt if the the result cannot
+ /// be evaluated.
+ std::optional<llvm::APInt>
+ evaluateBytesReturnedByAllocSizeCall(const ASTContext &Ctx) const;
+
bool isCallToStdMove() const;
static bool classof(const Stmt *T) {
@@ -3398,7 +3408,7 @@ public:
/// If the member name was qualified, retrieves the
/// nested-name-specifier that precedes the member name. Otherwise, returns
/// NULL.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return getQualifierLoc().getNestedNameSpecifier();
}
@@ -3548,6 +3558,7 @@ public:
QualType T, ExprValueKind VK, Expr *init, bool fileScope)
: Expr(CompoundLiteralExprClass, T, VK, OK_Ordinary),
LParenLoc(lparenloc), TInfoAndScope(tinfo, fileScope), Init(init) {
+ assert(Init && "Init is a nullptr");
setDependence(computeDependence(this));
}
@@ -3577,19 +3588,11 @@ public:
APValue &getStaticValue() const;
SourceLocation getBeginLoc() const LLVM_READONLY {
- // FIXME: Init should never be null.
- if (!Init)
- return SourceLocation();
if (LParenLoc.isInvalid())
return Init->getBeginLoc();
return LParenLoc;
}
- SourceLocation getEndLoc() const LLVM_READONLY {
- // FIXME: Init should never be null.
- if (!Init)
- return SourceLocation();
- return Init->getEndLoc();
- }
+ SourceLocation getEndLoc() const LLVM_READONLY { return Init->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundLiteralExprClass;
@@ -5110,9 +5113,9 @@ public:
"trying to dereference an invalid iterator");
IntegerLiteral *N = EExpr->FakeChildNode;
N->setValue(*EExpr->Ctx,
- llvm::APInt(N->getValue().getBitWidth(),
+ llvm::APInt(N->getBitWidth(),
EExpr->Data->BinaryData->getCodeUnit(CurOffset),
- N->getType()->isSignedIntegerType()));
+ /*Signed=*/true));
// We want to return a reference to the fake child node in the
// EmbedExpr, not the local variable N.
return const_cast<typename BaseTy::reference>(EExpr->FakeChildNode);
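The two new CallExpr helpers compose naturally: a hedged sketch of a caller, using only the declarations in this hunk (the free function itself is hypothetical):

    // Computes the byte count for a call to an alloc_size function.
    std::optional<llvm::APInt> allocBytes(const CallExpr *CE,
                                          const ASTContext &Ctx) {
      if (!CE->getCalleeAllocSizeAttr())
        return std::nullopt; // callee has no alloc_size attribute
      return CE->evaluateBytesReturnedByAllocSizeCall(Ctx);
    }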
diff --git a/clang/include/clang/AST/ExprCXX.h b/clang/include/clang/AST/ExprCXX.h
index 7a26934..9fedb23 100644
--- a/clang/include/clang/AST/ExprCXX.h
+++ b/clang/include/clang/AST/ExprCXX.h
@@ -1712,6 +1712,19 @@ public:
CXXConstructExprBits.IsImmediateEscalating = Set;
}
+ /// Returns the WarnUnusedResultAttr that is declared on the callee
+ /// or its return type declaration, together with a NamedDecl that
+ /// refers to the declaration the attribute is attached to.
+ std::pair<const NamedDecl *, const WarnUnusedResultAttr *>
+ getUnusedResultAttr(const ASTContext &Ctx) const {
+ return getUnusedResultAttrImpl(getConstructor(), getType());
+ }
+
+ /// Returns true if this call expression should warn on unused results.
+ bool hasUnusedResultAttr(const ASTContext &Ctx) const {
+ return getUnusedResultAttr(Ctx).second != nullptr;
+ }
+
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
SourceRange getParenOrBraceRange() const { return ParenOrBraceRange; }
@@ -2781,7 +2794,7 @@ public:
/// If the member name was qualified, retrieves the
/// nested-name-specifier that precedes the member name. Otherwise, returns
/// null.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -3222,7 +3235,7 @@ public:
SourceLocation getNameLoc() const { return NameInfo.getLoc(); }
/// Fetches the nested-name qualifier, if one was given.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -3540,7 +3553,7 @@ public:
/// Retrieve the nested-name-specifier that qualifies this
/// declaration.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -3955,7 +3968,7 @@ public:
}
/// Retrieve the nested-name-specifier that qualifies the member name.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
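With getUnusedResultAttr() mirrored onto CXXConstructExpr, diagnostics can treat constructions like calls. A hedged sketch; the surrounding warning machinery is hypothetical:

    void checkDiscardedConstruction(const CXXConstructExpr *E,
                                    const ASTContext &Ctx) {
      if (auto [D, A] = E->getUnusedResultAttr(Ctx); A) {
        // D is the declaration the attribute is attached to; emit the
        // unused-result diagnostic against it here.
        (void)D;
      }
    }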
diff --git a/clang/include/clang/AST/JSONNodeDumper.h b/clang/include/clang/AST/JSONNodeDumper.h
index 570662b..8640780 100644
--- a/clang/include/clang/AST/JSONNodeDumper.h
+++ b/clang/include/clang/AST/JSONNodeDumper.h
@@ -240,7 +240,6 @@ public:
void VisitInjectedClassNameType(const InjectedClassNameType *ICNT);
void VisitObjCInterfaceType(const ObjCInterfaceType *OIT);
void VisitPackExpansionType(const PackExpansionType *PET);
- void VisitElaboratedType(const ElaboratedType *ET);
void VisitMacroQualifiedType(const MacroQualifiedType *MQT);
void VisitMemberPointerType(const MemberPointerType *MPT);
diff --git a/clang/include/clang/AST/NestedNameSpecifier.h b/clang/include/clang/AST/NestedNameSpecifier.h
index 1614f9d..f198a8b 100644
--- a/clang/include/clang/AST/NestedNameSpecifier.h
+++ b/clang/include/clang/AST/NestedNameSpecifier.h
@@ -6,507 +6,266 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the NestedNameSpecifier class, which represents
-// a C++ nested-name-specifier.
+// This file completes the definition of the NestedNameSpecifier class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_NESTEDNAMESPECIFIER_H
#define LLVM_CLANG_AST_NESTEDNAMESPECIFIER_H
-#include "clang/AST/DependenceFlags.h"
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/SourceLocation.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
#include "llvm/ADT/DenseMapInfo.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/PointerIntPair.h"
-#include "llvm/Support/Compiler.h"
-#include <cstdint>
-#include <cstdlib>
-#include <utility>
namespace clang {
-class ASTContext;
-class CXXRecordDecl;
-class IdentifierInfo;
-class LangOptions;
-class NamespaceBaseDecl;
-struct PrintingPolicy;
-class Type;
-class TypeLoc;
-
-/// Represents a C++ nested name specifier, such as
-/// "\::std::vector<int>::".
-///
-/// C++ nested name specifiers are the prefixes to qualified
-/// names. For example, "foo::" in "foo::x" is a nested name
-/// specifier. Nested name specifiers are made up of a sequence of
-/// specifiers, each of which can be a namespace, type, identifier
-/// (for dependent names), decltype specifier, or the global specifier ('::').
-/// The last two specifiers can only appear at the start of a
-/// nested-namespace-specifier.
-class NestedNameSpecifier : public llvm::FoldingSetNode {
- /// Enumeration describing
- enum StoredSpecifierKind {
- StoredIdentifier = 0,
- StoredDecl = 1,
- StoredTypeSpec = 2
- };
-
- /// The nested name specifier that precedes this nested name
- /// specifier.
- ///
- /// The pointer is the nested-name-specifier that precedes this
- /// one. The integer stores one of the first four values of type
- /// SpecifierKind.
- llvm::PointerIntPair<NestedNameSpecifier *, 2, StoredSpecifierKind> Prefix;
-
- /// The last component in the nested name specifier, which
- /// can be an identifier, a declaration, or a type.
- ///
- /// When the pointer is NULL, this specifier represents the global
- /// specifier '::'. Otherwise, the pointer is one of
- /// IdentifierInfo*, Namespace*, or Type*, depending on the kind of
- /// specifier as encoded within the prefix.
- void* Specifier = nullptr;
-
-public:
- /// The kind of specifier that completes this nested name
- /// specifier.
- enum SpecifierKind {
- /// An identifier, stored as an IdentifierInfo*.
- Identifier,
-
- /// A namespace-like entity, stored as a NamespaceBaseDecl*.
- Namespace,
-
- /// A type, stored as a Type*.
- TypeSpec,
-
- /// The global specifier '::'. There is no stored value.
- Global,
-
- /// Microsoft's '__super' specifier, stored as a CXXRecordDecl* of
- /// the class it appeared in.
- Super
- };
-
-private:
- /// Builds the global specifier.
- NestedNameSpecifier() : Prefix(nullptr, StoredIdentifier) {}
-
- /// Copy constructor used internally to clone nested name
- /// specifiers.
- NestedNameSpecifier(const NestedNameSpecifier &Other) = default;
-
- /// Either find or insert the given nested name specifier
- /// mockup in the given context.
- static NestedNameSpecifier *FindOrInsert(const ASTContext &Context,
- const NestedNameSpecifier &Mockup);
-
-public:
- NestedNameSpecifier &operator=(const NestedNameSpecifier &) = delete;
-
- /// Builds a specifier combining a prefix and an identifier.
- ///
- /// The prefix must be dependent, since nested name specifiers
- /// referencing an identifier are only permitted when the identifier
- /// cannot be resolved.
- static NestedNameSpecifier *Create(const ASTContext &Context,
- NestedNameSpecifier *Prefix,
- const IdentifierInfo *II);
-
- /// Builds a nested name specifier that names a namespace or namespace alias.
- static NestedNameSpecifier *Create(const ASTContext &Context,
- NestedNameSpecifier *Prefix,
- const NamespaceBaseDecl *NS);
-
- /// Builds a nested name specifier that names a type.
- static NestedNameSpecifier *
- Create(const ASTContext &Context, NestedNameSpecifier *Prefix, const Type *T);
-
- /// Builds a specifier that consists of just an identifier.
- ///
- /// The nested-name-specifier is assumed to be dependent, but has no
- /// prefix because the prefix is implied by something outside of the
- /// nested name specifier, e.g., in "x->Base::f", the "x" has a dependent
- /// type.
- static NestedNameSpecifier *Create(const ASTContext &Context,
- const IdentifierInfo *II);
-
- /// Returns the nested name specifier representing the global
- /// scope.
- static NestedNameSpecifier *GlobalSpecifier(const ASTContext &Context);
-
- /// Returns the nested name specifier representing the __super scope
- /// for the given CXXRecordDecl.
- static NestedNameSpecifier *SuperSpecifier(const ASTContext &Context,
- CXXRecordDecl *RD);
-
- /// Return the prefix of this nested name specifier.
- ///
- /// The prefix contains all of the parts of the nested name
- /// specifier that precede this current specifier. For example, for a
- /// nested name specifier that represents "foo::bar::", the current
- /// specifier will contain "bar::" and the prefix will contain
- /// "foo::".
- NestedNameSpecifier *getPrefix() const { return Prefix.getPointer(); }
-
- /// Determine what kind of nested name specifier is stored.
- SpecifierKind getKind() const;
-
- /// Retrieve the identifier stored in this nested name
- /// specifier.
- IdentifierInfo *getAsIdentifier() const {
- if (Prefix.getInt() == StoredIdentifier)
- return (IdentifierInfo *)Specifier;
-
- return nullptr;
+auto NestedNameSpecifier::getKind() const -> Kind {
+ if (!isStoredKind()) {
+ switch (getFlagKind()) {
+ case FlagKind::Null:
+ return Kind::Null;
+ case FlagKind::Global:
+ return Kind::Global;
+ case FlagKind::Invalid:
+ llvm_unreachable("use of invalid NestedNameSpecifier");
+ }
+ llvm_unreachable("unhandled FlagKind");
}
+ switch (auto [K, Ptr] = getStored(); K) {
+ case StoredKind::Type:
+ return Kind::Type;
+ case StoredKind::NamespaceWithGlobal:
+ case StoredKind::NamespaceWithNamespace:
+ return Kind::Namespace;
+ case StoredKind::NamespaceOrSuper:
+ switch (static_cast<const Decl *>(Ptr)->getKind()) {
+ case Decl::Namespace:
+ case Decl::NamespaceAlias:
+ return Kind::Namespace;
+ case Decl::CXXRecord:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::ClassTemplatePartialSpecialization:
+ return Kind::MicrosoftSuper;
+ default:
+ llvm_unreachable("unexpected decl kind");
+ }
+ }
+ llvm_unreachable("unknown StoredKind");
+}
- /// Retrieve the namespace or namespace alias stored in this nested name
- /// specifier.
- NamespaceBaseDecl *getAsNamespace() const;
-
- /// Retrieve the record declaration stored in this nested name
- /// specifier.
- CXXRecordDecl *getAsRecordDecl() const;
-
- /// Retrieve the type stored in this nested name specifier.
- const Type *getAsType() const {
- if (Prefix.getInt() == StoredTypeSpec)
- return (const Type *)Specifier;
+NestedNameSpecifier::NestedNameSpecifier(const Type *T)
+ : NestedNameSpecifier({StoredKind::Type, T}) {
+ assert(getKind() == Kind::Type);
+}
- return nullptr;
+auto NestedNameSpecifier::MakeNamespacePtrKind(
+ const ASTContext &Ctx, const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix) -> PtrKind {
+ switch (Prefix.getKind()) {
+ case Kind::Null:
+ return {StoredKind::NamespaceOrSuper, Namespace};
+ case Kind::Global:
+ return {StoredKind::NamespaceWithGlobal, Namespace};
+ case Kind::Namespace:
+ return {StoredKind::NamespaceWithNamespace,
+ MakeNamespaceAndPrefixStorage(Ctx, Namespace, Prefix)};
+ case Kind::MicrosoftSuper:
+ case Kind::Type:
+ llvm_unreachable("invalid prefix for namespace");
}
+ llvm_unreachable("unhandled kind");
+}
- /// Fully translate this nested name specifier to a type.
- /// Unlike getAsType, this will convert this entire nested
- /// name specifier chain into its equivalent type.
- const Type *translateToType(const ASTContext &Context) const;
+/// Builds a nested name specifier that names a namespace.
+NestedNameSpecifier::NestedNameSpecifier(const ASTContext &Ctx,
+ const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix)
+ : NestedNameSpecifier(MakeNamespacePtrKind(Ctx, Namespace, Prefix)) {
+ assert(getKind() == Kind::Namespace);
+}
- NestedNameSpecifierDependence getDependence() const;
+/// Builds a nested name specifier that names a class through microsoft's
+/// __super specifier.
+NestedNameSpecifier::NestedNameSpecifier(CXXRecordDecl *RD)
+ : NestedNameSpecifier({StoredKind::NamespaceOrSuper, RD}) {
+ assert(getKind() == Kind::MicrosoftSuper);
+}
- /// Whether this nested name specifier refers to a dependent
- /// type or not.
- bool isDependent() const;
+CXXRecordDecl *NestedNameSpecifier::getAsRecordDecl() const {
+ switch (getKind()) {
+ case Kind::MicrosoftSuper:
+ return getAsMicrosoftSuper();
+ case Kind::Type:
+ return getAsType()->getAsCXXRecordDecl();
+ case Kind::Global:
+ case Kind::Namespace:
+ case Kind::Null:
+ return nullptr;
+ }
+ llvm_unreachable("Invalid NNS Kind!");
+}
- /// Whether this nested name specifier involves a template
- /// parameter.
- bool isInstantiationDependent() const;
+NestedNameSpecifier NestedNameSpecifier::getCanonical() const {
+ switch (getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ // These are canonical and unique.
+ return *this;
+ case NestedNameSpecifier::Kind::Namespace: {
+ // A namespace is canonical; build a nested-name-specifier with
+ // this namespace and no prefix.
+ const NamespaceBaseDecl *ND = getAsNamespaceAndPrefix().Namespace;
+ return NestedNameSpecifier(
+ {StoredKind::NamespaceOrSuper, ND->getNamespace()->getCanonicalDecl()});
+ }
+ case NestedNameSpecifier::Kind::Type:
+ return NestedNameSpecifier(
+ getAsType()->getCanonicalTypeInternal().getTypePtr());
+ }
+ llvm_unreachable("unhandled kind");
+}
- /// Whether this nested-name-specifier contains an unexpanded
- /// parameter pack (for C++11 variadic templates).
- bool containsUnexpandedParameterPack() const;
+bool NestedNameSpecifier::isCanonical() const {
+ return *this == getCanonical();
+}
- /// Whether this nested name specifier contains an error.
- bool containsErrors() const;
+TypeLoc NestedNameSpecifierLoc::castAsTypeLoc() const {
+ return TypeLoc(Qualifier.getAsType(), LoadPointer(/*Offset=*/0));
+}
- /// Print this nested name specifier to the given output stream. If
- /// `ResolveTemplateArguments` is true, we'll print actual types, e.g.
- /// `ns::SomeTemplate<int, MyClass>` instead of
- /// `ns::SomeTemplate<Container::value_type, T>`.
- void print(raw_ostream &OS, const PrintingPolicy &Policy,
- bool ResolveTemplateArguments = false,
- bool PrintFinalScopeResOp = true) const;
+TypeLoc NestedNameSpecifierLoc::getAsTypeLoc() const {
+ if (Qualifier.getKind() != NestedNameSpecifier::Kind::Type)
+ return TypeLoc();
+ return castAsTypeLoc();
+}
- void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddPointer(Prefix.getOpaqueValue());
- ID.AddPointer(Specifier);
+unsigned
+NestedNameSpecifierLoc::getLocalDataLength(NestedNameSpecifier Qualifier) {
+ // Location of the trailing '::'.
+ unsigned Length = sizeof(SourceLocation::UIntTy);
+
+ switch (Qualifier.getKind()) {
+ case NestedNameSpecifier::Kind::Global:
+ // Nothing more to add.
+ break;
+
+ case NestedNameSpecifier::Kind::Namespace:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ // The location of the identifier or namespace name.
+ Length += sizeof(SourceLocation::UIntTy);
+ break;
+
+ case NestedNameSpecifier::Kind::Type:
+ // The "void*" that points at the TypeLoc data.
+ // Note: the 'template' keyword is part of the TypeLoc.
+ Length += sizeof(void *);
+ break;
+
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("Expected a non-NULL qualifier");
}
- /// Dump the nested name specifier to standard output to aid
- /// in debugging.
- void dump(const LangOptions &LO) const;
- void dump() const;
- void dump(llvm::raw_ostream &OS) const;
- void dump(llvm::raw_ostream &OS, const LangOptions &LO) const;
-};
+ return Length;
+}
-/// A C++ nested-name-specifier augmented with source location
-/// information.
-class NestedNameSpecifierLoc {
- NestedNameSpecifier *Qualifier = nullptr;
- void *Data = nullptr;
-
- /// Determines the data length for the last component in the
- /// given nested-name-specifier.
- static unsigned getLocalDataLength(NestedNameSpecifier *Qualifier);
-
- /// Determines the data length for the entire
- /// nested-name-specifier.
- static unsigned getDataLength(NestedNameSpecifier *Qualifier);
-
-public:
- /// Construct an empty nested-name-specifier.
- NestedNameSpecifierLoc() = default;
-
- /// Construct a nested-name-specifier with source location information
- /// from
- NestedNameSpecifierLoc(NestedNameSpecifier *Qualifier, void *Data)
- : Qualifier(Qualifier), Data(Data) {}
-
- /// Evaluates true when this nested-name-specifier location is
- /// non-empty.
- explicit operator bool() const { return Qualifier; }
-
- /// Evaluates true when this nested-name-specifier location is
- /// non-empty.
- bool hasQualifier() const { return Qualifier; }
-
- /// Retrieve the nested-name-specifier to which this instance
- /// refers.
- NestedNameSpecifier *getNestedNameSpecifier() const {
- return Qualifier;
- }
+NamespaceAndPrefixLoc NestedNameSpecifierLoc::castAsNamespaceAndPrefix() const {
+ auto [Namespace, Prefix] = Qualifier.getAsNamespaceAndPrefix();
+ return {Namespace, NestedNameSpecifierLoc(Prefix, Data)};
+}
- /// Retrieve the opaque pointer that refers to source-location data.
- void *getOpaqueData() const { return Data; }
-
- /// Retrieve the source range covering the entirety of this
- /// nested-name-specifier.
- ///
- /// For example, if this instance refers to a nested-name-specifier
- /// \c \::std::vector<int>::, the returned source range would cover
- /// from the initial '::' to the last '::'.
- SourceRange getSourceRange() const LLVM_READONLY {
- return SourceRange(getBeginLoc(), getEndLoc());
- }
+NamespaceAndPrefixLoc NestedNameSpecifierLoc::getAsNamespaceAndPrefix() const {
+ if (Qualifier.getKind() != NestedNameSpecifier::Kind::Namespace)
+ return {};
+ return castAsNamespaceAndPrefix();
+}
- /// Retrieve the source range covering just the last part of
- /// this nested-name-specifier, not including the prefix.
- ///
- /// For example, if this instance refers to a nested-name-specifier
- /// \c \::std::vector<int>::, the returned source range would cover
- /// from "vector" to the last '::'.
- SourceRange getLocalSourceRange() const;
-
- /// Retrieve the location of the beginning of this
- /// nested-name-specifier.
- SourceLocation getBeginLoc() const {
- if (!Qualifier)
- return SourceLocation();
-
- NestedNameSpecifierLoc First = *this;
- while (NestedNameSpecifierLoc Prefix = First.getPrefix())
- First = Prefix;
- return First.getLocalSourceRange().getBegin();
+unsigned NestedNameSpecifierLoc::getDataLength(NestedNameSpecifier Qualifier) {
+ unsigned Length = 0;
+ for (; Qualifier; Qualifier = Qualifier.getAsNamespaceAndPrefix().Prefix) {
+ Length += getLocalDataLength(Qualifier);
+ if (Qualifier.getKind() != NestedNameSpecifier::Kind::Namespace)
+ break;
}
+ return Length;
+}
- /// Retrieve the location of the end of this
- /// nested-name-specifier.
- SourceLocation getEndLoc() const { return getLocalSourceRange().getEnd(); }
+unsigned NestedNameSpecifierLoc::getDataLength() const {
+ return getDataLength(Qualifier);
+}
- /// Retrieve the location of the beginning of this
- /// component of the nested-name-specifier.
- SourceLocation getLocalBeginLoc() const {
- return getLocalSourceRange().getBegin();
+SourceRange NestedNameSpecifierLoc::getLocalSourceRange() const {
+ switch (auto Kind = Qualifier.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ return SourceRange();
+ case NestedNameSpecifier::Kind::Global:
+ return LoadSourceLocation(/*Offset=*/0);
+ case NestedNameSpecifier::Kind::Namespace:
+ case NestedNameSpecifier::Kind::MicrosoftSuper: {
+ unsigned Offset =
+ Kind == NestedNameSpecifier::Kind::Namespace
+ ? getDataLength(Qualifier.getAsNamespaceAndPrefix().Prefix)
+ : 0;
+ return SourceRange(
+ LoadSourceLocation(Offset),
+ LoadSourceLocation(Offset + sizeof(SourceLocation::UIntTy)));
}
-
- /// Retrieve the location of the end of this component of the
- /// nested-name-specifier.
- SourceLocation getLocalEndLoc() const {
- return getLocalSourceRange().getEnd();
+ case NestedNameSpecifier::Kind::Type: {
+ // The "void*" that points at the TypeLoc data.
+ // Note: the 'template' keyword is part of the TypeLoc.
+ void *TypeData = LoadPointer(/*Offset=*/0);
+ TypeLoc TL(Qualifier.getAsType(), TypeData);
+ return SourceRange(TL.getBeginLoc(), LoadSourceLocation(sizeof(void *)));
}
-
- /// Return the prefix of this nested-name-specifier.
- ///
- /// For example, if this instance refers to a nested-name-specifier
- /// \c \::std::vector<int>::, the prefix is \c \::std::. Note that the
- /// returned prefix may be empty, if this is the first component of
- /// the nested-name-specifier.
- NestedNameSpecifierLoc getPrefix() const {
- if (!Qualifier)
- return *this;
-
- return NestedNameSpecifierLoc(Qualifier->getPrefix(), Data);
}
- /// For a nested-name-specifier that refers to a type,
- /// retrieve the type with source-location information.
- TypeLoc getTypeLoc() const;
+ llvm_unreachable("Invalid NNS Kind!");
+}
- /// Determines the data length for the entire
- /// nested-name-specifier.
- unsigned getDataLength() const { return getDataLength(Qualifier); }
+SourceRange NestedNameSpecifierLoc::getSourceRange() const {
+ return SourceRange(getBeginLoc(), getEndLoc());
+}
- friend bool operator==(NestedNameSpecifierLoc X,
- NestedNameSpecifierLoc Y) {
- return X.Qualifier == Y.Qualifier && X.Data == Y.Data;
- }
+SourceLocation NestedNameSpecifierLoc::getEndLoc() const {
+ return getLocalSourceRange().getEnd();
+}
- friend bool operator!=(NestedNameSpecifierLoc X,
- NestedNameSpecifierLoc Y) {
- return !(X == Y);
- }
-};
+/// Retrieve the location of the beginning of this
+/// component of the nested-name-specifier.
+SourceLocation NestedNameSpecifierLoc::getLocalBeginLoc() const {
+ return getLocalSourceRange().getBegin();
+}
-/// Class that aids in the construction of nested-name-specifiers along
-/// with source-location information for all of the components of the
+/// Retrieve the location of the end of this component of the
/// nested-name-specifier.
-class NestedNameSpecifierLocBuilder {
- /// The current representation of the nested-name-specifier we're
- /// building.
- NestedNameSpecifier *Representation = nullptr;
-
- /// Buffer used to store source-location information for the
- /// nested-name-specifier.
- ///
- /// Note that we explicitly manage the buffer (rather than using a
- /// SmallVector) because \c Declarator expects it to be possible to memcpy()
- /// a \c CXXScopeSpec, and CXXScopeSpec uses a NestedNameSpecifierLocBuilder.
- char *Buffer = nullptr;
-
- /// The size of the buffer used to store source-location information
- /// for the nested-name-specifier.
- unsigned BufferSize = 0;
-
- /// The capacity of the buffer used to store source-location
- /// information for the nested-name-specifier.
- unsigned BufferCapacity = 0;
-
-public:
- NestedNameSpecifierLocBuilder() = default;
- NestedNameSpecifierLocBuilder(const NestedNameSpecifierLocBuilder &Other);
-
- NestedNameSpecifierLocBuilder &
- operator=(const NestedNameSpecifierLocBuilder &Other);
-
- ~NestedNameSpecifierLocBuilder() {
- if (BufferCapacity)
- free(Buffer);
- }
+SourceLocation NestedNameSpecifierLoc::getLocalEndLoc() const {
+ return getLocalSourceRange().getEnd();
+}
- /// Retrieve the representation of the nested-name-specifier.
- NestedNameSpecifier *getRepresentation() const { return Representation; }
-
- /// Extend the current nested-name-specifier by another
- /// nested-name-specifier component of the form 'type::'.
- ///
- /// \param Context The AST context in which this nested-name-specifier
- /// resides.
- ///
- /// \param TL The TypeLoc that describes the type preceding the '::'.
- ///
- /// \param ColonColonLoc The location of the trailing '::'.
- void Extend(ASTContext &Context, TypeLoc TL, SourceLocation ColonColonLoc);
-
- /// Extend the current nested-name-specifier by another
- /// nested-name-specifier component of the form 'identifier::'.
- ///
- /// \param Context The AST context in which this nested-name-specifier
- /// resides.
- ///
- /// \param Identifier The identifier.
- ///
- /// \param IdentifierLoc The location of the identifier.
- ///
- /// \param ColonColonLoc The location of the trailing '::'.
- void Extend(ASTContext &Context, IdentifierInfo *Identifier,
- SourceLocation IdentifierLoc, SourceLocation ColonColonLoc);
-
- /// Extend the current nested-name-specifier by another
- /// nested-name-specifier component of the form 'namespace::'.
- ///
- /// \param Context The AST context in which this nested-name-specifier
- /// resides.
- ///
- /// \param Namespace The namespace or namespace alias.
- ///
- /// \param NamespaceLoc The location of the namespace name or the namespace
- // alias.
- ///
- /// \param ColonColonLoc The location of the trailing '::'.
- void Extend(ASTContext &Context, NamespaceBaseDecl *Namespace,
- SourceLocation NamespaceLoc, SourceLocation ColonColonLoc);
-
- /// Turn this (empty) nested-name-specifier into the global
- /// nested-name-specifier '::'.
- void MakeGlobal(ASTContext &Context, SourceLocation ColonColonLoc);
-
- /// Turns this (empty) nested-name-specifier into '__super'
- /// nested-name-specifier.
- ///
- /// \param Context The AST context in which this nested-name-specifier
- /// resides.
- ///
- /// \param RD The declaration of the class in which nested-name-specifier
- /// appeared.
- ///
- /// \param SuperLoc The location of the '__super' keyword.
- /// name.
- ///
- /// \param ColonColonLoc The location of the trailing '::'.
- void MakeSuper(ASTContext &Context, CXXRecordDecl *RD,
- SourceLocation SuperLoc, SourceLocation ColonColonLoc);
-
- /// Make a new nested-name-specifier from incomplete source-location
- /// information.
- ///
- /// This routine should be used very, very rarely, in cases where we
- /// need to synthesize a nested-name-specifier. Most code should instead use
- /// \c Adopt() with a proper \c NestedNameSpecifierLoc.
- void MakeTrivial(ASTContext &Context, NestedNameSpecifier *Qualifier,
- SourceRange R);
-
- /// Adopt an existing nested-name-specifier (with source-range
- /// information).
- void Adopt(NestedNameSpecifierLoc Other);
-
- /// Retrieve the source range covered by this nested-name-specifier.
- SourceRange getSourceRange() const LLVM_READONLY {
- return NestedNameSpecifierLoc(Representation, Buffer).getSourceRange();
- }
+SourceRange NestedNameSpecifierLocBuilder::getSourceRange() const {
+ return NestedNameSpecifierLoc(Representation, Buffer).getSourceRange();
+}
- /// Retrieve a nested-name-specifier with location information,
- /// copied into the given AST context.
- ///
- /// \param Context The context into which this nested-name-specifier will be
- /// copied.
- NestedNameSpecifierLoc getWithLocInContext(ASTContext &Context) const;
-
- /// Retrieve a nested-name-specifier with location
- /// information based on the information in this builder.
- ///
- /// This loc will contain references to the builder's internal data and may
- /// be invalidated by any change to the builder.
- NestedNameSpecifierLoc getTemporary() const {
- return NestedNameSpecifierLoc(Representation, Buffer);
- }
+} // namespace clang
+
+namespace llvm {
+
+template <> struct DenseMapInfo<clang::NestedNameSpecifier> {
+ static clang::NestedNameSpecifier getEmptyKey() { return std::nullopt; }
- /// Clear out this builder, and prepare it to build another
- /// nested-name-specifier with source-location information.
- void Clear() {
- Representation = nullptr;
- BufferSize = 0;
+ static clang::NestedNameSpecifier getTombstoneKey() {
+ return clang::NestedNameSpecifier::getInvalid();
}
- /// Retrieve the underlying buffer.
- ///
- /// \returns A pair containing a pointer to the buffer of source-location
- /// data and the size of the source-location data that resides in that
- /// buffer.
- std::pair<char *, unsigned> getBuffer() const {
- return std::make_pair(Buffer, BufferSize);
+ static unsigned getHashValue(const clang::NestedNameSpecifier &V) {
+ return hash_combine(V.getAsVoidPointer());
}
};
-/// Insertion operator for diagnostics. This allows sending
-/// NestedNameSpecifiers into a diagnostic with <<.
-inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
- NestedNameSpecifier *NNS) {
- DB.AddTaggedVal(reinterpret_cast<uint64_t>(NNS),
- DiagnosticsEngine::ak_nestednamespec);
- return DB;
-}
-
-} // namespace clang
-
-namespace llvm {
-
template <> struct DenseMapInfo<clang::NestedNameSpecifierLoc> {
- using FirstInfo = DenseMapInfo<clang::NestedNameSpecifier *>;
+ using FirstInfo = DenseMapInfo<clang::NestedNameSpecifier>;
using SecondInfo = DenseMapInfo<void *>;
static clang::NestedNameSpecifierLoc getEmptyKey() {
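NestedNameSpecifier is now a value type rather than a uniqued pointer, so null checks and member access change shape throughout. A minimal migration sketch, assuming the API above (the function is illustrative):

    void printQualifier(NestedNameSpecifier NNS, raw_ostream &OS,
                        const PrintingPolicy &Policy) {
      if (!NNS)              // was: if (NNSPtr == nullptr)
        return;
      NNS.print(OS, Policy); // was: NNSPtr->print(OS, Policy);
    }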
diff --git a/clang/include/clang/AST/NestedNameSpecifierBase.h b/clang/include/clang/AST/NestedNameSpecifierBase.h
new file mode 100644
index 0000000..73c60ba
--- /dev/null
+++ b/clang/include/clang/AST/NestedNameSpecifierBase.h
@@ -0,0 +1,586 @@
+//===- NestedNameSpecifierBase.h - C++ nested name specifiers --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the NestedNameSpecifier class, which represents
+// a C++ nested-name-specifier.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_NESTEDNAMESPECIFIERBASE_H
+#define LLVM_CLANG_AST_NESTEDNAMESPECIFIERBASE_H
+
+#include "clang/AST/DependenceFlags.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cstdint>
+#include <cstdlib>
+#include <utility>
+
+namespace clang {
+
+class ASTContext;
+class CXXRecordDecl;
+class NamedDecl;
+class IdentifierInfo;
+class LangOptions;
+class NamespaceBaseDecl;
+struct PrintingPolicy;
+class Type;
+class TypeLoc;
+
+struct NamespaceAndPrefix;
+struct alignas(8) NamespaceAndPrefixStorage;
+
+/// Represents a C++ nested name specifier, such as
+/// "\::std::vector<int>::".
+///
+/// C++ nested name specifiers are the prefixes to qualified
+/// names. For example, "foo::" in "foo::x" is a nested name
+/// specifier. Nested name specifiers are made up of a sequence of
+/// specifiers, each of which can be a namespace, type, decltype specifier, or
+/// the global specifier ('::'). The last two specifiers can only appear at the
+/// start of a nested-namespace-specifier.
+class NestedNameSpecifier {
+ enum class FlagKind { Null, Global, Invalid };
+ enum class StoredKind {
+ Type,
+ NamespaceOrSuper,
+ NamespaceWithGlobal,
+ NamespaceWithNamespace
+ };
+ static constexpr uintptr_t FlagBits = 2, FlagMask = (1u << FlagBits) - 1u,
+ FlagOffset = 1, PtrOffset = FlagBits + FlagOffset,
+ PtrMask = (1u << PtrOffset) - 1u;
+
+ uintptr_t StoredOrFlag;
+
+ explicit NestedNameSpecifier(uintptr_t StoredOrFlag)
+ : StoredOrFlag(StoredOrFlag) {}
+ struct PtrKind {
+ StoredKind SK;
+ const void *Ptr;
+ };
+ explicit NestedNameSpecifier(PtrKind PK)
+ : StoredOrFlag(uintptr_t(PK.Ptr) | (uintptr_t(PK.SK) << FlagOffset)) {
+ assert(PK.Ptr != nullptr);
+ assert((uintptr_t(PK.Ptr) & ((1u << PtrOffset) - 1u)) == 0);
+ assert((uintptr_t(PK.Ptr) >> PtrOffset) != 0);
+ }
+
+ explicit constexpr NestedNameSpecifier(FlagKind K)
+ : StoredOrFlag(uintptr_t(K) << FlagOffset) {}
+
+ bool isStoredKind() const { return (StoredOrFlag >> PtrOffset) != 0; }
+
+ std::pair<StoredKind, const void *> getStored() const {
+ assert(isStoredKind());
+ return {StoredKind(StoredOrFlag >> FlagOffset & FlagMask),
+ reinterpret_cast<const void *>(StoredOrFlag & ~PtrMask)};
+ }
+
+ FlagKind getFlagKind() const {
+ assert(!isStoredKind());
+ return FlagKind(StoredOrFlag >> FlagOffset);
+ }
+
+ static const NamespaceAndPrefixStorage *
+ MakeNamespaceAndPrefixStorage(const ASTContext &Ctx,
+ const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix);
+ static inline PtrKind MakeNamespacePtrKind(const ASTContext &Ctx,
+ const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix);
+
+public:
+ static constexpr NestedNameSpecifier getInvalid() {
+ return NestedNameSpecifier(FlagKind::Invalid);
+ }
+
+ static constexpr NestedNameSpecifier getGlobal() {
+ return NestedNameSpecifier(FlagKind::Global);
+ }
+
+ NestedNameSpecifier() : NestedNameSpecifier(FlagKind::Invalid) {}
+
+ /// The kind of specifier that completes this nested name
+ /// specifier.
+ enum class Kind {
+ /// Empty.
+ Null,
+
+ /// The global specifier '::'. There is no stored value.
+ Global,
+
+ /// A type, stored as a Type*.
+ Type,
+
+ /// A namespace-like entity, stored as a NamespaceBaseDecl*.
+ Namespace,
+
+ /// Microsoft's '__super' specifier, stored as a CXXRecordDecl* of
+ /// the class it appeared in.
+ MicrosoftSuper,
+ };
+
+ inline Kind getKind() const;
+
+ NestedNameSpecifier(std::nullopt_t) : StoredOrFlag(0) {}
+
+ explicit inline NestedNameSpecifier(const Type *T);
+
+ /// Builds a nested name specifier that names a namespace.
+ inline NestedNameSpecifier(const ASTContext &Ctx,
+ const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix);
+
+ /// Builds a nested name specifier that names a class through microsoft's
+ /// __super specifier.
+ explicit inline NestedNameSpecifier(CXXRecordDecl *RD);
+
+ explicit operator bool() const { return StoredOrFlag != 0; }
+
+ void *getAsVoidPointer() const {
+ return reinterpret_cast<void *>(StoredOrFlag);
+ }
+ static NestedNameSpecifier getFromVoidPointer(const void *Ptr) {
+ return NestedNameSpecifier(reinterpret_cast<uintptr_t>(Ptr));
+ }
+
+ const Type *getAsType() const {
+ auto [Kind, Ptr] = getStored();
+ assert(Kind == StoredKind::Type);
+ assert(Ptr != nullptr);
+ return static_cast<const Type *>(Ptr);
+ }
+
+ inline NamespaceAndPrefix getAsNamespaceAndPrefix() const;
+
+ CXXRecordDecl *getAsMicrosoftSuper() const {
+ auto [Kind, Ptr] = getStored();
+ assert(Kind == StoredKind::NamespaceOrSuper);
+ assert(Ptr != nullptr);
+ return static_cast<CXXRecordDecl *>(const_cast<void *>(Ptr));
+ }
+
+ /// Retrieve the record declaration stored in this nested name
+ /// specifier, or null.
+ inline CXXRecordDecl *getAsRecordDecl() const;
+
+ friend bool operator==(NestedNameSpecifier LHS, NestedNameSpecifier RHS) {
+ return LHS.StoredOrFlag == RHS.StoredOrFlag;
+ }
+ friend bool operator!=(NestedNameSpecifier LHS, NestedNameSpecifier RHS) {
+ return LHS.StoredOrFlag != RHS.StoredOrFlag;
+ }
+
+ /// Retrieves the "canonical" nested name specifier for a
+ /// given nested name specifier.
+ ///
+ /// The canonical nested name specifier is a nested name specifier
+ /// that uniquely identifies a type or namespace within the type
+ /// system. For example, given:
+ ///
+ /// \code
+ /// namespace N {
+ /// struct S {
+ /// template<typename T> struct X { typename T* type; };
+ /// };
+ /// }
+ ///
+ /// template<typename T> struct Y {
+ /// typename N::S::X<T>::type member;
+ /// };
+ /// \endcode
+ ///
+ /// Here, the nested-name-specifier for N::S::X<T>:: will be
+ /// S::X<template-param-0-0>, since 'S' and 'X' are uniquely defined
+ /// by declarations in the type system and the canonical type for
+ /// the template type parameter 'T' is template-param-0-0.
+ inline NestedNameSpecifier getCanonical() const;
+
+ /// Whether this nested name specifier is canonical.
+ inline bool isCanonical() const;
+
+ /// Whether this nested name specifier starts with a '::'.
+ bool isFullyQualified() const;
+
+ NestedNameSpecifierDependence getDependence() const;
+
+ /// Whether this nested name specifier refers to a dependent
+ /// type or not.
+ bool isDependent() const {
+ return getDependence() & NestedNameSpecifierDependence::Dependent;
+ }
+
+ /// Whether this nested name specifier involves a template
+ /// parameter.
+ bool isInstantiationDependent() const {
+ return getDependence() & NestedNameSpecifierDependence::Instantiation;
+ }
+
+ /// Whether this nested-name-specifier contains an unexpanded
+ /// parameter pack (for C++11 variadic templates).
+ bool containsUnexpandedParameterPack() const {
+ return getDependence() & NestedNameSpecifierDependence::UnexpandedPack;
+ }
+
+ /// Whether this nested name specifier contains an error.
+ bool containsErrors() const {
+ return getDependence() & NestedNameSpecifierDependence::Error;
+ }
+
+ /// Print this nested name specifier to the given output stream. If
+ /// `ResolveTemplateArguments` is true, we'll print actual types, e.g.
+ /// `ns::SomeTemplate<int, MyClass>` instead of
+ /// `ns::SomeTemplate<Container::value_type, T>`.
+ void print(raw_ostream &OS, const PrintingPolicy &Policy,
+ bool ResolveTemplateArguments = false,
+ bool PrintFinalScopeResOp = true) const;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(StoredOrFlag);
+ }
+
+ /// Dump the nested name specifier to aid in debugging.
+ void dump(llvm::raw_ostream *OS = nullptr,
+ const LangOptions *LO = nullptr) const;
+ void dump(const LangOptions &LO) const;
+ void dump(llvm::raw_ostream &OS) const;
+ void dump(llvm::raw_ostream &OS, const LangOptions &LO) const;
+
+ static constexpr auto NumLowBitsAvailable = FlagOffset;
+};
+
+struct NamespaceAndPrefix {
+ const NamespaceBaseDecl *Namespace;
+ NestedNameSpecifier Prefix;
+};
+
+struct alignas(8) NamespaceAndPrefixStorage : NamespaceAndPrefix,
+ llvm::FoldingSetNode {
+ NamespaceAndPrefixStorage(const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix)
+ : NamespaceAndPrefix{Namespace, Prefix} {}
+ void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, Namespace, Prefix); }
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix) {
+ ID.AddPointer(Namespace);
+ Prefix.Profile(ID);
+ }
+};
+
+NamespaceAndPrefix NestedNameSpecifier::getAsNamespaceAndPrefix() const {
+ auto [Kind, Ptr] = getStored();
+ switch (Kind) {
+ case StoredKind::NamespaceOrSuper:
+ case StoredKind::NamespaceWithGlobal:
+ return {static_cast<const NamespaceBaseDecl *>(Ptr),
+ Kind == StoredKind::NamespaceWithGlobal
+ ? NestedNameSpecifier::getGlobal()
+ : std::nullopt};
+ case StoredKind::NamespaceWithNamespace:
+ return *static_cast<const NamespaceAndPrefixStorage *>(Ptr);
+ case StoredKind::Type:;
+ }
+ llvm_unreachable("unexpected stored kind");
+}
+
+struct NamespaceAndPrefixLoc;
+
+/// A C++ nested-name-specifier augmented with source location
+/// information.
+class NestedNameSpecifierLoc {
+ NestedNameSpecifier Qualifier = std::nullopt;
+ void *Data = nullptr;
+
+ /// Load a (possibly unaligned) source location from a given address
+ /// and offset.
+ SourceLocation LoadSourceLocation(unsigned Offset) const {
+ SourceLocation::UIntTy Raw;
+ memcpy(&Raw, static_cast<char *>(Data) + Offset, sizeof(Raw));
+ return SourceLocation::getFromRawEncoding(Raw);
+ }
+
+ /// Load a (possibly unaligned) pointer from a given address and
+ /// offset.
+ void *LoadPointer(unsigned Offset) const {
+ void *Result;
+ memcpy(&Result, static_cast<char *>(Data) + Offset, sizeof(void *));
+ return Result;
+ }
+
+ /// Determines the data length for the last component in the
+ /// given nested-name-specifier.
+ static inline unsigned getLocalDataLength(NestedNameSpecifier Qualifier);
+
+ /// Determines the data length for the entire
+ /// nested-name-specifier.
+ static inline unsigned getDataLength(NestedNameSpecifier Qualifier);
+
+public:
+ /// Construct an empty nested-name-specifier.
+ NestedNameSpecifierLoc() = default;
+
+  /// Construct a nested-name-specifier with source location information
+  /// from the given qualifier and opaque source-location data.
+ NestedNameSpecifierLoc(NestedNameSpecifier Qualifier, void *Data)
+ : Qualifier(Qualifier), Data(Data) {}
+
+ /// Evaluates true when this nested-name-specifier location is
+ /// non-empty.
+ explicit operator bool() const { return bool(Qualifier); }
+
+ /// Evaluates true when this nested-name-specifier location is
+ /// non-empty.
+ bool hasQualifier() const { return bool(Qualifier); }
+
+ /// Retrieve the nested-name-specifier to which this instance
+ /// refers.
+ NestedNameSpecifier getNestedNameSpecifier() const { return Qualifier; }
+
+ /// Retrieve the opaque pointer that refers to source-location data.
+ void *getOpaqueData() const { return Data; }
+
+ /// Retrieve the source range covering the entirety of this
+ /// nested-name-specifier.
+ ///
+ /// For example, if this instance refers to a nested-name-specifier
+ /// \c \::std::vector<int>::, the returned source range would cover
+ /// from the initial '::' to the last '::'.
+ inline SourceRange getSourceRange() const LLVM_READONLY;
+
+ /// Retrieve the source range covering just the last part of
+ /// this nested-name-specifier, not including the prefix.
+ ///
+ /// For example, if this instance refers to a nested-name-specifier
+ /// \c \::std::vector<int>::, the returned source range would cover
+ /// from "vector" to the last '::'.
+ inline SourceRange getLocalSourceRange() const;
+
+ /// Retrieve the location of the beginning of this
+ /// nested-name-specifier.
+ SourceLocation getBeginLoc() const;
+
+ /// Retrieve the location of the end of this
+ /// nested-name-specifier.
+ inline SourceLocation getEndLoc() const;
+
+ /// Retrieve the location of the beginning of this
+ /// component of the nested-name-specifier.
+ inline SourceLocation getLocalBeginLoc() const;
+
+ /// Retrieve the location of the end of this component of the
+ /// nested-name-specifier.
+ inline SourceLocation getLocalEndLoc() const;
+
+ /// For a nested-name-specifier that refers to a namespace,
+ /// retrieve the namespace and its prefix.
+ ///
+ /// For example, if this instance refers to a nested-name-specifier
+ /// \c \::std::chrono::, the prefix is \c \::std::. Note that the
+ /// returned prefix may be empty, if this is the first component of
+ /// the nested-name-specifier.
+ inline NamespaceAndPrefixLoc castAsNamespaceAndPrefix() const;
+ inline NamespaceAndPrefixLoc getAsNamespaceAndPrefix() const;
+
+ /// For a nested-name-specifier that refers to a type,
+ /// retrieve the type with source-location information.
+ inline TypeLoc castAsTypeLoc() const;
+ inline TypeLoc getAsTypeLoc() const;
+
+ /// Determines the data length for the entire
+ /// nested-name-specifier.
+ inline unsigned getDataLength() const;
+
+ friend bool operator==(NestedNameSpecifierLoc X, NestedNameSpecifierLoc Y) {
+ return X.Qualifier == Y.Qualifier && X.Data == Y.Data;
+ }
+
+ friend bool operator!=(NestedNameSpecifierLoc X, NestedNameSpecifierLoc Y) {
+ return !(X == Y);
+ }
+};
+
+struct NamespaceAndPrefixLoc {
+ const NamespaceBaseDecl *Namespace = nullptr;
+ NestedNameSpecifierLoc Prefix;
+
+ explicit operator bool() const { return Namespace != nullptr; }
+};
+
+/// Class that aids in the construction of nested-name-specifiers along
+/// with source-location information for all of the components of the
+/// nested-name-specifier.
+class NestedNameSpecifierLocBuilder {
+ /// The current representation of the nested-name-specifier we're
+ /// building.
+ NestedNameSpecifier Representation = std::nullopt;
+
+ /// Buffer used to store source-location information for the
+ /// nested-name-specifier.
+ ///
+ /// Note that we explicitly manage the buffer (rather than using a
+ /// SmallVector) because \c Declarator expects it to be possible to memcpy()
+ /// a \c CXXScopeSpec, and CXXScopeSpec uses a NestedNameSpecifierLocBuilder.
+ char *Buffer = nullptr;
+
+ /// The size of the buffer used to store source-location information
+ /// for the nested-name-specifier.
+ unsigned BufferSize = 0;
+
+ /// The capacity of the buffer used to store source-location
+ /// information for the nested-name-specifier.
+ unsigned BufferCapacity = 0;
+
+ void PushTrivial(ASTContext &Context, NestedNameSpecifier Qualifier,
+ SourceRange R);
+
+public:
+ NestedNameSpecifierLocBuilder() = default;
+ NestedNameSpecifierLocBuilder(const NestedNameSpecifierLocBuilder &Other);
+
+ NestedNameSpecifierLocBuilder &
+ operator=(const NestedNameSpecifierLocBuilder &Other);
+
+ ~NestedNameSpecifierLocBuilder() {
+ if (BufferCapacity)
+ free(Buffer);
+ }
+
+ /// Retrieve the representation of the nested-name-specifier.
+ NestedNameSpecifier getRepresentation() const { return Representation; }
+
+ /// Make a nested-name-specifier of the form 'type::'.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param TL The TypeLoc that describes the type preceding the '::'.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void Make(ASTContext &Context, TypeLoc TL, SourceLocation ColonColonLoc);
+
+ /// Extend the current nested-name-specifier by another
+ /// nested-name-specifier component of the form 'namespace::'.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param Namespace The namespace.
+ ///
+ /// \param NamespaceLoc The location of the namespace name.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void Extend(ASTContext &Context, const NamespaceBaseDecl *Namespace,
+ SourceLocation NamespaceLoc, SourceLocation ColonColonLoc);
+
+ /// Turn this (empty) nested-name-specifier into the global
+ /// nested-name-specifier '::'.
+ void MakeGlobal(ASTContext &Context, SourceLocation ColonColonLoc);
+
+ /// Turns this (empty) nested-name-specifier into '__super'
+ /// nested-name-specifier.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param RD The declaration of the class in which nested-name-specifier
+ /// appeared.
+ ///
+ /// \param SuperLoc The location of the '__super' keyword.
+ /// name.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void MakeMicrosoftSuper(ASTContext &Context, CXXRecordDecl *RD,
+ SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc);
+
+ /// Make a new nested-name-specifier from incomplete source-location
+ /// information.
+ ///
+ /// This routine should be used very, very rarely, in cases where we
+ /// need to synthesize a nested-name-specifier. Most code should instead use
+ /// \c Adopt() with a proper \c NestedNameSpecifierLoc.
+ void MakeTrivial(ASTContext &Context, NestedNameSpecifier Qualifier,
+ SourceRange R) {
+ Representation = Qualifier;
+ BufferSize = 0;
+ PushTrivial(Context, Qualifier, R);
+ }
+
+ /// Adopt an existing nested-name-specifier (with source-range
+ /// information).
+ void Adopt(NestedNameSpecifierLoc Other);
+
+ /// Retrieve the source range covered by this nested-name-specifier.
+ inline SourceRange getSourceRange() const LLVM_READONLY;
+
+ /// Retrieve a nested-name-specifier with location information,
+ /// copied into the given AST context.
+ ///
+ /// \param Context The context into which this nested-name-specifier will be
+ /// copied.
+ NestedNameSpecifierLoc getWithLocInContext(ASTContext &Context) const;
+
+ /// Retrieve a nested-name-specifier with location
+ /// information based on the information in this builder.
+ ///
+ /// This loc will contain references to the builder's internal data and may
+ /// be invalidated by any change to the builder.
+ NestedNameSpecifierLoc getTemporary() const {
+ return NestedNameSpecifierLoc(Representation, Buffer);
+ }
+
+ /// Clear out this builder, and prepare it to build another
+ /// nested-name-specifier with source-location information.
+ void Clear() {
+ Representation = std::nullopt;
+ BufferSize = 0;
+ }
+
+ /// Retrieve the underlying buffer.
+ ///
+ /// \returns A pair containing a pointer to the buffer of source-location
+ /// data and the size of the source-location data that resides in that
+ /// buffer.
+ std::pair<char *, unsigned> getBuffer() const {
+ return std::make_pair(Buffer, BufferSize);
+ }
+};
+
+/// Insertion operator for diagnostics. This allows sending
+/// NestedNameSpecifiers into a diagnostic with <<.
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
+ NestedNameSpecifier NNS) {
+ DB.AddTaggedVal(reinterpret_cast<uintptr_t>(NNS.getAsVoidPointer()),
+ DiagnosticsEngine::ak_nestednamespec);
+ return DB;
+}
+
+} // namespace clang
+
+namespace llvm {
+
+template <> struct PointerLikeTypeTraits<clang::NestedNameSpecifier> {
+ static void *getAsVoidPointer(clang::NestedNameSpecifier P) {
+ return P.getAsVoidPointer();
+ }
+ static clang::NestedNameSpecifier getFromVoidPointer(const void *P) {
+ return clang::NestedNameSpecifier::getFromVoidPointer(P);
+ }
+ static constexpr int NumLowBitsAvailable =
+ clang::NestedNameSpecifier::NumLowBitsAvailable;
+};
+
+} // namespace llvm
+
+#endif // LLVM_CLANG_AST_NESTEDNAMESPECIFIERBASE_H
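A namespace qualifier now carries its prefix through getAsNamespaceAndPrefix(), so chains are walked by repeated unwrapping, mirroring the loop in NestedNameSpecifierLoc::getDataLength(). A small sketch under the same assumptions (the helper name is ours):

    unsigned countNamespaceComponents(NestedNameSpecifier NNS) {
      unsigned N = 0;
      while (NNS.getKind() == NestedNameSpecifier::Kind::Namespace) {
        ++N;
        NNS = NNS.getAsNamespaceAndPrefix().Prefix; // step to the prefix
      }
      return N;
    }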
diff --git a/clang/include/clang/AST/ODRHash.h b/clang/include/clang/AST/ODRHash.h
index 11f917a..ae3fab6 100644
--- a/clang/include/clang/AST/ODRHash.h
+++ b/clang/include/clang/AST/ODRHash.h
@@ -93,7 +93,7 @@ public:
void AddQualType(QualType T);
void AddStmt(const Stmt *S);
void AddIdentifierInfo(const IdentifierInfo *II);
- void AddNestedNameSpecifier(const NestedNameSpecifier *NNS);
+ void AddNestedNameSpecifier(NestedNameSpecifier NNS);
void AddDependentTemplateName(const DependentTemplateStorage &Name);
void AddTemplateName(TemplateName Name);
void AddDeclarationNameInfo(DeclarationNameInfo NameInfo,
diff --git a/clang/include/clang/AST/OpenACCClause.h b/clang/include/clang/AST/OpenACCClause.h
index b52f716..2f4aba1 100644
--- a/clang/include/clang/AST/OpenACCClause.h
+++ b/clang/include/clang/AST/OpenACCClause.h
@@ -1250,19 +1250,32 @@ public:
SourceLocation EndLoc);
};
+// A structure to stand in for the recipe on a reduction. RecipeDecl is the
+// 'main' declaration used for initializaiton, which is fixed.
+struct OpenACCReductionRecipe {
+ VarDecl *RecipeDecl;
+ // TODO: OpenACC: this should eventually have the operations here too.
+};
+
class OpenACCReductionClause final
: public OpenACCClauseWithVarList,
- private llvm::TrailingObjects<OpenACCReductionClause, Expr *> {
+ private llvm::TrailingObjects<OpenACCReductionClause, Expr *,
+ OpenACCReductionRecipe> {
friend TrailingObjects;
OpenACCReductionOperator Op;
OpenACCReductionClause(SourceLocation BeginLoc, SourceLocation LParenLoc,
OpenACCReductionOperator Operator,
- ArrayRef<Expr *> VarList, SourceLocation EndLoc)
+ ArrayRef<Expr *> VarList,
+ ArrayRef<OpenACCReductionRecipe> Recipes,
+ SourceLocation EndLoc)
: OpenACCClauseWithVarList(OpenACCClauseKind::Reduction, BeginLoc,
LParenLoc, EndLoc),
Op(Operator) {
- setExprs(getTrailingObjects(VarList.size()), VarList);
+ assert(VarList.size() == Recipes.size());
+ setExprs(getTrailingObjects<Expr *>(VarList.size()), VarList);
+ llvm::uninitialized_copy(Recipes,
+                          getTrailingObjects<OpenACCReductionRecipe>());
}
public:
@@ -1270,12 +1283,26 @@ public:
return C->getClauseKind() == OpenACCClauseKind::Reduction;
}
+ ArrayRef<OpenACCReductionRecipe> getRecipes() {
+ return ArrayRef<OpenACCReductionRecipe>{
+ getTrailingObjects<OpenACCReductionRecipe>(), getExprs().size()};
+ }
+
+ ArrayRef<OpenACCReductionRecipe> getRecipes() const {
+ return ArrayRef<OpenACCReductionRecipe>{
+ getTrailingObjects<OpenACCReductionRecipe>(), getExprs().size()};
+ }
+
static OpenACCReductionClause *
Create(const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc,
OpenACCReductionOperator Operator, ArrayRef<Expr *> VarList,
- SourceLocation EndLoc);
+ ArrayRef<OpenACCReductionRecipe> Recipes, SourceLocation EndLoc);
OpenACCReductionOperator getReductionOp() const { return Op; }
+
+ size_t numTrailingObjects(OverloadToken<Expr *>) const {
+ return getExprs().size();
+ }
};
class OpenACCLinkClause final
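
With two trailing arrays, TrailingObjects can locate the second array only if it knows how many elements the first one holds, which is why the clause gains the numTrailingObjects(OverloadToken<Expr *>) callback above. A standalone sketch of the same pattern under hypothetical names (Node, Payload), not the clang class itself:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Support/TrailingObjects.h"
    #include <cstddef>

    struct Payload { int Value; };

    class Node final : private llvm::TrailingObjects<Node, int, Payload> {
      friend TrailingObjects;
      size_t NumElts;
      // Element count of the first trailing array; TrailingObjects calls
      // this to compute where the Payload array starts in memory.
      size_t numTrailingObjects(OverloadToken<int>) const { return NumElts; }

    public:
      // Real instances are placement-allocated with
      // totalSizeToAlloc<int, Payload>(N, N) extra bytes, as the Create
      // methods in this patch do.
      explicit Node(size_t N) : NumElts(N) {}
      llvm::ArrayRef<Payload> payloads() const {
        return {getTrailingObjects<Payload>(), NumElts};
      }
    };
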
diff --git a/clang/include/clang/AST/OpenMPClause.h b/clang/include/clang/AST/OpenMPClause.h
index 1118d3e..72effbc 100644
--- a/clang/include/clang/AST/OpenMPClause.h
+++ b/clang/include/clang/AST/OpenMPClause.h
@@ -1865,62 +1865,43 @@ public:
/// \endcode
/// In this example directive '#pragma omp error' has simple
/// 'message' clause with user error message of "GNU compiler required.".
-class OMPMessageClause final : public OMPClause {
+class OMPMessageClause final
+ : public OMPOneStmtClause<llvm::omp::OMPC_message, OMPClause>,
+ public OMPClauseWithPreInit {
friend class OMPClauseReader;
- /// Location of '('
- SourceLocation LParenLoc;
-
- // Expression of the 'message' clause.
- Stmt *MessageString = nullptr;
-
/// Set message string of the clause.
- void setMessageString(Expr *MS) { MessageString = MS; }
-
- /// Sets the location of '('.
- void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+ void setMessageString(Expr *MS) { setStmt(MS); }
public:
/// Build 'message' clause with message string argument
///
/// \param MS Argument of the clause (message string).
+ /// \param HelperMS Helper statement for the construct.
+ /// \param CaptureRegion Innermost OpenMP region where expressions in this
+ /// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
- OMPMessageClause(Expr *MS, SourceLocation StartLoc, SourceLocation LParenLoc,
+ OMPMessageClause(Expr *MS, Stmt *HelperMS, OpenMPDirectiveKind CaptureRegion,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_message, StartLoc, EndLoc),
- LParenLoc(LParenLoc), MessageString(MS) {}
-
- /// Build an empty clause.
- OMPMessageClause()
- : OMPClause(llvm::omp::OMPC_message, SourceLocation(), SourceLocation()) {
+ : OMPOneStmtClause(MS, StartLoc, LParenLoc, EndLoc),
+ OMPClauseWithPreInit(this) {
+ setPreInitStmt(HelperMS, CaptureRegion);
}
- /// Returns the locaiton of '('.
- SourceLocation getLParenLoc() const { return LParenLoc; }
+ /// Build an empty clause.
+ OMPMessageClause() : OMPOneStmtClause(), OMPClauseWithPreInit(this) {}
/// Returns message string of the clause.
- Expr *getMessageString() const { return cast_or_null<Expr>(MessageString); }
-
- child_range children() {
- return child_range(&MessageString, &MessageString + 1);
- }
-
- const_child_range children() const {
- return const_child_range(&MessageString, &MessageString + 1);
- }
-
- child_range used_children() {
- return child_range(child_iterator(), child_iterator());
- }
-
- const_child_range used_children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
+ Expr *getMessageString() const { return getStmtAs<Expr>(); }
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_message;
+ /// Try to evaluate the message string at compile time.
+ std::optional<std::string> tryEvaluateString(ASTContext &Ctx) const {
+ if (Expr *MessageExpr = getMessageString())
+ return MessageExpr->tryEvaluateString(Ctx);
+ return std::nullopt;
}
};
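
The new tryEvaluateString wrapper simply forwards to Expr::tryEvaluateString, which produces a value only when the message is a compile-time constant string. A hedged usage sketch; messageOrDefault is a hypothetical helper, not part of the patch:

    #include "clang/AST/OpenMPClause.h"
    #include <optional>
    #include <string>

    static std::string messageOrDefault(const clang::OMPMessageClause *MC,
                                        clang::ASTContext &Ctx) {
      if (MC)
        if (std::optional<std::string> S = MC->tryEvaluateString(Ctx))
          return *S;
      return "unspecified error"; // message absent or not a constant
    }
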
diff --git a/clang/include/clang/AST/PrettyPrinter.h b/clang/include/clang/AST/PrettyPrinter.h
index 875769c..fd995a6 100644
--- a/clang/include/clang/AST/PrettyPrinter.h
+++ b/clang/include/clang/AST/PrettyPrinter.h
@@ -63,9 +63,9 @@ struct PrintingPolicy {
SuppressTagKeyword(LO.CPlusPlus), IncludeTagDefinition(false),
SuppressScope(false), SuppressUnwrittenScope(false),
SuppressInlineNamespace(SuppressInlineNamespaceMode::Redundant),
- SuppressElaboration(false), SuppressInitializers(false),
- ConstantArraySizeAsWritten(false), AnonymousTagLocations(true),
- SuppressStrongLifetime(false), SuppressLifetimeQualifiers(false),
+ SuppressInitializers(false), ConstantArraySizeAsWritten(false),
+ AnonymousTagLocations(true), SuppressStrongLifetime(false),
+ SuppressLifetimeQualifiers(false),
SuppressTemplateArgsInCXXConstructors(false),
SuppressDefaultTemplateArgs(true), Bool(LO.Bool),
Nullptr(LO.CPlusPlus11 || LO.C23), NullptrTypeInNamespace(LO.CPlusPlus),
@@ -150,11 +150,6 @@ struct PrintingPolicy {
LLVM_PREFERRED_TYPE(SuppressInlineNamespaceMode)
unsigned SuppressInlineNamespace : 2;
- /// Ignore qualifiers and tag keywords as specified by elaborated type sugar,
- /// instead letting the underlying type print as normal.
- LLVM_PREFERRED_TYPE(bool)
- unsigned SuppressElaboration : 1;
-
/// Suppress printing of variable initializers.
///
/// This flag is used when printing the loop variable in a for-range
diff --git a/clang/include/clang/AST/PropertiesBase.td b/clang/include/clang/AST/PropertiesBase.td
index 0438e4d..5b10127 100644
--- a/clang/include/clang/AST/PropertiesBase.td
+++ b/clang/include/clang/AST/PropertiesBase.td
@@ -127,9 +127,8 @@ def LValuePathSerializationHelper :
PropertyType<"APValue::LValuePathSerializationHelper"> {
let BufferElementTypes = [ LValuePathEntry ];
}
-def NestedNameSpecifier : PropertyType<"NestedNameSpecifier *">;
-def NestedNameSpecifierKind :
- EnumPropertyType<"NestedNameSpecifier::SpecifierKind">;
+def NestedNameSpecifier : PropertyType<"NestedNameSpecifier">;
+def NestedNameSpecifierKind : EnumPropertyType<"NestedNameSpecifier::Kind">;
def OverloadedOperatorKind : EnumPropertyType;
def Qualifiers : PropertyType;
def QualType : DefaultValuePropertyType;
diff --git a/clang/include/clang/AST/QualTypeNames.h b/clang/include/clang/AST/QualTypeNames.h
index daa86cd..9f5cf04 100644
--- a/clang/include/clang/AST/QualTypeNames.h
+++ b/clang/include/clang/AST/QualTypeNames.h
@@ -87,6 +87,16 @@ std::string getFullyQualifiedName(QualType QT, const ASTContext &Ctx,
/// specifier "::" should be prepended or not.
QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
bool WithGlobalNsPrefix = false);
+
+/// Get the fully qualified name for the declared context of a declaration.
+///
+/// \param[in] Ctx - the ASTContext to be used.
+/// \param[in] Decl - the declaration for which to get the fully qualified name.
+/// \param[in] WithGlobalNsPrefix - If true, then the global namespace
+/// specifier "::" will be prepended to the fully qualified name.
+NestedNameSpecifier
+getFullyQualifiedDeclaredContext(const ASTContext &Ctx, const Decl *Decl,
+ bool WithGlobalNsPrefix = false);
} // end namespace TypeName
} // end namespace clang
#endif // LLVM_CLANG_AST_QUALTYPENAMES_H
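
A hedged sketch of the new entry point's call shape; qualifierFor is a hypothetical wrapper, and the behavior shown assumes only what the declaration above documents:

    #include "clang/AST/QualTypeNames.h"

    static clang::NestedNameSpecifier
    qualifierFor(const clang::ASTContext &Ctx, const clang::Decl *D) {
      // Prepend "::" so the resulting qualifier is unambiguous in any scope.
      return clang::TypeName::getFullyQualifiedDeclaredContext(
          Ctx, D, /*WithGlobalNsPrefix=*/true);
    }
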
diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h
index 62991d9..02581c8 100644
--- a/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -216,14 +216,14 @@ public:
///
/// \returns false if the visitation was terminated early, true
/// otherwise (including when the argument is a Null type).
- bool TraverseType(QualType T);
+ bool TraverseType(QualType T, bool TraverseQualifier = true);
/// Recursively visit a type with location, by dispatching to
/// Traverse*TypeLoc() based on the argument type's getTypeClass() property.
///
/// \returns false if the visitation was terminated early, true
/// otherwise (including when the argument is a Null type location).
- bool TraverseTypeLoc(TypeLoc TL);
+ bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true);
/// Recursively visit an attribute, by dispatching to
/// Traverse*Attr() based on the argument's dynamic type.
@@ -242,7 +242,7 @@ public:
/// Recursively visit a C++ nested-name-specifier.
///
/// \returns false if the visitation was terminated early, true otherwise.
- bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS);
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier NNS);
/// Recursively visit a C++ nested-name-specifier with location
/// information.
@@ -389,7 +389,8 @@ public:
// Declare Traverse*() for all concrete Type classes.
#define ABSTRACT_TYPE(CLASS, BASE)
-#define TYPE(CLASS, BASE) bool Traverse##CLASS##Type(CLASS##Type *T);
+#define TYPE(CLASS, BASE) \
+ bool Traverse##CLASS##Type(CLASS##Type *T, bool TraverseQualifier);
#include "clang/AST/TypeNodes.inc"
// The above header #undefs ABSTRACT_TYPE and TYPE upon exit.
@@ -410,7 +411,8 @@ public:
// Declare Traverse*() for all concrete TypeLoc classes.
#define ABSTRACT_TYPELOC(CLASS, BASE)
-#define TYPELOC(CLASS, BASE) bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL);
+#define TYPELOC(CLASS, BASE) \
+ bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL, bool TraverseQualifier);
#include "clang/AST/TypeLocNodes.def"
// The above header #undefs ABSTRACT_TYPELOC and TYPELOC upon exit.
@@ -490,6 +492,8 @@ private:
bool TraverseTemplateArgumentLocsHelper(const TemplateArgumentLoc *TAL,
unsigned Count);
bool TraverseArrayTypeLocHelper(ArrayTypeLoc TL);
+ bool TraverseSubstPackTypeHelper(SubstPackType *T);
+ bool TraverseSubstPackTypeLocHelper(SubstPackTypeLoc TL);
bool TraverseRecordHelper(RecordDecl *D);
bool TraverseCXXRecordHelper(CXXRecordDecl *D);
bool TraverseDeclaratorHelper(DeclaratorDecl *D);
@@ -499,6 +503,8 @@ private:
bool TraverseOMPExecutableDirective(OMPExecutableDirective *S);
bool TraverseOMPLoopDirective(OMPLoopDirective *S);
bool TraverseOMPClause(OMPClause *C);
+ bool TraverseTagType(TagType *T, bool TraverseQualifier);
+ bool TraverseTagTypeLoc(TagTypeLoc TL, bool TraverseQualifier);
#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class) bool Visit##Class(Class *C);
#include "llvm/Frontend/OpenMP/OMP.inc"
@@ -698,7 +704,8 @@ RecursiveASTVisitor<Derived>::TraverseStmt(Stmt *S, DataRecursionQueue *Queue) {
}
template <typename Derived>
-bool RecursiveASTVisitor<Derived>::TraverseType(QualType T) {
+bool RecursiveASTVisitor<Derived>::TraverseType(QualType T,
+ bool TraverseQualifier) {
if (T.isNull())
return true;
@@ -707,7 +714,8 @@ bool RecursiveASTVisitor<Derived>::TraverseType(QualType T) {
#define TYPE(CLASS, BASE) \
case Type::CLASS: \
return getDerived().Traverse##CLASS##Type( \
- static_cast<CLASS##Type *>(const_cast<Type *>(T.getTypePtr())));
+ static_cast<CLASS##Type *>(const_cast<Type *>(T.getTypePtr())), \
+ TraverseQualifier);
#include "clang/AST/TypeNodes.inc"
}
@@ -715,7 +723,8 @@ bool RecursiveASTVisitor<Derived>::TraverseType(QualType T) {
}
template <typename Derived>
-bool RecursiveASTVisitor<Derived>::TraverseTypeLoc(TypeLoc TL) {
+bool RecursiveASTVisitor<Derived>::TraverseTypeLoc(TypeLoc TL,
+ bool TraverseQualifier) {
if (TL.isNull())
return true;
@@ -723,7 +732,8 @@ bool RecursiveASTVisitor<Derived>::TraverseTypeLoc(TypeLoc TL) {
#define ABSTRACT_TYPELOC(CLASS, BASE)
#define TYPELOC(CLASS, BASE) \
case TypeLoc::CLASS: \
- return getDerived().Traverse##CLASS##TypeLoc(TL.castAs<CLASS##TypeLoc>());
+ return getDerived().Traverse##CLASS##TypeLoc(TL.castAs<CLASS##TypeLoc>(), \
+ TraverseQualifier);
#include "clang/AST/TypeLocNodes.def"
}
@@ -779,46 +789,43 @@ bool RecursiveASTVisitor<Derived>::TraverseDecl(Decl *D) {
template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseNestedNameSpecifier(
- NestedNameSpecifier *NNS) {
- if (!NNS)
+ NestedNameSpecifier NNS) {
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
return true;
-
- if (NNS->getPrefix())
- TRY_TO(TraverseNestedNameSpecifier(NNS->getPrefix()));
-
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::Namespace:
+ TRY_TO(TraverseNestedNameSpecifier(NNS.getAsNamespaceAndPrefix().Prefix));
+ return true;
+ case NestedNameSpecifier::Kind::Type: {
+ auto *T = const_cast<Type *>(NNS.getAsType());
+ TRY_TO(TraverseNestedNameSpecifier(T->getPrefix()));
+ TRY_TO(TraverseType(QualType(T, 0), /*TraverseQualifier=*/false));
return true;
-
- case NestedNameSpecifier::TypeSpec:
- TRY_TO(TraverseType(QualType(NNS->getAsType(), 0)));
}
-
- return true;
+ }
+ llvm_unreachable("unhandled kind");
}
template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseNestedNameSpecifierLoc(
NestedNameSpecifierLoc NNS) {
- if (!NNS)
+ switch (NNS.getNestedNameSpecifier().getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
return true;
-
- if (NestedNameSpecifierLoc Prefix = NNS.getPrefix())
- TRY_TO(TraverseNestedNameSpecifierLoc(Prefix));
-
- switch (NNS.getNestedNameSpecifier()->getKind()) {
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::Namespace:
+ TRY_TO(
+ TraverseNestedNameSpecifierLoc(NNS.castAsNamespaceAndPrefix().Prefix));
return true;
-
- case NestedNameSpecifier::TypeSpec:
- TRY_TO(TraverseTypeLoc(NNS.getTypeLoc()));
- break;
+ case NestedNameSpecifier::Kind::Type: {
+ TypeLoc TL = NNS.castAsTypeLoc();
+ TRY_TO(TraverseNestedNameSpecifierLoc(TL.getPrefix()));
+ TRY_TO(TraverseTypeLoc(TL, /*TraverseQualifier=*/false));
+ return true;
+ }
}
return true;
@@ -975,10 +982,13 @@ RecursiveASTVisitor<Derived>::TraverseLambdaCapture(LambdaExpr *LE,
// This macro makes available a variable T, the passed-in type.
#define DEF_TRAVERSE_TYPE(TYPE, CODE) \
template <typename Derived> \
- bool RecursiveASTVisitor<Derived>::Traverse##TYPE(TYPE *T) { \
+ bool RecursiveASTVisitor<Derived>::Traverse##TYPE(TYPE *T, \
+ bool TraverseQualifier) { \
if (!getDerived().shouldTraversePostOrder()) \
TRY_TO(WalkUpFrom##TYPE(T)); \
- { CODE; } \
+ { \
+ CODE; \
+ } \
if (getDerived().shouldTraversePostOrder()) \
TRY_TO(WalkUpFrom##TYPE(T)); \
return true; \
@@ -1000,10 +1010,11 @@ DEF_TRAVERSE_TYPE(RValueReferenceType,
{ TRY_TO(TraverseType(T->getPointeeType())); })
DEF_TRAVERSE_TYPE(MemberPointerType, {
- TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
- if (T->isSugared())
- TRY_TO(TraverseType(
- QualType(T->getMostRecentCXXRecordDecl()->getTypeForDecl(), 0)));
+ NestedNameSpecifier Qualifier =
+ T->isSugared() ? cast<MemberPointerType>(T->getCanonicalTypeUnqualified())
+ ->getQualifier()
+ : T->getQualifier();
+ TRY_TO(TraverseNestedNameSpecifier(Qualifier));
TRY_TO(TraverseType(T->getPointeeType()));
})
@@ -1087,9 +1098,18 @@ DEF_TRAVERSE_TYPE(FunctionProtoType, {
TRY_TO(TraverseStmt(NE));
})
-DEF_TRAVERSE_TYPE(UsingType, {})
-DEF_TRAVERSE_TYPE(UnresolvedUsingType, {})
-DEF_TRAVERSE_TYPE(TypedefType, {})
+DEF_TRAVERSE_TYPE(UsingType, {
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+})
+DEF_TRAVERSE_TYPE(UnresolvedUsingType, {
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+})
+DEF_TRAVERSE_TYPE(TypedefType, {
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+})
DEF_TRAVERSE_TYPE(TypeOfExprType,
{ TRY_TO(TraverseStmt(T->getUnderlyingExpr())); })
@@ -1115,27 +1135,15 @@ DEF_TRAVERSE_TYPE(AutoType, {
TRY_TO(TraverseTemplateArguments(T->getTypeConstraintArguments()));
}
})
-DEF_TRAVERSE_TYPE(DeducedTemplateSpecializationType, {
- TRY_TO(TraverseTemplateName(T->getTemplateName()));
- TRY_TO(TraverseType(T->getDeducedType()));
-})
-DEF_TRAVERSE_TYPE(RecordType, {})
-DEF_TRAVERSE_TYPE(EnumType, {})
DEF_TRAVERSE_TYPE(TemplateTypeParmType, {})
DEF_TRAVERSE_TYPE(SubstTemplateTypeParmType, {
TRY_TO(TraverseType(T->getReplacementType()));
})
-DEF_TRAVERSE_TYPE(SubstTemplateTypeParmPackType, {
- TRY_TO(TraverseTemplateArgument(T->getArgumentPack()));
-})
-
-DEF_TRAVERSE_TYPE(TemplateSpecializationType, {
- TRY_TO(TraverseTemplateName(T->getTemplateName()));
- TRY_TO(TraverseTemplateArguments(T->template_arguments()));
-})
-
-DEF_TRAVERSE_TYPE(InjectedClassNameType, {})
+DEF_TRAVERSE_TYPE(SubstTemplateTypeParmPackType,
+ { TRY_TO(TraverseSubstPackTypeHelper(T)); })
+DEF_TRAVERSE_TYPE(SubstBuiltinTemplatePackType,
+ { TRY_TO(TraverseSubstPackTypeHelper(T)); })
DEF_TRAVERSE_TYPE(AttributedType,
{ TRY_TO(TraverseType(T->getModifiedType())); })
@@ -1165,22 +1173,54 @@ DEF_TRAVERSE_TYPE(ParenType, { TRY_TO(TraverseType(T->getInnerType())); })
DEF_TRAVERSE_TYPE(MacroQualifiedType,
{ TRY_TO(TraverseType(T->getUnderlyingType())); })
-DEF_TRAVERSE_TYPE(ElaboratedType, {
- if (T->getQualifier()) {
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTagType(TagType *T,
+ bool TraverseQualifier) {
+ if (TraverseQualifier)
TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
- }
- TRY_TO(TraverseType(T->getNamedType()));
-})
+ return true;
+}
-DEF_TRAVERSE_TYPE(DependentNameType,
- { TRY_TO(TraverseNestedNameSpecifier(T->getQualifier())); })
+DEF_TRAVERSE_TYPE(EnumType, { TRY_TO(TraverseTagType(T, TraverseQualifier)); })
+DEF_TRAVERSE_TYPE(RecordType,
+ { TRY_TO(TraverseTagType(T, TraverseQualifier)); })
+DEF_TRAVERSE_TYPE(InjectedClassNameType,
+ { TRY_TO(TraverseTagType(T, TraverseQualifier)); })
+
+DEF_TRAVERSE_TYPE(DependentNameType, {
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+})
DEF_TRAVERSE_TYPE(DependentTemplateSpecializationType, {
const DependentTemplateStorage &S = T->getDependentTemplateName();
- TRY_TO(TraverseNestedNameSpecifier(S.getQualifier()));
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifier(S.getQualifier()));
+ TRY_TO(TraverseTemplateArguments(T->template_arguments()));
+})
+
+DEF_TRAVERSE_TYPE(TemplateSpecializationType, {
+ if (TraverseQualifier) {
+ TRY_TO(TraverseTemplateName(T->getTemplateName()));
+ } else {
+ // FIXME: Try to preserve the rest of the template name.
+ TRY_TO(TraverseTemplateName(TemplateName(
+ T->getTemplateName().getAsTemplateDecl(/*IgnoreDeduced=*/true))));
+ }
TRY_TO(TraverseTemplateArguments(T->template_arguments()));
})
+DEF_TRAVERSE_TYPE(DeducedTemplateSpecializationType, {
+ if (TraverseQualifier) {
+ TRY_TO(TraverseTemplateName(T->getTemplateName()));
+ } else {
+ // FIXME: Try to preserve the rest of the template name.
+ TRY_TO(TraverseTemplateName(TemplateName(
+ T->getTemplateName().getAsTemplateDecl(/*IgnoreDeduced=*/true))));
+ }
+ TRY_TO(TraverseType(T->getDeducedType()));
+})
+
DEF_TRAVERSE_TYPE(PackExpansionType, { TRY_TO(TraverseType(T->getPattern())); })
DEF_TRAVERSE_TYPE(ObjCTypeParamType, {})
@@ -1221,13 +1261,16 @@ DEF_TRAVERSE_TYPE(PredefinedSugarType, {})
// continue to work.
#define DEF_TRAVERSE_TYPELOC(TYPE, CODE) \
template <typename Derived> \
- bool RecursiveASTVisitor<Derived>::Traverse##TYPE##Loc(TYPE##Loc TL) { \
+ bool RecursiveASTVisitor<Derived>::Traverse##TYPE##Loc( \
+ TYPE##Loc TL, bool TraverseQualifier) { \
if (!getDerived().shouldTraversePostOrder()) { \
TRY_TO(WalkUpFrom##TYPE##Loc(TL)); \
if (getDerived().shouldWalkTypesOfTypeLocs()) \
TRY_TO(WalkUpFrom##TYPE(const_cast<TYPE *>(TL.getTypePtr()))); \
} \
- { CODE; } \
+ { \
+ CODE; \
+ } \
if (getDerived().shouldTraversePostOrder()) { \
TRY_TO(WalkUpFrom##TYPE##Loc(TL)); \
if (getDerived().shouldWalkTypesOfTypeLocs()) \
@@ -1237,8 +1280,10 @@ DEF_TRAVERSE_TYPE(PredefinedSugarType, {})
}
template <typename Derived>
-bool
-RecursiveASTVisitor<Derived>::TraverseQualifiedTypeLoc(QualifiedTypeLoc TL) {
+bool RecursiveASTVisitor<Derived>::TraverseQualifiedTypeLoc(
+ QualifiedTypeLoc TL, bool TraverseQualifier) {
+ assert(TraverseQualifier &&
+ "Qualifiers should never occur within NestedNameSpecifiers");
// Move this over to the 'main' typeloc tree. Note that this is a
// move -- we pretend that we were really looking at the unqualified
// typeloc all along -- rather than a recursion, so we don't follow
@@ -1391,9 +1436,21 @@ DEF_TRAVERSE_TYPELOC(FunctionProtoType, {
TRY_TO(TraverseStmt(NE));
})
-DEF_TRAVERSE_TYPELOC(UsingType, {})
-DEF_TRAVERSE_TYPELOC(UnresolvedUsingType, {})
-DEF_TRAVERSE_TYPELOC(TypedefType, {})
+DEF_TRAVERSE_TYPELOC(UsingType, {
+ if (NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ TraverseQualifier && QualifierLoc)
+ TRY_TO(TraverseNestedNameSpecifierLoc(QualifierLoc));
+})
+DEF_TRAVERSE_TYPELOC(UnresolvedUsingType, {
+ if (NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ TraverseQualifier && QualifierLoc)
+ TRY_TO(TraverseNestedNameSpecifierLoc(QualifierLoc));
+})
+DEF_TRAVERSE_TYPELOC(TypedefType, {
+ if (NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ TraverseQualifier && QualifierLoc)
+ TRY_TO(TraverseNestedNameSpecifierLoc(QualifierLoc));
+})
DEF_TRAVERSE_TYPELOC(TypeOfExprType,
{ TRY_TO(TraverseStmt(TL.getUnderlyingExpr())); })
@@ -1423,30 +1480,30 @@ DEF_TRAVERSE_TYPELOC(AutoType, {
}
})
-DEF_TRAVERSE_TYPELOC(DeducedTemplateSpecializationType, {
- TRY_TO(TraverseTemplateName(TL.getTypePtr()->getTemplateName()));
- TRY_TO(TraverseType(TL.getTypePtr()->getDeducedType()));
-})
-
-DEF_TRAVERSE_TYPELOC(RecordType, {})
-DEF_TRAVERSE_TYPELOC(EnumType, {})
DEF_TRAVERSE_TYPELOC(TemplateTypeParmType, {})
DEF_TRAVERSE_TYPELOC(SubstTemplateTypeParmType, {
TRY_TO(TraverseType(TL.getTypePtr()->getReplacementType()));
})
-DEF_TRAVERSE_TYPELOC(SubstTemplateTypeParmPackType, {
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseSubstPackTypeLocHelper(
+ SubstPackTypeLoc TL) {
TRY_TO(TraverseTemplateArgument(TL.getTypePtr()->getArgumentPack()));
-})
+ return true;
+}
-// FIXME: use the loc for the template name?
-DEF_TRAVERSE_TYPELOC(TemplateSpecializationType, {
- TRY_TO(TraverseTemplateName(TL.getTypePtr()->getTemplateName()));
- for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
- TRY_TO(TraverseTemplateArgumentLoc(TL.getArgLoc(I)));
- }
-})
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseSubstPackTypeHelper(
+ SubstPackType *T) {
+ TRY_TO(TraverseTemplateArgument(T->getArgumentPack()));
+ return true;
+}
+
+DEF_TRAVERSE_TYPELOC(SubstTemplateTypeParmPackType,
+ { TRY_TO(TraverseSubstPackTypeLocHelper(TL)); })
-DEF_TRAVERSE_TYPELOC(InjectedClassNameType, {})
+DEF_TRAVERSE_TYPELOC(SubstBuiltinTemplatePackType,
+ { TRY_TO(TraverseSubstPackTypeLocHelper(TL)); })
DEF_TRAVERSE_TYPELOC(ParenType, { TRY_TO(TraverseTypeLoc(TL.getInnerLoc())); })
@@ -1468,27 +1525,63 @@ DEF_TRAVERSE_TYPELOC(HLSLAttributedResourceType,
DEF_TRAVERSE_TYPELOC(HLSLInlineSpirvType,
{ TRY_TO(TraverseType(TL.getType())); })
-DEF_TRAVERSE_TYPELOC(ElaboratedType, {
- if (TL.getQualifierLoc()) {
- TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
- }
- TRY_TO(TraverseTypeLoc(TL.getNamedTypeLoc()));
-})
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTagTypeLoc(TagTypeLoc TL,
+ bool TraverseQualifier) {
+ if (NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ TraverseQualifier && QualifierLoc)
+ TRY_TO(TraverseNestedNameSpecifierLoc(QualifierLoc));
+ return true;
+}
+
+DEF_TRAVERSE_TYPELOC(EnumType,
+ { TRY_TO(TraverseTagTypeLoc(TL, TraverseQualifier)); })
+DEF_TRAVERSE_TYPELOC(RecordType,
+ { TRY_TO(TraverseTagTypeLoc(TL, TraverseQualifier)); })
+DEF_TRAVERSE_TYPELOC(InjectedClassNameType,
+ { TRY_TO(TraverseTagTypeLoc(TL, TraverseQualifier)); })
DEF_TRAVERSE_TYPELOC(DependentNameType, {
- TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
})
DEF_TRAVERSE_TYPELOC(DependentTemplateSpecializationType, {
- if (TL.getQualifierLoc()) {
+ if (TraverseQualifier)
TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
+
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
+ TRY_TO(TraverseTemplateArgumentLoc(TL.getArgLoc(I)));
}
+})
+
+DEF_TRAVERSE_TYPELOC(TemplateSpecializationType, {
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
+
+ // FIXME: Try to preserve the rest of the template name.
+ TRY_TO(TraverseTemplateName(
+ TemplateName(TL.getTypePtr()->getTemplateName().getAsTemplateDecl(
+ /*IgnoreDeduced=*/true))));
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
TRY_TO(TraverseTemplateArgumentLoc(TL.getArgLoc(I)));
}
})
+DEF_TRAVERSE_TYPELOC(DeducedTemplateSpecializationType, {
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
+
+ const auto *T = TL.getTypePtr();
+ // FIXME: Try to preserve the rest of the template name.
+ TRY_TO(
+ TraverseTemplateName(TemplateName(T->getTemplateName().getAsTemplateDecl(
+ /*IgnoreDeduced=*/true))));
+
+ TRY_TO(TraverseType(T->getDeducedType()));
+})
+
DEF_TRAVERSE_TYPELOC(PackExpansionType,
{ TRY_TO(TraverseTypeLoc(TL.getPatternLoc())); })
@@ -1631,8 +1724,9 @@ DEF_TRAVERSE_DECL(FriendDecl, {
TRY_TO(TraverseTypeLoc(D->getFriendType()->getTypeLoc()));
// Traverse any CXXRecordDecl owned by this type, since
// it will not be in the parent context:
- if (auto *ET = D->getFriendType()->getType()->getAs<ElaboratedType>())
- TRY_TO(TraverseDecl(ET->getOwnedTagDecl()));
+ if (auto *TT = D->getFriendType()->getType()->getAs<TagType>();
+ TT && TT->isTagOwned())
+ TRY_TO(TraverseDecl(TT->getOriginalDecl()));
} else {
TRY_TO(TraverseDecl(D->getFriendDecl()));
}
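
Under the reworked traversal, derived visitors receive NestedNameSpecifier by value and dispatch on Kind rather than null-checking a pointer. A hedged sketch against the API in this patch; QualifierCounter is a hypothetical visitor:

    #include "clang/AST/RecursiveASTVisitor.h"

    class QualifierCounter
        : public clang::RecursiveASTVisitor<QualifierCounter> {
    public:
      unsigned NamespaceQualifiers = 0;

      bool TraverseNestedNameSpecifier(clang::NestedNameSpecifier NNS) {
        if (NNS.getKind() == clang::NestedNameSpecifier::Kind::Namespace)
          ++NamespaceQualifiers;
        // Delegate to the base so prefixes and type qualifiers still get
        // walked as the switch above prescribes.
        return RecursiveASTVisitor::TraverseNestedNameSpecifier(NNS);
      }
    };
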
diff --git a/clang/include/clang/AST/TemplateBase.h b/clang/include/clang/AST/TemplateBase.h
index eb384ea..de248ac 100644
--- a/clang/include/clang/AST/TemplateBase.h
+++ b/clang/include/clang/AST/TemplateBase.h
@@ -15,9 +15,9 @@
#define LLVM_CLANG_AST_TEMPLATEBASE_H
#include "clang/AST/DependenceFlags.h"
-#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
#include "clang/AST/TemplateName.h"
-#include "clang/AST/Type.h"
+#include "clang/AST/TypeBase.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/APInt.h"
@@ -478,31 +478,25 @@ public:
/// Location information for a TemplateArgument.
struct TemplateArgumentLocInfo {
-private:
struct TemplateTemplateArgLocInfo {
- // FIXME: We'd like to just use the qualifier in the TemplateName,
- // but template arguments get canonicalized too quickly.
- NestedNameSpecifier *Qualifier;
void *QualifierLocData;
+ SourceLocation TemplateKwLoc;
SourceLocation TemplateNameLoc;
SourceLocation EllipsisLoc;
};
- llvm::PointerUnion<TemplateTemplateArgLocInfo *, Expr *, TypeSourceInfo *>
- Pointer;
-
TemplateTemplateArgLocInfo *getTemplate() const {
return cast<TemplateTemplateArgLocInfo *>(Pointer);
}
-public:
TemplateArgumentLocInfo() {}
TemplateArgumentLocInfo(TypeSourceInfo *Declarator) { Pointer = Declarator; }
TemplateArgumentLocInfo(Expr *E) { Pointer = E; }
// Ctx is used for allocation -- this case is unusually large and also rare,
// so we store the payload out-of-line.
- TemplateArgumentLocInfo(ASTContext &Ctx, NestedNameSpecifierLoc QualifierLoc,
+ TemplateArgumentLocInfo(ASTContext &Ctx, SourceLocation TemplateKwLoc,
+ NestedNameSpecifierLoc QualifierLoc,
SourceLocation TemplateNameLoc,
SourceLocation EllipsisLoc);
@@ -512,10 +506,8 @@ public:
Expr *getAsExpr() const { return cast<Expr *>(Pointer); }
- NestedNameSpecifierLoc getTemplateQualifierLoc() const {
- const auto *Template = getTemplate();
- return NestedNameSpecifierLoc(Template->Qualifier,
- Template->QualifierLocData);
+ SourceLocation getTemplateKwLoc() const {
+ return getTemplate()->TemplateKwLoc;
}
SourceLocation getTemplateNameLoc() const {
@@ -525,6 +517,10 @@ public:
SourceLocation getTemplateEllipsisLoc() const {
return getTemplate()->EllipsisLoc;
}
+
+private:
+ llvm::PointerUnion<TemplateTemplateArgLocInfo *, Expr *, TypeSourceInfo *>
+ Pointer;
};
/// Location wrapper for a TemplateArgument. TemplateArgument is to
@@ -558,14 +554,10 @@ public:
}
TemplateArgumentLoc(ASTContext &Ctx, const TemplateArgument &Argument,
+ SourceLocation TemplateKWLoc,
NestedNameSpecifierLoc QualifierLoc,
SourceLocation TemplateNameLoc,
- SourceLocation EllipsisLoc = SourceLocation())
- : Argument(Argument),
- LocInfo(Ctx, QualifierLoc, TemplateNameLoc, EllipsisLoc) {
- assert(Argument.getKind() == TemplateArgument::Template ||
- Argument.getKind() == TemplateArgument::TemplateExpansion);
- }
+ SourceLocation EllipsisLoc = SourceLocation());
/// - Fetches the primary location of the argument.
SourceLocation getLocation() const {
@@ -614,13 +606,15 @@ public:
return LocInfo.getAsExpr();
}
- NestedNameSpecifierLoc getTemplateQualifierLoc() const {
+ SourceLocation getTemplateKWLoc() const {
if (Argument.getKind() != TemplateArgument::Template &&
Argument.getKind() != TemplateArgument::TemplateExpansion)
- return NestedNameSpecifierLoc();
- return LocInfo.getTemplateQualifierLoc();
+ return SourceLocation();
+ return LocInfo.getTemplateKwLoc();
}
+ NestedNameSpecifierLoc getTemplateQualifierLoc() const;
+
SourceLocation getTemplateNameLoc() const {
if (Argument.getKind() != TemplateArgument::Template &&
Argument.getKind() != TemplateArgument::TemplateExpansion)
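
Reading the keyword location back follows the same guard as the other accessors: it is meaningful only for Template and TemplateExpansion arguments and comes back invalid otherwise. A hedged one-liner; writtenWithTemplateKeyword is a hypothetical helper:

    #include "clang/AST/TemplateBase.h"

    static bool
    writtenWithTemplateKeyword(const clang::TemplateArgumentLoc &AL) {
      // getTemplateKWLoc() returns an invalid SourceLocation for
      // non-template argument kinds, per the guard above.
      return AL.getTemplateKWLoc().isValid();
    }
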
diff --git a/clang/include/clang/AST/TemplateName.h b/clang/include/clang/AST/TemplateName.h
index 63949f8..abb0669 100644
--- a/clang/include/clang/AST/TemplateName.h
+++ b/clang/include/clang/AST/TemplateName.h
@@ -14,7 +14,7 @@
#define LLVM_CLANG_AST_TEMPLATENAME_H
#include "clang/AST/DependenceFlags.h"
-#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/UnsignedOrNone.h"
@@ -339,6 +339,14 @@ public:
/// structure, if any.
DependentTemplateName *getAsDependentTemplateName() const;
+ // Retrieve the qualifier and template keyword stored in either an underlying
+ // DependentTemplateName or QualifiedTemplateName.
+ std::tuple<NestedNameSpecifier, bool> getQualifierAndTemplateKeyword() const;
+
+ NestedNameSpecifier getQualifier() const {
+ return std::get<0>(getQualifierAndTemplateKeyword());
+ }
+
/// Retrieve the using shadow declaration through which the underlying
/// template declaration is introduced, if any.
UsingShadowDecl *getAsUsingShadowDecl() const;
@@ -503,7 +511,7 @@ class QualifiedTemplateName : public llvm::FoldingSetNode {
/// "template" keyword is always redundant in this case (otherwise,
/// the template name would be a dependent name and we would express
/// this name with DependentTemplateName).
- llvm::PointerIntPair<NestedNameSpecifier *, 1> Qualifier;
+ llvm::PointerIntPair<NestedNameSpecifier, 1, bool> Qualifier;
/// The underlying template name, it is either
/// 1) a Template -- a template declaration that this qualified name refers
@@ -512,7 +520,7 @@ class QualifiedTemplateName : public llvm::FoldingSetNode {
/// using-shadow declaration.
TemplateName UnderlyingTemplate;
- QualifiedTemplateName(NestedNameSpecifier *NNS, bool TemplateKeyword,
+ QualifiedTemplateName(NestedNameSpecifier NNS, bool TemplateKeyword,
TemplateName Template)
: Qualifier(NNS, TemplateKeyword ? 1 : 0), UnderlyingTemplate(Template) {
assert(UnderlyingTemplate.getKind() == TemplateName::Template ||
@@ -521,7 +529,7 @@ class QualifiedTemplateName : public llvm::FoldingSetNode {
public:
/// Return the nested name specifier that qualifies this name.
- NestedNameSpecifier *getQualifier() const { return Qualifier.getPointer(); }
+ NestedNameSpecifier getQualifier() const { return Qualifier.getPointer(); }
/// Whether the template name was prefixed by the "template"
/// keyword.
@@ -534,9 +542,9 @@ public:
Profile(ID, getQualifier(), hasTemplateKeyword(), UnderlyingTemplate);
}
- static void Profile(llvm::FoldingSetNodeID &ID, NestedNameSpecifier *NNS,
+ static void Profile(llvm::FoldingSetNodeID &ID, NestedNameSpecifier NNS,
bool TemplateKeyword, TemplateName TN) {
- ID.AddPointer(NNS);
+ NNS.Profile(ID);
ID.AddBoolean(TemplateKeyword);
ID.AddPointer(TN.getAsVoidPointer());
}
@@ -585,18 +593,18 @@ class DependentTemplateStorage {
///
/// The bit stored in this qualifier describes whether the \c Name field
/// was preceeded by a template keyword.
- llvm::PointerIntPair<NestedNameSpecifier *, 1, bool> Qualifier;
+ llvm::PointerIntPair<NestedNameSpecifier, 1, bool> Qualifier;
/// The dependent template name.
IdentifierOrOverloadedOperator Name;
public:
- DependentTemplateStorage(NestedNameSpecifier *Qualifier,
+ DependentTemplateStorage(NestedNameSpecifier Qualifier,
IdentifierOrOverloadedOperator Name,
bool HasTemplateKeyword);
/// Return the nested name specifier that qualifies this name.
- NestedNameSpecifier *getQualifier() const { return Qualifier.getPointer(); }
+ NestedNameSpecifier getQualifier() const { return Qualifier.getPointer(); }
IdentifierOrOverloadedOperator getName() const { return Name; }
@@ -609,10 +617,10 @@ public:
Profile(ID, getQualifier(), getName(), hasTemplateKeyword());
}
- static void Profile(llvm::FoldingSetNodeID &ID, NestedNameSpecifier *NNS,
+ static void Profile(llvm::FoldingSetNodeID &ID, NestedNameSpecifier NNS,
IdentifierOrOverloadedOperator Name,
bool HasTemplateKeyword) {
- ID.AddPointer(NNS);
+ NNS.Profile(ID);
ID.AddBoolean(HasTemplateKeyword);
Name.Profile(ID);
}
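
The combined accessor spares callers a second walk through the underlying QualifiedTemplateName or DependentTemplateName storage when both pieces are needed; structured bindings unpack the tuple. A hedged sketch; describe is a hypothetical function:

    #include "clang/AST/TemplateName.h"
    #include "llvm/Support/raw_ostream.h"

    static void describe(clang::TemplateName TN) {
      auto [Qualifier, HasTemplateKW] = TN.getQualifierAndTemplateKeyword();
      if (HasTemplateKW)
        llvm::outs() << "spelled with the 'template' keyword\n";
      if (Qualifier.getKind() != clang::NestedNameSpecifier::Kind::Null)
        llvm::outs() << "has a nested-name-specifier qualifier\n";
    }
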
diff --git a/clang/include/clang/AST/TextNodeDumper.h b/clang/include/clang/AST/TextNodeDumper.h
index 1917a8a..6d279511 100644
--- a/clang/include/clang/AST/TextNodeDumper.h
+++ b/clang/include/clang/AST/TextNodeDumper.h
@@ -211,7 +211,7 @@ public:
void dumpAccessSpecifier(AccessSpecifier AS);
void dumpCleanupObject(const ExprWithCleanups::CleanupObject &C);
void dumpTemplateSpecializationKind(TemplateSpecializationKind TSK);
- void dumpNestedNameSpecifier(const NestedNameSpecifier *NNS);
+ void dumpNestedNameSpecifier(NestedNameSpecifier NNS);
void dumpConceptReference(const ConceptReference *R);
void dumpTemplateArgument(const TemplateArgument &TA);
void dumpBareTemplateName(TemplateName TN);
diff --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h
index 12dce30..48575c1 100644
--- a/clang/include/clang/AST/Type.h
+++ b/clang/include/clang/AST/Type.h
@@ -9,8284 +9,67 @@
/// \file
/// C Language Family Type Representation
///
-/// This file defines the clang::Type interface and subclasses, used to
-/// represent types for languages in the C family.
+/// This file defines some inline methods for clang::Type which depend on
+/// Decl.h, avoiding a circular dependency.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_TYPE_H
#define LLVM_CLANG_AST_TYPE_H
-#include "clang/AST/DependenceFlags.h"
-#include "clang/AST/NestedNameSpecifier.h"
-#include "clang/AST/TemplateName.h"
-#include "clang/Basic/AddressSpaces.h"
-#include "clang/Basic/AttrKinds.h"
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/ExceptionSpecificationType.h"
-#include "clang/Basic/LLVM.h"
-#include "clang/Basic/LangOptions.h"
-#include "clang/Basic/Linkage.h"
-#include "clang/Basic/PartialDiagnostic.h"
-#include "clang/Basic/PointerAuthOptions.h"
-#include "clang/Basic/SourceLocation.h"
-#include "clang/Basic/Specifiers.h"
-#include "clang/Basic/Visibility.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/APSInt.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/PointerIntPair.h"
-#include "llvm/ADT/PointerUnion.h"
-#include "llvm/ADT/STLForwardCompat.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/ADT/iterator_range.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/DXILABI.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/PointerLikeTypeTraits.h"
-#include "llvm/Support/TrailingObjects.h"
-#include "llvm/Support/type_traits.h"
-#include <bitset>
-#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <cstring>
-#include <optional>
-#include <string>
-#include <type_traits>
-#include <utility>
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/TypeBase.h"
namespace clang {
-class BTFTypeTagAttr;
-class ExtQuals;
-class QualType;
-class ConceptDecl;
-class ValueDecl;
-class TagDecl;
-class TemplateParameterList;
-class Type;
-class Attr;
-
-enum {
- TypeAlignmentInBits = 4,
- TypeAlignment = 1 << TypeAlignmentInBits
-};
-
-namespace serialization {
- template <class T> class AbstractTypeReader;
- template <class T> class AbstractTypeWriter;
-}
-
-} // namespace clang
-
-namespace llvm {
-
- template <typename T>
- struct PointerLikeTypeTraits;
- template<>
- struct PointerLikeTypeTraits< ::clang::Type*> {
- static inline void *getAsVoidPointer(::clang::Type *P) { return P; }
-
- static inline ::clang::Type *getFromVoidPointer(void *P) {
- return static_cast< ::clang::Type*>(P);
- }
-
- static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits;
- };
-
- template<>
- struct PointerLikeTypeTraits< ::clang::ExtQuals*> {
- static inline void *getAsVoidPointer(::clang::ExtQuals *P) { return P; }
-
- static inline ::clang::ExtQuals *getFromVoidPointer(void *P) {
- return static_cast< ::clang::ExtQuals*>(P);
- }
-
- static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits;
- };
-
-} // namespace llvm
-
-namespace clang {
-
-class ASTContext;
-template <typename> class CanQual;
-class CXXRecordDecl;
-class DeclContext;
-class EnumDecl;
-class Expr;
-class ExtQualsTypeCommonBase;
-class FunctionDecl;
-class FunctionEffectsRef;
-class FunctionEffectKindSet;
-class FunctionEffectSet;
-class IdentifierInfo;
-class NamedDecl;
-class ObjCInterfaceDecl;
-class ObjCProtocolDecl;
-class ObjCTypeParamDecl;
-struct PrintingPolicy;
-class RecordDecl;
-class Stmt;
-class TagDecl;
-class TemplateArgument;
-class TemplateArgumentListInfo;
-class TemplateArgumentLoc;
-class TemplateTypeParmDecl;
-class TypedefNameDecl;
-class UnresolvedUsingTypenameDecl;
-class UsingShadowDecl;
-
-using CanQualType = CanQual<Type>;
-
-// Provide forward declarations for all of the *Type classes.
-#define TYPE(Class, Base) class Class##Type;
-#include "clang/AST/TypeNodes.inc"
-
-/// Pointer-authentication qualifiers.
-class PointerAuthQualifier {
- enum : uint32_t {
- EnabledShift = 0,
- EnabledBits = 1,
- EnabledMask = 1 << EnabledShift,
- AddressDiscriminatedShift = EnabledShift + EnabledBits,
- AddressDiscriminatedBits = 1,
- AddressDiscriminatedMask = 1 << AddressDiscriminatedShift,
- AuthenticationModeShift =
- AddressDiscriminatedShift + AddressDiscriminatedBits,
- AuthenticationModeBits = 2,
- AuthenticationModeMask = ((1 << AuthenticationModeBits) - 1)
- << AuthenticationModeShift,
- IsaPointerShift = AuthenticationModeShift + AuthenticationModeBits,
- IsaPointerBits = 1,
- IsaPointerMask = ((1 << IsaPointerBits) - 1) << IsaPointerShift,
- AuthenticatesNullValuesShift = IsaPointerShift + IsaPointerBits,
- AuthenticatesNullValuesBits = 1,
- AuthenticatesNullValuesMask = ((1 << AuthenticatesNullValuesBits) - 1)
- << AuthenticatesNullValuesShift,
- KeyShift = AuthenticatesNullValuesShift + AuthenticatesNullValuesBits,
- KeyBits = 10,
- KeyMask = ((1 << KeyBits) - 1) << KeyShift,
- DiscriminatorShift = KeyShift + KeyBits,
- DiscriminatorBits = 16,
- DiscriminatorMask = ((1u << DiscriminatorBits) - 1) << DiscriminatorShift,
- };
-
- // bits: |0 |1 |2..3 |4 |
- // |Enabled|Address|AuthenticationMode|ISA pointer|
- // bits: |5 |6..15| 16...31 |
- // |AuthenticatesNull|Key |Discriminator|
- uint32_t Data = 0;
-
- // The following static assertions check that each of the 32 bits is present
- // exactly in one of the constants.
- static_assert((EnabledBits + AddressDiscriminatedBits +
- AuthenticationModeBits + IsaPointerBits +
- AuthenticatesNullValuesBits + KeyBits + DiscriminatorBits) ==
- 32,
- "PointerAuthQualifier should be exactly 32 bits");
- static_assert((EnabledMask + AddressDiscriminatedMask +
- AuthenticationModeMask + IsaPointerMask +
- AuthenticatesNullValuesMask + KeyMask + DiscriminatorMask) ==
- 0xFFFFFFFF,
- "All masks should cover the entire bits");
- static_assert((EnabledMask ^ AddressDiscriminatedMask ^
- AuthenticationModeMask ^ IsaPointerMask ^
- AuthenticatesNullValuesMask ^ KeyMask ^ DiscriminatorMask) ==
- 0xFFFFFFFF,
- "All masks should cover the entire bits");
-
- PointerAuthQualifier(unsigned Key, bool IsAddressDiscriminated,
- unsigned ExtraDiscriminator,
- PointerAuthenticationMode AuthenticationMode,
- bool IsIsaPointer, bool AuthenticatesNullValues)
- : Data(EnabledMask |
- (IsAddressDiscriminated
- ? llvm::to_underlying(AddressDiscriminatedMask)
- : 0) |
- (Key << KeyShift) |
- (llvm::to_underlying(AuthenticationMode)
- << AuthenticationModeShift) |
- (ExtraDiscriminator << DiscriminatorShift) |
- (IsIsaPointer << IsaPointerShift) |
- (AuthenticatesNullValues << AuthenticatesNullValuesShift)) {
- assert(Key <= KeyNoneInternal);
- assert(ExtraDiscriminator <= MaxDiscriminator);
- assert((Data == 0) ==
- (getAuthenticationMode() == PointerAuthenticationMode::None));
- }
-
-public:
- enum {
- KeyNoneInternal = (1u << KeyBits) - 1,
-
- /// The maximum supported pointer-authentication key.
- MaxKey = KeyNoneInternal - 1,
-
- /// The maximum supported pointer-authentication discriminator.
- MaxDiscriminator = (1u << DiscriminatorBits) - 1
- };
-
-public:
- PointerAuthQualifier() = default;
-
- static PointerAuthQualifier
- Create(unsigned Key, bool IsAddressDiscriminated, unsigned ExtraDiscriminator,
- PointerAuthenticationMode AuthenticationMode, bool IsIsaPointer,
- bool AuthenticatesNullValues) {
- if (Key == PointerAuthKeyNone)
- Key = KeyNoneInternal;
- assert(Key <= KeyNoneInternal && "out-of-range key value");
- return PointerAuthQualifier(Key, IsAddressDiscriminated, ExtraDiscriminator,
- AuthenticationMode, IsIsaPointer,
- AuthenticatesNullValues);
- }
-
- bool isPresent() const {
- assert((Data == 0) ==
- (getAuthenticationMode() == PointerAuthenticationMode::None));
- return Data != 0;
- }
-
- explicit operator bool() const { return isPresent(); }
-
- unsigned getKey() const {
- assert(isPresent());
- return (Data & KeyMask) >> KeyShift;
- }
-
- bool hasKeyNone() const { return isPresent() && getKey() == KeyNoneInternal; }
-
- bool isAddressDiscriminated() const {
- assert(isPresent());
- return (Data & AddressDiscriminatedMask) >> AddressDiscriminatedShift;
- }
-
- unsigned getExtraDiscriminator() const {
- assert(isPresent());
- return (Data >> DiscriminatorShift);
- }
-
- PointerAuthenticationMode getAuthenticationMode() const {
- return PointerAuthenticationMode((Data & AuthenticationModeMask) >>
- AuthenticationModeShift);
- }
-
- bool isIsaPointer() const {
- assert(isPresent());
- return (Data & IsaPointerMask) >> IsaPointerShift;
- }
-
- bool authenticatesNullValues() const {
- assert(isPresent());
- return (Data & AuthenticatesNullValuesMask) >> AuthenticatesNullValuesShift;
- }
-
- PointerAuthQualifier withoutKeyNone() const {
- return hasKeyNone() ? PointerAuthQualifier() : *this;
- }
-
- friend bool operator==(PointerAuthQualifier Lhs, PointerAuthQualifier Rhs) {
- return Lhs.Data == Rhs.Data;
- }
- friend bool operator!=(PointerAuthQualifier Lhs, PointerAuthQualifier Rhs) {
- return Lhs.Data != Rhs.Data;
- }
-
- bool isEquivalent(PointerAuthQualifier Other) const {
- return withoutKeyNone() == Other.withoutKeyNone();
- }
-
- uint32_t getAsOpaqueValue() const { return Data; }
-
- // Deserialize pointer-auth qualifiers from an opaque representation.
- static PointerAuthQualifier fromOpaqueValue(uint32_t Opaque) {
- PointerAuthQualifier Result;
- Result.Data = Opaque;
- assert((Result.Data == 0) ==
- (Result.getAuthenticationMode() == PointerAuthenticationMode::None));
- return Result;
- }
-
- std::string getAsString() const;
- std::string getAsString(const PrintingPolicy &Policy) const;
-
- bool isEmptyWhenPrinted(const PrintingPolicy &Policy) const;
- void print(raw_ostream &OS, const PrintingPolicy &Policy) const;
-
- void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Data); }
-};
-
-/// The collection of all-type qualifiers we support.
-/// Clang supports five independent qualifiers:
-/// * C99: const, volatile, and restrict
-/// * MS: __unaligned
-/// * Embedded C (TR18037): address spaces
-/// * Objective C: the GC attributes (none, weak, or strong)
-class Qualifiers {
-public:
- Qualifiers() = default;
- enum TQ : uint64_t {
- // NOTE: These flags must be kept in sync with DeclSpec::TQ.
- Const = 0x1,
- Restrict = 0x2,
- Volatile = 0x4,
- CVRMask = Const | Volatile | Restrict
- };
-
- enum GC {
- GCNone = 0,
- Weak,
- Strong
- };
-
- enum ObjCLifetime {
- /// There is no lifetime qualification on this type.
- OCL_None,
-
- /// This object can be modified without requiring retains or
- /// releases.
- OCL_ExplicitNone,
-
- /// Assigning into this object requires the old value to be
- /// released and the new value to be retained. The timing of the
- /// release of the old value is inexact: it may be moved to
- /// immediately after the last known point where the value is
- /// live.
- OCL_Strong,
-
- /// Reading or writing from this object requires a barrier call.
- OCL_Weak,
-
- /// Assigning into this object requires a lifetime extension.
- OCL_Autoreleasing
- };
-
- enum : uint64_t {
- /// The maximum supported address space number.
- /// 23 bits should be enough for anyone.
- MaxAddressSpace = 0x7fffffu,
-
- /// The width of the "fast" qualifier mask.
- FastWidth = 3,
-
- /// The fast qualifier mask.
- FastMask = (1 << FastWidth) - 1
- };
-
- /// Returns the common set of qualifiers while removing them from
- /// the given sets.
- static Qualifiers removeCommonQualifiers(Qualifiers &L, Qualifiers &R) {
- Qualifiers Q;
- PointerAuthQualifier LPtrAuth = L.getPointerAuth();
- if (LPtrAuth.isPresent() &&
- LPtrAuth.getKey() != PointerAuthQualifier::KeyNoneInternal &&
- LPtrAuth == R.getPointerAuth()) {
- Q.setPointerAuth(LPtrAuth);
- PointerAuthQualifier Empty;
- L.setPointerAuth(Empty);
- R.setPointerAuth(Empty);
- }
-
- // If both are only CVR-qualified, bit operations are sufficient.
- if (!(L.Mask & ~CVRMask) && !(R.Mask & ~CVRMask)) {
- Q.Mask = L.Mask & R.Mask;
- L.Mask &= ~Q.Mask;
- R.Mask &= ~Q.Mask;
- return Q;
- }
-
- unsigned CommonCRV = L.getCVRQualifiers() & R.getCVRQualifiers();
- Q.addCVRQualifiers(CommonCRV);
- L.removeCVRQualifiers(CommonCRV);
- R.removeCVRQualifiers(CommonCRV);
-
- if (L.getObjCGCAttr() == R.getObjCGCAttr()) {
- Q.setObjCGCAttr(L.getObjCGCAttr());
- L.removeObjCGCAttr();
- R.removeObjCGCAttr();
- }
-
- if (L.getObjCLifetime() == R.getObjCLifetime()) {
- Q.setObjCLifetime(L.getObjCLifetime());
- L.removeObjCLifetime();
- R.removeObjCLifetime();
- }
-
- if (L.getAddressSpace() == R.getAddressSpace()) {
- Q.setAddressSpace(L.getAddressSpace());
- L.removeAddressSpace();
- R.removeAddressSpace();
- }
- return Q;
- }
-
- static Qualifiers fromFastMask(unsigned Mask) {
- Qualifiers Qs;
- Qs.addFastQualifiers(Mask);
- return Qs;
- }
-
- static Qualifiers fromCVRMask(unsigned CVR) {
- Qualifiers Qs;
- Qs.addCVRQualifiers(CVR);
- return Qs;
- }
-
- static Qualifiers fromCVRUMask(unsigned CVRU) {
- Qualifiers Qs;
- Qs.addCVRUQualifiers(CVRU);
- return Qs;
- }
-
- // Deserialize qualifiers from an opaque representation.
- static Qualifiers fromOpaqueValue(uint64_t opaque) {
- Qualifiers Qs;
- Qs.Mask = opaque;
- return Qs;
- }
-
- // Serialize these qualifiers into an opaque representation.
- uint64_t getAsOpaqueValue() const { return Mask; }
-
- bool hasConst() const { return Mask & Const; }
- bool hasOnlyConst() const { return Mask == Const; }
- void removeConst() { Mask &= ~Const; }
- void addConst() { Mask |= Const; }
- Qualifiers withConst() const {
- Qualifiers Qs = *this;
- Qs.addConst();
- return Qs;
- }
-
- bool hasVolatile() const { return Mask & Volatile; }
- bool hasOnlyVolatile() const { return Mask == Volatile; }
- void removeVolatile() { Mask &= ~Volatile; }
- void addVolatile() { Mask |= Volatile; }
- Qualifiers withVolatile() const {
- Qualifiers Qs = *this;
- Qs.addVolatile();
- return Qs;
- }
-
- bool hasRestrict() const { return Mask & Restrict; }
- bool hasOnlyRestrict() const { return Mask == Restrict; }
- void removeRestrict() { Mask &= ~Restrict; }
- void addRestrict() { Mask |= Restrict; }
- Qualifiers withRestrict() const {
- Qualifiers Qs = *this;
- Qs.addRestrict();
- return Qs;
- }
-
- bool hasCVRQualifiers() const { return getCVRQualifiers(); }
- unsigned getCVRQualifiers() const { return Mask & CVRMask; }
- unsigned getCVRUQualifiers() const { return Mask & (CVRMask | UMask); }
-
- void setCVRQualifiers(unsigned mask) {
- assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits");
- Mask = (Mask & ~CVRMask) | mask;
- }
- void removeCVRQualifiers(unsigned mask) {
- assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits");
- Mask &= ~static_cast<uint64_t>(mask);
- }
- void removeCVRQualifiers() {
- removeCVRQualifiers(CVRMask);
- }
- void addCVRQualifiers(unsigned mask) {
- assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits");
- Mask |= mask;
- }
- void addCVRUQualifiers(unsigned mask) {
- assert(!(mask & ~CVRMask & ~UMask) && "bitmask contains non-CVRU bits");
- Mask |= mask;
- }
-
- bool hasUnaligned() const { return Mask & UMask; }
- void setUnaligned(bool flag) {
- Mask = (Mask & ~UMask) | (flag ? UMask : 0);
- }
- void removeUnaligned() { Mask &= ~UMask; }
- void addUnaligned() { Mask |= UMask; }
-
- bool hasObjCGCAttr() const { return Mask & GCAttrMask; }
- GC getObjCGCAttr() const { return GC((Mask & GCAttrMask) >> GCAttrShift); }
- void setObjCGCAttr(GC type) {
- Mask = (Mask & ~GCAttrMask) | (type << GCAttrShift);
- }
- void removeObjCGCAttr() { setObjCGCAttr(GCNone); }
- void addObjCGCAttr(GC type) {
- assert(type);
- setObjCGCAttr(type);
- }
- Qualifiers withoutObjCGCAttr() const {
- Qualifiers qs = *this;
- qs.removeObjCGCAttr();
- return qs;
- }
- Qualifiers withoutObjCLifetime() const {
- Qualifiers qs = *this;
- qs.removeObjCLifetime();
- return qs;
- }
- Qualifiers withoutAddressSpace() const {
- Qualifiers qs = *this;
- qs.removeAddressSpace();
- return qs;
- }
-
- bool hasObjCLifetime() const { return Mask & LifetimeMask; }
- ObjCLifetime getObjCLifetime() const {
- return ObjCLifetime((Mask & LifetimeMask) >> LifetimeShift);
- }
- void setObjCLifetime(ObjCLifetime type) {
- Mask = (Mask & ~LifetimeMask) | (type << LifetimeShift);
- }
- void removeObjCLifetime() { setObjCLifetime(OCL_None); }
- void addObjCLifetime(ObjCLifetime type) {
- assert(type);
- assert(!hasObjCLifetime());
- Mask |= (type << LifetimeShift);
- }
-
- /// True if the lifetime is neither None or ExplicitNone.
- bool hasNonTrivialObjCLifetime() const {
- ObjCLifetime lifetime = getObjCLifetime();
- return (lifetime > OCL_ExplicitNone);
- }
-
- /// True if the lifetime is either strong or weak.
- bool hasStrongOrWeakObjCLifetime() const {
- ObjCLifetime lifetime = getObjCLifetime();
- return (lifetime == OCL_Strong || lifetime == OCL_Weak);
- }
-
- bool hasAddressSpace() const { return Mask & AddressSpaceMask; }
- LangAS getAddressSpace() const {
- return static_cast<LangAS>((Mask & AddressSpaceMask) >> AddressSpaceShift);
- }
- bool hasTargetSpecificAddressSpace() const {
- return isTargetAddressSpace(getAddressSpace());
- }
- /// Get the address space attribute value to be printed by diagnostics.
- unsigned getAddressSpaceAttributePrintValue() const {
- auto Addr = getAddressSpace();
- // This function is not supposed to be used with language specific
- // address spaces. If that happens, the diagnostic message should consider
- // printing the QualType instead of the address space value.
- assert(Addr == LangAS::Default || hasTargetSpecificAddressSpace());
- if (Addr != LangAS::Default)
- return toTargetAddressSpace(Addr);
- // TODO: The diagnostic messages where Addr may be 0 should be fixed
- // since it cannot differentiate the situation where 0 denotes the default
- // address space or user specified __attribute__((address_space(0))).
- return 0;
- }
- void setAddressSpace(LangAS space) {
- assert((unsigned)space <= MaxAddressSpace);
- Mask = (Mask & ~AddressSpaceMask)
- | (((uint32_t) space) << AddressSpaceShift);
- }
- void removeAddressSpace() { setAddressSpace(LangAS::Default); }
- void addAddressSpace(LangAS space) {
- assert(space != LangAS::Default);
- setAddressSpace(space);
- }
-
- bool hasPointerAuth() const { return Mask & PtrAuthMask; }
- PointerAuthQualifier getPointerAuth() const {
- return PointerAuthQualifier::fromOpaqueValue(Mask >> PtrAuthShift);
- }
- void setPointerAuth(PointerAuthQualifier Q) {
- Mask = (Mask & ~PtrAuthMask) |
- (uint64_t(Q.getAsOpaqueValue()) << PtrAuthShift);
- }
- void removePointerAuth() { Mask &= ~PtrAuthMask; }
- void addPointerAuth(PointerAuthQualifier Q) {
- assert(Q.isPresent());
- setPointerAuth(Q);
- }
-
- // Fast qualifiers are those that can be allocated directly
- // on a QualType object.
- bool hasFastQualifiers() const { return getFastQualifiers(); }
- unsigned getFastQualifiers() const { return Mask & FastMask; }
- void setFastQualifiers(unsigned mask) {
- assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits");
- Mask = (Mask & ~FastMask) | mask;
- }
- void removeFastQualifiers(unsigned mask) {
- assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits");
- Mask &= ~static_cast<uint64_t>(mask);
- }
- void removeFastQualifiers() {
- removeFastQualifiers(FastMask);
- }
- void addFastQualifiers(unsigned mask) {
- assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits");
- Mask |= mask;
- }
-
- /// Return true if the set contains any qualifiers which require an ExtQuals
- /// node to be allocated.
- bool hasNonFastQualifiers() const { return Mask & ~FastMask; }
- Qualifiers getNonFastQualifiers() const {
- Qualifiers Quals = *this;
- Quals.setFastQualifiers(0);
- return Quals;
- }
-
- /// Return true if the set contains any qualifiers.
- bool hasQualifiers() const { return Mask; }
- bool empty() const { return !Mask; }
-
- /// Add the qualifiers from the given set to this set.
- void addQualifiers(Qualifiers Q) {
- // If the other set doesn't have any non-boolean qualifiers, just
- // bit-or it in.
- if (!(Q.Mask & ~CVRMask))
- Mask |= Q.Mask;
- else {
- Mask |= (Q.Mask & CVRMask);
- if (Q.hasAddressSpace())
- addAddressSpace(Q.getAddressSpace());
- if (Q.hasObjCGCAttr())
- addObjCGCAttr(Q.getObjCGCAttr());
- if (Q.hasObjCLifetime())
- addObjCLifetime(Q.getObjCLifetime());
- if (Q.hasPointerAuth())
- addPointerAuth(Q.getPointerAuth());
- }
- }
-
- /// Remove the qualifiers from the given set from this set.
- void removeQualifiers(Qualifiers Q) {
- // If the other set doesn't have any non-boolean qualifiers, just
- // bit-and the inverse in.
- if (!(Q.Mask & ~CVRMask))
- Mask &= ~Q.Mask;
- else {
- Mask &= ~(Q.Mask & CVRMask);
- if (getObjCGCAttr() == Q.getObjCGCAttr())
- removeObjCGCAttr();
- if (getObjCLifetime() == Q.getObjCLifetime())
- removeObjCLifetime();
- if (getAddressSpace() == Q.getAddressSpace())
- removeAddressSpace();
- if (getPointerAuth() == Q.getPointerAuth())
- removePointerAuth();
- }
- }
-
- /// Add the qualifiers from the given set to this set, given that
- /// they don't conflict.
- void addConsistentQualifiers(Qualifiers qs) {
- assert(getAddressSpace() == qs.getAddressSpace() ||
- !hasAddressSpace() || !qs.hasAddressSpace());
- assert(getObjCGCAttr() == qs.getObjCGCAttr() ||
- !hasObjCGCAttr() || !qs.hasObjCGCAttr());
- assert(getObjCLifetime() == qs.getObjCLifetime() ||
- !hasObjCLifetime() || !qs.hasObjCLifetime());
- assert(!hasPointerAuth() || !qs.hasPointerAuth() ||
- getPointerAuth() == qs.getPointerAuth());
- Mask |= qs.Mask;
- }
-
- /// Returns true if address space A is equal to or a superset of B.
- /// OpenCL v2.0 defines conversion rules (OpenCLC v2.0 s6.5.5) and notion of
- /// overlapping address spaces.
- /// CL1.1 or CL1.2:
- /// every address space is a superset of itself.
- /// CL2.0 adds:
- /// __generic is a superset of any address space except for __constant.
- static bool isAddressSpaceSupersetOf(LangAS A, LangAS B,
- const ASTContext &Ctx) {
- // An address space is trivially a superset of itself; otherwise ask the target.
- return A == B || isTargetAddressSpaceSupersetOf(A, B, Ctx);
- }
-
- static bool isTargetAddressSpaceSupersetOf(LangAS A, LangAS B,
- const ASTContext &Ctx);
-
- /// Returns true if the address space in these qualifiers is equal to or
- /// a superset of the address space in the argument qualifiers.
- bool isAddressSpaceSupersetOf(Qualifiers other, const ASTContext &Ctx) const {
- return isAddressSpaceSupersetOf(getAddressSpace(), other.getAddressSpace(),
- Ctx);
- }
-
- /// Determines if these qualifiers compatibly include another set.
- /// Generally this answers the question of whether an object with the other
- /// qualifiers can be safely used as an object with these qualifiers.
- bool compatiblyIncludes(Qualifiers other, const ASTContext &Ctx) const {
- return isAddressSpaceSupersetOf(other, Ctx) &&
- // ObjC GC qualifiers can match, be added, or be removed, but can't
- // be changed.
- (getObjCGCAttr() == other.getObjCGCAttr() || !hasObjCGCAttr() ||
- !other.hasObjCGCAttr()) &&
- // Pointer-auth qualifiers must match exactly.
- getPointerAuth() == other.getPointerAuth() &&
- // ObjC lifetime qualifiers must match exactly.
- getObjCLifetime() == other.getObjCLifetime() &&
- // CVR qualifiers may subset.
- (((Mask & CVRMask) | (other.Mask & CVRMask)) == (Mask & CVRMask)) &&
- // U qualifier may superset.
- (!other.hasUnaligned() || hasUnaligned());
- }
-
- /// Determines if these qualifiers compatibly include another set of
- /// qualifiers from the narrow perspective of Objective-C ARC lifetime.
- ///
- /// One set of Objective-C lifetime qualifiers compatibly includes the other
- /// if the lifetime qualifiers match, or if both are non-__weak and the
- /// including set also contains the 'const' qualifier, or both are non-__weak
- /// and one is None (which can only happen in non-ARC modes).
- bool compatiblyIncludesObjCLifetime(Qualifiers other) const {
- if (getObjCLifetime() == other.getObjCLifetime())
- return true;
-
- if (getObjCLifetime() == OCL_Weak || other.getObjCLifetime() == OCL_Weak)
- return false;
-
- if (getObjCLifetime() == OCL_None || other.getObjCLifetime() == OCL_None)
- return true;
-
- return hasConst();
- }
-
- /// Determine whether this set of qualifiers is a strict superset of
- /// another set of qualifiers, not considering qualifier compatibility.
- bool isStrictSupersetOf(Qualifiers Other) const;
-
- bool operator==(Qualifiers Other) const { return Mask == Other.Mask; }
- bool operator!=(Qualifiers Other) const { return Mask != Other.Mask; }
-
- explicit operator bool() const { return hasQualifiers(); }
-
- Qualifiers &operator+=(Qualifiers R) {
- addQualifiers(R);
- return *this;
- }
-
- // Union two qualifier sets. If an enumerated qualifier appears
- // in both sets, use the one from the right.
- friend Qualifiers operator+(Qualifiers L, Qualifiers R) {
- L += R;
- return L;
- }
-
- Qualifiers &operator-=(Qualifiers R) {
- removeQualifiers(R);
- return *this;
- }
-
- /// Compute the difference between two qualifier sets.
- friend Qualifiers operator-(Qualifiers L, Qualifiers R) {
- L -= R;
- return L;
- }
-
- std::string getAsString() const;
- std::string getAsString(const PrintingPolicy &Policy) const;
-
- static std::string getAddrSpaceAsString(LangAS AS);
-
- bool isEmptyWhenPrinted(const PrintingPolicy &Policy) const;
- void print(raw_ostream &OS, const PrintingPolicy &Policy,
- bool appendSpaceIfNonEmpty = false) const;
-
- void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Mask); }
-
-private:
- // bits: |0 1 2|3|4 .. 5|6 .. 8|9 ... 31|32 ... 63|
- // |C R V|U|GCAttr|Lifetime|AddressSpace| PtrAuth |
- uint64_t Mask = 0;
- static_assert(sizeof(PointerAuthQualifier) == sizeof(uint32_t),
- "PointerAuthQualifier must be 32 bits");
-
- static constexpr uint64_t PtrAuthShift = 32;
- static constexpr uint64_t PtrAuthMask = UINT64_C(0xffffffff) << PtrAuthShift;
-
- static constexpr uint64_t UMask = 0x8;
- static constexpr uint64_t UShift = 3;
- static constexpr uint64_t GCAttrMask = 0x30;
- static constexpr uint64_t GCAttrShift = 4;
- static constexpr uint64_t LifetimeMask = 0x1C0;
- static constexpr uint64_t LifetimeShift = 6;
- static constexpr uint64_t AddressSpaceMask =
- ~(CVRMask | UMask | GCAttrMask | LifetimeMask | PtrAuthMask);
- static constexpr uint64_t AddressSpaceShift = 9;
-};
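A minimal sketch (not part of the header) of how the fast and extended
qualifier APIs above compose; it assumes clang's Qualifiers and LangAS plus
<cassert>:

    #include <cassert>
    void qualifiersSketch() {
      clang::Qualifiers Q;
      Q.addConst();                                    // fast: fits in low bits
      Q.addAddressSpace(clang::LangAS::opencl_global); // extended qualifier
      assert(Q.hasFastQualifiers() && Q.hasNonFastQualifiers());

      clang::Qualifiers R;
      R.addVolatile();
      Q += R;                                          // set union via operator+=
      assert(Q.hasConst() && Q.hasVolatile() && Q.hasAddressSpace());
    }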
-
-class QualifiersAndAtomic {
- Qualifiers Quals;
- bool HasAtomic;
-
-public:
- QualifiersAndAtomic() : HasAtomic(false) {}
- QualifiersAndAtomic(Qualifiers Quals, bool HasAtomic)
- : Quals(Quals), HasAtomic(HasAtomic) {}
-
- operator Qualifiers() const { return Quals; }
-
- bool hasVolatile() const { return Quals.hasVolatile(); }
- bool hasConst() const { return Quals.hasConst(); }
- bool hasRestrict() const { return Quals.hasRestrict(); }
- bool hasAtomic() const { return HasAtomic; }
-
- void addVolatile() { Quals.addVolatile(); }
- void addConst() { Quals.addConst(); }
- void addRestrict() { Quals.addRestrict(); }
- void addAtomic() { HasAtomic = true; }
-
- void removeVolatile() { Quals.removeVolatile(); }
- void removeConst() { Quals.removeConst(); }
- void removeRestrict() { Quals.removeRestrict(); }
- void removeAtomic() { HasAtomic = false; }
-
- QualifiersAndAtomic withVolatile() {
- return {Quals.withVolatile(), HasAtomic};
- }
- QualifiersAndAtomic withConst() { return {Quals.withConst(), HasAtomic}; }
- QualifiersAndAtomic withRestrict() {
- return {Quals.withRestrict(), HasAtomic};
- }
- QualifiersAndAtomic withAtomic() { return {Quals, true}; }
-
- QualifiersAndAtomic &operator+=(Qualifiers RHS) {
- Quals += RHS;
- return *this;
- }
-};
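A small illustrative use of the chaining 'with*' API above (a sketch, not
from the header): each call returns a new value and leaves the original
untouched.

    clang::QualifiersAndAtomic QA;
    clang::QualifiersAndAtomic CA = QA.withConst().withAtomic();
    // CA.hasConst() and CA.hasAtomic() are now true; QA is unchanged.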
-
-/// A std::pair-like structure for storing a qualified type split
-/// into its local qualifiers and its locally-unqualified type.
-struct SplitQualType {
- /// The locally-unqualified type.
- const Type *Ty = nullptr;
-
- /// The local qualifiers.
- Qualifiers Quals;
-
- SplitQualType() = default;
- SplitQualType(const Type *ty, Qualifiers qs) : Ty(ty), Quals(qs) {}
-
- SplitQualType getSingleStepDesugaredType() const; // end of this file
-
- // Make std::tie work.
- std::pair<const Type *, Qualifiers> asPair() const {
- return std::pair<const Type *, Qualifiers>(Ty, Quals);
- }
-
- friend bool operator==(SplitQualType a, SplitQualType b) {
- return a.Ty == b.Ty && a.Quals == b.Quals;
- }
- friend bool operator!=(SplitQualType a, SplitQualType b) {
- return a.Ty != b.Ty || a.Quals != b.Quals;
- }
-};
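The asPair() hook above is what makes std::tie work; a sketch, assuming QT
is some valid clang::QualType and <tuple> is included:

    const clang::Type *Ty = nullptr;
    clang::Qualifiers Quals;
    std::tie(Ty, Quals) = QT.split().asPair(); // bare type + local qualifiers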
-
-/// The kind of type we are substituting Objective-C type arguments into.
-///
-/// The kind of substitution affects the replacement of type parameters when
-/// no concrete type information is provided, e.g., when dealing with an
-/// unspecialized type.
-enum class ObjCSubstitutionContext {
- /// An ordinary type.
- Ordinary,
-
- /// The result type of a method or function.
- Result,
-
- /// The parameter type of a method or function.
- Parameter,
-
- /// The type of a property.
- Property,
-
- /// The superclass of a type.
- Superclass,
-};
-
-/// The kind of 'typeof' expression we're after.
-enum class TypeOfKind : uint8_t {
- Qualified,
- Unqualified,
-};
-
-/// A (possibly-)qualified type.
-///
-/// For efficiency, we don't store CV-qualified types as nodes on their
-/// own: instead each reference to a type stores the qualifiers. This
-/// greatly reduces the number of nodes we need to allocate for types (for
-/// example we only need one for 'int', 'const int', 'volatile int',
-/// 'const volatile int', etc).
-///
- /// As an added efficiency bonus, instead of making this a pair, we
- /// just store the three fast qualifier bits (const, volatile, restrict)
- /// in the low bits of the pointer. To handle the packing/unpacking, we
- /// make QualType a simple wrapper class that acts like a smart pointer.
- /// A fourth low bit indicates whether there are extended qualifiers
- /// present, in which case the pointer points to a special structure.
-class QualType {
- friend class QualifierCollector;
-
- // Thankfully, these are efficiently composable.
- llvm::PointerIntPair<llvm::PointerUnion<const Type *, const ExtQuals *>,
- Qualifiers::FastWidth> Value;
-
- const ExtQuals *getExtQualsUnsafe() const {
- return cast<const ExtQuals *>(Value.getPointer());
- }
-
- const Type *getTypePtrUnsafe() const {
- return cast<const Type *>(Value.getPointer());
- }
-
- const ExtQualsTypeCommonBase *getCommonPtr() const {
- assert(!isNull() && "Cannot retrieve a NULL type pointer");
- auto CommonPtrVal = reinterpret_cast<uintptr_t>(Value.getOpaqueValue());
- CommonPtrVal &= ~(uintptr_t)((1 << TypeAlignmentInBits) - 1);
- return reinterpret_cast<ExtQualsTypeCommonBase*>(CommonPtrVal);
- }
-
-public:
- QualType() = default;
- QualType(const Type *Ptr, unsigned Quals) : Value(Ptr, Quals) {}
- QualType(const ExtQuals *Ptr, unsigned Quals) : Value(Ptr, Quals) {}
-
- unsigned getLocalFastQualifiers() const { return Value.getInt(); }
- void setLocalFastQualifiers(unsigned Quals) { Value.setInt(Quals); }
-
- bool UseExcessPrecision(const ASTContext &Ctx);
-
- /// Retrieves a pointer to the underlying (unqualified) type.
- ///
- /// This function requires that the type not be NULL. If the type might be
- /// NULL, use the (slightly less efficient) \c getTypePtrOrNull().
- const Type *getTypePtr() const;
-
- const Type *getTypePtrOrNull() const;
-
- /// Retrieves a pointer to the name of the base type.
- const IdentifierInfo *getBaseTypeIdentifier() const;
-
- /// Divides a QualType into its unqualified type and a set of local
- /// qualifiers.
- SplitQualType split() const;
-
- void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
-
- static QualType getFromOpaquePtr(const void *Ptr) {
- QualType T;
- T.Value.setFromOpaqueValue(const_cast<void*>(Ptr));
- return T;
- }
-
- const Type &operator*() const {
- return *getTypePtr();
- }
-
- const Type *operator->() const {
- return getTypePtr();
- }
-
- bool isCanonical() const;
- bool isCanonicalAsParam() const;
-
- /// Return true if this QualType doesn't point to a type yet.
- bool isNull() const {
- return Value.getPointer().isNull();
- }
-
- // Determines if a type can form `T&`.
- bool isReferenceable() const;
-
- /// Determine whether this particular QualType instance has the
- /// "const" qualifier set, without looking through typedefs that may have
- /// added "const" at a different level.
- bool isLocalConstQualified() const {
- return (getLocalFastQualifiers() & Qualifiers::Const);
- }
-
- /// Determine whether this type is const-qualified.
- bool isConstQualified() const;
-
- enum class NonConstantStorageReason {
- MutableField,
- NonConstNonReferenceType,
- NonTrivialCtor,
- NonTrivialDtor,
- };
- /// Determine whether instances of this type can be placed in immutable
- /// storage.
- /// If ExcludeCtor is true, the duration when the object's constructor runs
- /// will not be considered. The caller will need to verify that the object is
- /// not written to during its construction. ExcludeDtor works similarly.
- std::optional<NonConstantStorageReason>
- isNonConstantStorage(const ASTContext &Ctx, bool ExcludeCtor,
- bool ExcludeDtor);
-
- bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor,
- bool ExcludeDtor) {
- return !isNonConstantStorage(Ctx, ExcludeCtor, ExcludeDtor);
- }
-
- /// Determine whether this particular QualType instance has the
- /// "restrict" qualifier set, without looking through typedefs that may have
- /// added "restrict" at a different level.
- bool isLocalRestrictQualified() const {
- return (getLocalFastQualifiers() & Qualifiers::Restrict);
- }
-
- /// Determine whether this type is restrict-qualified.
- bool isRestrictQualified() const;
-
- /// Determine whether this particular QualType instance has the
- /// "volatile" qualifier set, without looking through typedefs that may have
- /// added "volatile" at a different level.
- bool isLocalVolatileQualified() const {
- return (getLocalFastQualifiers() & Qualifiers::Volatile);
- }
-
- /// Determine whether this type is volatile-qualified.
- bool isVolatileQualified() const;
-
- /// Determine whether this particular QualType instance has any
- /// qualifiers, without looking through any typedefs that might add
- /// qualifiers at a different level.
- bool hasLocalQualifiers() const {
- return getLocalFastQualifiers() || hasLocalNonFastQualifiers();
- }
-
- /// Determine whether this type has any qualifiers.
- bool hasQualifiers() const;
-
- /// Determine whether this particular QualType instance has any
- /// "non-fast" qualifiers, e.g., those that are stored in an ExtQualType
- /// instance.
- bool hasLocalNonFastQualifiers() const {
- return isa<const ExtQuals *>(Value.getPointer());
- }
-
- /// Retrieve the set of qualifiers local to this particular QualType
- /// instance, not including any qualifiers acquired through typedefs or
- /// other sugar.
- Qualifiers getLocalQualifiers() const;
-
- /// Retrieve the set of qualifiers applied to this type.
- Qualifiers getQualifiers() const;
-
- /// Retrieve the set of CVR (const-volatile-restrict) qualifiers
- /// local to this particular QualType instance, not including any qualifiers
- /// acquired through typedefs or other sugar.
- unsigned getLocalCVRQualifiers() const {
- return getLocalFastQualifiers();
- }
-
- /// Retrieve the set of CVR (const-volatile-restrict) qualifiers
- /// applied to this type.
- unsigned getCVRQualifiers() const;
-
- bool isConstant(const ASTContext& Ctx) const {
- return QualType::isConstant(*this, Ctx);
- }
-
- /// Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
- bool isPODType(const ASTContext &Context) const;
-
- /// Return true if this is a POD type according to the rules of the C++98
- /// standard, regardless of the current compilation's language.
- bool isCXX98PODType(const ASTContext &Context) const;
-
- /// Return true if this is a POD type according to the more relaxed rules
- /// of the C++11 standard, regardless of the current compilation's language.
- /// (C++0x [basic.types]p9). Note that, unlike
- /// CXXRecordDecl::isCXX11StandardLayout, this takes DRs into account.
- bool isCXX11PODType(const ASTContext &Context) const;
-
- /// Return true if this is a trivial type per (C++0x [basic.types]p9)
- bool isTrivialType(const ASTContext &Context) const;
-
- /// Return true if this is a trivially copyable type (C++0x [basic.types]p9)
- bool isTriviallyCopyableType(const ASTContext &Context) const;
-
- /// Return true if the type is safe to bitwise copy using memcpy/memmove.
- ///
- /// This is an extension in clang: bitwise cloneable types act as trivially
- /// copyable types, meaning their underlying bytes can be safely copied by
- /// memcpy or memmove. After the copy, the destination object has the same
- /// object representation.
- ///
- /// However, there are cases where it is not safe to copy:
- /// - When sanitizers, such as AddressSanitizer, add padding with poison,
- /// which can cause issues if those poisoned padding bits are accessed.
- /// - Types with Objective-C lifetimes, where specific runtime
- /// semantics may not be preserved during a bitwise copy.
- bool isBitwiseCloneableType(const ASTContext &Context) const;
-
- /// Return true if this is a trivially copyable type
- bool isTriviallyCopyConstructibleType(const ASTContext &Context) const;
-
- /// Returns true if it is a class and it might be dynamic.
- bool mayBeDynamicClass() const;
-
- /// Returns true if it is not a class or if the class might not be dynamic.
- bool mayBeNotDynamicClass() const;
-
- /// Returns true if it is a WebAssembly Reference Type.
- bool isWebAssemblyReferenceType() const;
-
- /// Returns true if it is a WebAssembly Externref Type.
- bool isWebAssemblyExternrefType() const;
-
- /// Returns true if it is a WebAssembly Funcref Type.
- bool isWebAssemblyFuncrefType() const;
-
- // Don't promise in the API that anything besides 'const' can be
- // easily added.
-
- /// Add the `const` type qualifier to this QualType.
- void addConst() {
- addFastQualifiers(Qualifiers::Const);
- }
- QualType withConst() const {
- return withFastQualifiers(Qualifiers::Const);
- }
-
- /// Add the `volatile` type qualifier to this QualType.
- void addVolatile() {
- addFastQualifiers(Qualifiers::Volatile);
- }
- QualType withVolatile() const {
- return withFastQualifiers(Qualifiers::Volatile);
- }
-
- /// Add the `restrict` qualifier to this QualType.
- void addRestrict() {
- addFastQualifiers(Qualifiers::Restrict);
- }
- QualType withRestrict() const {
- return withFastQualifiers(Qualifiers::Restrict);
- }
-
- QualType withCVRQualifiers(unsigned CVR) const {
- return withFastQualifiers(CVR);
- }
-
- void addFastQualifiers(unsigned TQs) {
- assert(!(TQs & ~Qualifiers::FastMask)
- && "non-fast qualifier bits set in mask!");
- Value.setInt(Value.getInt() | TQs);
- }
-
- void removeLocalConst();
- void removeLocalVolatile();
- void removeLocalRestrict();
-
- void removeLocalFastQualifiers() { Value.setInt(0); }
- void removeLocalFastQualifiers(unsigned Mask) {
- assert(!(Mask & ~Qualifiers::FastMask) && "mask has non-fast qualifiers");
- Value.setInt(Value.getInt() & ~Mask);
- }
-
- // Creates a type with the given qualifiers in addition to any
- // qualifiers already on this type.
- QualType withFastQualifiers(unsigned TQs) const {
- QualType T = *this;
- T.addFastQualifiers(TQs);
- return T;
- }
-
- // Creates a type with exactly the given fast qualifiers, removing
- // any existing fast qualifiers.
- QualType withExactLocalFastQualifiers(unsigned TQs) const {
- return withoutLocalFastQualifiers().withFastQualifiers(TQs);
- }
-
- // Removes fast qualifiers, but leaves any extended qualifiers in place.
- QualType withoutLocalFastQualifiers() const {
- QualType T = *this;
- T.removeLocalFastQualifiers();
- return T;
- }
-
- QualType getCanonicalType() const;
-
- /// Return this type with all of the instance-specific qualifiers
- /// removed, but without removing any qualifiers that may have been applied
- /// through typedefs.
- QualType getLocalUnqualifiedType() const { return QualType(getTypePtr(), 0); }
-
- /// Retrieve the unqualified variant of the given type,
- /// removing as little sugar as possible.
- ///
- /// This routine looks through various kinds of sugar to find the
- /// least-desugared type that is unqualified. For example, given:
- ///
- /// \code
- /// typedef int Integer;
- /// typedef const Integer CInteger;
- /// typedef CInteger DifferenceType;
- /// \endcode
- ///
- /// Executing \c getUnqualifiedType() on the type \c DifferenceType will
- /// desugar until we hit the type \c Integer, which has no qualifiers on it.
- ///
- /// The resulting type might still be qualified if it's sugar for an array
- /// type. To strip qualifiers even from within a sugared array type, use
- /// ASTContext::getUnqualifiedArrayType.
- ///
- /// Note: In C, the _Atomic qualifier is special (see C23 6.2.5p32 for
- /// details), and it is not stripped by this function. Use
- /// getAtomicUnqualifiedType() to strip qualifiers including _Atomic.
- inline QualType getUnqualifiedType() const;
-
- /// Retrieve the unqualified variant of the given type, removing as little
- /// sugar as possible.
- ///
- /// Like getUnqualifiedType(), but also returns the set of
- /// qualifiers that were built up.
- ///
- /// The resulting type might still be qualified if it's sugar for an array
- /// type. To strip qualifiers even from within a sugared array type, use
- /// ASTContext::getUnqualifiedArrayType.
- inline SplitQualType getSplitUnqualifiedType() const;
-
- /// Determine whether this type is more qualified than the other
- /// given type, requiring exact equality for non-CVR qualifiers.
- bool isMoreQualifiedThan(QualType Other, const ASTContext &Ctx) const;
-
- /// Determine whether this type is at least as qualified as the other
- /// given type, requiring exact equality for non-CVR qualifiers.
- bool isAtLeastAsQualifiedAs(QualType Other, const ASTContext &Ctx) const;
-
- QualType getNonReferenceType() const;
-
- /// Determine the type of a (typically non-lvalue) expression with the
- /// specified result type.
- ///
- /// This routine should be used for expressions for which the return type is
- /// explicitly specified (e.g., in a cast or call) and isn't necessarily
- /// an lvalue. It removes a top-level reference (since there are no
- /// expressions of reference type) and deletes top-level cvr-qualifiers
- /// from non-class types (in C++) or all types (in C).
- QualType getNonLValueExprType(const ASTContext &Context) const;
-
- /// Remove an outer pack expansion type (if any) from this type. Used as part
- /// of converting the type of a declaration to the type of an expression that
- /// references that expression. It's meaningless for an expression to have a
- /// pack expansion type.
- QualType getNonPackExpansionType() const;
-
- /// Return the specified type with any "sugar" removed from
- /// the type. This takes off typedefs, typeof's etc. If the outer level of
- /// the type is already concrete, it returns it unmodified. This is similar
- /// to getting the canonical type, but it doesn't remove *all* typedefs. For
- /// example, it returns "T*" as "T*", (not as "int*"), because the pointer is
- /// concrete.
- ///
- /// Qualifiers are left in place.
- QualType getDesugaredType(const ASTContext &Context) const {
- return getDesugaredType(*this, Context);
- }
-
- SplitQualType getSplitDesugaredType() const {
- return getSplitDesugaredType(*this);
- }
-
- /// Return the specified type with one level of "sugar" removed from
- /// the type.
- ///
- /// This routine takes off the first typedef, typeof, etc. If the outer level
- /// of the type is already concrete, it returns it unmodified.
- QualType getSingleStepDesugaredType(const ASTContext &Context) const {
- return getSingleStepDesugaredTypeImpl(*this, Context);
- }
-
- /// Returns the specified type after dropping any
- /// outer-level parentheses.
- QualType IgnoreParens() const {
- if (isa<ParenType>(*this))
- return QualType::IgnoreParens(*this);
- return *this;
- }
-
- /// Indicate whether the specified types and qualifiers are identical.
- friend bool operator==(const QualType &LHS, const QualType &RHS) {
- return LHS.Value == RHS.Value;
- }
- friend bool operator!=(const QualType &LHS, const QualType &RHS) {
- return LHS.Value != RHS.Value;
- }
- friend bool operator<(const QualType &LHS, const QualType &RHS) {
- return LHS.Value < RHS.Value;
- }
-
- static std::string getAsString(SplitQualType split,
- const PrintingPolicy &Policy) {
- return getAsString(split.Ty, split.Quals, Policy);
- }
- static std::string getAsString(const Type *ty, Qualifiers qs,
- const PrintingPolicy &Policy);
-
- std::string getAsString() const;
- std::string getAsString(const PrintingPolicy &Policy) const;
-
- void print(raw_ostream &OS, const PrintingPolicy &Policy,
- const Twine &PlaceHolder = Twine(),
- unsigned Indentation = 0) const;
-
- static void print(SplitQualType split, raw_ostream &OS,
- const PrintingPolicy &policy, const Twine &PlaceHolder,
- unsigned Indentation = 0) {
- return print(split.Ty, split.Quals, OS, policy, PlaceHolder, Indentation);
- }
-
- static void print(const Type *ty, Qualifiers qs,
- raw_ostream &OS, const PrintingPolicy &policy,
- const Twine &PlaceHolder,
- unsigned Indentation = 0);
-
- void getAsStringInternal(std::string &Str,
- const PrintingPolicy &Policy) const;
-
- static void getAsStringInternal(SplitQualType split, std::string &out,
- const PrintingPolicy &policy) {
- return getAsStringInternal(split.Ty, split.Quals, out, policy);
- }
-
- static void getAsStringInternal(const Type *ty, Qualifiers qs,
- std::string &out,
- const PrintingPolicy &policy);
-
- class StreamedQualTypeHelper {
- const QualType &T;
- const PrintingPolicy &Policy;
- const Twine &PlaceHolder;
- unsigned Indentation;
-
- public:
- StreamedQualTypeHelper(const QualType &T, const PrintingPolicy &Policy,
- const Twine &PlaceHolder, unsigned Indentation)
- : T(T), Policy(Policy), PlaceHolder(PlaceHolder),
- Indentation(Indentation) {}
-
- friend raw_ostream &operator<<(raw_ostream &OS,
- const StreamedQualTypeHelper &SQT) {
- SQT.T.print(OS, SQT.Policy, SQT.PlaceHolder, SQT.Indentation);
- return OS;
- }
- };
-
- StreamedQualTypeHelper stream(const PrintingPolicy &Policy,
- const Twine &PlaceHolder = Twine(),
- unsigned Indentation = 0) const {
- return StreamedQualTypeHelper(*this, Policy, PlaceHolder, Indentation);
- }
-
- void dump(const char *s) const;
- void dump() const;
- void dump(llvm::raw_ostream &OS, const ASTContext &Context) const;
-
- void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddPointer(getAsOpaquePtr());
- }
-
- /// Check if this type has any address space qualifier.
- inline bool hasAddressSpace() const;
-
- /// Return the address space of this type.
- inline LangAS getAddressSpace() const;
-
- /// Returns true if address space qualifiers overlap with T address space
- /// qualifiers.
- /// OpenCL C defines conversion rules for pointers to different address spaces
- /// and a notion of overlapping address spaces.
- /// CL1.1 or CL1.2:
- /// address spaces overlap iff they are the same.
- /// OpenCL C v2.0 s6.5.5 adds:
- /// __generic overlaps with any address space except for __constant.
- bool isAddressSpaceOverlapping(QualType T, const ASTContext &Ctx) const {
- Qualifiers Q = getQualifiers();
- Qualifiers TQ = T.getQualifiers();
- // Address spaces overlap if at least one of them is a superset of the other.
- return Q.isAddressSpaceSupersetOf(TQ, Ctx) ||
- TQ.isAddressSpaceSupersetOf(Q, Ctx);
- }
-
- /// Returns gc attribute of this type.
- inline Qualifiers::GC getObjCGCAttr() const;
-
- /// true when Type is objc's weak.
- bool isObjCGCWeak() const {
- return getObjCGCAttr() == Qualifiers::Weak;
- }
-
- /// true when Type is objc's strong.
- bool isObjCGCStrong() const {
- return getObjCGCAttr() == Qualifiers::Strong;
- }
-
- /// Returns lifetime attribute of this type.
- Qualifiers::ObjCLifetime getObjCLifetime() const {
- return getQualifiers().getObjCLifetime();
- }
-
- bool hasNonTrivialObjCLifetime() const {
- return getQualifiers().hasNonTrivialObjCLifetime();
- }
-
- bool hasStrongOrWeakObjCLifetime() const {
- return getQualifiers().hasStrongOrWeakObjCLifetime();
- }
-
- // true when Type is objc's weak and weak is enabled but ARC isn't.
- bool isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const;
-
- PointerAuthQualifier getPointerAuth() const {
- return getQualifiers().getPointerAuth();
- }
-
- bool hasAddressDiscriminatedPointerAuth() const {
- if (PointerAuthQualifier PtrAuth = getPointerAuth())
- return PtrAuth.isAddressDiscriminated();
- return false;
- }
-
- enum PrimitiveDefaultInitializeKind {
- /// The type does not fall into any of the following categories. Note that
- /// this case is zero-valued so that values of this enum can be used as a
- /// boolean condition for non-triviality.
- PDIK_Trivial,
-
- /// The type is an Objective-C retainable pointer type that is qualified
- /// with the ARC __strong qualifier.
- PDIK_ARCStrong,
-
- /// The type is an Objective-C retainable pointer type that is qualified
- /// with the ARC __weak qualifier.
- PDIK_ARCWeak,
-
- /// The type is a struct containing a field whose type is not PDIK_Trivial.
- PDIK_Struct
- };
-
- /// Functions to query basic properties of non-trivial C struct types.
-
- /// Check if this is a non-trivial type that would cause a C struct
- /// transitively containing this type to be non-trivial to default initialize
- /// and return the kind.
- PrimitiveDefaultInitializeKind
- isNonTrivialToPrimitiveDefaultInitialize() const;
-
- enum PrimitiveCopyKind {
- /// The type does not fall into any of the following categories. Note that
- /// this case is zero-valued so that values of this enum can be used as a
- /// boolean condition for non-triviality.
- PCK_Trivial,
-
- /// The type would be trivial except that it is volatile-qualified. Types
- /// that fall into one of the other non-trivial cases may additionally be
- /// volatile-qualified.
- PCK_VolatileTrivial,
-
- /// The type is an Objective-C retainable pointer type that is qualified
- /// with the ARC __strong qualifier.
- PCK_ARCStrong,
-
- /// The type is an Objective-C retainable pointer type that is qualified
- /// with the ARC __weak qualifier.
- PCK_ARCWeak,
-
- /// The type is an address-discriminated signed pointer type.
- PCK_PtrAuth,
-
- /// The type is a struct containing a field whose type is neither
- /// PCK_Trivial nor PCK_VolatileTrivial.
- /// Note that a C++ struct type does not necessarily match this; C++ copying
- /// semantics are too complex to express here, in part because they depend
- /// on the exact constructor or assignment operator that is chosen by
- /// overload resolution to do the copy.
- PCK_Struct
- };
-
- /// Check if this is a non-trivial type that would cause a C struct
- /// transitively containing this type to be non-trivial to copy and return the
- /// kind.
- PrimitiveCopyKind isNonTrivialToPrimitiveCopy() const;
-
- /// Check if this is a non-trivial type that would cause a C struct
- /// transitively containing this type to be non-trivial to destructively
- /// move and return the kind. Destructive move in this context is a C++-style
- /// move in which the source object is placed in a valid but unspecified state
- /// after it is moved, as opposed to a truly destructive move in which the
- /// source object is placed in an uninitialized state.
- PrimitiveCopyKind isNonTrivialToPrimitiveDestructiveMove() const;
-
- enum DestructionKind {
- DK_none,
- DK_cxx_destructor,
- DK_objc_strong_lifetime,
- DK_objc_weak_lifetime,
- DK_nontrivial_c_struct
- };
-
- /// Returns a nonzero value if objects of this type require
- /// non-trivial work to clean up after. Non-zero because it's
- /// conceivable that qualifiers (objc_gc(weak)?) could make
- /// something require destruction.
- DestructionKind isDestructedType() const {
- return isDestructedTypeImpl(*this);
- }
-
- /// Check if this is or contains a C union that is non-trivial to
- /// default-initialize, which is a union that has a member that is non-trivial
- /// to default-initialize. If this returns true,
- /// isNonTrivialToPrimitiveDefaultInitialize returns PDIK_Struct.
- bool hasNonTrivialToPrimitiveDefaultInitializeCUnion() const;
-
- /// Check if this is or contains a C union that is non-trivial to destruct,
- /// which is a union that has a member that is non-trivial to destruct. If
- /// this returns true, isDestructedType returns DK_nontrivial_c_struct.
- bool hasNonTrivialToPrimitiveDestructCUnion() const;
-
- /// Check if this is or contains a C union that is non-trivial to copy, which
- /// is a union that has a member that is non-trivial to copy. If this returns
- /// true, isNonTrivialToPrimitiveCopy returns PCK_Struct.
- bool hasNonTrivialToPrimitiveCopyCUnion() const;
-
- /// Determine whether expressions of the given type are forbidden
- /// from being lvalues in C.
- ///
- /// The expression types that are forbidden to be lvalues are:
- /// - 'void', but not qualified void
- /// - function types
- ///
- /// The exact rule here is C99 6.3.2.1:
- /// An lvalue is an expression with an object type or an incomplete
- /// type other than void.
- bool isCForbiddenLValueType() const;
-
- /// Substitute type arguments for the Objective-C type parameters used in the
- /// subject type.
- ///
- /// \param ctx ASTContext in which the type exists.
- ///
- /// \param typeArgs The type arguments that will be substituted for the
- /// Objective-C type parameters in the subject type, which are generally
- /// computed via \c Type::getObjCSubstitutions. If empty, the type
- /// parameters will be replaced with their bounds or id/Class, as appropriate
- /// for the context.
- ///
- /// \param context The context in which the subject type was written.
- ///
- /// \returns the resulting type.
- QualType substObjCTypeArgs(ASTContext &ctx,
- ArrayRef<QualType> typeArgs,
- ObjCSubstitutionContext context) const;
-
- /// Substitute type arguments from an object type for the Objective-C type
- /// parameters used in the subject type.
- ///
- /// This operation combines the computation of type arguments for
- /// substitution (\c Type::getObjCSubstitutions) with the actual process of
- /// substitution (\c QualType::substObjCTypeArgs) for the convenience of
- /// callers that need to perform a single substitution in isolation.
- ///
- /// \param objectType The type of the object whose member type we're
- /// substituting into. For example, this might be the receiver of a message
- /// or the base of a property access.
- ///
- /// \param dc The declaration context from which the subject type was
- /// retrieved, which indicates (for example) which type parameters should
- /// be substituted.
- ///
- /// \param context The context in which the subject type was written.
- ///
- /// \returns the subject type after replacing all of the Objective-C type
- /// parameters with their corresponding arguments.
- QualType substObjCMemberType(QualType objectType,
- const DeclContext *dc,
- ObjCSubstitutionContext context) const;
-
- /// Strip Objective-C "__kindof" types from the given type.
- QualType stripObjCKindOfType(const ASTContext &ctx) const;
-
- /// Remove all qualifiers including _Atomic.
- ///
- /// Like getUnqualifiedType(), the type may still be qualified if it is a
- /// sugared array type. To strip qualifiers even from within a sugared array
- /// type, use in conjunction with ASTContext::getUnqualifiedArrayType.
- QualType getAtomicUnqualifiedType() const;
-
-private:
- // These methods are implemented in a separate translation unit;
- // "static"-ize them to avoid creating temporary QualTypes in the
- // caller.
- static bool isConstant(QualType T, const ASTContext& Ctx);
- static QualType getDesugaredType(QualType T, const ASTContext &Context);
- static SplitQualType getSplitDesugaredType(QualType T);
- static SplitQualType getSplitUnqualifiedTypeImpl(QualType type);
- static QualType getSingleStepDesugaredTypeImpl(QualType type,
- const ASTContext &C);
- static QualType IgnoreParens(QualType T);
- static DestructionKind isDestructedTypeImpl(QualType type);
-
- /// Check if \param RD is or contains a non-trivial C union.
- static bool hasNonTrivialToPrimitiveDefaultInitializeCUnion(const RecordDecl *RD);
- static bool hasNonTrivialToPrimitiveDestructCUnion(const RecordDecl *RD);
- static bool hasNonTrivialToPrimitiveCopyCUnion(const RecordDecl *RD);
-};
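A hedged sketch of the smart-pointer behaviour described above; 'Ctx' is
assumed to be a clang::ASTContext the caller already has:

    clang::QualType QT = Ctx.IntTy;       // 'int', no qualifiers
    clang::QualType CQT = QT.withConst(); // 'const int'; const is a fast
                                          // qualifier held in the low bits
    assert(CQT.isLocalConstQualified());
    assert(CQT.getTypePtr() == QT.getTypePtr()); // same underlying Type node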
-
-raw_ostream &operator<<(raw_ostream &OS, QualType QT);
-
-} // namespace clang
-
-namespace llvm {
-
-/// Implement simplify_type for QualType, so that we can dyn_cast from QualType
-/// to a specific Type class.
-template<> struct simplify_type< ::clang::QualType> {
- using SimpleType = const ::clang::Type *;
-
- static SimpleType getSimplifiedValue(::clang::QualType Val) {
- return Val.getTypePtr();
- }
-};
-
-// Teach SmallPtrSet that QualType is "basically a pointer".
-template<>
-struct PointerLikeTypeTraits<clang::QualType> {
- static inline void *getAsVoidPointer(clang::QualType P) {
- return P.getAsOpaquePtr();
- }
-
- static inline clang::QualType getFromVoidPointer(void *P) {
- return clang::QualType::getFromOpaquePtr(P);
- }
-
- // Various qualifiers go in low bits.
- static constexpr int NumLowBitsAvailable = 0;
-};
-
-} // namespace llvm
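A sketch of what the two specializations above enable, assuming QT is
non-null and llvm/ADT/SmallPtrSet.h is included:

    void traitsSketch(clang::QualType QT) {
      // simplify_type lets the cast machinery see through QualType.
      if (const auto *PT = llvm::dyn_cast<clang::PointerType>(QT))
        (void)PT->getPointeeType();
      // PointerLikeTypeTraits lets QualType live in pointer-keyed containers.
      llvm::SmallPtrSet<clang::QualType, 4> Seen;
      Seen.insert(QT);
    }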
-
-namespace clang {
-
-/// Base class that is common to both the \c ExtQuals and \c Type
-/// classes, which allows \c QualType to access the common fields between the
-/// two.
-class ExtQualsTypeCommonBase {
- friend class ExtQuals;
- friend class QualType;
- friend class Type;
- friend class ASTReader;
-
- /// The "base" type of an extended qualifiers type (\c ExtQuals) or
- /// a self-referential pointer (for \c Type).
- ///
- /// This pointer allows an efficient mapping from a QualType to its
- /// underlying type pointer.
- const Type *const BaseType;
-
- /// The canonical type of this type. A QualType.
- QualType CanonicalType;
-
- ExtQualsTypeCommonBase(const Type *baseType, QualType canon)
- : BaseType(baseType), CanonicalType(canon) {}
-};
-
-/// We can encode up to four bits in the low bits of a
-/// type pointer, but there are many more type qualifiers that we want
-/// to be able to apply to an arbitrary type. Therefore we have this
-/// struct, intended to be heap-allocated and used by QualType to
-/// store qualifiers.
-///
-/// The current design tags the 'const', 'restrict', and 'volatile' qualifiers
-/// in three low bits on the QualType pointer; a fourth bit records whether
-/// the pointer is an ExtQuals node. The extended qualifiers (address spaces,
- /// Objective-C GC attributes) are much rarer.
-class alignas(TypeAlignment) ExtQuals : public ExtQualsTypeCommonBase,
- public llvm::FoldingSetNode {
- // NOTE: changing the fast qualifiers should be straightforward as
- // long as you don't make 'const' non-fast.
- // 1. Qualifiers:
- // a) Modify the bitmasks (Qualifiers::TQ and DeclSpec::TQ).
- // Fast qualifiers must occupy the low-order bits.
- // b) Update Qualifiers::FastWidth and FastMask.
- // 2. QualType:
- // a) Update is{Volatile,Restrict}Qualified(), defined inline.
- // b) Update remove{Volatile,Restrict}, defined near the end of
- // this header.
- // 3. ASTContext:
- // a) Update get{Volatile,Restrict}Type.
-
- /// The immutable set of qualifiers applied by this node. Always contains
- /// extended qualifiers.
- Qualifiers Quals;
-
- ExtQuals *this_() { return this; }
-
-public:
- ExtQuals(const Type *baseType, QualType canon, Qualifiers quals)
- : ExtQualsTypeCommonBase(baseType,
- canon.isNull() ? QualType(this_(), 0) : canon),
- Quals(quals) {
- assert(Quals.hasNonFastQualifiers()
- && "ExtQuals created with no fast qualifiers");
- assert(!Quals.hasFastQualifiers()
- && "ExtQuals created with fast qualifiers");
- }
-
- Qualifiers getQualifiers() const { return Quals; }
-
- bool hasObjCGCAttr() const { return Quals.hasObjCGCAttr(); }
- Qualifiers::GC getObjCGCAttr() const { return Quals.getObjCGCAttr(); }
-
- bool hasObjCLifetime() const { return Quals.hasObjCLifetime(); }
- Qualifiers::ObjCLifetime getObjCLifetime() const {
- return Quals.getObjCLifetime();
- }
-
- bool hasAddressSpace() const { return Quals.hasAddressSpace(); }
- LangAS getAddressSpace() const { return Quals.getAddressSpace(); }
-
- const Type *getBaseType() const { return BaseType; }
-
-public:
- void Profile(llvm::FoldingSetNodeID &ID) const {
- Profile(ID, getBaseType(), Quals);
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID,
- const Type *BaseType,
- Qualifiers Quals) {
- assert(!Quals.hasFastQualifiers() && "fast qualifiers in ExtQuals hash!");
- ID.AddPointer(BaseType);
- Quals.Profile(ID);
- }
-};
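The two Profile overloads above exist so that ASTContext can unique ExtQuals
nodes in an llvm::FoldingSet. A hedged sketch of that pattern, where
'ExtQualNodes', 'BaseTy', and 'Quals' are illustrative names rather than
guaranteed members:

    llvm::FoldingSetNodeID ID;
    clang::ExtQuals::Profile(ID, BaseTy, Quals); // Quals: non-fast only
    void *InsertPos = nullptr;
    clang::ExtQuals *EQ = ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos);
    if (!EQ) {
      // Allocate a new ExtQuals(BaseTy, Canon, Quals), then:
      // ExtQualNodes.InsertNode(EQ, InsertPos);
    }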
-
-/// The kind of C++11 ref-qualifier associated with a function type.
-/// This determines whether a member function's "this" object can be an
-/// lvalue, rvalue, or neither.
-enum RefQualifierKind {
- /// No ref-qualifier was provided.
- RQ_None = 0,
-
- /// An lvalue ref-qualifier was provided (\c &).
- RQ_LValue,
-
- /// An rvalue ref-qualifier was provided (\c &&).
- RQ_RValue
-};
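For reference, the three kinds map onto C++ member declarations like so:

    struct Widget {
      void draw() &;  // RQ_LValue: viable only when '*this' is an lvalue
      void draw() &&; // RQ_RValue: viable only when '*this' is an rvalue
      void reset();   // RQ_None: no ref-qualifier was written
    };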
-
-/// Which keyword(s) were used to create an AutoType.
-enum class AutoTypeKeyword {
- /// auto
- Auto,
-
- /// decltype(auto)
- DecltypeAuto,
-
- /// __auto_type (GNU extension)
- GNUAutoType
-};
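The three keywords correspond to declarations like:

    auto a = 42;            // AutoTypeKeyword::Auto
    decltype(auto) b = (a); // AutoTypeKeyword::DecltypeAuto; deduces int&
    __auto_type c = a;      // AutoTypeKeyword::GNUAutoType (GNU extension)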
-
-enum class ArraySizeModifier;
-enum class ElaboratedTypeKeyword;
-enum class VectorKind;
-
-/// The base class of the type hierarchy.
-///
-/// A central concept with types is that each type always has a canonical
-/// type. A canonical type is the type with any typedef names stripped out
-/// of it or the types it references. For example, consider:
-///
-/// typedef int foo;
-/// typedef foo* bar;
-/// 'int *' 'foo *' 'bar'
-///
-/// There will be a Type object created for 'int'. Since int is canonical, its
-/// CanonicalType pointer points to itself. There is also a Type for 'foo' (a
-/// TypedefType). Its CanonicalType pointer points to the 'int' Type. Next
-/// there is a PointerType that represents 'int*', which, like 'int', is
-/// canonical. Finally, there is a PointerType type for 'foo*' whose canonical
-/// type is 'int*', and there is a TypedefType for 'bar', whose canonical type
-/// is also 'int*'.
-///
-/// Non-canonical types are useful for emitting diagnostics, without losing
-/// information about typedefs being used. Canonical types are useful for type
-/// comparisons (they allow by-pointer equality tests) and useful for reasoning
-/// about whether something has a particular form (e.g. is a function type),
-/// because they implicitly, recursively, strip all typedefs out of a type.
-///
-/// Types, once created, are immutable.
-///
-class alignas(TypeAlignment) Type : public ExtQualsTypeCommonBase {
-public:
- enum TypeClass {
-#define TYPE(Class, Base) Class,
-#define LAST_TYPE(Class) TypeLast = Class
-#define ABSTRACT_TYPE(Class, Base)
-#include "clang/AST/TypeNodes.inc"
- };
-
-private:
- /// Bitfields required by the Type class.
- class TypeBitfields {
- friend class Type;
- template <class T> friend class TypePropertyCache;
-
- /// TypeClass bitfield - Enum that specifies what subclass this belongs to.
- LLVM_PREFERRED_TYPE(TypeClass)
- unsigned TC : 8;
-
- /// Store information on the type dependency.
- LLVM_PREFERRED_TYPE(TypeDependence)
- unsigned Dependence : llvm::BitWidth<TypeDependence>;
-
- /// True if the cache (i.e. the bitfields here starting with
- /// 'Cache') is valid.
- LLVM_PREFERRED_TYPE(bool)
- mutable unsigned CacheValid : 1;
-
- /// Linkage of this type.
- LLVM_PREFERRED_TYPE(Linkage)
- mutable unsigned CachedLinkage : 3;
-
- /// Whether this type involves any local or unnamed types.
- LLVM_PREFERRED_TYPE(bool)
- mutable unsigned CachedLocalOrUnnamed : 1;
-
- /// Whether this type comes from an AST file.
- LLVM_PREFERRED_TYPE(bool)
- mutable unsigned FromAST : 1;
-
- bool isCacheValid() const {
- return CacheValid;
- }
-
- Linkage getLinkage() const {
- assert(isCacheValid() && "getting linkage from invalid cache");
- return static_cast<Linkage>(CachedLinkage);
- }
-
- bool hasLocalOrUnnamedType() const {
- assert(isCacheValid() && "getting local-or-unnamed flag from invalid cache");
- return CachedLocalOrUnnamed;
- }
- };
- enum { NumTypeBits = 8 + llvm::BitWidth<TypeDependence> + 6 };
-
-protected:
- // These classes allow subclasses to somewhat cleanly pack bitfields
- // into Type.
-
- class ArrayTypeBitfields {
- friend class ArrayType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- /// CVR qualifiers from declarations like
- /// 'int X[static restrict 4]'. For function parameters only.
- LLVM_PREFERRED_TYPE(Qualifiers)
- unsigned IndexTypeQuals : 3;
-
- /// Storage class qualifiers from declarations like
- /// 'int X[static restrict 4]'. For function parameters only.
- LLVM_PREFERRED_TYPE(ArraySizeModifier)
- unsigned SizeModifier : 3;
- };
- enum { NumArrayTypeBits = NumTypeBits + 6 };
-
- class ConstantArrayTypeBitfields {
- friend class ConstantArrayType;
-
- LLVM_PREFERRED_TYPE(ArrayTypeBitfields)
- unsigned : NumArrayTypeBits;
-
- /// Whether we have a stored size expression.
- LLVM_PREFERRED_TYPE(bool)
- unsigned HasExternalSize : 1;
-
- LLVM_PREFERRED_TYPE(unsigned)
- unsigned SizeWidth : 5;
- };
-
- class BuiltinTypeBitfields {
- friend class BuiltinType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- /// The kind (BuiltinType::Kind) of builtin type this is.
- static constexpr unsigned NumOfBuiltinTypeBits = 9;
- unsigned Kind : NumOfBuiltinTypeBits;
- };
-
-public:
- static constexpr int FunctionTypeNumParamsWidth = 16;
- static constexpr int FunctionTypeNumParamsLimit = (1 << 16) - 1;
-
-protected:
- /// FunctionTypeBitfields store various bits belonging to FunctionProtoType.
- /// Only common bits are stored here. Additional uncommon bits are stored
- /// in a trailing object after FunctionProtoType.
- class FunctionTypeBitfields {
- friend class FunctionProtoType;
- friend class FunctionType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- /// The ref-qualifier associated with a \c FunctionProtoType.
- ///
- /// This is a value of type \c RefQualifierKind.
- LLVM_PREFERRED_TYPE(RefQualifierKind)
- unsigned RefQualifier : 2;
-
- /// Used only by FunctionProtoType, put here to pack with the
- /// other bitfields.
- /// The qualifiers are part of FunctionProtoType because...
- ///
- /// C++ 8.3.5p4: The return type, the parameter type list and the
- /// cv-qualifier-seq, [...], are part of the function type.
- LLVM_PREFERRED_TYPE(Qualifiers)
- unsigned FastTypeQuals : Qualifiers::FastWidth;
- /// Whether this function has extended Qualifiers.
- LLVM_PREFERRED_TYPE(bool)
- unsigned HasExtQuals : 1;
-
- /// The type of exception specification this function has.
- LLVM_PREFERRED_TYPE(ExceptionSpecificationType)
- unsigned ExceptionSpecType : 4;
-
- /// Whether this function has extended parameter information.
- LLVM_PREFERRED_TYPE(bool)
- unsigned HasExtParameterInfos : 1;
-
- /// Whether this function has extra bitfields for the prototype.
- LLVM_PREFERRED_TYPE(bool)
- unsigned HasExtraBitfields : 1;
-
- /// Whether the function is variadic.
- LLVM_PREFERRED_TYPE(bool)
- unsigned Variadic : 1;
-
- /// Whether this function has a trailing return type.
- LLVM_PREFERRED_TYPE(bool)
- unsigned HasTrailingReturn : 1;
-
- /// Whether this function is a CFI-unchecked callee.
- LLVM_PREFERRED_TYPE(bool)
- unsigned CFIUncheckedCallee : 1;
-
- /// Extra information which affects how the function is called, like
- /// regparm and the calling convention.
- LLVM_PREFERRED_TYPE(CallingConv)
- unsigned ExtInfo : 14;
-
- /// The number of parameters this function has, not counting '...'.
- /// According to [implimits], 8 bits should be enough here, but this limit
- /// is somewhat easy to exceed with metaprogramming, so we would like to
- /// keep NumParams as wide as reasonably possible.
- unsigned NumParams : FunctionTypeNumParamsWidth;
- };
-
- class ObjCObjectTypeBitfields {
- friend class ObjCObjectType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- /// The number of type arguments stored directly on this object type.
- unsigned NumTypeArgs : 7;
-
- /// The number of protocols stored directly on this object type.
- unsigned NumProtocols : 6;
-
- /// Whether this is a "kindof" type.
- LLVM_PREFERRED_TYPE(bool)
- unsigned IsKindOf : 1;
- };
-
- class ReferenceTypeBitfields {
- friend class ReferenceType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- /// True if the type was originally spelled with an lvalue sigil.
- /// This is never true of rvalue references but can also be false
- /// on lvalue references because of C++0x [dcl.typedef]p9,
- /// as follows:
- ///
- /// typedef int &ref; // lvalue, spelled lvalue
- /// typedef int &&rvref; // rvalue
- /// ref &a; // lvalue, inner ref, spelled lvalue
- /// ref &&a; // lvalue, inner ref
- /// rvref &a; // lvalue, inner ref, spelled lvalue
- /// rvref &&a; // rvalue, inner ref
- LLVM_PREFERRED_TYPE(bool)
- unsigned SpelledAsLValue : 1;
-
- /// True if the inner type is a reference type. This only happens
- /// in non-canonical forms.
- LLVM_PREFERRED_TYPE(bool)
- unsigned InnerRef : 1;
- };
-
- class TypeWithKeywordBitfields {
- friend class TypeWithKeyword;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- /// An ElaboratedTypeKeyword. 8 bits for efficient access.
- LLVM_PREFERRED_TYPE(ElaboratedTypeKeyword)
- unsigned Keyword : 8;
- };
-
- enum { NumTypeWithKeywordBits = NumTypeBits + 8 };
-
- class ElaboratedTypeBitfields {
- friend class ElaboratedType;
-
- LLVM_PREFERRED_TYPE(TypeWithKeywordBitfields)
- unsigned : NumTypeWithKeywordBits;
-
- /// Whether the ElaboratedType has a trailing OwnedTagDecl.
- LLVM_PREFERRED_TYPE(bool)
- unsigned HasOwnedTagDecl : 1;
- };
-
- class VectorTypeBitfields {
- friend class VectorType;
- friend class DependentVectorType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- /// The kind of vector, either a generic vector type or some
- /// target-specific vector type such as for AltiVec or Neon.
- LLVM_PREFERRED_TYPE(VectorKind)
- unsigned VecKind : 4;
- /// The number of elements in the vector.
- uint32_t NumElements;
- };
-
- class AttributedTypeBitfields {
- friend class AttributedType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- LLVM_PREFERRED_TYPE(attr::Kind)
- unsigned AttrKind : 32 - NumTypeBits;
- };
-
- class AutoTypeBitfields {
- friend class AutoType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- /// Was this placeholder type spelled as 'auto', 'decltype(auto)',
- /// or '__auto_type'? AutoTypeKeyword value.
- LLVM_PREFERRED_TYPE(AutoTypeKeyword)
- unsigned Keyword : 2;
-
- /// The number of template arguments in the type-constraints, which is
- /// expected to be able to hold at least 1024 according to [implimits].
- /// However as this limit is somewhat easy to hit with template
- /// metaprogramming we'd prefer to keep it as large as possible.
- /// At the moment it has been left as a non-bitfield since this type
- /// safely fits in 64 bits as an unsigned, so there is no reason to
- /// introduce the performance impact of a bitfield.
- unsigned NumArgs;
- };
-
- class TypeOfBitfields {
- friend class TypeOfType;
- friend class TypeOfExprType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
- LLVM_PREFERRED_TYPE(TypeOfKind)
- unsigned Kind : 1;
- };
-
- class UsingBitfields {
- friend class UsingType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- /// True if the underlying type is different from the declared one.
- LLVM_PREFERRED_TYPE(bool)
- unsigned hasTypeDifferentFromDecl : 1;
- };
-
- class TypedefBitfields {
- friend class TypedefType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- /// True if the underlying type is different from the declared one.
- LLVM_PREFERRED_TYPE(bool)
- unsigned hasTypeDifferentFromDecl : 1;
- };
-
- class TemplateTypeParmTypeBitfields {
- friend class TemplateTypeParmType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- /// The depth of the template parameter.
- unsigned Depth : 15;
-
- /// Whether this is a template parameter pack.
- LLVM_PREFERRED_TYPE(bool)
- unsigned ParameterPack : 1;
-
- /// The index of the template parameter.
- unsigned Index : 16;
- };
-
- class SubstTemplateTypeParmTypeBitfields {
- friend class SubstTemplateTypeParmType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- LLVM_PREFERRED_TYPE(bool)
- unsigned HasNonCanonicalUnderlyingType : 1;
-
- // The index of the template parameter this substitution represents.
- unsigned Index : 15;
-
- LLVM_PREFERRED_TYPE(bool)
- unsigned Final : 1;
-
- /// Represents the index within a pack if this represents a substitution
- /// from a pack expansion. This index starts at the end of the pack and
- /// increments towards the beginning.
- /// Positive non-zero number represents the index + 1.
- /// Zero means this is not substituted from an expansion.
- unsigned PackIndex : 15;
- };
-
- class SubstTemplateTypeParmPackTypeBitfields {
- friend class SubstTemplateTypeParmPackType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- // The index of the template parameter this substitution represents.
- unsigned Index : 16;
-
- /// The number of template arguments in \c Arguments, which is
- /// expected to be able to hold at least 1024 according to [implimits].
- /// However as this limit is somewhat easy to hit with template
- /// metaprogramming we'd prefer to keep it as large as possible.
- unsigned NumArgs : 16;
- };
-
- class TemplateSpecializationTypeBitfields {
- friend class TemplateSpecializationType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- /// Whether this template specialization type is a substituted type alias.
- LLVM_PREFERRED_TYPE(bool)
- unsigned TypeAlias : 1;
-
- /// The number of template arguments named in this class template
- /// specialization, which is expected to be able to hold at least 1024
- /// according to [implimits]. However, as this limit is somewhat easy to
- /// hit with template metaprogramming we'd prefer to keep it as large
- /// as possible. At the moment it has been left as a non-bitfield since
- /// this type safely fits in 64 bits as an unsigned, so there is no reason
- /// to introduce the performance impact of a bitfield.
- unsigned NumArgs;
- };
-
- class DependentTemplateSpecializationTypeBitfields {
- friend class DependentTemplateSpecializationType;
-
- LLVM_PREFERRED_TYPE(TypeWithKeywordBitfields)
- unsigned : NumTypeWithKeywordBits;
-
- /// The number of template arguments named in this class template
- /// specialization, which is expected to be able to hold at least 1024
- /// according to [implimits]. However, as this limit is somewhat easy to
- /// hit with template metaprogramming we'd prefer to keep it as large
- /// as possible. At the moment it has been left as a non-bitfield since
- /// this type safely fits in 64 bits as an unsigned, so there is no reason
- /// to introduce the performance impact of a bitfield.
- unsigned NumArgs;
- };
-
- class PackExpansionTypeBitfields {
- friend class PackExpansionType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- /// The number of expansions that this pack expansion will
- /// generate when substituted (+1), which is expected to be able to
- /// hold at least 1024 according to [implimits]. However, as this limit
- /// is somewhat easy to hit with template metaprogramming we'd prefer to
- /// keep it as large as possible. At the moment it has been left as a
- /// non-bitfield since this type safely fits in 64 bits as an unsigned, so
- /// there is no reason to introduce the performance impact of a bitfield.
- ///
- /// This field will only have a non-zero value when some of the parameter
- /// packs that occur within the pattern have been substituted but others
- /// have not.
- unsigned NumExpansions;
- };
-
- enum class PredefinedSugarKind {
- /// The "size_t" type.
- SizeT,
-
- /// The signed integer type corresponding to "size_t".
- SignedSizeT,
-
- /// The "ptrdiff_t" type.
- PtrdiffT,
-
- // Indicates how many items the enum has.
- Last = PtrdiffT
- };
-
- class PredefinedSugarTypeBitfields {
- friend class PredefinedSugarType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- LLVM_PREFERRED_TYPE(PredefinedSugarKind)
- unsigned Kind : 8;
- };
-
- class CountAttributedTypeBitfields {
- friend class CountAttributedType;
-
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
-
- static constexpr unsigned NumCoupledDeclsBits = 4;
- unsigned NumCoupledDecls : NumCoupledDeclsBits;
- LLVM_PREFERRED_TYPE(bool)
- unsigned CountInBytes : 1;
- LLVM_PREFERRED_TYPE(bool)
- unsigned OrNull : 1;
- };
- static_assert(sizeof(CountAttributedTypeBitfields) <= sizeof(unsigned));
-
- union {
- TypeBitfields TypeBits;
- ArrayTypeBitfields ArrayTypeBits;
- ConstantArrayTypeBitfields ConstantArrayTypeBits;
- AttributedTypeBitfields AttributedTypeBits;
- AutoTypeBitfields AutoTypeBits;
- TypeOfBitfields TypeOfBits;
- TypedefBitfields TypedefBits;
- UsingBitfields UsingBits;
- BuiltinTypeBitfields BuiltinTypeBits;
- FunctionTypeBitfields FunctionTypeBits;
- ObjCObjectTypeBitfields ObjCObjectTypeBits;
- ReferenceTypeBitfields ReferenceTypeBits;
- TypeWithKeywordBitfields TypeWithKeywordBits;
- ElaboratedTypeBitfields ElaboratedTypeBits;
- VectorTypeBitfields VectorTypeBits;
- TemplateTypeParmTypeBitfields TemplateTypeParmTypeBits;
- SubstTemplateTypeParmTypeBitfields SubstTemplateTypeParmTypeBits;
- SubstTemplateTypeParmPackTypeBitfields SubstTemplateTypeParmPackTypeBits;
- TemplateSpecializationTypeBitfields TemplateSpecializationTypeBits;
- DependentTemplateSpecializationTypeBitfields
- DependentTemplateSpecializationTypeBits;
- PackExpansionTypeBitfields PackExpansionTypeBits;
- CountAttributedTypeBitfields CountAttributedTypeBits;
- PredefinedSugarTypeBitfields PredefinedSugarTypeBits;
- };
-
-private:
- template <class T> friend class TypePropertyCache;
-
- /// Set whether this type comes from an AST file.
- void setFromAST(bool V = true) const {
- TypeBits.FromAST = V;
- }
-
-protected:
- friend class ASTContext;
-
- Type(TypeClass tc, QualType canon, TypeDependence Dependence)
- : ExtQualsTypeCommonBase(this,
- canon.isNull() ? QualType(this_(), 0) : canon) {
- static_assert(sizeof(*this) <=
- alignof(decltype(*this)) + sizeof(ExtQualsTypeCommonBase),
- "changing bitfields changed sizeof(Type)!");
- static_assert(alignof(decltype(*this)) % TypeAlignment == 0,
- "Insufficient alignment!");
- TypeBits.TC = tc;
- TypeBits.Dependence = static_cast<unsigned>(Dependence);
- TypeBits.CacheValid = false;
- TypeBits.CachedLocalOrUnnamed = false;
- TypeBits.CachedLinkage = llvm::to_underlying(Linkage::Invalid);
- TypeBits.FromAST = false;
- }
-
- // silence VC++ warning C4355: 'this' : used in base member initializer list
- Type *this_() { return this; }
-
- void setDependence(TypeDependence D) {
- TypeBits.Dependence = static_cast<unsigned>(D);
- }
-
- void addDependence(TypeDependence D) { setDependence(getDependence() | D); }
-
-public:
- friend class ASTReader;
- friend class ASTWriter;
- template <class T> friend class serialization::AbstractTypeReader;
- template <class T> friend class serialization::AbstractTypeWriter;
-
- Type(const Type &) = delete;
- Type(Type &&) = delete;
- Type &operator=(const Type &) = delete;
- Type &operator=(Type &&) = delete;
-
- TypeClass getTypeClass() const { return static_cast<TypeClass>(TypeBits.TC); }
-
- /// Whether this type comes from an AST file.
- bool isFromAST() const { return TypeBits.FromAST; }
-
- /// Whether this type is or contains an unexpanded parameter
- /// pack, used to support C++11 variadic templates.
- ///
- /// A type that contains a parameter pack shall be expanded by the
- /// ellipsis operator at some point. For example, the typedef in the
- /// following example contains an unexpanded parameter pack 'T':
- ///
- /// \code
- /// template<typename ...T>
- /// struct X {
- /// typedef T* pointer_types; // ill-formed; T is a parameter pack.
- /// };
- /// \endcode
- ///
- /// Note that this routine does not specify which parameter packs within
- /// the type are unexpanded.
- bool containsUnexpandedParameterPack() const {
- return getDependence() & TypeDependence::UnexpandedPack;
- }
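-
- // Editor's illustration (a sketch, not part of the original header): the
- // ill-formed pack in the example above becomes well-formed once expanded
- // by an ellipsis; std::tuple is just one convenient carrier for the
- // expansion.
- //
- //   template<typename ...T>
- //   struct X {
- //     typedef std::tuple<T*...> pointer_types; // OK: pack expanded
- //   };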
-
- /// Determines if this type would be canonical if it had no further
- /// qualification.
- bool isCanonicalUnqualified() const {
- return CanonicalType == QualType(this, 0);
- }
-
- /// Pull a single level of sugar off of this locally-unqualified type.
- /// Users should generally prefer SplitQualType::getSingleStepDesugaredType()
- /// or QualType::getSingleStepDesugaredType(const ASTContext&).
- QualType getLocallyUnqualifiedSingleStepDesugaredType() const;
-
- /// As an extension, we classify types as one of "sized" or "sizeless";
- /// every type is one or the other. Standard types are all sized;
- /// sizeless types are purely an extension.
- ///
- /// Sizeless types contain data with no specified size, alignment,
- /// or layout.
- bool isSizelessType() const;
- bool isSizelessBuiltinType() const;
-
- /// Returns true for all scalable vector types.
- bool isSizelessVectorType() const;
-
- /// Returns true for SVE scalable vector types.
- bool isSVESizelessBuiltinType() const;
-
- /// Returns true for RVV scalable vector types.
- bool isRVVSizelessBuiltinType() const;
-
- /// Check if this is a WebAssembly Externref Type.
- bool isWebAssemblyExternrefType() const;
-
- /// Returns true if this is a WebAssembly table type: either an array of
- /// reference types, or a pointer to a reference type (which can only be
- /// created by array to pointer decay).
- bool isWebAssemblyTableType() const;
-
- /// Determines if this is a sizeless type supported by the
- /// 'arm_sve_vector_bits' type attribute, which can be applied to a single
- /// SVE vector or predicate, excluding tuple types such as svint32x4_t.
- bool isSveVLSBuiltinType() const;
-
- /// Returns the representative type for the element of an SVE builtin type.
- /// This is used to represent fixed-length SVE vectors created with the
- /// 'arm_sve_vector_bits' type attribute as VectorType.
- QualType getSveEltType(const ASTContext &Ctx) const;
-
- /// Determines if this is a sizeless type supported by the
- /// 'riscv_rvv_vector_bits' type attribute, which can be applied to a single
- /// RVV vector or mask.
- bool isRVVVLSBuiltinType() const;
-
- /// Returns the representative type for the element of an RVV builtin type.
- /// This is used to represent fixed-length RVV vectors created with the
- /// 'riscv_rvv_vector_bits' type attribute as VectorType.
- QualType getRVVEltType(const ASTContext &Ctx) const;
-
- /// Returns the representative type for the element of a sizeless vector
- /// builtin type.
- QualType getSizelessVectorEltType(const ASTContext &Ctx) const;
-
- /// Types are partitioned into 3 broad categories (C99 6.2.5p1):
- /// object types, function types, and incomplete types.
-
- /// Return true if this is an incomplete type.
- /// A type that can describe objects, but which lacks information needed to
- /// determine its size (e.g. void, or a fwd declared struct). Clients of this
- /// routine will need to determine if the size is actually required.
- ///
- /// \param Def If non-null, and the type refers to some kind of declaration
- /// that can be completed (such as a C struct, C++ class, or Objective-C
- /// class), will be set to the declaration.
- bool isIncompleteType(NamedDecl **Def = nullptr) const;
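-
- // Editor's sketch (assumed usage, not from the original header), given a
- // hypothetical QualType QT:
- //
- //   NamedDecl *Def = nullptr;
- //   if (QT->isIncompleteType(&Def) && Def) {
- //     // e.g. a forward-declared struct; Def names the declaration that a
- //     // later definition would complete.
- //   }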
-
- /// Return true if this is an incomplete or object
- /// type, in other words, not a function type.
- bool isIncompleteOrObjectType() const {
- return !isFunctionType();
- }
-
- /// \returns True if the type is incomplete and it is also a type that
- /// cannot be completed by a later type definition.
- ///
- /// E.g. For `void` this is true but for `struct ForwardDecl;` this is false
- /// because a definition for `ForwardDecl` could be provided later on in the
- /// translation unit.
- ///
- /// Note that even when this function returns true for a type, it is
- /// still possible for declarations that contain this type to later have
- /// a complete type in the translation unit. E.g.:
- ///
- /// \code{.c}
- /// // This decl has type 'char[]' which is incomplete and cannot later be
- /// // completed by another type declaration.
- /// extern char foo[];
- /// // This decl now has complete type 'char[5]'.
- /// char foo[5]; // foo has a complete type
- /// \endcode
- bool isAlwaysIncompleteType() const;
-
- /// Determine whether this type is an object type.
- bool isObjectType() const {
- // C++ [basic.types]p8:
- // An object type is a (possibly cv-qualified) type that is not a
- // function type, not a reference type, and not a void type.
- return !isReferenceType() && !isFunctionType() && !isVoidType();
- }
-
- /// Return true if this is a literal type
- /// (C++11 [basic.types]p10)
- bool isLiteralType(const ASTContext &Ctx) const;
-
- /// Determine if this type is a structural type, per C++20 [temp.param]p7.
- bool isStructuralType() const;
-
- /// Test if this type is a standard-layout type.
- /// (C++11 [basic.types]p9)
- bool isStandardLayoutType() const;
-
- /// Helper methods to distinguish type categories. All type predicates
- /// operate on the canonical type, ignoring typedefs and qualifiers.
-
- /// Returns true if the type is a builtin type.
- bool isBuiltinType() const;
-
- /// Test for a particular builtin type.
- bool isSpecificBuiltinType(unsigned K) const;
-
- /// Test for a type which does not represent an actual type-system type but
- /// is instead used as a placeholder for various convenient purposes within
- /// Clang. All such types are BuiltinTypes.
- bool isPlaceholderType() const;
- const BuiltinType *getAsPlaceholderType() const;
-
- /// Test for a specific placeholder type.
- bool isSpecificPlaceholderType(unsigned K) const;
-
- /// Test for a placeholder type other than Overload; see
- /// BuiltinType::isNonOverloadPlaceholderType.
- bool isNonOverloadPlaceholderType() const;
-
- /// isIntegerType() does *not* include complex integers (a GCC extension).
- /// isComplexIntegerType() can be used to test for complex integers.
- bool isIntegerType() const; // C99 6.2.5p17 (int, char, bool, enum)
- bool isEnumeralType() const;
-
- /// Determine whether this type is a scoped enumeration type.
- bool isScopedEnumeralType() const;
- bool isBooleanType() const;
- bool isCharType() const;
- bool isWideCharType() const;
- bool isChar8Type() const;
- bool isChar16Type() const;
- bool isChar32Type() const;
- bool isAnyCharacterType() const;
- bool isUnicodeCharacterType() const;
- bool isIntegralType(const ASTContext &Ctx) const;
-
- /// Determine whether this type is an integral or enumeration type.
- bool isIntegralOrEnumerationType() const;
-
- /// Determine whether this type is an integral or unscoped enumeration type.
- bool isIntegralOrUnscopedEnumerationType() const;
- bool isUnscopedEnumerationType() const;
-
- /// Floating point categories.
- bool isRealFloatingType() const; // C99 6.2.5p10 (float, double, long double)
- /// isComplexType() does *not* include complex integers (a GCC extension).
- /// isComplexIntegerType() can be used to test for complex integers.
- bool isComplexType() const; // C99 6.2.5p11 (complex)
- bool isAnyComplexType() const; // C99 6.2.5p11 (complex) + Complex Int.
- bool isFloatingType() const; // C99 6.2.5p11 (real floating + complex)
- bool isHalfType() const; // OpenCL 6.1.1.1, NEON (IEEE 754-2008 half)
- bool isFloat16Type() const; // C11 extension ISO/IEC TS 18661
- bool isFloat32Type() const;
- bool isDoubleType() const;
- bool isBFloat16Type() const;
- bool isMFloat8Type() const;
- bool isFloat128Type() const;
- bool isIbm128Type() const;
- bool isRealType() const; // C99 6.2.5p17 (real floating + integer)
- bool isArithmeticType() const; // C99 6.2.5p18 (integer + floating)
- bool isVoidType() const; // C99 6.2.5p19
- bool isScalarType() const; // C99 6.2.5p21 (arithmetic + pointers)
- bool isAggregateType() const;
- bool isFundamentalType() const;
- bool isCompoundType() const;
-
- // Type Predicates: Check to see if this type is structurally the specified
- // type, ignoring typedefs and qualifiers.
- bool isFunctionType() const;
- bool isFunctionNoProtoType() const { return getAs<FunctionNoProtoType>(); }
- bool isFunctionProtoType() const { return getAs<FunctionProtoType>(); }
- bool isPointerType() const;
- bool isPointerOrReferenceType() const;
- bool isSignableType(const ASTContext &Ctx) const;
- bool isSignablePointerType() const;
- bool isSignableIntegerType(const ASTContext &Ctx) const;
- bool isAnyPointerType() const; // Any C pointer or ObjC object pointer
- bool isCountAttributedType() const;
- bool isCFIUncheckedCalleeFunctionType() const;
- bool hasPointeeToToCFIUncheckedCalleeFunctionType() const;
- bool isBlockPointerType() const;
- bool isVoidPointerType() const;
- bool isReferenceType() const;
- bool isLValueReferenceType() const;
- bool isRValueReferenceType() const;
- bool isObjectPointerType() const;
- bool isFunctionPointerType() const;
- bool isFunctionReferenceType() const;
- bool isMemberPointerType() const;
- bool isMemberFunctionPointerType() const;
- bool isMemberDataPointerType() const;
- bool isArrayType() const;
- bool isConstantArrayType() const;
- bool isIncompleteArrayType() const;
- bool isVariableArrayType() const;
- bool isArrayParameterType() const;
- bool isDependentSizedArrayType() const;
- bool isRecordType() const;
- bool isClassType() const;
- bool isStructureType() const;
- bool isStructureTypeWithFlexibleArrayMember() const;
- bool isObjCBoxableRecordType() const;
- bool isInterfaceType() const;
- bool isStructureOrClassType() const;
- bool isUnionType() const;
- bool isComplexIntegerType() const; // GCC _Complex integer type.
- bool isVectorType() const; // GCC vector type.
- bool isExtVectorType() const; // Extended vector type.
- bool isExtVectorBoolType() const; // Extended vector type with bool element.
- // Extended vector type with bool element that is packed. HLSL doesn't pack
- // its bool vectors.
- bool isPackedVectorBoolType(const ASTContext &ctx) const;
- bool isSubscriptableVectorType() const;
- bool isMatrixType() const; // Matrix type.
- bool isConstantMatrixType() const; // Constant matrix type.
- bool isDependentAddressSpaceType() const; // value-dependent address space qualifier
- bool isObjCObjectPointerType() const; // pointer to ObjC object
- bool isObjCRetainableType() const; // ObjC object or block pointer
- bool isObjCLifetimeType() const; // (array of)* retainable type
- bool isObjCIndirectLifetimeType() const; // (pointer to)* lifetime type
- bool isObjCNSObjectType() const; // __attribute__((NSObject))
- bool isObjCIndependentClassType() const; // __attribute__((objc_independent_class))
- // FIXME: change this to 'raw' interface type, so we can use 'interface'
- // type for the common case.
- bool isObjCObjectType() const; // NSString or typeof(*(id)0)
- bool isObjCQualifiedInterfaceType() const; // NSString<foo>
- bool isObjCQualifiedIdType() const; // id<foo>
- bool isObjCQualifiedClassType() const; // Class<foo>
- bool isObjCObjectOrInterfaceType() const;
- bool isObjCIdType() const; // id
- bool isDecltypeType() const;
- /// Was this type written with the special inert-in-ARC __unsafe_unretained
- /// qualifier?
- ///
- /// This approximates the answer to the following question: if this
- /// translation unit were compiled in ARC, would this type be qualified
- /// with __unsafe_unretained?
- bool isObjCInertUnsafeUnretainedType() const {
- return hasAttr(attr::ObjCInertUnsafeUnretained);
- }
-
- /// Whether the type is Objective-C 'id' or a __kindof type of an
- /// object type, e.g., __kindof NSView * or __kindof id
- /// <NSCopying>.
- ///
- /// \param bound Will be set to the bound on non-id subtype types,
- /// which will be a (possibly specialized) Objective-C class type, or
- /// null for 'id'.
- bool isObjCIdOrObjectKindOfType(const ASTContext &ctx,
- const ObjCObjectType *&bound) const;
-
- bool isObjCClassType() const; // Class
-
- /// Whether the type is Objective-C 'Class' or a __kindof type of an
- /// Class type, e.g., __kindof Class <NSCopying>.
- ///
- /// Unlike \c isObjCIdOrObjectKindOfType, there is no relevant bound
- /// here because Objective-C's type system cannot express "a class
- /// object for a subclass of NSFoo".
- bool isObjCClassOrClassKindOfType() const;
-
- bool isBlockCompatibleObjCPointerType(ASTContext &ctx) const;
- bool isObjCSelType() const; // SEL
- bool isObjCBuiltinType() const; // 'id' or 'Class'
- bool isObjCARCBridgableType() const;
- bool isCARCBridgableType() const;
- bool isTemplateTypeParmType() const; // C++ template type parameter
- bool isNullPtrType() const; // C++11 std::nullptr_t or
- // C23 nullptr_t
- bool isNothrowT() const; // C++ std::nothrow_t
- bool isAlignValT() const; // C++17 std::align_val_t
- bool isStdByteType() const; // C++17 std::byte
- bool isAtomicType() const; // C11 _Atomic()
- bool isUndeducedAutoType() const; // C++11 auto or
- // C++14 decltype(auto)
- bool isTypedefNameType() const; // typedef or alias template
-
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
- bool is##Id##Type() const;
-#include "clang/Basic/OpenCLImageTypes.def"
-
- bool isImageType() const; // Any OpenCL image type
-
- bool isSamplerT() const; // OpenCL sampler_t
- bool isEventT() const; // OpenCL event_t
- bool isClkEventT() const; // OpenCL clk_event_t
- bool isQueueT() const; // OpenCL queue_t
- bool isReserveIDT() const; // OpenCL reserve_id_t
-
-#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
- bool is##Id##Type() const;
-#include "clang/Basic/OpenCLExtensionTypes.def"
- // Type defined in cl_intel_device_side_avc_motion_estimation OpenCL extension
- bool isOCLIntelSubgroupAVCType() const;
- bool isOCLExtOpaqueType() const; // Any OpenCL extension type
-
- bool isPipeType() const; // OpenCL pipe type
- bool isBitIntType() const; // Bit-precise integer type
- bool isOpenCLSpecificType() const; // Any OpenCL specific type
-
-#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) bool is##Id##Type() const;
-#include "clang/Basic/HLSLIntangibleTypes.def"
- bool isHLSLSpecificType() const; // Any HLSL specific type
- bool isHLSLBuiltinIntangibleType() const; // Any HLSL builtin intangible type
- bool isHLSLAttributedResourceType() const;
- bool isHLSLInlineSpirvType() const;
- bool isHLSLResourceRecord() const;
- bool isHLSLIntangibleType()
- const; // Any HLSL intangible type (builtin, array, class)
-
- /// Determines if this type, which must satisfy
- /// isObjCLifetimeType(), is implicitly __unsafe_unretained rather
- /// than implicitly __strong.
- bool isObjCARCImplicitlyUnretainedType() const;
-
- /// Check if the type is the CUDA device builtin surface type.
- bool isCUDADeviceBuiltinSurfaceType() const;
- /// Check if the type is the CUDA device builtin texture type.
- bool isCUDADeviceBuiltinTextureType() const;
-
- /// Return the implicit lifetime for this type, which must not be dependent.
- Qualifiers::ObjCLifetime getObjCARCImplicitLifetime() const;
-
- enum ScalarTypeKind {
- STK_CPointer,
- STK_BlockPointer,
- STK_ObjCObjectPointer,
- STK_MemberPointer,
- STK_Bool,
- STK_Integral,
- STK_Floating,
- STK_IntegralComplex,
- STK_FloatingComplex,
- STK_FixedPoint
- };
-
- /// Given that this is a scalar type, classify it.
- ScalarTypeKind getScalarTypeKind() const;
-
- TypeDependence getDependence() const {
- return static_cast<TypeDependence>(TypeBits.Dependence);
- }
-
- /// Whether this type is or contains an error type.
- bool containsErrors() const {
- return getDependence() & TypeDependence::Error;
- }
-
- /// Whether this type is a dependent type, meaning that its definition
- /// somehow depends on a template parameter (C++ [temp.dep.type]).
- bool isDependentType() const {
- return getDependence() & TypeDependence::Dependent;
- }
-
- /// Determine whether this type is an instantiation-dependent type,
- /// meaning that the type involves a template parameter (even if the
- /// definition does not actually depend on the type substituted for that
- /// template parameter).
- bool isInstantiationDependentType() const {
- return getDependence() & TypeDependence::Instantiation;
- }
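-
- // Editor's illustration (not part of the original header): a type can be
- // instantiation-dependent without being dependent. The array bound below
- // mentions 'T', yet its value does not depend on it, because the outer
- // sizeof always measures a size_t:
- //
- //   template<typename T>
- //   void f() {
- //     int arr[sizeof(sizeof(T))]; // instantiation-dependent, not dependent
- //   }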
-
- /// Determine whether this type is an undeduced type, meaning that
- /// it somehow involves a C++11 'auto' type or similar which has not yet been
- /// deduced.
- bool isUndeducedType() const;
-
- /// Whether this type is a variably-modified type (C99 6.7.5).
- bool isVariablyModifiedType() const {
- return getDependence() & TypeDependence::VariablyModified;
- }
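-
- // Editor's illustration (not part of the original header): per C99 6.7.5,
- // a type is variably modified if a variable-length array type appears
- // anywhere in its composition, not only at the top level:
- //
- //   void g(int n) {
- //     int vla[n];   // variable length array type: variably modified
- //     int (*p)[n];  // pointer to VLA: also variably modified
- //   }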
-
- /// Whether this type involves a variable-length array type
- /// with a definite size.
- bool hasSizedVLAType() const;
-
- /// Whether this type is or contains a local or unnamed type.
- bool hasUnnamedOrLocalType() const;
-
- bool isOverloadableType() const;
-
- /// Determine whether this type is a C++ elaborated-type-specifier.
- bool isElaboratedTypeSpecifier() const;
-
- bool canDecayToPointerType() const;
-
- /// Whether this type is represented natively as a pointer. This includes
- /// pointers, references, block pointers, and Objective-C interface,
- /// qualified id, and qualified interface types, as well as nullptr_t.
- bool hasPointerRepresentation() const;
-
- /// Whether this type can represent an Objective-C pointer type for the
- /// purpose of GC'ability.
- bool hasObjCPointerRepresentation() const;
-
- /// Determine whether this type has an integer representation
- /// of some sort, e.g., it is an integer type or a vector.
- bool hasIntegerRepresentation() const;
-
- /// Determine whether this type has a signed integer representation
- /// of some sort, e.g., it is a signed integer type or a vector.
- bool hasSignedIntegerRepresentation() const;
-
- /// Determine whether this type has an unsigned integer representation
- /// of some sort, e.g., it is an unsigned integer type or a vector.
- bool hasUnsignedIntegerRepresentation() const;
-
- /// Determine whether this type has a floating-point representation
- /// of some sort, e.g., it is a floating-point type or a vector thereof.
- bool hasFloatingRepresentation() const;
-
- /// Determine whether this type has a boolean representation -- i.e., it is a
- /// boolean type, an enum type whose underlying type is a boolean type, or a
- /// vector of booleans.
- bool hasBooleanRepresentation() const;
-
- // Type Checking Functions: Check to see if this type is structurally the
- // specified type, ignoring typedefs and qualifiers, and return a pointer to
- // the best type we can.
- const RecordType *getAsStructureType() const;
- /// NOTE: getAs*ArrayType are methods on ASTContext.
- const RecordType *getAsUnionType() const;
- const ComplexType *getAsComplexIntegerType() const; // GCC complex int type.
- const ObjCObjectType *getAsObjCInterfaceType() const;
-
- // The following is a convenience method that returns an ObjCObjectPointerType
- // for object declared using an interface.
- const ObjCObjectPointerType *getAsObjCInterfacePointerType() const;
- const ObjCObjectPointerType *getAsObjCQualifiedIdType() const;
- const ObjCObjectPointerType *getAsObjCQualifiedClassType() const;
- const ObjCObjectType *getAsObjCQualifiedInterfaceType() const;
-
- /// Retrieves the CXXRecordDecl that this type refers to, either
- /// because the type is a RecordType or because it is the injected-class-name
- /// type of a class template or class template partial specialization.
- CXXRecordDecl *getAsCXXRecordDecl() const;
-
- /// Retrieves the RecordDecl this type refers to.
- RecordDecl *getAsRecordDecl() const;
-
- /// Retrieves the TagDecl that this type refers to, either
- /// because the type is a TagType or because it is the injected-class-name
- /// type of a class template or class template partial specialization.
- TagDecl *getAsTagDecl() const;
-
- /// If this is a pointer or reference to a RecordType, return the
- /// CXXRecordDecl that the type refers to.
- ///
- /// If this is not a pointer or reference, or the type being pointed to does
- /// not refer to a CXXRecordDecl, returns NULL.
- const CXXRecordDecl *getPointeeCXXRecordDecl() const;
-
- /// Get the DeducedType whose type will be deduced for a variable with
- /// an initializer of this type. This looks through declarators like pointer
- /// types, but not through decltype or typedefs.
- DeducedType *getContainedDeducedType() const;
-
- /// Get the AutoType whose type will be deduced for a variable with
- /// an initializer of this type. This looks through declarators like pointer
- /// types, but not through decltype or typedefs.
- AutoType *getContainedAutoType() const {
- return dyn_cast_or_null<AutoType>(getContainedDeducedType());
- }
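-
- // Editor's illustration (not part of the original header): the lookup
- // walks declarator structure such as pointers and references only.
- //
- //   auto *p = &n;  // declared type 'auto *'; getContainedAutoType() finds
- //                  // the AutoType through the pointer declarator.
- //   auto &r = n;   // likewise through the reference.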
-
- /// Determine whether this type was written with a leading 'auto'
- /// corresponding to a trailing return type (possibly for a nested
- /// function type within a pointer to function type or similar).
- bool hasAutoForTrailingReturnType() const;
-
- /// Member-template getAs<specific type>. Look through sugar for
- /// an instance of \<specific type>. This scheme will eventually
- /// replace the specific getAsXXXX methods above.
- ///
- /// There are some specializations of this member template listed
- /// immediately following this class.
- template <typename T> const T *getAs() const;
-
- /// Look through sugar for an instance of TemplateSpecializationType which
- /// is not a type alias, or null if there is no such type.
- /// This is used when you want as-written template arguments or the template
- /// name for a class template specialization.
- const TemplateSpecializationType *
- getAsNonAliasTemplateSpecializationType() const;
-
- const TemplateSpecializationType *
- castAsNonAliasTemplateSpecializationType() const {
- const auto *TST = getAsNonAliasTemplateSpecializationType();
- assert(TST && "not a TemplateSpecializationType");
- return TST;
- }
-
- /// Member-template getAsAdjusted<specific type>. Look through specific kinds
- /// of sugar (parens, attributes, etc) for an instance of \<specific type>.
- /// This is used when you need to walk over sugar nodes that represent some
- /// kind of type adjustment from a type that was written as a \<specific type>
- /// to another type that is still canonically a \<specific type>.
- template <typename T> const T *getAsAdjusted() const;
-
- /// A variant of getAs<> for array types which silently discards
- /// qualifiers from the outermost type.
- const ArrayType *getAsArrayTypeUnsafe() const;
-
- /// Member-template castAs<specific type>. Look through sugar for
- /// the underlying instance of \<specific type>.
- ///
- /// This method has the same relationship to getAs<T> as cast<T> has
- /// to dyn_cast<T>; which is to say, the underlying type *must*
- /// have the intended type, and this method will never return null.
- template <typename T> const T *castAs() const;
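-
- // Editor's sketch (assumed usage, not from the original header), given a
- // hypothetical QualType QT: getAs<> may return null, castAs<> must succeed.
- //
- //   if (const auto *PT = QT->getAs<PointerType>()) // null if not a pointer
- //     (void)PT->getPointeeType();
- //   const auto *FPT = QT->castAs<FunctionProtoType>(); // asserts otherwise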
-
- /// A variant of castAs<> for array type which silently discards
- /// qualifiers from the outermost type.
- const ArrayType *castAsArrayTypeUnsafe() const;
-
- /// Determine whether this type had the specified attribute applied to it
- /// (looking through top-level type sugar).
- bool hasAttr(attr::Kind AK) const;
-
- /// Get the base element type of this type, potentially discarding type
- /// qualifiers. This should never be used when type qualifiers
- /// are meaningful.
- const Type *getBaseElementTypeUnsafe() const;
-
- /// If this is an array type, return the element type of the array,
- /// potentially with type qualifiers missing.
- /// This should never be used when type qualifiers are meaningful.
- const Type *getArrayElementTypeNoTypeQual() const;
-
- /// If this is a pointer type, return the pointee type.
- /// If this is an array type, return the array element type.
- /// This should never be used when type qualifiers are meaningful.
- const Type *getPointeeOrArrayElementType() const;
-
- /// If this is a pointer, ObjC object pointer, or block
- /// pointer, this returns the respective pointee.
- QualType getPointeeType() const;
-
- /// Return the specified type with any "sugar" removed from the type,
- /// removing any typedefs, typeofs, etc., as well as any qualifiers.
- const Type *getUnqualifiedDesugaredType() const;
-
- /// Return true if this is an integer type that is
- /// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
- /// or an enum decl which has a signed representation.
- bool isSignedIntegerType() const;
-
- /// Return true if this is an integer type that is
- /// unsigned, according to C99 6.2.5p6 [which returns true for _Bool],
- /// or an enum decl which has an unsigned representation.
- bool isUnsignedIntegerType() const;
-
- /// Determines whether this is an integer type that is signed or an
- /// enumeration type whose underlying type is a signed integer type.
- bool isSignedIntegerOrEnumerationType() const;
-
- /// Determines whether this is an integer type that is unsigned or an
- /// enumeration type whose underlying type is an unsigned integer type.
- bool isUnsignedIntegerOrEnumerationType() const;
-
- /// Return true if this is a fixed point type according to
- /// ISO/IEC JTC1 SC22 WG14 N1169.
- bool isFixedPointType() const;
-
- /// Return true if this is a fixed point or integer type.
- bool isFixedPointOrIntegerType() const;
-
- /// Return true if this can be converted to (or from) a fixed point type.
- bool isConvertibleToFixedPointType() const;
-
- /// Return true if this is a saturated fixed point type according to
- /// ISO/IEC JTC1 SC22 WG14 N1169. This type can be signed or unsigned.
- bool isSaturatedFixedPointType() const;
-
- /// Return true if this is an unsaturated fixed point type according to
- /// ISO/IEC JTC1 SC22 WG14 N1169. This type can be signed or unsigned.
- bool isUnsaturatedFixedPointType() const;
-
- /// Return true if this is a fixed point type that is signed according
- /// to ISO/IEC JTC1 SC22 WG14 N1169. This type can also be saturated.
- bool isSignedFixedPointType() const;
-
- /// Return true if this is a fixed point type that is unsigned according
- /// to ISO/IEC JTC1 SC22 WG14 N1169. This type can also be saturated.
- bool isUnsignedFixedPointType() const;
-
- /// Return true if this is not a variable sized type,
- /// according to the rules of C99 6.7.5p3. It is not legal to call this on
- /// incomplete types.
- bool isConstantSizeType() const;
-
- /// Returns true if this type can be represented by some
- /// set of type specifiers.
- bool isSpecifierType() const;
-
- /// Determine the linkage of this type.
- Linkage getLinkage() const;
-
- /// Determine the visibility of this type.
- Visibility getVisibility() const {
- return getLinkageAndVisibility().getVisibility();
- }
-
- /// Return true if the visibility was explicitly set in the code.
- bool isVisibilityExplicit() const {
- return getLinkageAndVisibility().isVisibilityExplicit();
- }
-
- /// Determine the linkage and visibility of this type.
- LinkageInfo getLinkageAndVisibility() const;
-
- /// True if the computed linkage is valid. Used for consistency
- /// checking. Should always return true.
- bool isLinkageValid() const;
-
- /// Determine the nullability of the given type.
- ///
- /// Note that nullability is only captured as sugar within the type
- /// system, not as part of the canonical type, so nullability will
- /// be lost by canonicalization and desugaring.
- std::optional<NullabilityKind> getNullability() const;
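-
- // Editor's illustration (not part of the original header): nullability
- // lives in the sugar, so it survives only as long as the sugar does.
- //
- //   int *_Nonnull p;  // getNullability() == NullabilityKind::NonNull;
- //                     // after canonicalization the type is plain 'int *'
- //                     // and getNullability() returns std::nullopt.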
-
- /// Determine whether the given type can have a nullability
- /// specifier applied to it, i.e., if it is any kind of pointer type.
- ///
- /// \param ResultIfUnknown The value to return if we don't yet know whether
- /// this type can have nullability because it is dependent.
- bool canHaveNullability(bool ResultIfUnknown = true) const;
-
- /// Retrieve the set of substitutions required when accessing a member
- /// of the Objective-C receiver type that is declared in the given context.
- ///
- /// \c *this is the type of the object we're operating on, e.g., the
- /// receiver for a message send or the base of a property access, and is
- /// expected to be of some object or object pointer type.
- ///
- /// \param dc The declaration context for which we are building up a
- /// substitution mapping, which should be an Objective-C class, extension,
- /// category, or a method within one of these.
- ///
- /// \returns an array of type arguments that can be substituted for
- /// the type parameters of the given declaration context in any type described
- /// within that context, or an empty optional to indicate that no
- /// substitution is required.
- std::optional<ArrayRef<QualType>>
- getObjCSubstitutions(const DeclContext *dc) const;
-
- /// Determines if this is an ObjC interface type that may accept type
- /// parameters.
- bool acceptsObjCTypeParams() const;
-
- const char *getTypeClassName() const;
-
- QualType getCanonicalTypeInternal() const {
- return CanonicalType;
- }
-
- CanQualType getCanonicalTypeUnqualified() const; // in CanonicalType.h
- void dump() const;
- void dump(llvm::raw_ostream &OS, const ASTContext &Context) const;
-};
-
-/// This will check for a TypedefType by removing any existing sugar
-/// until it reaches a TypedefType or a non-sugared type.
-template <> const TypedefType *Type::getAs() const;
-template <> const UsingType *Type::getAs() const;
-
-/// This will check for a TemplateSpecializationType by removing any
-/// existing sugar until it reaches a TemplateSpecializationType or a
-/// non-sugared type.
-template <> const TemplateSpecializationType *Type::getAs() const;
-
-/// This will check for an AttributedType by removing any existing sugar
-/// until it reaches an AttributedType or a non-sugared type.
-template <> const AttributedType *Type::getAs() const;
-
-/// This will check for a BoundsAttributedType by removing any existing
-/// sugar until it reaches a BoundsAttributedType or a non-sugared type.
-template <> const BoundsAttributedType *Type::getAs() const;
-
-/// This will check for a CountAttributedType by removing any existing
-/// sugar until it reaches a CountAttributedType or a non-sugared type.
-template <> const CountAttributedType *Type::getAs() const;
-
-// We can do canonical leaf types faster, because we don't have to
-// worry about preserving child type decoration.
-#define TYPE(Class, Base)
-#define LEAF_TYPE(Class) \
-template <> inline const Class##Type *Type::getAs() const { \
- return dyn_cast<Class##Type>(CanonicalType); \
-} \
-template <> inline const Class##Type *Type::castAs() const { \
- return cast<Class##Type>(CanonicalType); \
+inline CXXRecordDecl *Type::getAsCXXRecordDecl() const {
+ const auto *TT = dyn_cast<TagType>(CanonicalType);
+ if (!isa_and_present<RecordType, InjectedClassNameType>(TT))
+ return nullptr;
+ auto *TD = TT->getOriginalDecl();
+ if (isa<RecordType>(TT) && !isa<CXXRecordDecl>(TD))
+ return nullptr;
+ return cast<CXXRecordDecl>(TD)->getDefinitionOrSelf();
}
-#include "clang/AST/TypeNodes.inc"
-
-/// This class is used for builtin types like 'int'. Builtin
-/// types are always canonical and have a literal name field.
-class BuiltinType : public Type {
-public:
- enum Kind {
-// OpenCL image types
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) Id,
-#include "clang/Basic/OpenCLImageTypes.def"
-// OpenCL extension types
-#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) Id,
-#include "clang/Basic/OpenCLExtensionTypes.def"
-// SVE Types
-#define SVE_TYPE(Name, Id, SingletonId) Id,
-#include "clang/Basic/AArch64ACLETypes.def"
-// PPC MMA Types
-#define PPC_VECTOR_TYPE(Name, Id, Size) Id,
-#include "clang/Basic/PPCTypes.def"
-// RVV Types
-#define RVV_TYPE(Name, Id, SingletonId) Id,
-#include "clang/Basic/RISCVVTypes.def"
-// WebAssembly reference types
-#define WASM_TYPE(Name, Id, SingletonId) Id,
-#include "clang/Basic/WebAssemblyReferenceTypes.def"
-// AMDGPU types
-#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) Id,
-#include "clang/Basic/AMDGPUTypes.def"
-// HLSL intangible Types
-#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) Id,
-#include "clang/Basic/HLSLIntangibleTypes.def"
-// All other builtin types
-#define BUILTIN_TYPE(Id, SingletonId) Id,
-#define LAST_BUILTIN_TYPE(Id) LastKind = Id
-#include "clang/AST/BuiltinTypes.def"
- };
-
-private:
- friend class ASTContext; // ASTContext creates these.
-
- BuiltinType(Kind K)
- : Type(Builtin, QualType(),
- K == Dependent ? TypeDependence::DependentInstantiation
- : TypeDependence::None) {
- static_assert(Kind::LastKind <
- (1 << BuiltinTypeBitfields::NumOfBuiltinTypeBits) &&
- "Defined builtin type exceeds the allocated space for serial "
- "numbering");
- BuiltinTypeBits.Kind = K;
- }
-
-public:
- Kind getKind() const { return static_cast<Kind>(BuiltinTypeBits.Kind); }
- StringRef getName(const PrintingPolicy &Policy) const;
-
- const char *getNameAsCString(const PrintingPolicy &Policy) const {
- // The StringRef is null-terminated.
- StringRef str = getName(Policy);
- assert(!str.empty() && str.data()[str.size()] == '\0');
- return str.data();
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- bool isInteger() const {
- return getKind() >= Bool && getKind() <= Int128;
- }
-
- bool isSignedInteger() const {
- return getKind() >= Char_S && getKind() <= Int128;
- }
-
- bool isUnsignedInteger() const {
- return getKind() >= Bool && getKind() <= UInt128;
- }
-
- bool isFloatingPoint() const {
- return getKind() >= Half && getKind() <= Ibm128;
- }
-
- bool isSVEBool() const { return getKind() == Kind::SveBool; }
-
- bool isSVECount() const { return getKind() == Kind::SveCount; }
-
- /// Determines whether the given kind corresponds to a placeholder type.
- static bool isPlaceholderTypeKind(Kind K) {
- return K >= Overload;
- }
-
- /// Determines whether this type is a placeholder type, i.e. a type
- /// which cannot appear in arbitrary positions in a fully-formed
- /// expression.
- bool isPlaceholderType() const {
- return isPlaceholderTypeKind(getKind());
- }
-
- /// Determines whether this type is a placeholder type other than
- /// Overload. Most placeholder types require only syntactic
- /// information about their context in order to be resolved (e.g.
- /// whether it is a call expression), which means they can (and
- /// should) be resolved in an earlier "phase" of analysis.
- /// Overload expressions sometimes pick up further information
- /// from their context, like whether the context expects a
- /// specific function-pointer type, and so frequently need
- /// special treatment.
- bool isNonOverloadPlaceholderType() const {
- return getKind() > Overload;
- }
-
- static bool classof(const Type *T) { return T->getTypeClass() == Builtin; }
-};
-
-/// Complex values, per C99 6.2.5p11. This supports the C99 complex
-/// types (_Complex float etc) as well as the GCC integer complex extensions.
-class ComplexType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these.
-
- QualType ElementType;
-
- ComplexType(QualType Element, QualType CanonicalPtr)
- : Type(Complex, CanonicalPtr, Element->getDependence()),
- ElementType(Element) {}
-
-public:
- QualType getElementType() const { return ElementType; }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getElementType());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType Element) {
- ID.AddPointer(Element.getAsOpaquePtr());
- }
-
- static bool classof(const Type *T) { return T->getTypeClass() == Complex; }
-};
-
-/// Sugar for parentheses used when specifying types.
-class ParenType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these.
-
- QualType Inner;
-
- ParenType(QualType InnerType, QualType CanonType)
- : Type(Paren, CanonType, InnerType->getDependence()), Inner(InnerType) {}
-
-public:
- QualType getInnerType() const { return Inner; }
-
- bool isSugared() const { return true; }
- QualType desugar() const { return getInnerType(); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getInnerType());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType Inner) {
- Inner.Profile(ID);
- }
-
- static bool classof(const Type *T) { return T->getTypeClass() == Paren; }
-};
-
-/// PointerType - C99 6.7.5.1 - Pointer Declarators.
-class PointerType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these.
-
- QualType PointeeType;
-
- PointerType(QualType Pointee, QualType CanonicalPtr)
- : Type(Pointer, CanonicalPtr, Pointee->getDependence()),
- PointeeType(Pointee) {}
-
-public:
- QualType getPointeeType() const { return PointeeType; }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getPointeeType());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) {
- ID.AddPointer(Pointee.getAsOpaquePtr());
- }
-
- static bool classof(const Type *T) { return T->getTypeClass() == Pointer; }
-};
-
-/// [BoundsSafety] Represents information about declarations referenced by the
-/// arguments of the `counted_by` attribute and the like.
-class TypeCoupledDeclRefInfo {
-public:
- using BaseTy = llvm::PointerIntPair<ValueDecl *, 1, unsigned>;
-
-private:
- enum {
- DerefShift = 0,
- DerefMask = 1,
- };
- BaseTy Data;
-
-public:
- /// \p D is a declaration referenced by the attribute's argument. \p Deref
- /// indicates whether \p D is referenced in dereferenced form, e.g., \p
- /// Deref is true for `*n` in `int *__counted_by(*n)`.
- TypeCoupledDeclRefInfo(ValueDecl *D = nullptr, bool Deref = false);
-
- bool isDeref() const;
- ValueDecl *getDecl() const;
- unsigned getInt() const;
- void *getOpaqueValue() const;
- bool operator==(const TypeCoupledDeclRefInfo &Other) const;
- void setFromOpaqueValue(void *V);
-};
-
-/// [BoundsSafety] Represents a parent type class for CountAttributedType and
-/// similar sugar types that will be introduced to represent a type with a
-/// bounds attribute.
-///
-/// Provides a common interface to navigate declarations referred to by the
-/// bounds expression.
-class BoundsAttributedType : public Type, public llvm::FoldingSetNode {
- QualType WrappedTy;
-
-protected:
- ArrayRef<TypeCoupledDeclRefInfo> Decls; // stored in trailing objects
-
- BoundsAttributedType(TypeClass TC, QualType Wrapped, QualType Canon);
-
-public:
- bool isSugared() const { return true; }
- QualType desugar() const { return WrappedTy; }
-
- using decl_iterator = const TypeCoupledDeclRefInfo *;
- using decl_range = llvm::iterator_range<decl_iterator>;
-
- decl_iterator dependent_decl_begin() const { return Decls.begin(); }
- decl_iterator dependent_decl_end() const { return Decls.end(); }
-
- unsigned getNumCoupledDecls() const { return Decls.size(); }
-
- decl_range dependent_decls() const {
- return decl_range(dependent_decl_begin(), dependent_decl_end());
- }
-
- ArrayRef<TypeCoupledDeclRefInfo> getCoupledDecls() const {
- return {dependent_decl_begin(), dependent_decl_end()};
- }
-
- bool referencesFieldDecls() const;
-
- static bool classof(const Type *T) {
- // Currently, only `class CountAttributedType` inherits
- // `BoundsAttributedType` but the subclass will grow as we add more bounds
- // annotations.
- switch (T->getTypeClass()) {
- case CountAttributed:
- return true;
- default:
- return false;
- }
- }
-};
-
-/// Represents a sugar type with `__counted_by` or `__sized_by` annotations,
-/// including their `_or_null` variants.
-class CountAttributedType final
- : public BoundsAttributedType,
- public llvm::TrailingObjects<CountAttributedType,
- TypeCoupledDeclRefInfo> {
- friend class ASTContext;
-
- Expr *CountExpr;
- /// \p CountExpr represents the argument of __counted_by and the like. \p
- /// CountInBytes indicates that \p CountExpr is a byte count (i.e.,
- /// __sized_by(_or_null)). \p OrNull means it's an or_null variant (i.e.,
- /// __counted_by_or_null or __sized_by_or_null). \p CoupledDecls contains the
- /// list of declarations referenced by \p CountExpr, which the type depends on
- /// for the bounds information.
- CountAttributedType(QualType Wrapped, QualType Canon, Expr *CountExpr,
- bool CountInBytes, bool OrNull,
- ArrayRef<TypeCoupledDeclRefInfo> CoupledDecls);
-
- unsigned numTrailingObjects(OverloadToken<TypeCoupledDeclRefInfo>) const {
- return CountAttributedTypeBits.NumCoupledDecls;
- }
-
-public:
- enum DynamicCountPointerKind {
- CountedBy = 0,
- SizedBy,
- CountedByOrNull,
- SizedByOrNull,
- };
-
- Expr *getCountExpr() const { return CountExpr; }
- bool isCountInBytes() const { return CountAttributedTypeBits.CountInBytes; }
- bool isOrNull() const { return CountAttributedTypeBits.OrNull; }
-
- DynamicCountPointerKind getKind() const {
- if (isOrNull())
- return isCountInBytes() ? SizedByOrNull : CountedByOrNull;
- return isCountInBytes() ? SizedBy : CountedBy;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, desugar(), CountExpr, isCountInBytes(), isOrNull());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType WrappedTy,
- Expr *CountExpr, bool CountInBytes, bool Nullable);
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == CountAttributed;
- }
-
- StringRef getAttributeName(bool WithMacroPrefix) const;
-};
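-
-// Editor's illustration (not part of the original header): how the four
-// bounds annotations map onto DynamicCountPointerKind.
-//
-//   int *__counted_by(n) p;          // CountedBy: n elements
-//   void *__sized_by(n) q;           // SizedBy: n bytes (CountInBytes)
-//   int *__counted_by_or_null(n) r;  // CountedByOrNull (OrNull)
-//   void *__sized_by_or_null(n) s;   // SizedByOrNull (both flags set)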
-
-/// Represents a type which was implicitly adjusted by the semantic
-/// engine for arbitrary reasons. For example, array and function types can
-/// decay, and function types can have their calling conventions adjusted.
-class AdjustedType : public Type, public llvm::FoldingSetNode {
- QualType OriginalTy;
- QualType AdjustedTy;
-
-protected:
- friend class ASTContext; // ASTContext creates these.
-
- AdjustedType(TypeClass TC, QualType OriginalTy, QualType AdjustedTy,
- QualType CanonicalPtr)
- : Type(TC, CanonicalPtr, OriginalTy->getDependence()),
- OriginalTy(OriginalTy), AdjustedTy(AdjustedTy) {}
-
-public:
- QualType getOriginalType() const { return OriginalTy; }
- QualType getAdjustedType() const { return AdjustedTy; }
-
- bool isSugared() const { return true; }
- QualType desugar() const { return AdjustedTy; }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, OriginalTy, AdjustedTy);
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType Orig, QualType New) {
- ID.AddPointer(Orig.getAsOpaquePtr());
- ID.AddPointer(New.getAsOpaquePtr());
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == Adjusted || T->getTypeClass() == Decayed;
- }
-};
-
-/// Represents a pointer type decayed from an array or function type.
-class DecayedType : public AdjustedType {
- friend class ASTContext; // ASTContext creates these.
-
- inline
- DecayedType(QualType OriginalType, QualType Decayed, QualType Canonical);
-
-public:
- QualType getDecayedType() const { return getAdjustedType(); }
-
- inline QualType getPointeeType() const;
-
- static bool classof(const Type *T) { return T->getTypeClass() == Decayed; }
-};
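-
-// Editor's illustration (not part of the original header): parameter
-// adjustment is the usual source of DecayedType nodes.
-//
-//   void f(int a[10]); // 'a' has a DecayedType: original type 'int[10]',
-//                      // adjusted (decayed) type 'int *'.
-//   void g(void h());  // 'h' decays from 'void ()' to 'void (*)()'.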
-
-/// Pointer to a block type.
-/// This type is to represent types syntactically represented as
-/// "void (^)(int)", etc. Pointee is required to always be a function type.
-class BlockPointerType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these.
-
- // Block is some kind of pointer type
- QualType PointeeType;
-
- BlockPointerType(QualType Pointee, QualType CanonicalCls)
- : Type(BlockPointer, CanonicalCls, Pointee->getDependence()),
- PointeeType(Pointee) {}
-
-public:
- // Get the pointee type. Pointee is required to always be a function type.
- QualType getPointeeType() const { return PointeeType; }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getPointeeType());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) {
- ID.AddPointer(Pointee.getAsOpaquePtr());
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == BlockPointer;
- }
-};
-
-/// Base for LValueReferenceType and RValueReferenceType
-class ReferenceType : public Type, public llvm::FoldingSetNode {
- QualType PointeeType;
-
-protected:
- ReferenceType(TypeClass tc, QualType Referencee, QualType CanonicalRef,
- bool SpelledAsLValue)
- : Type(tc, CanonicalRef, Referencee->getDependence()),
- PointeeType(Referencee) {
- ReferenceTypeBits.SpelledAsLValue = SpelledAsLValue;
- ReferenceTypeBits.InnerRef = Referencee->isReferenceType();
- }
-
-public:
- bool isSpelledAsLValue() const { return ReferenceTypeBits.SpelledAsLValue; }
- bool isInnerRef() const { return ReferenceTypeBits.InnerRef; }
-
- QualType getPointeeTypeAsWritten() const { return PointeeType; }
-
- QualType getPointeeType() const {
- // FIXME: this might strip inner qualifiers; okay?
- const ReferenceType *T = this;
- while (T->isInnerRef())
- T = T->PointeeType->castAs<ReferenceType>();
- return T->PointeeType;
- }
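-
- // Editor's illustration (not part of the original header): the written
- // pointee may itself be a reference when formed through sugar, and
- // getPointeeType() walks through such inner references:
- //
- //   typedef int &IntRef;
- //   IntRef &r = n;  // 'IntRef &' collapses to 'int &';
- //                   // getPointeeTypeAsWritten() yields 'IntRef',
- //                   // getPointeeType() yields 'int'.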
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, PointeeType, isSpelledAsLValue());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID,
- QualType Referencee,
- bool SpelledAsLValue) {
- ID.AddPointer(Referencee.getAsOpaquePtr());
- ID.AddBoolean(SpelledAsLValue);
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == LValueReference ||
- T->getTypeClass() == RValueReference;
- }
-};
-
-/// An lvalue reference type, per C++11 [dcl.ref].
-class LValueReferenceType : public ReferenceType {
- friend class ASTContext; // ASTContext creates these
-
- LValueReferenceType(QualType Referencee, QualType CanonicalRef,
- bool SpelledAsLValue)
- : ReferenceType(LValueReference, Referencee, CanonicalRef,
- SpelledAsLValue) {}
-
-public:
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == LValueReference;
- }
-};
-
-/// An rvalue reference type, per C++11 [dcl.ref].
-class RValueReferenceType : public ReferenceType {
- friend class ASTContext; // ASTContext creates these
-
- RValueReferenceType(QualType Referencee, QualType CanonicalRef)
- : ReferenceType(RValueReference, Referencee, CanonicalRef, false) {}
-
-public:
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == RValueReference;
- }
-};
-
-/// A pointer to member type per C++ 8.3.3 - Pointers to members.
-///
-/// This includes both pointers to data members and pointer to member functions.
-class MemberPointerType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these.
-
- QualType PointeeType;
-
- /// The class of which the pointee is a member. Must ultimately denote a
- /// CXXRecordDecl, but could be written as a typedef or a template
- /// parameter too.
- NestedNameSpecifier *Qualifier;
-
- MemberPointerType(QualType Pointee, NestedNameSpecifier *Qualifier,
- QualType CanonicalPtr)
- : Type(MemberPointer, CanonicalPtr,
- (toTypeDependence(Qualifier->getDependence()) &
- ~TypeDependence::VariablyModified) |
- Pointee->getDependence()),
- PointeeType(Pointee), Qualifier(Qualifier) {}
-
-public:
- QualType getPointeeType() const { return PointeeType; }
-
- /// Returns true if the member type (i.e. the pointee type) is a
- /// function type rather than a data-member type.
- bool isMemberFunctionPointer() const {
- return PointeeType->isFunctionProtoType();
- }
-
- /// Returns true if the member type (i.e. the pointee type) is a
- /// data type rather than a function type.
- bool isMemberDataPointer() const {
- return !PointeeType->isFunctionProtoType();
- }
-
- NestedNameSpecifier *getQualifier() const { return Qualifier; }
- /// Note: this can trigger extra deserialization when external AST sources are
- /// used. Prefer `getCXXRecordDecl()` unless you really need the most recent
- /// decl.
- CXXRecordDecl *getMostRecentCXXRecordDecl() const;
-
- bool isSugared() const;
- QualType desugar() const {
- return isSugared() ? getCanonicalTypeInternal() : QualType(this, 0);
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- // FIXME: `getMostRecentCXXRecordDecl()` should be possible to use here,
- // however when external AST sources are used it causes nondeterminism
- // issues (see https://github.com/llvm/llvm-project/pull/137910).
- Profile(ID, getPointeeType(), getQualifier(), getCXXRecordDecl());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee,
- const NestedNameSpecifier *Qualifier,
- const CXXRecordDecl *Cls);
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == MemberPointer;
- }
-
-private:
- CXXRecordDecl *getCXXRecordDecl() const;
-};
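-
-// Editor's illustration (not part of the original header): both flavors of
-// member pointer are MemberPointerTypes, distinguished by whether the
-// pointee is a function prototype.
-//
-//   struct S { int n; int f(int); };
-//   int S::*pd = &S::n;         // isMemberDataPointer()
-//   int (S::*pf)(int) = &S::f;  // isMemberFunctionPointer()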
-
-/// Captures whether this is a normal array (e.g. int X[4]),
-/// an array with a static size (e.g. int X[static 4]), or an array
-/// with a star size (e.g. int X[*]).
-/// 'static' is only allowed on function parameters.
-enum class ArraySizeModifier { Normal, Static, Star };
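-
-// Editor's illustration (not part of the original header): all three
-// modifiers as they may appear on C99 function parameters.
-//
-//   void f(int normal[4],       // ArraySizeModifier::Normal
-//          int fixed[static 4], // ArraySizeModifier::Static
-//          int star[*]);        // ArraySizeModifier::Star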
-
-/// Represents an array type, per C99 6.7.5.2 - Array Declarators.
-class ArrayType : public Type, public llvm::FoldingSetNode {
-private:
- /// The element type of the array.
- QualType ElementType;
-
-protected:
- friend class ASTContext; // ASTContext creates these.
-
- ArrayType(TypeClass tc, QualType et, QualType can, ArraySizeModifier sm,
- unsigned tq, const Expr *sz = nullptr);
-
-public:
- QualType getElementType() const { return ElementType; }
-
- ArraySizeModifier getSizeModifier() const {
- return ArraySizeModifier(ArrayTypeBits.SizeModifier);
- }
-
- Qualifiers getIndexTypeQualifiers() const {
- return Qualifiers::fromCVRMask(getIndexTypeCVRQualifiers());
- }
-
- unsigned getIndexTypeCVRQualifiers() const {
- return ArrayTypeBits.IndexTypeQuals;
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == ConstantArray ||
- T->getTypeClass() == VariableArray ||
- T->getTypeClass() == IncompleteArray ||
- T->getTypeClass() == DependentSizedArray ||
- T->getTypeClass() == ArrayParameter;
- }
-};
-
-/// Represents the canonical version of C arrays with a specified constant size.
-/// For example, the canonical type for 'int A[4 + 4*100]' is a
-/// ConstantArrayType where the element type is 'int' and the size is 404.
-class ConstantArrayType : public ArrayType {
- friend class ASTContext; // ASTContext creates these.
-
- struct ExternalSize {
- ExternalSize(const llvm::APInt &Sz, const Expr *SE)
- : Size(Sz), SizeExpr(SE) {}
- llvm::APInt Size; // Allows us to unique the type.
- const Expr *SizeExpr;
- };
-
- union {
- uint64_t Size;
- ExternalSize *SizePtr;
- };
-
- ConstantArrayType(QualType Et, QualType Can, uint64_t Width, uint64_t Sz,
- ArraySizeModifier SM, unsigned TQ)
- : ArrayType(ConstantArray, Et, Can, SM, TQ, nullptr), Size(Sz) {
- ConstantArrayTypeBits.HasExternalSize = false;
- ConstantArrayTypeBits.SizeWidth = Width / 8;
- // The in-structure width is stored in bytes rather than bits, so we
- // drop the three least significant bits, which are always zero anyway.
- assert(Width < 0xFF && "Type width in bits must fit in 8 bits");
- }
-
- ConstantArrayType(QualType Et, QualType Can, ExternalSize *SzPtr,
- ArraySizeModifier SM, unsigned TQ)
- : ArrayType(ConstantArray, Et, Can, SM, TQ, SzPtr->SizeExpr),
- SizePtr(SzPtr) {
- ConstantArrayTypeBits.HasExternalSize = true;
- ConstantArrayTypeBits.SizeWidth = 0;
-
- assert((SzPtr->SizeExpr == nullptr || !Can.isNull()) &&
- "canonical constant array should not have size expression");
- }
-
- static ConstantArrayType *Create(const ASTContext &Ctx, QualType ET,
- QualType Can, const llvm::APInt &Sz,
- const Expr *SzExpr, ArraySizeModifier SzMod,
- unsigned Qual);
-
-protected:
- ConstantArrayType(TypeClass Tc, const ConstantArrayType *ATy, QualType Can)
- : ArrayType(Tc, ATy->getElementType(), Can, ATy->getSizeModifier(),
- ATy->getIndexTypeQualifiers().getAsOpaqueValue(), nullptr) {
- ConstantArrayTypeBits.HasExternalSize =
- ATy->ConstantArrayTypeBits.HasExternalSize;
- if (!ConstantArrayTypeBits.HasExternalSize) {
- ConstantArrayTypeBits.SizeWidth = ATy->ConstantArrayTypeBits.SizeWidth;
- Size = ATy->Size;
- } else
- SizePtr = ATy->SizePtr;
- }
-
-public:
- /// Return the constant array size as an APInt.
- llvm::APInt getSize() const {
- return ConstantArrayTypeBits.HasExternalSize
- ? SizePtr->Size
- : llvm::APInt(ConstantArrayTypeBits.SizeWidth * 8, Size);
- }
-
- /// Return the bit width of the size type.
- unsigned getSizeBitWidth() const {
- return ConstantArrayTypeBits.HasExternalSize
- ? SizePtr->Size.getBitWidth()
- : static_cast<unsigned>(ConstantArrayTypeBits.SizeWidth * 8);
- }
-
- /// Return true if the size is zero.
- bool isZeroSize() const {
- return ConstantArrayTypeBits.HasExternalSize ? SizePtr->Size.isZero()
- : 0 == Size;
- }
-
- /// Return the size zero-extended as a uint64_t.
- uint64_t getZExtSize() const {
- return ConstantArrayTypeBits.HasExternalSize ? SizePtr->Size.getZExtValue()
- : Size;
- }
-
- /// Return the size sign-extended as a uint64_t.
- int64_t getSExtSize() const {
- return ConstantArrayTypeBits.HasExternalSize ? SizePtr->Size.getSExtValue()
- : static_cast<int64_t>(Size);
- }
-
- /// Return the size zero-extended to uint64_t or UINT64_MAX if the value is
- /// larger than UINT64_MAX.
- uint64_t getLimitedSize() const {
- return ConstantArrayTypeBits.HasExternalSize
- ? SizePtr->Size.getLimitedValue()
- : Size;
- }
-
- /// Return a pointer to the size expression.
- const Expr *getSizeExpr() const {
- return ConstantArrayTypeBits.HasExternalSize ? SizePtr->SizeExpr : nullptr;
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- /// Determine the number of bits required to address a member of
- /// an array with the given element type and number of elements.
- static unsigned getNumAddressingBits(const ASTContext &Context,
- QualType ElementType,
- const llvm::APInt &NumElements);
-
- unsigned getNumAddressingBits(const ASTContext &Context) const;
-
- /// Determine the maximum number of active bits that an array's size
- /// can require, which limits the maximum size of the array.
- static unsigned getMaxSizeBits(const ASTContext &Context);
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) {
- Profile(ID, Ctx, getElementType(), getZExtSize(), getSizeExpr(),
- getSizeModifier(), getIndexTypeCVRQualifiers());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx,
- QualType ET, uint64_t ArraySize, const Expr *SizeExpr,
- ArraySizeModifier SizeMod, unsigned TypeQuals);
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == ConstantArray ||
- T->getTypeClass() == ArrayParameter;
- }
-};
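-
-// Editor's sketch (assumed usage, not from the original header), given a
-// hypothetical 'const ConstantArrayType *CAT' for 'int A[4 + 4*100]':
-//
-//   CAT->getZExtSize();  // 404, zero-extended to uint64_t
-//   CAT->getSize();      // llvm::APInt holding 404
-//   CAT->getSizeExpr();  // the '4 + 4*100' expression, or null when the
-//                        // size is stored inline without an expression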
-
-/// Represents a constant array type that does not decay to a pointer when used
-/// as a function parameter.
-class ArrayParameterType : public ConstantArrayType {
- friend class ASTContext; // ASTContext creates these.
-
- ArrayParameterType(const ConstantArrayType *ATy, QualType CanTy)
- : ConstantArrayType(ArrayParameter, ATy, CanTy) {}
-
-public:
- static bool classof(const Type *T) {
- return T->getTypeClass() == ArrayParameter;
- }
-
- QualType getConstantArrayType(const ASTContext &Ctx) const;
-};
-
-/// Represents a C array with an unspecified size. For example, 'int A[]' has
-/// an IncompleteArrayType where the element type is 'int' and the size is
-/// unspecified.
-class IncompleteArrayType : public ArrayType {
- friend class ASTContext; // ASTContext creates these.
-
- IncompleteArrayType(QualType et, QualType can,
- ArraySizeModifier sm, unsigned tq)
- : ArrayType(IncompleteArray, et, can, sm, tq) {}
-
-public:
- friend class StmtIteratorBase;
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == IncompleteArray;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getElementType(), getSizeModifier(),
- getIndexTypeCVRQualifiers());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType ET,
- ArraySizeModifier SizeMod, unsigned TypeQuals) {
- ID.AddPointer(ET.getAsOpaquePtr());
- ID.AddInteger(llvm::to_underlying(SizeMod));
- ID.AddInteger(TypeQuals);
- }
-};
-
-/// Represents a C array with a specified size that is not an
-/// integer-constant-expression. For example, 'int s[x+foo()]'.
-/// Since the size expression is an arbitrary expression, we store it as such.
-///
-/// Note: VariableArrayTypes aren't uniqued (since the expressions aren't) and
-/// should not be: two lexically equivalent variable array types could mean
-/// different things, for example, these variables do not have the same type
-/// dynamically:
-///
-/// void foo(int x) {
-/// int Y[x];
-/// ++x;
-/// int Z[x];
-/// }
-///
-/// FIXME: Even constant array types might be represented by a
-/// VariableArrayType, as in:
-///
-/// void func(int n) {
-/// int array[7][n];
-/// }
-///
-/// Even though 'array' is a constant-size array of seven elements of type
-/// variable-length array of size 'n', it will be represented as a
-/// VariableArrayType whose 'SizeExpr' is an IntegerLiteral whose value is 7.
-/// Instead, this should be a ConstantArrayType whose element is a
-/// VariableArrayType, which models the type better.
-class VariableArrayType : public ArrayType {
- friend class ASTContext; // ASTContext creates these.
-
- /// An assignment-expression. VLAs are only permitted within
- /// a function block.
- Stmt *SizeExpr;
-
- VariableArrayType(QualType et, QualType can, Expr *e, ArraySizeModifier sm,
- unsigned tq)
- : ArrayType(VariableArray, et, can, sm, tq, e), SizeExpr((Stmt *)e) {}
-
-public:
- friend class StmtIteratorBase;
-
- Expr *getSizeExpr() const {
- // We use C-style casts instead of cast<> here because we do not wish
- // to have a dependency of Type.h on Stmt.h/Expr.h.
- return (Expr*) SizeExpr;
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == VariableArray;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- llvm_unreachable("Cannot unique VariableArrayTypes.");
- }
-};
-
-/// Represents an array type in C++ whose size is a value-dependent expression.
-///
-/// For example:
-/// \code
-/// template<typename T, int Size>
-/// class array {
-/// T data[Size];
-/// };
-/// \endcode
-///
-/// For these types, we won't actually know what the array bound is
-/// until template instantiation occurs, at which point this will
-/// become either a ConstantArrayType or a VariableArrayType.
-class DependentSizedArrayType : public ArrayType {
- friend class ASTContext; // ASTContext creates these.
-
- /// An assignment expression that will instantiate to the
- /// size of the array.
- ///
- /// The expression itself might be null, in which case the array
- /// type will have its size deduced from an initializer.
- Stmt *SizeExpr;
-
- DependentSizedArrayType(QualType et, QualType can, Expr *e,
- ArraySizeModifier sm, unsigned tq);
-
-public:
- friend class StmtIteratorBase;
-
- Expr *getSizeExpr() const {
- // We use C-style casts instead of cast<> here because we do not wish
- // to have a dependency of Type.h on Stmt.h/Expr.h.
- return (Expr*) SizeExpr;
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == DependentSizedArray;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
- Profile(ID, Context, getElementType(),
- getSizeModifier(), getIndexTypeCVRQualifiers(), getSizeExpr());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- QualType ET, ArraySizeModifier SizeMod,
- unsigned TypeQuals, Expr *E);
-};
-
-/// Represents an extended address space qualifier where the input address
-/// space value is dependent. Non-dependent address spaces are not represented
-/// with a special Type subclass; they are stored on an ExtQuals node as part
-/// of a QualType.
-///
-/// For example:
-/// \code
-/// template<typename T, int AddrSpace>
-/// class AddressSpace {
-/// typedef T __attribute__((address_space(AddrSpace))) type;
-/// };
-/// \endcode
-class DependentAddressSpaceType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext;
-
- Expr *AddrSpaceExpr;
- QualType PointeeType;
- SourceLocation loc;
-
- DependentAddressSpaceType(QualType PointeeType, QualType can,
- Expr *AddrSpaceExpr, SourceLocation loc);
-
-public:
- Expr *getAddrSpaceExpr() const { return AddrSpaceExpr; }
- QualType getPointeeType() const { return PointeeType; }
- SourceLocation getAttributeLoc() const { return loc; }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == DependentAddressSpace;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
- Profile(ID, Context, getPointeeType(), getAddrSpaceExpr());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- QualType PointeeType, Expr *AddrSpaceExpr);
-};
-
-/// Represents an extended vector type where either the type or size is
-/// dependent.
-///
-/// For example:
-/// \code
-/// template<typename T, int Size>
-/// class vector {
-/// typedef T __attribute__((ext_vector_type(Size))) type;
-/// };
-/// \endcode
-class DependentSizedExtVectorType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext;
-
- Expr *SizeExpr;
-
- /// The element type of the array.
- QualType ElementType;
-
- SourceLocation loc;
-
- DependentSizedExtVectorType(QualType ElementType, QualType can,
- Expr *SizeExpr, SourceLocation loc);
-
-public:
- Expr *getSizeExpr() const { return SizeExpr; }
- QualType getElementType() const { return ElementType; }
- SourceLocation getAttributeLoc() const { return loc; }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == DependentSizedExtVector;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
- Profile(ID, Context, getElementType(), getSizeExpr());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- QualType ElementType, Expr *SizeExpr);
-};
-
-enum class VectorKind {
- /// not a target-specific vector type
- Generic,
-
- /// is AltiVec vector
- AltiVecVector,
-
- /// is AltiVec 'vector Pixel'
- AltiVecPixel,
-
- /// is AltiVec 'vector bool ...'
- AltiVecBool,
-
- /// is ARM Neon vector
- Neon,
-
- /// is ARM Neon polynomial vector
- NeonPoly,
-
- /// is AArch64 SVE fixed-length data vector
- SveFixedLengthData,
-
- /// is AArch64 SVE fixed-length predicate vector
- SveFixedLengthPredicate,
-
- /// is RISC-V RVV fixed-length data vector
- RVVFixedLengthData,
-
- /// is RISC-V RVV fixed-length mask vector
- RVVFixedLengthMask,
-
- RVVFixedLengthMask_1,
- RVVFixedLengthMask_2,
- RVVFixedLengthMask_4
-};
-
-/// Represents a GCC generic vector type. This type is created using
-/// __attribute__((vector_size(n))), where "n" specifies the vector size in
-/// bytes; or from an AltiVec __vector or vector declaration.
-/// Since the constructor takes the number of vector elements, the
-/// client is responsible for converting the size into the number of elements.
-class VectorType : public Type, public llvm::FoldingSetNode {
-protected:
- friend class ASTContext; // ASTContext creates these.
-
- /// The element type of the vector.
- QualType ElementType;
-
- VectorType(QualType vecType, unsigned nElements, QualType canonType,
- VectorKind vecKind);
-
- VectorType(TypeClass tc, QualType vecType, unsigned nElements,
- QualType canonType, VectorKind vecKind);
-
-public:
- QualType getElementType() const { return ElementType; }
- unsigned getNumElements() const { return VectorTypeBits.NumElements; }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- VectorKind getVectorKind() const {
- return VectorKind(VectorTypeBits.VecKind);
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getElementType(), getNumElements(),
- getTypeClass(), getVectorKind());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
- unsigned NumElements, TypeClass TypeClass,
- VectorKind VecKind) {
- ID.AddPointer(ElementType.getAsOpaquePtr());
- ID.AddInteger(NumElements);
- ID.AddInteger(TypeClass);
- ID.AddInteger(llvm::to_underlying(VecKind));
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == Vector || T->getTypeClass() == ExtVector;
- }
-};
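-
-// Illustrative only: vector_size takes a size in bytes, so a client creating
-// a VectorType converts bytes to an element count by dividing by the element
-// size. For example:
-// \code
-//   typedef int v4si __attribute__((vector_size(16)));
-//   // 16 bytes / sizeof(int) == 4, so the corresponding VectorType has
-//   // getNumElements() == 4 and getVectorKind() == VectorKind::Generic.
-// \endcode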
-
-/// Represents a vector type where either the type or size is dependent.
-///
-/// For example:
-/// \code
-/// template<typename T, int Size>
-/// class vector {
-/// typedef T __attribute__((vector_size(Size))) type;
-/// };
-/// \endcode
-class DependentVectorType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext;
-
- QualType ElementType;
- Expr *SizeExpr;
- SourceLocation Loc;
-
- DependentVectorType(QualType ElementType, QualType CanonType, Expr *SizeExpr,
- SourceLocation Loc, VectorKind vecKind);
-
-public:
- Expr *getSizeExpr() const { return SizeExpr; }
- QualType getElementType() const { return ElementType; }
- SourceLocation getAttributeLoc() const { return Loc; }
- VectorKind getVectorKind() const {
- return VectorKind(VectorTypeBits.VecKind);
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == DependentVector;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
- Profile(ID, Context, getElementType(), getSizeExpr(), getVectorKind());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- QualType ElementType, const Expr *SizeExpr,
- VectorKind VecKind);
-};
-
-/// ExtVectorType - Extended vector type. This type is created using
-/// __attribute__((ext_vector_type(n))), where "n" is the number of elements.
-/// Unlike vector_size, ext_vector_type is only allowed on typedefs. This
-/// class enables syntactic extensions, like Vector Components for accessing
-/// points (as .xyzw), colors (as .rgba), and textures (modeled after OpenGL
-/// Shading Language).
-class ExtVectorType : public VectorType {
- friend class ASTContext; // ASTContext creates these.
-
- ExtVectorType(QualType vecType, unsigned nElements, QualType canonType)
- : VectorType(ExtVector, vecType, nElements, canonType,
- VectorKind::Generic) {}
-
-public:
- static int getPointAccessorIdx(char c) {
- switch (c) {
- default: return -1;
- case 'x': case 'r': return 0;
- case 'y': case 'g': return 1;
- case 'z': case 'b': return 2;
- case 'w': case 'a': return 3;
- }
- }
-
- static int getNumericAccessorIdx(char c) {
- switch (c) {
- default: return -1;
- case '0': return 0;
- case '1': return 1;
- case '2': return 2;
- case '3': return 3;
- case '4': return 4;
- case '5': return 5;
- case '6': return 6;
- case '7': return 7;
- case '8': return 8;
- case '9': return 9;
- case 'A':
- case 'a': return 10;
- case 'B':
- case 'b': return 11;
- case 'C':
- case 'c': return 12;
- case 'D':
- case 'd': return 13;
- case 'E':
- case 'e': return 14;
- case 'F':
- case 'f': return 15;
- }
- }
-
- static int getAccessorIdx(char c, bool isNumericAccessor) {
- if (isNumericAccessor)
- return getNumericAccessorIdx(c);
- else
- return getPointAccessorIdx(c);
- }
-
- bool isAccessorWithinNumElements(char c, bool isNumericAccessor) const {
- if (int idx = getAccessorIdx(c, isNumericAccessor)+1)
- return unsigned(idx-1) < getNumElements();
- return false;
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == ExtVector;
- }
-};
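-
-// Illustrative only: ext_vector_type must appear on a typedef, and the
-// resulting type supports the component accessors indexed by the functions
-// above:
-// \code
-//   typedef float float4 __attribute__((ext_vector_type(4)));
-//   float4 V;
-//   V.xyzw = V.wzyx; // point accessors; getPointAccessorIdx('w') == 3
-//   float R = V.r;   // color accessor; same index as V.x
-// \endcode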
-
-/// Represents a matrix type, as defined in the Matrix Types Clang extension.
-/// __attribute__((matrix_type(rows, columns))), where "rows" specifies the
-/// number of rows and "columns" specifies the number of columns.
-class MatrixType : public Type, public llvm::FoldingSetNode {
-protected:
- friend class ASTContext;
-
- /// The element type of the matrix.
- QualType ElementType;
-
- MatrixType(QualType ElementTy, QualType CanonElementTy);
-
- MatrixType(TypeClass TypeClass, QualType ElementTy, QualType CanonElementTy,
- const Expr *RowExpr = nullptr, const Expr *ColumnExpr = nullptr);
-
-public:
- /// Returns the type of the elements stored in the matrix.
- QualType getElementType() const { return ElementType; }
-
- /// Valid element types are the following:
- /// * an integer type (as in C23 6.2.5p22), but excluding enumerated types
- /// and _Bool
- /// * the standard floating types float or double
- /// * a half-precision floating point type, if one is supported on the target
- static bool isValidElementType(QualType T) {
- return T->isDependentType() ||
- (T->isRealType() && !T->isBooleanType() && !T->isEnumeralType());
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == ConstantMatrix ||
- T->getTypeClass() == DependentSizedMatrix;
- }
-};
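-
-// Illustrative only: with -fenable-matrix, a matrix type is introduced via
-// the attribute described above; the element type must satisfy
-// isValidElementType():
-// \code
-//   typedef float m4x4_t __attribute__((matrix_type(4, 4)));
-//   // float is a valid element type; _Bool or an enumerated type would be
-//   // rejected by MatrixType::isValidElementType().
-// \endcode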
-
-/// Represents a concrete matrix type with constant number of rows and columns
-class ConstantMatrixType final : public MatrixType {
-protected:
- friend class ASTContext;
-
- /// Number of rows and columns.
- unsigned NumRows;
- unsigned NumColumns;
-
- static constexpr unsigned MaxElementsPerDimension = (1 << 20) - 1;
-
- ConstantMatrixType(QualType MatrixElementType, unsigned NRows,
- unsigned NColumns, QualType CanonElementType);
-
- ConstantMatrixType(TypeClass typeClass, QualType MatrixElementType,
- unsigned NRows, unsigned NColumns, QualType CanonElementType);
-
-public:
- /// Returns the number of rows in the matrix.
- unsigned getNumRows() const { return NumRows; }
-
- /// Returns the number of columns in the matrix.
- unsigned getNumColumns() const { return NumColumns; }
-
- /// Returns the number of elements required to embed the matrix into a vector.
- unsigned getNumElementsFlattened() const {
- return getNumRows() * getNumColumns();
- }
-
- /// Returns true if \p NumElements is a valid matrix dimension.
- static constexpr bool isDimensionValid(size_t NumElements) {
- return NumElements > 0 && NumElements <= MaxElementsPerDimension;
- }
-
- /// Returns the maximum number of elements per dimension.
- static constexpr unsigned getMaxElementsPerDimension() {
- return MaxElementsPerDimension;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getElementType(), getNumRows(), getNumColumns(),
- getTypeClass());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
- unsigned NumRows, unsigned NumColumns,
- TypeClass TypeClass) {
- ID.AddPointer(ElementType.getAsOpaquePtr());
- ID.AddInteger(NumRows);
- ID.AddInteger(NumColumns);
- ID.AddInteger(TypeClass);
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == ConstantMatrix;
- }
-};
-
-/// Represents a matrix type where the type and the number of rows and columns
-/// are dependent on a template.
-class DependentSizedMatrixType final : public MatrixType {
- friend class ASTContext;
-
- Expr *RowExpr;
- Expr *ColumnExpr;
-
- SourceLocation loc;
-
- DependentSizedMatrixType(QualType ElementType, QualType CanonicalType,
- Expr *RowExpr, Expr *ColumnExpr, SourceLocation loc);
-
-public:
- Expr *getRowExpr() const { return RowExpr; }
- Expr *getColumnExpr() const { return ColumnExpr; }
- SourceLocation getAttributeLoc() const { return loc; }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == DependentSizedMatrix;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
- Profile(ID, Context, getElementType(), getRowExpr(), getColumnExpr());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- QualType ElementType, Expr *RowExpr, Expr *ColumnExpr);
-};
-
-/// FunctionType - C99 6.7.5.3 - Function Declarators. This is the common base
-/// class of FunctionNoProtoType and FunctionProtoType.
-class FunctionType : public Type {
- // The type returned by the function.
- QualType ResultType;
-
-public:
- /// Interesting information about a specific parameter that can't simply
- /// be reflected in the parameter's type. This is only used by
- /// FunctionProtoType but is in FunctionType to make this class available
- /// during the specification of the bases of FunctionProtoType.
- ///
- /// It makes sense to model language features this way when there's some
- /// sort of parameter-specific override (such as an attribute) that
- /// affects how the function is called. For example, the ARC ns_consumed
- /// attribute changes whether a parameter is passed at +0 (the default)
- /// or +1 (ns_consumed). This must be reflected in the function type,
- /// but isn't really a change to the parameter type.
- ///
- /// One serious disadvantage of modelling language features this way is
- /// that they generally do not work with language features that attempt
- /// to destructure types. For example, template argument deduction will
- /// not be able to match a parameter declared as
- /// T (*)(U)
- /// against an argument of type
- /// void (*)(__attribute__((ns_consumed)) id)
- /// because the substitution of T=void, U=id into the former will
- /// not produce the latter.
- class ExtParameterInfo {
- enum {
- ABIMask = 0x0F,
- IsConsumed = 0x10,
- HasPassObjSize = 0x20,
- IsNoEscape = 0x40,
- };
- unsigned char Data = 0;
-
- public:
- ExtParameterInfo() = default;
-
- /// Return the ABI treatment of this parameter.
- ParameterABI getABI() const { return ParameterABI(Data & ABIMask); }
- ExtParameterInfo withABI(ParameterABI kind) const {
- ExtParameterInfo copy = *this;
- copy.Data = (copy.Data & ~ABIMask) | unsigned(kind);
- return copy;
- }
-
- /// Is this parameter considered "consumed" by Objective-C ARC?
- /// Consumed parameters must have retainable object type.
- bool isConsumed() const { return (Data & IsConsumed); }
- ExtParameterInfo withIsConsumed(bool consumed) const {
- ExtParameterInfo copy = *this;
- if (consumed)
- copy.Data |= IsConsumed;
- else
- copy.Data &= ~IsConsumed;
- return copy;
- }
-
- bool hasPassObjectSize() const { return Data & HasPassObjSize; }
- ExtParameterInfo withHasPassObjectSize() const {
- ExtParameterInfo Copy = *this;
- Copy.Data |= HasPassObjSize;
- return Copy;
- }
-
- bool isNoEscape() const { return Data & IsNoEscape; }
- ExtParameterInfo withIsNoEscape(bool NoEscape) const {
- ExtParameterInfo Copy = *this;
- if (NoEscape)
- Copy.Data |= IsNoEscape;
- else
- Copy.Data &= ~IsNoEscape;
- return Copy;
- }
-
- unsigned char getOpaqueValue() const { return Data; }
- static ExtParameterInfo getFromOpaqueValue(unsigned char data) {
- ExtParameterInfo result;
- result.Data = data;
- return result;
- }
-
- friend bool operator==(ExtParameterInfo lhs, ExtParameterInfo rhs) {
- return lhs.Data == rhs.Data;
- }
-
- friend bool operator!=(ExtParameterInfo lhs, ExtParameterInfo rhs) {
- return lhs.Data != rhs.Data;
- }
- };
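-
- // Illustrative only: ExtParameterInfo is immutable; the with* methods
- // return modified copies, which can be chained:
- // \code
- //   FunctionType::ExtParameterInfo EPI;
- //   EPI = EPI.withIsConsumed(true).withIsNoEscape(true);
- // \endcode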
-
- /// A class which abstracts out some details necessary for
- /// making a call.
- ///
- /// It is not actually used directly for storing this information in
- /// a FunctionType, although FunctionType does currently use the
- /// same bit-pattern.
- ///
- // If you add a field (say Foo), other than the obvious places (the
- // constructors and any resulting compile failures), what you need to
- // update is:
- // * operator==
- // * getFoo
- // * withFoo
- // * FunctionType: add Foo and getFoo.
- // * ASTContext::getFooType
- // * ASTContext::mergeFunctionTypes
- // * FunctionNoProtoType::Profile
- // * FunctionProtoType::Profile
- // * TypePrinter::PrintFunctionProto
- // * AST read and write
- // * Codegen
- class ExtInfo {
- friend class FunctionType;
-
- // Feel free to rearrange or add bits, but if you go over 16, you'll need to
- // adjust the Bits field below, and if you add bits, you'll need to adjust
- // Type::FunctionTypeBitfields::ExtInfo as well.
-
- // | CC |noreturn|produces|nocallersavedregs|regparm|nocfcheck|cmsenscall|
- // |0 .. 5| 6 | 7 | 8 |9 .. 11| 12 | 13 |
- //
- // regparm is either 0 (no regparm attribute) or the regparm value+1.
- enum { CallConvMask = 0x3F };
- enum { NoReturnMask = 0x40 };
- enum { ProducesResultMask = 0x80 };
- enum { NoCallerSavedRegsMask = 0x100 };
- enum { RegParmMask = 0xe00, RegParmOffset = 9 };
- enum { NoCfCheckMask = 0x1000 };
- enum { CmseNSCallMask = 0x2000 };
- uint16_t Bits = CC_C;
-
- ExtInfo(unsigned Bits) : Bits(static_cast<uint16_t>(Bits)) {}
-
- public:
- // Constructor with no defaults. Use this when you know that you
- // have all the elements (when reading an AST file for example).
- ExtInfo(bool noReturn, bool hasRegParm, unsigned regParm, CallingConv cc,
- bool producesResult, bool noCallerSavedRegs, bool NoCfCheck,
- bool cmseNSCall) {
- assert((!hasRegParm || regParm < 7) && "Invalid regparm value");
- Bits = ((unsigned)cc) | (noReturn ? NoReturnMask : 0) |
- (producesResult ? ProducesResultMask : 0) |
- (noCallerSavedRegs ? NoCallerSavedRegsMask : 0) |
- (hasRegParm ? ((regParm + 1) << RegParmOffset) : 0) |
- (NoCfCheck ? NoCfCheckMask : 0) |
- (cmseNSCall ? CmseNSCallMask : 0);
- }
-
- // Constructor with all defaults. Use when, for example, creating a
- // function known to use defaults.
- ExtInfo() = default;
-
- // Constructor with just the calling convention, which is an important part
- // of the canonical type.
- ExtInfo(CallingConv CC) : Bits(CC) {}
-
- bool getNoReturn() const { return Bits & NoReturnMask; }
- bool getProducesResult() const { return Bits & ProducesResultMask; }
- bool getCmseNSCall() const { return Bits & CmseNSCallMask; }
- bool getNoCallerSavedRegs() const { return Bits & NoCallerSavedRegsMask; }
- bool getNoCfCheck() const { return Bits & NoCfCheckMask; }
- bool getHasRegParm() const { return ((Bits & RegParmMask) >> RegParmOffset) != 0; }
-
- unsigned getRegParm() const {
- unsigned RegParm = (Bits & RegParmMask) >> RegParmOffset;
- if (RegParm > 0)
- --RegParm;
- return RegParm;
- }
-
- CallingConv getCC() const { return CallingConv(Bits & CallConvMask); }
-
- bool operator==(ExtInfo Other) const {
- return Bits == Other.Bits;
- }
- bool operator!=(ExtInfo Other) const {
- return Bits != Other.Bits;
- }
-
- // Note that we don't have setters. That is by design; use the
- // following with* methods instead of mutating these objects.
-
- ExtInfo withNoReturn(bool noReturn) const {
- if (noReturn)
- return ExtInfo(Bits | NoReturnMask);
- else
- return ExtInfo(Bits & ~NoReturnMask);
- }
-
- ExtInfo withProducesResult(bool producesResult) const {
- if (producesResult)
- return ExtInfo(Bits | ProducesResultMask);
- else
- return ExtInfo(Bits & ~ProducesResultMask);
- }
-
- ExtInfo withCmseNSCall(bool cmseNSCall) const {
- if (cmseNSCall)
- return ExtInfo(Bits | CmseNSCallMask);
- else
- return ExtInfo(Bits & ~CmseNSCallMask);
- }
-
- ExtInfo withNoCallerSavedRegs(bool noCallerSavedRegs) const {
- if (noCallerSavedRegs)
- return ExtInfo(Bits | NoCallerSavedRegsMask);
- else
- return ExtInfo(Bits & ~NoCallerSavedRegsMask);
- }
-
- ExtInfo withNoCfCheck(bool noCfCheck) const {
- if (noCfCheck)
- return ExtInfo(Bits | NoCfCheckMask);
- else
- return ExtInfo(Bits & ~NoCfCheckMask);
- }
-
- ExtInfo withRegParm(unsigned RegParm) const {
- assert(RegParm < 7 && "Invalid regparm value");
- return ExtInfo((Bits & ~RegParmMask) |
- ((RegParm + 1) << RegParmOffset));
- }
-
- ExtInfo withCallingConv(CallingConv cc) const {
- return ExtInfo((Bits & ~CallConvMask) | (unsigned) cc);
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger(Bits);
- }
- };
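-
- // Illustrative only: as noted above, ExtInfo has no setters; build a
- // variant by chaining the with* methods instead:
- // \code
- //   FunctionType::ExtInfo EI; // defaults: CC_C, no flags set
- //   EI = EI.withNoReturn(true).withCallingConv(CC_X86StdCall);
- //   assert(EI.getNoReturn() && EI.getCC() == CC_X86StdCall);
- // \endcode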
-
- /// A simple holder for a QualType representing a type in an
- /// exception specification. Unfortunately needed by FunctionProtoType
- /// because TrailingObjects cannot handle repeated types.
- struct ExceptionType { QualType Type; };
-
- /// A simple holder for various uncommon bits which do not fit in
- /// FunctionTypeBitfields. Aligned to alignof(void *) to maintain the
- /// alignment of subsequent objects in TrailingObjects.
- struct alignas(void *) FunctionTypeExtraBitfields {
- /// The number of types in the exception specification.
- /// A whole unsigned is not needed here; according to
- /// [implimits], 8 bits would be enough.
- unsigned NumExceptionType : 10;
-
- LLVM_PREFERRED_TYPE(bool)
- unsigned HasArmTypeAttributes : 1;
-
- LLVM_PREFERRED_TYPE(bool)
- unsigned EffectsHaveConditions : 1;
- unsigned NumFunctionEffects : 4;
-
- FunctionTypeExtraBitfields()
- : NumExceptionType(0), HasArmTypeAttributes(false),
- EffectsHaveConditions(false), NumFunctionEffects(0) {}
- };
-
- /// The AArch64 SME ACLE (Arm C/C++ Language Extensions) defines a number
- /// of function type attributes that can be set on function types, including
- /// function pointers.
- enum AArch64SMETypeAttributes : unsigned {
- SME_NormalFunction = 0,
- SME_PStateSMEnabledMask = 1 << 0,
- SME_PStateSMCompatibleMask = 1 << 1,
-
- // Describes the value of the state using ArmStateValue.
- SME_ZAShift = 2,
- SME_ZAMask = 0b111 << SME_ZAShift,
- SME_ZT0Shift = 5,
- SME_ZT0Mask = 0b111 << SME_ZT0Shift,
-
- // A bit to tell whether a function is agnostic about SME ZA state.
- SME_AgnosticZAStateShift = 8,
- SME_AgnosticZAStateMask = 1 << SME_AgnosticZAStateShift,
-
- SME_AttributeMask =
- 0b1'111'111'11 // We can't support more than 9 bits because of
- // the bitmask in FunctionTypeArmAttributes
- // and ExtProtoInfo.
- };
-
- enum ArmStateValue : unsigned {
- ARM_None = 0,
- ARM_Preserves = 1,
- ARM_In = 2,
- ARM_Out = 3,
- ARM_InOut = 4,
- };
-
- static ArmStateValue getArmZAState(unsigned AttrBits) {
- return (ArmStateValue)((AttrBits & SME_ZAMask) >> SME_ZAShift);
- }
-
- static ArmStateValue getArmZT0State(unsigned AttrBits) {
- return (ArmStateValue)((AttrBits & SME_ZT0Mask) >> SME_ZT0Shift);
- }
-
- /// A holder for Arm type attributes as described in the Arm C/C++
- /// Language Extensions which are not particularly common to all
- /// types and therefore accounted for separately from FunctionTypeBitfields.
- struct alignas(void *) FunctionTypeArmAttributes {
- /// Any AArch64 SME ACLE type attributes that need to be propagated
- /// on declarations and function pointers.
- unsigned AArch64SMEAttributes : 9;
-
- FunctionTypeArmAttributes() : AArch64SMEAttributes(SME_NormalFunction) {}
- };
-
-protected:
- FunctionType(TypeClass tc, QualType res, QualType Canonical,
- TypeDependence Dependence, ExtInfo Info)
- : Type(tc, Canonical, Dependence), ResultType(res) {
- FunctionTypeBits.ExtInfo = Info.Bits;
- }
-
- Qualifiers getFastTypeQuals() const {
- if (isFunctionProtoType())
- return Qualifiers::fromFastMask(FunctionTypeBits.FastTypeQuals);
-
- return Qualifiers();
- }
-
-public:
- QualType getReturnType() const { return ResultType; }
-
- bool getHasRegParm() const { return getExtInfo().getHasRegParm(); }
- unsigned getRegParmType() const { return getExtInfo().getRegParm(); }
-
- /// Determine whether this function type includes the GNU noreturn
- /// attribute. The C++11 [[noreturn]] attribute does not affect the function
- /// type.
- bool getNoReturnAttr() const { return getExtInfo().getNoReturn(); }
-
- /// Determine whether this is a function prototype that includes the
- /// cfi_unchecked_callee attribute.
- bool getCFIUncheckedCalleeAttr() const;
-
- bool getCmseNSCallAttr() const { return getExtInfo().getCmseNSCall(); }
- CallingConv getCallConv() const { return getExtInfo().getCC(); }
- ExtInfo getExtInfo() const { return ExtInfo(FunctionTypeBits.ExtInfo); }
-
- static_assert((~Qualifiers::FastMask & Qualifiers::CVRMask) == 0,
- "Const, volatile and restrict are assumed to be a subset of "
- "the fast qualifiers.");
-
- bool isConst() const { return getFastTypeQuals().hasConst(); }
- bool isVolatile() const { return getFastTypeQuals().hasVolatile(); }
- bool isRestrict() const { return getFastTypeQuals().hasRestrict(); }
-
- /// Determine the type of an expression that calls a function of
- /// this type.
- QualType getCallResultType(const ASTContext &Context) const {
- return getReturnType().getNonLValueExprType(Context);
- }
-
- static StringRef getNameForCallConv(CallingConv CC);
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == FunctionNoProto ||
- T->getTypeClass() == FunctionProto;
- }
-};
-
-/// Represents a K&R-style 'int foo()' function, which has
-/// no information available about its arguments.
-class FunctionNoProtoType : public FunctionType, public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these.
-
- FunctionNoProtoType(QualType Result, QualType Canonical, ExtInfo Info)
- : FunctionType(FunctionNoProto, Result, Canonical,
- Result->getDependence() &
- ~(TypeDependence::DependentInstantiation |
- TypeDependence::UnexpandedPack),
- Info) {}
-
-public:
- // No additional state past what FunctionType provides.
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getReturnType(), getExtInfo());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType ResultType,
- ExtInfo Info) {
- Info.Profile(ID);
- ID.AddPointer(ResultType.getAsOpaquePtr());
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == FunctionNoProto;
- }
-};
-
-// ------------------------------------------------------------------------------
-
-/// Represents an abstract function effect, using just an enumeration describing
-/// its kind.
-class FunctionEffect {
-public:
- /// Identifies the particular effect.
- enum class Kind : uint8_t {
- NonBlocking,
- NonAllocating,
- Blocking,
- Allocating,
- Last = Allocating
- };
- constexpr static size_t KindCount = static_cast<size_t>(Kind::Last) + 1;
-
- /// Flags describing some behaviors of the effect.
- using Flags = unsigned;
- enum FlagBit : Flags {
- // Can verification inspect callees' implementations? (e.g. nonblocking:
- // yes, tcb+types: no). This also implies the need for 2nd-pass
- // verification.
- FE_InferrableOnCallees = 0x1,
-
- // Language constructs which effects can diagnose as disallowed.
- FE_ExcludeThrow = 0x2,
- FE_ExcludeCatch = 0x4,
- FE_ExcludeObjCMessageSend = 0x8,
- FE_ExcludeStaticLocalVars = 0x10,
- FE_ExcludeThreadLocalVars = 0x20
- };
-
-private:
- Kind FKind;
-
- // Expansion: for hypothetical TCB+types, there could be one Kind for TCB,
- // then ~16(?) bits "SubKind" to map to a specific named TCB. SubKind would
- // be considered for uniqueness.
-
-public:
- explicit FunctionEffect(Kind K) : FKind(K) {}
-
- /// The kind of the effect.
- Kind kind() const { return FKind; }
-
- /// Return the opposite kind, for effects which have opposites.
- Kind oppositeKind() const;
-
- /// For serialization.
- uint32_t toOpaqueInt32() const { return uint32_t(FKind); }
- static FunctionEffect fromOpaqueInt32(uint32_t Value) {
- return FunctionEffect(Kind(Value));
- }
-
- /// Flags describing some behaviors of the effect.
- Flags flags() const {
- switch (kind()) {
- case Kind::NonBlocking:
- return FE_InferrableOnCallees | FE_ExcludeThrow | FE_ExcludeCatch |
- FE_ExcludeObjCMessageSend | FE_ExcludeStaticLocalVars |
- FE_ExcludeThreadLocalVars;
- case Kind::NonAllocating:
- // Same as NonBlocking, except without FE_ExcludeStaticLocalVars.
- return FE_InferrableOnCallees | FE_ExcludeThrow | FE_ExcludeCatch |
- FE_ExcludeObjCMessageSend | FE_ExcludeThreadLocalVars;
- case Kind::Blocking:
- case Kind::Allocating:
- return 0;
- }
- llvm_unreachable("unknown effect kind");
- }
-
- /// The description printed in diagnostics, e.g. 'nonblocking'.
- StringRef name() const;
-
- friend raw_ostream &operator<<(raw_ostream &OS,
- const FunctionEffect &Effect) {
- OS << Effect.name();
- return OS;
- }
-
- /// Determine whether the effect is allowed to be inferred on the callee,
- /// which is either a FunctionDecl or BlockDecl. If the returned optional
- /// is empty, inference is permitted; otherwise it holds the effect which
- /// blocked inference.
- /// Example: This allows nonblocking(false) to prevent inference for the
- /// function.
- std::optional<FunctionEffect>
- effectProhibitingInference(const Decl &Callee,
- FunctionEffectKindSet CalleeFX) const;
-
- // Return false for success. When true is returned for a direct call, the
- // FE_InferrableOnCallees flag may trigger inference rather than an immediate
- // diagnostic. The caller should be assumed to have the effect (it may not
- // have it explicitly when inferring).
- bool shouldDiagnoseFunctionCall(bool Direct,
- FunctionEffectKindSet CalleeFX) const;
-
- friend bool operator==(FunctionEffect LHS, FunctionEffect RHS) {
- return LHS.FKind == RHS.FKind;
- }
- friend bool operator!=(FunctionEffect LHS, FunctionEffect RHS) {
- return !(LHS == RHS);
- }
- friend bool operator<(FunctionEffect LHS, FunctionEffect RHS) {
- return LHS.FKind < RHS.FKind;
- }
-};
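-
-// Illustrative only: flags() describes what verification of an effect may
-// do; e.g. nonblocking is inferrable on callees and excludes 'throw':
-// \code
-//   FunctionEffect NB(FunctionEffect::Kind::NonBlocking);
-//   bool CanInfer = NB.flags() & FunctionEffect::FE_InferrableOnCallees;
-//   // NB.name() yields the diagnostic spelling, e.g. "nonblocking".
-// \endcode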
-
-/// Wrap a function effect's condition expression in another struct so
-/// that FunctionProtoType's TrailingObjects can treat it separately.
-class EffectConditionExpr {
- Expr *Cond = nullptr; // if null, unconditional.
-
-public:
- EffectConditionExpr() = default;
- EffectConditionExpr(Expr *E) : Cond(E) {}
-
- Expr *getCondition() const { return Cond; }
-
- bool operator==(const EffectConditionExpr &RHS) const {
- return Cond == RHS.Cond;
- }
-};
-
-/// A FunctionEffect plus a potential boolean expression determining whether
-/// the effect is declared (e.g. nonblocking(expr)). Generally the condition
-/// expression, when present, is dependent.
-struct FunctionEffectWithCondition {
- FunctionEffect Effect;
- EffectConditionExpr Cond;
-
- FunctionEffectWithCondition(FunctionEffect E, const EffectConditionExpr &C)
- : Effect(E), Cond(C) {}
-
- /// Return a textual description of the effect, and its condition, if any.
- std::string description() const;
-
- friend raw_ostream &operator<<(raw_ostream &OS,
- const FunctionEffectWithCondition &CFE);
-};
-
-/// Support iteration in parallel through a pair of FunctionEffect and
-/// EffectConditionExpr containers.
-template <typename Container> class FunctionEffectIterator {
- friend Container;
-
- const Container *Outer = nullptr;
- size_t Idx = 0;
-
-public:
- FunctionEffectIterator();
- FunctionEffectIterator(const Container &O, size_t I) : Outer(&O), Idx(I) {}
- bool operator==(const FunctionEffectIterator &Other) const {
- return Idx == Other.Idx;
- }
- bool operator!=(const FunctionEffectIterator &Other) const {
- return Idx != Other.Idx;
- }
-
- FunctionEffectIterator operator++() {
- ++Idx;
- return *this;
- }
-
- FunctionEffectWithCondition operator*() const {
- assert(Outer != nullptr && "invalid FunctionEffectIterator");
- bool HasConds = !Outer->Conditions.empty();
- return FunctionEffectWithCondition{Outer->Effects[Idx],
- HasConds ? Outer->Conditions[Idx]
- : EffectConditionExpr()};
- }
-};
-
-/// An immutable set of FunctionEffects and possibly conditions attached to
-/// them. The effects and conditions reside in memory not managed by this object
-/// (typically, trailing objects in FunctionProtoType, or borrowed references
-/// from a FunctionEffectSet).
-///
-/// Invariants:
-/// - there is never more than one instance of any given effect.
-/// - the array of conditions is either empty or has the same size as the
-/// array of effects.
-/// - some conditions may be null expressions; each condition pertains to
-/// the effect at the same array index.
-///
-/// Also, if there are any conditions, at least one of those expressions will be
-/// dependent, but this is only asserted in the constructor of
-/// FunctionProtoType.
-///
-/// See also FunctionEffectSet, in Sema, which provides a mutable set.
-class FunctionEffectsRef {
- // Restrict classes which can call the private constructor -- these friends
- // all maintain the required invariants. FunctionEffectSet is generally the
- // only way in which the arrays are created; FunctionProtoType will not
- // reorder them.
- friend FunctionProtoType;
- friend FunctionEffectSet;
-
- ArrayRef<FunctionEffect> Effects;
- ArrayRef<EffectConditionExpr> Conditions;
-
- // The arrays are expected to have been sorted by the caller, with the
- // effects in order. The conditions array must be empty or the same size
- // as the effects array, since the conditions are associated with the effects
- // at the same array indices.
- FunctionEffectsRef(ArrayRef<FunctionEffect> FX,
- ArrayRef<EffectConditionExpr> Conds)
- : Effects(FX), Conditions(Conds) {}
-
-public:
- /// Extract the effects from a Type if it is a function, block, or member
- /// function pointer, or a reference or pointer to one.
- static FunctionEffectsRef get(QualType QT);
-
- /// Asserts invariants.
- static FunctionEffectsRef create(ArrayRef<FunctionEffect> FX,
- ArrayRef<EffectConditionExpr> Conds);
-
- FunctionEffectsRef() = default;
-
- bool empty() const { return Effects.empty(); }
- size_t size() const { return Effects.size(); }
-
- ArrayRef<FunctionEffect> effects() const { return Effects; }
- ArrayRef<EffectConditionExpr> conditions() const { return Conditions; }
-
- using iterator = FunctionEffectIterator<FunctionEffectsRef>;
- friend iterator;
- iterator begin() const { return iterator(*this, 0); }
- iterator end() const { return iterator(*this, size()); }
-
- friend bool operator==(const FunctionEffectsRef &LHS,
- const FunctionEffectsRef &RHS) {
- return LHS.Effects == RHS.Effects && LHS.Conditions == RHS.Conditions;
- }
- friend bool operator!=(const FunctionEffectsRef &LHS,
- const FunctionEffectsRef &RHS) {
- return !(LHS == RHS);
- }
-
- void dump(llvm::raw_ostream &OS) const;
-};
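-
-// Illustrative only: a client can walk a function type's declared effects
-// and their (possibly null) conditions via the iterator, assuming a
-// FunctionProtoType *FPT:
-// \code
-//   for (FunctionEffectWithCondition FEC :
-//        FunctionEffectsRef::get(QualType(FPT, 0)))
-//     llvm::errs() << FEC.description() << '\n';
-// \endcode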
-
-/// A mutable set of FunctionEffect::Kind.
-class FunctionEffectKindSet {
- // For now this only needs to be a bitmap.
- constexpr static size_t EndBitPos = FunctionEffect::KindCount;
- using KindBitsT = std::bitset<EndBitPos>;
-
- KindBitsT KindBits{};
-
- explicit FunctionEffectKindSet(KindBitsT KB) : KindBits(KB) {}
-
- // Functions to translate between an effect kind and a position in the
- // bitset.
-
- constexpr static size_t kindToPos(FunctionEffect::Kind K) {
- return static_cast<size_t>(K);
- }
-
- constexpr static FunctionEffect::Kind posToKind(size_t Pos) {
- return static_cast<FunctionEffect::Kind>(Pos);
- }
-
- // Iterates through the bits which are set.
- class iterator {
- const FunctionEffectKindSet *Outer = nullptr;
- size_t Idx = 0;
-
- // If Idx does not reference a set bit, advance it until it does,
- // or until it reaches EndBitPos.
- void advanceToNextSetBit() {
- while (Idx < EndBitPos && !Outer->KindBits.test(Idx))
- ++Idx;
- }
-
- public:
- iterator();
- iterator(const FunctionEffectKindSet &O, size_t I) : Outer(&O), Idx(I) {
- advanceToNextSetBit();
- }
- bool operator==(const iterator &Other) const { return Idx == Other.Idx; }
- bool operator!=(const iterator &Other) const { return Idx != Other.Idx; }
-
- iterator operator++() {
- ++Idx;
- advanceToNextSetBit();
- return *this;
- }
-
- FunctionEffect operator*() const {
- assert(Idx < EndBitPos && "Dereference of end iterator");
- return FunctionEffect(posToKind(Idx));
- }
- };
-
-public:
- FunctionEffectKindSet() = default;
- explicit FunctionEffectKindSet(FunctionEffectsRef FX) { insert(FX); }
-
- iterator begin() const { return iterator(*this, 0); }
- iterator end() const { return iterator(*this, EndBitPos); }
-
- void insert(FunctionEffect Effect) { KindBits.set(kindToPos(Effect.kind())); }
- void insert(FunctionEffectsRef FX) {
- for (FunctionEffect Item : FX.effects())
- insert(Item);
- }
- void insert(FunctionEffectKindSet Set) { KindBits |= Set.KindBits; }
-
- bool empty() const { return KindBits.none(); }
- bool contains(const FunctionEffect::Kind EK) const {
- return KindBits.test(kindToPos(EK));
- }
- void dump(llvm::raw_ostream &OS) const;
-
- static FunctionEffectKindSet difference(FunctionEffectKindSet LHS,
- FunctionEffectKindSet RHS) {
- return FunctionEffectKindSet(LHS.KindBits & ~RHS.KindBits);
- }
-};
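-
-// Illustrative only: the kind set supports cheap membership and difference
-// queries, e.g. finding kinds a caller declares but a callee lacks:
-// \code
-//   FunctionEffectKindSet Caller, Callee;
-//   Caller.insert(FunctionEffect(FunctionEffect::Kind::NonBlocking));
-//   Callee.insert(FunctionEffect(FunctionEffect::Kind::NonAllocating));
-//   FunctionEffectKindSet Missing =
-//       FunctionEffectKindSet::difference(Caller, Callee);
-//   assert(Missing.contains(FunctionEffect::Kind::NonBlocking));
-// \endcode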
-
-/// A mutable set of FunctionEffects and possibly conditions attached to them.
-/// Used to compare and merge effects on declarations.
-///
-/// Has the same invariants as FunctionEffectsRef.
-class FunctionEffectSet {
- SmallVector<FunctionEffect> Effects;
- SmallVector<EffectConditionExpr> Conditions;
-
-public:
- FunctionEffectSet() = default;
-
- explicit FunctionEffectSet(const FunctionEffectsRef &FX)
- : Effects(FX.effects()), Conditions(FX.conditions()) {}
-
- bool empty() const { return Effects.empty(); }
- size_t size() const { return Effects.size(); }
-
- using iterator = FunctionEffectIterator<FunctionEffectSet>;
- friend iterator;
- iterator begin() const { return iterator(*this, 0); }
- iterator end() const { return iterator(*this, size()); }
-
- operator FunctionEffectsRef() const { return {Effects, Conditions}; }
-
- void dump(llvm::raw_ostream &OS) const;
-
- // Mutators
-
- // On insertion, a conflict occurs when attempting to insert an effect
- // which is the opposite of an effect already in the set, or attempting
- // to insert an effect which is already in the set but with a condition
- // which is not identical.
- struct Conflict {
- FunctionEffectWithCondition Kept;
- FunctionEffectWithCondition Rejected;
- };
- using Conflicts = SmallVector<Conflict>;
-
- // Returns true for success (obviating a check of Errs.empty()).
- bool insert(const FunctionEffectWithCondition &NewEC, Conflicts &Errs);
-
- // Returns true for success (obviating a check of Errs.empty()).
- bool insert(const FunctionEffectsRef &Set, Conflicts &Errs);
-
- // Set operations
-
- static FunctionEffectSet getUnion(FunctionEffectsRef LHS,
- FunctionEffectsRef RHS, Conflicts &Errs);
- static FunctionEffectSet getIntersection(FunctionEffectsRef LHS,
- FunctionEffectsRef RHS);
-};
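-
-// Illustrative only: merging two effect sets surfaces conflicts instead of
-// silently dropping effects; DeclaredFX and InheritedFX are assumed to be
-// FunctionEffectsRef values:
-// \code
-//   FunctionEffectSet::Conflicts Errs;
-//   FunctionEffectSet Merged =
-//       FunctionEffectSet::getUnion(DeclaredFX, InheritedFX, Errs);
-//   for (const FunctionEffectSet::Conflict &C : Errs) {
-//     // diagnose C.Kept vs. C.Rejected
-//   }
-// \endcode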
-
-/// Represents a prototype with parameter type info, e.g.
-/// 'int foo(int)' or 'int foo(void)'. 'void' is represented as having no
-/// parameters, not as having a single void parameter. Such a type can have
-/// an exception specification, but this specification is not part of the
-/// canonical type. FunctionProtoType has several trailing objects, some of
-/// which are optional. For more information about the trailing objects see
-/// the first comment inside FunctionProtoType.
-class FunctionProtoType final
- : public FunctionType,
- public llvm::FoldingSetNode,
- private llvm::TrailingObjects<
- FunctionProtoType, QualType, SourceLocation,
- FunctionType::FunctionTypeExtraBitfields,
- FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
- Expr *, FunctionDecl *, FunctionType::ExtParameterInfo, Qualifiers,
- FunctionEffect, EffectConditionExpr> {
- friend class ASTContext; // ASTContext creates these.
- friend TrailingObjects;
-
- // FunctionProtoType is followed by several trailing objects, some of
- // which are optional. They are in order:
- //
- // * An array of getNumParams() QualType holding the parameter types.
- // Always present. Note that for the vast majority of FunctionProtoType,
- // these will be the only trailing objects.
- //
- // * Optionally if the function is variadic, the SourceLocation of the
- // ellipsis.
- //
- // * Optionally if some extra data is stored in FunctionTypeExtraBitfields
- // (see FunctionTypeExtraBitfields and FunctionTypeBitfields):
- // a single FunctionTypeExtraBitfields. Present if and only if
- // hasExtraBitfields() is true.
- //
- // * Optionally exactly one of:
- // * an array of getNumExceptions() ExceptionType,
- // * a single Expr *,
- // * a pair of FunctionDecl *,
- // * a single FunctionDecl *
- // used to store information about the various types of exception
- // specification. See getExceptionSpecSize for the details.
- //
- // * Optionally an array of getNumParams() ExtParameterInfo holding
- // an ExtParameterInfo for each of the parameters. Present if and
- // only if hasExtParameterInfos() is true.
- //
- // * Optionally a Qualifiers object to represent extra qualifiers that can't
- // be represented by FunctionTypeBitfields.FastTypeQuals. Present if and
- // only if hasExtQualifiers() is true.
- //
- // * Optionally, an array of getNumFunctionEffects() FunctionEffect.
- // Present only when getNumFunctionEffects() > 0
- //
- // * Optionally, an array of getNumFunctionEffects() EffectConditionExpr.
- // Present only when getNumFunctionEffectConditions() > 0.
- //
- // The optional FunctionTypeExtraBitfields has to be before the data
- // related to the exception specification since it contains the number
- // of exception types.
- //
- // We put the ExtParameterInfos later. If all were equal, it would make
- // more sense to put these before the exception specification, because
- // it's much easier to skip past them compared to the elaborate switch
- // required to skip the exception specification. However, all is not
- // equal; ExtParameterInfos are used to model very uncommon features,
- // and it's better not to burden the more common paths.
-
-public:
- /// Holds information about the various types of exception specification.
- /// ExceptionSpecInfo is not stored as such in FunctionProtoType but is
- /// used to group together the various bits of information about the
- /// exception specification.
- struct ExceptionSpecInfo {
- /// The kind of exception specification this is.
- ExceptionSpecificationType Type = EST_None;
-
- /// Explicitly-specified list of exception types.
- ArrayRef<QualType> Exceptions;
-
- /// Noexcept expression, if this is a computed noexcept specification.
- Expr *NoexceptExpr = nullptr;
-
- /// The function whose exception specification this is, for
- /// EST_Unevaluated and EST_Uninstantiated.
- FunctionDecl *SourceDecl = nullptr;
-
- /// The function template whose exception specification this is instantiated
- /// from, for EST_Uninstantiated.
- FunctionDecl *SourceTemplate = nullptr;
-
- ExceptionSpecInfo() = default;
-
- ExceptionSpecInfo(ExceptionSpecificationType EST) : Type(EST) {}
-
- void instantiate();
- };
-
- /// Extra information about a function prototype. ExtProtoInfo is not
- /// stored as such in FunctionProtoType but is used to group together
- /// the various bits of extra information about a function prototype.
- struct ExtProtoInfo {
- FunctionType::ExtInfo ExtInfo;
- LLVM_PREFERRED_TYPE(bool)
- unsigned Variadic : 1;
- LLVM_PREFERRED_TYPE(bool)
- unsigned HasTrailingReturn : 1;
- LLVM_PREFERRED_TYPE(bool)
- unsigned CFIUncheckedCallee : 1;
- unsigned AArch64SMEAttributes : 9;
- Qualifiers TypeQuals;
- RefQualifierKind RefQualifier = RQ_None;
- ExceptionSpecInfo ExceptionSpec;
- const ExtParameterInfo *ExtParameterInfos = nullptr;
- SourceLocation EllipsisLoc;
- FunctionEffectsRef FunctionEffects;
-
- ExtProtoInfo()
- : Variadic(false), HasTrailingReturn(false), CFIUncheckedCallee(false),
- AArch64SMEAttributes(SME_NormalFunction) {}
-
- ExtProtoInfo(CallingConv CC)
- : ExtInfo(CC), Variadic(false), HasTrailingReturn(false),
- CFIUncheckedCallee(false), AArch64SMEAttributes(SME_NormalFunction) {}
-
- ExtProtoInfo withExceptionSpec(const ExceptionSpecInfo &ESI) {
- ExtProtoInfo Result(*this);
- Result.ExceptionSpec = ESI;
- return Result;
- }
-
- ExtProtoInfo withCFIUncheckedCallee(bool CFIUncheckedCallee) {
- ExtProtoInfo Result(*this);
- Result.CFIUncheckedCallee = CFIUncheckedCallee;
- return Result;
- }
-
- bool requiresFunctionProtoTypeExtraBitfields() const {
- return ExceptionSpec.Type == EST_Dynamic ||
- requiresFunctionProtoTypeArmAttributes() ||
- !FunctionEffects.empty();
- }
-
- bool requiresFunctionProtoTypeArmAttributes() const {
- return AArch64SMEAttributes != SME_NormalFunction;
- }
-
- void setArmSMEAttribute(AArch64SMETypeAttributes Kind, bool Enable = true) {
- if (Enable)
- AArch64SMEAttributes |= Kind;
- else
- AArch64SMEAttributes &= ~Kind;
- }
- };
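-
- // Illustrative only: ExtProtoInfo is how clients describe a prototype when
- // requesting a function type from the ASTContext; a minimal sketch,
- // assuming an ASTContext `Ctx` and QualTypes `RetTy` and `ParamTy`:
- // \code
- //   FunctionProtoType::ExtProtoInfo EPI(CC_C);
- //   EPI.Variadic = true;
- //   QualType FT = Ctx.getFunctionType(RetTy, {ParamTy}, EPI);
- // \endcode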
-
-private:
- unsigned numTrailingObjects(OverloadToken<QualType>) const {
- return getNumParams();
- }
-
- unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
- return isVariadic();
- }
-
- unsigned numTrailingObjects(OverloadToken<FunctionTypeArmAttributes>) const {
- return hasArmTypeAttributes();
- }
-
- unsigned numTrailingObjects(OverloadToken<FunctionTypeExtraBitfields>) const {
- return hasExtraBitfields();
- }
-
- unsigned numTrailingObjects(OverloadToken<ExceptionType>) const {
- return getExceptionSpecSize().NumExceptionType;
- }
-
- unsigned numTrailingObjects(OverloadToken<Expr *>) const {
- return getExceptionSpecSize().NumExprPtr;
- }
-
- unsigned numTrailingObjects(OverloadToken<FunctionDecl *>) const {
- return getExceptionSpecSize().NumFunctionDeclPtr;
- }
-
- unsigned numTrailingObjects(OverloadToken<ExtParameterInfo>) const {
- return hasExtParameterInfos() ? getNumParams() : 0;
- }
-
- unsigned numTrailingObjects(OverloadToken<Qualifiers>) const {
- return hasExtQualifiers() ? 1 : 0;
- }
-
- unsigned numTrailingObjects(OverloadToken<FunctionEffect>) const {
- return getNumFunctionEffects();
- }
-
- /// Determine whether there are any argument types that
- /// contain an unexpanded parameter pack.
- static bool containsAnyUnexpandedParameterPack(const QualType *ArgArray,
- unsigned numArgs) {
- for (unsigned Idx = 0; Idx < numArgs; ++Idx)
- if (ArgArray[Idx]->containsUnexpandedParameterPack())
- return true;
-
- return false;
- }
-
- FunctionProtoType(QualType result, ArrayRef<QualType> params,
- QualType canonical, const ExtProtoInfo &epi);
-
- /// This struct is returned by getExceptionSpecSize and is used to
- /// translate an ExceptionSpecificationType to the number and kind
- /// of trailing objects related to the exception specification.
- struct ExceptionSpecSizeHolder {
- unsigned NumExceptionType;
- unsigned NumExprPtr;
- unsigned NumFunctionDeclPtr;
- };
-
- /// Return the number and kind of trailing objects
- /// related to the exception specification.
- static ExceptionSpecSizeHolder
- getExceptionSpecSize(ExceptionSpecificationType EST, unsigned NumExceptions) {
- switch (EST) {
- case EST_None:
- case EST_DynamicNone:
- case EST_MSAny:
- case EST_BasicNoexcept:
- case EST_Unparsed:
- case EST_NoThrow:
- return {0, 0, 0};
-
- case EST_Dynamic:
- return {NumExceptions, 0, 0};
-
- case EST_DependentNoexcept:
- case EST_NoexceptFalse:
- case EST_NoexceptTrue:
- return {0, 1, 0};
-
- case EST_Uninstantiated:
- return {0, 0, 2};
-
- case EST_Unevaluated:
- return {0, 0, 1};
- }
- llvm_unreachable("bad exception specification kind");
- }
-
- /// Return the number and kind of trailing objects
- /// related to the exception specification.
- ExceptionSpecSizeHolder getExceptionSpecSize() const {
- return getExceptionSpecSize(getExceptionSpecType(), getNumExceptions());
- }
-
- /// Whether the trailing FunctionTypeExtraBitfields is present.
- bool hasExtraBitfields() const {
- assert((getExceptionSpecType() != EST_Dynamic ||
- FunctionTypeBits.HasExtraBitfields) &&
- "ExtraBitfields are required for given ExceptionSpecType");
- return FunctionTypeBits.HasExtraBitfields;
- }
-
- bool hasArmTypeAttributes() const {
- return FunctionTypeBits.HasExtraBitfields &&
- getTrailingObjects<FunctionTypeExtraBitfields>()
- ->HasArmTypeAttributes;
- }
-
- bool hasExtQualifiers() const {
- return FunctionTypeBits.HasExtQuals;
- }
-
-public:
- unsigned getNumParams() const { return FunctionTypeBits.NumParams; }
-
- QualType getParamType(unsigned i) const {
- assert(i < getNumParams() && "invalid parameter index");
- return param_type_begin()[i];
- }
-
- ArrayRef<QualType> getParamTypes() const {
- return {param_type_begin(), param_type_end()};
- }
-
- ExtProtoInfo getExtProtoInfo() const {
- ExtProtoInfo EPI;
- EPI.ExtInfo = getExtInfo();
- EPI.Variadic = isVariadic();
- EPI.EllipsisLoc = getEllipsisLoc();
- EPI.HasTrailingReturn = hasTrailingReturn();
- EPI.CFIUncheckedCallee = hasCFIUncheckedCallee();
- EPI.ExceptionSpec = getExceptionSpecInfo();
- EPI.TypeQuals = getMethodQuals();
- EPI.RefQualifier = getRefQualifier();
- EPI.ExtParameterInfos = getExtParameterInfosOrNull();
- EPI.AArch64SMEAttributes = getAArch64SMEAttributes();
- EPI.FunctionEffects = getFunctionEffects();
- return EPI;
- }
-
- /// Get the kind of exception specification on this function.
- ExceptionSpecificationType getExceptionSpecType() const {
- return static_cast<ExceptionSpecificationType>(
- FunctionTypeBits.ExceptionSpecType);
- }
-
- /// Return whether this function has any kind of exception spec.
- bool hasExceptionSpec() const { return getExceptionSpecType() != EST_None; }
-
- /// Return whether this function has a dynamic (throw) exception spec.
- bool hasDynamicExceptionSpec() const {
- return isDynamicExceptionSpec(getExceptionSpecType());
- }
-
- /// Return whether this function has a noexcept exception spec.
- bool hasNoexceptExceptionSpec() const {
- return isNoexceptExceptionSpec(getExceptionSpecType());
- }
-
- /// Return whether this function has a dependent exception spec.
- bool hasDependentExceptionSpec() const;
-
- /// Return whether this function has an instantiation-dependent exception
- /// spec.
- bool hasInstantiationDependentExceptionSpec() const;
-
- /// Return all the available information about this type's exception spec.
- ExceptionSpecInfo getExceptionSpecInfo() const {
- ExceptionSpecInfo Result;
- Result.Type = getExceptionSpecType();
- if (Result.Type == EST_Dynamic) {
- Result.Exceptions = exceptions();
- } else if (isComputedNoexcept(Result.Type)) {
- Result.NoexceptExpr = getNoexceptExpr();
- } else if (Result.Type == EST_Uninstantiated) {
- Result.SourceDecl = getExceptionSpecDecl();
- Result.SourceTemplate = getExceptionSpecTemplate();
- } else if (Result.Type == EST_Unevaluated) {
- Result.SourceDecl = getExceptionSpecDecl();
- }
- return Result;
- }
-
- /// Return the number of types in the exception specification.
- unsigned getNumExceptions() const {
- return getExceptionSpecType() == EST_Dynamic
- ? getTrailingObjects<FunctionTypeExtraBitfields>()
- ->NumExceptionType
- : 0;
- }
-
- /// Return the ith exception type, where 0 <= i < getNumExceptions().
- QualType getExceptionType(unsigned i) const {
- assert(i < getNumExceptions() && "Invalid exception number!");
- return exception_begin()[i];
- }
-
- /// Return the expression inside noexcept(expression), or a null pointer
- /// if there is none (because the exception spec is not of this form).
- Expr *getNoexceptExpr() const {
- if (!isComputedNoexcept(getExceptionSpecType()))
- return nullptr;
- return *getTrailingObjects<Expr *>();
- }
-
- /// If this function type has an exception specification which hasn't
- /// been determined yet (either because it has not been evaluated or because
- /// it has not been instantiated), this is the function whose exception
- /// specification is represented by this type.
- FunctionDecl *getExceptionSpecDecl() const {
- if (getExceptionSpecType() != EST_Uninstantiated &&
- getExceptionSpecType() != EST_Unevaluated)
- return nullptr;
- return getTrailingObjects<FunctionDecl *>()[0];
- }
-
- /// If this function type has an uninstantiated exception
- /// specification, this is the function whose exception specification
- /// should be instantiated to find the exception specification for
- /// this type.
- FunctionDecl *getExceptionSpecTemplate() const {
- if (getExceptionSpecType() != EST_Uninstantiated)
- return nullptr;
- return getTrailingObjects<FunctionDecl *>()[1];
- }
-
- /// Determine whether this function type has a non-throwing exception
- /// specification.
- CanThrowResult canThrow() const;
-
- /// Determine whether this function type has a non-throwing exception
- /// specification. If this depends on template arguments, returns
- /// \c ResultIfDependent.
- bool isNothrow(bool ResultIfDependent = false) const {
- return ResultIfDependent ? canThrow() != CT_Can : canThrow() == CT_Cannot;
- }
-
- /// Whether this function prototype is variadic.
- bool isVariadic() const { return FunctionTypeBits.Variadic; }
-
- SourceLocation getEllipsisLoc() const {
- return isVariadic() ? *getTrailingObjects<SourceLocation>()
- : SourceLocation();
- }
-
- /// Determines whether this function prototype contains a
- /// parameter pack at the end.
- ///
- /// A function template whose last parameter is a parameter pack can be
- /// called with an arbitrary number of arguments, much like a variadic
- /// function.
- bool isTemplateVariadic() const;
-
- /// Whether this function prototype has a trailing return type.
- bool hasTrailingReturn() const { return FunctionTypeBits.HasTrailingReturn; }
-
- bool hasCFIUncheckedCallee() const {
- return FunctionTypeBits.CFIUncheckedCallee;
- }
-
- Qualifiers getMethodQuals() const {
- if (hasExtQualifiers())
- return *getTrailingObjects<Qualifiers>();
- else
- return getFastTypeQuals();
- }
-
- /// Retrieve the ref-qualifier associated with this function type.
- RefQualifierKind getRefQualifier() const {
- return static_cast<RefQualifierKind>(FunctionTypeBits.RefQualifier);
- }
-
- using param_type_iterator = const QualType *;
-
- ArrayRef<QualType> param_types() const {
- return {param_type_begin(), param_type_end()};
- }
-
- param_type_iterator param_type_begin() const {
- return getTrailingObjects<QualType>();
- }
-
- param_type_iterator param_type_end() const {
- return param_type_begin() + getNumParams();
- }
-
- using exception_iterator = const QualType *;
-
- ArrayRef<QualType> exceptions() const {
- return {exception_begin(), exception_end()};
- }
-
- exception_iterator exception_begin() const {
- return reinterpret_cast<exception_iterator>(
- getTrailingObjects<ExceptionType>());
- }
-
- exception_iterator exception_end() const {
- return exception_begin() + getNumExceptions();
- }
-
- /// Is there any interesting extra information for any of the parameters
- /// of this function type?
- bool hasExtParameterInfos() const {
- return FunctionTypeBits.HasExtParameterInfos;
- }
-
- ArrayRef<ExtParameterInfo> getExtParameterInfos() const {
- assert(hasExtParameterInfos());
- return ArrayRef<ExtParameterInfo>(getTrailingObjects<ExtParameterInfo>(),
- getNumParams());
- }
-
- /// Return a pointer to the beginning of the array of extra parameter
- /// information, if present, or else null if none of the parameters
- /// carry it. This is equivalent to getExtProtoInfo().ExtParameterInfos.
- const ExtParameterInfo *getExtParameterInfosOrNull() const {
- if (!hasExtParameterInfos())
- return nullptr;
- return getTrailingObjects<ExtParameterInfo>();
- }
-
- /// Return a bitmask describing the SME attributes on the function type, see
- /// AArch64SMETypeAttributes for their values.
- unsigned getAArch64SMEAttributes() const {
- if (!hasArmTypeAttributes())
- return SME_NormalFunction;
- return getTrailingObjects<FunctionTypeArmAttributes>()
- ->AArch64SMEAttributes;
- }
-
- ExtParameterInfo getExtParameterInfo(unsigned I) const {
- assert(I < getNumParams() && "parameter index out of range");
- if (hasExtParameterInfos())
- return getTrailingObjects<ExtParameterInfo>()[I];
- return ExtParameterInfo();
- }
-
- ParameterABI getParameterABI(unsigned I) const {
- assert(I < getNumParams() && "parameter index out of range");
- if (hasExtParameterInfos())
- return getTrailingObjects<ExtParameterInfo>()[I].getABI();
- return ParameterABI::Ordinary;
- }
-
- bool isParamConsumed(unsigned I) const {
- assert(I < getNumParams() && "parameter index out of range");
- if (hasExtParameterInfos())
- return getTrailingObjects<ExtParameterInfo>()[I].isConsumed();
- return false;
- }
-
- unsigned getNumFunctionEffects() const {
- return hasExtraBitfields()
- ? getTrailingObjects<FunctionTypeExtraBitfields>()
- ->NumFunctionEffects
- : 0;
- }
-
- // For serialization.
- ArrayRef<FunctionEffect> getFunctionEffectsWithoutConditions() const {
- if (hasExtraBitfields()) {
- const auto *Bitfields = getTrailingObjects<FunctionTypeExtraBitfields>();
- if (Bitfields->NumFunctionEffects > 0)
- return getTrailingObjects<FunctionEffect>(
- Bitfields->NumFunctionEffects);
- }
- return {};
- }
-
- unsigned getNumFunctionEffectConditions() const {
- if (hasExtraBitfields()) {
- const auto *Bitfields = getTrailingObjects<FunctionTypeExtraBitfields>();
- if (Bitfields->EffectsHaveConditions)
- return Bitfields->NumFunctionEffects;
- }
- return 0;
- }
-
- // For serialization.
- ArrayRef<EffectConditionExpr> getFunctionEffectConditions() const {
- if (hasExtraBitfields()) {
- const auto *Bitfields = getTrailingObjects<FunctionTypeExtraBitfields>();
- if (Bitfields->EffectsHaveConditions)
- return getTrailingObjects<EffectConditionExpr>(
- Bitfields->NumFunctionEffects);
- }
- return {};
- }
-
- // Combines effects with their conditions.
- FunctionEffectsRef getFunctionEffects() const {
- if (hasExtraBitfields()) {
- const auto *Bitfields = getTrailingObjects<FunctionTypeExtraBitfields>();
- if (Bitfields->NumFunctionEffects > 0) {
- const size_t NumConds = Bitfields->EffectsHaveConditions
- ? Bitfields->NumFunctionEffects
- : 0;
- return FunctionEffectsRef(
- getTrailingObjects<FunctionEffect>(Bitfields->NumFunctionEffects),
- {NumConds ? getTrailingObjects<EffectConditionExpr>() : nullptr,
- NumConds});
- }
- }
- return {};
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void printExceptionSpecification(raw_ostream &OS,
- const PrintingPolicy &Policy) const;
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == FunctionProto;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx);
- static void Profile(llvm::FoldingSetNodeID &ID, QualType Result,
- param_type_iterator ArgTys, unsigned NumArgs,
- const ExtProtoInfo &EPI, const ASTContext &Context,
- bool Canonical);
-};
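-
-// A minimal usage sketch of the exception-specification queries above
-// (hypothetical client code; 'FPT' is assumed to be a valid
-// 'const FunctionProtoType *'):
-//
-//   if (FPT->hasExceptionSpec() && FPT->isNothrow())
-//     ; // no exception can propagate out of a call through this type
-//   for (QualType ET : FPT->exceptions())
-//     ; // the types listed in a dynamic (throw) exception specification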
-
-/// Represents the dependent type named by a dependently-scoped
-/// typename using declaration, e.g.
-/// using typename Base<T>::foo;
-///
-/// Template instantiation turns these into the underlying type.
-class UnresolvedUsingType : public Type {
- friend class ASTContext; // ASTContext creates these.
-
- UnresolvedUsingTypenameDecl *Decl;
-
- UnresolvedUsingType(const UnresolvedUsingTypenameDecl *D)
- : Type(UnresolvedUsing, QualType(),
- TypeDependence::DependentInstantiation),
- Decl(const_cast<UnresolvedUsingTypenameDecl *>(D)) {}
-
-public:
- UnresolvedUsingTypenameDecl *getDecl() const { return Decl; }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == UnresolvedUsing;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- return Profile(ID, Decl);
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID,
- UnresolvedUsingTypenameDecl *D) {
- ID.AddPointer(D);
- }
-};
-
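-/// Sugar for a type written through a name introduced by a using-declaration,
-/// e.g. (illustrative):
-/// \code
-///   namespace N { struct S {}; }
-///   using N::S;
-///   S s; // the type of 's' is a UsingType whose underlying type is N::S
-/// \endcode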
-class UsingType final : public Type,
- public llvm::FoldingSetNode,
- private llvm::TrailingObjects<UsingType, QualType> {
- UsingShadowDecl *Found;
- friend class ASTContext; // ASTContext creates these.
- friend TrailingObjects;
-
- UsingType(const UsingShadowDecl *Found, QualType Underlying, QualType Canon);
-
-public:
- UsingShadowDecl *getFoundDecl() const { return Found; }
- QualType getUnderlyingType() const;
-
- bool isSugared() const { return true; }
-
- // This always has the 'same' type as declared, but not necessarily identical.
- QualType desugar() const { return getUnderlyingType(); }
-
- // Internal helper, for debugging purposes.
- bool typeMatchesDecl() const { return !UsingBits.hasTypeDifferentFromDecl; }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, Found, getUnderlyingType());
- }
- static void Profile(llvm::FoldingSetNodeID &ID, const UsingShadowDecl *Found,
- QualType Underlying) {
- ID.AddPointer(Found);
- Underlying.Profile(ID);
- }
- static bool classof(const Type *T) { return T->getTypeClass() == Using; }
-};
-
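-/// Sugar for a type written through a typedef-name or alias-declaration,
-/// e.g. (illustrative):
-/// \code
-///   typedef int Int32;
-///   using Float32 = float;
-///   Int32 i; // the type of 'i' is a TypedefType desugaring to 'int'
-/// \endcode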
-class TypedefType final : public Type,
- public llvm::FoldingSetNode,
- private llvm::TrailingObjects<TypedefType, QualType> {
- TypedefNameDecl *Decl;
- friend class ASTContext; // ASTContext creates these.
- friend TrailingObjects;
-
- TypedefType(TypeClass tc, const TypedefNameDecl *D, QualType UnderlyingType,
- bool HasTypeDifferentFromDecl);
-
-public:
- TypedefNameDecl *getDecl() const { return Decl; }
-
- bool isSugared() const { return true; }
-
- // This always has the 'same' type as declared, but not necessarily identical.
- QualType desugar() const;
-
- // Internal helper, for debugging purposes.
- bool typeMatchesDecl() const { return !TypedefBits.hasTypeDifferentFromDecl; }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, Decl, typeMatchesDecl() ? QualType() : desugar());
- }
- static void Profile(llvm::FoldingSetNodeID &ID, const TypedefNameDecl *Decl,
- QualType Underlying) {
- ID.AddPointer(Decl);
- if (!Underlying.isNull())
- Underlying.Profile(ID);
- }
-
- static bool classof(const Type *T) { return T->getTypeClass() == Typedef; }
-};
-
-/// Sugar type that represents a type that was qualified by a qualifier written
-/// as a macro invocation.
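-///
-/// For example (illustrative):
-/// \code
-///   #define NONNULL _Nonnull
-///   int *NONNULL p; // a MacroQualifiedType wrapping the AttributedType
-///                   // produced by _Nonnull
-/// \endcode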
-class MacroQualifiedType : public Type {
- friend class ASTContext; // ASTContext creates these.
-
- QualType UnderlyingTy;
- const IdentifierInfo *MacroII;
-
- MacroQualifiedType(QualType UnderlyingTy, QualType CanonTy,
- const IdentifierInfo *MacroII)
- : Type(MacroQualified, CanonTy, UnderlyingTy->getDependence()),
- UnderlyingTy(UnderlyingTy), MacroII(MacroII) {
- assert(isa<AttributedType>(UnderlyingTy) &&
- "Expected a macro qualified type to only wrap attributed types.");
- }
-
-public:
- const IdentifierInfo *getMacroIdentifier() const { return MacroII; }
- QualType getUnderlyingType() const { return UnderlyingTy; }
-
- /// Return this attributed type's modified type with no qualifiers attached to
- /// it.
- QualType getModifiedType() const;
-
- bool isSugared() const { return true; }
- QualType desugar() const;
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == MacroQualified;
- }
-};
-
-/// Represents a `typeof` (or __typeof__) expression (a C23 feature and GCC
-/// extension) or a `typeof_unqual` expression (a C23 feature).
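-///
-/// For example (illustrative):
-/// \code
-///   int i;
-///   typeof(i + 1) j;     // the type of the expression 'i + 1', i.e. 'int'
-///   const int ci = 0;
-///   typeof_unqual(ci) k; // 'int': qualifiers are stripped
-/// \endcode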
-class TypeOfExprType : public Type {
- Expr *TOExpr;
- const ASTContext &Context;
-
-protected:
- friend class ASTContext; // ASTContext creates these.
-
- TypeOfExprType(const ASTContext &Context, Expr *E, TypeOfKind Kind,
- QualType Can = QualType());
-
-public:
- Expr *getUnderlyingExpr() const { return TOExpr; }
-
- /// Returns the kind of 'typeof' type this is.
- TypeOfKind getKind() const {
- return static_cast<TypeOfKind>(TypeOfBits.Kind);
- }
-
- /// Remove a single level of sugar.
- QualType desugar() const;
-
- /// Returns whether this type directly provides sugar.
- bool isSugared() const;
-
- static bool classof(const Type *T) { return T->getTypeClass() == TypeOfExpr; }
-};
-
-/// Internal representation of canonical, dependent
-/// `typeof(expr)` types.
-///
-/// This class is used internally by the ASTContext to manage
-/// canonical, dependent types, only. Clients will only see instances
-/// of this class via TypeOfExprType nodes.
-class DependentTypeOfExprType : public TypeOfExprType,
- public llvm::FoldingSetNode {
-public:
- DependentTypeOfExprType(const ASTContext &Context, Expr *E, TypeOfKind Kind)
- : TypeOfExprType(Context, E, Kind) {}
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
- Profile(ID, Context, getUnderlyingExpr(),
- getKind() == TypeOfKind::Unqualified);
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- Expr *E, bool IsUnqual);
-};
-
-/// Represents `typeof(type)`, a C23 feature and GCC extension, or
-/// `typeof_unqual(type)`, a C23 feature.
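-///
-/// For example (illustrative):
-/// \code
-///   typeof(int) x;              // 'int'
-///   typeof_unqual(const int) y; // 'int': qualifiers are stripped
-/// \endcode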
-class TypeOfType : public Type {
- friend class ASTContext; // ASTContext creates these.
-
- QualType TOType;
- const ASTContext &Context;
-
- TypeOfType(const ASTContext &Context, QualType T, QualType Can,
- TypeOfKind Kind);
-
-public:
- QualType getUnmodifiedType() const { return TOType; }
-
- /// Remove a single level of sugar.
- QualType desugar() const;
-
- /// Returns whether this type directly provides sugar.
- bool isSugared() const { return true; }
-
- /// Returns the kind of 'typeof' type this is.
- TypeOfKind getKind() const {
- return static_cast<TypeOfKind>(TypeOfBits.Kind);
- }
-
- static bool classof(const Type *T) { return T->getTypeClass() == TypeOf; }
-};
-
-/// Represents the type `decltype(expr)` (C++11).
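-///
-/// For example (illustrative):
-/// \code
-///   int i = 0;
-///   decltype(i) j = i;   // 'int'
-///   decltype((i)) r = i; // 'int &', since '(i)' is an lvalue expression
-/// \endcode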
-class DecltypeType : public Type {
- Expr *E;
- QualType UnderlyingType;
-
-protected:
- friend class ASTContext; // ASTContext creates these.
-
- DecltypeType(Expr *E, QualType underlyingType, QualType can = QualType());
-
-public:
- Expr *getUnderlyingExpr() const { return E; }
- QualType getUnderlyingType() const { return UnderlyingType; }
-
- /// Remove a single level of sugar.
- QualType desugar() const;
-
- /// Returns whether this type directly provides sugar.
- bool isSugared() const;
-
- static bool classof(const Type *T) { return T->getTypeClass() == Decltype; }
-};
-
-/// Internal representation of canonical, dependent
-/// decltype(expr) types.
-///
-/// This class is used internally by the ASTContext to manage
-/// canonical, dependent types, only. Clients will only see instances
-/// of this class via DecltypeType nodes.
-class DependentDecltypeType : public DecltypeType, public llvm::FoldingSetNode {
-public:
- DependentDecltypeType(Expr *E);
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
- Profile(ID, Context, getUnderlyingExpr());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- Expr *E);
-};
-
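-/// Represents a pack indexing type, a C++26 feature, e.g. (illustrative):
-/// \code
-///   template <typename... Ts> using First = Ts...[0];
-/// \endcode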
-class PackIndexingType final
- : public Type,
- public llvm::FoldingSetNode,
- private llvm::TrailingObjects<PackIndexingType, QualType> {
- friend TrailingObjects;
-
- QualType Pattern;
- Expr *IndexExpr;
-
- unsigned Size : 31;
-
- LLVM_PREFERRED_TYPE(bool)
- unsigned FullySubstituted : 1;
-
-protected:
- friend class ASTContext; // ASTContext creates these.
- PackIndexingType(QualType Canonical, QualType Pattern, Expr *IndexExpr,
- bool FullySubstituted, ArrayRef<QualType> Expansions = {});
-
-public:
- Expr *getIndexExpr() const { return IndexExpr; }
- QualType getPattern() const { return Pattern; }
-
- bool isSugared() const { return hasSelectedType(); }
-
- QualType desugar() const {
- if (hasSelectedType())
- return getSelectedType();
- return QualType(this, 0);
- }
-
- QualType getSelectedType() const {
- assert(hasSelectedType() && "Type is dependent");
- return *(getExpansionsPtr() + *getSelectedIndex());
- }
-
- UnsignedOrNone getSelectedIndex() const;
-
- bool hasSelectedType() const { return getSelectedIndex() != std::nullopt; }
-
- bool isFullySubstituted() const { return FullySubstituted; }
-
- bool expandsToEmptyPack() const { return isFullySubstituted() && Size == 0; }
-
- ArrayRef<QualType> getExpansions() const {
- return {getExpansionsPtr(), Size};
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == PackIndexing;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context);
- static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- QualType Pattern, Expr *E, bool FullySubstituted,
- ArrayRef<QualType> Expansions);
-
-private:
- const QualType *getExpansionsPtr() const { return getTrailingObjects(); }
-
- static TypeDependence computeDependence(QualType Pattern, Expr *IndexExpr,
- ArrayRef<QualType> Expansions = {});
-};
-
-/// A unary type transform, which is a type constructed from another.
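-/// For example (illustrative):
-/// \code
-///   enum E : unsigned { A };
-///   __underlying_type(E) x = 0u; // 'unsigned'
-/// \endcode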
-class UnaryTransformType : public Type, public llvm::FoldingSetNode {
-public:
- enum UTTKind {
-#define TRANSFORM_TYPE_TRAIT_DEF(Enum, _) Enum,
-#include "clang/Basic/TransformTypeTraits.def"
- };
-
-private:
- /// The untransformed type.
- QualType BaseType;
-
- /// The transformed type if not dependent, otherwise the same as BaseType.
- QualType UnderlyingType;
-
- UTTKind UKind;
-
-protected:
- friend class ASTContext;
-
- UnaryTransformType(QualType BaseTy, QualType UnderlyingTy, UTTKind UKind,
- QualType CanonicalTy);
-
-public:
- bool isSugared() const { return !isDependentType(); }
- QualType desugar() const { return UnderlyingType; }
-
- QualType getUnderlyingType() const { return UnderlyingType; }
- QualType getBaseType() const { return BaseType; }
-
- UTTKind getUTTKind() const { return UKind; }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == UnaryTransform;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getBaseType(), getUnderlyingType(), getUTTKind());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType BaseType,
- QualType UnderlyingType, UTTKind UKind) {
- BaseType.Profile(ID);
- UnderlyingType.Profile(ID);
- ID.AddInteger(UKind);
- }
-};
-
-class TagType : public Type {
- friend class ASTReader;
- template <class T> friend class serialization::AbstractTypeReader;
-
- /// Stores the TagDecl associated with this type. The decl may point to any
- /// TagDecl that declares the entity.
- TagDecl *decl;
-
-protected:
- TagType(TypeClass TC, const TagDecl *D, QualType can);
-
-public:
- TagDecl *getDecl() const;
-
- /// Determines whether this type is in the process of being defined.
- bool isBeingDefined() const;
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == Enum || T->getTypeClass() == Record;
- }
-};
-
-/// A helper class that allows the use of isa/cast/dyncast
-/// to detect TagType objects of structs/unions/classes.
-class RecordType : public TagType {
-protected:
- friend class ASTContext; // ASTContext creates these.
-
- explicit RecordType(const RecordDecl *D)
- : TagType(Record, reinterpret_cast<const TagDecl*>(D), QualType()) {}
- explicit RecordType(TypeClass TC, RecordDecl *D)
- : TagType(TC, reinterpret_cast<const TagDecl*>(D), QualType()) {}
-
-public:
- RecordDecl *getDecl() const {
- return reinterpret_cast<RecordDecl*>(TagType::getDecl());
- }
-
- /// Recursively check all fields in the record for const-ness. If any field
- /// is declared const, return true. Otherwise, return false.
- bool hasConstFields() const;
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) { return T->getTypeClass() == Record; }
-};
-
-/// A helper class that allows the use of isa/cast/dyncast
-/// to detect TagType objects of enums.
-class EnumType : public TagType {
- friend class ASTContext; // ASTContext creates these.
-
- explicit EnumType(const EnumDecl *D)
- : TagType(Enum, reinterpret_cast<const TagDecl*>(D), QualType()) {}
-
-public:
- EnumDecl *getDecl() const {
- return reinterpret_cast<EnumDecl*>(TagType::getDecl());
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) { return T->getTypeClass() == Enum; }
-};
-
-/// An attributed type is a type to which a type attribute has been applied.
-///
-/// The "modified type" is the fully-sugared type to which the attributed
-/// type was applied; generally it is not canonically equivalent to the
-/// attributed type. The "equivalent type" is the minimally-desugared type
-/// to which the attributed type is canonically equivalent.
-///
-/// For example, in the following attributed type:
-/// int32_t __attribute__((vector_size(16)))
-/// - the modified type is the TypedefType for int32_t
-/// - the equivalent type is VectorType(16, int32_t)
-/// - the canonical type is VectorType(16, int)
-class AttributedType : public Type, public llvm::FoldingSetNode {
-public:
- using Kind = attr::Kind;
-
-private:
- friend class ASTContext; // ASTContext creates these
-
- const Attr *Attribute;
-
- QualType ModifiedType;
- QualType EquivalentType;
-
- AttributedType(QualType canon, attr::Kind attrKind, QualType modified,
- QualType equivalent)
- : AttributedType(canon, attrKind, nullptr, modified, equivalent) {}
-
- AttributedType(QualType canon, const Attr *attr, QualType modified,
- QualType equivalent);
-
-private:
- AttributedType(QualType canon, attr::Kind attrKind, const Attr *attr,
- QualType modified, QualType equivalent);
-
-public:
- Kind getAttrKind() const {
- return static_cast<Kind>(AttributedTypeBits.AttrKind);
- }
-
- const Attr *getAttr() const { return Attribute; }
-
- QualType getModifiedType() const { return ModifiedType; }
- QualType getEquivalentType() const { return EquivalentType; }
-
- bool isSugared() const { return true; }
- QualType desugar() const { return getEquivalentType(); }
-
- /// Does this attribute behave like a type qualifier?
- ///
- /// A type qualifier adjusts a type to provide specialized rules for
- /// a specific object, like the standard const and volatile qualifiers.
- /// This includes attributes controlling things like nullability,
- /// address spaces, and ARC ownership. The value of the object is still
- /// largely described by the modified type.
- ///
- /// In contrast, many type attributes "rewrite" their modified type to
- /// produce a fundamentally different type, not necessarily related in any
- /// formalizable way to the original type. For example, calling convention
- /// and vector attributes are not simple type qualifiers.
- ///
- /// Type qualifiers are often, but not always, reflected in the canonical
- /// type.
- bool isQualifier() const;
-
- bool isMSTypeSpec() const;
-
- bool isWebAssemblyFuncrefSpec() const;
-
- bool isCallingConv() const;
-
- std::optional<NullabilityKind> getImmediateNullability() const;
-
- /// Strip off the top-level nullability annotation on the given
- /// type, if it's there.
- ///
- /// \param T The type to strip. If the type is exactly an
- /// AttributedType specifying nullability (without looking through
- /// type sugar), the nullability is returned and this type changed
- /// to the underlying modified type.
- ///
- /// \returns the top-level nullability, if present.
- static std::optional<NullabilityKind> stripOuterNullability(QualType &T);
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getAttrKind(), ModifiedType, EquivalentType, Attribute);
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, Kind attrKind,
- QualType modified, QualType equivalent,
- const Attr *attr) {
- ID.AddInteger(attrKind);
- ID.AddPointer(modified.getAsOpaquePtr());
- ID.AddPointer(equivalent.getAsOpaquePtr());
- ID.AddPointer(attr);
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == Attributed;
- }
-};
-
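-/// Represents a type wrapped by a 'btf_type_tag' attribute, e.g.
-/// (illustrative):
-/// \code
-///   int __attribute__((btf_type_tag("user"))) *p;
-/// \endcode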
-class BTFTagAttributedType : public Type, public llvm::FoldingSetNode {
-private:
- friend class ASTContext; // ASTContext creates these
-
- QualType WrappedType;
- const BTFTypeTagAttr *BTFAttr;
-
- BTFTagAttributedType(QualType Canon, QualType Wrapped,
- const BTFTypeTagAttr *BTFAttr)
- : Type(BTFTagAttributed, Canon, Wrapped->getDependence()),
- WrappedType(Wrapped), BTFAttr(BTFAttr) {}
-
-public:
- QualType getWrappedType() const { return WrappedType; }
- const BTFTypeTagAttr *getAttr() const { return BTFAttr; }
-
- bool isSugared() const { return true; }
- QualType desugar() const { return getWrappedType(); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, WrappedType, BTFAttr);
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType Wrapped,
- const BTFTypeTagAttr *BTFAttr) {
- ID.AddPointer(Wrapped.getAsOpaquePtr());
- ID.AddPointer(BTFAttr);
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == BTFTagAttributed;
- }
-};
-
-class HLSLAttributedResourceType : public Type, public llvm::FoldingSetNode {
-public:
- struct Attributes {
- // Data gathered from HLSL resource attributes
- llvm::dxil::ResourceClass ResourceClass;
-
- LLVM_PREFERRED_TYPE(bool)
- uint8_t IsROV : 1;
-
- LLVM_PREFERRED_TYPE(bool)
- uint8_t RawBuffer : 1;
-
- Attributes(llvm::dxil::ResourceClass ResourceClass, bool IsROV = false,
- bool RawBuffer = false)
- : ResourceClass(ResourceClass), IsROV(IsROV), RawBuffer(RawBuffer) {}
-
- Attributes() : Attributes(llvm::dxil::ResourceClass::UAV, false, false) {}
-
- friend bool operator==(const Attributes &LHS, const Attributes &RHS) {
- return std::tie(LHS.ResourceClass, LHS.IsROV, LHS.RawBuffer) ==
- std::tie(RHS.ResourceClass, RHS.IsROV, RHS.RawBuffer);
- }
- friend bool operator!=(const Attributes &LHS, const Attributes &RHS) {
- return !(LHS == RHS);
- }
- };
-
-private:
- friend class ASTContext; // ASTContext creates these
-
- QualType WrappedType;
- QualType ContainedType;
- const Attributes Attrs;
-
- HLSLAttributedResourceType(QualType Wrapped, QualType Contained,
- const Attributes &Attrs)
- : Type(HLSLAttributedResource, QualType(),
- Contained.isNull() ? TypeDependence::None
- : Contained->getDependence()),
- WrappedType(Wrapped), ContainedType(Contained), Attrs(Attrs) {}
-
-public:
- QualType getWrappedType() const { return WrappedType; }
- QualType getContainedType() const { return ContainedType; }
- bool hasContainedType() const { return !ContainedType.isNull(); }
- const Attributes &getAttrs() const { return Attrs; }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, WrappedType, ContainedType, Attrs);
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType Wrapped,
- QualType Contained, const Attributes &Attrs) {
- ID.AddPointer(Wrapped.getAsOpaquePtr());
- ID.AddPointer(Contained.getAsOpaquePtr());
- ID.AddInteger(static_cast<uint32_t>(Attrs.ResourceClass));
- ID.AddBoolean(Attrs.IsROV);
- ID.AddBoolean(Attrs.RawBuffer);
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == HLSLAttributedResource;
- }
-
- // Returns the handle type from an HLSL resource, if the type is a resource.
- static const HLSLAttributedResourceType *
- findHandleTypeOnResource(const Type *RT);
-};
-
-/// Instances of this class represent operands to a SPIR-V type instruction.
-class SpirvOperand {
-public:
- enum SpirvOperandKind : unsigned char {
- Invalid, ///< Uninitialized.
- ConstantId, ///< Integral value to represent as a SPIR-V OpConstant
- ///< instruction ID.
- Literal, ///< Integral value to represent as an immediate literal.
- TypeId, ///< Type to represent as a SPIR-V type ID.
-
- Max,
- };
-
-private:
- SpirvOperandKind Kind = Invalid;
-
- QualType ResultType;
- llvm::APInt Value; // Signedness of constants is represented by ResultType.
-
-public:
- SpirvOperand() : Kind(Invalid), ResultType(), Value() {}
-
- SpirvOperand(SpirvOperandKind Kind, QualType ResultType, llvm::APInt Value)
- : Kind(Kind), ResultType(ResultType), Value(std::move(Value)) {}
-
- SpirvOperand(const SpirvOperand &Other) { *this = Other; }
- ~SpirvOperand() {}
-
- SpirvOperand &operator=(const SpirvOperand &Other) = default;
-
- bool operator==(const SpirvOperand &Other) const {
- return Kind == Other.Kind && ResultType == Other.ResultType &&
- Value == Other.Value;
- }
-
- bool operator!=(const SpirvOperand &Other) const { return !(*this == Other); }
-
- SpirvOperandKind getKind() const { return Kind; }
-
- bool isValid() const { return Kind != Invalid && Kind < Max; }
- bool isConstant() const { return Kind == ConstantId; }
- bool isLiteral() const { return Kind == Literal; }
- bool isType() const { return Kind == TypeId; }
-
- llvm::APInt getValue() const {
- assert((isConstant() || isLiteral()) &&
- "This is not an operand with a value!");
- return Value;
- }
-
- QualType getResultType() const {
- assert((isConstant() || isType()) &&
- "This is not an operand with a result type!");
- return ResultType;
- }
-
- static SpirvOperand createConstant(QualType ResultType, llvm::APInt Val) {
- return SpirvOperand(ConstantId, ResultType, std::move(Val));
- }
-
- static SpirvOperand createLiteral(llvm::APInt Val) {
- return SpirvOperand(Literal, QualType(), std::move(Val));
- }
-
- static SpirvOperand createType(QualType T) {
- return SpirvOperand(TypeId, T, llvm::APSInt());
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger(Kind);
- ID.AddPointer(ResultType.getAsOpaquePtr());
- Value.Profile(ID);
- }
-};
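-
-// A minimal sketch of building operands through the factory functions above
-// (hypothetical client code; 'Ctx' is assumed to be a valid ASTContext):
-//
-//   SpirvOperand Lit = SpirvOperand::createLiteral(llvm::APInt(32, 4));
-//   SpirvOperand Ty = SpirvOperand::createType(Ctx.IntTy);
-//   assert(Lit.isLiteral() && Ty.isType());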
-
-/// Represents an arbitrary, user-specified SPIR-V type instruction.
-class HLSLInlineSpirvType final
- : public Type,
- public llvm::FoldingSetNode,
- private llvm::TrailingObjects<HLSLInlineSpirvType, SpirvOperand> {
- friend class ASTContext; // ASTContext creates these
- friend TrailingObjects;
-
-private:
- uint32_t Opcode;
- uint32_t Size;
- uint32_t Alignment;
- size_t NumOperands;
-
- HLSLInlineSpirvType(uint32_t Opcode, uint32_t Size, uint32_t Alignment,
- ArrayRef<SpirvOperand> Operands)
- : Type(HLSLInlineSpirv, QualType(), TypeDependence::None), Opcode(Opcode),
- Size(Size), Alignment(Alignment), NumOperands(Operands.size()) {
- for (size_t I = 0; I < NumOperands; I++) {
- // Since Operands are stored as a trailing object, they have not been
- // initialized yet. Call the constructor manually.
- auto *Operand = new (&getTrailingObjects()[I]) SpirvOperand();
- *Operand = Operands[I];
- }
- }
-
-public:
- uint32_t getOpcode() const { return Opcode; }
- uint32_t getSize() const { return Size; }
- uint32_t getAlignment() const { return Alignment; }
- ArrayRef<SpirvOperand> getOperands() const {
- return getTrailingObjects(NumOperands);
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, Opcode, Size, Alignment, getOperands());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, uint32_t Opcode,
- uint32_t Size, uint32_t Alignment,
- ArrayRef<SpirvOperand> Operands) {
- ID.AddInteger(Opcode);
- ID.AddInteger(Size);
- ID.AddInteger(Alignment);
- for (auto &Operand : Operands)
- Operand.Profile(ID);
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == HLSLInlineSpirv;
- }
-};
-
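-/// Represents a reference to a template type parameter, e.g. the type 'T' in
-/// (illustrative):
-/// \code
-///   template <typename T> T identity(T x);
-/// \endcode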
-class TemplateTypeParmType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these
-
- // The associated TemplateTypeParmDecl for the non-canonical type.
- TemplateTypeParmDecl *TTPDecl;
-
- TemplateTypeParmType(unsigned D, unsigned I, bool PP,
- TemplateTypeParmDecl *TTPDecl, QualType Canon)
- : Type(TemplateTypeParm, Canon,
- TypeDependence::DependentInstantiation |
- (PP ? TypeDependence::UnexpandedPack : TypeDependence::None)),
- TTPDecl(TTPDecl) {
- assert(!TTPDecl == Canon.isNull());
- TemplateTypeParmTypeBits.Depth = D;
- TemplateTypeParmTypeBits.Index = I;
- TemplateTypeParmTypeBits.ParameterPack = PP;
- }
-
-public:
- unsigned getDepth() const { return TemplateTypeParmTypeBits.Depth; }
- unsigned getIndex() const { return TemplateTypeParmTypeBits.Index; }
- bool isParameterPack() const {
- return TemplateTypeParmTypeBits.ParameterPack;
- }
-
- TemplateTypeParmDecl *getDecl() const { return TTPDecl; }
-
- IdentifierInfo *getIdentifier() const;
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getDepth(), getIndex(), isParameterPack(), getDecl());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, unsigned Depth,
- unsigned Index, bool ParameterPack,
- TemplateTypeParmDecl *TTPDecl) {
- ID.AddInteger(Depth);
- ID.AddInteger(Index);
- ID.AddBoolean(ParameterPack);
- ID.AddPointer(TTPDecl);
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == TemplateTypeParm;
- }
-};
-
-/// Represents the result of substituting a type for a template
-/// type parameter.
-///
-/// Within an instantiated template, all template type parameters have
-/// been replaced with these. They are used solely to record that a
-/// type was originally written as a template type parameter;
-/// therefore they are never canonical.
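-///
-/// For example (illustrative), when
-/// \code
-///   template <typename T> T identity(T x);
-/// \endcode
-/// is instantiated with T = int, each written 'T' becomes a
-/// SubstTemplateTypeParmType whose replacement type is 'int'.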
-class SubstTemplateTypeParmType final
- : public Type,
- public llvm::FoldingSetNode,
- private llvm::TrailingObjects<SubstTemplateTypeParmType, QualType> {
- friend class ASTContext;
- friend class llvm::TrailingObjects<SubstTemplateTypeParmType, QualType>;
-
- Decl *AssociatedDecl;
-
- SubstTemplateTypeParmType(QualType Replacement, Decl *AssociatedDecl,
- unsigned Index, UnsignedOrNone PackIndex,
- bool Final);
-
-public:
- /// Gets the type that was substituted for the template
- /// parameter.
- QualType getReplacementType() const {
- return SubstTemplateTypeParmTypeBits.HasNonCanonicalUnderlyingType
- ? *getTrailingObjects()
- : getCanonicalTypeInternal();
- }
-
- /// A template-like entity which owns the whole pattern being substituted.
- /// This will usually own a set of template parameters, or in some
- /// cases might even be a template parameter itself.
- Decl *getAssociatedDecl() const { return AssociatedDecl; }
-
- /// Gets the template parameter declaration that was substituted for.
- const TemplateTypeParmDecl *getReplacedParameter() const;
-
- /// Returns the index of the replaced parameter in the associated declaration.
- /// This should match the result of `getReplacedParameter()->getIndex()`.
- unsigned getIndex() const { return SubstTemplateTypeParmTypeBits.Index; }
-
- // This substitution is Final, which means the substitution is fully
- // sugared: it doesn't need to be resugared later.
- unsigned getFinal() const { return SubstTemplateTypeParmTypeBits.Final; }
-
- UnsignedOrNone getPackIndex() const {
- return UnsignedOrNone::fromInternalRepresentation(
- SubstTemplateTypeParmTypeBits.PackIndex);
- }
-
- bool isSugared() const { return true; }
- QualType desugar() const { return getReplacementType(); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getReplacementType(), getAssociatedDecl(), getIndex(),
- getPackIndex(), getFinal());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType Replacement,
- const Decl *AssociatedDecl, unsigned Index,
- UnsignedOrNone PackIndex, bool Final);
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == SubstTemplateTypeParm;
- }
-};
-
-/// Represents the result of substituting a set of types for a template
-/// type parameter pack.
-///
-/// When a pack expansion in the source code contains multiple parameter packs
-/// and those parameter packs correspond to different levels of template
-/// parameter lists, this type node is used to represent a template type
-/// parameter pack from an outer level, which has already had its argument pack
-/// substituted but that still lives within a pack expansion that itself
-/// could not be instantiated. When actually performing a substitution into
-/// that pack expansion (e.g., when all template parameters have corresponding
-/// arguments), this type will be replaced with the \c SubstTemplateTypeParmType
-/// at the current pack substitution index.
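-///
-/// For example (illustrative):
-/// \code
-///   template <typename... Ts> struct Outer {
-///     template <typename... Us> static void f(void (*)(Ts, Us)...);
-///   };
-/// \endcode
-/// Instantiating Outer<int, long> substitutes an argument pack for Ts, but
-/// the expansion still depends on Us; within it, Ts is represented by a
-/// SubstTemplateTypeParmPackType.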
-class SubstTemplateTypeParmPackType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext;
-
- /// A pointer to the set of template arguments that this
- /// parameter pack is instantiated with.
- const TemplateArgument *Arguments;
-
- llvm::PointerIntPair<Decl *, 1, bool> AssociatedDeclAndFinal;
-
- SubstTemplateTypeParmPackType(QualType Canon, Decl *AssociatedDecl,
- unsigned Index, bool Final,
- const TemplateArgument &ArgPack);
-
-public:
- IdentifierInfo *getIdentifier() const;
-
- /// A template-like entity which owns the whole pattern being substituted.
- /// This will usually own a set of template parameters, or in some
- /// cases might even be a template parameter itself.
- Decl *getAssociatedDecl() const;
-
- /// Gets the template parameter declaration that was substituted for.
- const TemplateTypeParmDecl *getReplacedParameter() const;
-
- /// Returns the index of the replaced parameter in the associated declaration.
- /// This should match the result of `getReplacedParameter()->getIndex()`.
- unsigned getIndex() const { return SubstTemplateTypeParmPackTypeBits.Index; }
-
- // This substitution will be Final, which means the substitution will be fully
- // sugared: it doesn't need to be resugared later.
- bool getFinal() const;
-
- unsigned getNumArgs() const {
- return SubstTemplateTypeParmPackTypeBits.NumArgs;
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- TemplateArgument getArgumentPack() const;
-
- void Profile(llvm::FoldingSetNodeID &ID);
- static void Profile(llvm::FoldingSetNodeID &ID, const Decl *AssociatedDecl,
- unsigned Index, bool Final,
- const TemplateArgument &ArgPack);
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == SubstTemplateTypeParmPack;
- }
-};
-
-/// Common base class for placeholders for types that get replaced by
-/// placeholder type deduction: C++11 auto, C++14 decltype(auto), C++17 deduced
-/// class template types, and constrained type names.
-///
-/// These types are usually a placeholder for a deduced type. However, before
-/// the initializer is attached, or (usually) if the initializer is
-/// type-dependent, there is no deduced type and the type is canonical. In
-/// the latter case, it is also a dependent type.
-class DeducedType : public Type {
- QualType DeducedAsType;
-
-protected:
- DeducedType(TypeClass TC, QualType DeducedAsType,
- TypeDependence ExtraDependence, QualType Canon)
- : Type(TC, Canon,
- ExtraDependence | (DeducedAsType.isNull()
- ? TypeDependence::None
- : DeducedAsType->getDependence() &
- ~TypeDependence::VariablyModified)),
- DeducedAsType(DeducedAsType) {}
-
-public:
- bool isSugared() const { return !DeducedAsType.isNull(); }
- QualType desugar() const {
- return isSugared() ? DeducedAsType : QualType(this, 0);
- }
-
- /// Get the type deduced for this placeholder type, or null if it
- /// has not been deduced.
- QualType getDeducedType() const { return DeducedAsType; }
- bool isDeduced() const {
- return !DeducedAsType.isNull() || isDependentType();
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == Auto ||
- T->getTypeClass() == DeducedTemplateSpecialization;
- }
-};
-
-/// Represents a C++11 auto or C++14 decltype(auto) type, possibly constrained
-/// by a type-constraint.
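-///
-/// For example (illustrative):
-/// \code
-///   auto i = 0;               // deduced as 'int'
-///   decltype(auto) r = (i);   // deduced as 'int &'
-///   std::integral auto n = 1; // constrained by the 'std::integral' concept
-/// \endcode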
-class AutoType : public DeducedType {
- friend class ASTContext; // ASTContext creates these
-
- TemplateDecl *TypeConstraintConcept;
-
- AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword,
- TypeDependence ExtraDependence, QualType Canon, TemplateDecl *CD,
- ArrayRef<TemplateArgument> TypeConstraintArgs);
-
-public:
- ArrayRef<TemplateArgument> getTypeConstraintArguments() const {
- return {reinterpret_cast<const TemplateArgument *>(this + 1),
- AutoTypeBits.NumArgs};
- }
-
- TemplateDecl *getTypeConstraintConcept() const {
- return TypeConstraintConcept;
- }
-
- bool isConstrained() const {
- return TypeConstraintConcept != nullptr;
- }
-
- bool isDecltypeAuto() const {
- return getKeyword() == AutoTypeKeyword::DecltypeAuto;
- }
-
- bool isGNUAutoType() const {
- return getKeyword() == AutoTypeKeyword::GNUAutoType;
- }
-
- AutoTypeKeyword getKeyword() const {
- return (AutoTypeKeyword)AutoTypeBits.Keyword;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context);
- static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- QualType Deduced, AutoTypeKeyword Keyword,
- bool IsDependent, TemplateDecl *CD,
- ArrayRef<TemplateArgument> Arguments);
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == Auto;
- }
-};
-
-/// Represents a C++17 deduced template specialization type.
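-/// For example (illustrative):
-/// \code
-///   std::pair p(1, 2.5); // deduced as 'std::pair<int, double>' via CTAD
-/// \endcode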
-class DeducedTemplateSpecializationType : public DeducedType,
- public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these
-
- /// The name of the template whose arguments will be deduced.
- TemplateName Template;
-
- DeducedTemplateSpecializationType(TemplateName Template,
- QualType DeducedAsType,
- bool IsDeducedAsDependent, QualType Canon)
- : DeducedType(DeducedTemplateSpecialization, DeducedAsType,
- toTypeDependence(Template.getDependence()) |
- (IsDeducedAsDependent
- ? TypeDependence::DependentInstantiation
- : TypeDependence::None),
- Canon),
- Template(Template) {}
-
-public:
- /// Retrieve the name of the template that we are deducing.
- TemplateName getTemplateName() const { return Template; }
-
- void Profile(llvm::FoldingSetNodeID &ID) const {
- Profile(ID, getTemplateName(), getDeducedType(), isDependentType());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, TemplateName Template,
- QualType Deduced, bool IsDependent) {
- Template.Profile(ID);
- Deduced.Profile(ID);
- ID.AddBoolean(IsDependent || Template.isDependent());
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == DeducedTemplateSpecialization;
- }
-};
-
-/// Represents a type template specialization; the template
-/// must be a class template, a type alias template, or a template
-/// template parameter. A template which cannot be resolved to one of
-/// these, e.g. because it is written with a dependent scope
-/// specifier, is instead represented as a
-/// @c DependentTemplateSpecializationType.
-///
-/// A non-dependent template specialization type is always "sugar",
-/// typically for a \c RecordType. For example, a class template
-/// specialization type of \c vector<int> will refer to a tag type for
-/// the instantiation \c std::vector<int, std::allocator<int>>
-///
-/// Template specializations are dependent if either the template or
-/// any of the template arguments are dependent, in which case the
-/// type may also be canonical.
-///
-/// Instances of this type are allocated with a trailing array of
-/// TemplateArguments, followed by a QualType representing the
-/// non-canonical aliased type when the template is a type alias
-/// template.
-class TemplateSpecializationType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these
-
- /// The name of the template being specialized. This is
- /// either a TemplateName::Template (in which case it is a
- /// ClassTemplateDecl*, a TemplateTemplateParmDecl*, or a
- /// TypeAliasTemplateDecl*), a
- /// TemplateName::SubstTemplateTemplateParmPack, or a
- /// TemplateName::SubstTemplateTemplateParm (in which case the
- /// replacement must, recursively, be one of these).
- TemplateName Template;
-
- TemplateSpecializationType(TemplateName T, bool IsAlias,
- ArrayRef<TemplateArgument> Args,
- QualType Underlying);
-
-public:
- /// Determine whether any of the given template arguments are dependent.
- ///
- /// The converted arguments should be supplied when known; whether an
- /// argument is dependent can depend on the conversions performed on it
- /// (for example, a 'const int' passed as a template argument might be
- /// dependent if the parameter is a reference but non-dependent if the
- /// parameter is an int).
- ///
- /// Note that the \p Args parameter is unused: this is intentional, to remind
- /// the caller that they need to pass in the converted arguments, not the
- /// specified arguments.
- static bool
- anyDependentTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
- ArrayRef<TemplateArgument> Converted);
- static bool
- anyDependentTemplateArguments(const TemplateArgumentListInfo &,
- ArrayRef<TemplateArgument> Converted);
- static bool anyInstantiationDependentTemplateArguments(
- ArrayRef<TemplateArgumentLoc> Args);
-
- /// True if this template specialization type matches a current
- /// instantiation in the context in which it is found.
- bool isCurrentInstantiation() const {
- return isa<InjectedClassNameType>(getCanonicalTypeInternal());
- }
-
- /// Determine if this template specialization type is for a type alias
- /// template that has been substituted.
- ///
- /// Nearly every template specialization type whose template is an alias
- /// template will be substituted. However, this is not the case when
- /// the specialization contains a pack expansion but the template alias
- /// does not have a corresponding parameter pack, e.g.,
- ///
- /// \code
- /// template<typename T, typename U, typename V> struct S;
- /// template<typename T, typename U> using A = S<T, int, U>;
- /// template<typename... Ts> struct X {
- /// typedef A<Ts...> type; // not a type alias
- /// };
- /// \endcode
- bool isTypeAlias() const { return TemplateSpecializationTypeBits.TypeAlias; }
-
- /// Get the aliased type, if this is a specialization of a type alias
- /// template.
- QualType getAliasedType() const;
-
- /// Retrieve the name of the template that we are specializing.
- TemplateName getTemplateName() const { return Template; }
-
- ArrayRef<TemplateArgument> template_arguments() const {
- return {reinterpret_cast<const TemplateArgument *>(this + 1),
- TemplateSpecializationTypeBits.NumArgs};
- }
-
- bool isSugared() const {
- return !isDependentType() || isCurrentInstantiation() || isTypeAlias();
- }
-
- QualType desugar() const {
- return isTypeAlias() ? getAliasedType() : getCanonicalTypeInternal();
- }
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx);
- static void Profile(llvm::FoldingSetNodeID &ID, TemplateName T,
- ArrayRef<TemplateArgument> Args, QualType Underlying,
- const ASTContext &Context);
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == TemplateSpecialization;
- }
-};
-
-/// Print a template argument list, including the '<' and '>'
-/// enclosing the template arguments.
-void printTemplateArgumentList(raw_ostream &OS,
- ArrayRef<TemplateArgument> Args,
- const PrintingPolicy &Policy,
- const TemplateParameterList *TPL = nullptr);
-
-void printTemplateArgumentList(raw_ostream &OS,
- ArrayRef<TemplateArgumentLoc> Args,
- const PrintingPolicy &Policy,
- const TemplateParameterList *TPL = nullptr);
-
-void printTemplateArgumentList(raw_ostream &OS,
- const TemplateArgumentListInfo &Args,
- const PrintingPolicy &Policy,
- const TemplateParameterList *TPL = nullptr);
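-
-// A minimal usage sketch (hypothetical client code; 'TST' is assumed to be a
-// valid 'const TemplateSpecializationType *' and 'Policy' a PrintingPolicy):
-//
-//   printTemplateArgumentList(llvm::errs(), TST->template_arguments(), Policy);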
-
-/// Make a best-effort determination of whether the type T can be produced by
-/// substituting Args into the default argument of Param.
-bool isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg,
- const NamedDecl *Param,
- ArrayRef<TemplateArgument> Args,
- unsigned Depth);
-
-/// The injected class name of a C++ class template or class
-/// template partial specialization. Used to record that a type was
-/// spelled with a bare identifier rather than as a template-id; the
-/// equivalent for non-templated classes is just RecordType.
-///
-/// Injected class name types are always dependent. Template
-/// instantiation turns these into RecordTypes.
-///
-/// Injected class name types are always canonical. This works
-/// because it is impossible to compare an injected class name type
-/// with the corresponding non-injected template type, for the same
-/// reason that it is impossible to directly compare template
-/// parameters from different dependent contexts: injected class name
-/// types can only occur within the scope of a particular templated
-/// declaration, and within that scope every template specialization
-/// will canonicalize to the injected class name (when appropriate
-/// according to the rules of the language).
-class InjectedClassNameType : public Type {
- friend class ASTContext; // ASTContext creates these.
- friend class ASTNodeImporter;
- friend class ASTReader; // FIXME: ASTContext::getInjectedClassNameType is not
- // currently suitable for AST reading, too many
- // interdependencies.
- template <class T> friend class serialization::AbstractTypeReader;
-
- CXXRecordDecl *Decl;
-
- /// The template specialization which this type represents.
- /// For example, in
- /// template <class T> class A { ... };
- /// this is A<T>, whereas in
- /// template <class X, class Y> class A<B<X,Y> > { ... };
- /// this is A<B<X,Y> >.
- ///
- /// It is always unqualified, always a template specialization type,
- /// and always dependent.
- QualType InjectedType;
-
- InjectedClassNameType(CXXRecordDecl *D, QualType TST)
- : Type(InjectedClassName, QualType(),
- TypeDependence::DependentInstantiation),
- Decl(D), InjectedType(TST) {
- assert(isa<TemplateSpecializationType>(TST));
- assert(!TST.hasQualifiers());
- assert(TST->isDependentType());
- }
-
-public:
- QualType getInjectedSpecializationType() const { return InjectedType; }
-
- const TemplateSpecializationType *getInjectedTST() const {
- return cast<TemplateSpecializationType>(InjectedType.getTypePtr());
- }
-
- TemplateName getTemplateName() const {
- return getInjectedTST()->getTemplateName();
- }
-
- CXXRecordDecl *getDecl() const;
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == InjectedClassName;
- }
-};
-
-/// The elaboration keyword that precedes a qualified type name or
-/// introduces an elaborated-type-specifier.
-enum class ElaboratedTypeKeyword {
- /// The "struct" keyword introduces the elaborated-type-specifier.
- Struct,
-
- /// The "__interface" keyword introduces the elaborated-type-specifier.
- Interface,
-
- /// The "union" keyword introduces the elaborated-type-specifier.
- Union,
-
- /// The "class" keyword introduces the elaborated-type-specifier.
- Class,
-
- /// The "enum" keyword introduces the elaborated-type-specifier.
- Enum,
-
- /// The "typename" keyword precedes the qualified type name, e.g.,
- /// \c typename T::type.
- Typename,
-
- /// No keyword precedes the qualified type name.
- None
-};
-
-/// The kind of a tag type.
-enum class TagTypeKind {
- /// The "struct" keyword.
- Struct,
-
- /// The "__interface" keyword.
- Interface,
-
- /// The "union" keyword.
- Union,
-
- /// The "class" keyword.
- Class,
-
- /// The "enum" keyword.
- Enum
-};
-
-/// A helper class for Type nodes having an ElaboratedTypeKeyword.
-/// The keyword is stored in the free bits of the base class.
-/// Also provides a few static helpers for converting and printing
-/// elaborated type keyword and tag type kind enumerations.
-class TypeWithKeyword : public Type {
-protected:
- TypeWithKeyword(ElaboratedTypeKeyword Keyword, TypeClass tc,
- QualType Canonical, TypeDependence Dependence)
- : Type(tc, Canonical, Dependence) {
- TypeWithKeywordBits.Keyword = llvm::to_underlying(Keyword);
- }
-
-public:
- ElaboratedTypeKeyword getKeyword() const {
- return static_cast<ElaboratedTypeKeyword>(TypeWithKeywordBits.Keyword);
- }
-
- /// Converts a type specifier (DeclSpec::TST) into an elaborated type keyword.
- static ElaboratedTypeKeyword getKeywordForTypeSpec(unsigned TypeSpec);
-
- /// Converts a type specifier (DeclSpec::TST) into a tag type kind.
- /// It is an error to provide a type specifier which *isn't* a tag kind here.
- static TagTypeKind getTagTypeKindForTypeSpec(unsigned TypeSpec);
-
- /// Converts a TagTypeKind into an elaborated type keyword.
- static ElaboratedTypeKeyword getKeywordForTagTypeKind(TagTypeKind Tag);
-
- /// Converts an elaborated type keyword into a TagTypeKind.
- /// It is an error to provide an elaborated type keyword
- /// which *isn't* a tag kind here.
- static TagTypeKind getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword);
-
- static bool KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword);
-
- static StringRef getKeywordName(ElaboratedTypeKeyword Keyword);
-
- static StringRef getTagTypeKindName(TagTypeKind Kind) {
- return getKeywordName(getKeywordForTagTypeKind(Kind));
- }
-
- class CannotCastToThisType {};
- static CannotCastToThisType classof(const Type *);
-};
-
-/// Represents a type that was referred to using an elaborated type
-/// keyword, e.g., struct S, or via a qualified name, e.g., N::M::type,
-/// or both.
-///
-/// This type is used to keep track of a type name as written in the
-/// source code, including tag keywords and any nested-name-specifiers.
-/// The type itself is always "sugar", used to express what was written
-/// in the source code but containing no additional semantic information.
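-///
-/// For example (illustrative):
-/// \code
-///   struct S s1;  // keyword 'struct', no qualifier
-///   N::M::type t; // qualifier 'N::M::', no keyword
-/// \endcode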
-class ElaboratedType final
- : public TypeWithKeyword,
- public llvm::FoldingSetNode,
- private llvm::TrailingObjects<ElaboratedType, TagDecl *> {
- friend class ASTContext; // ASTContext creates these
- friend TrailingObjects;
-
- /// The nested name specifier containing the qualifier.
- NestedNameSpecifier *NNS;
-
- /// The type that this qualified name refers to.
- QualType NamedType;
-
- /// The (re)declaration of this tag type owned by this occurrence is stored
- /// as a trailing object if there is one. Use getOwnedTagDecl to obtain
- /// it; it returns a null pointer if there is none.
-
- ElaboratedType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
- QualType NamedType, QualType CanonType, TagDecl *OwnedTagDecl)
- : TypeWithKeyword(Keyword, Elaborated, CanonType,
- // Any semantic dependence on the qualifier will have
- // been incorporated into NamedType. We still need to
- // track syntactic (instantiation / error / pack)
- // dependence on the qualifier.
- NamedType->getDependence() |
- (NNS ? toSyntacticDependence(
- toTypeDependence(NNS->getDependence()))
- : TypeDependence::None)),
- NNS(NNS), NamedType(NamedType) {
- ElaboratedTypeBits.HasOwnedTagDecl = false;
- if (OwnedTagDecl) {
- ElaboratedTypeBits.HasOwnedTagDecl = true;
- *getTrailingObjects() = OwnedTagDecl;
- }
- }
-
-public:
- /// Retrieve the qualification on this type.
- NestedNameSpecifier *getQualifier() const { return NNS; }
-
- /// Retrieve the type named by the qualified-id.
- QualType getNamedType() const { return NamedType; }
-
- /// Remove a single level of sugar.
- QualType desugar() const { return getNamedType(); }
-
- /// Returns whether this type directly provides sugar.
- bool isSugared() const { return true; }
-
- /// Return the (re)declaration of this type owned by this occurrence of this
- /// type, or nullptr if there is none.
- TagDecl *getOwnedTagDecl() const {
- return ElaboratedTypeBits.HasOwnedTagDecl ? *getTrailingObjects() : nullptr;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getKeyword(), NNS, NamedType, getOwnedTagDecl());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS, QualType NamedType,
- TagDecl *OwnedTagDecl) {
- ID.AddInteger(llvm::to_underlying(Keyword));
- ID.AddPointer(NNS);
- NamedType.Profile(ID);
- ID.AddPointer(OwnedTagDecl);
- }
-
- static bool classof(const Type *T) { return T->getTypeClass() == Elaborated; }
-};
-
-/// Represents a qualified type name for which the type name is
-/// dependent.
-///
-/// DependentNameType represents a class of dependent types that involve a
-/// possibly dependent nested-name-specifier (e.g., "T::") followed by a
-/// name of a type. The DependentNameType may start with a "typename" (for a
-/// typename-specifier), "class", "struct", "union", or "enum" (for a
-/// dependent elaborated-type-specifier), or nothing (in contexts where we
-/// know that we must be referring to a type, e.g., in a base class specifier).
-/// Typically the nested-name-specifier is dependent, but in MSVC compatibility
-/// mode, this type is used with non-dependent names to delay name lookup until
-/// instantiation.
-class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these
-
- /// The nested name specifier containing the qualifier.
- NestedNameSpecifier *NNS;
-
- /// The type that this typename specifier refers to.
- const IdentifierInfo *Name;
-
- DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
- const IdentifierInfo *Name, QualType CanonType)
- : TypeWithKeyword(Keyword, DependentName, CanonType,
- TypeDependence::DependentInstantiation |
- toTypeDependence(NNS->getDependence())),
- NNS(NNS), Name(Name) {
- assert(NNS);
- assert(Name);
- }
-
-public:
- /// Retrieve the qualification on this type.
- NestedNameSpecifier *getQualifier() const { return NNS; }
-
- /// Retrieve the identifier that terminates this type name.
- /// For example, "type" in "typename T::type".
- const IdentifierInfo *getIdentifier() const {
- return Name;
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getKeyword(), NNS, Name);
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS, const IdentifierInfo *Name) {
- ID.AddInteger(llvm::to_underlying(Keyword));
- ID.AddPointer(NNS);
- ID.AddPointer(Name);
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == DependentName;
- }
-};
-
-/// Represents a template specialization type whose template cannot be
-/// resolved, e.g.
-/// A<T>::template B<T>
-class DependentTemplateSpecializationType : public TypeWithKeyword {
- friend class ASTContext; // ASTContext creates these
-
- DependentTemplateStorage Name;
-
- DependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
- const DependentTemplateStorage &Name,
- ArrayRef<TemplateArgument> Args,
- QualType Canon);
-
-public:
- const DependentTemplateStorage &getDependentTemplateName() const {
- return Name;
- }
-
- ArrayRef<TemplateArgument> template_arguments() const {
- return {reinterpret_cast<const TemplateArgument *>(this + 1),
- DependentTemplateSpecializationTypeBits.NumArgs};
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
- Profile(ID, Context, getKeyword(), Name, template_arguments());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- ElaboratedTypeKeyword Keyword,
- const DependentTemplateStorage &Name,
- ArrayRef<TemplateArgument> Args);
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == DependentTemplateSpecialization;
- }
-};
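-
-// Illustrative example (names are hypothetical): in
-//   template <typename T> struct X { typename A<T>::template B<T> member; };
-// 'A<T>::template B<T>' is a DependentTemplateSpecializationType: 'B' cannot
-// be resolved until 'A<T>' is known, so the dependent template name and the
-// written arguments '<T>' are stored unresolved.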
-
-/// Represents a pack expansion of types.
-///
-/// Pack expansions are part of C++11 variadic templates. A pack
-/// expansion contains a pattern, which itself contains one or more
-/// "unexpanded" parameter packs. When instantiated, a pack expansion
-/// produces a series of types, each instantiated from the pattern of
-/// the expansion, where the Ith instantiation of the pattern uses the
-/// Ith arguments bound to each of the unexpanded parameter packs. The
-/// pack expansion is considered to "expand" these unexpanded
-/// parameter packs.
-///
-/// \code
-/// template<typename ...Types> struct tuple;
-///
-/// template<typename ...Types>
-/// struct tuple_of_references {
-/// typedef tuple<Types&...> type;
-/// };
-/// \endcode
-///
-/// Here, the pack expansion \c Types&... is represented via a
-/// PackExpansionType whose pattern is Types&.
-class PackExpansionType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these
-
- /// The pattern of the pack expansion.
- QualType Pattern;
-
- PackExpansionType(QualType Pattern, QualType Canon,
- UnsignedOrNone NumExpansions)
- : Type(PackExpansion, Canon,
- (Pattern->getDependence() | TypeDependence::Dependent |
- TypeDependence::Instantiation) &
- ~TypeDependence::UnexpandedPack),
- Pattern(Pattern) {
- PackExpansionTypeBits.NumExpansions =
- NumExpansions ? *NumExpansions + 1 : 0;
- }
-
-public:
- /// Retrieve the pattern of this pack expansion, which is the
- /// type that will be repeatedly instantiated when instantiating the
- /// pack expansion itself.
- QualType getPattern() const { return Pattern; }
-
- /// Retrieve the number of expansions that this pack expansion will
- /// generate, if known.
- UnsignedOrNone getNumExpansions() const {
- if (PackExpansionTypeBits.NumExpansions)
- return PackExpansionTypeBits.NumExpansions - 1;
- return std::nullopt;
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getPattern(), getNumExpansions());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType Pattern,
- UnsignedOrNone NumExpansions) {
- ID.AddPointer(Pattern.getAsOpaquePtr());
- ID.AddInteger(NumExpansions.toInternalRepresentation());
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == PackExpansion;
- }
-};
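-
-// Note on the representation above: PackExpansionTypeBits.NumExpansions holds
-// the expansion count biased by one, so a stored 0 means "unknown" and a
-// stored N+1 means exactly N expansions; getNumExpansions() undoes the bias.
-// For the tuple_of_references example above, the count remains unknown until
-// 'Types' is bound to concrete arguments.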
-
-/// This class wraps the list of protocol qualifiers. Types that can take
-/// ObjC protocol qualifiers may subclass this class.
-template <class T>
-class ObjCProtocolQualifiers {
-protected:
- ObjCProtocolQualifiers() = default;
-
- ObjCProtocolDecl * const *getProtocolStorage() const {
- return const_cast<ObjCProtocolQualifiers*>(this)->getProtocolStorage();
- }
-
- ObjCProtocolDecl **getProtocolStorage() {
- return static_cast<T*>(this)->getProtocolStorageImpl();
- }
-
- void setNumProtocols(unsigned N) {
- static_cast<T*>(this)->setNumProtocolsImpl(N);
- }
-
- void initialize(ArrayRef<ObjCProtocolDecl *> protocols) {
- setNumProtocols(protocols.size());
- assert(getNumProtocols() == protocols.size() &&
- "bitfield overflow in protocol count");
- if (!protocols.empty())
- memcpy(getProtocolStorage(), protocols.data(),
- protocols.size() * sizeof(ObjCProtocolDecl*));
- }
-
-public:
- using qual_iterator = ObjCProtocolDecl * const *;
- using qual_range = llvm::iterator_range<qual_iterator>;
-
- qual_range quals() const { return qual_range(qual_begin(), qual_end()); }
- qual_iterator qual_begin() const { return getProtocolStorage(); }
- qual_iterator qual_end() const { return qual_begin() + getNumProtocols(); }
-
- bool qual_empty() const { return getNumProtocols() == 0; }
-
- /// Return the number of qualifying protocols in this type, or 0 if
- /// there are none.
- unsigned getNumProtocols() const {
- return static_cast<const T*>(this)->getNumProtocolsImpl();
- }
-
- /// Fetch a protocol by index.
- ObjCProtocolDecl *getProtocol(unsigned I) const {
- assert(I < getNumProtocols() && "Out-of-range protocol access");
- return qual_begin()[I];
- }
-
- /// Retrieve all of the protocol qualifiers.
- ArrayRef<ObjCProtocolDecl *> getProtocols() const {
- return ArrayRef<ObjCProtocolDecl *>(qual_begin(), getNumProtocols());
- }
-};
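-
-// Minimal sketch of the CRTP contract assumed above ('MyQualifiedType' is
-// hypothetical): a subclass provides the three storage hooks and inherits the
-// qual_* accessors. ObjCTypeParamType below follows exactly this pattern.
-//   class MyQualifiedType : public Type,
-//                           public ObjCProtocolQualifiers<MyQualifiedType> {
-//     friend class ObjCProtocolQualifiers<MyQualifiedType>;
-//     unsigned NumProtocols : 6;
-//     ObjCProtocolDecl **getProtocolStorageImpl(); // trailing storage
-//     unsigned getNumProtocolsImpl() const { return NumProtocols; }
-//     void setNumProtocolsImpl(unsigned N) { NumProtocols = N; }
-//   };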
-
-/// Represents a type parameter type in Objective-C. It can take
-/// a list of protocols.
-class ObjCTypeParamType : public Type,
- public ObjCProtocolQualifiers<ObjCTypeParamType>,
- public llvm::FoldingSetNode {
- friend class ASTContext;
- friend class ObjCProtocolQualifiers<ObjCTypeParamType>;
-
- /// The number of protocols stored on this type.
- unsigned NumProtocols : 6;
-
- ObjCTypeParamDecl *OTPDecl;
-
-  /// The protocols are stored after the ObjCTypeParamType node. In the
-  /// canonical type, the list of protocols is sorted alphabetically
-  /// and uniqued.
- ObjCProtocolDecl **getProtocolStorageImpl();
-
-  /// Return the number of qualifying protocols on this type parameter type,
-  /// or 0 if there are none.
- unsigned getNumProtocolsImpl() const {
- return NumProtocols;
- }
-
- void setNumProtocolsImpl(unsigned N) {
- NumProtocols = N;
- }
-
- ObjCTypeParamType(const ObjCTypeParamDecl *D,
- QualType can,
- ArrayRef<ObjCProtocolDecl *> protocols);
-
-public:
- bool isSugared() const { return true; }
- QualType desugar() const { return getCanonicalTypeInternal(); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == ObjCTypeParam;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID);
- static void Profile(llvm::FoldingSetNodeID &ID,
- const ObjCTypeParamDecl *OTPDecl,
- QualType CanonicalType,
- ArrayRef<ObjCProtocolDecl *> protocols);
-
- ObjCTypeParamDecl *getDecl() const { return OTPDecl; }
-};
-
-/// Represents a class type in Objective-C.
-///
-/// Every Objective-C type is a combination of a base type, a set of
-/// type arguments (optional, for parameterized classes) and a list of
-/// protocols.
-///
-/// Given the following declarations:
-/// \code
-/// \@class C<T>;
-/// \@protocol P;
-/// \endcode
-///
-/// 'C' is an ObjCInterfaceType C. It is sugar for an ObjCObjectType
-/// with base C and no protocols.
-///
-/// 'C<P>' is an unspecialized ObjCObjectType with base C and protocol list [P].
-/// 'C<C*>' is a specialized ObjCObjectType with type arguments 'C*' and no
-/// protocol list.
-/// 'C<C*><P>' is a specialized ObjCObjectType with base C, type arguments 'C*',
-/// and protocol list [P].
-///
-/// 'id' is a TypedefType which is sugar for an ObjCObjectPointerType whose
-/// pointee is an ObjCObjectType with base BuiltinType::ObjCIdType
-/// and no protocols.
-///
-/// 'id<P>' is an ObjCObjectPointerType whose pointee is an ObjCObjectType
-/// with base BuiltinType::ObjCIdType and protocol list [P]. Eventually
-/// this should get its own sugar class to better represent the source.
-class ObjCObjectType : public Type,
- public ObjCProtocolQualifiers<ObjCObjectType> {
- friend class ObjCProtocolQualifiers<ObjCObjectType>;
-
-  // ObjCObjectType.NumTypeArgs - the number of type arguments stored
-  // after the ObjCObjectTypeImpl node.
-  // ObjCObjectType.NumProtocols - the number of protocols stored
-  // after the type arguments of the ObjCObjectTypeImpl node.
-  //
-  // These protocols are those written directly on the type. If
-  // protocol qualifiers ever become additive, the iterators will need
-  // to get kind of complicated.
- //
- // In the canonical object type, these are sorted alphabetically
- // and uniqued.
-
-  /// Either a BuiltinType or an ObjCInterfaceType, or sugar for either.
- QualType BaseType;
-
- /// Cached superclass type.
- mutable llvm::PointerIntPair<const ObjCObjectType *, 1, bool>
- CachedSuperClassType;
-
- QualType *getTypeArgStorage();
- const QualType *getTypeArgStorage() const {
- return const_cast<ObjCObjectType *>(this)->getTypeArgStorage();
- }
-
- ObjCProtocolDecl **getProtocolStorageImpl();
- /// Return the number of qualifying protocols in this interface type,
- /// or 0 if there are none.
- unsigned getNumProtocolsImpl() const {
- return ObjCObjectTypeBits.NumProtocols;
- }
- void setNumProtocolsImpl(unsigned N) {
- ObjCObjectTypeBits.NumProtocols = N;
- }
-
-protected:
- enum Nonce_ObjCInterface { Nonce_ObjCInterface };
-
- ObjCObjectType(QualType Canonical, QualType Base,
- ArrayRef<QualType> typeArgs,
- ArrayRef<ObjCProtocolDecl *> protocols,
- bool isKindOf);
-
- ObjCObjectType(enum Nonce_ObjCInterface)
- : Type(ObjCInterface, QualType(), TypeDependence::None),
- BaseType(QualType(this_(), 0)) {
- ObjCObjectTypeBits.NumProtocols = 0;
- ObjCObjectTypeBits.NumTypeArgs = 0;
- ObjCObjectTypeBits.IsKindOf = 0;
- }
-
- void computeSuperClassTypeSlow() const;
-
-public:
- /// Gets the base type of this object type. This is always (possibly
- /// sugar for) one of:
- /// - the 'id' builtin type (as opposed to the 'id' type visible to the
- /// user, which is a typedef for an ObjCObjectPointerType)
- /// - the 'Class' builtin type (same caveat)
- /// - an ObjCObjectType (currently always an ObjCInterfaceType)
- QualType getBaseType() const { return BaseType; }
-
- bool isObjCId() const {
- return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCId);
- }
-
- bool isObjCClass() const {
- return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCClass);
- }
-
- bool isObjCUnqualifiedId() const { return qual_empty() && isObjCId(); }
- bool isObjCUnqualifiedClass() const { return qual_empty() && isObjCClass(); }
- bool isObjCUnqualifiedIdOrClass() const {
- if (!qual_empty()) return false;
- if (const BuiltinType *T = getBaseType()->getAs<BuiltinType>())
- return T->getKind() == BuiltinType::ObjCId ||
- T->getKind() == BuiltinType::ObjCClass;
- return false;
- }
- bool isObjCQualifiedId() const { return !qual_empty() && isObjCId(); }
- bool isObjCQualifiedClass() const { return !qual_empty() && isObjCClass(); }
-
- /// Gets the interface declaration for this object type, if the base type
- /// really is an interface.
- ObjCInterfaceDecl *getInterface() const;
-
- /// Determine whether this object type is "specialized", meaning
- /// that it has type arguments.
- bool isSpecialized() const;
-
- /// Determine whether this object type was written with type arguments.
- bool isSpecializedAsWritten() const {
- return ObjCObjectTypeBits.NumTypeArgs > 0;
- }
-
- /// Determine whether this object type is "unspecialized", meaning
- /// that it has no type arguments.
- bool isUnspecialized() const { return !isSpecialized(); }
-
- /// Determine whether this object type is "unspecialized" as
- /// written, meaning that it has no type arguments.
- bool isUnspecializedAsWritten() const { return !isSpecializedAsWritten(); }
-
- /// Retrieve the type arguments of this object type (semantically).
- ArrayRef<QualType> getTypeArgs() const;
-
- /// Retrieve the type arguments of this object type as they were
- /// written.
- ArrayRef<QualType> getTypeArgsAsWritten() const {
- return {getTypeArgStorage(), ObjCObjectTypeBits.NumTypeArgs};
- }
-
- /// Whether this is a "__kindof" type as written.
- bool isKindOfTypeAsWritten() const { return ObjCObjectTypeBits.IsKindOf; }
-
-  /// Whether this is a "__kindof" type (semantically).
- bool isKindOfType() const;
-
- /// Retrieve the type of the superclass of this object type.
- ///
- /// This operation substitutes any type arguments into the
- /// superclass of the current class type, potentially producing a
- /// specialization of the superclass type. Produces a null type if
- /// there is no superclass.
- QualType getSuperClassType() const {
- if (!CachedSuperClassType.getInt())
- computeSuperClassTypeSlow();
-
- assert(CachedSuperClassType.getInt() && "Superclass not set?");
- return QualType(CachedSuperClassType.getPointer(), 0);
- }
-
- /// Strip off the Objective-C "kindof" type and (with it) any
- /// protocol qualifiers.
- QualType stripObjCKindOfTypeAndQuals(const ASTContext &ctx) const;
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == ObjCObject ||
- T->getTypeClass() == ObjCInterface;
- }
-};
-
-/// A class providing a concrete implementation
-/// of ObjCObjectType, so as not to increase the footprint of
-/// ObjCInterfaceType. Code outside of ASTContext and the core type
-/// system should not reference this type.
-class ObjCObjectTypeImpl : public ObjCObjectType, public llvm::FoldingSetNode {
- friend class ASTContext;
-
- // If anyone adds fields here, ObjCObjectType::getProtocolStorage()
- // will need to be modified.
-
- ObjCObjectTypeImpl(QualType Canonical, QualType Base,
- ArrayRef<QualType> typeArgs,
- ArrayRef<ObjCProtocolDecl *> protocols,
- bool isKindOf)
- : ObjCObjectType(Canonical, Base, typeArgs, protocols, isKindOf) {}
-
-public:
- void Profile(llvm::FoldingSetNodeID &ID);
- static void Profile(llvm::FoldingSetNodeID &ID,
- QualType Base,
- ArrayRef<QualType> typeArgs,
- ArrayRef<ObjCProtocolDecl *> protocols,
- bool isKindOf);
-};
-
-inline QualType *ObjCObjectType::getTypeArgStorage() {
- return reinterpret_cast<QualType *>(static_cast<ObjCObjectTypeImpl*>(this)+1);
+inline CXXRecordDecl *Type::castAsCXXRecordDecl() const {
+ const auto *TT = cast<TagType>(CanonicalType);
+ return cast<CXXRecordDecl>(TT->getOriginalDecl())->getDefinitionOrSelf();
}
-inline ObjCProtocolDecl **ObjCObjectType::getProtocolStorageImpl() {
- return reinterpret_cast<ObjCProtocolDecl**>(
- getTypeArgStorage() + ObjCObjectTypeBits.NumTypeArgs);
+inline RecordDecl *Type::getAsRecordDecl() const {
+ const auto *TT = dyn_cast<TagType>(CanonicalType);
+ if (!isa_and_present<RecordType, InjectedClassNameType>(TT))
+ return nullptr;
+ return cast<RecordDecl>(TT->getOriginalDecl())->getDefinitionOrSelf();
}
-inline ObjCProtocolDecl **ObjCTypeParamType::getProtocolStorageImpl() {
- return reinterpret_cast<ObjCProtocolDecl**>(
- static_cast<ObjCTypeParamType*>(this)+1);
+inline RecordDecl *Type::castAsRecordDecl() const {
+ const auto *TT = cast<TagType>(CanonicalType);
+ return cast<RecordDecl>(TT->getOriginalDecl())->getDefinitionOrSelf();
}
-/// Interfaces are the core concept in Objective-C for object-oriented design.
-/// They correspond roughly to C++ classes. There are two kinds of interface
-/// types: normal interfaces like `NSString`, and qualified interfaces, which
-/// are qualified with a protocol list like `NSString<NSCopyable, NSAmazing>`.
-///
-/// ObjCInterfaceType guarantees the following properties when considered
-/// as a subtype of its superclass, ObjCObjectType:
-/// - There are no protocol qualifiers. To reinforce this, code which
-/// tries to invoke the protocol methods via an ObjCInterfaceType will
-/// fail to compile.
-/// - It is its own base type. That is, if T is an ObjCInterfaceType*,
-/// T->getBaseType() == QualType(T, 0).
-class ObjCInterfaceType : public ObjCObjectType {
- friend class ASTContext; // ASTContext creates these.
- friend class ASTReader;
- template <class T> friend class serialization::AbstractTypeReader;
-
- ObjCInterfaceDecl *Decl;
-
- ObjCInterfaceType(const ObjCInterfaceDecl *D)
- : ObjCObjectType(Nonce_ObjCInterface),
- Decl(const_cast<ObjCInterfaceDecl*>(D)) {}
-
-public:
- /// Get the declaration of this interface.
- ObjCInterfaceDecl *getDecl() const;
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == ObjCInterface;
- }
-
- // Nonsense to "hide" certain members of ObjCObjectType within this
- // class. People asking for protocols on an ObjCInterfaceType are
- // not going to get what they want: ObjCInterfaceTypes are
- // guaranteed to have no protocols.
- enum {
- qual_iterator,
- qual_begin,
- qual_end,
- getNumProtocols,
- getProtocol
- };
-};
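-
-// The enum above turns, e.g., 'interfaceTy->getNumProtocols()' into a compile
-// error instead of a call that would trivially return 0; clients that really
-// want the ObjCObjectType view must cast to it explicitly.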
-
-inline ObjCInterfaceDecl *ObjCObjectType::getInterface() const {
- QualType baseType = getBaseType();
- while (const auto *ObjT = baseType->getAs<ObjCObjectType>()) {
- if (const auto *T = dyn_cast<ObjCInterfaceType>(ObjT))
- return T->getDecl();
-
- baseType = ObjT->getBaseType();
- }
-
+inline EnumDecl *Type::getAsEnumDecl() const {
+ if (const auto *TT = dyn_cast<EnumType>(CanonicalType))
+ return TT->getOriginalDecl()->getDefinitionOrSelf();
return nullptr;
}
-/// Represents a pointer to an Objective-C object.
-///
-/// These are constructed from pointer declarators when the pointee type is
-/// an ObjCObjectType (or sugar for one). In addition, the 'id' and 'Class'
-/// types are typedefs for these, and the protocol-qualified types 'id<P>'
-/// and 'Class<P>' are translated into these.
-///
-/// Pointers to pointers to Objective-C objects are still PointerTypes;
-/// only the first level of pointer gets its own type implementation.
-class ObjCObjectPointerType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these.
-
- QualType PointeeType;
-
- ObjCObjectPointerType(QualType Canonical, QualType Pointee)
- : Type(ObjCObjectPointer, Canonical, Pointee->getDependence()),
- PointeeType(Pointee) {}
-
-public:
- /// Gets the type pointed to by this ObjC pointer.
- /// The result will always be an ObjCObjectType or sugar thereof.
- QualType getPointeeType() const { return PointeeType; }
-
- /// Gets the type pointed to by this ObjC pointer. Always returns non-null.
- ///
- /// This method is equivalent to getPointeeType() except that
- /// it discards any typedefs (or other sugar) between this
- /// type and the "outermost" object type. So for:
- /// \code
- /// \@class A; \@protocol P; \@protocol Q;
- /// typedef A<P> AP;
- /// typedef A A1;
- /// typedef A1<P> A1P;
- /// typedef A1P<Q> A1PQ;
- /// \endcode
- /// For 'A*', getObjectType() will return 'A'.
- /// For 'A<P>*', getObjectType() will return 'A<P>'.
- /// For 'AP*', getObjectType() will return 'A<P>'.
- /// For 'A1*', getObjectType() will return 'A'.
- /// For 'A1<P>*', getObjectType() will return 'A1<P>'.
- /// For 'A1P*', getObjectType() will return 'A1<P>'.
- /// For 'A1PQ*', getObjectType() will return 'A1<Q>', because
- /// adding protocols to a protocol-qualified base discards the
- /// old qualifiers (for now). But if it didn't, getObjectType()
- /// would return 'A1P<Q>' (and we'd have to make iterating over
- /// qualifiers more complicated).
- const ObjCObjectType *getObjectType() const {
- return PointeeType->castAs<ObjCObjectType>();
- }
-
-  /// If this pointer points to an Objective-C
- /// \@interface type, gets the type for that interface. Any protocol
- /// qualifiers on the interface are ignored.
- ///
- /// \return null if the base type for this pointer is 'id' or 'Class'
- const ObjCInterfaceType *getInterfaceType() const;
-
-  /// If this pointer points to an Objective-C \@interface
- /// type, gets the declaration for that interface.
- ///
- /// \return null if the base type for this pointer is 'id' or 'Class'
- ObjCInterfaceDecl *getInterfaceDecl() const {
- return getObjectType()->getInterface();
- }
-
- /// True if this is equivalent to the 'id' type, i.e. if
- /// its object type is the primitive 'id' type with no protocols.
- bool isObjCIdType() const {
- return getObjectType()->isObjCUnqualifiedId();
- }
-
-  /// True if this is equivalent to the 'Class' type, i.e. if
-  /// its object type is the primitive 'Class' type with no protocols.
- bool isObjCClassType() const {
- return getObjectType()->isObjCUnqualifiedClass();
- }
-
-  /// True if this is equivalent to the 'id' or 'Class' type.
- bool isObjCIdOrClassType() const {
- return getObjectType()->isObjCUnqualifiedIdOrClass();
- }
-
- /// True if this is equivalent to 'id<P>' for some non-empty set of
- /// protocols.
- bool isObjCQualifiedIdType() const {
- return getObjectType()->isObjCQualifiedId();
- }
-
- /// True if this is equivalent to 'Class<P>' for some non-empty set of
- /// protocols.
- bool isObjCQualifiedClassType() const {
- return getObjectType()->isObjCQualifiedClass();
- }
-
- /// Whether this is a "__kindof" type.
- bool isKindOfType() const { return getObjectType()->isKindOfType(); }
-
- /// Whether this type is specialized, meaning that it has type arguments.
- bool isSpecialized() const { return getObjectType()->isSpecialized(); }
-
-  /// Whether this type was written with type arguments.
- bool isSpecializedAsWritten() const {
- return getObjectType()->isSpecializedAsWritten();
- }
-
-  /// Whether this type is unspecialized, meaning that it has no type arguments.
- bool isUnspecialized() const { return getObjectType()->isUnspecialized(); }
-
- /// Determine whether this object type is "unspecialized" as
- /// written, meaning that it has no type arguments.
- bool isUnspecializedAsWritten() const { return !isSpecializedAsWritten(); }
-
- /// Retrieve the type arguments for this type.
- ArrayRef<QualType> getTypeArgs() const {
- return getObjectType()->getTypeArgs();
- }
-
- /// Retrieve the type arguments for this type.
- ArrayRef<QualType> getTypeArgsAsWritten() const {
- return getObjectType()->getTypeArgsAsWritten();
- }
-
- /// An iterator over the qualifiers on the object type. Provided
- /// for convenience. This will always iterate over the full set of
- /// protocols on a type, not just those provided directly.
- using qual_iterator = ObjCObjectType::qual_iterator;
- using qual_range = llvm::iterator_range<qual_iterator>;
-
- qual_range quals() const { return qual_range(qual_begin(), qual_end()); }
-
- qual_iterator qual_begin() const {
- return getObjectType()->qual_begin();
- }
-
- qual_iterator qual_end() const {
- return getObjectType()->qual_end();
- }
-
- bool qual_empty() const { return getObjectType()->qual_empty(); }
-
- /// Return the number of qualifying protocols on the object type.
- unsigned getNumProtocols() const {
- return getObjectType()->getNumProtocols();
- }
-
- /// Retrieve a qualifying protocol by index on the object type.
- ObjCProtocolDecl *getProtocol(unsigned I) const {
- return getObjectType()->getProtocol(I);
- }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- /// Retrieve the type of the superclass of this object pointer type.
- ///
- /// This operation substitutes any type arguments into the
- /// superclass of the current class type, potentially producing a
- /// pointer to a specialization of the superclass type. Produces a
- /// null type if there is no superclass.
- QualType getSuperClassType() const;
-
- /// Strip off the Objective-C "kindof" type and (with it) any
- /// protocol qualifiers.
- const ObjCObjectPointerType *stripObjCKindOfTypeAndQuals(
- const ASTContext &ctx) const;
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getPointeeType());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType T) {
- ID.AddPointer(T.getAsOpaquePtr());
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == ObjCObjectPointer;
- }
-};
-
-class AtomicType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these.
-
- QualType ValueType;
-
- AtomicType(QualType ValTy, QualType Canonical)
- : Type(Atomic, Canonical, ValTy->getDependence()), ValueType(ValTy) {}
-
-public:
- /// Gets the type contained by this atomic type, i.e.
- /// the type returned by performing an atomic load of this atomic type.
- QualType getValueType() const { return ValueType; }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getValueType());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType T) {
- ID.AddPointer(T.getAsOpaquePtr());
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == Atomic;
- }
-};
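-
-// Illustrative example: the C11 type '_Atomic(int)' is an AtomicType whose
-// getValueType() is 'int'.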
-
-/// Represents an OpenCL 2.0 pipe type.
-class PipeType : public Type, public llvm::FoldingSetNode {
- friend class ASTContext; // ASTContext creates these.
-
- QualType ElementType;
- bool isRead;
-
- PipeType(QualType elemType, QualType CanonicalPtr, bool isRead)
- : Type(Pipe, CanonicalPtr, elemType->getDependence()),
- ElementType(elemType), isRead(isRead) {}
-
-public:
- QualType getElementType() const { return ElementType; }
-
- bool isSugared() const { return false; }
-
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getElementType(), isReadOnly());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, QualType T, bool isRead) {
- ID.AddPointer(T.getAsOpaquePtr());
- ID.AddBoolean(isRead);
- }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == Pipe;
- }
-
- bool isReadOnly() const { return isRead; }
-};
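-
-// Illustrative example: for an OpenCL kernel parameter declared as
-// 'read_only pipe int p', 'p' has a PipeType with element type 'int' and
-// isReadOnly() == true.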
-
-/// A bit-precise integer type ('_BitInt') of a specified bitwidth.
-class BitIntType final : public Type, public llvm::FoldingSetNode {
- friend class ASTContext;
- LLVM_PREFERRED_TYPE(bool)
- unsigned IsUnsigned : 1;
- unsigned NumBits : 24;
-
-protected:
- BitIntType(bool isUnsigned, unsigned NumBits);
-
-public:
- bool isUnsigned() const { return IsUnsigned; }
- bool isSigned() const { return !IsUnsigned; }
- unsigned getNumBits() const { return NumBits; }
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID) const {
- Profile(ID, isUnsigned(), getNumBits());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, bool IsUnsigned,
- unsigned NumBits) {
- ID.AddBoolean(IsUnsigned);
- ID.AddInteger(NumBits);
- }
-
- static bool classof(const Type *T) { return T->getTypeClass() == BitInt; }
-};
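-
-// Illustrative example: 'unsigned _BitInt(24)' is a BitIntType with
-// isUnsigned() == true and getNumBits() == 24. A '_BitInt(N)' whose width is
-// value-dependent is represented by DependentBitIntType below instead.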
-
-class DependentBitIntType final : public Type, public llvm::FoldingSetNode {
- friend class ASTContext;
- llvm::PointerIntPair<Expr*, 1, bool> ExprAndUnsigned;
-
-protected:
- DependentBitIntType(bool IsUnsigned, Expr *NumBits);
-
-public:
- bool isUnsigned() const;
- bool isSigned() const { return !isUnsigned(); }
- Expr *getNumBitsExpr() const;
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
- Profile(ID, Context, isUnsigned(), getNumBitsExpr());
- }
- static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- bool IsUnsigned, Expr *NumBitsExpr);
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == DependentBitInt;
- }
-};
-
-class PredefinedSugarType final : public Type {
-public:
- friend class ASTContext;
- using Kind = PredefinedSugarKind;
-
-private:
- PredefinedSugarType(Kind KD, const IdentifierInfo *IdentName,
- QualType CanonicalType)
- : Type(PredefinedSugar, CanonicalType, TypeDependence::None),
- Name(IdentName) {
- PredefinedSugarTypeBits.Kind = llvm::to_underlying(KD);
- }
-
- static StringRef getName(Kind KD);
-
- const IdentifierInfo *Name;
-
-public:
- bool isSugared() const { return true; }
-
- QualType desugar() const { return getCanonicalTypeInternal(); }
-
- Kind getKind() const { return Kind(PredefinedSugarTypeBits.Kind); }
-
- const IdentifierInfo *getIdentifier() const { return Name; }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == PredefinedSugar;
- }
-};
-
-/// Collects a set of qualifiers, typically by stripping them off a type,
-/// so that they can later be applied to another type.
-class QualifierCollector : public Qualifiers {
-public:
- QualifierCollector(Qualifiers Qs = Qualifiers()) : Qualifiers(Qs) {}
-
- /// Collect any qualifiers on the given type and return an
- /// unqualified type. The qualifiers are assumed to be consistent
- /// with those already in the type.
- const Type *strip(QualType type) {
- addFastQualifiers(type.getLocalFastQualifiers());
- if (!type.hasLocalNonFastQualifiers())
- return type.getTypePtrUnsafe();
-
- const ExtQuals *extQuals = type.getExtQualsUnsafe();
- addConsistentQualifiers(extQuals->getQualifiers());
- return extQuals->getBaseType();
- }
-
- /// Apply the collected qualifiers to the given type.
- QualType apply(const ASTContext &Context, QualType QT) const;
-
- /// Apply the collected qualifiers to the given type.
- QualType apply(const ASTContext &Context, const Type* T) const;
-};
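-
-// Usage sketch (illustrative; 'Ctx', 'QT', and 'OtherTy' are assumed to be
-// in scope):
-//   QualifierCollector Qc;
-//   const Type *Bare = Qc.strip(QT);       // Qc now holds QT's qualifiers.
-//   QualType Requalified = Qc.apply(Ctx, OtherTy);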
-
-/// A container of type source information.
-///
-/// A client can read the relevant info using TypeLoc wrappers, e.g.:
-/// @code
-/// TypeLoc TL = TypeSourceInfo->getTypeLoc();
-/// TL.getBeginLoc().print(OS, SrcMgr);
-/// @endcode
-class alignas(8) TypeSourceInfo {
- // Contains a memory block after the class, used for type source information,
- // allocated by ASTContext.
- friend class ASTContext;
-
- QualType Ty;
-
- TypeSourceInfo(QualType ty, size_t DataSize); // implemented in TypeLoc.h
-
-public:
- /// Return the type wrapped by this type source info.
- QualType getType() const { return Ty; }
-
- /// Return the TypeLoc wrapper for the type source info.
- TypeLoc getTypeLoc() const; // implemented in TypeLoc.h
-
- /// Override the type stored in this TypeSourceInfo. Use with caution!
- void overrideType(QualType T) { Ty = T; }
-};
-
-// Inline function definitions.
-
-inline SplitQualType SplitQualType::getSingleStepDesugaredType() const {
- SplitQualType desugar =
- Ty->getLocallyUnqualifiedSingleStepDesugaredType().split();
- desugar.Quals.addConsistentQualifiers(Quals);
- return desugar;
-}
-
-inline const Type *QualType::getTypePtr() const {
- return getCommonPtr()->BaseType;
-}
-
-inline const Type *QualType::getTypePtrOrNull() const {
- return (isNull() ? nullptr : getCommonPtr()->BaseType);
-}
-
-inline bool QualType::isReferenceable() const {
- // C++ [defns.referenceable]
- // type that is either an object type, a function type that does not have
- // cv-qualifiers or a ref-qualifier, or a reference type.
- const Type &Self = **this;
- if (Self.isObjectType() || Self.isReferenceType())
- return true;
- if (const auto *F = Self.getAs<FunctionProtoType>())
- return F->getMethodQuals().empty() && F->getRefQualifier() == RQ_None;
-
- return false;
-}
-
-inline SplitQualType QualType::split() const {
- if (!hasLocalNonFastQualifiers())
- return SplitQualType(getTypePtrUnsafe(),
- Qualifiers::fromFastMask(getLocalFastQualifiers()));
-
- const ExtQuals *eq = getExtQualsUnsafe();
- Qualifiers qs = eq->getQualifiers();
- qs.addFastQualifiers(getLocalFastQualifiers());
- return SplitQualType(eq->getBaseType(), qs);
-}
-
-inline Qualifiers QualType::getLocalQualifiers() const {
- Qualifiers Quals;
- if (hasLocalNonFastQualifiers())
- Quals = getExtQualsUnsafe()->getQualifiers();
- Quals.addFastQualifiers(getLocalFastQualifiers());
- return Quals;
-}
-
-inline Qualifiers QualType::getQualifiers() const {
- Qualifiers quals = getCommonPtr()->CanonicalType.getLocalQualifiers();
- quals.addFastQualifiers(getLocalFastQualifiers());
- return quals;
-}
-
-inline unsigned QualType::getCVRQualifiers() const {
- unsigned cvr = getCommonPtr()->CanonicalType.getLocalCVRQualifiers();
- cvr |= getLocalCVRQualifiers();
- return cvr;
-}
-
-inline QualType QualType::getCanonicalType() const {
- QualType canon = getCommonPtr()->CanonicalType;
- return canon.withFastQualifiers(getLocalFastQualifiers());
-}
-
-inline bool QualType::isCanonical() const {
- return getTypePtr()->isCanonicalUnqualified();
-}
-
-inline bool QualType::isCanonicalAsParam() const {
- if (!isCanonical()) return false;
- if (hasLocalQualifiers()) return false;
-
- const Type *T = getTypePtr();
- if (T->isVariablyModifiedType() && T->hasSizedVLAType())
- return false;
-
- return !isa<FunctionType>(T) &&
- (!isa<ArrayType>(T) || isa<ArrayParameterType>(T));
-}
-
-inline bool QualType::isConstQualified() const {
- return isLocalConstQualified() ||
- getCommonPtr()->CanonicalType.isLocalConstQualified();
-}
-
-inline bool QualType::isRestrictQualified() const {
- return isLocalRestrictQualified() ||
- getCommonPtr()->CanonicalType.isLocalRestrictQualified();
-}
-
-inline bool QualType::isVolatileQualified() const {
- return isLocalVolatileQualified() ||
- getCommonPtr()->CanonicalType.isLocalVolatileQualified();
-}
-
-inline bool QualType::hasQualifiers() const {
- return hasLocalQualifiers() ||
- getCommonPtr()->CanonicalType.hasLocalQualifiers();
-}
-
-inline QualType QualType::getUnqualifiedType() const {
- if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
- return QualType(getTypePtr(), 0);
-
- return QualType(getSplitUnqualifiedTypeImpl(*this).Ty, 0);
-}
-
-inline SplitQualType QualType::getSplitUnqualifiedType() const {
- if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
- return split();
-
- return getSplitUnqualifiedTypeImpl(*this);
-}
-
-inline void QualType::removeLocalConst() {
- removeLocalFastQualifiers(Qualifiers::Const);
-}
-
-inline void QualType::removeLocalRestrict() {
- removeLocalFastQualifiers(Qualifiers::Restrict);
-}
-
-inline void QualType::removeLocalVolatile() {
- removeLocalFastQualifiers(Qualifiers::Volatile);
-}
-
-/// Check if this type has any address space qualifier.
-inline bool QualType::hasAddressSpace() const {
- return getQualifiers().hasAddressSpace();
+inline EnumDecl *Type::castAsEnumDecl() const {
+ return cast<EnumType>(CanonicalType)
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
}
-/// Return the address space of this type.
-inline LangAS QualType::getAddressSpace() const {
- return getQualifiers().getAddressSpace();
+inline TagDecl *Type::getAsTagDecl() const {
+ if (const auto *TT = dyn_cast<TagType>(CanonicalType))
+ return TT->getOriginalDecl()->getDefinitionOrSelf();
+ return nullptr;
}
-/// Return the gc attribute of this type.
-inline Qualifiers::GC QualType::getObjCGCAttr() const {
- return getQualifiers().getObjCGCAttr();
+inline TagDecl *Type::castAsTagDecl() const {
+ return cast<TagType>(CanonicalType)->getOriginalDecl()->getDefinitionOrSelf();
}
inline bool QualType::hasNonTrivialToPrimitiveDefaultInitializeCUnion() const {
@@ -8307,809 +90,6 @@ inline bool QualType::hasNonTrivialToPrimitiveCopyCUnion() const {
return false;
}
-inline FunctionType::ExtInfo getFunctionExtInfo(const Type &t) {
- if (const auto *PT = t.getAs<PointerType>()) {
- if (const auto *FT = PT->getPointeeType()->getAs<FunctionType>())
- return FT->getExtInfo();
- } else if (const auto *FT = t.getAs<FunctionType>())
- return FT->getExtInfo();
-
- return FunctionType::ExtInfo();
-}
-
-inline FunctionType::ExtInfo getFunctionExtInfo(QualType t) {
- return getFunctionExtInfo(*t);
-}
-
-/// Determine whether this type is more
-/// qualified than the Other type. For example, "const volatile int"
-/// is more qualified than "const int", "volatile int", and
-/// "int". However, it is not more qualified than "const volatile
-/// int".
-inline bool QualType::isMoreQualifiedThan(QualType other,
- const ASTContext &Ctx) const {
- Qualifiers MyQuals = getQualifiers();
- Qualifiers OtherQuals = other.getQualifiers();
- return (MyQuals != OtherQuals && MyQuals.compatiblyIncludes(OtherQuals, Ctx));
-}
-
-/// Determine whether this type is at least
-/// as qualified as the Other type. For example, "const volatile
-/// int" is at least as qualified as "const int", "volatile int",
-/// "int", and "const volatile int".
-inline bool QualType::isAtLeastAsQualifiedAs(QualType other,
- const ASTContext &Ctx) const {
- Qualifiers OtherQuals = other.getQualifiers();
-
- // Ignore __unaligned qualifier if this type is a void.
- if (getUnqualifiedType()->isVoidType())
- OtherQuals.removeUnaligned();
-
- return getQualifiers().compatiblyIncludes(OtherQuals, Ctx);
-}
-
-/// If Type is a reference type (e.g., const
-/// int&), returns the type that the reference refers to ("const
-/// int"). Otherwise, returns the type itself. This routine is used
-/// throughout Sema to implement C++ 5p6:
-///
-/// If an expression initially has the type "reference to T" (8.3.2,
-/// 8.5.3), the type is adjusted to "T" prior to any further
-/// analysis, the expression designates the object or function
-/// denoted by the reference, and the expression is an lvalue.
-inline QualType QualType::getNonReferenceType() const {
- if (const auto *RefType = (*this)->getAs<ReferenceType>())
- return RefType->getPointeeType();
- else
- return *this;
-}
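-
-// Illustrative example: for 'const int &', getNonReferenceType() yields
-// 'const int'; for a non-reference type it returns the type unchanged.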
-
-inline bool QualType::isCForbiddenLValueType() const {
- return ((getTypePtr()->isVoidType() && !hasQualifiers()) ||
- getTypePtr()->isFunctionType());
-}
-
-/// Tests whether the type is categorized as a fundamental type.
-///
-/// \returns True for types specified in C++11 [basic.fundamental].
-inline bool Type::isFundamentalType() const {
- return isVoidType() ||
- isNullPtrType() ||
- // FIXME: It's really annoying that we don't have an
- // 'isArithmeticType()' which agrees with the standard definition.
- (isArithmeticType() && !isEnumeralType());
-}
-
-/// Tests whether the type is categorized as a compound type.
-///
-/// \returns True for types specified in C++11 [basic.compound].
-inline bool Type::isCompoundType() const {
-  // C++11 [basic.compound]p1:
- // Compound types can be constructed in the following ways:
- // -- arrays of objects of a given type [...];
- return isArrayType() ||
- // -- functions, which have parameters of given types [...];
- isFunctionType() ||
- // -- pointers to void or objects or functions [...];
- isPointerType() ||
- // -- references to objects or functions of a given type. [...]
- isReferenceType() ||
- // -- classes containing a sequence of objects of various types, [...];
- isRecordType() ||
- // -- unions, which are classes capable of containing objects of different
- // types at different times;
- isUnionType() ||
- // -- enumerations, which comprise a set of named constant values. [...];
- isEnumeralType() ||
- // -- pointers to non-static class members, [...].
- isMemberPointerType();
-}
-
-inline bool Type::isFunctionType() const {
- return isa<FunctionType>(CanonicalType);
-}
-
-inline bool Type::isPointerType() const {
- return isa<PointerType>(CanonicalType);
-}
-
-inline bool Type::isPointerOrReferenceType() const {
- return isPointerType() || isReferenceType();
-}
-
-inline bool Type::isAnyPointerType() const {
- return isPointerType() || isObjCObjectPointerType();
-}
-
-inline bool Type::isSignableType(const ASTContext &Ctx) const {
- return isSignablePointerType() || isSignableIntegerType(Ctx);
-}
-
-inline bool Type::isSignablePointerType() const {
- return isPointerType() || isObjCClassType() || isObjCQualifiedClassType();
-}
-
-inline bool Type::isBlockPointerType() const {
- return isa<BlockPointerType>(CanonicalType);
-}
-
-inline bool Type::isReferenceType() const {
- return isa<ReferenceType>(CanonicalType);
-}
-
-inline bool Type::isLValueReferenceType() const {
- return isa<LValueReferenceType>(CanonicalType);
-}
-
-inline bool Type::isRValueReferenceType() const {
- return isa<RValueReferenceType>(CanonicalType);
-}
-
-inline bool Type::isObjectPointerType() const {
- // Note: an "object pointer type" is not the same thing as a pointer to an
- // object type; rather, it is a pointer to an object type or a pointer to cv
- // void.
- if (const auto *T = getAs<PointerType>())
- return !T->getPointeeType()->isFunctionType();
- else
- return false;
-}
-
-inline bool Type::isCFIUncheckedCalleeFunctionType() const {
- if (const auto *Fn = getAs<FunctionProtoType>())
- return Fn->hasCFIUncheckedCallee();
- return false;
-}
-
-inline bool Type::hasPointeeToToCFIUncheckedCalleeFunctionType() const {
- QualType Pointee;
- if (const auto *PT = getAs<PointerType>())
- Pointee = PT->getPointeeType();
- else if (const auto *RT = getAs<ReferenceType>())
- Pointee = RT->getPointeeType();
- else if (const auto *MPT = getAs<MemberPointerType>())
- Pointee = MPT->getPointeeType();
- else if (const auto *DT = getAs<DecayedType>())
- Pointee = DT->getPointeeType();
- else
- return false;
- return Pointee->isCFIUncheckedCalleeFunctionType();
-}
-
-inline bool Type::isFunctionPointerType() const {
- if (const auto *T = getAs<PointerType>())
- return T->getPointeeType()->isFunctionType();
- else
- return false;
-}
-
-inline bool Type::isFunctionReferenceType() const {
- if (const auto *T = getAs<ReferenceType>())
- return T->getPointeeType()->isFunctionType();
- else
- return false;
-}
-
-inline bool Type::isMemberPointerType() const {
- return isa<MemberPointerType>(CanonicalType);
-}
-
-inline bool Type::isMemberFunctionPointerType() const {
- if (const auto *T = getAs<MemberPointerType>())
- return T->isMemberFunctionPointer();
- else
- return false;
-}
-
-inline bool Type::isMemberDataPointerType() const {
- if (const auto *T = getAs<MemberPointerType>())
- return T->isMemberDataPointer();
- else
- return false;
-}
-
-inline bool Type::isArrayType() const {
- return isa<ArrayType>(CanonicalType);
-}
-
-inline bool Type::isConstantArrayType() const {
- return isa<ConstantArrayType>(CanonicalType);
-}
-
-inline bool Type::isIncompleteArrayType() const {
- return isa<IncompleteArrayType>(CanonicalType);
-}
-
-inline bool Type::isVariableArrayType() const {
- return isa<VariableArrayType>(CanonicalType);
-}
-
-inline bool Type::isArrayParameterType() const {
- return isa<ArrayParameterType>(CanonicalType);
-}
-
-inline bool Type::isDependentSizedArrayType() const {
- return isa<DependentSizedArrayType>(CanonicalType);
-}
-
-inline bool Type::isBuiltinType() const {
- return isa<BuiltinType>(CanonicalType);
-}
-
-inline bool Type::isRecordType() const {
- return isa<RecordType>(CanonicalType);
-}
-
-inline bool Type::isEnumeralType() const {
- return isa<EnumType>(CanonicalType);
-}
-
-inline bool Type::isAnyComplexType() const {
- return isa<ComplexType>(CanonicalType);
-}
-
-inline bool Type::isVectorType() const {
- return isa<VectorType>(CanonicalType);
-}
-
-inline bool Type::isExtVectorType() const {
- return isa<ExtVectorType>(CanonicalType);
-}
-
-inline bool Type::isExtVectorBoolType() const {
- if (!isExtVectorType())
- return false;
- return cast<ExtVectorType>(CanonicalType)->getElementType()->isBooleanType();
-}
-
-inline bool Type::isSubscriptableVectorType() const {
- return isVectorType() || isSveVLSBuiltinType();
-}
-
-inline bool Type::isMatrixType() const {
- return isa<MatrixType>(CanonicalType);
-}
-
-inline bool Type::isConstantMatrixType() const {
- return isa<ConstantMatrixType>(CanonicalType);
-}
-
-inline bool Type::isDependentAddressSpaceType() const {
- return isa<DependentAddressSpaceType>(CanonicalType);
-}
-
-inline bool Type::isObjCObjectPointerType() const {
- return isa<ObjCObjectPointerType>(CanonicalType);
-}
-
-inline bool Type::isObjCObjectType() const {
- return isa<ObjCObjectType>(CanonicalType);
-}
-
-inline bool Type::isObjCObjectOrInterfaceType() const {
- return isa<ObjCInterfaceType>(CanonicalType) ||
- isa<ObjCObjectType>(CanonicalType);
-}
-
-inline bool Type::isAtomicType() const {
- return isa<AtomicType>(CanonicalType);
-}
-
-inline bool Type::isUndeducedAutoType() const {
- return isa<AutoType>(CanonicalType);
-}
-
-inline bool Type::isObjCQualifiedIdType() const {
- if (const auto *OPT = getAs<ObjCObjectPointerType>())
- return OPT->isObjCQualifiedIdType();
- return false;
-}
-
-inline bool Type::isObjCQualifiedClassType() const {
- if (const auto *OPT = getAs<ObjCObjectPointerType>())
- return OPT->isObjCQualifiedClassType();
- return false;
-}
-
-inline bool Type::isObjCIdType() const {
- if (const auto *OPT = getAs<ObjCObjectPointerType>())
- return OPT->isObjCIdType();
- return false;
-}
-
-inline bool Type::isObjCClassType() const {
- if (const auto *OPT = getAs<ObjCObjectPointerType>())
- return OPT->isObjCClassType();
- return false;
-}
-
-inline bool Type::isObjCSelType() const {
- if (const auto *OPT = getAs<PointerType>())
- return OPT->getPointeeType()->isSpecificBuiltinType(BuiltinType::ObjCSel);
- return false;
-}
-
-inline bool Type::isObjCBuiltinType() const {
- return isObjCIdType() || isObjCClassType() || isObjCSelType();
-}
-
-inline bool Type::isDecltypeType() const {
- return isa<DecltypeType>(this);
-}
-
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
- inline bool Type::is##Id##Type() const { \
- return isSpecificBuiltinType(BuiltinType::Id); \
- }
-#include "clang/Basic/OpenCLImageTypes.def"
-
-inline bool Type::isSamplerT() const {
- return isSpecificBuiltinType(BuiltinType::OCLSampler);
-}
-
-inline bool Type::isEventT() const {
- return isSpecificBuiltinType(BuiltinType::OCLEvent);
-}
-
-inline bool Type::isClkEventT() const {
- return isSpecificBuiltinType(BuiltinType::OCLClkEvent);
-}
-
-inline bool Type::isQueueT() const {
- return isSpecificBuiltinType(BuiltinType::OCLQueue);
-}
-
-inline bool Type::isReserveIDT() const {
- return isSpecificBuiltinType(BuiltinType::OCLReserveID);
-}
-
-inline bool Type::isImageType() const {
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) is##Id##Type() ||
- return
-#include "clang/Basic/OpenCLImageTypes.def"
- false; // end boolean or operation
-}
-
-inline bool Type::isPipeType() const {
- return isa<PipeType>(CanonicalType);
-}
-
-inline bool Type::isBitIntType() const {
- return isa<BitIntType>(CanonicalType);
-}
-
-#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
- inline bool Type::is##Id##Type() const { \
- return isSpecificBuiltinType(BuiltinType::Id); \
- }
-#include "clang/Basic/OpenCLExtensionTypes.def"
-
-inline bool Type::isOCLIntelSubgroupAVCType() const {
-#define INTEL_SUBGROUP_AVC_TYPE(ExtType, Id) \
- isOCLIntelSubgroupAVC##Id##Type() ||
- return
-#include "clang/Basic/OpenCLExtensionTypes.def"
- false; // end of boolean or operation
-}
-
-inline bool Type::isOCLExtOpaqueType() const {
-#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) is##Id##Type() ||
- return
-#include "clang/Basic/OpenCLExtensionTypes.def"
- false; // end of boolean or operation
-}
-
-inline bool Type::isOpenCLSpecificType() const {
- return isSamplerT() || isEventT() || isImageType() || isClkEventT() ||
- isQueueT() || isReserveIDT() || isPipeType() || isOCLExtOpaqueType();
-}
-
-#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
- inline bool Type::is##Id##Type() const { \
- return isSpecificBuiltinType(BuiltinType::Id); \
- }
-#include "clang/Basic/HLSLIntangibleTypes.def"
-
-inline bool Type::isHLSLBuiltinIntangibleType() const {
-#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) is##Id##Type() ||
- return
-#include "clang/Basic/HLSLIntangibleTypes.def"
- false;
-}
-
-inline bool Type::isHLSLSpecificType() const {
- return isHLSLBuiltinIntangibleType() || isHLSLAttributedResourceType() ||
- isHLSLInlineSpirvType();
-}
-
-inline bool Type::isHLSLAttributedResourceType() const {
- return isa<HLSLAttributedResourceType>(this);
-}
-
-inline bool Type::isHLSLInlineSpirvType() const {
- return isa<HLSLInlineSpirvType>(this);
-}
-
-inline bool Type::isTemplateTypeParmType() const {
- return isa<TemplateTypeParmType>(CanonicalType);
-}
-
-inline bool Type::isSpecificBuiltinType(unsigned K) const {
- if (const BuiltinType *BT = getAs<BuiltinType>()) {
- return BT->getKind() == static_cast<BuiltinType::Kind>(K);
- }
- return false;
-}
-
-inline bool Type::isPlaceholderType() const {
- if (const auto *BT = dyn_cast<BuiltinType>(this))
- return BT->isPlaceholderType();
- return false;
-}
-
-inline const BuiltinType *Type::getAsPlaceholderType() const {
- if (const auto *BT = dyn_cast<BuiltinType>(this))
- if (BT->isPlaceholderType())
- return BT;
- return nullptr;
-}
-
-inline bool Type::isSpecificPlaceholderType(unsigned K) const {
- assert(BuiltinType::isPlaceholderTypeKind((BuiltinType::Kind) K));
- return isSpecificBuiltinType(K);
-}
-
-inline bool Type::isNonOverloadPlaceholderType() const {
- if (const auto *BT = dyn_cast<BuiltinType>(this))
- return BT->isNonOverloadPlaceholderType();
- return false;
-}
-
-inline bool Type::isVoidType() const {
- return isSpecificBuiltinType(BuiltinType::Void);
-}
-
-inline bool Type::isHalfType() const {
- // FIXME: Should we allow complex __fp16? Probably not.
- return isSpecificBuiltinType(BuiltinType::Half);
-}
-
-inline bool Type::isFloat16Type() const {
- return isSpecificBuiltinType(BuiltinType::Float16);
-}
-
-inline bool Type::isFloat32Type() const {
- return isSpecificBuiltinType(BuiltinType::Float);
-}
-
-inline bool Type::isDoubleType() const {
- return isSpecificBuiltinType(BuiltinType::Double);
-}
-
-inline bool Type::isBFloat16Type() const {
- return isSpecificBuiltinType(BuiltinType::BFloat16);
-}
-
-inline bool Type::isMFloat8Type() const {
- return isSpecificBuiltinType(BuiltinType::MFloat8);
-}
-
-inline bool Type::isFloat128Type() const {
- return isSpecificBuiltinType(BuiltinType::Float128);
-}
-
-inline bool Type::isIbm128Type() const {
- return isSpecificBuiltinType(BuiltinType::Ibm128);
-}
-
-inline bool Type::isNullPtrType() const {
- return isSpecificBuiltinType(BuiltinType::NullPtr);
-}
-
-bool IsEnumDeclComplete(EnumDecl *);
-bool IsEnumDeclScoped(EnumDecl *);
-
-inline bool Type::isIntegerType() const {
- if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->isInteger();
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
- // Incomplete enum types are not treated as integer types.
- // FIXME: In C++, enum types are never integer types.
- return IsEnumDeclComplete(ET->getDecl()) &&
- !IsEnumDeclScoped(ET->getDecl());
- }
- return isBitIntType();
-}
-
-inline bool Type::isFixedPointType() const {
- if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
- return BT->getKind() >= BuiltinType::ShortAccum &&
- BT->getKind() <= BuiltinType::SatULongFract;
- }
- return false;
-}
-
-inline bool Type::isFixedPointOrIntegerType() const {
- return isFixedPointType() || isIntegerType();
-}
-
-inline bool Type::isConvertibleToFixedPointType() const {
- return isRealFloatingType() || isFixedPointOrIntegerType();
-}
-
-inline bool Type::isSaturatedFixedPointType() const {
- if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
- return BT->getKind() >= BuiltinType::SatShortAccum &&
- BT->getKind() <= BuiltinType::SatULongFract;
- }
- return false;
-}
-
-inline bool Type::isUnsaturatedFixedPointType() const {
- return isFixedPointType() && !isSaturatedFixedPointType();
-}
-
-inline bool Type::isSignedFixedPointType() const {
- if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
- return ((BT->getKind() >= BuiltinType::ShortAccum &&
- BT->getKind() <= BuiltinType::LongAccum) ||
- (BT->getKind() >= BuiltinType::ShortFract &&
- BT->getKind() <= BuiltinType::LongFract) ||
- (BT->getKind() >= BuiltinType::SatShortAccum &&
- BT->getKind() <= BuiltinType::SatLongAccum) ||
- (BT->getKind() >= BuiltinType::SatShortFract &&
- BT->getKind() <= BuiltinType::SatLongFract));
- }
- return false;
-}
-
-inline bool Type::isUnsignedFixedPointType() const {
- return isFixedPointType() && !isSignedFixedPointType();
-}
-
-inline bool Type::isScalarType() const {
- if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() > BuiltinType::Void &&
- BT->getKind() <= BuiltinType::NullPtr;
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
- // Enums are scalar types, but only if they are defined. Incomplete enums
- // are not treated as scalar types.
- return IsEnumDeclComplete(ET->getDecl());
- return isa<PointerType>(CanonicalType) ||
- isa<BlockPointerType>(CanonicalType) ||
- isa<MemberPointerType>(CanonicalType) ||
- isa<ComplexType>(CanonicalType) ||
- isa<ObjCObjectPointerType>(CanonicalType) ||
- isBitIntType();
-}
-
-inline bool Type::isIntegralOrEnumerationType() const {
- if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->isInteger();
-
- // Check for a complete enum type; incomplete enum types are not properly an
- // enumeration type in the sense required here.
- if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
- return IsEnumDeclComplete(ET->getDecl());
-
- return isBitIntType();
-}
-
-inline bool Type::isBooleanType() const {
- if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() == BuiltinType::Bool;
- return false;
-}
-
-inline bool Type::isUndeducedType() const {
- auto *DT = getContainedDeducedType();
- return DT && !DT->isDeduced();
-}
-
-/// Determines whether this is a type for which one can define
-/// an overloaded operator.
-inline bool Type::isOverloadableType() const {
- if (!isDependentType())
- return isRecordType() || isEnumeralType();
- return !isArrayType() && !isFunctionType() && !isAnyPointerType() &&
- !isMemberPointerType();
-}
-
-/// Determines whether this type is written as a typedef-name.
-inline bool Type::isTypedefNameType() const {
- if (getAs<TypedefType>())
- return true;
- if (auto *TST = getAs<TemplateSpecializationType>())
- return TST->isTypeAlias();
- return false;
-}
-
-/// Determines whether this type can decay to a pointer type.
-inline bool Type::canDecayToPointerType() const {
- return isFunctionType() || (isArrayType() && !isArrayParameterType());
-}
-
-inline bool Type::hasPointerRepresentation() const {
- return (isPointerType() || isReferenceType() || isBlockPointerType() ||
- isObjCObjectPointerType() || isNullPtrType());
-}
-
-inline bool Type::hasObjCPointerRepresentation() const {
- return isObjCObjectPointerType();
-}
-
-inline const Type *Type::getBaseElementTypeUnsafe() const {
- const Type *type = this;
- while (const ArrayType *arrayType = type->getAsArrayTypeUnsafe())
- type = arrayType->getElementType().getTypePtr();
- return type;
-}
-
-inline const Type *Type::getPointeeOrArrayElementType() const {
- const Type *type = this;
- if (type->isAnyPointerType())
- return type->getPointeeType().getTypePtr();
- else if (type->isArrayType())
- return type->getBaseElementTypeUnsafe();
- return type;
-}
-
-/// Insertion operator for partial diagnostics. This allows sending address
-/// spaces into a diagnostic with <<.
-inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
- LangAS AS) {
- PD.AddTaggedVal(llvm::to_underlying(AS),
- DiagnosticsEngine::ArgumentKind::ak_addrspace);
- return PD;
-}
-
-/// Insertion operator for partial diagnostics. This allows sending Qualifiers
-/// into a diagnostic with <<.
-inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
- Qualifiers Q) {
- PD.AddTaggedVal(Q.getAsOpaqueValue(),
- DiagnosticsEngine::ArgumentKind::ak_qual);
- return PD;
-}
-
-/// Insertion operator for partial diagnostics. This allows sending QualTypes
-/// into a diagnostic with <<.
-inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
- QualType T) {
- PD.AddTaggedVal(reinterpret_cast<uint64_t>(T.getAsOpaquePtr()),
- DiagnosticsEngine::ak_qualtype);
- return PD;
-}
-
-// Helper class template that is used by Type::getAs to ensure that one does
-// not try to look through a qualified type to get to an array type.
-template <typename T>
-using TypeIsArrayType =
- std::integral_constant<bool, std::is_same<T, ArrayType>::value ||
- std::is_base_of<ArrayType, T>::value>;
-
-// Member template getAs<specific type>.
-template <typename T> const T *Type::getAs() const {
- static_assert(!TypeIsArrayType<T>::value,
- "ArrayType cannot be used with getAs!");
-
- // If this is directly a T type, return it.
- if (const auto *Ty = dyn_cast<T>(this))
- return Ty;
-
- // If the canonical form of this type isn't the right kind, reject it.
- if (!isa<T>(CanonicalType))
- return nullptr;
-
- // If this is a typedef for the type, strip the typedef off without
- // losing all typedef information.
- return cast<T>(getUnqualifiedDesugaredType());
-}
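-
-// Usage sketch (illustrative; 'QT' is assumed to be in scope): getAs<> looks
-// through sugar such as typedefs, so this succeeds even when the pointer type
-// was written via a typedef name:
-//   if (const auto *PT = QT->getAs<PointerType>())
-//     QualType Pointee = PT->getPointeeType();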
-
-template <typename T> const T *Type::getAsAdjusted() const {
- static_assert(!TypeIsArrayType<T>::value, "ArrayType cannot be used with getAsAdjusted!");
-
- // If this is directly a T type, return it.
- if (const auto *Ty = dyn_cast<T>(this))
- return Ty;
-
- // If the canonical form of this type isn't the right kind, reject it.
- if (!isa<T>(CanonicalType))
- return nullptr;
-
- // Strip off type adjustments that do not modify the underlying nature of the
- // type.
- const Type *Ty = this;
- while (Ty) {
- if (const auto *A = dyn_cast<AttributedType>(Ty))
- Ty = A->getModifiedType().getTypePtr();
- else if (const auto *A = dyn_cast<BTFTagAttributedType>(Ty))
- Ty = A->getWrappedType().getTypePtr();
- else if (const auto *A = dyn_cast<HLSLAttributedResourceType>(Ty))
- Ty = A->getWrappedType().getTypePtr();
- else if (const auto *E = dyn_cast<ElaboratedType>(Ty))
- Ty = E->desugar().getTypePtr();
- else if (const auto *P = dyn_cast<ParenType>(Ty))
- Ty = P->desugar().getTypePtr();
- else if (const auto *A = dyn_cast<AdjustedType>(Ty))
- Ty = A->desugar().getTypePtr();
- else if (const auto *M = dyn_cast<MacroQualifiedType>(Ty))
- Ty = M->desugar().getTypePtr();
- else
- break;
- }
-
- // Just because the canonical type is correct does not mean we can use cast<>,
- // since we may not have stripped off all the sugar down to the base type.
- return dyn_cast<T>(Ty);
-}
-
-inline const ArrayType *Type::getAsArrayTypeUnsafe() const {
- // If this is directly an array type, return it.
- if (const auto *arr = dyn_cast<ArrayType>(this))
- return arr;
-
- // If the canonical form of this type isn't the right kind, reject it.
- if (!isa<ArrayType>(CanonicalType))
- return nullptr;
-
- // If this is a typedef for the type, strip the typedef off without
- // losing all typedef information.
- return cast<ArrayType>(getUnqualifiedDesugaredType());
-}
-
-template <typename T> const T *Type::castAs() const {
- static_assert(!TypeIsArrayType<T>::value,
- "ArrayType cannot be used with castAs!");
-
- if (const auto *ty = dyn_cast<T>(this)) return ty;
- assert(isa<T>(CanonicalType));
- return cast<T>(getUnqualifiedDesugaredType());
-}
-
-inline const ArrayType *Type::castAsArrayTypeUnsafe() const {
- assert(isa<ArrayType>(CanonicalType));
- if (const auto *arr = dyn_cast<ArrayType>(this)) return arr;
- return cast<ArrayType>(getUnqualifiedDesugaredType());
-}
-
-DecayedType::DecayedType(QualType OriginalType, QualType DecayedPtr,
- QualType CanonicalPtr)
- : AdjustedType(Decayed, OriginalType, DecayedPtr, CanonicalPtr) {
-#ifndef NDEBUG
- QualType Adjusted = getAdjustedType();
- (void)AttributedType::stripOuterNullability(Adjusted);
- assert(isa<PointerType>(Adjusted));
-#endif
-}
-
-QualType DecayedType::getPointeeType() const {
- QualType Decayed = getDecayedType();
- (void)AttributedType::stripOuterNullability(Decayed);
- return cast<PointerType>(Decayed)->getPointeeType();
-}
-
-// Get the decimal string representation of a fixed point type, represented
-// as a scaled integer.
-// TODO: At some point, we should change the arguments to accept an
-// APFixedPoint instead of an APSInt and scale.
-void FixedPointValueToString(SmallVectorImpl<char> &Str, llvm::APSInt Val,
- unsigned Scale);
-
-inline FunctionEffectsRef FunctionEffectsRef::get(QualType QT) {
- const Type *TypePtr = QT.getTypePtr();
- while (true) {
- if (QualType Pointee = TypePtr->getPointeeType(); !Pointee.isNull())
- TypePtr = Pointee.getTypePtr();
- else if (TypePtr->isArrayType())
- TypePtr = TypePtr->getBaseElementTypeUnsafe();
- else
- break;
- }
- if (const auto *FPT = TypePtr->getAs<FunctionProtoType>())
- return FPT->getFunctionEffects();
- return {};
-}
-
} // namespace clang
#endif // LLVM_CLANG_AST_TYPE_H
diff --git a/clang/include/clang/AST/TypeBase.h b/clang/include/clang/AST/TypeBase.h
new file mode 100644
index 0000000..db2ab04
--- /dev/null
+++ b/clang/include/clang/AST/TypeBase.h
@@ -0,0 +1,9281 @@
+//===- TypeBase.h - C Language Family Type Representation -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// C Language Family Type Representation
+///
+/// This file defines the clang::Type interface and subclasses, used to
+/// represent types for languages in the C family.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_TYPE_BASE_H
+#define LLVM_CLANG_AST_TYPE_BASE_H
+
+#include "clang/AST/DependenceFlags.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
+#include "clang/AST/TemplateName.h"
+#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/AttrKinds.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/ExceptionSpecificationType.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/Linkage.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/PointerAuthOptions.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/Visibility.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/STLForwardCompat.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DXILABI.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include "llvm/Support/TrailingObjects.h"
+#include "llvm/Support/type_traits.h"
+#include <bitset>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+namespace clang {
+
+class BTFTypeTagAttr;
+class ExtQuals;
+class QualType;
+class ConceptDecl;
+class ValueDecl;
+class TagDecl;
+class TemplateParameterList;
+class Type;
+class Attr;
+
+enum {
+ TypeAlignmentInBits = 4,
+ TypeAlignment = 1 << TypeAlignmentInBits
+};
+
+namespace serialization {
+ template <class T> class AbstractTypeReader;
+ template <class T> class AbstractTypeWriter;
+}
+
+} // namespace clang
+
+namespace llvm {
+
+ template <typename T>
+ struct PointerLikeTypeTraits;
+ template<>
+ struct PointerLikeTypeTraits< ::clang::Type*> {
+ static inline void *getAsVoidPointer(::clang::Type *P) { return P; }
+
+ static inline ::clang::Type *getFromVoidPointer(void *P) {
+ return static_cast< ::clang::Type*>(P);
+ }
+
+ static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits;
+ };
+
+ template<>
+ struct PointerLikeTypeTraits< ::clang::ExtQuals*> {
+ static inline void *getAsVoidPointer(::clang::ExtQuals *P) { return P; }
+
+ static inline ::clang::ExtQuals *getFromVoidPointer(void *P) {
+ return static_cast< ::clang::ExtQuals*>(P);
+ }
+
+ static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits;
+ };
+
+} // namespace llvm
+
+namespace clang {
+
+class ASTContext;
+template <typename> class CanQual;
+class CXXRecordDecl;
+class DeclContext;
+class EnumDecl;
+class Expr;
+class ExtQualsTypeCommonBase;
+class FunctionDecl;
+class FunctionEffectsRef;
+class FunctionEffectKindSet;
+class FunctionEffectSet;
+class IdentifierInfo;
+class NamedDecl;
+class ObjCInterfaceDecl;
+class ObjCProtocolDecl;
+class ObjCTypeParamDecl;
+struct PrintingPolicy;
+class RecordDecl;
+class Stmt;
+class TagDecl;
+class ClassTemplateDecl;
+class TemplateArgument;
+class TemplateArgumentListInfo;
+class TemplateArgumentLoc;
+class TemplateTypeParmDecl;
+class TypedefNameDecl;
+class UnresolvedUsingTypenameDecl;
+class UsingShadowDecl;
+
+using CanQualType = CanQual<Type>;
+
+// Provide forward declarations for all of the *Type classes.
+#define TYPE(Class, Base) class Class##Type;
+#include "clang/AST/TypeNodes.inc"
+
+/// Pointer-authentication qualifiers.
+class PointerAuthQualifier {
+ enum : uint32_t {
+ EnabledShift = 0,
+ EnabledBits = 1,
+ EnabledMask = 1 << EnabledShift,
+ AddressDiscriminatedShift = EnabledShift + EnabledBits,
+ AddressDiscriminatedBits = 1,
+ AddressDiscriminatedMask = 1 << AddressDiscriminatedShift,
+ AuthenticationModeShift =
+ AddressDiscriminatedShift + AddressDiscriminatedBits,
+ AuthenticationModeBits = 2,
+ AuthenticationModeMask = ((1 << AuthenticationModeBits) - 1)
+ << AuthenticationModeShift,
+ IsaPointerShift = AuthenticationModeShift + AuthenticationModeBits,
+ IsaPointerBits = 1,
+ IsaPointerMask = ((1 << IsaPointerBits) - 1) << IsaPointerShift,
+ AuthenticatesNullValuesShift = IsaPointerShift + IsaPointerBits,
+ AuthenticatesNullValuesBits = 1,
+ AuthenticatesNullValuesMask = ((1 << AuthenticatesNullValuesBits) - 1)
+ << AuthenticatesNullValuesShift,
+ KeyShift = AuthenticatesNullValuesShift + AuthenticatesNullValuesBits,
+ KeyBits = 10,
+ KeyMask = ((1 << KeyBits) - 1) << KeyShift,
+ DiscriminatorShift = KeyShift + KeyBits,
+ DiscriminatorBits = 16,
+ DiscriminatorMask = ((1u << DiscriminatorBits) - 1) << DiscriminatorShift,
+ };
+
+ // bits: |0 |1 |2..3 |4 |
+ // |Enabled|Address|AuthenticationMode|ISA pointer|
+ // bits: |5 |6..15| 16...31 |
+ // |AuthenticatesNull|Key |Discriminator|
+ uint32_t Data = 0;
+
+ // The following static assertions check that each of the 32 bits is present
+ // exactly in one of the constants.
+ static_assert((EnabledBits + AddressDiscriminatedBits +
+ AuthenticationModeBits + IsaPointerBits +
+ AuthenticatesNullValuesBits + KeyBits + DiscriminatorBits) ==
+ 32,
+ "PointerAuthQualifier should be exactly 32 bits");
+ static_assert((EnabledMask + AddressDiscriminatedMask +
+ AuthenticationModeMask + IsaPointerMask +
+ AuthenticatesNullValuesMask + KeyMask + DiscriminatorMask) ==
+ 0xFFFFFFFF,
+ "All masks should cover the entire bits");
+ static_assert((EnabledMask ^ AddressDiscriminatedMask ^
+ AuthenticationModeMask ^ IsaPointerMask ^
+ AuthenticatesNullValuesMask ^ KeyMask ^ DiscriminatorMask) ==
+ 0xFFFFFFFF,
+ "All masks should cover the entire bits");
+
+ PointerAuthQualifier(unsigned Key, bool IsAddressDiscriminated,
+ unsigned ExtraDiscriminator,
+ PointerAuthenticationMode AuthenticationMode,
+ bool IsIsaPointer, bool AuthenticatesNullValues)
+ : Data(EnabledMask |
+ (IsAddressDiscriminated
+ ? llvm::to_underlying(AddressDiscriminatedMask)
+ : 0) |
+ (Key << KeyShift) |
+ (llvm::to_underlying(AuthenticationMode)
+ << AuthenticationModeShift) |
+ (ExtraDiscriminator << DiscriminatorShift) |
+ (IsIsaPointer << IsaPointerShift) |
+ (AuthenticatesNullValues << AuthenticatesNullValuesShift)) {
+ assert(Key <= KeyNoneInternal);
+ assert(ExtraDiscriminator <= MaxDiscriminator);
+ assert((Data == 0) ==
+ (getAuthenticationMode() == PointerAuthenticationMode::None));
+ }
+
+public:
+ enum {
+ KeyNoneInternal = (1u << KeyBits) - 1,
+
+ /// The maximum supported pointer-authentication key.
+ MaxKey = KeyNoneInternal - 1,
+
+ /// The maximum supported pointer-authentication discriminator.
+ MaxDiscriminator = (1u << DiscriminatorBits) - 1
+ };
+
+public:
+ PointerAuthQualifier() = default;
+
+ static PointerAuthQualifier
+ Create(unsigned Key, bool IsAddressDiscriminated, unsigned ExtraDiscriminator,
+ PointerAuthenticationMode AuthenticationMode, bool IsIsaPointer,
+ bool AuthenticatesNullValues) {
+ if (Key == PointerAuthKeyNone)
+ Key = KeyNoneInternal;
+ assert(Key <= KeyNoneInternal && "out-of-range key value");
+ return PointerAuthQualifier(Key, IsAddressDiscriminated, ExtraDiscriminator,
+ AuthenticationMode, IsIsaPointer,
+ AuthenticatesNullValues);
+ }
+
+ bool isPresent() const {
+ assert((Data == 0) ==
+ (getAuthenticationMode() == PointerAuthenticationMode::None));
+ return Data != 0;
+ }
+
+ explicit operator bool() const { return isPresent(); }
+
+ unsigned getKey() const {
+ assert(isPresent());
+ return (Data & KeyMask) >> KeyShift;
+ }
+
+ bool hasKeyNone() const { return isPresent() && getKey() == KeyNoneInternal; }
+
+ bool isAddressDiscriminated() const {
+ assert(isPresent());
+ return (Data & AddressDiscriminatedMask) >> AddressDiscriminatedShift;
+ }
+
+ unsigned getExtraDiscriminator() const {
+ assert(isPresent());
+ return (Data >> DiscriminatorShift);
+ }
+
+ PointerAuthenticationMode getAuthenticationMode() const {
+ return PointerAuthenticationMode((Data & AuthenticationModeMask) >>
+ AuthenticationModeShift);
+ }
+
+ bool isIsaPointer() const {
+ assert(isPresent());
+ return (Data & IsaPointerMask) >> IsaPointerShift;
+ }
+
+ bool authenticatesNullValues() const {
+ assert(isPresent());
+ return (Data & AuthenticatesNullValuesMask) >> AuthenticatesNullValuesShift;
+ }
+
+ PointerAuthQualifier withoutKeyNone() const {
+ return hasKeyNone() ? PointerAuthQualifier() : *this;
+ }
+
+ friend bool operator==(PointerAuthQualifier Lhs, PointerAuthQualifier Rhs) {
+ return Lhs.Data == Rhs.Data;
+ }
+ friend bool operator!=(PointerAuthQualifier Lhs, PointerAuthQualifier Rhs) {
+ return Lhs.Data != Rhs.Data;
+ }
+
+ bool isEquivalent(PointerAuthQualifier Other) const {
+ return withoutKeyNone() == Other.withoutKeyNone();
+ }
+
+ uint32_t getAsOpaqueValue() const { return Data; }
+
+ // Deserialize pointer-auth qualifiers from an opaque representation.
+ static PointerAuthQualifier fromOpaqueValue(uint32_t Opaque) {
+ PointerAuthQualifier Result;
+ Result.Data = Opaque;
+ assert((Result.Data == 0) ==
+ (Result.getAuthenticationMode() == PointerAuthenticationMode::None));
+ return Result;
+ }
+
+ std::string getAsString() const;
+ std::string getAsString(const PrintingPolicy &Policy) const;
+
+ bool isEmptyWhenPrinted(const PrintingPolicy &Policy) const;
+ void print(raw_ostream &OS, const PrintingPolicy &Policy) const;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Data); }
+};
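+
+// Illustrative usage sketch for the factory above; the key, mode, and
+// discriminator are arbitrary example values, not defaults of this header:
+//
+//   PointerAuthQualifier PAQ = PointerAuthQualifier::Create(
+//       /*Key=*/0, /*IsAddressDiscriminated=*/true,
+//       /*ExtraDiscriminator=*/0x1234,
+//       PointerAuthenticationMode::SignAndAuth,
+//       /*IsIsaPointer=*/false, /*AuthenticatesNullValues=*/false);
+//   assert(PAQ.isPresent() && PAQ.isAddressDiscriminated());
+//   assert(PAQ.getExtraDiscriminator() == 0x1234);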
+
+/// The collection of all the type qualifiers we support.
+/// Clang supports several independent qualifiers:
+/// * C99: const, volatile, and restrict
+/// * MS: __unaligned
+/// * Embedded C (TR18037): address spaces
+/// * Objective-C: the GC attributes (none, weak, or strong) and the ARC
+/// lifetime qualifiers
+/// * Pointer authentication: the __ptrauth qualifier
+class Qualifiers {
+public:
+ Qualifiers() = default;
+ enum TQ : uint64_t {
+ // NOTE: These flags must be kept in sync with DeclSpec::TQ.
+ Const = 0x1,
+ Restrict = 0x2,
+ Volatile = 0x4,
+ CVRMask = Const | Volatile | Restrict
+ };
+
+ enum GC {
+ GCNone = 0,
+ Weak,
+ Strong
+ };
+
+ enum ObjCLifetime {
+ /// There is no lifetime qualification on this type.
+ OCL_None,
+
+ /// This object can be modified without requiring retains or
+ /// releases.
+ OCL_ExplicitNone,
+
+ /// Assigning into this object requires the old value to be
+ /// released and the new value to be retained. The timing of the
+ /// release of the old value is inexact: it may be moved to
+ /// immediately after the last known point where the value is
+ /// live.
+ OCL_Strong,
+
+ /// Reading or writing from this object requires a barrier call.
+ OCL_Weak,
+
+ /// Assigning into this object requires a lifetime extension.
+ OCL_Autoreleasing
+ };
+
+ enum : uint64_t {
+ /// The maximum supported address space number.
+ /// 23 bits should be enough for anyone.
+ MaxAddressSpace = 0x7fffffu,
+
+ /// The width of the "fast" qualifier mask.
+ FastWidth = 3,
+
+ /// The fast qualifier mask.
+ FastMask = (1 << FastWidth) - 1
+ };
+
+ /// Returns the common set of qualifiers while removing them from
+ /// the given sets.
+ static Qualifiers removeCommonQualifiers(Qualifiers &L, Qualifiers &R) {
+ Qualifiers Q;
+ PointerAuthQualifier LPtrAuth = L.getPointerAuth();
+ if (LPtrAuth.isPresent() &&
+ LPtrAuth.getKey() != PointerAuthQualifier::KeyNoneInternal &&
+ LPtrAuth == R.getPointerAuth()) {
+ Q.setPointerAuth(LPtrAuth);
+ PointerAuthQualifier Empty;
+ L.setPointerAuth(Empty);
+ R.setPointerAuth(Empty);
+ }
+
+ // If both are only CVR-qualified, bit operations are sufficient.
+ if (!(L.Mask & ~CVRMask) && !(R.Mask & ~CVRMask)) {
+ Q.Mask = L.Mask & R.Mask;
+ L.Mask &= ~Q.Mask;
+ R.Mask &= ~Q.Mask;
+ return Q;
+ }
+
+ unsigned CommonCVR = L.getCVRQualifiers() & R.getCVRQualifiers();
+ Q.addCVRQualifiers(CommonCVR);
+ L.removeCVRQualifiers(CommonCVR);
+ R.removeCVRQualifiers(CommonCVR);
+
+ if (L.getObjCGCAttr() == R.getObjCGCAttr()) {
+ Q.setObjCGCAttr(L.getObjCGCAttr());
+ L.removeObjCGCAttr();
+ R.removeObjCGCAttr();
+ }
+
+ if (L.getObjCLifetime() == R.getObjCLifetime()) {
+ Q.setObjCLifetime(L.getObjCLifetime());
+ L.removeObjCLifetime();
+ R.removeObjCLifetime();
+ }
+
+ if (L.getAddressSpace() == R.getAddressSpace()) {
+ Q.setAddressSpace(L.getAddressSpace());
+ L.removeAddressSpace();
+ R.removeAddressSpace();
+ }
+ return Q;
+ }
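+
+ // Illustrative sketch of the factoring above ('Const' and 'Volatile' are
+ // this class's TQ enumerators):
+ //
+ //   Qualifiers L = Qualifiers::fromCVRMask(Const | Volatile);
+ //   Qualifiers R = Qualifiers::fromCVRMask(Const);
+ //   Qualifiers Common = removeCommonQualifiers(L, R);
+ //   // Common is 'const'; L retains only 'volatile'; R is now empty.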
+
+ static Qualifiers fromFastMask(unsigned Mask) {
+ Qualifiers Qs;
+ Qs.addFastQualifiers(Mask);
+ return Qs;
+ }
+
+ static Qualifiers fromCVRMask(unsigned CVR) {
+ Qualifiers Qs;
+ Qs.addCVRQualifiers(CVR);
+ return Qs;
+ }
+
+ static Qualifiers fromCVRUMask(unsigned CVRU) {
+ Qualifiers Qs;
+ Qs.addCVRUQualifiers(CVRU);
+ return Qs;
+ }
+
+ // Deserialize qualifiers from an opaque representation.
+ static Qualifiers fromOpaqueValue(uint64_t opaque) {
+ Qualifiers Qs;
+ Qs.Mask = opaque;
+ return Qs;
+ }
+
+ // Serialize these qualifiers into an opaque representation.
+ uint64_t getAsOpaqueValue() const { return Mask; }
+
+ bool hasConst() const { return Mask & Const; }
+ bool hasOnlyConst() const { return Mask == Const; }
+ void removeConst() { Mask &= ~Const; }
+ void addConst() { Mask |= Const; }
+ Qualifiers withConst() const {
+ Qualifiers Qs = *this;
+ Qs.addConst();
+ return Qs;
+ }
+
+ bool hasVolatile() const { return Mask & Volatile; }
+ bool hasOnlyVolatile() const { return Mask == Volatile; }
+ void removeVolatile() { Mask &= ~Volatile; }
+ void addVolatile() { Mask |= Volatile; }
+ Qualifiers withVolatile() const {
+ Qualifiers Qs = *this;
+ Qs.addVolatile();
+ return Qs;
+ }
+
+ bool hasRestrict() const { return Mask & Restrict; }
+ bool hasOnlyRestrict() const { return Mask == Restrict; }
+ void removeRestrict() { Mask &= ~Restrict; }
+ void addRestrict() { Mask |= Restrict; }
+ Qualifiers withRestrict() const {
+ Qualifiers Qs = *this;
+ Qs.addRestrict();
+ return Qs;
+ }
+
+ bool hasCVRQualifiers() const { return getCVRQualifiers(); }
+ unsigned getCVRQualifiers() const { return Mask & CVRMask; }
+ unsigned getCVRUQualifiers() const { return Mask & (CVRMask | UMask); }
+
+ void setCVRQualifiers(unsigned mask) {
+ assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits");
+ Mask = (Mask & ~CVRMask) | mask;
+ }
+ void removeCVRQualifiers(unsigned mask) {
+ assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits");
+ Mask &= ~static_cast<uint64_t>(mask);
+ }
+ void removeCVRQualifiers() {
+ removeCVRQualifiers(CVRMask);
+ }
+ void addCVRQualifiers(unsigned mask) {
+ assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits");
+ Mask |= mask;
+ }
+ void addCVRUQualifiers(unsigned mask) {
+ assert(!(mask & ~CVRMask & ~UMask) && "bitmask contains non-CVRU bits");
+ Mask |= mask;
+ }
+
+ bool hasUnaligned() const { return Mask & UMask; }
+ void setUnaligned(bool flag) {
+ Mask = (Mask & ~UMask) | (flag ? UMask : 0);
+ }
+ void removeUnaligned() { Mask &= ~UMask; }
+ void addUnaligned() { Mask |= UMask; }
+
+ bool hasObjCGCAttr() const { return Mask & GCAttrMask; }
+ GC getObjCGCAttr() const { return GC((Mask & GCAttrMask) >> GCAttrShift); }
+ void setObjCGCAttr(GC type) {
+ Mask = (Mask & ~GCAttrMask) | (type << GCAttrShift);
+ }
+ void removeObjCGCAttr() { setObjCGCAttr(GCNone); }
+ void addObjCGCAttr(GC type) {
+ assert(type);
+ setObjCGCAttr(type);
+ }
+ Qualifiers withoutObjCGCAttr() const {
+ Qualifiers qs = *this;
+ qs.removeObjCGCAttr();
+ return qs;
+ }
+ Qualifiers withoutObjCLifetime() const {
+ Qualifiers qs = *this;
+ qs.removeObjCLifetime();
+ return qs;
+ }
+ Qualifiers withoutAddressSpace() const {
+ Qualifiers qs = *this;
+ qs.removeAddressSpace();
+ return qs;
+ }
+
+ bool hasObjCLifetime() const { return Mask & LifetimeMask; }
+ ObjCLifetime getObjCLifetime() const {
+ return ObjCLifetime((Mask & LifetimeMask) >> LifetimeShift);
+ }
+ void setObjCLifetime(ObjCLifetime type) {
+ Mask = (Mask & ~LifetimeMask) | (type << LifetimeShift);
+ }
+ void removeObjCLifetime() { setObjCLifetime(OCL_None); }
+ void addObjCLifetime(ObjCLifetime type) {
+ assert(type);
+ assert(!hasObjCLifetime());
+ Mask |= (type << LifetimeShift);
+ }
+
+ /// True if the lifetime is neither None nor ExplicitNone.
+ bool hasNonTrivialObjCLifetime() const {
+ ObjCLifetime lifetime = getObjCLifetime();
+ return (lifetime > OCL_ExplicitNone);
+ }
+
+ /// True if the lifetime is either strong or weak.
+ bool hasStrongOrWeakObjCLifetime() const {
+ ObjCLifetime lifetime = getObjCLifetime();
+ return (lifetime == OCL_Strong || lifetime == OCL_Weak);
+ }
+
+ bool hasAddressSpace() const { return Mask & AddressSpaceMask; }
+ LangAS getAddressSpace() const {
+ return static_cast<LangAS>((Mask & AddressSpaceMask) >> AddressSpaceShift);
+ }
+ bool hasTargetSpecificAddressSpace() const {
+ return isTargetAddressSpace(getAddressSpace());
+ }
+ /// Get the address space attribute value to be printed by diagnostics.
+ unsigned getAddressSpaceAttributePrintValue() const {
+ auto Addr = getAddressSpace();
+ // This function is not supposed to be used with language specific
+ // address spaces. If that happens, the diagnostic message should consider
+ // printing the QualType instead of the address space value.
+ assert(Addr == LangAS::Default || hasTargetSpecificAddressSpace());
+ if (Addr != LangAS::Default)
+ return toTargetAddressSpace(Addr);
+ // TODO: The diagnostic messages where Addr may be 0 should be fixed
+ // since it cannot differentiate the situation where 0 denotes the default
+ // address space or user specified __attribute__((address_space(0))).
+ return 0;
+ }
+ void setAddressSpace(LangAS space) {
+ assert((unsigned)space <= MaxAddressSpace);
+ Mask = (Mask & ~AddressSpaceMask) |
+ (static_cast<uint64_t>(space) << AddressSpaceShift);
+ }
+ void removeAddressSpace() { setAddressSpace(LangAS::Default); }
+ void addAddressSpace(LangAS space) {
+ assert(space != LangAS::Default);
+ setAddressSpace(space);
+ }
+
+ bool hasPointerAuth() const { return Mask & PtrAuthMask; }
+ PointerAuthQualifier getPointerAuth() const {
+ return PointerAuthQualifier::fromOpaqueValue(Mask >> PtrAuthShift);
+ }
+ void setPointerAuth(PointerAuthQualifier Q) {
+ Mask = (Mask & ~PtrAuthMask) |
+ (uint64_t(Q.getAsOpaqueValue()) << PtrAuthShift);
+ }
+ void removePointerAuth() { Mask &= ~PtrAuthMask; }
+ void addPointerAuth(PointerAuthQualifier Q) {
+ assert(Q.isPresent());
+ setPointerAuth(Q);
+ }
+
+ // Fast qualifiers are those that can be allocated directly
+ // on a QualType object.
+ bool hasFastQualifiers() const { return getFastQualifiers(); }
+ unsigned getFastQualifiers() const { return Mask & FastMask; }
+ void setFastQualifiers(unsigned mask) {
+ assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits");
+ Mask = (Mask & ~FastMask) | mask;
+ }
+ void removeFastQualifiers(unsigned mask) {
+ assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits");
+ Mask &= ~static_cast<uint64_t>(mask);
+ }
+ void removeFastQualifiers() {
+ removeFastQualifiers(FastMask);
+ }
+ void addFastQualifiers(unsigned mask) {
+ assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits");
+ Mask |= mask;
+ }
+
+ /// Return true if the set contains any qualifiers which require an ExtQuals
+ /// node to be allocated.
+ bool hasNonFastQualifiers() const { return Mask & ~FastMask; }
+ Qualifiers getNonFastQualifiers() const {
+ Qualifiers Quals = *this;
+ Quals.setFastQualifiers(0);
+ return Quals;
+ }
+
+ /// Return true if the set contains any qualifiers.
+ bool hasQualifiers() const { return Mask; }
+ bool empty() const { return !Mask; }
+
+ /// Add the qualifiers from the given set to this set.
+ void addQualifiers(Qualifiers Q) {
+ // If the other set doesn't have any non-boolean qualifiers, just
+ // bit-or it in.
+ if (!(Q.Mask & ~CVRMask))
+ Mask |= Q.Mask;
+ else {
+ Mask |= (Q.Mask & CVRMask);
+ if (Q.hasAddressSpace())
+ addAddressSpace(Q.getAddressSpace());
+ if (Q.hasObjCGCAttr())
+ addObjCGCAttr(Q.getObjCGCAttr());
+ if (Q.hasObjCLifetime())
+ addObjCLifetime(Q.getObjCLifetime());
+ if (Q.hasPointerAuth())
+ addPointerAuth(Q.getPointerAuth());
+ }
+ }
+
+ /// Remove the qualifiers from the given set from this set.
+ void removeQualifiers(Qualifiers Q) {
+ // If the other set doesn't have any non-boolean qualifiers, just
+ // bit-and the inverse in.
+ if (!(Q.Mask & ~CVRMask))
+ Mask &= ~Q.Mask;
+ else {
+ Mask &= ~(Q.Mask & CVRMask);
+ if (getObjCGCAttr() == Q.getObjCGCAttr())
+ removeObjCGCAttr();
+ if (getObjCLifetime() == Q.getObjCLifetime())
+ removeObjCLifetime();
+ if (getAddressSpace() == Q.getAddressSpace())
+ removeAddressSpace();
+ if (getPointerAuth() == Q.getPointerAuth())
+ removePointerAuth();
+ }
+ }
+
+ /// Add the qualifiers from the given set to this set, given that
+ /// they don't conflict.
+ void addConsistentQualifiers(Qualifiers qs) {
+ assert(getAddressSpace() == qs.getAddressSpace() ||
+ !hasAddressSpace() || !qs.hasAddressSpace());
+ assert(getObjCGCAttr() == qs.getObjCGCAttr() ||
+ !hasObjCGCAttr() || !qs.hasObjCGCAttr());
+ assert(getObjCLifetime() == qs.getObjCLifetime() ||
+ !hasObjCLifetime() || !qs.hasObjCLifetime());
+ assert(!hasPointerAuth() || !qs.hasPointerAuth() ||
+ getPointerAuth() == qs.getPointerAuth());
+ Mask |= qs.Mask;
+ }
+
+ /// Returns true if address space A is equal to or a superset of B.
+ /// OpenCL v2.0 defines conversion rules (OpenCL C v2.0 s6.5.5) and a notion
+ /// of overlapping address spaces.
+ /// CL1.1 or CL1.2:
+ /// every address space is a superset of itself.
+ /// CL2.0 adds:
+ /// __generic is a superset of any address space except for __constant.
+ static bool isAddressSpaceSupersetOf(LangAS A, LangAS B,
+ const ASTContext &Ctx) {
+ // A is a superset of B if they match exactly, or if the target-specific
+ // rules say so.
+ return A == B || isTargetAddressSpaceSupersetOf(A, B, Ctx);
+ }
+
+ static bool isTargetAddressSpaceSupersetOf(LangAS A, LangAS B,
+ const ASTContext &Ctx);
+
+ /// Returns true if the address space in these qualifiers is equal to or
+ /// a superset of the address space in the argument qualifiers.
+ bool isAddressSpaceSupersetOf(Qualifiers other, const ASTContext &Ctx) const {
+ return isAddressSpaceSupersetOf(getAddressSpace(), other.getAddressSpace(),
+ Ctx);
+ }
+
+ /// Determines if these qualifiers compatibly include another set.
+ /// Generally this answers the question of whether an object with the other
+ /// qualifiers can be safely used as an object with these qualifiers.
+ bool compatiblyIncludes(Qualifiers other, const ASTContext &Ctx) const {
+ return isAddressSpaceSupersetOf(other, Ctx) &&
+ // ObjC GC qualifiers can match, be added, or be removed, but can't
+ // be changed.
+ (getObjCGCAttr() == other.getObjCGCAttr() || !hasObjCGCAttr() ||
+ !other.hasObjCGCAttr()) &&
+ // Pointer-auth qualifiers must match exactly.
+ getPointerAuth() == other.getPointerAuth() &&
+ // ObjC lifetime qualifiers must match exactly.
+ getObjCLifetime() == other.getObjCLifetime() &&
+ // CVR qualifiers may subset.
+ (((Mask & CVRMask) | (other.Mask & CVRMask)) == (Mask & CVRMask)) &&
+ // U qualifier may superset.
+ (!other.hasUnaligned() || hasUnaligned());
+ }
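+
+ // Illustrative sketch: CVR qualifiers may subset, so a const-volatile set
+ // compatibly includes an unqualified set but not vice versa ('Ctx' is an
+ // assumed in-scope ASTContext):
+ //
+ //   Qualifiers CV = Qualifiers::fromCVRMask(Const | Volatile);
+ //   assert(CV.compatiblyIncludes(Qualifiers(), Ctx));
+ //   assert(!Qualifiers().compatiblyIncludes(CV, Ctx));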
+
+ /// Determines if these qualifiers compatibly include another set of
+ /// qualifiers from the narrow perspective of Objective-C ARC lifetime.
+ ///
+ /// One set of Objective-C lifetime qualifiers compatibly includes the other
+ /// if the lifetime qualifiers match, or if both are non-__weak and the
+ /// including set also contains the 'const' qualifier, or both are non-__weak
+ /// and one is None (which can only happen in non-ARC modes).
+ bool compatiblyIncludesObjCLifetime(Qualifiers other) const {
+ if (getObjCLifetime() == other.getObjCLifetime())
+ return true;
+
+ if (getObjCLifetime() == OCL_Weak || other.getObjCLifetime() == OCL_Weak)
+ return false;
+
+ if (getObjCLifetime() == OCL_None || other.getObjCLifetime() == OCL_None)
+ return true;
+
+ return hasConst();
+ }
+
+ /// Determine whether this set of qualifiers is a strict superset of
+ /// another set of qualifiers, not considering qualifier compatibility.
+ bool isStrictSupersetOf(Qualifiers Other) const;
+
+ bool operator==(Qualifiers Other) const { return Mask == Other.Mask; }
+ bool operator!=(Qualifiers Other) const { return Mask != Other.Mask; }
+
+ explicit operator bool() const { return hasQualifiers(); }
+
+ Qualifiers &operator+=(Qualifiers R) {
+ addQualifiers(R);
+ return *this;
+ }
+
+ // Union two qualifier sets. If an enumerated qualifier appears
+ // in both sets, use the one from the right.
+ friend Qualifiers operator+(Qualifiers L, Qualifiers R) {
+ L += R;
+ return L;
+ }
+
+ Qualifiers &operator-=(Qualifiers R) {
+ removeQualifiers(R);
+ return *this;
+ }
+
+ /// Compute the difference between two qualifier sets.
+ friend Qualifiers operator-(Qualifiers L, Qualifiers R) {
+ L -= R;
+ return L;
+ }
+
+ std::string getAsString() const;
+ std::string getAsString(const PrintingPolicy &Policy) const;
+
+ static std::string getAddrSpaceAsString(LangAS AS);
+
+ bool isEmptyWhenPrinted(const PrintingPolicy &Policy) const;
+ void print(raw_ostream &OS, const PrintingPolicy &Policy,
+ bool appendSpaceIfNonEmpty = false) const;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Mask); }
+
+private:
+ // bits: |0 1 2|3|4 .. 5|6 .. 8|9 ... 31|32 ... 63|
+ // |C R V|U|GCAttr|Lifetime|AddressSpace| PtrAuth |
+ uint64_t Mask = 0;
+ static_assert(sizeof(PointerAuthQualifier) == sizeof(uint32_t),
+ "PointerAuthQualifier must be 32 bits");
+
+ static constexpr uint64_t PtrAuthShift = 32;
+ static constexpr uint64_t PtrAuthMask = UINT64_C(0xffffffff) << PtrAuthShift;
+
+ static constexpr uint64_t UMask = 0x8;
+ static constexpr uint64_t UShift = 3;
+ static constexpr uint64_t GCAttrMask = 0x30;
+ static constexpr uint64_t GCAttrShift = 4;
+ static constexpr uint64_t LifetimeMask = 0x1C0;
+ static constexpr uint64_t LifetimeShift = 6;
+ static constexpr uint64_t AddressSpaceMask =
+ ~(CVRMask | UMask | GCAttrMask | LifetimeMask | PtrAuthMask);
+ static constexpr uint64_t AddressSpaceShift = 9;
+};
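+
+// Illustrative sketch: composing qualifier sets with the operators above.
+//
+//   Qualifiers L = Qualifiers::fromCVRMask(Qualifiers::Const);
+//   Qualifiers R = Qualifiers::fromCVRMask(Qualifiers::Volatile);
+//   Qualifiers Both = L + R; // const volatile
+//   assert(Both.hasConst() && Both.hasVolatile());
+//   Both -= R;
+//   assert(Both.hasOnlyConst());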
+
+class QualifiersAndAtomic {
+ Qualifiers Quals;
+ bool HasAtomic;
+
+public:
+ QualifiersAndAtomic() : HasAtomic(false) {}
+ QualifiersAndAtomic(Qualifiers Quals, bool HasAtomic)
+ : Quals(Quals), HasAtomic(HasAtomic) {}
+
+ operator Qualifiers() const { return Quals; }
+
+ bool hasVolatile() const { return Quals.hasVolatile(); }
+ bool hasConst() const { return Quals.hasConst(); }
+ bool hasRestrict() const { return Quals.hasRestrict(); }
+ bool hasAtomic() const { return HasAtomic; }
+
+ void addVolatile() { Quals.addVolatile(); }
+ void addConst() { Quals.addConst(); }
+ void addRestrict() { Quals.addRestrict(); }
+ void addAtomic() { HasAtomic = true; }
+
+ void removeVolatile() { Quals.removeVolatile(); }
+ void removeConst() { Quals.removeConst(); }
+ void removeRestrict() { Quals.removeRestrict(); }
+ void removeAtomic() { HasAtomic = false; }
+
+ QualifiersAndAtomic withVolatile() {
+ return {Quals.withVolatile(), HasAtomic};
+ }
+ QualifiersAndAtomic withConst() { return {Quals.withConst(), HasAtomic}; }
+ QualifiersAndAtomic withRestrict() {
+ return {Quals.withRestrict(), HasAtomic};
+ }
+ QualifiersAndAtomic withAtomic() { return {Quals, true}; }
+
+ QualifiersAndAtomic &operator+=(Qualifiers RHS) {
+ Quals += RHS;
+ return *this;
+ }
+};
+
+/// A std::pair-like structure for storing a qualified type split
+/// into its local qualifiers and its locally-unqualified type.
+struct SplitQualType {
+ /// The locally-unqualified type.
+ const Type *Ty = nullptr;
+
+ /// The local qualifiers.
+ Qualifiers Quals;
+
+ SplitQualType() = default;
+ SplitQualType(const Type *ty, Qualifiers qs) : Ty(ty), Quals(qs) {}
+
+ SplitQualType getSingleStepDesugaredType() const; // end of this file
+
+ // Make std::tie work.
+ std::pair<const Type *, Qualifiers> asPair() const {
+ return std::pair<const Type *, Qualifiers>(Ty, Quals);
+ }
+
+ friend bool operator==(SplitQualType a, SplitQualType b) {
+ return a.Ty == b.Ty && a.Quals == b.Quals;
+ }
+ friend bool operator!=(SplitQualType a, SplitQualType b) {
+ return a.Ty != b.Ty || a.Quals != b.Quals;
+ }
+};
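+
+// Illustrative sketch: destructuring a split type via std::tie ('QT' is an
+// assumed in-scope QualType; requires <tuple>):
+//
+//   const Type *Ty;
+//   Qualifiers Qs;
+//   std::tie(Ty, Qs) = QT.split().asPair();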
+
+/// The kind of type we are substituting Objective-C type arguments into.
+///
+/// The kind of substitution affects the replacement of type parameters when
+/// no concrete type information is provided, e.g., when dealing with an
+/// unspecialized type.
+enum class ObjCSubstitutionContext {
+ /// An ordinary type.
+ Ordinary,
+
+ /// The result type of a method or function.
+ Result,
+
+ /// The parameter type of a method or function.
+ Parameter,
+
+ /// The type of a property.
+ Property,
+
+ /// The superclass of a type.
+ Superclass,
+};
+
+/// The kind of 'typeof' expression we're after.
+enum class TypeOfKind : uint8_t {
+ Qualified,
+ Unqualified,
+};
+
+/// A (possibly-)qualified type.
+///
+/// For efficiency, we don't store CV-qualified types as nodes on their
+/// own: instead each reference to a type stores the qualifiers. This
+/// greatly reduces the number of nodes we need to allocate for types (for
+/// example we only need one for 'int', 'const int', 'volatile int',
+/// 'const volatile int', etc).
+///
+/// As an added efficiency bonus, instead of making this a pair, we
+/// just store the two bits we care about in the low bits of the
+/// pointer. To handle the packing/unpacking, we make QualType be a
+/// simple wrapper class that acts like a smart pointer. A third bit
+/// indicates whether there are extended qualifiers present, in which
+/// case the pointer points to a special structure.
+class QualType {
+ friend class QualifierCollector;
+
+ // Thankfully, these are efficiently composable.
+ llvm::PointerIntPair<llvm::PointerUnion<const Type *, const ExtQuals *>,
+ Qualifiers::FastWidth> Value;
+
+ const ExtQuals *getExtQualsUnsafe() const {
+ return cast<const ExtQuals *>(Value.getPointer());
+ }
+
+ const Type *getTypePtrUnsafe() const {
+ return cast<const Type *>(Value.getPointer());
+ }
+
+ const ExtQualsTypeCommonBase *getCommonPtr() const {
+ assert(!isNull() && "Cannot retrieve a NULL type pointer");
+ auto CommonPtrVal = reinterpret_cast<uintptr_t>(Value.getOpaqueValue());
+ CommonPtrVal &= ~(uintptr_t)((1 << TypeAlignmentInBits) - 1);
+ return reinterpret_cast<ExtQualsTypeCommonBase*>(CommonPtrVal);
+ }
+
+public:
+ QualType() = default;
+ QualType(const Type *Ptr, unsigned Quals) : Value(Ptr, Quals) {}
+ QualType(const ExtQuals *Ptr, unsigned Quals) : Value(Ptr, Quals) {}
+
+ unsigned getLocalFastQualifiers() const { return Value.getInt(); }
+ void setLocalFastQualifiers(unsigned Quals) { Value.setInt(Quals); }
+
+ bool UseExcessPrecision(const ASTContext &Ctx);
+
+ /// Retrieves a pointer to the underlying (unqualified) type.
+ ///
+ /// This function requires that the type not be NULL. If the type might be
+ /// NULL, use the (slightly less efficient) \c getTypePtrOrNull().
+ const Type *getTypePtr() const;
+
+ const Type *getTypePtrOrNull() const;
+
+ /// Retrieves a pointer to the name of the base type.
+ const IdentifierInfo *getBaseTypeIdentifier() const;
+
+ /// Divides a QualType into its unqualified type and a set of local
+ /// qualifiers.
+ SplitQualType split() const;
+
+ void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
+
+ static QualType getFromOpaquePtr(const void *Ptr) {
+ QualType T;
+ T.Value.setFromOpaqueValue(const_cast<void*>(Ptr));
+ return T;
+ }
+
+ const Type &operator*() const {
+ return *getTypePtr();
+ }
+
+ const Type *operator->() const {
+ return getTypePtr();
+ }
+
+ bool isCanonical() const;
+ bool isCanonicalAsParam() const;
+
+ /// Return true if this QualType doesn't point to a type yet.
+ bool isNull() const {
+ return Value.getPointer().isNull();
+ }
+
+ // Determines if a type can form `T&`.
+ bool isReferenceable() const;
+
+ /// Determine whether this particular QualType instance has the
+ /// "const" qualifier set, without looking through typedefs that may have
+ /// added "const" at a different level.
+ bool isLocalConstQualified() const {
+ return (getLocalFastQualifiers() & Qualifiers::Const);
+ }
+
+ /// Determine whether this type is const-qualified.
+ bool isConstQualified() const;
+
+ enum class NonConstantStorageReason {
+ MutableField,
+ NonConstNonReferenceType,
+ NonTrivialCtor,
+ NonTrivialDtor,
+ };
+ /// Determine whether instances of this type can be placed in immutable
+ /// storage.
+ /// If ExcludeCtor is true, the duration when the object's constructor runs
+ /// will not be considered. The caller will need to verify that the object is
+ /// not written to during its construction. ExcludeDtor works similarly.
+ std::optional<NonConstantStorageReason>
+ isNonConstantStorage(const ASTContext &Ctx, bool ExcludeCtor,
+ bool ExcludeDtor);
+
+ bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor,
+ bool ExcludeDtor) {
+ return !isNonConstantStorage(Ctx, ExcludeCtor, ExcludeDtor);
+ }
+
+ /// Determine whether this particular QualType instance has the
+ /// "restrict" qualifier set, without looking through typedefs that may have
+ /// added "restrict" at a different level.
+ bool isLocalRestrictQualified() const {
+ return (getLocalFastQualifiers() & Qualifiers::Restrict);
+ }
+
+ /// Determine whether this type is restrict-qualified.
+ bool isRestrictQualified() const;
+
+ /// Determine whether this particular QualType instance has the
+ /// "volatile" qualifier set, without looking through typedefs that may have
+ /// added "volatile" at a different level.
+ bool isLocalVolatileQualified() const {
+ return (getLocalFastQualifiers() & Qualifiers::Volatile);
+ }
+
+ /// Determine whether this type is volatile-qualified.
+ bool isVolatileQualified() const;
+
+ /// Determine whether this particular QualType instance has any
+ /// qualifiers, without looking through any typedefs that might add
+ /// qualifiers at a different level.
+ bool hasLocalQualifiers() const {
+ return getLocalFastQualifiers() || hasLocalNonFastQualifiers();
+ }
+
+ /// Determine whether this type has any qualifiers.
+ bool hasQualifiers() const;
+
+ /// Determine whether this particular QualType instance has any
+ /// "non-fast" qualifiers, e.g., those that are stored in an ExtQualType
+ /// instance.
+ bool hasLocalNonFastQualifiers() const {
+ return isa<const ExtQuals *>(Value.getPointer());
+ }
+
+ /// Retrieve the set of qualifiers local to this particular QualType
+ /// instance, not including any qualifiers acquired through typedefs or
+ /// other sugar.
+ Qualifiers getLocalQualifiers() const;
+
+ /// Retrieve the set of qualifiers applied to this type.
+ Qualifiers getQualifiers() const;
+
+ /// Retrieve the set of CVR (const-volatile-restrict) qualifiers
+ /// local to this particular QualType instance, not including any qualifiers
+ /// acquired through typedefs or other sugar.
+ unsigned getLocalCVRQualifiers() const {
+ return getLocalFastQualifiers();
+ }
+
+ /// Retrieve the set of CVR (const-volatile-restrict) qualifiers
+ /// applied to this type.
+ unsigned getCVRQualifiers() const;
+
+ bool isConstant(const ASTContext& Ctx) const {
+ return QualType::isConstant(*this, Ctx);
+ }
+
+ /// Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
+ bool isPODType(const ASTContext &Context) const;
+
+ /// Return true if this is a POD type according to the rules of the C++98
+ /// standard, regardless of the current compilation's language.
+ bool isCXX98PODType(const ASTContext &Context) const;
+
+ /// Return true if this is a POD type according to the more relaxed rules
+ /// of the C++11 standard, regardless of the current compilation's language.
+ /// (C++0x [basic.types]p9). Note that, unlike
+ /// CXXRecordDecl::isCXX11StandardLayout, this takes DRs into account.
+ bool isCXX11PODType(const ASTContext &Context) const;
+
+ /// Return true if this is a trivial type per C++0x [basic.types]p9.
+ bool isTrivialType(const ASTContext &Context) const;
+
+ /// Return true if this is a trivially copyable type (C++0x [basic.types]p9).
+ bool isTriviallyCopyableType(const ASTContext &Context) const;
+
+ /// Return true if the type is safe to bitwise copy using memcpy/memmove.
+ ///
+ /// This is an extension in clang: bitwise cloneable types act as trivially
+ /// copyable types, meaning their underlying bytes can be safely copied by
+ /// memcpy or memmove. After the copy, the destination object has the same
+ /// object representation.
+ ///
+ /// However, there are cases where it is not safe to copy:
+ /// - When sanitizers, such as AddressSanitizer, add padding with poison,
+ /// which can cause issues if those poisoned padding bits are accessed.
+ /// - Types with Objective-C lifetimes, where specific runtime
+ /// semantics may not be preserved during a bitwise copy.
+ bool isBitwiseCloneableType(const ASTContext &Context) const;
+
+ /// Return true if this is a trivially copy-constructible type.
+ bool isTriviallyCopyConstructibleType(const ASTContext &Context) const;
+
+ /// Returns true if it is a class and it might be dynamic.
+ bool mayBeDynamicClass() const;
+
+ /// Returns true if it is not a class or if the class might not be dynamic.
+ bool mayBeNotDynamicClass() const;
+
+ /// Returns true if it is a WebAssembly Reference Type.
+ bool isWebAssemblyReferenceType() const;
+
+ /// Returns true if it is a WebAssembly Externref Type.
+ bool isWebAssemblyExternrefType() const;
+
+ /// Returns true if it is a WebAssembly Funcref Type.
+ bool isWebAssemblyFuncrefType() const;
+
+ // Don't promise in the API that anything besides 'const' can be
+ // easily added.
+
+ /// Add the `const` type qualifier to this QualType.
+ void addConst() {
+ addFastQualifiers(Qualifiers::Const);
+ }
+ QualType withConst() const {
+ return withFastQualifiers(Qualifiers::Const);
+ }
+
+ /// Add the `volatile` type qualifier to this QualType.
+ void addVolatile() {
+ addFastQualifiers(Qualifiers::Volatile);
+ }
+ QualType withVolatile() const {
+ return withFastQualifiers(Qualifiers::Volatile);
+ }
+
+ /// Add the `restrict` qualifier to this QualType.
+ void addRestrict() {
+ addFastQualifiers(Qualifiers::Restrict);
+ }
+ QualType withRestrict() const {
+ return withFastQualifiers(Qualifiers::Restrict);
+ }
+
+ QualType withCVRQualifiers(unsigned CVR) const {
+ return withFastQualifiers(CVR);
+ }
+
+ void addFastQualifiers(unsigned TQs) {
+ assert(!(TQs & ~Qualifiers::FastMask)
+ && "non-fast qualifier bits set in mask!");
+ Value.setInt(Value.getInt() | TQs);
+ }
+
+ void removeLocalConst();
+ void removeLocalVolatile();
+ void removeLocalRestrict();
+
+ void removeLocalFastQualifiers() { Value.setInt(0); }
+ void removeLocalFastQualifiers(unsigned Mask) {
+ assert(!(Mask & ~Qualifiers::FastMask) && "mask has non-fast qualifiers");
+ Value.setInt(Value.getInt() & ~Mask);
+ }
+
+ // Creates a type with the given qualifiers in addition to any
+ // qualifiers already on this type.
+ QualType withFastQualifiers(unsigned TQs) const {
+ QualType T = *this;
+ T.addFastQualifiers(TQs);
+ return T;
+ }
+
+ // Creates a type with exactly the given fast qualifiers, removing
+ // any existing fast qualifiers.
+ QualType withExactLocalFastQualifiers(unsigned TQs) const {
+ return withoutLocalFastQualifiers().withFastQualifiers(TQs);
+ }
+
+ // Removes fast qualifiers, but leaves any extended qualifiers in place.
+ QualType withoutLocalFastQualifiers() const {
+ QualType T = *this;
+ T.removeLocalFastQualifiers();
+ return T;
+ }
+
+ QualType getCanonicalType() const;
+
+ /// Return this type with all of the instance-specific qualifiers
+ /// removed, but without removing any qualifiers that may have been applied
+ /// through typedefs.
+ QualType getLocalUnqualifiedType() const { return QualType(getTypePtr(), 0); }
+
+ /// Retrieve the unqualified variant of the given type,
+ /// removing as little sugar as possible.
+ ///
+ /// This routine looks through various kinds of sugar to find the
+ /// least-desugared type that is unqualified. For example, given:
+ ///
+ /// \code
+ /// typedef int Integer;
+ /// typedef const Integer CInteger;
+ /// typedef CInteger DifferenceType;
+ /// \endcode
+ ///
+ /// Executing \c getUnqualifiedType() on the type \c DifferenceType will
+ /// desugar until we hit the type \c Integer, which has no qualifiers on it.
+ ///
+ /// The resulting type might still be qualified if it's sugar for an array
+ /// type. To strip qualifiers even from within a sugared array type, use
+ /// ASTContext::getUnqualifiedArrayType.
+ ///
+ /// Note: In C, the _Atomic qualifier is special (see C23 6.2.5p32 for
+ /// details), and it is not stripped by this function. Use
+ /// getAtomicUnqualifiedType() to strip qualifiers including _Atomic.
+ inline QualType getUnqualifiedType() const;
+
+ /// Retrieve the unqualified variant of the given type, removing as little
+ /// sugar as possible.
+ ///
+ /// Like getUnqualifiedType(), but also returns the set of
+ /// qualifiers that were built up.
+ ///
+ /// The resulting type might still be qualified if it's sugar for an array
+ /// type. To strip qualifiers even from within a sugared array type, use
+ /// ASTContext::getUnqualifiedArrayType.
+ inline SplitQualType getSplitUnqualifiedType() const;
+
+ /// Determine whether this type is more qualified than the other
+ /// given type, requiring exact equality for non-CVR qualifiers.
+ bool isMoreQualifiedThan(QualType Other, const ASTContext &Ctx) const;
+
+ /// Determine whether this type is at least as qualified as the other
+ /// given type, requiring exact equality for non-CVR qualifiers.
+ bool isAtLeastAsQualifiedAs(QualType Other, const ASTContext &Ctx) const;
+
+ QualType getNonReferenceType() const;
+
+ /// Determine the type of a (typically non-lvalue) expression with the
+ /// specified result type.
+ ///
+ /// This routine should be used for expressions for which the return type is
+ /// explicitly specified (e.g., in a cast or call) and isn't necessarily
+ /// an lvalue. It removes a top-level reference (since there are no
+ /// expressions of reference type) and deletes top-level cvr-qualifiers
+ /// from non-class types (in C++) or all types (in C).
+ QualType getNonLValueExprType(const ASTContext &Context) const;
+
+ /// Remove an outer pack expansion type (if any) from this type. Used as part
+ /// of converting the type of a declaration to the type of an expression that
+ /// references that expression. It's meaningless for an expression to have a
+ /// pack expansion type.
+ QualType getNonPackExpansionType() const;
+
+ /// Return the specified type with any "sugar" removed from
+ /// the type. This takes off typedefs, typeof's etc. If the outer level of
+ /// the type is already concrete, it returns it unmodified. This is similar
+ /// to getting the canonical type, but it doesn't remove *all* typedefs. For
+ /// example, it returns "T*" as "T*", (not as "int*"), because the pointer is
+ /// concrete.
+ ///
+ /// Qualifiers are left in place.
+ QualType getDesugaredType(const ASTContext &Context) const {
+ return getDesugaredType(*this, Context);
+ }
+
+ SplitQualType getSplitDesugaredType() const {
+ return getSplitDesugaredType(*this);
+ }
+
+ /// Return the specified type with one level of "sugar" removed from
+ /// the type.
+ ///
+ /// This routine takes off the first typedef, typeof, etc. If the outer level
+ /// of the type is already concrete, it returns it unmodified.
+ QualType getSingleStepDesugaredType(const ASTContext &Context) const {
+ return getSingleStepDesugaredTypeImpl(*this, Context);
+ }
+
+ /// Returns the specified type after dropping any
+ /// outer-level parentheses.
+ QualType IgnoreParens() const {
+ if (isa<ParenType>(*this))
+ return QualType::IgnoreParens(*this);
+ return *this;
+ }
+
+ /// Indicate whether the specified types and qualifiers are identical.
+ friend bool operator==(const QualType &LHS, const QualType &RHS) {
+ return LHS.Value == RHS.Value;
+ }
+ friend bool operator!=(const QualType &LHS, const QualType &RHS) {
+ return LHS.Value != RHS.Value;
+ }
+ friend bool operator<(const QualType &LHS, const QualType &RHS) {
+ return LHS.Value < RHS.Value;
+ }
+
+ static std::string getAsString(SplitQualType split,
+ const PrintingPolicy &Policy) {
+ return getAsString(split.Ty, split.Quals, Policy);
+ }
+ static std::string getAsString(const Type *ty, Qualifiers qs,
+ const PrintingPolicy &Policy);
+
+ std::string getAsString() const;
+ std::string getAsString(const PrintingPolicy &Policy) const;
+
+ void print(raw_ostream &OS, const PrintingPolicy &Policy,
+ const Twine &PlaceHolder = Twine(),
+ unsigned Indentation = 0) const;
+
+ static void print(SplitQualType split, raw_ostream &OS,
+ const PrintingPolicy &policy, const Twine &PlaceHolder,
+ unsigned Indentation = 0) {
+ return print(split.Ty, split.Quals, OS, policy, PlaceHolder, Indentation);
+ }
+
+ static void print(const Type *ty, Qualifiers qs,
+ raw_ostream &OS, const PrintingPolicy &policy,
+ const Twine &PlaceHolder,
+ unsigned Indentation = 0);
+
+ void getAsStringInternal(std::string &Str,
+ const PrintingPolicy &Policy) const;
+
+ static void getAsStringInternal(SplitQualType split, std::string &out,
+ const PrintingPolicy &policy) {
+ return getAsStringInternal(split.Ty, split.Quals, out, policy);
+ }
+
+ static void getAsStringInternal(const Type *ty, Qualifiers qs,
+ std::string &out,
+ const PrintingPolicy &policy);
+
+ class StreamedQualTypeHelper {
+ const QualType &T;
+ const PrintingPolicy &Policy;
+ const Twine &PlaceHolder;
+ unsigned Indentation;
+
+ public:
+ StreamedQualTypeHelper(const QualType &T, const PrintingPolicy &Policy,
+ const Twine &PlaceHolder, unsigned Indentation)
+ : T(T), Policy(Policy), PlaceHolder(PlaceHolder),
+ Indentation(Indentation) {}
+
+ friend raw_ostream &operator<<(raw_ostream &OS,
+ const StreamedQualTypeHelper &SQT) {
+ SQT.T.print(OS, SQT.Policy, SQT.PlaceHolder, SQT.Indentation);
+ return OS;
+ }
+ };
+
+ StreamedQualTypeHelper stream(const PrintingPolicy &Policy,
+ const Twine &PlaceHolder = Twine(),
+ unsigned Indentation = 0) const {
+ return StreamedQualTypeHelper(*this, Policy, PlaceHolder, Indentation);
+ }
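+
+ // Illustrative sketch: printing through the helper above ('QT' and 'Policy'
+ // are assumed to be in scope):
+ //
+ //   std::string Buffer;
+ //   llvm::raw_string_ostream OS(Buffer);
+ //   OS << QT.stream(Policy, /*PlaceHolder=*/"x");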
+
+ void dump(const char *s) const;
+ void dump() const;
+ void dump(llvm::raw_ostream &OS, const ASTContext &Context) const;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(getAsOpaquePtr());
+ }
+
+ /// Check if this type has any address space qualifier.
+ inline bool hasAddressSpace() const;
+
+ /// Return the address space of this type.
+ inline LangAS getAddressSpace() const;
+
+ /// Returns true if the address space qualifiers of this type overlap with
+ /// the address space qualifiers of T.
+ /// OpenCL C defines conversion rules for pointers to different address spaces
+ /// and a notion of overlapping address spaces.
+ /// CL1.1 or CL1.2:
+ /// address spaces overlap iff they are the same.
+ /// OpenCL C v2.0 s6.5.5 adds:
+ /// __generic overlaps with any address space except for __constant.
+ bool isAddressSpaceOverlapping(QualType T, const ASTContext &Ctx) const {
+ Qualifiers Q = getQualifiers();
+ Qualifiers TQ = T.getQualifiers();
+ // Address spaces overlap if at least one of them is a superset of another
+ return Q.isAddressSpaceSupersetOf(TQ, Ctx) ||
+ TQ.isAddressSpaceSupersetOf(Q, Ctx);
+ }
+
+ /// Returns gc attribute of this type.
+ inline Qualifiers::GC getObjCGCAttr() const;
+
+ /// True when the type is Objective-C GC 'weak'.
+ bool isObjCGCWeak() const {
+ return getObjCGCAttr() == Qualifiers::Weak;
+ }
+
+ /// True when the type is Objective-C GC 'strong'.
+ bool isObjCGCStrong() const {
+ return getObjCGCAttr() == Qualifiers::Strong;
+ }
+
+ /// Returns lifetime attribute of this type.
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return getQualifiers().getObjCLifetime();
+ }
+
+ bool hasNonTrivialObjCLifetime() const {
+ return getQualifiers().hasNonTrivialObjCLifetime();
+ }
+
+ bool hasStrongOrWeakObjCLifetime() const {
+ return getQualifiers().hasStrongOrWeakObjCLifetime();
+ }
+
+ // True when the type is Objective-C 'weak' and __weak is enabled but ARC isn't.
+ bool isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const;
+
+ PointerAuthQualifier getPointerAuth() const {
+ return getQualifiers().getPointerAuth();
+ }
+
+ bool hasAddressDiscriminatedPointerAuth() const {
+ if (PointerAuthQualifier PtrAuth = getPointerAuth())
+ return PtrAuth.isAddressDiscriminated();
+ return false;
+ }
+
+ enum PrimitiveDefaultInitializeKind {
+ /// The type does not fall into any of the following categories. Note that
+ /// this case is zero-valued so that values of this enum can be used as a
+ /// boolean condition for non-triviality.
+ PDIK_Trivial,
+
+ /// The type is an Objective-C retainable pointer type that is qualified
+ /// with the ARC __strong qualifier.
+ PDIK_ARCStrong,
+
+ /// The type is an Objective-C retainable pointer type that is qualified
+ /// with the ARC __weak qualifier.
+ PDIK_ARCWeak,
+
+ /// The type is a struct containing a field whose type is not PDIK_Trivial.
+ PDIK_Struct
+ };
+
+ /// Functions to query basic properties of non-trivial C struct types.
+
+ /// Check if this is a non-trivial type that would cause a C struct
+ /// transitively containing this type to be non-trivial to default initialize
+ /// and return the kind.
+ PrimitiveDefaultInitializeKind
+ isNonTrivialToPrimitiveDefaultInitialize() const;
+
+ enum PrimitiveCopyKind {
+ /// The type does not fall into any of the following categories. Note that
+ /// this case is zero-valued so that values of this enum can be used as a
+ /// boolean condition for non-triviality.
+ PCK_Trivial,
+
+ /// The type would be trivial except that it is volatile-qualified. Types
+ /// that fall into one of the other non-trivial cases may additionally be
+ /// volatile-qualified.
+ PCK_VolatileTrivial,
+
+ /// The type is an Objective-C retainable pointer type that is qualified
+ /// with the ARC __strong qualifier.
+ PCK_ARCStrong,
+
+ /// The type is an Objective-C retainable pointer type that is qualified
+ /// with the ARC __weak qualifier.
+ PCK_ARCWeak,
+
+ /// The type is an address-discriminated signed pointer type.
+ PCK_PtrAuth,
+
+ /// The type is a struct containing a field whose type is neither
+ /// PCK_Trivial nor PCK_VolatileTrivial.
+ /// Note that a C++ struct type does not necessarily match this; C++ copying
+ /// semantics are too complex to express here, in part because they depend
+ /// on the exact constructor or assignment operator that is chosen by
+ /// overload resolution to do the copy.
+ PCK_Struct
+ };
+
+ /// Check if this is a non-trivial type that would cause a C struct
+ /// transitively containing this type to be non-trivial to copy and return the
+ /// kind.
+ PrimitiveCopyKind isNonTrivialToPrimitiveCopy() const;
+
+ /// Check if this is a non-trivial type that would cause a C struct
+ /// transitively containing this type to be non-trivial to destructively
+ /// move and return the kind. Destructive move in this context is a C++-style
+ /// move in which the source object is placed in a valid but unspecified state
+ /// after it is moved, as opposed to a truly destructive move in which the
+ /// source object is placed in an uninitialized state.
+ PrimitiveCopyKind isNonTrivialToPrimitiveDestructiveMove() const;
+
+ enum DestructionKind {
+ DK_none,
+ DK_cxx_destructor,
+ DK_objc_strong_lifetime,
+ DK_objc_weak_lifetime,
+ DK_nontrivial_c_struct
+ };
+
+ /// Returns a nonzero value if objects of this type require
+ /// non-trivial work to clean up after. Non-zero because it's
+ /// conceivable that qualifiers (objc_gc(weak)?) could make
+ /// something require destruction.
+ DestructionKind isDestructedType() const {
+ return isDestructedTypeImpl(*this);
+ }
+
+ /// Check if this is or contains a C union that is non-trivial to
+ /// default-initialize, which is a union that has a member that is non-trivial
+ /// to default-initialize. If this returns true,
+ /// isNonTrivialToPrimitiveDefaultInitialize returns PDIK_Struct.
+ bool hasNonTrivialToPrimitiveDefaultInitializeCUnion() const;
+
+ /// Check if this is or contains a C union that is non-trivial to destruct,
+ /// which is a union that has a member that is non-trivial to destruct. If
+ /// this returns true, isDestructedType returns DK_nontrivial_c_struct.
+ bool hasNonTrivialToPrimitiveDestructCUnion() const;
+
+ /// Check if this is or contains a C union that is non-trivial to copy, which
+ /// is a union that has a member that is non-trivial to copy. If this returns
+ /// true, isNonTrivialToPrimitiveCopy returns PCK_Struct.
+ bool hasNonTrivialToPrimitiveCopyCUnion() const;
+
+ /// Determine whether expressions of the given type are forbidden
+ /// from being lvalues in C.
+ ///
+ /// The expression types that are forbidden to be lvalues are:
+ /// - 'void', but not qualified void
+ /// - function types
+ ///
+ /// The exact rule here is C99 6.3.2.1:
+ /// An lvalue is an expression with an object type or an incomplete
+ /// type other than void.
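+  ///
+  /// For example, in 'void f(void);' the designator 'f' has function type,
+  /// so an expression of that type cannot be an lvalue in C.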
+ bool isCForbiddenLValueType() const;
+
+ /// Substitute type arguments for the Objective-C type parameters used in the
+ /// subject type.
+ ///
+ /// \param ctx ASTContext in which the type exists.
+ ///
+ /// \param typeArgs The type arguments that will be substituted for the
+ /// Objective-C type parameters in the subject type, which are generally
+ /// computed via \c Type::getObjCSubstitutions. If empty, the type
+ /// parameters will be replaced with their bounds or id/Class, as appropriate
+ /// for the context.
+ ///
+ /// \param context The context in which the subject type was written.
+ ///
+ /// \returns the resulting type.
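+  ///
+  /// A minimal usage sketch (Ctx, TypeArgs, and MemberTy are illustrative):
+  /// \code
+  /// QualType Substituted = MemberTy.substObjCTypeArgs(
+  ///     Ctx, TypeArgs, ObjCSubstitutionContext::Result);
+  /// \endcode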
+ QualType substObjCTypeArgs(ASTContext &ctx,
+ ArrayRef<QualType> typeArgs,
+ ObjCSubstitutionContext context) const;
+
+ /// Substitute type arguments from an object type for the Objective-C type
+ /// parameters used in the subject type.
+ ///
+ /// This operation combines the computation of type arguments for
+ /// substitution (\c Type::getObjCSubstitutions) with the actual process of
+ /// substitution (\c QualType::substObjCTypeArgs) for the convenience of
+ /// callers that need to perform a single substitution in isolation.
+ ///
+ /// \param objectType The type of the object whose member type we're
+ /// substituting into. For example, this might be the receiver of a message
+ /// or the base of a property access.
+ ///
+ /// \param dc The declaration context from which the subject type was
+ /// retrieved, which indicates (for example) which type parameters should
+ /// be substituted.
+ ///
+ /// \param context The context in which the subject type was written.
+ ///
+ /// \returns the subject type after replacing all of the Objective-C type
+ /// parameters with their corresponding arguments.
+ QualType substObjCMemberType(QualType objectType,
+ const DeclContext *dc,
+ ObjCSubstitutionContext context) const;
+
+ /// Strip Objective-C "__kindof" types from the given type.
+ QualType stripObjCKindOfType(const ASTContext &ctx) const;
+
+ /// Remove all qualifiers including _Atomic.
+ ///
+ /// Like getUnqualifiedType(), the type may still be qualified if it is a
+ /// sugared array type. To strip qualifiers even from within a sugared array
+ /// type, use in conjunction with ASTContext::getUnqualifiedArrayType.
+ QualType getAtomicUnqualifiedType() const;
+
+private:
+ // These methods are implemented in a separate translation unit;
+ // "static"-ize them to avoid creating temporary QualTypes in the
+ // caller.
+ static bool isConstant(QualType T, const ASTContext& Ctx);
+ static QualType getDesugaredType(QualType T, const ASTContext &Context);
+ static SplitQualType getSplitDesugaredType(QualType T);
+ static SplitQualType getSplitUnqualifiedTypeImpl(QualType type);
+ static QualType getSingleStepDesugaredTypeImpl(QualType type,
+ const ASTContext &C);
+ static QualType IgnoreParens(QualType T);
+ static DestructionKind isDestructedTypeImpl(QualType type);
+
+  /// Check if \p RD is or contains a non-trivial C union.
+ static bool hasNonTrivialToPrimitiveDefaultInitializeCUnion(const RecordDecl *RD);
+ static bool hasNonTrivialToPrimitiveDestructCUnion(const RecordDecl *RD);
+ static bool hasNonTrivialToPrimitiveCopyCUnion(const RecordDecl *RD);
+};
+
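+/// Allows streaming a QualType into an LLVM stream for diagnostics and
+/// debugging; for example (sketch): llvm::errs() << "type: " << QT << "\n";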
+raw_ostream &operator<<(raw_ostream &OS, QualType QT);
+
+} // namespace clang
+
+namespace llvm {
+
+/// Implement simplify_type for QualType, so that we can dyn_cast from QualType
+/// to a specific Type class.
+template<> struct simplify_type< ::clang::QualType> {
+ using SimpleType = const ::clang::Type *;
+
+ static SimpleType getSimplifiedValue(::clang::QualType Val) {
+ return Val.getTypePtr();
+ }
+};
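+
+// With this in place, LLVM's casting machinery can look through QualType;
+// for example (sketch):
+//
+//   if (const auto *PT = llvm::dyn_cast<clang::PointerType>(QT))
+//     (void)PT->getPointeeType();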
+
+// Teach SmallPtrSet that QualType is "basically a pointer".
+template<>
+struct PointerLikeTypeTraits<clang::QualType> {
+ static inline void *getAsVoidPointer(clang::QualType P) {
+ return P.getAsOpaquePtr();
+ }
+
+ static inline clang::QualType getFromVoidPointer(void *P) {
+ return clang::QualType::getFromOpaquePtr(P);
+ }
+
+  // The low bits of a QualType pointer already encode qualifiers, so no
+  // bits are available to clients of PointerLikeTypeTraits.
+ static constexpr int NumLowBitsAvailable = 0;
+};
+
+} // namespace llvm
+
+namespace clang {
+
+/// Base class that is common to both the \c ExtQuals and \c Type
+/// classes, which allows \c QualType to access the common fields between the
+/// two.
+class ExtQualsTypeCommonBase {
+ friend class ExtQuals;
+ friend class QualType;
+ friend class Type;
+ friend class ASTReader;
+
+ /// The "base" type of an extended qualifiers type (\c ExtQuals) or
+ /// a self-referential pointer (for \c Type).
+ ///
+ /// This pointer allows an efficient mapping from a QualType to its
+ /// underlying type pointer.
+ const Type *const BaseType;
+
+ /// The canonical type of this type. A QualType.
+ QualType CanonicalType;
+
+ ExtQualsTypeCommonBase(const Type *baseType, QualType canon)
+ : BaseType(baseType), CanonicalType(canon) {}
+};
+
+/// We can encode up to four bits in the low bits of a
+/// type pointer, but there are many more type qualifiers that we want
+/// to be able to apply to an arbitrary type. Therefore we have this
+/// struct, intended to be heap-allocated and used by QualType to
+/// store qualifiers.
+///
+/// The current design tags the 'const', 'restrict', and 'volatile' qualifiers
+/// in three low bits on the QualType pointer; a fourth bit records whether
+/// the pointer is an ExtQuals node. The extended qualifiers (address spaces,
+/// Objective-C GC attributes) are much more rare.
+class alignas(TypeAlignment) ExtQuals : public ExtQualsTypeCommonBase,
+ public llvm::FoldingSetNode {
+ // NOTE: changing the fast qualifiers should be straightforward as
+ // long as you don't make 'const' non-fast.
+ // 1. Qualifiers:
+ // a) Modify the bitmasks (Qualifiers::TQ and DeclSpec::TQ).
+ // Fast qualifiers must occupy the low-order bits.
+ // b) Update Qualifiers::FastWidth and FastMask.
+ // 2. QualType:
+ // a) Update is{Volatile,Restrict}Qualified(), defined inline.
+ // b) Update remove{Volatile,Restrict}, defined near the end of
+ // this header.
+ // 3. ASTContext:
+ // a) Update get{Volatile,Restrict}Type.
+
+ /// The immutable set of qualifiers applied by this node. Always contains
+ /// extended qualifiers.
+ Qualifiers Quals;
+
+ ExtQuals *this_() { return this; }
+
+public:
+ ExtQuals(const Type *baseType, QualType canon, Qualifiers quals)
+ : ExtQualsTypeCommonBase(baseType,
+ canon.isNull() ? QualType(this_(), 0) : canon),
+ Quals(quals) {
+ assert(Quals.hasNonFastQualifiers()
+           && "ExtQuals created with no non-fast qualifiers");
+ assert(!Quals.hasFastQualifiers()
+ && "ExtQuals created with fast qualifiers");
+ }
+
+ Qualifiers getQualifiers() const { return Quals; }
+
+ bool hasObjCGCAttr() const { return Quals.hasObjCGCAttr(); }
+ Qualifiers::GC getObjCGCAttr() const { return Quals.getObjCGCAttr(); }
+
+ bool hasObjCLifetime() const { return Quals.hasObjCLifetime(); }
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return Quals.getObjCLifetime();
+ }
+
+ bool hasAddressSpace() const { return Quals.hasAddressSpace(); }
+ LangAS getAddressSpace() const { return Quals.getAddressSpace(); }
+
+ const Type *getBaseType() const { return BaseType; }
+
+public:
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, getBaseType(), Quals);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const Type *BaseType,
+ Qualifiers Quals) {
+ assert(!Quals.hasFastQualifiers() && "fast qualifiers in ExtQuals hash!");
+ ID.AddPointer(BaseType);
+ Quals.Profile(ID);
+ }
+};
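+
+// A sketch of how ExtQuals nodes are uniqued via their Profile methods; the
+// actual FoldingSet (here called ExtQualsNodes) lives in ASTContext:
+//
+//   llvm::FoldingSetNodeID ID;
+//   ExtQuals::Profile(ID, BaseTy, Quals);
+//   void *InsertPos = nullptr;
+//   ExtQuals *EQ = ExtQualsNodes.FindNodeOrInsertPos(ID, InsertPos);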
+
+/// The kind of C++11 ref-qualifier associated with a function type.
+/// This determines whether a member function's "this" object can be an
+/// lvalue, rvalue, or neither.
+enum RefQualifierKind {
+ /// No ref-qualifier was provided.
+ RQ_None = 0,
+
+ /// An lvalue ref-qualifier was provided (\c &).
+ RQ_LValue,
+
+ /// An rvalue ref-qualifier was provided (\c &&).
+ RQ_RValue
+};
+
+/// Which keyword(s) were used to create an AutoType.
+enum class AutoTypeKeyword {
+ /// auto
+ Auto,
+
+ /// decltype(auto)
+ DecltypeAuto,
+
+ /// __auto_type (GNU extension)
+ GNUAutoType
+};
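+
+// For example: 'auto x = 0;' uses AutoTypeKeyword::Auto,
+// 'decltype(auto) y = x;' uses DecltypeAuto, and the GNU extension
+// '__auto_type z = x;' uses GNUAutoType.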
+
+enum class ArraySizeModifier;
+enum class ElaboratedTypeKeyword;
+enum class VectorKind;
+
+/// The base class of the type hierarchy.
+///
+/// A central concept with types is that each type always has a canonical
+/// type. A canonical type is the type with any typedef names stripped out
+/// of it or the types it references. For example, consider:
+///
+/// \code
+/// typedef int foo;
+/// typedef foo* bar;
+///   'int *'    'foo *'    'bar'
+/// \endcode
+///
+/// There will be a Type object created for 'int'. Since int is canonical, its
+/// CanonicalType pointer points to itself. There is also a Type for 'foo' (a
+/// TypedefType). Its CanonicalType pointer points to the 'int' Type. Next
+/// there is a PointerType that represents 'int*', which, like 'int', is
+/// canonical. Finally, there is a PointerType type for 'foo*' whose canonical
+/// type is 'int*', and there is a TypedefType for 'bar', whose canonical type
+/// is also 'int*'.
+///
+/// Non-canonical types are useful for emitting diagnostics, without losing
+/// information about typedefs being used. Canonical types are useful for type
+/// comparisons (they allow by-pointer equality tests) and useful for reasoning
+/// about whether something has a particular form (e.g. is a function type),
+/// because they implicitly, recursively, strip all typedefs out of a type.
+///
+/// Types, once created, are immutable.
+///
+class alignas(TypeAlignment) Type : public ExtQualsTypeCommonBase {
+public:
+ enum TypeClass {
+#define TYPE(Class, Base) Class,
+#define LAST_TYPE(Class) TypeLast = Class
+#define ABSTRACT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.inc"
+ };
+
+private:
+ /// Bitfields required by the Type class.
+ class TypeBitfields {
+ friend class Type;
+ template <class T> friend class TypePropertyCache;
+
+ /// TypeClass bitfield - Enum that specifies what subclass this belongs to.
+ LLVM_PREFERRED_TYPE(TypeClass)
+ unsigned TC : 8;
+
+ /// Store information on the type dependency.
+ LLVM_PREFERRED_TYPE(TypeDependence)
+ unsigned Dependence : llvm::BitWidth<TypeDependence>;
+
+ /// True if the cache (i.e. the bitfields here starting with
+ /// 'Cache') is valid.
+ LLVM_PREFERRED_TYPE(bool)
+ mutable unsigned CacheValid : 1;
+
+ /// Linkage of this type.
+ LLVM_PREFERRED_TYPE(Linkage)
+ mutable unsigned CachedLinkage : 3;
+
+    /// Whether this type involves any local or unnamed types.
+ LLVM_PREFERRED_TYPE(bool)
+ mutable unsigned CachedLocalOrUnnamed : 1;
+
+ /// Whether this type comes from an AST file.
+ LLVM_PREFERRED_TYPE(bool)
+ mutable unsigned FromAST : 1;
+
+ bool isCacheValid() const {
+ return CacheValid;
+ }
+
+ Linkage getLinkage() const {
+ assert(isCacheValid() && "getting linkage from invalid cache");
+ return static_cast<Linkage>(CachedLinkage);
+ }
+
+ bool hasLocalOrUnnamedType() const {
+      assert(isCacheValid() && "getting local-or-unnamed from invalid cache");
+ return CachedLocalOrUnnamed;
+ }
+ };
+ enum { NumTypeBits = 8 + llvm::BitWidth<TypeDependence> + 6 };
+
+protected:
+ // These classes allow subclasses to somewhat cleanly pack bitfields
+ // into Type.
+
+ class ArrayTypeBitfields {
+ friend class ArrayType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ /// CVR qualifiers from declarations like
+ /// 'int X[static restrict 4]'. For function parameters only.
+ LLVM_PREFERRED_TYPE(Qualifiers)
+ unsigned IndexTypeQuals : 3;
+
+    /// The array size modifier ('static' or '*') from declarations like
+    /// 'int X[static restrict 4]'. For function parameters only.
+ LLVM_PREFERRED_TYPE(ArraySizeModifier)
+ unsigned SizeModifier : 3;
+ };
+ enum { NumArrayTypeBits = NumTypeBits + 6 };
+
+ class ConstantArrayTypeBitfields {
+ friend class ConstantArrayType;
+
+ LLVM_PREFERRED_TYPE(ArrayTypeBitfields)
+ unsigned : NumArrayTypeBits;
+
+ /// Whether we have a stored size expression.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasExternalSize : 1;
+
+ LLVM_PREFERRED_TYPE(unsigned)
+ unsigned SizeWidth : 5;
+ };
+
+ class BuiltinTypeBitfields {
+ friend class BuiltinType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ /// The kind (BuiltinType::Kind) of builtin type this is.
+ static constexpr unsigned NumOfBuiltinTypeBits = 9;
+ unsigned Kind : NumOfBuiltinTypeBits;
+ };
+
+public:
+ static constexpr int FunctionTypeNumParamsWidth = 16;
+ static constexpr int FunctionTypeNumParamsLimit = (1 << 16) - 1;
+
+protected:
+ /// FunctionTypeBitfields store various bits belonging to FunctionProtoType.
+ /// Only common bits are stored here. Additional uncommon bits are stored
+ /// in a trailing object after FunctionProtoType.
+ class FunctionTypeBitfields {
+ friend class FunctionProtoType;
+ friend class FunctionType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ /// The ref-qualifier associated with a \c FunctionProtoType.
+ ///
+ /// This is a value of type \c RefQualifierKind.
+ LLVM_PREFERRED_TYPE(RefQualifierKind)
+ unsigned RefQualifier : 2;
+
+ /// Used only by FunctionProtoType, put here to pack with the
+ /// other bitfields.
+ /// The qualifiers are part of FunctionProtoType because...
+ ///
+ /// C++ 8.3.5p4: The return type, the parameter type list and the
+ /// cv-qualifier-seq, [...], are part of the function type.
+ LLVM_PREFERRED_TYPE(Qualifiers)
+ unsigned FastTypeQuals : Qualifiers::FastWidth;
+ /// Whether this function has extended Qualifiers.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasExtQuals : 1;
+
+ /// The type of exception specification this function has.
+ LLVM_PREFERRED_TYPE(ExceptionSpecificationType)
+ unsigned ExceptionSpecType : 4;
+
+ /// Whether this function has extended parameter information.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasExtParameterInfos : 1;
+
+ /// Whether this function has extra bitfields for the prototype.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasExtraBitfields : 1;
+
+ /// Whether the function is variadic.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned Variadic : 1;
+
+ /// Whether this function has a trailing return type.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasTrailingReturn : 1;
+
+    /// Whether this function is a CFI unchecked callee.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned CFIUncheckedCallee : 1;
+
+ /// Extra information which affects how the function is called, like
+ /// regparm and the calling convention.
+ LLVM_PREFERRED_TYPE(CallingConv)
+ unsigned ExtInfo : 14;
+
+ /// The number of parameters this function has, not counting '...'.
+ /// According to [implimits] 8 bits should be enough here but this is
+ /// somewhat easy to exceed with metaprogramming and so we would like to
+ /// keep NumParams as wide as reasonably possible.
+ unsigned NumParams : FunctionTypeNumParamsWidth;
+ };
+
+ class ObjCObjectTypeBitfields {
+ friend class ObjCObjectType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ /// The number of type arguments stored directly on this object type.
+ unsigned NumTypeArgs : 7;
+
+ /// The number of protocols stored directly on this object type.
+ unsigned NumProtocols : 6;
+
+ /// Whether this is a "kindof" type.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsKindOf : 1;
+ };
+
+ class ReferenceTypeBitfields {
+ friend class ReferenceType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ /// True if the type was originally spelled with an lvalue sigil.
+ /// This is never true of rvalue references but can also be false
+ /// on lvalue references because of C++0x [dcl.typedef]p9,
+ /// as follows:
+ ///
+ /// typedef int &ref; // lvalue, spelled lvalue
+ /// typedef int &&rvref; // rvalue
+ /// ref &a; // lvalue, inner ref, spelled lvalue
+ /// ref &&a; // lvalue, inner ref
+ /// rvref &a; // lvalue, inner ref, spelled lvalue
+ /// rvref &&a; // rvalue, inner ref
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned SpelledAsLValue : 1;
+
+ /// True if the inner type is a reference type. This only happens
+ /// in non-canonical forms.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned InnerRef : 1;
+ };
+
+ class KeywordWrapperBitfields {
+ template <class> friend class KeywordWrapper;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ /// An ElaboratedTypeKeyword. 8 bits for efficient access.
+ LLVM_PREFERRED_TYPE(ElaboratedTypeKeyword)
+ unsigned Keyword : 8;
+ };
+
+ enum { NumTypeWithKeywordBits = NumTypeBits + 8 };
+
+ class TagTypeBitfields {
+ friend class TagType;
+
+ LLVM_PREFERRED_TYPE(KeywordWrapperBitfields)
+ unsigned : NumTypeWithKeywordBits;
+
+ /// Whether the TagType has a trailing Qualifier.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasQualifier : 1;
+
+ /// Whether the TagType owns the Tag.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned OwnsTag : 1;
+
+ /// Whether the TagType was created from an injected name.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsInjected : 1;
+ };
+
+ class VectorTypeBitfields {
+ friend class VectorType;
+ friend class DependentVectorType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ /// The kind of vector, either a generic vector type or some
+ /// target-specific vector type such as for AltiVec or Neon.
+ LLVM_PREFERRED_TYPE(VectorKind)
+ unsigned VecKind : 4;
+ /// The number of elements in the vector.
+ uint32_t NumElements;
+ };
+
+ class AttributedTypeBitfields {
+ friend class AttributedType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ LLVM_PREFERRED_TYPE(attr::Kind)
+ unsigned AttrKind : 32 - NumTypeBits;
+ };
+
+ class AutoTypeBitfields {
+ friend class AutoType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ /// Was this placeholder type spelled as 'auto', 'decltype(auto)',
+    /// or '__auto_type'? Stores an AutoTypeKeyword value.
+ LLVM_PREFERRED_TYPE(AutoTypeKeyword)
+ unsigned Keyword : 2;
+
+ /// The number of template arguments in the type-constraints, which is
+ /// expected to be able to hold at least 1024 according to [implimits].
+ /// However as this limit is somewhat easy to hit with template
+ /// metaprogramming we'd prefer to keep it as large as possible.
+ /// At the moment it has been left as a non-bitfield since this type
+ /// safely fits in 64 bits as an unsigned, so there is no reason to
+ /// introduce the performance impact of a bitfield.
+ unsigned NumArgs;
+ };
+
+ class TypeOfBitfields {
+ friend class TypeOfType;
+ friend class TypeOfExprType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+ LLVM_PREFERRED_TYPE(TypeOfKind)
+ unsigned Kind : 1;
+ };
+
+ class UnresolvedUsingBitfields {
+ friend class UnresolvedUsingType;
+
+ LLVM_PREFERRED_TYPE(KeywordWrapperBitfields)
+ unsigned : NumTypeWithKeywordBits;
+
+ /// True if there is a non-null qualifier.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned hasQualifier : 1;
+ };
+
+ class UsingBitfields {
+ friend class UsingType;
+
+ LLVM_PREFERRED_TYPE(KeywordWrapperBitfields)
+ unsigned : NumTypeWithKeywordBits;
+
+ /// True if there is a non-null qualifier.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned hasQualifier : 1;
+ };
+
+ class TypedefBitfields {
+ friend class TypedefType;
+
+ LLVM_PREFERRED_TYPE(KeywordWrapperBitfields)
+ unsigned : NumTypeWithKeywordBits;
+
+ /// True if there is a non-null qualifier.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned hasQualifier : 1;
+
+ /// True if the underlying type is different from the declared one.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned hasTypeDifferentFromDecl : 1;
+ };
+
+ class TemplateTypeParmTypeBitfields {
+ friend class TemplateTypeParmType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ /// The depth of the template parameter.
+ unsigned Depth : 15;
+
+ /// Whether this is a template parameter pack.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned ParameterPack : 1;
+
+ /// The index of the template parameter.
+ unsigned Index : 16;
+ };
+
+ class SubstTemplateTypeParmTypeBitfields {
+ friend class SubstTemplateTypeParmType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasNonCanonicalUnderlyingType : 1;
+
+ // The index of the template parameter this substitution represents.
+ unsigned Index : 15;
+
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned Final : 1;
+
+ /// Represents the index within a pack if this represents a substitution
+ /// from a pack expansion. This index starts at the end of the pack and
+ /// increments towards the beginning.
+ /// Positive non-zero number represents the index + 1.
+ /// Zero means this is not substituted from an expansion.
+ unsigned PackIndex : 15;
+ };
+
+ class SubstPackTypeBitfields {
+ friend class SubstPackType;
+ friend class SubstTemplateTypeParmPackType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ /// The number of template arguments in \c Arguments, which is
+ /// expected to be able to hold at least 1024 according to [implimits].
+ /// However as this limit is somewhat easy to hit with template
+ /// metaprogramming we'd prefer to keep it as large as possible.
+ unsigned NumArgs : 16;
+
+ // The index of the template parameter this substitution represents.
+ // Only used by SubstTemplateTypeParmPackType. We keep it in the same
+ // class to avoid dealing with complexities of bitfields that go over
+ // the size of `unsigned`.
+ unsigned SubstTemplTypeParmPackIndex : 16;
+ };
+
+ class TemplateSpecializationTypeBitfields {
+ friend class TemplateSpecializationType;
+
+ LLVM_PREFERRED_TYPE(KeywordWrapperBitfields)
+ unsigned : NumTypeWithKeywordBits;
+
+ /// Whether this template specialization type is a substituted type alias.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned TypeAlias : 1;
+
+ /// The number of template arguments named in this class template
+ /// specialization, which is expected to be able to hold at least 1024
+ /// according to [implimits]. However, as this limit is somewhat easy to
+ /// hit with template metaprogramming we'd prefer to keep it as large
+ /// as possible. At the moment it has been left as a non-bitfield since
+ /// this type safely fits in 64 bits as an unsigned, so there is no reason
+ /// to introduce the performance impact of a bitfield.
+ unsigned NumArgs;
+ };
+
+ class DependentTemplateSpecializationTypeBitfields {
+ friend class DependentTemplateSpecializationType;
+
+ LLVM_PREFERRED_TYPE(KeywordWrapperBitfields)
+ unsigned : NumTypeWithKeywordBits;
+
+ /// The number of template arguments named in this class template
+ /// specialization, which is expected to be able to hold at least 1024
+ /// according to [implimits]. However, as this limit is somewhat easy to
+ /// hit with template metaprogramming we'd prefer to keep it as large
+ /// as possible. At the moment it has been left as a non-bitfield since
+ /// this type safely fits in 64 bits as an unsigned, so there is no reason
+ /// to introduce the performance impact of a bitfield.
+ unsigned NumArgs;
+ };
+
+ class PackExpansionTypeBitfields {
+ friend class PackExpansionType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ /// The number of expansions that this pack expansion will
+ /// generate when substituted (+1), which is expected to be able to
+ /// hold at least 1024 according to [implimits]. However, as this limit
+ /// is somewhat easy to hit with template metaprogramming we'd prefer to
+ /// keep it as large as possible. At the moment it has been left as a
+ /// non-bitfield since this type safely fits in 64 bits as an unsigned, so
+ /// there is no reason to introduce the performance impact of a bitfield.
+ ///
+ /// This field will only have a non-zero value when some of the parameter
+ /// packs that occur within the pattern have been substituted but others
+ /// have not.
+ unsigned NumExpansions;
+ };
+
+ enum class PredefinedSugarKind {
+ /// The "size_t" type.
+ SizeT,
+
+ /// The signed integer type corresponding to "size_t".
+ SignedSizeT,
+
+ /// The "ptrdiff_t" type.
+ PtrdiffT,
+
+ // Indicates how many items the enum has.
+ Last = PtrdiffT
+ };
+
+  class PredefinedSugarTypeBitfields {
+ friend class PredefinedSugarType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ LLVM_PREFERRED_TYPE(PredefinedSugarKind)
+ unsigned Kind : 8;
+ };
+
+ class CountAttributedTypeBitfields {
+ friend class CountAttributedType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ static constexpr unsigned NumCoupledDeclsBits = 4;
+ unsigned NumCoupledDecls : NumCoupledDeclsBits;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned CountInBytes : 1;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned OrNull : 1;
+ };
+ static_assert(sizeof(CountAttributedTypeBitfields) <= sizeof(unsigned));
+
+ union {
+ TypeBitfields TypeBits;
+ ArrayTypeBitfields ArrayTypeBits;
+ ConstantArrayTypeBitfields ConstantArrayTypeBits;
+ AttributedTypeBitfields AttributedTypeBits;
+ AutoTypeBitfields AutoTypeBits;
+ TypeOfBitfields TypeOfBits;
+ TypedefBitfields TypedefBits;
+ UnresolvedUsingBitfields UnresolvedUsingBits;
+ UsingBitfields UsingBits;
+ BuiltinTypeBitfields BuiltinTypeBits;
+ FunctionTypeBitfields FunctionTypeBits;
+ ObjCObjectTypeBitfields ObjCObjectTypeBits;
+ ReferenceTypeBitfields ReferenceTypeBits;
+ KeywordWrapperBitfields KeywordWrapperBits;
+ TagTypeBitfields TagTypeBits;
+ VectorTypeBitfields VectorTypeBits;
+ TemplateTypeParmTypeBitfields TemplateTypeParmTypeBits;
+ SubstTemplateTypeParmTypeBitfields SubstTemplateTypeParmTypeBits;
+ SubstPackTypeBitfields SubstPackTypeBits;
+ TemplateSpecializationTypeBitfields TemplateSpecializationTypeBits;
+ DependentTemplateSpecializationTypeBitfields
+ DependentTemplateSpecializationTypeBits;
+ PackExpansionTypeBitfields PackExpansionTypeBits;
+ CountAttributedTypeBitfields CountAttributedTypeBits;
+    PredefinedSugarTypeBitfields PredefinedSugarTypeBits;
+ };
+
+private:
+ template <class T> friend class TypePropertyCache;
+
+ /// Set whether this type comes from an AST file.
+ void setFromAST(bool V = true) const {
+ TypeBits.FromAST = V;
+ }
+
+protected:
+ friend class ASTContext;
+
+ Type(TypeClass tc, QualType canon, TypeDependence Dependence)
+ : ExtQualsTypeCommonBase(this,
+ canon.isNull() ? QualType(this_(), 0) : canon) {
+ static_assert(sizeof(*this) <=
+ alignof(decltype(*this)) + sizeof(ExtQualsTypeCommonBase),
+ "changing bitfields changed sizeof(Type)!");
+ static_assert(alignof(decltype(*this)) % TypeAlignment == 0,
+ "Insufficient alignment!");
+ TypeBits.TC = tc;
+ TypeBits.Dependence = static_cast<unsigned>(Dependence);
+ TypeBits.CacheValid = false;
+ TypeBits.CachedLocalOrUnnamed = false;
+ TypeBits.CachedLinkage = llvm::to_underlying(Linkage::Invalid);
+ TypeBits.FromAST = false;
+ }
+
+ // silence VC++ warning C4355: 'this' : used in base member initializer list
+ Type *this_() { return this; }
+
+ void setDependence(TypeDependence D) {
+ TypeBits.Dependence = static_cast<unsigned>(D);
+ }
+
+ void addDependence(TypeDependence D) { setDependence(getDependence() | D); }
+
+public:
+ friend class ASTReader;
+ friend class ASTWriter;
+ template <class T> friend class serialization::AbstractTypeReader;
+ template <class T> friend class serialization::AbstractTypeWriter;
+
+ Type(const Type &) = delete;
+ Type(Type &&) = delete;
+ Type &operator=(const Type &) = delete;
+ Type &operator=(Type &&) = delete;
+
+ TypeClass getTypeClass() const { return static_cast<TypeClass>(TypeBits.TC); }
+
+ /// Whether this type comes from an AST file.
+ bool isFromAST() const { return TypeBits.FromAST; }
+
+ /// Whether this type is or contains an unexpanded parameter
+ /// pack, used to support C++0x variadic templates.
+ ///
+ /// A type that contains a parameter pack shall be expanded by the
+ /// ellipsis operator at some point. For example, the typedef in the
+ /// following example contains an unexpanded parameter pack 'T':
+ ///
+ /// \code
+ /// template<typename ...T>
+ /// struct X {
+ /// typedef T* pointer_types; // ill-formed; T is a parameter pack.
+ /// };
+ /// \endcode
+ ///
+  /// Note that this routine does not specify which parameter packs within
+  /// the type are unexpanded.
+ bool containsUnexpandedParameterPack() const {
+ return getDependence() & TypeDependence::UnexpandedPack;
+ }
+
+ /// Determines if this type would be canonical if it had no further
+ /// qualification.
+ bool isCanonicalUnqualified() const {
+ return CanonicalType == QualType(this, 0);
+ }
+
+ /// Pull a single level of sugar off of this locally-unqualified type.
+ /// Users should generally prefer SplitQualType::getSingleStepDesugaredType()
+ /// or QualType::getSingleStepDesugaredType(const ASTContext&).
+ QualType getLocallyUnqualifiedSingleStepDesugaredType() const;
+
+ /// As an extension, we classify types as one of "sized" or "sizeless";
+ /// every type is one or the other. Standard types are all sized;
+ /// sizeless types are purely an extension.
+ ///
+ /// Sizeless types contain data with no specified size, alignment,
+ /// or layout.
+ bool isSizelessType() const;
+ bool isSizelessBuiltinType() const;
+
+ /// Returns true for all scalable vector types.
+ bool isSizelessVectorType() const;
+
+ /// Returns true for SVE scalable vector types.
+ bool isSVESizelessBuiltinType() const;
+
+ /// Returns true for RVV scalable vector types.
+ bool isRVVSizelessBuiltinType() const;
+
+ /// Check if this is a WebAssembly Externref Type.
+ bool isWebAssemblyExternrefType() const;
+
+ /// Returns true if this is a WebAssembly table type: either an array of
+ /// reference types, or a pointer to a reference type (which can only be
+ /// created by array to pointer decay).
+ bool isWebAssemblyTableType() const;
+
+ /// Determines if this is a sizeless type supported by the
+ /// 'arm_sve_vector_bits' type attribute, which can be applied to a single
+ /// SVE vector or predicate, excluding tuple types such as svint32x4_t.
+ bool isSveVLSBuiltinType() const;
+
+ /// Returns the representative type for the element of an SVE builtin type.
+ /// This is used to represent fixed-length SVE vectors created with the
+ /// 'arm_sve_vector_bits' type attribute as VectorType.
+ QualType getSveEltType(const ASTContext &Ctx) const;
+
+ /// Determines if this is a sizeless type supported by the
+ /// 'riscv_rvv_vector_bits' type attribute, which can be applied to a single
+ /// RVV vector or mask.
+ bool isRVVVLSBuiltinType() const;
+
+ /// Returns the representative type for the element of an RVV builtin type.
+ /// This is used to represent fixed-length RVV vectors created with the
+ /// 'riscv_rvv_vector_bits' type attribute as VectorType.
+ QualType getRVVEltType(const ASTContext &Ctx) const;
+
+ /// Returns the representative type for the element of a sizeless vector
+ /// builtin type.
+ QualType getSizelessVectorEltType(const ASTContext &Ctx) const;
+
+ /// Types are partitioned into 3 broad categories (C99 6.2.5p1):
+ /// object types, function types, and incomplete types.
+
+ /// Return true if this is an incomplete type.
+ /// A type that can describe objects, but which lacks information needed to
+  /// determine its size (e.g. void, or a forward-declared struct). Clients of
+  /// this
+ /// routine will need to determine if the size is actually required.
+ ///
+  /// \param Def If non-null, and the type refers to some kind of declaration
+ /// that can be completed (such as a C struct, C++ class, or Objective-C
+ /// class), will be set to the declaration.
+ bool isIncompleteType(NamedDecl **Def = nullptr) const;
+
+ /// Return true if this is an incomplete or object
+ /// type, in other words, not a function type.
+ bool isIncompleteOrObjectType() const {
+ return !isFunctionType();
+ }
+
+ /// \returns True if the type is incomplete and it is also a type that
+ /// cannot be completed by a later type definition.
+ ///
+  /// E.g., for `void` this is true, but for `struct ForwardDecl;` it is false
+ /// because a definition for `ForwardDecl` could be provided later on in the
+ /// translation unit.
+ ///
+ /// Note even for types that this function returns true for it is still
+ /// possible for the declarations that contain this type to later have a
+ /// complete type in a translation unit. E.g.:
+ ///
+ /// \code{.c}
+ /// // This decl has type 'char[]' which is incomplete and cannot be later
+  /// // completed by another type declaration.
+ /// extern char foo[];
+ /// // This decl now has complete type 'char[5]'.
+  /// char foo[5];
+ /// \endcode
+ bool isAlwaysIncompleteType() const;
+
+ /// Determine whether this type is an object type.
+ bool isObjectType() const {
+ // C++ [basic.types]p8:
+ // An object type is a (possibly cv-qualified) type that is not a
+ // function type, not a reference type, and not a void type.
+ return !isReferenceType() && !isFunctionType() && !isVoidType();
+ }
+
+ /// Return true if this is a literal type
+ /// (C++11 [basic.types]p10)
+ bool isLiteralType(const ASTContext &Ctx) const;
+
+ /// Determine if this type is a structural type, per C++20 [temp.param]p7.
+ bool isStructuralType() const;
+
+ /// Test if this type is a standard-layout type.
+ /// (C++0x [basic.type]p9)
+ bool isStandardLayoutType() const;
+
+ /// Helper methods to distinguish type categories. All type predicates
+ /// operate on the canonical type, ignoring typedefs and qualifiers.
+
+ /// Returns true if the type is a builtin type.
+ bool isBuiltinType() const;
+
+ /// Test for a particular builtin type.
+ bool isSpecificBuiltinType(unsigned K) const;
+
+ /// Test for a type which does not represent an actual type-system type but
+ /// is instead used as a placeholder for various convenient purposes within
+ /// Clang. All such types are BuiltinTypes.
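+  /// For example, an expression naming an overloaded function has the
+  /// 'Overload' placeholder type until overload resolution selects a
+  /// candidate.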
+ bool isPlaceholderType() const;
+ const BuiltinType *getAsPlaceholderType() const;
+
+ /// Test for a specific placeholder type.
+ bool isSpecificPlaceholderType(unsigned K) const;
+
+ /// Test for a placeholder type other than Overload; see
+ /// BuiltinType::isNonOverloadPlaceholderType.
+ bool isNonOverloadPlaceholderType() const;
+
+ /// isIntegerType() does *not* include complex integers (a GCC extension).
+ /// isComplexIntegerType() can be used to test for complex integers.
+ bool isIntegerType() const; // C99 6.2.5p17 (int, char, bool, enum)
+ bool isEnumeralType() const;
+
+ /// Determine whether this type is a scoped enumeration type.
+ bool isScopedEnumeralType() const;
+ bool isBooleanType() const;
+ bool isCharType() const;
+ bool isWideCharType() const;
+ bool isChar8Type() const;
+ bool isChar16Type() const;
+ bool isChar32Type() const;
+ bool isAnyCharacterType() const;
+ bool isUnicodeCharacterType() const;
+ bool isIntegralType(const ASTContext &Ctx) const;
+
+ /// Determine whether this type is an integral or enumeration type.
+ bool isIntegralOrEnumerationType() const;
+
+ /// Determine whether this type is an integral or unscoped enumeration type.
+ bool isIntegralOrUnscopedEnumerationType() const;
+ bool isUnscopedEnumerationType() const;
+
+ /// Floating point categories.
+ bool isRealFloatingType() const; // C99 6.2.5p10 (float, double, long double)
+ /// isComplexType() does *not* include complex integers (a GCC extension).
+ /// isComplexIntegerType() can be used to test for complex integers.
+ bool isComplexType() const; // C99 6.2.5p11 (complex)
+ bool isAnyComplexType() const; // C99 6.2.5p11 (complex) + Complex Int.
+ bool isFloatingType() const; // C99 6.2.5p11 (real floating + complex)
+ bool isHalfType() const; // OpenCL 6.1.1.1, NEON (IEEE 754-2008 half)
+ bool isFloat16Type() const; // C11 extension ISO/IEC TS 18661
+ bool isFloat32Type() const;
+ bool isDoubleType() const;
+ bool isBFloat16Type() const;
+ bool isMFloat8Type() const;
+ bool isFloat128Type() const;
+ bool isIbm128Type() const;
+ bool isRealType() const; // C99 6.2.5p17 (real floating + integer)
+ bool isArithmeticType() const; // C99 6.2.5p18 (integer + floating)
+ bool isVoidType() const; // C99 6.2.5p19
+ bool isScalarType() const; // C99 6.2.5p21 (arithmetic + pointers)
+ bool isAggregateType() const;
+ bool isFundamentalType() const;
+ bool isCompoundType() const;
+
+ // Type Predicates: Check to see if this type is structurally the specified
+ // type, ignoring typedefs and qualifiers.
+ bool isFunctionType() const;
+ bool isFunctionNoProtoType() const { return getAs<FunctionNoProtoType>(); }
+ bool isFunctionProtoType() const { return getAs<FunctionProtoType>(); }
+ bool isPointerType() const;
+ bool isPointerOrReferenceType() const;
+ bool isSignableType(const ASTContext &Ctx) const;
+ bool isSignablePointerType() const;
+ bool isSignableIntegerType(const ASTContext &Ctx) const;
+ bool isAnyPointerType() const; // Any C pointer or ObjC object pointer
+ bool isCountAttributedType() const;
+ bool isCFIUncheckedCalleeFunctionType() const;
+ bool hasPointeeToToCFIUncheckedCalleeFunctionType() const;
+ bool isBlockPointerType() const;
+ bool isVoidPointerType() const;
+ bool isReferenceType() const;
+ bool isLValueReferenceType() const;
+ bool isRValueReferenceType() const;
+ bool isObjectPointerType() const;
+ bool isFunctionPointerType() const;
+ bool isFunctionReferenceType() const;
+ bool isMemberPointerType() const;
+ bool isMemberFunctionPointerType() const;
+ bool isMemberDataPointerType() const;
+ bool isArrayType() const;
+ bool isConstantArrayType() const;
+ bool isIncompleteArrayType() const;
+ bool isVariableArrayType() const;
+ bool isArrayParameterType() const;
+ bool isDependentSizedArrayType() const;
+ bool isRecordType() const;
+ bool isClassType() const;
+ bool isStructureType() const;
+ bool isStructureTypeWithFlexibleArrayMember() const;
+ bool isObjCBoxableRecordType() const;
+ bool isInterfaceType() const;
+ bool isStructureOrClassType() const;
+ bool isUnionType() const;
+ bool isComplexIntegerType() const; // GCC _Complex integer type.
+ bool isVectorType() const; // GCC vector type.
+ bool isExtVectorType() const; // Extended vector type.
+ bool isExtVectorBoolType() const; // Extended vector type with bool element.
+ // Extended vector type with bool element that is packed. HLSL doesn't pack
+ // its bool vectors.
+ bool isPackedVectorBoolType(const ASTContext &ctx) const;
+ bool isSubscriptableVectorType() const;
+ bool isMatrixType() const; // Matrix type.
+ bool isConstantMatrixType() const; // Constant matrix type.
+ bool isDependentAddressSpaceType() const; // value-dependent address space qualifier
+ bool isObjCObjectPointerType() const; // pointer to ObjC object
+ bool isObjCRetainableType() const; // ObjC object or block pointer
+ bool isObjCLifetimeType() const; // (array of)* retainable type
+ bool isObjCIndirectLifetimeType() const; // (pointer to)* lifetime type
+ bool isObjCNSObjectType() const; // __attribute__((NSObject))
+ bool isObjCIndependentClassType() const; // __attribute__((objc_independent_class))
+  // FIXME: change this to 'raw' interface type, so we can use 'interface' type
+ // for the common case.
+ bool isObjCObjectType() const; // NSString or typeof(*(id)0)
+ bool isObjCQualifiedInterfaceType() const; // NSString<foo>
+ bool isObjCQualifiedIdType() const; // id<foo>
+ bool isObjCQualifiedClassType() const; // Class<foo>
+ bool isObjCObjectOrInterfaceType() const;
+ bool isObjCIdType() const; // id
+ bool isDecltypeType() const;
+ /// Was this type written with the special inert-in-ARC __unsafe_unretained
+ /// qualifier?
+ ///
+ /// This approximates the answer to the following question: if this
+ /// translation unit were compiled in ARC, would this type be qualified
+ /// with __unsafe_unretained?
+ bool isObjCInertUnsafeUnretainedType() const {
+ return hasAttr(attr::ObjCInertUnsafeUnretained);
+ }
+
+ /// Whether the type is Objective-C 'id' or a __kindof type of an
+ /// object type, e.g., __kindof NSView * or __kindof id
+ /// <NSCopying>.
+ ///
+ /// \param bound Will be set to the bound on non-id subtype types,
+  /// which will be a (possibly specialized) Objective-C class type, or
+  /// null for 'id'.
+ bool isObjCIdOrObjectKindOfType(const ASTContext &ctx,
+ const ObjCObjectType *&bound) const;
+
+ bool isObjCClassType() const; // Class
+
+  /// Whether the type is Objective-C 'Class' or a __kindof type of a
+  /// Class type, e.g., __kindof Class <NSCopying>.
+ ///
+ /// Unlike \c isObjCIdOrObjectKindOfType, there is no relevant bound
+ /// here because Objective-C's type system cannot express "a class
+ /// object for a subclass of NSFoo".
+ bool isObjCClassOrClassKindOfType() const;
+
+ bool isBlockCompatibleObjCPointerType(ASTContext &ctx) const;
+  bool isObjCSelType() const; // SEL
+ bool isObjCBuiltinType() const; // 'id' or 'Class'
+ bool isObjCARCBridgableType() const;
+ bool isCARCBridgableType() const;
+ bool isTemplateTypeParmType() const; // C++ template type parameter
+ bool isNullPtrType() const; // C++11 std::nullptr_t or
+ // C23 nullptr_t
+ bool isNothrowT() const; // C++ std::nothrow_t
+ bool isAlignValT() const; // C++17 std::align_val_t
+ bool isStdByteType() const; // C++17 std::byte
+ bool isAtomicType() const; // C11 _Atomic()
+ bool isUndeducedAutoType() const; // C++11 auto or
+ // C++14 decltype(auto)
+ bool isTypedefNameType() const; // typedef or alias template
+
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
+ bool is##Id##Type() const;
+#include "clang/Basic/OpenCLImageTypes.def"
+
+ bool isImageType() const; // Any OpenCL image type
+
+ bool isSamplerT() const; // OpenCL sampler_t
+ bool isEventT() const; // OpenCL event_t
+ bool isClkEventT() const; // OpenCL clk_event_t
+ bool isQueueT() const; // OpenCL queue_t
+ bool isReserveIDT() const; // OpenCL reserve_id_t
+
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ bool is##Id##Type() const;
+#include "clang/Basic/OpenCLExtensionTypes.def"
+ // Type defined in cl_intel_device_side_avc_motion_estimation OpenCL extension
+ bool isOCLIntelSubgroupAVCType() const;
+ bool isOCLExtOpaqueType() const; // Any OpenCL extension type
+
+ bool isPipeType() const; // OpenCL pipe type
+ bool isBitIntType() const; // Bit-precise integer type
+ bool isOpenCLSpecificType() const; // Any OpenCL specific type
+
+#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) bool is##Id##Type() const;
+#include "clang/Basic/HLSLIntangibleTypes.def"
+ bool isHLSLSpecificType() const; // Any HLSL specific type
+ bool isHLSLBuiltinIntangibleType() const; // Any HLSL builtin intangible type
+ bool isHLSLAttributedResourceType() const;
+ bool isHLSLInlineSpirvType() const;
+ bool isHLSLResourceRecord() const;
+ bool isHLSLResourceRecordArray() const;
+ bool isHLSLIntangibleType()
+ const; // Any HLSL intangible type (builtin, array, class)
+
+ /// Determines if this type, which must satisfy
+ /// isObjCLifetimeType(), is implicitly __unsafe_unretained rather
+ /// than implicitly __strong.
+ bool isObjCARCImplicitlyUnretainedType() const;
+
+ /// Check if the type is the CUDA device builtin surface type.
+ bool isCUDADeviceBuiltinSurfaceType() const;
+ /// Check if the type is the CUDA device builtin texture type.
+ bool isCUDADeviceBuiltinTextureType() const;
+
+ /// Return the implicit lifetime for this type, which must not be dependent.
+ Qualifiers::ObjCLifetime getObjCARCImplicitLifetime() const;
+
+ enum ScalarTypeKind {
+ STK_CPointer,
+ STK_BlockPointer,
+ STK_ObjCObjectPointer,
+ STK_MemberPointer,
+ STK_Bool,
+ STK_Integral,
+ STK_Floating,
+ STK_IntegralComplex,
+ STK_FloatingComplex,
+ STK_FixedPoint
+ };
+
+ /// Given that this is a scalar type, classify it.
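+  /// For example: 'int' classifies as STK_Integral, 'float' as
+  /// STK_Floating, 'int *' as STK_CPointer, and 'bool' as STK_Bool.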
+ ScalarTypeKind getScalarTypeKind() const;
+
+ TypeDependence getDependence() const {
+ return static_cast<TypeDependence>(TypeBits.Dependence);
+ }
+
+ /// Whether this type is an error type.
+ bool containsErrors() const {
+ return getDependence() & TypeDependence::Error;
+ }
+
+ /// Whether this type is a dependent type, meaning that its definition
+ /// somehow depends on a template parameter (C++ [temp.dep.type]).
+ bool isDependentType() const {
+ return getDependence() & TypeDependence::Dependent;
+ }
+
+ /// Determine whether this type is an instantiation-dependent type,
+ /// meaning that the type involves a template parameter (even if the
+ /// definition does not actually depend on the type substituted for that
+ /// template parameter).
+ bool isInstantiationDependentType() const {
+ return getDependence() & TypeDependence::Instantiation;
+ }
+
+ /// Determine whether this type is an undeduced type, meaning that
+ /// it somehow involves a C++11 'auto' type or similar which has not yet been
+ /// deduced.
+ bool isUndeducedType() const;
+
+ /// Whether this type is a variably-modified type (C99 6.7.5).
+ bool isVariablyModifiedType() const {
+ return getDependence() & TypeDependence::VariablyModified;
+ }
+
+ /// Whether this type involves a variable-length array type
+ /// with a definite size.
+ bool hasSizedVLAType() const;
+
+ /// Whether this type is or contains a local or unnamed type.
+ bool hasUnnamedOrLocalType() const;
+
+ bool isOverloadableType() const;
+
+  /// Determine whether this type is a C++ elaborated-type-specifier.
+ bool isElaboratedTypeSpecifier() const;
+
+ bool canDecayToPointerType() const;
+
+ /// Whether this type is represented natively as a pointer. This includes
+ /// pointers, references, block pointers, and Objective-C interface,
+ /// qualified id, and qualified interface types, as well as nullptr_t.
+ bool hasPointerRepresentation() const;
+
+  /// Whether this type can represent an Objective-C pointer type for the
+  /// purpose of GC'ability.
+ bool hasObjCPointerRepresentation() const;
+
+ /// Determine whether this type has an integer representation
+ /// of some sort, e.g., it is an integer type or a vector.
+ bool hasIntegerRepresentation() const;
+
+  /// Determine whether this type has a signed integer representation
+  /// of some sort, e.g., it is a signed integer type or a vector.
+ bool hasSignedIntegerRepresentation() const;
+
+ /// Determine whether this type has an unsigned integer representation
+ /// of some sort, e.g., it is an unsigned integer type or a vector.
+ bool hasUnsignedIntegerRepresentation() const;
+
+ /// Determine whether this type has a floating-point representation
+ /// of some sort, e.g., it is a floating-point type or a vector thereof.
+ bool hasFloatingRepresentation() const;
+
+ /// Determine whether this type has a boolean representation -- i.e., it is a
+ /// boolean type, an enum type whose underlying type is a boolean type, or a
+ /// vector of booleans.
+ bool hasBooleanRepresentation() const;
+
+ // Type Checking Functions: Check to see if this type is structurally the
+ // specified type, ignoring typedefs and qualifiers, and return a pointer to
+ // the best type we can.
+ const RecordType *getAsStructureType() const;
+ /// NOTE: getAs*ArrayType are methods on ASTContext.
+ const RecordType *getAsUnionType() const;
+ const ComplexType *getAsComplexIntegerType() const; // GCC complex int type.
+ const ObjCObjectType *getAsObjCInterfaceType() const;
+
+  // The following is a convenience method that returns an
+  // ObjCObjectPointerType for an object declared using an interface.
+ const ObjCObjectPointerType *getAsObjCInterfacePointerType() const;
+ const ObjCObjectPointerType *getAsObjCQualifiedIdType() const;
+ const ObjCObjectPointerType *getAsObjCQualifiedClassType() const;
+ const ObjCObjectType *getAsObjCQualifiedInterfaceType() const;
+
+ /// Retrieves the CXXRecordDecl that this type refers to, either
+ /// because the type is a RecordType or because it is the injected-class-name
+ /// type of a class template or class template partial specialization.
+ inline CXXRecordDecl *getAsCXXRecordDecl() const;
+ inline CXXRecordDecl *castAsCXXRecordDecl() const;
+
+ /// Retrieves the RecordDecl this type refers to.
+ inline RecordDecl *getAsRecordDecl() const;
+ inline RecordDecl *castAsRecordDecl() const;
+
+ /// Retrieves the EnumDecl this type refers to.
+ inline EnumDecl *getAsEnumDecl() const;
+ inline EnumDecl *castAsEnumDecl() const;
+
+ /// Retrieves the TagDecl that this type refers to, either
+ /// because the type is a TagType or because it is the injected-class-name
+ /// type of a class template or class template partial specialization.
+ inline TagDecl *getAsTagDecl() const;
+ inline TagDecl *castAsTagDecl() const;
+
+ /// If this is a pointer or reference to a RecordType, return the
+ /// CXXRecordDecl that the type refers to.
+ ///
+ /// If this is not a pointer or reference, or the type being pointed to does
+ /// not refer to a CXXRecordDecl, returns NULL.
+ const CXXRecordDecl *getPointeeCXXRecordDecl() const;
+
+ /// Get the DeducedType whose type will be deduced for a variable with
+ /// an initializer of this type. This looks through declarators like pointer
+ /// types, but not through decltype or typedefs.
+ DeducedType *getContainedDeducedType() const;
+
+ /// Get the AutoType whose type will be deduced for a variable with
+ /// an initializer of this type. This looks through declarators like pointer
+ /// types, but not through decltype or typedefs.
+ AutoType *getContainedAutoType() const {
+ return dyn_cast_or_null<AutoType>(getContainedDeducedType());
+ }
+
+ /// Determine whether this type was written with a leading 'auto'
+ /// corresponding to a trailing return type (possibly for a nested
+ /// function type within a pointer to function type or similar).
+ bool hasAutoForTrailingReturnType() const;
+
+  /// Member-template getAs<specific type>. Look through sugar for
+ /// an instance of \<specific type>. This scheme will eventually
+ /// replace the specific getAsXXXX methods above.
+ ///
+ /// There are some specializations of this member template listed
+ /// immediately following this class.
+ ///
+ /// If you are interested only in the canonical properties of this type,
+ /// consider using getAsCanonical instead, as that is much faster.
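+  ///
+  /// A usage sketch (Ty is an illustrative 'const Type *'):
+  /// \code
+  /// if (const auto *PT = Ty->getAs<PointerType>())
+  ///   QualType Pointee = PT->getPointeeType();
+  /// \endcode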
+ template <typename T> const T *getAs() const;
+
+ /// If this type is canonically the specified type, return its canonical type
+ /// cast to that specified type, otherwise returns null.
+ template <typename T> const T *getAsCanonical() const {
+ return dyn_cast<T>(CanonicalType);
+ }
+
+ /// Return this type's canonical type cast to the specified type.
+ /// If the type is not canonically that specified type, the behaviour is
+ /// undefined.
+ template <typename T> const T *castAsCanonical() const {
+ return cast<T>(CanonicalType);
+ }
+
+// It is not helpful to use these on types which are never canonical.
+#define TYPE(Class, Base)
+#define NEVER_CANONICAL_TYPE(Class) \
+ template <> inline const Class##Type *Type::getAsCanonical() const = delete; \
+ template <> inline const Class##Type *Type::castAsCanonical() const = delete;
+#include "clang/AST/TypeNodes.inc"
+
+ /// Look through sugar for an instance of TemplateSpecializationType which
+ /// is not a type alias, or null if there is no such type.
+ /// This is used when you want as-written template arguments or the template
+ /// name for a class template specialization.
+ const TemplateSpecializationType *
+ getAsNonAliasTemplateSpecializationType() const;
+
+ const TemplateSpecializationType *
+ castAsNonAliasTemplateSpecializationType() const {
+ const auto *TST = getAsNonAliasTemplateSpecializationType();
+ assert(TST && "not a TemplateSpecializationType");
+ return TST;
+ }
+
+ /// Member-template getAsAdjusted<specific type>. Look through specific kinds
+ /// of sugar (parens, attributes, etc) for an instance of \<specific type>.
+ /// This is used when you need to walk over sugar nodes that represent some
+ /// kind of type adjustment from a type that was written as a \<specific type>
+ /// to another type that is still canonically a \<specific type>.
+ template <typename T> const T *getAsAdjusted() const;
+
+ /// A variant of getAs<> for array types which silently discards
+ /// qualifiers from the outermost type.
+ const ArrayType *getAsArrayTypeUnsafe() const;
+
+ /// Member-template castAs<specific type>. Look through sugar for
+ /// the underlying instance of \<specific type>.
+ ///
+ /// This method has the same relationship to getAs<T> as cast<T> has
+ /// to dyn_cast<T>; which is to say, the underlying type *must*
+ /// have the intended type, and this method will never return null.
+ template <typename T> const T *castAs() const;
+
+  /// A variant of castAs<> for array types which silently discards
+ /// qualifiers from the outermost type.
+ const ArrayType *castAsArrayTypeUnsafe() const;
+
+ /// If this type represents a qualified-id, this returns its nested name
+ /// specifier. For example, for the qualified-id "foo::bar::baz", this returns
+ /// "foo::bar". Returns null if this type represents an unqualified-id.
+ NestedNameSpecifier getPrefix() const;
+
+ /// Determine whether this type had the specified attribute applied to it
+ /// (looking through top-level type sugar).
+ bool hasAttr(attr::Kind AK) const;
+
+ /// Get the base element type of this type, potentially discarding type
+ /// qualifiers. This should never be used when type qualifiers
+ /// are meaningful.
+ const Type *getBaseElementTypeUnsafe() const;
+
+ /// If this is an array type, return the element type of the array,
+ /// potentially with type qualifiers missing.
+ /// This should never be used when type qualifiers are meaningful.
+ const Type *getArrayElementTypeNoTypeQual() const;
+
+ /// If this is a pointer type, return the pointee type.
+ /// If this is an array type, return the array element type.
+ /// This should never be used when type qualifiers are meaningful.
+ const Type *getPointeeOrArrayElementType() const;
+
+ /// If this is a pointer, ObjC object pointer, or block
+ /// pointer, this returns the respective pointee.
+ QualType getPointeeType() const;
+
+ /// Return the specified type with any "sugar" removed from the type,
+ /// removing any typedefs, typeofs, etc., as well as any qualifiers.
+ const Type *getUnqualifiedDesugaredType() const;
+
+ /// Return true if this is an integer type that is
+ /// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
+ /// or an enum decl which has a signed representation.
+ bool isSignedIntegerType() const;
+
+ /// Return true if this is an integer type that is
+ /// unsigned, according to C99 6.2.5p6 [which returns true for _Bool],
+ /// or an enum decl which has an unsigned representation.
+ bool isUnsignedIntegerType() const;
+
+ /// Determines whether this is an integer type that is signed or an
+  /// enumeration type whose underlying type is a signed integer type.
+ bool isSignedIntegerOrEnumerationType() const;
+
+ /// Determines whether this is an integer type that is unsigned or an
+  /// enumeration type whose underlying type is an unsigned integer type.
+ bool isUnsignedIntegerOrEnumerationType() const;
+
+ /// Return true if this is a fixed point type according to
+ /// ISO/IEC JTC1 SC22 WG14 N1169.
+ bool isFixedPointType() const;
+
+ /// Return true if this is a fixed point or integer type.
+ bool isFixedPointOrIntegerType() const;
+
+ /// Return true if this can be converted to (or from) a fixed point type.
+ bool isConvertibleToFixedPointType() const;
+
+ /// Return true if this is a saturated fixed point type according to
+ /// ISO/IEC JTC1 SC22 WG14 N1169. This type can be signed or unsigned.
+ bool isSaturatedFixedPointType() const;
+
+  /// Return true if this is an unsaturated fixed point type according to
+ /// ISO/IEC JTC1 SC22 WG14 N1169. This type can be signed or unsigned.
+ bool isUnsaturatedFixedPointType() const;
+
+ /// Return true if this is a fixed point type that is signed according
+ /// to ISO/IEC JTC1 SC22 WG14 N1169. This type can also be saturated.
+ bool isSignedFixedPointType() const;
+
+ /// Return true if this is a fixed point type that is unsigned according
+ /// to ISO/IEC JTC1 SC22 WG14 N1169. This type can also be saturated.
+ bool isUnsignedFixedPointType() const;
+
+ /// Return true if this is not a variable sized type,
+ /// according to the rules of C99 6.7.5p3. It is not legal to call this on
+ /// incomplete types.
+ bool isConstantSizeType() const;
+
+ /// Returns true if this type can be represented by some
+ /// set of type specifiers.
+ bool isSpecifierType() const;
+
+ /// Determine the linkage of this type.
+ Linkage getLinkage() const;
+
+ /// Determine the visibility of this type.
+ Visibility getVisibility() const {
+ return getLinkageAndVisibility().getVisibility();
+ }
+
+  /// Return true if the visibility was explicitly set in the code.
+ bool isVisibilityExplicit() const {
+ return getLinkageAndVisibility().isVisibilityExplicit();
+ }
+
+ /// Determine the linkage and visibility of this type.
+ LinkageInfo getLinkageAndVisibility() const;
+
+ /// True if the computed linkage is valid. Used for consistency
+ /// checking. Should always return true.
+ bool isLinkageValid() const;
+
+ /// Determine the nullability of the given type.
+ ///
+ /// Note that nullability is only captured as sugar within the type
+ /// system, not as part of the canonical type, so nullability will
+ /// be lost by canonicalization and desugaring.
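+ ///
+ /// For illustration, a minimal sketch (assuming `T` is a QualType spelled
+ /// 'int * _Nullable'):
+ /// \code
+ ///   if (std::optional<NullabilityKind> N = T->getNullability())
+ ///     assert(*N == NullabilityKind::Nullable);
+ /// \endcode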
+ std::optional<NullabilityKind> getNullability() const;
+
+ /// Determine whether the given type can have a nullability
+ /// specifier applied to it, i.e., if it is any kind of pointer type.
+ ///
+ /// \param ResultIfUnknown The value to return if we don't yet know whether
+ /// this type can have nullability because it is dependent.
+ bool canHaveNullability(bool ResultIfUnknown = true) const;
+
+ /// Retrieve the set of substitutions required when accessing a member
+ /// of the Objective-C receiver type that is declared in the given context.
+ ///
+ /// \c *this is the type of the object we're operating on, e.g., the
+ /// receiver for a message send or the base of a property access, and is
+ /// expected to be of some object or object pointer type.
+ ///
+ /// \param dc The declaration context for which we are building up a
+ /// substitution mapping, which should be an Objective-C class, extension,
+ /// category, or a method within one of these.
+ ///
+ /// \returns an array of type arguments that can be substituted for
+ /// the type parameters of the given declaration context in any type described
+ /// within that context, or an empty optional to indicate that no
+ /// substitution is required.
+ std::optional<ArrayRef<QualType>>
+ getObjCSubstitutions(const DeclContext *dc) const;
+
+ /// Determines if this is an ObjC interface type that may accept type
+ /// parameters.
+ bool acceptsObjCTypeParams() const;
+
+ const char *getTypeClassName() const;
+
+ QualType getCanonicalTypeInternal() const {
+ return CanonicalType;
+ }
+
+ CanQualType getCanonicalTypeUnqualified() const; // in CanonicalType.h
+ void dump() const;
+ void dump(llvm::raw_ostream &OS, const ASTContext &Context) const;
+};
+
+/// This will check for a TypedefType by removing any existing sugar
+/// until it reaches a TypedefType or a non-sugared type.
+template <> const TypedefType *Type::getAs() const;
+template <> const UsingType *Type::getAs() const;
+
+/// This will check for a TemplateSpecializationType by removing any
+/// existing sugar until it reaches a TemplateSpecializationType or a
+/// non-sugared type.
+template <> const TemplateSpecializationType *Type::getAs() const;
+
+/// This will check for an AttributedType by removing any existing sugar
+/// until it reaches an AttributedType or a non-sugared type.
+template <> const AttributedType *Type::getAs() const;
+
+/// This will check for a BoundsAttributedType by removing any existing
+/// sugar until it reaches a BoundsAttributedType or a non-sugared type.
+template <> const BoundsAttributedType *Type::getAs() const;
+
+/// This will check for a CountAttributedType by removing any existing
+/// sugar until it reaches a CountAttributedType or a non-sugared type.
+template <> const CountAttributedType *Type::getAs() const;
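+
+// For illustration, a hedged sketch of the sugar-stripping behavior above,
+// assuming `T` is a 'const Type *' for a variable declared via a typedef:
+//
+//   typedef int Length;  // 'Length' is sugar for 'int'
+//   // T->getAs<TypedefType>() stops at the TypedefType node and returns it;
+//   // T->getAs<BuiltinType>() keeps desugaring and reaches the 'int' node.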
+
+// We can handle always-canonical types faster, because we don't have to
+// worry about preserving decoration.
+#define TYPE(Class, Base)
+#define ALWAYS_CANONICAL_TYPE(Class) \
+ template <> inline const Class##Type *Type::getAs() const { \
+ return dyn_cast<Class##Type>(CanonicalType); \
+ } \
+ template <> inline const Class##Type *Type::castAs() const { \
+ return cast<Class##Type>(CanonicalType); \
+ }
+#include "clang/AST/TypeNodes.inc"
+
+/// This class is used for builtin types like 'int'. Builtin
+/// types are always canonical and have a literal name field.
+class BuiltinType : public Type {
+public:
+ enum Kind {
+// OpenCL image types
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) Id,
+#include "clang/Basic/OpenCLImageTypes.def"
+// OpenCL extension types
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) Id,
+#include "clang/Basic/OpenCLExtensionTypes.def"
+// SVE Types
+#define SVE_TYPE(Name, Id, SingletonId) Id,
+#include "clang/Basic/AArch64ACLETypes.def"
+// PPC MMA Types
+#define PPC_VECTOR_TYPE(Name, Id, Size) Id,
+#include "clang/Basic/PPCTypes.def"
+// RVV Types
+#define RVV_TYPE(Name, Id, SingletonId) Id,
+#include "clang/Basic/RISCVVTypes.def"
+// WebAssembly reference types
+#define WASM_TYPE(Name, Id, SingletonId) Id,
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+// AMDGPU types
+#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) Id,
+#include "clang/Basic/AMDGPUTypes.def"
+// HLSL intangible Types
+#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) Id,
+#include "clang/Basic/HLSLIntangibleTypes.def"
+// All other builtin types
+#define BUILTIN_TYPE(Id, SingletonId) Id,
+#define LAST_BUILTIN_TYPE(Id) LastKind = Id
+#include "clang/AST/BuiltinTypes.def"
+ };
+
+private:
+ friend class ASTContext; // ASTContext creates these.
+
+ BuiltinType(Kind K)
+ : Type(Builtin, QualType(),
+ K == Dependent ? TypeDependence::DependentInstantiation
+ : TypeDependence::None) {
+ static_assert(Kind::LastKind <
+ (1 << BuiltinTypeBitfields::NumOfBuiltinTypeBits) &&
+ "Defined builtin type exceeds the allocated space for serial "
+ "numbering");
+ BuiltinTypeBits.Kind = K;
+ }
+
+public:
+ Kind getKind() const { return static_cast<Kind>(BuiltinTypeBits.Kind); }
+ StringRef getName(const PrintingPolicy &Policy) const;
+
+ const char *getNameAsCString(const PrintingPolicy &Policy) const {
+ // The StringRef is null-terminated.
+ StringRef str = getName(Policy);
+ assert(!str.empty() && str.data()[str.size()] == '\0');
+ return str.data();
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ bool isInteger() const {
+ return getKind() >= Bool && getKind() <= Int128;
+ }
+
+ bool isSignedInteger() const {
+ return getKind() >= Char_S && getKind() <= Int128;
+ }
+
+ bool isUnsignedInteger() const {
+ return getKind() >= Bool && getKind() <= UInt128;
+ }
+
+ bool isFloatingPoint() const {
+ return getKind() >= Half && getKind() <= Ibm128;
+ }
+
+ bool isSVEBool() const { return getKind() == Kind::SveBool; }
+
+ bool isSVECount() const { return getKind() == Kind::SveCount; }
+
+ /// Determines whether the given kind corresponds to a placeholder type.
+ static bool isPlaceholderTypeKind(Kind K) {
+ return K >= Overload;
+ }
+
+ /// Determines whether this type is a placeholder type, i.e. a type
+ /// which cannot appear in arbitrary positions in a fully-formed
+ /// expression.
+ bool isPlaceholderType() const {
+ return isPlaceholderTypeKind(getKind());
+ }
+
+ /// Determines whether this type is a placeholder type other than
+ /// Overload. Most placeholder types require only syntactic
+ /// information about their context in order to be resolved (e.g.
+ /// whether it is a call expression), which means they can (and
+ /// should) be resolved in an earlier "phase" of analysis.
+ /// Overload expressions sometimes pick up further information
+ /// from their context, like whether the context expects a
+ /// specific function-pointer type, and so frequently need
+ /// special treatment.
+ bool isNonOverloadPlaceholderType() const {
+ return getKind() > Overload;
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Builtin; }
+};
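+
+// For illustration, a minimal sketch of kind-based classification (assuming
+// `T` is a QualType obtained from some ASTContext):
+//   if (const auto *BT = T->getAs<BuiltinType>()) {
+//     bool SignedInt = BT->isSignedInteger(); // true for 'int', 'long', ...
+//   }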
+
+/// Complex values, per C99 6.2.5p11. This supports the C99 complex
+/// types (_Complex float etc) as well as the GCC integer complex extensions.
+class ComplexType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these.
+
+ QualType ElementType;
+
+ ComplexType(QualType Element, QualType CanonicalPtr)
+ : Type(Complex, CanonicalPtr, Element->getDependence()),
+ ElementType(Element) {}
+
+public:
+ QualType getElementType() const { return ElementType; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getElementType());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Element) {
+ ID.AddPointer(Element.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Complex; }
+};
+
+/// Sugar for parentheses used when specifying types.
+class ParenType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these.
+
+ QualType Inner;
+
+ ParenType(QualType InnerType, QualType CanonType)
+ : Type(Paren, CanonType, InnerType->getDependence()), Inner(InnerType) {}
+
+public:
+ QualType getInnerType() const { return Inner; }
+
+ bool isSugared() const { return true; }
+ QualType desugar() const { return getInnerType(); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getInnerType());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Inner) {
+ Inner.Profile(ID);
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Paren; }
+};
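+
+// For illustration, a hedged sketch: in 'typedef int (IntT);' the parentheses
+// around the declarator are modeled as a ParenType whose getInnerType() is
+// 'int'; since ParenType is pure sugar, desugar() returns that inner type.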
+
+/// PointerType - C99 6.7.5.1 - Pointer Declarators.
+class PointerType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these.
+
+ QualType PointeeType;
+
+ PointerType(QualType Pointee, QualType CanonicalPtr)
+ : Type(Pointer, CanonicalPtr, Pointee->getDependence()),
+ PointeeType(Pointee) {}
+
+public:
+ QualType getPointeeType() const { return PointeeType; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getPointeeType());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) {
+ ID.AddPointer(Pointee.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Pointer; }
+};
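+
+// For illustration, a hedged sketch of how the Profile methods are used for
+// uniquing (`PointeeTy` is an assumed QualType; the lookup shown mirrors what
+// ASTContext does with its FoldingSet of PointerTypes):
+//   llvm::FoldingSetNodeID ID;
+//   PointerType::Profile(ID, PointeeTy);
+//   // ASTContext then calls FindNodeOrInsertPos with ID before allocating a
+//   // new node, so structurally identical pointer types are shared.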
+
+/// [BoundsSafety] Represents information of declarations referenced by the
+/// arguments of the `counted_by` attribute and the likes.
+class TypeCoupledDeclRefInfo {
+public:
+ using BaseTy = llvm::PointerIntPair<ValueDecl *, 1, unsigned>;
+
+private:
+ enum {
+ DerefShift = 0,
+ DerefMask = 1,
+ };
+ BaseTy Data;
+
+public:
+ /// \p D is a declaration referenced by the argument of the attribute. \p
+ /// Deref indicates whether \p D is referenced in a dereferenced form, e.g.,
+ /// \p Deref is true for `*n` in `int *__counted_by(*n)`.
+ TypeCoupledDeclRefInfo(ValueDecl *D = nullptr, bool Deref = false);
+
+ bool isDeref() const;
+ ValueDecl *getDecl() const;
+ unsigned getInt() const;
+ void *getOpaqueValue() const;
+ bool operator==(const TypeCoupledDeclRefInfo &Other) const;
+ void setFromOpaqueValue(void *V);
+};
+
+/// [BoundsSafety] Represents a parent type class for CountAttributedType and
+/// similar sugar types that will be introduced to represent a type with a
+/// bounds attribute.
+///
+/// Provides a common interface to navigate declarations referred to by the
+/// bounds expression.
+class BoundsAttributedType : public Type, public llvm::FoldingSetNode {
+ QualType WrappedTy;
+
+protected:
+ ArrayRef<TypeCoupledDeclRefInfo> Decls; // stored in trailing objects
+
+ BoundsAttributedType(TypeClass TC, QualType Wrapped, QualType Canon);
+
+public:
+ bool isSugared() const { return true; }
+ QualType desugar() const { return WrappedTy; }
+
+ using decl_iterator = const TypeCoupledDeclRefInfo *;
+ using decl_range = llvm::iterator_range<decl_iterator>;
+
+ decl_iterator dependent_decl_begin() const { return Decls.begin(); }
+ decl_iterator dependent_decl_end() const { return Decls.end(); }
+
+ unsigned getNumCoupledDecls() const { return Decls.size(); }
+
+ decl_range dependent_decls() const {
+ return decl_range(dependent_decl_begin(), dependent_decl_end());
+ }
+
+ ArrayRef<TypeCoupledDeclRefInfo> getCoupledDecls() const {
+ return {dependent_decl_begin(), dependent_decl_end()};
+ }
+
+ bool referencesFieldDecls() const;
+
+ static bool classof(const Type *T) {
+ // Currently, only `class CountAttributedType` inherits
+ // `BoundsAttributedType` but the subclass will grow as we add more bounds
+ // annotations.
+ switch (T->getTypeClass()) {
+ case CountAttributed:
+ return true;
+ default:
+ return false;
+ }
+ }
+};
+
+/// Represents a sugar type with `__counted_by` or `__sized_by` annotations,
+/// including their `_or_null` variants.
+class CountAttributedType final
+ : public BoundsAttributedType,
+ public llvm::TrailingObjects<CountAttributedType,
+ TypeCoupledDeclRefInfo> {
+ friend class ASTContext;
+
+ Expr *CountExpr;
+ /// \p CountExpr represents the argument of __counted_by or the likes. \p
+ /// CountInBytes indicates that \p CountExpr is a byte count (i.e.,
+ /// __sized_by(_or_null)). \p OrNull means it's an or_null variant (i.e.,
+ /// __counted_by_or_null or __sized_by_or_null). \p CoupledDecls contains the
+ /// list of declarations referenced by \p CountExpr, which the type depends on
+ /// for the bounds information.
+ CountAttributedType(QualType Wrapped, QualType Canon, Expr *CountExpr,
+ bool CountInBytes, bool OrNull,
+ ArrayRef<TypeCoupledDeclRefInfo> CoupledDecls);
+
+ unsigned numTrailingObjects(OverloadToken<TypeCoupledDeclRefInfo>) const {
+ return CountAttributedTypeBits.NumCoupledDecls;
+ }
+
+public:
+ enum DynamicCountPointerKind {
+ CountedBy = 0,
+ SizedBy,
+ CountedByOrNull,
+ SizedByOrNull,
+ };
+
+ Expr *getCountExpr() const { return CountExpr; }
+ bool isCountInBytes() const { return CountAttributedTypeBits.CountInBytes; }
+ bool isOrNull() const { return CountAttributedTypeBits.OrNull; }
+
+ DynamicCountPointerKind getKind() const {
+ if (isOrNull())
+ return isCountInBytes() ? SizedByOrNull : CountedByOrNull;
+ return isCountInBytes() ? SizedBy : CountedBy;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, desugar(), CountExpr, isCountInBytes(), isOrNull());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType WrappedTy,
+ Expr *CountExpr, bool CountInBytes, bool Nullable);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == CountAttributed;
+ }
+
+ StringRef getAttributeName(bool WithMacroPrefix) const;
+};
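+
+// For illustration (BoundsSafety), a hedged sketch of a declaration that would
+// be modeled by a CountAttributedType wrapping 'int *':
+//   int len;
+//   int *__counted_by(len) buf;
+// Here getCountExpr() is the reference to 'len', getKind() is CountedBy, and
+// isCountInBytes()/isOrNull() are both false.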
+
+/// Represents a type which was implicitly adjusted by the semantic
+/// engine for arbitrary reasons. For example, array and function types can
+/// decay, and function types can have their calling conventions adjusted.
+class AdjustedType : public Type, public llvm::FoldingSetNode {
+ QualType OriginalTy;
+ QualType AdjustedTy;
+
+protected:
+ friend class ASTContext; // ASTContext creates these.
+
+ AdjustedType(TypeClass TC, QualType OriginalTy, QualType AdjustedTy,
+ QualType CanonicalPtr)
+ : Type(TC, CanonicalPtr, OriginalTy->getDependence()),
+ OriginalTy(OriginalTy), AdjustedTy(AdjustedTy) {}
+
+public:
+ QualType getOriginalType() const { return OriginalTy; }
+ QualType getAdjustedType() const { return AdjustedTy; }
+
+ bool isSugared() const { return true; }
+ QualType desugar() const { return AdjustedTy; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, OriginalTy, AdjustedTy);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Orig, QualType New) {
+ ID.AddPointer(Orig.getAsOpaquePtr());
+ ID.AddPointer(New.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Adjusted || T->getTypeClass() == Decayed;
+ }
+};
+
+/// Represents a pointer type decayed from an array or function type.
+class DecayedType : public AdjustedType {
+ friend class ASTContext; // ASTContext creates these.
+
+ inline
+ DecayedType(QualType OriginalType, QualType Decayed, QualType Canonical);
+
+public:
+ QualType getDecayedType() const { return getAdjustedType(); }
+
+ inline QualType getPointeeType() const;
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Decayed; }
+};
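+
+// For illustration, a hedged sketch of decay: for a parameter declared as
+//   void f(int a[4]);
+// the type of 'a' is a DecayedType whose getOriginalType() is 'int[4]' and
+// whose getDecayedType() is 'int *' (so getPointeeType() yields 'int').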
+
+/// Pointer to a block type.
+/// This type represents types syntactically written as
+/// "void (^)(int)", etc. The pointee is required to always be a function type.
+class BlockPointerType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these.
+
+ // Block is some kind of pointer type
+ QualType PointeeType;
+
+ BlockPointerType(QualType Pointee, QualType CanonicalCls)
+ : Type(BlockPointer, CanonicalCls, Pointee->getDependence()),
+ PointeeType(Pointee) {}
+
+public:
+ // Get the pointee type. Pointee is required to always be a function type.
+ QualType getPointeeType() const { return PointeeType; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getPointeeType());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) {
+ ID.AddPointer(Pointee.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == BlockPointer;
+ }
+};
+
+/// Base for LValueReferenceType and RValueReferenceType
+class ReferenceType : public Type, public llvm::FoldingSetNode {
+ QualType PointeeType;
+
+protected:
+ ReferenceType(TypeClass tc, QualType Referencee, QualType CanonicalRef,
+ bool SpelledAsLValue)
+ : Type(tc, CanonicalRef, Referencee->getDependence()),
+ PointeeType(Referencee) {
+ ReferenceTypeBits.SpelledAsLValue = SpelledAsLValue;
+ ReferenceTypeBits.InnerRef = Referencee->isReferenceType();
+ }
+
+public:
+ bool isSpelledAsLValue() const { return ReferenceTypeBits.SpelledAsLValue; }
+ bool isInnerRef() const { return ReferenceTypeBits.InnerRef; }
+
+ QualType getPointeeTypeAsWritten() const { return PointeeType; }
+
+ QualType getPointeeType() const {
+ // FIXME: this might strip inner qualifiers; okay?
+ const ReferenceType *T = this;
+ while (T->isInnerRef())
+ T = T->PointeeType->castAs<ReferenceType>();
+ return T->PointeeType;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, PointeeType, isSpelledAsLValue());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ QualType Referencee,
+ bool SpelledAsLValue) {
+ ID.AddPointer(Referencee.getAsOpaquePtr());
+ ID.AddBoolean(SpelledAsLValue);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == LValueReference ||
+ T->getTypeClass() == RValueReference;
+ }
+};
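+
+// For illustration, a hedged sketch of the inner-reference walk in
+// getPointeeType(): given 'typedef int &IntRef;', a type written 'IntRef &'
+// collapses to 'int &'; getPointeeTypeAsWritten() yields 'IntRef', while
+// getPointeeType() walks through the inner reference and yields 'int'.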
+
+/// An lvalue reference type, per C++11 [dcl.ref].
+class LValueReferenceType : public ReferenceType {
+ friend class ASTContext; // ASTContext creates these
+
+ LValueReferenceType(QualType Referencee, QualType CanonicalRef,
+ bool SpelledAsLValue)
+ : ReferenceType(LValueReference, Referencee, CanonicalRef,
+ SpelledAsLValue) {}
+
+public:
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == LValueReference;
+ }
+};
+
+/// An rvalue reference type, per C++11 [dcl.ref].
+class RValueReferenceType : public ReferenceType {
+ friend class ASTContext; // ASTContext creates these
+
+ RValueReferenceType(QualType Referencee, QualType CanonicalRef)
+ : ReferenceType(RValueReference, Referencee, CanonicalRef, false) {}
+
+public:
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == RValueReference;
+ }
+};
+
+/// A pointer to member type per C++ 8.3.3 - Pointers to members.
+///
+/// This includes both pointers to data members and pointer to member functions.
+class MemberPointerType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these.
+
+ QualType PointeeType;
+
+ /// The class of which the pointee is a member. Must ultimately be a
+ /// CXXRecordType, but could be a typedef or a template parameter too.
+ NestedNameSpecifier Qualifier;
+
+ MemberPointerType(QualType Pointee, NestedNameSpecifier Qualifier,
+ QualType CanonicalPtr)
+ : Type(MemberPointer, CanonicalPtr,
+ (toTypeDependence(Qualifier.getDependence()) &
+ ~TypeDependence::VariablyModified) |
+ Pointee->getDependence()),
+ PointeeType(Pointee), Qualifier(Qualifier) {}
+
+public:
+ QualType getPointeeType() const { return PointeeType; }
+
+ /// Returns true if the member type (i.e. the pointee type) is a
+ /// function type rather than a data-member type.
+ bool isMemberFunctionPointer() const {
+ return PointeeType->isFunctionProtoType();
+ }
+
+ /// Returns true if the member type (i.e. the pointee type) is a
+ /// data type rather than a function type.
+ bool isMemberDataPointer() const {
+ return !PointeeType->isFunctionProtoType();
+ }
+
+ NestedNameSpecifier getQualifier() const { return Qualifier; }
+ /// Note: this can trigger extra deserialization when external AST sources are
+ /// used. Prefer `getCXXRecordDecl()` unless you really need the most recent
+ /// decl.
+ CXXRecordDecl *getMostRecentCXXRecordDecl() const;
+
+ bool isSugared() const;
+ QualType desugar() const {
+ return isSugared() ? getCanonicalTypeInternal() : QualType(this, 0);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ // FIXME: `getMostRecentCXXRecordDecl()` should be possible to use here,
+ // however when external AST sources are used it causes nondeterminism
+ // issues (see https://github.com/llvm/llvm-project/pull/137910).
+ Profile(ID, getPointeeType(), getQualifier(), getCXXRecordDecl());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee,
+ const NestedNameSpecifier Qualifier,
+ const CXXRecordDecl *Cls);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == MemberPointer;
+ }
+
+private:
+ CXXRecordDecl *getCXXRecordDecl() const;
+};
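+
+// For illustration, a hedged sketch: given 'struct S { int x; void f(); };',
+//   int S::*pd = &S::x;      // a MemberPointerType; isMemberDataPointer()
+//   void (S::*pf)() = &S::f; // a MemberPointerType; isMemberFunctionPointer()
+// In both cases getQualifier() names 'S', and getPointeeType() is the member's
+// type ('int' and 'void ()' respectively).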
+
+/// Capture whether this is a normal array (e.g. int X[4]),
+/// an array with a static size (e.g. int X[static 4]), or an array
+/// with a star size (e.g. int X[*]).
+/// 'static' is only allowed on function parameters.
+enum class ArraySizeModifier { Normal, Static, Star };
+
+/// Represents an array type, per C99 6.7.5.2 - Array Declarators.
+class ArrayType : public Type, public llvm::FoldingSetNode {
+private:
+ /// The element type of the array.
+ QualType ElementType;
+
+protected:
+ friend class ASTContext; // ASTContext creates these.
+
+ ArrayType(TypeClass tc, QualType et, QualType can, ArraySizeModifier sm,
+ unsigned tq, const Expr *sz = nullptr);
+
+public:
+ QualType getElementType() const { return ElementType; }
+
+ ArraySizeModifier getSizeModifier() const {
+ return ArraySizeModifier(ArrayTypeBits.SizeModifier);
+ }
+
+ Qualifiers getIndexTypeQualifiers() const {
+ return Qualifiers::fromCVRMask(getIndexTypeCVRQualifiers());
+ }
+
+ unsigned getIndexTypeCVRQualifiers() const {
+ return ArrayTypeBits.IndexTypeQuals;
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ConstantArray ||
+ T->getTypeClass() == VariableArray ||
+ T->getTypeClass() == IncompleteArray ||
+ T->getTypeClass() == DependentSizedArray ||
+ T->getTypeClass() == ArrayParameter;
+ }
+};
+
+/// Represents the canonical version of C arrays with a specified constant size.
+/// For example, the canonical type for 'int A[4 + 4*100]' is a
+/// ConstantArrayType where the element type is 'int' and the size is 404.
+class ConstantArrayType : public ArrayType {
+ friend class ASTContext; // ASTContext creates these.
+
+ struct ExternalSize {
+ ExternalSize(const llvm::APInt &Sz, const Expr *SE)
+ : Size(Sz), SizeExpr(SE) {}
+ llvm::APInt Size; // Allows us to unique the type.
+ const Expr *SizeExpr;
+ };
+
+ union {
+ uint64_t Size;
+ ExternalSize *SizePtr;
+ };
+
+ ConstantArrayType(QualType Et, QualType Can, uint64_t Width, uint64_t Sz,
+ ArraySizeModifier SM, unsigned TQ)
+ : ArrayType(ConstantArray, Et, Can, SM, TQ, nullptr), Size(Sz) {
+ ConstantArrayTypeBits.HasExternalSize = false;
+ ConstantArrayTypeBits.SizeWidth = Width / 8;
+ // The in-structure width is stored in bytes rather than bits, so we drop
+ // the three least significant bits, which are always zero anyway.
+ assert(Width < 0xFF && "Type width in bits must fit in 8 bits");
+ }
+
+ ConstantArrayType(QualType Et, QualType Can, ExternalSize *SzPtr,
+ ArraySizeModifier SM, unsigned TQ)
+ : ArrayType(ConstantArray, Et, Can, SM, TQ, SzPtr->SizeExpr),
+ SizePtr(SzPtr) {
+ ConstantArrayTypeBits.HasExternalSize = true;
+ ConstantArrayTypeBits.SizeWidth = 0;
+
+ assert((SzPtr->SizeExpr == nullptr || !Can.isNull()) &&
+ "canonical constant array should not have size expression");
+ }
+
+ static ConstantArrayType *Create(const ASTContext &Ctx, QualType ET,
+ QualType Can, const llvm::APInt &Sz,
+ const Expr *SzExpr, ArraySizeModifier SzMod,
+ unsigned Qual);
+
+protected:
+ ConstantArrayType(TypeClass Tc, const ConstantArrayType *ATy, QualType Can)
+ : ArrayType(Tc, ATy->getElementType(), Can, ATy->getSizeModifier(),
+ ATy->getIndexTypeQualifiers().getAsOpaqueValue(), nullptr) {
+ ConstantArrayTypeBits.HasExternalSize =
+ ATy->ConstantArrayTypeBits.HasExternalSize;
+ if (!ConstantArrayTypeBits.HasExternalSize) {
+ ConstantArrayTypeBits.SizeWidth = ATy->ConstantArrayTypeBits.SizeWidth;
+ Size = ATy->Size;
+ } else
+ SizePtr = ATy->SizePtr;
+ }
+
+public:
+ /// Return the constant array size as an APInt.
+ llvm::APInt getSize() const {
+ return ConstantArrayTypeBits.HasExternalSize
+ ? SizePtr->Size
+ : llvm::APInt(ConstantArrayTypeBits.SizeWidth * 8, Size);
+ }
+
+ /// Return the bit width of the size type.
+ unsigned getSizeBitWidth() const {
+ return ConstantArrayTypeBits.HasExternalSize
+ ? SizePtr->Size.getBitWidth()
+ : static_cast<unsigned>(ConstantArrayTypeBits.SizeWidth * 8);
+ }
+
+ /// Return true if the size is zero.
+ bool isZeroSize() const {
+ return ConstantArrayTypeBits.HasExternalSize ? SizePtr->Size.isZero()
+ : 0 == Size;
+ }
+
+ /// Return the size zero-extended as a uint64_t.
+ uint64_t getZExtSize() const {
+ return ConstantArrayTypeBits.HasExternalSize ? SizePtr->Size.getZExtValue()
+ : Size;
+ }
+
+ /// Return the size sign-extended as a uint64_t.
+ int64_t getSExtSize() const {
+ return ConstantArrayTypeBits.HasExternalSize ? SizePtr->Size.getSExtValue()
+ : static_cast<int64_t>(Size);
+ }
+
+ /// Return the size zero-extended to uint64_t or UINT64_MAX if the value is
+ /// larger than UINT64_MAX.
+ uint64_t getLimitedSize() const {
+ return ConstantArrayTypeBits.HasExternalSize
+ ? SizePtr->Size.getLimitedValue()
+ : Size;
+ }
+
+ /// Return a pointer to the size expression.
+ const Expr *getSizeExpr() const {
+ return ConstantArrayTypeBits.HasExternalSize ? SizePtr->SizeExpr : nullptr;
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ /// Determine the number of bits required to address a member of
+ /// an array with the given element type and number of elements.
+ static unsigned getNumAddressingBits(const ASTContext &Context,
+ QualType ElementType,
+ const llvm::APInt &NumElements);
+
+ unsigned getNumAddressingBits(const ASTContext &Context) const;
+
+ /// Determine the maximum number of active bits that an array's size
+ /// can require, which limits the maximum size of the array.
+ static unsigned getMaxSizeBits(const ASTContext &Context);
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) {
+ Profile(ID, Ctx, getElementType(), getZExtSize(), getSizeExpr(),
+ getSizeModifier(), getIndexTypeCVRQualifiers());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx,
+ QualType ET, uint64_t ArraySize, const Expr *SizeExpr,
+ ArraySizeModifier SizeMod, unsigned TypeQuals);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ConstantArray ||
+ T->getTypeClass() == ArrayParameter;
+ }
+};
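+
+// For illustration, a hedged sketch of the size accessors: for the
+// 'int A[4 + 4*100]' example above, the ConstantArrayType stores 404, so
+// getZExtSize() returns 404, isZeroSize() is false, and getSize() returns the
+// same value as an APInt of getSizeBitWidth() bits.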
+
+/// Represents a constant array type that does not decay to a pointer when used
+/// as a function parameter.
+class ArrayParameterType : public ConstantArrayType {
+ friend class ASTContext; // ASTContext creates these.
+
+ ArrayParameterType(const ConstantArrayType *ATy, QualType CanTy)
+ : ConstantArrayType(ArrayParameter, ATy, CanTy) {}
+
+public:
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ArrayParameter;
+ }
+
+ QualType getConstantArrayType(const ASTContext &Ctx) const;
+};
+
+/// Represents a C array with an unspecified size. For example 'int A[]' has
+/// an IncompleteArrayType where the element type is 'int' and the size is
+/// unspecified.
+class IncompleteArrayType : public ArrayType {
+ friend class ASTContext; // ASTContext creates these.
+
+ IncompleteArrayType(QualType et, QualType can,
+ ArraySizeModifier sm, unsigned tq)
+ : ArrayType(IncompleteArray, et, can, sm, tq) {}
+
+public:
+ friend class StmtIteratorBase;
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == IncompleteArray;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getElementType(), getSizeModifier(),
+ getIndexTypeCVRQualifiers());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType ET,
+ ArraySizeModifier SizeMod, unsigned TypeQuals) {
+ ID.AddPointer(ET.getAsOpaquePtr());
+ ID.AddInteger(llvm::to_underlying(SizeMod));
+ ID.AddInteger(TypeQuals);
+ }
+};
+
+/// Represents a C array with a specified size that is not an
+/// integer-constant-expression. For example, 'int s[x+foo()]'.
+/// Since the size expression is an arbitrary expression, we store it as such.
+///
+/// Note: VariableArrayTypes aren't uniqued (since the expressions aren't) and
+/// should not be: two lexically equivalent variable array types could mean
+/// different things, for example, these variables do not have the same type
+/// dynamically:
+///
+/// void foo(int x) {
+/// int Y[x];
+/// ++x;
+/// int Z[x];
+/// }
+///
+/// FIXME: Even constant array types might be represented by a
+/// VariableArrayType, as in:
+///
+/// void func(int n) {
+/// int array[7][n];
+/// }
+///
+/// Even though 'array' is a constant-size array of seven elements of type
+/// variable-length array of size 'n', it will be represented as a
+/// VariableArrayType whose 'SizeExpr' is an IntegerLiteral whose value is 7.
+/// Instead, this should be a ConstantArrayType whose element is a
+/// VariableArrayType, which models the type better.
+class VariableArrayType : public ArrayType {
+ friend class ASTContext; // ASTContext creates these.
+
+ /// An assignment-expression. VLAs are only permitted within
+ /// a function block.
+ Stmt *SizeExpr;
+
+ VariableArrayType(QualType et, QualType can, Expr *e, ArraySizeModifier sm,
+ unsigned tq)
+ : ArrayType(VariableArray, et, can, sm, tq, e), SizeExpr((Stmt *)e) {}
+
+public:
+ friend class StmtIteratorBase;
+
+ Expr *getSizeExpr() const {
+ // We use C-style casts instead of cast<> here because we do not wish
+ // to have a dependency of Type.h on Stmt.h/Expr.h.
+ return (Expr*) SizeExpr;
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == VariableArray;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ llvm_unreachable("Cannot unique VariableArrayTypes.");
+ }
+};
+
+/// Represents an array type in C++ whose size is a value-dependent expression.
+///
+/// For example:
+/// \code
+/// template<typename T, int Size>
+/// class array {
+/// T data[Size];
+/// };
+/// \endcode
+///
+/// For these types, we won't actually know what the array bound is
+/// until template instantiation occurs, at which point this will
+/// become either a ConstantArrayType or a VariableArrayType.
+class DependentSizedArrayType : public ArrayType {
+ friend class ASTContext; // ASTContext creates these.
+
+ /// An assignment expression that will instantiate to the
+ /// size of the array.
+ ///
+ /// The expression itself might be null, in which case the array
+ /// type will have its size deduced from an initializer.
+ Stmt *SizeExpr;
+
+ DependentSizedArrayType(QualType et, QualType can, Expr *e,
+ ArraySizeModifier sm, unsigned tq);
+
+public:
+ friend class StmtIteratorBase;
+
+ Expr *getSizeExpr() const {
+ // We use C-style casts instead of cast<> here because we do not wish
+ // to have a dependency of Type.h on Stmt.h/Expr.h.
+ return (Expr*) SizeExpr;
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentSizedArray;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
+ Profile(ID, Context, getElementType(),
+ getSizeModifier(), getIndexTypeCVRQualifiers(), getSizeExpr());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ QualType ET, ArraySizeModifier SizeMod,
+ unsigned TypeQuals, Expr *E);
+};
+
+/// Represents an extended address space qualifier where the input address space
+/// value is dependent. Non-dependent address spaces are not represented with a
+/// special Type subclass; they are stored on an ExtQuals node as part of a QualType.
+///
+/// For example:
+/// \code
+/// template<typename T, int AddrSpace>
+/// class AddressSpace {
+/// typedef T __attribute__((address_space(AddrSpace))) type;
+/// };
+/// \endcode
+class DependentAddressSpaceType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext;
+
+ Expr *AddrSpaceExpr;
+ QualType PointeeType;
+ SourceLocation loc;
+
+ DependentAddressSpaceType(QualType PointeeType, QualType can,
+ Expr *AddrSpaceExpr, SourceLocation loc);
+
+public:
+ Expr *getAddrSpaceExpr() const { return AddrSpaceExpr; }
+ QualType getPointeeType() const { return PointeeType; }
+ SourceLocation getAttributeLoc() const { return loc; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentAddressSpace;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
+ Profile(ID, Context, getPointeeType(), getAddrSpaceExpr());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ QualType PointeeType, Expr *AddrSpaceExpr);
+};
+
+/// Represents an extended vector type where either the type or size is
+/// dependent.
+///
+/// For example:
+/// \code
+/// template<typename T, int Size>
+/// class vector {
+/// typedef T __attribute__((ext_vector_type(Size))) type;
+/// };
+/// \endcode
+class DependentSizedExtVectorType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext;
+
+ Expr *SizeExpr;
+
+ /// The element type of the array.
+ QualType ElementType;
+
+ SourceLocation loc;
+
+ DependentSizedExtVectorType(QualType ElementType, QualType can,
+ Expr *SizeExpr, SourceLocation loc);
+
+public:
+ Expr *getSizeExpr() const { return SizeExpr; }
+ QualType getElementType() const { return ElementType; }
+ SourceLocation getAttributeLoc() const { return loc; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentSizedExtVector;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
+ Profile(ID, Context, getElementType(), getSizeExpr());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ QualType ElementType, Expr *SizeExpr);
+};
+
+enum class VectorKind {
+ /// not a target-specific vector type
+ Generic,
+
+ /// is AltiVec vector
+ AltiVecVector,
+
+ /// is AltiVec 'vector Pixel'
+ AltiVecPixel,
+
+ /// is AltiVec 'vector bool ...'
+ AltiVecBool,
+
+ /// is ARM Neon vector
+ Neon,
+
+ /// is ARM Neon polynomial vector
+ NeonPoly,
+
+ /// is AArch64 SVE fixed-length data vector
+ SveFixedLengthData,
+
+ /// is AArch64 SVE fixed-length predicate vector
+ SveFixedLengthPredicate,
+
+ /// is RISC-V RVV fixed-length data vector
+ RVVFixedLengthData,
+
+ /// is RISC-V RVV fixed-length mask vector
+ RVVFixedLengthMask,
+
+ RVVFixedLengthMask_1,
+ RVVFixedLengthMask_2,
+ RVVFixedLengthMask_4
+};
+
+/// Represents a GCC generic vector type. This type is created using
+/// __attribute__((vector_size(n))), where "n" specifies the vector size in
+/// bytes; or from an Altivec __vector or vector declaration.
+/// Since the constructor takes the number of vector elements, the
+/// client is responsible for converting the size into the number of elements.
+class VectorType : public Type, public llvm::FoldingSetNode {
+protected:
+ friend class ASTContext; // ASTContext creates these.
+
+ /// The element type of the vector.
+ QualType ElementType;
+
+ VectorType(QualType vecType, unsigned nElements, QualType canonType,
+ VectorKind vecKind);
+
+ VectorType(TypeClass tc, QualType vecType, unsigned nElements,
+ QualType canonType, VectorKind vecKind);
+
+public:
+ QualType getElementType() const { return ElementType; }
+ unsigned getNumElements() const { return VectorTypeBits.NumElements; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ VectorKind getVectorKind() const {
+ return VectorKind(VectorTypeBits.VecKind);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getElementType(), getNumElements(),
+ getTypeClass(), getVectorKind());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
+ unsigned NumElements, TypeClass TypeClass,
+ VectorKind VecKind) {
+ ID.AddPointer(ElementType.getAsOpaquePtr());
+ ID.AddInteger(NumElements);
+ ID.AddInteger(TypeClass);
+ ID.AddInteger(llvm::to_underlying(VecKind));
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Vector || T->getTypeClass() == ExtVector;
+ }
+};
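+
+// For illustration, a hedged sketch of the "client converts size to element
+// count" rule: 'typedef int v4si __attribute__((vector_size(16)));' is modeled
+// as a VectorType whose getElementType() is 'int' and, on a target with
+// 4-byte 'int', whose getNumElements() is 16 / 4 == 4.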
+
+/// Represents a vector type where either the type or size is dependent.
+///
+/// For example:
+/// \code
+/// template<typename T, int Size>
+/// class vector {
+/// typedef T __attribute__((vector_size(Size))) type;
+/// };
+/// \endcode
+class DependentVectorType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext;
+
+ QualType ElementType;
+ Expr *SizeExpr;
+ SourceLocation Loc;
+
+ DependentVectorType(QualType ElementType, QualType CanonType, Expr *SizeExpr,
+ SourceLocation Loc, VectorKind vecKind);
+
+public:
+ Expr *getSizeExpr() const { return SizeExpr; }
+ QualType getElementType() const { return ElementType; }
+ SourceLocation getAttributeLoc() const { return Loc; }
+ VectorKind getVectorKind() const {
+ return VectorKind(VectorTypeBits.VecKind);
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentVector;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
+ Profile(ID, Context, getElementType(), getSizeExpr(), getVectorKind());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ QualType ElementType, const Expr *SizeExpr,
+ VectorKind VecKind);
+};
+
+/// ExtVectorType - Extended vector type. This type is created using
+/// __attribute__((ext_vector_type(n))), where "n" is the number of elements.
+/// Unlike vector_size, ext_vector_type is only allowed on typedefs. This
+/// class enables syntactic extensions, like Vector Components for accessing
+/// points (as .xyzw), colors (as .rgba), and textures (modeled after OpenGL
+/// Shading Language).
+class ExtVectorType : public VectorType {
+ friend class ASTContext; // ASTContext creates these.
+
+ ExtVectorType(QualType vecType, unsigned nElements, QualType canonType)
+ : VectorType(ExtVector, vecType, nElements, canonType,
+ VectorKind::Generic) {}
+
+public:
+ static int getPointAccessorIdx(char c) {
+ switch (c) {
+ default: return -1;
+ case 'x': case 'r': return 0;
+ case 'y': case 'g': return 1;
+ case 'z': case 'b': return 2;
+ case 'w': case 'a': return 3;
+ }
+ }
+
+ static int getNumericAccessorIdx(char c) {
+ switch (c) {
+ default: return -1;
+ case '0': return 0;
+ case '1': return 1;
+ case '2': return 2;
+ case '3': return 3;
+ case '4': return 4;
+ case '5': return 5;
+ case '6': return 6;
+ case '7': return 7;
+ case '8': return 8;
+ case '9': return 9;
+ case 'A':
+ case 'a': return 10;
+ case 'B':
+ case 'b': return 11;
+ case 'C':
+ case 'c': return 12;
+ case 'D':
+ case 'd': return 13;
+ case 'E':
+ case 'e': return 14;
+ case 'F':
+ case 'f': return 15;
+ }
+ }
+
+ static int getAccessorIdx(char c, bool isNumericAccessor) {
+ if (isNumericAccessor)
+ return getNumericAccessorIdx(c);
+ else
+ return getPointAccessorIdx(c);
+ }
+
+ bool isAccessorWithinNumElements(char c, bool isNumericAccessor) const {
+ if (int idx = getAccessorIdx(c, isNumericAccessor)+1)
+ return unsigned(idx-1) < getNumElements();
+ return false;
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ExtVector;
+ }
+};
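+
+// For illustration, a hedged sketch of the accessor helpers: given
+// 'typedef float float4 __attribute__((ext_vector_type(4)));' and 'float4 v;',
+// the component access 'v.y' maps through getPointAccessorIdx('y') == 1, and
+// isAccessorWithinNumElements('y', /*isNumericAccessor=*/false) is true.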
+
+/// Represents a matrix type, as defined in the Matrix Types clang extensions.
+/// __attribute__((matrix_type(rows, columns))), where "rows" specifies the
+/// number of rows and "columns" specifies the number of columns.
+class MatrixType : public Type, public llvm::FoldingSetNode {
+protected:
+ friend class ASTContext;
+
+ /// The element type of the matrix.
+ QualType ElementType;
+
+ MatrixType(QualType ElementTy, QualType CanonElementTy);
+
+ MatrixType(TypeClass TypeClass, QualType ElementTy, QualType CanonElementTy,
+ const Expr *RowExpr = nullptr, const Expr *ColumnExpr = nullptr);
+
+public:
+ /// Returns type of the elements being stored in the matrix
+ QualType getElementType() const { return ElementType; }
+
+ /// Valid element types are the following:
+ /// * an integer type (as in C23 6.2.5p22), but excluding enumerated types
+ /// and _Bool
+ /// * the standard floating types float or double
+ /// * a half-precision floating point type, if one is supported on the target
+ static bool isValidElementType(QualType T) {
+ return T->isDependentType() ||
+ (T->isRealType() && !T->isBooleanType() && !T->isEnumeralType());
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ConstantMatrix ||
+ T->getTypeClass() == DependentSizedMatrix;
+ }
+};
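+
+// For illustration, a hedged sketch: 'typedef float m4x4
+// __attribute__((matrix_type(4, 4)));' is modeled as a ConstantMatrixType
+// (below) with getNumRows() == getNumColumns() == 4. isValidElementType()
+// accepts 'float' here but rejects '_Bool' and enumeration types.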
+
+/// Represents a concrete matrix type with constant number of rows and columns
+class ConstantMatrixType final : public MatrixType {
+protected:
+ friend class ASTContext;
+
+ /// Number of rows and columns.
+ unsigned NumRows;
+ unsigned NumColumns;
+
+ static constexpr unsigned MaxElementsPerDimension = (1 << 20) - 1;
+
+ ConstantMatrixType(QualType MatrixElementType, unsigned NRows,
+ unsigned NColumns, QualType CanonElementType);
+
+ ConstantMatrixType(TypeClass typeClass, QualType MatrixType, unsigned NRows,
+ unsigned NColumns, QualType CanonElementType);
+
+public:
+ /// Returns the number of rows in the matrix.
+ unsigned getNumRows() const { return NumRows; }
+
+ /// Returns the number of columns in the matrix.
+ unsigned getNumColumns() const { return NumColumns; }
+
+ /// Returns the number of elements required to embed the matrix into a vector.
+ unsigned getNumElementsFlattened() const {
+ return getNumRows() * getNumColumns();
+ }
+
+ /// Returns true if \p NumElements is a valid matrix dimension.
+ static constexpr bool isDimensionValid(size_t NumElements) {
+ return NumElements > 0 && NumElements <= MaxElementsPerDimension;
+ }
+
+ /// Returns the maximum number of elements per dimension.
+ static constexpr unsigned getMaxElementsPerDimension() {
+ return MaxElementsPerDimension;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getElementType(), getNumRows(), getNumColumns(),
+ getTypeClass());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
+ unsigned NumRows, unsigned NumColumns,
+ TypeClass TypeClass) {
+ ID.AddPointer(ElementType.getAsOpaquePtr());
+ ID.AddInteger(NumRows);
+ ID.AddInteger(NumColumns);
+ ID.AddInteger(TypeClass);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ConstantMatrix;
+ }
+};
+
+/// Represents a matrix type where the type and the number of rows and columns
+/// is dependent on a template.
+class DependentSizedMatrixType final : public MatrixType {
+ friend class ASTContext;
+
+ Expr *RowExpr;
+ Expr *ColumnExpr;
+
+ SourceLocation loc;
+
+ DependentSizedMatrixType(QualType ElementType, QualType CanonicalType,
+ Expr *RowExpr, Expr *ColumnExpr, SourceLocation loc);
+
+public:
+ Expr *getRowExpr() const { return RowExpr; }
+ Expr *getColumnExpr() const { return ColumnExpr; }
+ SourceLocation getAttributeLoc() const { return loc; }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentSizedMatrix;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
+ Profile(ID, Context, getElementType(), getRowExpr(), getColumnExpr());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ QualType ElementType, Expr *RowExpr, Expr *ColumnExpr);
+};
+
+/// FunctionType - C99 6.7.5.3 - Function Declarators. This is the common base
+/// class of FunctionNoProtoType and FunctionProtoType.
+class FunctionType : public Type {
+ // The type returned by the function.
+ QualType ResultType;
+
+public:
+ /// Interesting information about a specific parameter that can't simply
+ /// be reflected in the parameter's type. This is only used by
+ /// FunctionProtoType but is in FunctionType to make this class available
+ /// during the specification of the bases of FunctionProtoType.
+ ///
+ /// It makes sense to model language features this way when there's some
+ /// sort of parameter-specific override (such as an attribute) that
+ /// affects how the function is called. For example, the ARC ns_consumed
+ /// attribute changes whether a parameter is passed at +0 (the default)
+ /// or +1 (ns_consumed). This must be reflected in the function type,
+ /// but isn't really a change to the parameter type.
+ ///
+ /// One serious disadvantage of modelling language features this way is
+ /// that they generally do not work with language features that attempt
+ /// to destructure types. For example, template argument deduction will
+ /// not be able to match a parameter declared as
+ /// T (*)(U)
+ /// against an argument of type
+ /// void (*)(__attribute__((ns_consumed)) id)
+ /// because the substitution of T=void, U=id into the former will
+ /// not produce the latter.
+ class ExtParameterInfo {
+ enum {
+ ABIMask = 0x0F,
+ IsConsumed = 0x10,
+ HasPassObjSize = 0x20,
+ IsNoEscape = 0x40,
+ };
+ unsigned char Data = 0;
+
+ public:
+ ExtParameterInfo() = default;
+
+ /// Return the ABI treatment of this parameter.
+ ParameterABI getABI() const { return ParameterABI(Data & ABIMask); }
+ ExtParameterInfo withABI(ParameterABI kind) const {
+ ExtParameterInfo copy = *this;
+ copy.Data = (copy.Data & ~ABIMask) | unsigned(kind);
+ return copy;
+ }
+
+ /// Is this parameter considered "consumed" by Objective-C ARC?
+ /// Consumed parameters must have retainable object type.
+ bool isConsumed() const { return (Data & IsConsumed); }
+ ExtParameterInfo withIsConsumed(bool consumed) const {
+ ExtParameterInfo copy = *this;
+ if (consumed)
+ copy.Data |= IsConsumed;
+ else
+ copy.Data &= ~IsConsumed;
+ return copy;
+ }
+
+ bool hasPassObjectSize() const { return Data & HasPassObjSize; }
+ ExtParameterInfo withHasPassObjectSize() const {
+ ExtParameterInfo Copy = *this;
+ Copy.Data |= HasPassObjSize;
+ return Copy;
+ }
+
+ bool isNoEscape() const { return Data & IsNoEscape; }
+ ExtParameterInfo withIsNoEscape(bool NoEscape) const {
+ ExtParameterInfo Copy = *this;
+ if (NoEscape)
+ Copy.Data |= IsNoEscape;
+ else
+ Copy.Data &= ~IsNoEscape;
+ return Copy;
+ }
+
+ unsigned char getOpaqueValue() const { return Data; }
+ static ExtParameterInfo getFromOpaqueValue(unsigned char data) {
+ ExtParameterInfo result;
+ result.Data = data;
+ return result;
+ }
+
+ friend bool operator==(ExtParameterInfo lhs, ExtParameterInfo rhs) {
+ return lhs.Data == rhs.Data;
+ }
+
+ friend bool operator!=(ExtParameterInfo lhs, ExtParameterInfo rhs) {
+ return lhs.Data != rhs.Data;
+ }
+ };
+
+ /// A class which abstracts out some details necessary for
+ /// making a call.
+ ///
+ /// It is not actually used directly for storing this information in
+ /// a FunctionType, although FunctionType does currently use the
+ /// same bit-pattern.
+ ///
+ // If you add a field (say Foo), other than the obvious places (both
+ // constructors, compile failures), what you need to update is
+ // * operator==
+ // * getFoo
+ // * withFoo
+ // * functionType. Add Foo, getFoo.
+ // * ASTContext::getFooType
+ // * ASTContext::mergeFunctionTypes
+ // * FunctionNoProtoType::Profile
+ // * FunctionProtoType::Profile
+ // * TypePrinter::PrintFunctionProto
+ // * AST read and write
+ // * Codegen
+ class ExtInfo {
+ friend class FunctionType;
+
+ // Feel free to rearrange or add bits, but if you go over 16, you'll need to
+ // adjust the Bits field below, and if you add bits, you'll need to adjust
+ // Type::FunctionTypeBitfields::ExtInfo as well.
+
+ // | CC |noreturn|produces|nocallersavedregs|regparm|nocfcheck|cmsenscall|
+ // |0 .. 5| 6 | 7 | 8 |9 .. 11| 12 | 13 |
+ //
+ // regparm is either 0 (no regparm attribute) or the regparm value+1.
+ enum { CallConvMask = 0x3F };
+ enum { NoReturnMask = 0x40 };
+ enum { ProducesResultMask = 0x80 };
+ enum { NoCallerSavedRegsMask = 0x100 };
+ enum { RegParmMask = 0xe00, RegParmOffset = 9 };
+ enum { NoCfCheckMask = 0x1000 };
+ enum { CmseNSCallMask = 0x2000 };
+ uint16_t Bits = CC_C;
+
+ ExtInfo(unsigned Bits) : Bits(static_cast<uint16_t>(Bits)) {}
+
+ public:
+ // Constructor with no defaults. Use this when you know that you
+ // have all the elements (when reading an AST file for example).
+ ExtInfo(bool noReturn, bool hasRegParm, unsigned regParm, CallingConv cc,
+ bool producesResult, bool noCallerSavedRegs, bool NoCfCheck,
+ bool cmseNSCall) {
+ assert((!hasRegParm || regParm < 7) && "Invalid regparm value");
+ Bits = ((unsigned)cc) | (noReturn ? NoReturnMask : 0) |
+ (producesResult ? ProducesResultMask : 0) |
+ (noCallerSavedRegs ? NoCallerSavedRegsMask : 0) |
+ (hasRegParm ? ((regParm + 1) << RegParmOffset) : 0) |
+ (NoCfCheck ? NoCfCheckMask : 0) |
+ (cmseNSCall ? CmseNSCallMask : 0);
+ }
+
+ // Constructor with all defaults. Use this when, for example, creating a
+ // function known to use defaults.
+ ExtInfo() = default;
+
+ // Constructor with just the calling convention, which is an important part
+ // of the canonical type.
+ ExtInfo(CallingConv CC) : Bits(CC) {}
+
+ bool getNoReturn() const { return Bits & NoReturnMask; }
+ bool getProducesResult() const { return Bits & ProducesResultMask; }
+ bool getCmseNSCall() const { return Bits & CmseNSCallMask; }
+ bool getNoCallerSavedRegs() const { return Bits & NoCallerSavedRegsMask; }
+ bool getNoCfCheck() const { return Bits & NoCfCheckMask; }
+ bool getHasRegParm() const { return ((Bits & RegParmMask) >> RegParmOffset) != 0; }
+
+ unsigned getRegParm() const {
+ unsigned RegParm = (Bits & RegParmMask) >> RegParmOffset;
+ if (RegParm > 0)
+ --RegParm;
+ return RegParm;
+ }
+
+ CallingConv getCC() const { return CallingConv(Bits & CallConvMask); }
+
+ bool operator==(ExtInfo Other) const {
+ return Bits == Other.Bits;
+ }
+ bool operator!=(ExtInfo Other) const {
+ return Bits != Other.Bits;
+ }
+
+ // Note that we don't have setters. That is by design: use
+ // the following 'with*' methods instead of mutating these objects.
+
+ ExtInfo withNoReturn(bool noReturn) const {
+ if (noReturn)
+ return ExtInfo(Bits | NoReturnMask);
+ else
+ return ExtInfo(Bits & ~NoReturnMask);
+ }
+
+ ExtInfo withProducesResult(bool producesResult) const {
+ if (producesResult)
+ return ExtInfo(Bits | ProducesResultMask);
+ else
+ return ExtInfo(Bits & ~ProducesResultMask);
+ }
+
+ ExtInfo withCmseNSCall(bool cmseNSCall) const {
+ if (cmseNSCall)
+ return ExtInfo(Bits | CmseNSCallMask);
+ else
+ return ExtInfo(Bits & ~CmseNSCallMask);
+ }
+
+ ExtInfo withNoCallerSavedRegs(bool noCallerSavedRegs) const {
+ if (noCallerSavedRegs)
+ return ExtInfo(Bits | NoCallerSavedRegsMask);
+ else
+ return ExtInfo(Bits & ~NoCallerSavedRegsMask);
+ }
+
+ ExtInfo withNoCfCheck(bool noCfCheck) const {
+ if (noCfCheck)
+ return ExtInfo(Bits | NoCfCheckMask);
+ else
+ return ExtInfo(Bits & ~NoCfCheckMask);
+ }
+
+ ExtInfo withRegParm(unsigned RegParm) const {
+ assert(RegParm < 7 && "Invalid regparm value");
+ return ExtInfo((Bits & ~RegParmMask) |
+ ((RegParm + 1) << RegParmOffset));
+ }
+
+ ExtInfo withCallingConv(CallingConv cc) const {
+ return ExtInfo((Bits & ~CallConvMask) | (unsigned) cc);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(Bits);
+ }
+ };
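+
+ // For illustration, a minimal sketch of the immutable 'with*' style above:
+ //   ExtInfo Info = ExtInfo(CC_C).withNoReturn(true);
+ //   assert(Info.getCC() == CC_C && Info.getNoReturn());
+ // Each 'with*' call returns a fresh value; the original is never mutated.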
+
+ /// A simple holder for a QualType representing a type in an
+ /// exception specification. Unfortunately needed by FunctionProtoType
+ /// because TrailingObjects cannot handle repeated types.
+ struct ExceptionType { QualType Type; };
+
+ /// A simple holder for various uncommon bits which do not fit in
+ /// FunctionTypeBitfields. Aligned to alignof(void *) to maintain the
+ /// alignment of subsequent objects in TrailingObjects.
+ struct alignas(void *) FunctionTypeExtraBitfields {
+ /// The number of types in the exception specification.
+ /// A whole unsigned is not needed here; according to
+ /// [implimits], 8 bits would be enough.
+ unsigned NumExceptionType : 10;
+
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasExtraAttributeInfo : 1;
+
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasArmTypeAttributes : 1;
+
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned EffectsHaveConditions : 1;
+ unsigned NumFunctionEffects : 4;
+
+ FunctionTypeExtraBitfields()
+ : NumExceptionType(0), HasExtraAttributeInfo(false),
+ HasArmTypeAttributes(false), EffectsHaveConditions(false),
+ NumFunctionEffects(0) {}
+ };
+
+ /// A holder for extra information from attributes which aren't part of an
+ /// \p AttributedType.
+ struct alignas(void *) FunctionTypeExtraAttributeInfo {
+ /// A CFI "salt" that differentiates functions with the same prototype.
+ StringRef CFISalt;
+
+ operator bool() const { return !CFISalt.empty(); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddString(CFISalt); }
+ };
+
+ /// The AArch64 SME ACLE (Arm C/C++ Language Extensions) defines a number
+ /// of function type attributes that can be set on function types, including
+ /// function pointers.
+ enum AArch64SMETypeAttributes : uint16_t {
+ SME_NormalFunction = 0,
+ SME_PStateSMEnabledMask = 1 << 0,
+ SME_PStateSMCompatibleMask = 1 << 1,
+
+ // Describes the value of the state using ArmStateValue.
+ SME_ZAShift = 2,
+ SME_ZAMask = 0b111 << SME_ZAShift,
+ SME_ZT0Shift = 5,
+ SME_ZT0Mask = 0b111 << SME_ZT0Shift,
+
+ // A bit to tell whether a function is agnostic about SME ZA state.
+ SME_AgnosticZAStateShift = 8,
+ SME_AgnosticZAStateMask = 1 << SME_AgnosticZAStateShift,
+
+ SME_AttributeMask =
+ 0b1'111'111'11 // We can't support more than 9 bits because of
+ // the bitmask in FunctionTypeArmAttributes
+ // and ExtProtoInfo.
+ };
+
+ enum ArmStateValue : unsigned {
+ ARM_None = 0,
+ ARM_Preserves = 1,
+ ARM_In = 2,
+ ARM_Out = 3,
+ ARM_InOut = 4,
+ };
+
+ static ArmStateValue getArmZAState(unsigned AttrBits) {
+ return static_cast<ArmStateValue>((AttrBits & SME_ZAMask) >> SME_ZAShift);
+ }
+
+ static ArmStateValue getArmZT0State(unsigned AttrBits) {
+ return static_cast<ArmStateValue>((AttrBits & SME_ZT0Mask) >> SME_ZT0Shift);
+ }
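+
+ // For illustration, a hedged sketch of the bit packing above: an SME
+ // attribute word for a streaming-enabled function with ZA state 'in' could
+ // be built as
+ //   unsigned Bits = SME_PStateSMEnabledMask | (ARM_In << SME_ZAShift);
+ // after which getArmZAState(Bits) recovers ARM_In.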
+
+ /// A holder for Arm type attributes, as described in the Arm C/C++
+ /// Language Extensions, which are not common to all types and are
+ /// therefore accounted for separately from FunctionTypeBitfields.
+ struct alignas(void *) FunctionTypeArmAttributes {
+ /// Any AArch64 SME ACLE type attributes that need to be propagated
+ /// on declarations and function pointers.
+ LLVM_PREFERRED_TYPE(AArch64SMETypeAttributes)
+ unsigned AArch64SMEAttributes : 9;
+
+ FunctionTypeArmAttributes() : AArch64SMEAttributes(SME_NormalFunction) {}
+ };
+
+protected:
+ FunctionType(TypeClass tc, QualType res, QualType Canonical,
+ TypeDependence Dependence, ExtInfo Info)
+ : Type(tc, Canonical, Dependence), ResultType(res) {
+ FunctionTypeBits.ExtInfo = Info.Bits;
+ }
+
+ Qualifiers getFastTypeQuals() const {
+ if (isFunctionProtoType())
+ return Qualifiers::fromFastMask(FunctionTypeBits.FastTypeQuals);
+
+ return Qualifiers();
+ }
+
+public:
+ QualType getReturnType() const { return ResultType; }
+
+ bool getHasRegParm() const { return getExtInfo().getHasRegParm(); }
+ unsigned getRegParmType() const { return getExtInfo().getRegParm(); }
+
+ /// Determine whether this function type includes the GNU noreturn
+ /// attribute. The C++11 [[noreturn]] attribute does not affect the function
+ /// type.
+ bool getNoReturnAttr() const { return getExtInfo().getNoReturn(); }
+
+ /// Determine whether this is a function prototype that includes the
+ /// cfi_unchecked_callee attribute.
+ bool getCFIUncheckedCalleeAttr() const;
+
+ bool getCmseNSCallAttr() const { return getExtInfo().getCmseNSCall(); }
+ CallingConv getCallConv() const { return getExtInfo().getCC(); }
+ ExtInfo getExtInfo() const { return ExtInfo(FunctionTypeBits.ExtInfo); }
+
+ static_assert((~Qualifiers::FastMask & Qualifiers::CVRMask) == 0,
+ "Const, volatile and restrict are assumed to be a subset of "
+ "the fast qualifiers.");
+
+ bool isConst() const { return getFastTypeQuals().hasConst(); }
+ bool isVolatile() const { return getFastTypeQuals().hasVolatile(); }
+ bool isRestrict() const { return getFastTypeQuals().hasRestrict(); }
+
+ /// Determine the type of an expression that calls a function of
+ /// this type.
+ QualType getCallResultType(const ASTContext &Context) const {
+ return getReturnType().getNonLValueExprType(Context);
+ }
+
+ static StringRef getNameForCallConv(CallingConv CC);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == FunctionNoProto ||
+ T->getTypeClass() == FunctionProto;
+ }
+};
+
+/// Represents a K&R-style 'int foo()' function, which has
+/// no information available about its arguments.
+class FunctionNoProtoType : public FunctionType, public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these.
+
+ FunctionNoProtoType(QualType Result, QualType Canonical, ExtInfo Info)
+ : FunctionType(FunctionNoProto, Result, Canonical,
+ Result->getDependence() &
+ ~(TypeDependence::DependentInstantiation |
+ TypeDependence::UnexpandedPack),
+ Info) {}
+
+public:
+ // No additional state past what FunctionType provides.
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getReturnType(), getExtInfo());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType ResultType,
+ ExtInfo Info) {
+ Info.Profile(ID);
+ ID.AddPointer(ResultType.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == FunctionNoProto;
+ }
+};
+
+// ------------------------------------------------------------------------------
+
+/// Represents an abstract function effect, using just an enumeration describing
+/// its kind.
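+///
+/// For example, the [[clang::nonblocking]] attribute on a function type
+/// produces an effect with Kind::NonBlocking, and [[clang::allocating]]
+/// produces one with Kind::Allocating.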
+class FunctionEffect {
+public:
+ /// Identifies the particular effect.
+ enum class Kind : uint8_t {
+ NonBlocking,
+ NonAllocating,
+ Blocking,
+ Allocating,
+ Last = Allocating
+ };
+ constexpr static size_t KindCount = static_cast<size_t>(Kind::Last) + 1;
+
+ /// Flags describing some behaviors of the effect.
+ using Flags = unsigned;
+ enum FlagBit : Flags {
+ // Can verification inspect callees' implementations? (e.g. nonblocking:
+ // yes, tcb+types: no). This also implies the need for 2nd-pass
+ // verification.
+ FE_InferrableOnCallees = 0x1,
+
+ // Language constructs which effects can diagnose as disallowed.
+ FE_ExcludeThrow = 0x2,
+ FE_ExcludeCatch = 0x4,
+ FE_ExcludeObjCMessageSend = 0x8,
+ FE_ExcludeStaticLocalVars = 0x10,
+ FE_ExcludeThreadLocalVars = 0x20
+ };
+
+private:
+ Kind FKind;
+
+ // Expansion: for hypothetical TCB+types, there could be one Kind for TCB,
+ // then ~16(?) bits "SubKind" to map to a specific named TCB. SubKind would
+ // be considered for uniqueness.
+
+public:
+ explicit FunctionEffect(Kind K) : FKind(K) {}
+
+ /// The kind of the effect.
+ Kind kind() const { return FKind; }
+
+ /// Return the opposite kind, for effects which have opposites.
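+ /// (For example, the opposite of NonBlocking is Blocking, and vice versa.)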
+ Kind oppositeKind() const;
+
+ /// For serialization.
+ uint32_t toOpaqueInt32() const { return uint32_t(FKind); }
+ static FunctionEffect fromOpaqueInt32(uint32_t Value) {
+ return FunctionEffect(Kind(Value));
+ }
+
+ /// Flags describing some behaviors of the effect.
+ Flags flags() const {
+ switch (kind()) {
+ case Kind::NonBlocking:
+ return FE_InferrableOnCallees | FE_ExcludeThrow | FE_ExcludeCatch |
+ FE_ExcludeObjCMessageSend | FE_ExcludeStaticLocalVars |
+ FE_ExcludeThreadLocalVars;
+ case Kind::NonAllocating:
+ // Same as NonBlocking, except without FE_ExcludeStaticLocalVars.
+ return FE_InferrableOnCallees | FE_ExcludeThrow | FE_ExcludeCatch |
+ FE_ExcludeObjCMessageSend | FE_ExcludeThreadLocalVars;
+ case Kind::Blocking:
+ case Kind::Allocating:
+ return 0;
+ }
+ llvm_unreachable("unknown effect kind");
+ }
+
+ /// The description printed in diagnostics, e.g. 'nonblocking'.
+ StringRef name() const;
+
+ friend raw_ostream &operator<<(raw_ostream &OS,
+ const FunctionEffect &Effect) {
+ OS << Effect.name();
+ return OS;
+ }
+
+ /// Determine whether the effect is allowed to be inferred on the callee,
+ /// which is either a FunctionDecl or BlockDecl. If the returned optional
+ /// is empty, inference is permitted; otherwise it holds the effect which
+ /// blocked inference.
+ /// Example: This allows nonblocking(false) to prevent inference for the
+ /// function.
+ std::optional<FunctionEffect>
+ effectProhibitingInference(const Decl &Callee,
+ FunctionEffectKindSet CalleeFX) const;
+
+ // Returns false for success. When true is returned for a direct call, the
+ // FE_InferrableOnCallees flag may trigger inference rather than an immediate
+ // diagnostic. The caller should be assumed to have the effect (it may not
+ // have it explicitly when inferring).
+ bool shouldDiagnoseFunctionCall(bool Direct,
+ FunctionEffectKindSet CalleeFX) const;
+
+ friend bool operator==(FunctionEffect LHS, FunctionEffect RHS) {
+ return LHS.FKind == RHS.FKind;
+ }
+ friend bool operator!=(FunctionEffect LHS, FunctionEffect RHS) {
+ return !(LHS == RHS);
+ }
+ friend bool operator<(FunctionEffect LHS, FunctionEffect RHS) {
+ return LHS.FKind < RHS.FKind;
+ }
+};
+
+/// Wrap a function effect's condition expression in another struct so
+/// that FunctionProtoType's TrailingObjects can treat it separately.
+class EffectConditionExpr {
+ Expr *Cond = nullptr; // if null, unconditional.
+
+public:
+ EffectConditionExpr() = default;
+ EffectConditionExpr(Expr *E) : Cond(E) {}
+
+ Expr *getCondition() const { return Cond; }
+
+ bool operator==(const EffectConditionExpr &RHS) const {
+ return Cond == RHS.Cond;
+ }
+};
+
+/// A FunctionEffect plus a potential boolean expression determining whether
+/// the effect is declared (e.g. nonblocking(expr)). Generally the condition
+/// expression, when present, is dependent.
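+///
+/// For example:
+/// \code
+/// template <bool B>
+/// void f() [[clang::nonblocking(B)]]; // dependent condition expression
+/// \endcode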
+struct FunctionEffectWithCondition {
+ FunctionEffect Effect;
+ EffectConditionExpr Cond;
+
+ FunctionEffectWithCondition(FunctionEffect E, const EffectConditionExpr &C)
+ : Effect(E), Cond(C) {}
+
+ /// Return a textual description of the effect, and its condition, if any.
+ std::string description() const;
+
+ friend raw_ostream &operator<<(raw_ostream &OS,
+ const FunctionEffectWithCondition &CFE);
+};
+
+/// Support iteration in parallel through a pair of FunctionEffect and
+/// EffectConditionExpr containers.
+template <typename Container> class FunctionEffectIterator {
+ friend Container;
+
+ const Container *Outer = nullptr;
+ size_t Idx = 0;
+
+public:
+ FunctionEffectIterator();
+ FunctionEffectIterator(const Container &O, size_t I) : Outer(&O), Idx(I) {}
+ bool operator==(const FunctionEffectIterator &Other) const {
+ return Idx == Other.Idx;
+ }
+ bool operator!=(const FunctionEffectIterator &Other) const {
+ return Idx != Other.Idx;
+ }
+
+ FunctionEffectIterator operator++() {
+ ++Idx;
+ return *this;
+ }
+
+ FunctionEffectWithCondition operator*() const {
+ assert(Outer != nullptr && "invalid FunctionEffectIterator");
+ bool HasConds = !Outer->Conditions.empty();
+ return FunctionEffectWithCondition{Outer->Effects[Idx],
+ HasConds ? Outer->Conditions[Idx]
+ : EffectConditionExpr()};
+ }
+};
+
+/// An immutable set of FunctionEffects and possibly conditions attached to
+/// them. The effects and conditions reside in memory not managed by this object
+/// (typically, trailing objects in FunctionProtoType, or borrowed references
+/// from a FunctionEffectSet).
+///
+/// Invariants:
+/// - there is never more than one instance of any given effect.
+/// - the array of conditions is either empty or has the same size as the
+/// array of effects.
+/// - some conditions may be null expressions; each condition pertains to
+/// the effect at the same array index.
+///
+/// Also, if there are any conditions, at least one of those expressions will be
+/// dependent, but this is only asserted in the constructor of
+/// FunctionProtoType.
+///
+/// See also FunctionEffectSet, in Sema, which provides a mutable set.
+class FunctionEffectsRef {
+ // Restrict classes which can call the private constructor -- these friends
+ // all maintain the required invariants. FunctionEffectSet is generally the
+ // only way in which the arrays are created; FunctionProtoType will not
+ // reorder them.
+ friend FunctionProtoType;
+ friend FunctionEffectSet;
+
+ ArrayRef<FunctionEffect> Effects;
+ ArrayRef<EffectConditionExpr> Conditions;
+
+ // The arrays are expected to have been sorted by the caller, with the
+ // effects in order. The conditions array must be empty or the same size
+ // as the effects array, since the conditions are associated with the effects
+ // at the same array indices.
+ FunctionEffectsRef(ArrayRef<FunctionEffect> FX,
+ ArrayRef<EffectConditionExpr> Conds)
+ : Effects(FX), Conditions(Conds) {}
+
+public:
+ /// Extract the effects from a Type if it is a function, block, or member
+ /// function pointer, or a reference or pointer to one.
+ static FunctionEffectsRef get(QualType QT);
+
+ /// Asserts invariants.
+ static FunctionEffectsRef create(ArrayRef<FunctionEffect> FX,
+ ArrayRef<EffectConditionExpr> Conds);
+
+ FunctionEffectsRef() = default;
+
+ bool empty() const { return Effects.empty(); }
+ size_t size() const { return Effects.size(); }
+
+ ArrayRef<FunctionEffect> effects() const { return Effects; }
+ ArrayRef<EffectConditionExpr> conditions() const { return Conditions; }
+
+ using iterator = FunctionEffectIterator<FunctionEffectsRef>;
+ friend iterator;
+ iterator begin() const { return iterator(*this, 0); }
+ iterator end() const { return iterator(*this, size()); }
+
+ friend bool operator==(const FunctionEffectsRef &LHS,
+ const FunctionEffectsRef &RHS) {
+ return LHS.Effects == RHS.Effects && LHS.Conditions == RHS.Conditions;
+ }
+ friend bool operator!=(const FunctionEffectsRef &LHS,
+ const FunctionEffectsRef &RHS) {
+ return !(LHS == RHS);
+ }
+
+ void dump(llvm::raw_ostream &OS) const;
+};
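+
+// A minimal illustration of iterating the set (the helper function is
+// hypothetical):
+// void printEffects(llvm::raw_ostream &OS, FunctionEffectsRef FX) {
+// for (FunctionEffectWithCondition FEC : FX)
+// OS << FEC.description() << '\n';
+// }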
+
+/// A mutable set of FunctionEffect::Kind.
+class FunctionEffectKindSet {
+ // For now this only needs to be a bitmap.
+ constexpr static size_t EndBitPos = FunctionEffect::KindCount;
+ using KindBitsT = std::bitset<EndBitPos>;
+
+ KindBitsT KindBits{};
+
+ explicit FunctionEffectKindSet(KindBitsT KB) : KindBits(KB) {}
+
+ // Functions to translate between an effect kind and a position in the
+ // bitset.
+
+ constexpr static size_t kindToPos(FunctionEffect::Kind K) {
+ return static_cast<size_t>(K);
+ }
+
+ constexpr static FunctionEffect::Kind posToKind(size_t Pos) {
+ return static_cast<FunctionEffect::Kind>(Pos);
+ }
+
+ // Iterates through the bits which are set.
+ class iterator {
+ const FunctionEffectKindSet *Outer = nullptr;
+ size_t Idx = 0;
+
+ // If Idx does not reference a set bit, advance it until it does,
+ // or until it reaches EndBitPos.
+ void advanceToNextSetBit() {
+ while (Idx < EndBitPos && !Outer->KindBits.test(Idx))
+ ++Idx;
+ }
+
+ public:
+ iterator();
+ iterator(const FunctionEffectKindSet &O, size_t I) : Outer(&O), Idx(I) {
+ advanceToNextSetBit();
+ }
+ bool operator==(const iterator &Other) const { return Idx == Other.Idx; }
+ bool operator!=(const iterator &Other) const { return Idx != Other.Idx; }
+
+ iterator operator++() {
+ ++Idx;
+ advanceToNextSetBit();
+ return *this;
+ }
+
+ FunctionEffect operator*() const {
+ assert(Idx < EndBitPos && "Dereference of end iterator");
+ return FunctionEffect(posToKind(Idx));
+ }
+ };
+
+public:
+ FunctionEffectKindSet() = default;
+ explicit FunctionEffectKindSet(FunctionEffectsRef FX) { insert(FX); }
+
+ iterator begin() const { return iterator(*this, 0); }
+ iterator end() const { return iterator(*this, EndBitPos); }
+
+ void insert(FunctionEffect Effect) { KindBits.set(kindToPos(Effect.kind())); }
+ void insert(FunctionEffectsRef FX) {
+ for (FunctionEffect Item : FX.effects())
+ insert(Item);
+ }
+ void insert(FunctionEffectKindSet Set) { KindBits |= Set.KindBits; }
+
+ bool empty() const { return KindBits.none(); }
+ bool contains(const FunctionEffect::Kind EK) const {
+ return KindBits.test(kindToPos(EK));
+ }
+ void dump(llvm::raw_ostream &OS) const;
+
+ static FunctionEffectKindSet difference(FunctionEffectKindSet LHS,
+ FunctionEffectKindSet RHS) {
+ return FunctionEffectKindSet(LHS.KindBits & ~RHS.KindBits);
+ }
+};
+
+/// A mutable set of FunctionEffects and possibly conditions attached to them.
+/// Used to compare and merge effects on declarations.
+///
+/// Has the same invariants as FunctionEffectsRef.
+class FunctionEffectSet {
+ SmallVector<FunctionEffect> Effects;
+ SmallVector<EffectConditionExpr> Conditions;
+
+public:
+ FunctionEffectSet() = default;
+
+ explicit FunctionEffectSet(const FunctionEffectsRef &FX)
+ : Effects(FX.effects()), Conditions(FX.conditions()) {}
+
+ bool empty() const { return Effects.empty(); }
+ size_t size() const { return Effects.size(); }
+
+ using iterator = FunctionEffectIterator<FunctionEffectSet>;
+ friend iterator;
+ iterator begin() const { return iterator(*this, 0); }
+ iterator end() const { return iterator(*this, size()); }
+
+ operator FunctionEffectsRef() const { return {Effects, Conditions}; }
+
+ void dump(llvm::raw_ostream &OS) const;
+
+ // Mutators
+
+ // On insertion, a conflict occurs when attempting to insert an
+ // effect which is opposite an effect already in the set, or attempting
+ // to insert an effect which is already in the set but with a condition
+ // which is not identical.
+ struct Conflict {
+ FunctionEffectWithCondition Kept;
+ FunctionEffectWithCondition Rejected;
+ };
+ using Conflicts = SmallVector<Conflict>;
+
+ // Returns true for success (obviating a check of Errs.empty()).
+ bool insert(const FunctionEffectWithCondition &NewEC, Conflicts &Errs);
+
+ // Returns true for success (obviating a check of Errs.empty()).
+ bool insert(const FunctionEffectsRef &Set, Conflicts &Errs);
+
+ // Set operations
+
+ static FunctionEffectSet getUnion(FunctionEffectsRef LHS,
+ FunctionEffectsRef RHS, Conflicts &Errs);
+ static FunctionEffectSet getIntersection(FunctionEffectsRef LHS,
+ FunctionEffectsRef RHS);
+};
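+
+// A minimal sketch of merging the effects of two declarations (variable
+// names are hypothetical):
+// FunctionEffectSet::Conflicts Errs;
+// FunctionEffectSet Merged = FunctionEffectSet::getUnion(OldFX, NewFX, Errs);
+// if (!Errs.empty())
+// ; // diagnose each Conflict's Kept/Rejected pair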
+
+/// Represents a prototype with parameter type info, e.g.
+/// 'int foo(int)' or 'int foo(void)'. 'void' is represented as having no
+/// parameters, not as having a single void parameter. Such a type can have
+/// an exception specification, but this specification is not part of the
+/// canonical type. FunctionProtoType has several trailing objects, some of
+/// which are optional. For more information about the trailing objects see
+/// the first comment inside FunctionProtoType.
+class FunctionProtoType final
+ : public FunctionType,
+ public llvm::FoldingSetNode,
+ private llvm::TrailingObjects<
+ FunctionProtoType, QualType, SourceLocation,
+ FunctionType::FunctionTypeExtraBitfields,
+ FunctionType::FunctionTypeExtraAttributeInfo,
+ FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
+ Expr *, FunctionDecl *, FunctionType::ExtParameterInfo, Qualifiers,
+ FunctionEffect, EffectConditionExpr> {
+ friend class ASTContext; // ASTContext creates these.
+ friend TrailingObjects;
+
+ // FunctionProtoType is followed by several trailing objects, some of
+ // which are optional. They are in order:
+ //
+ // * An array of getNumParams() QualType holding the parameter types.
+ // Always present. Note that for the vast majority of FunctionProtoType,
+ // these will be the only trailing objects.
+ //
+ // * Optionally if the function is variadic, the SourceLocation of the
+ // ellipsis.
+ //
+ // * Optionally if some extra data is stored in FunctionTypeExtraBitfields
+ // (see FunctionTypeExtraBitfields and FunctionTypeBitfields):
+ // a single FunctionTypeExtraBitfields. Present if and only if
+ // hasExtraBitfields() is true.
+ //
+ // * Optionally exactly one of:
+ // * an array of getNumExceptions() ExceptionType,
+ // * a single Expr *,
+ // * a pair of FunctionDecl *,
+ // * a single FunctionDecl *
+ // used to store information about the various types of exception
+ // specification. See getExceptionSpecSize for the details.
+ //
+ // * Optionally an array of getNumParams() ExtParameterInfo holding
+ // an ExtParameterInfo for each of the parameters. Present if and
+ // only if hasExtParameterInfos() is true.
+ //
+ // * Optionally a Qualifiers object to represent extra qualifiers that can't
+ // be represented by FunctionTypeBitfields.FastTypeQuals. Present if and
+ // only if hasExtQualifiers() is true.
+ //
+ // * Optionally, an array of getNumFunctionEffects() FunctionEffect.
+ //   Present only when getNumFunctionEffects() > 0.
+ //
+ // * Optionally, an array of getNumFunctionEffects() EffectConditionExpr.
+ // Present only when getNumFunctionEffectConditions() > 0.
+ //
+ // The optional FunctionTypeExtraBitfields has to be before the data
+ // related to the exception specification since it contains the number
+ // of exception types.
+ //
+ // We put the ExtParameterInfos later. If all were equal, it would make
+ // more sense to put these before the exception specification, because
+ // it's much easier to skip past them compared to the elaborate switch
+ // required to skip the exception specification. However, all is not
+ // equal; ExtParameterInfos are used to model very uncommon features,
+ // and it's better not to burden the more common paths.
+
+public:
+ /// Holds information about the various types of exception specification.
+ /// ExceptionSpecInfo is not stored as such in FunctionProtoType but is
+ /// used to group together the various bits of information about the
+ /// exception specification.
+ struct ExceptionSpecInfo {
+ /// The kind of exception specification this is.
+ ExceptionSpecificationType Type = EST_None;
+
+ /// Explicitly-specified list of exception types.
+ ArrayRef<QualType> Exceptions;
+
+ /// Noexcept expression, if this is a computed noexcept specification.
+ Expr *NoexceptExpr = nullptr;
+
+ /// The function whose exception specification this is, for
+ /// EST_Unevaluated and EST_Uninstantiated.
+ FunctionDecl *SourceDecl = nullptr;
+
+ /// The function template whose exception specification this is instantiated
+ /// from, for EST_Uninstantiated.
+ FunctionDecl *SourceTemplate = nullptr;
+
+ ExceptionSpecInfo() = default;
+
+ ExceptionSpecInfo(ExceptionSpecificationType EST) : Type(EST) {}
+
+ void instantiate();
+ };
+
+ /// Extra information about a function prototype. ExtProtoInfo is not
+ /// stored as such in FunctionProtoType but is used to group together
+ /// the various bits of extra information about a function prototype.
+ struct ExtProtoInfo {
+ FunctionType::ExtInfo ExtInfo;
+ Qualifiers TypeQuals;
+ RefQualifierKind RefQualifier = RQ_None;
+ ExceptionSpecInfo ExceptionSpec;
+ const ExtParameterInfo *ExtParameterInfos = nullptr;
+ SourceLocation EllipsisLoc;
+ FunctionEffectsRef FunctionEffects;
+ FunctionTypeExtraAttributeInfo ExtraAttributeInfo;
+
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned Variadic : 1;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasTrailingReturn : 1;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned CFIUncheckedCallee : 1;
+ LLVM_PREFERRED_TYPE(AArch64SMETypeAttributes)
+ unsigned AArch64SMEAttributes : 9;
+
+ ExtProtoInfo()
+ : Variadic(false), HasTrailingReturn(false), CFIUncheckedCallee(false),
+ AArch64SMEAttributes(SME_NormalFunction) {}
+
+ ExtProtoInfo(CallingConv CC)
+ : ExtInfo(CC), Variadic(false), HasTrailingReturn(false),
+ CFIUncheckedCallee(false), AArch64SMEAttributes(SME_NormalFunction) {}
+
+ ExtProtoInfo withExceptionSpec(const ExceptionSpecInfo &ESI) {
+ ExtProtoInfo Result(*this);
+ Result.ExceptionSpec = ESI;
+ return Result;
+ }
+
+ ExtProtoInfo withCFIUncheckedCallee(bool CFIUncheckedCallee) {
+ ExtProtoInfo Result(*this);
+ Result.CFIUncheckedCallee = CFIUncheckedCallee;
+ return Result;
+ }
+
+ bool requiresFunctionProtoTypeExtraBitfields() const {
+ return ExceptionSpec.Type == EST_Dynamic ||
+ requiresFunctionProtoTypeArmAttributes() ||
+ requiresFunctionProtoTypeExtraAttributeInfo() ||
+ !FunctionEffects.empty();
+ }
+
+ bool requiresFunctionProtoTypeArmAttributes() const {
+ return AArch64SMEAttributes != SME_NormalFunction;
+ }
+
+ bool requiresFunctionProtoTypeExtraAttributeInfo() const {
+ return static_cast<bool>(ExtraAttributeInfo);
+ }
+
+ void setArmSMEAttribute(AArch64SMETypeAttributes Kind, bool Enable = true) {
+ if (Enable)
+ AArch64SMEAttributes |= Kind;
+ else
+ AArch64SMEAttributes &= ~Kind;
+ }
+ };
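+
+ // A minimal sketch of building a prototype from this info (Ctx is an
+ // ASTContext; the variable names are hypothetical):
+ // FunctionProtoType::ExtProtoInfo EPI(CC_C);
+ // EPI.Variadic = true;
+ // QualType FnTy = Ctx.getFunctionType(Ctx.IntTy, {Ctx.IntTy}, EPI);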
+
+private:
+ unsigned numTrailingObjects(OverloadToken<QualType>) const {
+ return getNumParams();
+ }
+
+ unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
+ return isVariadic();
+ }
+
+ unsigned numTrailingObjects(OverloadToken<FunctionTypeArmAttributes>) const {
+ return hasArmTypeAttributes();
+ }
+
+ unsigned numTrailingObjects(OverloadToken<FunctionTypeExtraBitfields>) const {
+ return hasExtraBitfields();
+ }
+
+ unsigned
+ numTrailingObjects(OverloadToken<FunctionTypeExtraAttributeInfo>) const {
+ return hasExtraAttributeInfo();
+ }
+
+ unsigned numTrailingObjects(OverloadToken<ExceptionType>) const {
+ return getExceptionSpecSize().NumExceptionType;
+ }
+
+ unsigned numTrailingObjects(OverloadToken<Expr *>) const {
+ return getExceptionSpecSize().NumExprPtr;
+ }
+
+ unsigned numTrailingObjects(OverloadToken<FunctionDecl *>) const {
+ return getExceptionSpecSize().NumFunctionDeclPtr;
+ }
+
+ unsigned numTrailingObjects(OverloadToken<ExtParameterInfo>) const {
+ return hasExtParameterInfos() ? getNumParams() : 0;
+ }
+
+ unsigned numTrailingObjects(OverloadToken<Qualifiers>) const {
+ return hasExtQualifiers() ? 1 : 0;
+ }
+
+ unsigned numTrailingObjects(OverloadToken<FunctionEffect>) const {
+ return getNumFunctionEffects();
+ }
+
+ /// Determine whether there are any argument types that
+ /// contain an unexpanded parameter pack.
+ static bool containsAnyUnexpandedParameterPack(const QualType *ArgArray,
+ unsigned numArgs) {
+ for (unsigned Idx = 0; Idx < numArgs; ++Idx)
+ if (ArgArray[Idx]->containsUnexpandedParameterPack())
+ return true;
+
+ return false;
+ }
+
+ FunctionProtoType(QualType result, ArrayRef<QualType> params,
+ QualType canonical, const ExtProtoInfo &epi);
+
+ /// This struct is returned by getExceptionSpecSize and is used to
+ /// translate an ExceptionSpecificationType to the number and kind
+ /// of trailing objects related to the exception specification.
+ struct ExceptionSpecSizeHolder {
+ unsigned NumExceptionType;
+ unsigned NumExprPtr;
+ unsigned NumFunctionDeclPtr;
+ };
+
+ /// Return the number and kind of trailing objects
+ /// related to the exception specification.
+ static ExceptionSpecSizeHolder
+ getExceptionSpecSize(ExceptionSpecificationType EST, unsigned NumExceptions) {
+ switch (EST) {
+ case EST_None:
+ case EST_DynamicNone:
+ case EST_MSAny:
+ case EST_BasicNoexcept:
+ case EST_Unparsed:
+ case EST_NoThrow:
+ return {0, 0, 0};
+
+ case EST_Dynamic:
+ return {NumExceptions, 0, 0};
+
+ case EST_DependentNoexcept:
+ case EST_NoexceptFalse:
+ case EST_NoexceptTrue:
+ return {0, 1, 0};
+
+ case EST_Uninstantiated:
+ return {0, 0, 2};
+
+ case EST_Unevaluated:
+ return {0, 0, 1};
+ }
+ llvm_unreachable("bad exception specification kind");
+ }
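+
+ // For example, 'void f() throw(int, float)' is EST_Dynamic and yields
+ // {2, 0, 0}; 'void g() noexcept(B)' with a dependent B is
+ // EST_DependentNoexcept and yields {0, 1, 0}.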
+
+ /// Return the number and kind of trailing objects
+ /// related to the exception specification.
+ ExceptionSpecSizeHolder getExceptionSpecSize() const {
+ return getExceptionSpecSize(getExceptionSpecType(), getNumExceptions());
+ }
+
+ /// Whether the trailing FunctionTypeExtraBitfields is present.
+ bool hasExtraBitfields() const {
+ assert((getExceptionSpecType() != EST_Dynamic ||
+ FunctionTypeBits.HasExtraBitfields) &&
+ "ExtraBitfields are required for given ExceptionSpecType");
+ return FunctionTypeBits.HasExtraBitfields;
+ }
+
+ bool hasExtraAttributeInfo() const {
+ return FunctionTypeBits.HasExtraBitfields &&
+ getTrailingObjects<FunctionTypeExtraBitfields>()
+ ->HasExtraAttributeInfo;
+ }
+
+ bool hasArmTypeAttributes() const {
+ return FunctionTypeBits.HasExtraBitfields &&
+ getTrailingObjects<FunctionTypeExtraBitfields>()
+ ->HasArmTypeAttributes;
+ }
+
+ bool hasExtQualifiers() const {
+ return FunctionTypeBits.HasExtQuals;
+ }
+
+public:
+ unsigned getNumParams() const { return FunctionTypeBits.NumParams; }
+
+ QualType getParamType(unsigned i) const {
+ assert(i < getNumParams() && "invalid parameter index");
+ return param_type_begin()[i];
+ }
+
+ ArrayRef<QualType> getParamTypes() const {
+ return {param_type_begin(), param_type_end()};
+ }
+
+ ExtProtoInfo getExtProtoInfo() const {
+ ExtProtoInfo EPI;
+ EPI.ExtInfo = getExtInfo();
+ EPI.Variadic = isVariadic();
+ EPI.EllipsisLoc = getEllipsisLoc();
+ EPI.HasTrailingReturn = hasTrailingReturn();
+ EPI.CFIUncheckedCallee = hasCFIUncheckedCallee();
+ EPI.ExceptionSpec = getExceptionSpecInfo();
+ EPI.TypeQuals = getMethodQuals();
+ EPI.RefQualifier = getRefQualifier();
+ EPI.ExtParameterInfos = getExtParameterInfosOrNull();
+ EPI.ExtraAttributeInfo = getExtraAttributeInfo();
+ EPI.AArch64SMEAttributes = getAArch64SMEAttributes();
+ EPI.FunctionEffects = getFunctionEffects();
+ return EPI;
+ }
+
+ /// Get the kind of exception specification on this function.
+ ExceptionSpecificationType getExceptionSpecType() const {
+ return static_cast<ExceptionSpecificationType>(
+ FunctionTypeBits.ExceptionSpecType);
+ }
+
+ /// Return whether this function has any kind of exception spec.
+ bool hasExceptionSpec() const { return getExceptionSpecType() != EST_None; }
+
+ /// Return whether this function has a dynamic (throw) exception spec.
+ bool hasDynamicExceptionSpec() const {
+ return isDynamicExceptionSpec(getExceptionSpecType());
+ }
+
+ /// Return whether this function has a noexcept exception spec.
+ bool hasNoexceptExceptionSpec() const {
+ return isNoexceptExceptionSpec(getExceptionSpecType());
+ }
+
+ /// Return whether this function has a dependent exception spec.
+ bool hasDependentExceptionSpec() const;
+
+ /// Return whether this function has an instantiation-dependent exception
+ /// spec.
+ bool hasInstantiationDependentExceptionSpec() const;
+
+ /// Return all the available information about this type's exception spec.
+ ExceptionSpecInfo getExceptionSpecInfo() const {
+ ExceptionSpecInfo Result;
+ Result.Type = getExceptionSpecType();
+ if (Result.Type == EST_Dynamic) {
+ Result.Exceptions = exceptions();
+ } else if (isComputedNoexcept(Result.Type)) {
+ Result.NoexceptExpr = getNoexceptExpr();
+ } else if (Result.Type == EST_Uninstantiated) {
+ Result.SourceDecl = getExceptionSpecDecl();
+ Result.SourceTemplate = getExceptionSpecTemplate();
+ } else if (Result.Type == EST_Unevaluated) {
+ Result.SourceDecl = getExceptionSpecDecl();
+ }
+ return Result;
+ }
+
+ /// Return the number of types in the exception specification.
+ unsigned getNumExceptions() const {
+ return getExceptionSpecType() == EST_Dynamic
+ ? getTrailingObjects<FunctionTypeExtraBitfields>()
+ ->NumExceptionType
+ : 0;
+ }
+
+ /// Return the ith exception type, where 0 <= i < getNumExceptions().
+ QualType getExceptionType(unsigned i) const {
+ assert(i < getNumExceptions() && "Invalid exception number!");
+ return exception_begin()[i];
+ }
+
+ /// Return the expression inside noexcept(expression), or a null pointer
+ /// if there is none (because the exception spec is not of this form).
+ Expr *getNoexceptExpr() const {
+ if (!isComputedNoexcept(getExceptionSpecType()))
+ return nullptr;
+ return *getTrailingObjects<Expr *>();
+ }
+
+ /// If this function type has an exception specification which hasn't
+ /// been determined yet (either because it has not been evaluated or because
+ /// it has not been instantiated), this is the function whose exception
+ /// specification is represented by this type.
+ FunctionDecl *getExceptionSpecDecl() const {
+ if (getExceptionSpecType() != EST_Uninstantiated &&
+ getExceptionSpecType() != EST_Unevaluated)
+ return nullptr;
+ return getTrailingObjects<FunctionDecl *>()[0];
+ }
+
+ /// If this function type has an uninstantiated exception
+ /// specification, this is the function whose exception specification
+ /// should be instantiated to find the exception specification for
+ /// this type.
+ FunctionDecl *getExceptionSpecTemplate() const {
+ if (getExceptionSpecType() != EST_Uninstantiated)
+ return nullptr;
+ return getTrailingObjects<FunctionDecl *>()[1];
+ }
+
+ /// Determine whether this function type has a non-throwing exception
+ /// specification.
+ CanThrowResult canThrow() const;
+
+ /// Determine whether this function type has a non-throwing exception
+ /// specification. If this depends on template arguments, returns
+ /// \c ResultIfDependent.
+ bool isNothrow(bool ResultIfDependent = false) const {
+ return ResultIfDependent ? canThrow() != CT_Can : canThrow() == CT_Cannot;
+ }
+
+ /// Whether this function prototype is variadic.
+ bool isVariadic() const { return FunctionTypeBits.Variadic; }
+
+ SourceLocation getEllipsisLoc() const {
+ return isVariadic() ? *getTrailingObjects<SourceLocation>()
+ : SourceLocation();
+ }
+
+ /// Determines whether this function prototype contains a
+ /// parameter pack at the end.
+ ///
+ /// A function template whose last parameter is a parameter pack can be
+ /// called with an arbitrary number of arguments, much like a variadic
+ /// function.
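+ ///
+ /// \code
+ /// template <class... Ts> void f(Ts...); // isTemplateVariadic() == true
+ /// \endcode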
+ bool isTemplateVariadic() const;
+
+ /// Whether this function prototype has a trailing return type.
+ bool hasTrailingReturn() const { return FunctionTypeBits.HasTrailingReturn; }
+
+ bool hasCFIUncheckedCallee() const {
+ return FunctionTypeBits.CFIUncheckedCallee;
+ }
+
+ Qualifiers getMethodQuals() const {
+ if (hasExtQualifiers())
+ return *getTrailingObjects<Qualifiers>();
+ else
+ return getFastTypeQuals();
+ }
+
+ /// Retrieve the ref-qualifier associated with this function type.
+ RefQualifierKind getRefQualifier() const {
+ return static_cast<RefQualifierKind>(FunctionTypeBits.RefQualifier);
+ }
+
+ using param_type_iterator = const QualType *;
+
+ ArrayRef<QualType> param_types() const {
+ return {param_type_begin(), param_type_end()};
+ }
+
+ param_type_iterator param_type_begin() const {
+ return getTrailingObjects<QualType>();
+ }
+
+ param_type_iterator param_type_end() const {
+ return param_type_begin() + getNumParams();
+ }
+
+ using exception_iterator = const QualType *;
+
+ ArrayRef<QualType> exceptions() const {
+ return {exception_begin(), exception_end()};
+ }
+
+ exception_iterator exception_begin() const {
+ return reinterpret_cast<exception_iterator>(
+ getTrailingObjects<ExceptionType>());
+ }
+
+ exception_iterator exception_end() const {
+ return exception_begin() + getNumExceptions();
+ }
+
+ /// Is there any interesting extra information for any of the parameters
+ /// of this function type?
+ bool hasExtParameterInfos() const {
+ return FunctionTypeBits.HasExtParameterInfos;
+ }
+
+ ArrayRef<ExtParameterInfo> getExtParameterInfos() const {
+ assert(hasExtParameterInfos());
+ return ArrayRef<ExtParameterInfo>(getTrailingObjects<ExtParameterInfo>(),
+ getNumParams());
+ }
+
+ /// Return a pointer to the beginning of the array of extra parameter
+ /// information, if present, or else null if none of the parameters
+ /// carry it. This is equivalent to getExtProtoInfo().ExtParameterInfos.
+ const ExtParameterInfo *getExtParameterInfosOrNull() const {
+ if (!hasExtParameterInfos())
+ return nullptr;
+ return getTrailingObjects<ExtParameterInfo>();
+ }
+
+ /// Return the extra attribute information.
+ FunctionTypeExtraAttributeInfo getExtraAttributeInfo() const {
+ if (hasExtraAttributeInfo())
+ return *getTrailingObjects<FunctionTypeExtraAttributeInfo>();
+ return FunctionTypeExtraAttributeInfo();
+ }
+
+ /// Return a bitmask describing the SME attributes on the function type, see
+ /// AArch64SMETypeAttributes for their values.
+ unsigned getAArch64SMEAttributes() const {
+ if (!hasArmTypeAttributes())
+ return SME_NormalFunction;
+ return getTrailingObjects<FunctionTypeArmAttributes>()
+ ->AArch64SMEAttributes;
+ }
+
+ ExtParameterInfo getExtParameterInfo(unsigned I) const {
+ assert(I < getNumParams() && "parameter index out of range");
+ if (hasExtParameterInfos())
+ return getTrailingObjects<ExtParameterInfo>()[I];
+ return ExtParameterInfo();
+ }
+
+ ParameterABI getParameterABI(unsigned I) const {
+ assert(I < getNumParams() && "parameter index out of range");
+ if (hasExtParameterInfos())
+ return getTrailingObjects<ExtParameterInfo>()[I].getABI();
+ return ParameterABI::Ordinary;
+ }
+
+ bool isParamConsumed(unsigned I) const {
+ assert(I < getNumParams() && "parameter index out of range");
+ if (hasExtParameterInfos())
+ return getTrailingObjects<ExtParameterInfo>()[I].isConsumed();
+ return false;
+ }
+
+ unsigned getNumFunctionEffects() const {
+ return hasExtraBitfields()
+ ? getTrailingObjects<FunctionTypeExtraBitfields>()
+ ->NumFunctionEffects
+ : 0;
+ }
+
+ // For serialization.
+ ArrayRef<FunctionEffect> getFunctionEffectsWithoutConditions() const {
+ if (hasExtraBitfields()) {
+ const auto *Bitfields = getTrailingObjects<FunctionTypeExtraBitfields>();
+ if (Bitfields->NumFunctionEffects > 0)
+ return getTrailingObjects<FunctionEffect>(
+ Bitfields->NumFunctionEffects);
+ }
+ return {};
+ }
+
+ unsigned getNumFunctionEffectConditions() const {
+ if (hasExtraBitfields()) {
+ const auto *Bitfields = getTrailingObjects<FunctionTypeExtraBitfields>();
+ if (Bitfields->EffectsHaveConditions)
+ return Bitfields->NumFunctionEffects;
+ }
+ return 0;
+ }
+
+ // For serialization.
+ ArrayRef<EffectConditionExpr> getFunctionEffectConditions() const {
+ if (hasExtraBitfields()) {
+ const auto *Bitfields = getTrailingObjects<FunctionTypeExtraBitfields>();
+ if (Bitfields->EffectsHaveConditions)
+ return getTrailingObjects<EffectConditionExpr>(
+ Bitfields->NumFunctionEffects);
+ }
+ return {};
+ }
+
+ // Combines effects with their conditions.
+ FunctionEffectsRef getFunctionEffects() const {
+ if (hasExtraBitfields()) {
+ const auto *Bitfields = getTrailingObjects<FunctionTypeExtraBitfields>();
+ if (Bitfields->NumFunctionEffects > 0) {
+ const size_t NumConds = Bitfields->EffectsHaveConditions
+ ? Bitfields->NumFunctionEffects
+ : 0;
+ return FunctionEffectsRef(
+ getTrailingObjects<FunctionEffect>(Bitfields->NumFunctionEffects),
+ {NumConds ? getTrailingObjects<EffectConditionExpr>() : nullptr,
+ NumConds});
+ }
+ }
+ return {};
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void printExceptionSpecification(raw_ostream &OS,
+ const PrintingPolicy &Policy) const;
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == FunctionProto;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx);
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Result,
+ param_type_iterator ArgTys, unsigned NumArgs,
+ const ExtProtoInfo &EPI, const ASTContext &Context,
+ bool Canonical);
+};
+
+/// The elaboration keyword that precedes a qualified type name or
+/// introduces an elaborated-type-specifier.
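+///
+/// For example, in 'struct S s;' the keyword is ElaboratedTypeKeyword::Struct,
+/// while in 'typename T::type v;' it is ElaboratedTypeKeyword::Typename.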
+enum class ElaboratedTypeKeyword {
+ /// The "struct" keyword introduces the elaborated-type-specifier.
+ Struct,
+
+ /// The "__interface" keyword introduces the elaborated-type-specifier.
+ Interface,
+
+ /// The "union" keyword introduces the elaborated-type-specifier.
+ Union,
+
+ /// The "class" keyword introduces the elaborated-type-specifier.
+ Class,
+
+ /// The "enum" keyword introduces the elaborated-type-specifier.
+ Enum,
+
+ /// The "typename" keyword precedes the qualified type name, e.g.,
+ /// \c typename T::type.
+ Typename,
+
+ /// No keyword precedes the qualified type name.
+ None
+};
+
+/// The kind of a tag type.
+enum class TagTypeKind {
+ /// The "struct" keyword.
+ Struct,
+
+ /// The "__interface" keyword.
+ Interface,
+
+ /// The "union" keyword.
+ Union,
+
+ /// The "class" keyword.
+ Class,
+
+ /// The "enum" keyword.
+ Enum
+};
+
+/// Provides a few static helpers for converting and printing
+/// elaborated type keyword and tag type kind enumerations.
+struct KeywordHelpers {
+ /// Converts a type specifier (DeclSpec::TST) into an elaborated type keyword.
+ static ElaboratedTypeKeyword getKeywordForTypeSpec(unsigned TypeSpec);
+
+ /// Converts a type specifier (DeclSpec::TST) into a tag type kind.
+ /// It is an error to provide a type specifier which *isn't* a tag kind here.
+ static TagTypeKind getTagTypeKindForTypeSpec(unsigned TypeSpec);
+
+ /// Converts a TagTypeKind into an elaborated type keyword.
+ static ElaboratedTypeKeyword getKeywordForTagTypeKind(TagTypeKind Tag);
+
+ /// Converts an elaborated type keyword into a TagTypeKind.
+ /// It is an error to provide an elaborated type keyword
+ /// which *isn't* a tag kind here.
+ static TagTypeKind getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword);
+
+ static bool KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword);
+
+ static StringRef getKeywordName(ElaboratedTypeKeyword Keyword);
+
+ static StringRef getTagTypeKindName(TagTypeKind Kind) {
+ return getKeywordName(getKeywordForTagTypeKind(Kind));
+ }
+};
+
+template <class T> class KeywordWrapper : public T, public KeywordHelpers {
+protected:
+ template <class... As>
+ KeywordWrapper(ElaboratedTypeKeyword Keyword, As &&...as)
+ : T(std::forward<As>(as)...) {
+ this->KeywordWrapperBits.Keyword = llvm::to_underlying(Keyword);
+ }
+
+public:
+ ElaboratedTypeKeyword getKeyword() const {
+ return static_cast<ElaboratedTypeKeyword>(this->KeywordWrapperBits.Keyword);
+ }
+
+ class CannotCastToThisType {};
+ static CannotCastToThisType classof(const T *);
+};
+
+/// A helper class for Type nodes having an ElaboratedTypeKeyword.
+/// The keyword is stored in the free bits of the base class.
+class TypeWithKeyword : public KeywordWrapper<Type> {
+protected:
+ TypeWithKeyword(ElaboratedTypeKeyword Keyword, TypeClass tc,
+ QualType Canonical, TypeDependence Dependence)
+ : KeywordWrapper(Keyword, tc, Canonical, Dependence) {}
+};
+
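+/// A FoldingSetNode allocated immediately after the type node T it profiles;
+/// getType() recovers the owning node by stepping back over T's aligned size.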
+template <class T> struct FoldingSetPlaceholder : llvm::FoldingSetNode {
+ void Profile(llvm::FoldingSetNodeID &ID) { getType()->Profile(ID); }
+
+ inline const T *getType() const {
+ constexpr unsigned long Offset =
+ llvm::alignTo(sizeof(T), alignof(FoldingSetPlaceholder));
+ const auto *Addr = reinterpret_cast<const T *>(
+ reinterpret_cast<const char *>(this) - Offset);
+ assert(llvm::isAddrAligned(llvm::Align(alignof(T)), Addr));
+ return Addr;
+ }
+};
+
+/// Represents the dependent type named by a dependently-scoped
+/// typename using declaration, e.g.
+/// using typename Base<T>::foo;
+///
+/// Template instantiation turns these into the underlying type.
+class UnresolvedUsingType final
+ : public TypeWithKeyword,
+ private llvm::TrailingObjects<UnresolvedUsingType,
+ FoldingSetPlaceholder<UnresolvedUsingType>,
+ NestedNameSpecifier> {
+ friend class ASTContext; // ASTContext creates these.
+ friend TrailingObjects;
+
+ UnresolvedUsingTypenameDecl *Decl;
+
+ unsigned numTrailingObjects(
+ OverloadToken<FoldingSetPlaceholder<UnresolvedUsingType>>) const {
+ assert(UnresolvedUsingBits.hasQualifier ||
+ getKeyword() != ElaboratedTypeKeyword::None);
+ return 1;
+ }
+
+ FoldingSetPlaceholder<UnresolvedUsingType> *getFoldingSetPlaceholder() {
+ assert(numTrailingObjects(
+ OverloadToken<FoldingSetPlaceholder<UnresolvedUsingType>>{}) ==
+ 1);
+ return getTrailingObjects<FoldingSetPlaceholder<UnresolvedUsingType>>();
+ }
+
+ UnresolvedUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D,
+ const Type *CanonicalType);
+
+public:
+ NestedNameSpecifier getQualifier() const {
+ return UnresolvedUsingBits.hasQualifier
+ ? *getTrailingObjects<NestedNameSpecifier>()
+ : std::nullopt;
+ }
+
+ UnresolvedUsingTypenameDecl *getDecl() const { return Decl; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D) {
+ static_assert(llvm::to_underlying(ElaboratedTypeKeyword::None) <= 7);
+ ID.AddInteger(uintptr_t(D) | llvm::to_underlying(Keyword));
+ if (Qualifier)
+ Qualifier.Profile(ID);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, getKeyword(), getQualifier(), getDecl());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == UnresolvedUsing;
+ }
+};
+
+class UsingType final : public TypeWithKeyword,
+ public llvm::FoldingSetNode,
+ llvm::TrailingObjects<UsingType, NestedNameSpecifier> {
+ UsingShadowDecl *D;
+ QualType UnderlyingType;
+
+ friend class ASTContext; // ASTContext creates these.
+ friend TrailingObjects;
+
+ UsingType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
+ const UsingShadowDecl *D, QualType UnderlyingType);
+
+public:
+ NestedNameSpecifier getQualifier() const {
+ return UsingBits.hasQualifier ? *getTrailingObjects() : std::nullopt;
+ }
+
+ UsingShadowDecl *getDecl() const { return D; }
+
+ QualType desugar() const { return UnderlyingType; }
+ bool isSugared() const { return true; }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const UsingShadowDecl *D,
+ QualType UnderlyingType) {
+ static_assert(llvm::to_underlying(ElaboratedTypeKeyword::None) <= 7);
+ ID.AddInteger(uintptr_t(D) | llvm::to_underlying(Keyword));
+ UnderlyingType.Profile(ID);
+ if (Qualifier)
+ Qualifier.Profile(ID);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, getKeyword(), getQualifier(), D, desugar());
+ }
+ static bool classof(const Type *T) { return T->getTypeClass() == Using; }
+};
+
+class TypedefType final
+ : public TypeWithKeyword,
+ private llvm::TrailingObjects<TypedefType,
+ FoldingSetPlaceholder<TypedefType>,
+ NestedNameSpecifier, QualType> {
+ TypedefNameDecl *Decl;
+ friend class ASTContext; // ASTContext creates these.
+ friend TrailingObjects;
+
+ unsigned
+ numTrailingObjects(OverloadToken<FoldingSetPlaceholder<TypedefType>>) const {
+ assert(TypedefBits.hasQualifier || TypedefBits.hasTypeDifferentFromDecl ||
+ getKeyword() != ElaboratedTypeKeyword::None);
+ return 1;
+ }
+
+ unsigned numTrailingObjects(OverloadToken<NestedNameSpecifier>) const {
+ return TypedefBits.hasQualifier;
+ }
+
+ TypedefType(TypeClass TC, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TypedefNameDecl *D,
+ QualType UnderlyingType, bool HasTypeDifferentFromDecl);
+
+ FoldingSetPlaceholder<TypedefType> *getFoldingSetPlaceholder() {
+ assert(numTrailingObjects(
+ OverloadToken<FoldingSetPlaceholder<TypedefType>>{}) == 1);
+ return getTrailingObjects<FoldingSetPlaceholder<TypedefType>>();
+ }
+
+public:
+ NestedNameSpecifier getQualifier() const {
+ return TypedefBits.hasQualifier ? *getTrailingObjects<NestedNameSpecifier>()
+ : std::nullopt;
+ }
+
+ TypedefNameDecl *getDecl() const { return Decl; }
+
+ bool isSugared() const { return true; }
+
+ // This always has the 'same' type as declared, but not necessarily identical.
+ QualType desugar() const;
+
+ // Internal helper, for debugging purposes.
+ bool typeMatchesDecl() const { return !TypedefBits.hasTypeDifferentFromDecl; }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TypedefNameDecl *Decl, QualType Underlying) {
+ ID.AddInteger(uintptr_t(Decl) | (Keyword != ElaboratedTypeKeyword::None) |
+ (!Qualifier << 1));
+ if (Keyword != ElaboratedTypeKeyword::None)
+ ID.AddInteger(llvm::to_underlying(Keyword));
+ if (Qualifier)
+ Qualifier.Profile(ID);
+ if (!Underlying.isNull())
+ Underlying.Profile(ID);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, getKeyword(), getQualifier(), getDecl(),
+ typeMatchesDecl() ? QualType() : desugar());
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Typedef; }
+};
+
+/// Sugar type that represents a type that was qualified by a qualifier written
+/// as a macro invocation.
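+///
+/// For example (illustrative):
+/// \code
+/// #define NONNULL _Nonnull
+/// int *NONNULL p; // wraps the underlying '_Nonnull' AttributedType
+/// \endcode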
+class MacroQualifiedType : public Type {
+ friend class ASTContext; // ASTContext creates these.
+
+ QualType UnderlyingTy;
+ const IdentifierInfo *MacroII;
+
+ MacroQualifiedType(QualType UnderlyingTy, QualType CanonTy,
+ const IdentifierInfo *MacroII)
+ : Type(MacroQualified, CanonTy, UnderlyingTy->getDependence()),
+ UnderlyingTy(UnderlyingTy), MacroII(MacroII) {
+ assert(isa<AttributedType>(UnderlyingTy) &&
+ "Expected a macro qualified type to only wrap attributed types.");
+ }
+
+public:
+ const IdentifierInfo *getMacroIdentifier() const { return MacroII; }
+ QualType getUnderlyingType() const { return UnderlyingTy; }
+
+ /// Return this attributed type's modified type with no qualifiers attached to
+ /// it.
+ QualType getModifiedType() const;
+
+ bool isSugared() const { return true; }
+ QualType desugar() const;
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == MacroQualified;
+ }
+};
+
+/// Represents a `typeof` (or __typeof__) expression (a C23 feature and GCC
+/// extension) or a `typeof_unqual` expression (a C23 feature).
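+///
+/// For example:
+/// \code
+/// const int ci = 0;
+/// typeof(ci) a = ci; // 'a' has type 'const int'
+/// typeof_unqual(ci) b = ci; // 'b' has type 'int'
+/// \endcode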
+class TypeOfExprType : public Type {
+ Expr *TOExpr;
+ const ASTContext &Context;
+
+protected:
+ friend class ASTContext; // ASTContext creates these.
+
+ TypeOfExprType(const ASTContext &Context, Expr *E, TypeOfKind Kind,
+ QualType Can = QualType());
+
+public:
+ Expr *getUnderlyingExpr() const { return TOExpr; }
+
+ /// Returns the kind of 'typeof' type this is.
+ TypeOfKind getKind() const {
+ return static_cast<TypeOfKind>(TypeOfBits.Kind);
+ }
+
+ /// Remove a single level of sugar.
+ QualType desugar() const;
+
+ /// Returns whether this type directly provides sugar.
+ bool isSugared() const;
+
+ static bool classof(const Type *T) { return T->getTypeClass() == TypeOfExpr; }
+};
+
+/// Internal representation of canonical, dependent
+/// `typeof(expr)` types.
+///
+/// This class is used internally by the ASTContext to manage
+/// canonical, dependent types only. Clients will only see instances
+/// of this class via TypeOfExprType nodes.
+class DependentTypeOfExprType : public TypeOfExprType,
+ public llvm::FoldingSetNode {
+public:
+ DependentTypeOfExprType(const ASTContext &Context, Expr *E, TypeOfKind Kind)
+ : TypeOfExprType(Context, E, Kind) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
+ Profile(ID, Context, getUnderlyingExpr(),
+ getKind() == TypeOfKind::Unqualified);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ Expr *E, bool IsUnqual);
+};
+
+/// Represents `typeof(type)`, a C23 feature and GCC extension, or
+/// `typeof_unqual(type)`, a C23 feature.
+class TypeOfType : public Type {
+ friend class ASTContext; // ASTContext creates these.
+
+ QualType TOType;
+ const ASTContext &Context;
+
+ TypeOfType(const ASTContext &Context, QualType T, QualType Can,
+ TypeOfKind Kind);
+
+public:
+ QualType getUnmodifiedType() const { return TOType; }
+
+ /// Remove a single level of sugar.
+ QualType desugar() const;
+
+ /// Returns whether this type directly provides sugar.
+ bool isSugared() const { return true; }
+
+ /// Returns the kind of 'typeof' type this is.
+ TypeOfKind getKind() const {
+ return static_cast<TypeOfKind>(TypeOfBits.Kind);
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == TypeOf; }
+};
+
+/// Represents the type `decltype(expr)` (C++11).
+class DecltypeType : public Type {
+ Expr *E;
+ QualType UnderlyingType;
+
+protected:
+ friend class ASTContext; // ASTContext creates these.
+
+ DecltypeType(Expr *E, QualType underlyingType, QualType can = QualType());
+
+public:
+ Expr *getUnderlyingExpr() const { return E; }
+ QualType getUnderlyingType() const { return UnderlyingType; }
+
+ /// Remove a single level of sugar.
+ QualType desugar() const;
+
+ /// Returns whether this type directly provides sugar.
+ bool isSugared() const;
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Decltype; }
+};
+
+/// Internal representation of canonical, dependent
+/// decltype(expr) types.
+///
+/// This class is used internally by the ASTContext to manage
+/// canonical, dependent types only. Clients will only see instances
+/// of this class via DecltypeType nodes.
+class DependentDecltypeType : public DecltypeType, public llvm::FoldingSetNode {
+public:
+ DependentDecltypeType(Expr *E);
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
+ Profile(ID, Context, getUnderlyingExpr());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ Expr *E);
+};
+
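+/// Represents a C++2c pack indexing type, e.g.
+/// \code
+/// template <class... Ts> using First = Ts...[0];
+/// \endcode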
+class PackIndexingType final
+ : public Type,
+ public llvm::FoldingSetNode,
+ private llvm::TrailingObjects<PackIndexingType, QualType> {
+ friend TrailingObjects;
+
+ QualType Pattern;
+ Expr *IndexExpr;
+
+ unsigned Size : 31;
+
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned FullySubstituted : 1;
+
+protected:
+ friend class ASTContext; // ASTContext creates these.
+ PackIndexingType(QualType Canonical, QualType Pattern, Expr *IndexExpr,
+ bool FullySubstituted, ArrayRef<QualType> Expansions = {});
+
+public:
+ Expr *getIndexExpr() const { return IndexExpr; }
+ QualType getPattern() const { return Pattern; }
+
+ bool isSugared() const { return hasSelectedType(); }
+
+ QualType desugar() const {
+ if (hasSelectedType())
+ return getSelectedType();
+ return QualType(this, 0);
+ }
+
+ QualType getSelectedType() const {
+ assert(hasSelectedType() && "Type is dependent");
+ return *(getExpansionsPtr() + *getSelectedIndex());
+ }
+
+ UnsignedOrNone getSelectedIndex() const;
+
+ bool hasSelectedType() const { return getSelectedIndex() != std::nullopt; }
+
+ bool isFullySubstituted() const { return FullySubstituted; }
+
+ bool expandsToEmptyPack() const { return isFullySubstituted() && Size == 0; }
+
+ ArrayRef<QualType> getExpansions() const {
+ return {getExpansionsPtr(), Size};
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == PackIndexing;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context);
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ QualType Pattern, Expr *E, bool FullySubstituted,
+ ArrayRef<QualType> Expansions);
+
+private:
+ const QualType *getExpansionsPtr() const { return getTrailingObjects(); }
+
+ static TypeDependence computeDependence(QualType Pattern, Expr *IndexExpr,
+ ArrayRef<QualType> Expansions = {});
+};
+
+/// A unary type transform, which is a type constructed from another.
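+///
+/// For example, '__underlying_type(E)' transforms an enumeration type E into
+/// its underlying integer type; the full list of transforms is in
+/// clang/Basic/TransformTypeTraits.def.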
+class UnaryTransformType : public Type, public llvm::FoldingSetNode {
+public:
+ enum UTTKind {
+#define TRANSFORM_TYPE_TRAIT_DEF(Enum, _) Enum,
+#include "clang/Basic/TransformTypeTraits.def"
+ };
+
+private:
+ /// The untransformed type.
+ QualType BaseType;
+
+ /// The transformed type if not dependent, otherwise the same as BaseType.
+ QualType UnderlyingType;
+
+ UTTKind UKind;
+
+protected:
+ friend class ASTContext;
+
+ UnaryTransformType(QualType BaseTy, QualType UnderlyingTy, UTTKind UKind,
+ QualType CanonicalTy);
+
+public:
+ bool isSugared() const { return !isDependentType(); }
+ QualType desugar() const { return UnderlyingType; }
+
+ QualType getUnderlyingType() const { return UnderlyingType; }
+ QualType getBaseType() const { return BaseType; }
+
+ UTTKind getUTTKind() const { return UKind; }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == UnaryTransform;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getBaseType(), getUnderlyingType(), getUTTKind());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType BaseType,
+ QualType UnderlyingType, UTTKind UKind) {
+ BaseType.Profile(ID);
+ UnderlyingType.Profile(ID);
+ ID.AddInteger(UKind);
+ }
+};
+
+class TagType : public TypeWithKeyword {
+ friend class ASTContext; // ASTContext creates these.
+
+ /// Stores the TagDecl associated with this type. The decl may point to any
+ /// TagDecl that declares the entity.
+ TagDecl *decl;
+
+ void *getTrailingPointer() const;
+ NestedNameSpecifier &getTrailingQualifier() const;
+
+protected:
+ TagType(TypeClass TC, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TagDecl *TD, bool OwnsTag,
+ bool IsInjected, const Type *CanonicalType);
+
+public:
+ // FIXME: Temporarily renamed from `getDecl` in order to facilitate
+ // rebasing, due to change in behaviour. This should be renamed back
+ // to `getDecl` once the change is settled.
+ TagDecl *getOriginalDecl() const { return decl; }
+
+ NestedNameSpecifier getQualifier() const;
+
+ /// Does the TagType own this declaration of the Tag?
+ bool isTagOwned() const { return TagTypeBits.OwnsTag; }
+
+ bool isInjected() const { return TagTypeBits.IsInjected; }
+
+ ClassTemplateDecl *getTemplateDecl() const;
+ TemplateName getTemplateName(const ASTContext &Ctx) const;
+ ArrayRef<TemplateArgument> getTemplateArgs(const ASTContext &Ctx) const;
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return getCanonicalTypeInternal(); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Enum || T->getTypeClass() == Record ||
+ T->getTypeClass() == InjectedClassName;
+ }
+};
+
+struct TagTypeFoldingSetPlaceholder : public llvm::FoldingSetNode {
+ static constexpr size_t getOffset() {
+ return alignof(TagType) -
+ (sizeof(TagTypeFoldingSetPlaceholder) % alignof(TagType));
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TagDecl *Tag,
+ bool OwnsTag, bool IsInjected) {
+ ID.AddInteger(uintptr_t(Tag) | OwnsTag | (IsInjected << 1) |
+ ((Keyword != ElaboratedTypeKeyword::None) << 2));
+ if (Keyword != ElaboratedTypeKeyword::None)
+ ID.AddInteger(llvm::to_underlying(Keyword));
+ if (Qualifier)
+ Qualifier.Profile(ID);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ const TagType *T = getTagType();
+ Profile(ID, T->getKeyword(), T->getQualifier(), T->getOriginalDecl(),
+ T->isTagOwned(), T->isInjected());
+ }
+
+ TagType *getTagType() {
+ return reinterpret_cast<TagType *>(reinterpret_cast<char *>(this + 1) +
+ getOffset());
+ }
+ const TagType *getTagType() const {
+ return const_cast<TagTypeFoldingSetPlaceholder *>(this)->getTagType();
+ }
+ static TagTypeFoldingSetPlaceholder *fromTagType(TagType *T) {
+ return reinterpret_cast<TagTypeFoldingSetPlaceholder *>(
+ reinterpret_cast<char *>(T) - getOffset()) -
+ 1;
+ }
+};
+
+/// A helper class that allows the use of isa/cast/dyncast
+/// to detect TagType objects of structs/unions/classes.
+class RecordType final : public TagType {
+ using TagType::TagType;
+
+public:
+ // FIXME: Temporarily renamed from `getDecl` in order to facilitate
+ // rebasing, due to change in behaviour. This should be renamed back
+ // to `getDecl` once the change is settled.
+ RecordDecl *getOriginalDecl() const {
+ return reinterpret_cast<RecordDecl *>(TagType::getOriginalDecl());
+ }
+
+ /// Recursively check all fields in the record for const-ness. If any field
+ /// is declared const, return true. Otherwise, return false.
+ bool hasConstFields() const;
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Record; }
+};
+
+/// A helper class that allows the use of isa/cast/dyncast
+/// to detect TagType objects of enums.
+class EnumType final : public TagType {
+ using TagType::TagType;
+
+public:
+ // FIXME: Temporarily renamed from `getDecl` in order to facilitate
+ // rebasing, due to change in behaviour. This should be renamed back
+ // to `getDecl` once the change is settled.
+ EnumDecl *getOriginalDecl() const {
+ return reinterpret_cast<EnumDecl *>(TagType::getOriginalDecl());
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Enum; }
+};
+
+/// The injected class name of a C++ class template or class
+/// template partial specialization. Used to record that a type was
+/// spelled with a bare identifier rather than as a template-id; the
+/// equivalent for non-templated classes is just RecordType.
+///
+/// Injected class name types are always dependent. Template
+/// instantiation turns these into RecordTypes.
+///
+/// Injected class name types are always canonical. This works
+/// because it is impossible to compare an injected class name type
+/// with the corresponding non-injected template type, for the same
+/// reason that it is impossible to directly compare template
+/// parameters from different dependent contexts: injected class name
+/// types can only occur within the scope of a particular templated
+/// declaration, and within that scope every template specialization
+/// will canonicalize to the injected class name (when appropriate
+/// according to the rules of the language).
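+///
+/// For example:
+/// \code
+///   template <typename T> struct A {
+///     A *next; // 'A' names the injected class name type, equivalent
+///              // to the current instantiation A<T>
+///   };
+/// \endcode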
+class InjectedClassNameType final : public TagType {
+ friend class ASTContext; // ASTContext creates these.
+
+ InjectedClassNameType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TagDecl *TD,
+ bool IsInjected, const Type *CanonicalType);
+
+public:
+ // FIXME: Temporarily renamed from `getDecl` in order to facilitate
+ // rebasing, due to change in behaviour. This should be renamed back
+ // to `getDecl` once the change is settled.
+ CXXRecordDecl *getOriginalDecl() const {
+ return reinterpret_cast<CXXRecordDecl *>(TagType::getOriginalDecl());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == InjectedClassName;
+ }
+};
+
+/// An attributed type is a type to which a type attribute has been applied.
+///
+/// The "modified type" is the fully-sugared type to which the attributed
+/// type was applied; generally it is not canonically equivalent to the
+/// attributed type. The "equivalent type" is the minimally-desugared type
+/// which the type is canonically equivalent to.
+///
+/// For example, in the following attributed type:
+/// int32_t __attribute__((vector_size(16)))
+/// - the modified type is the TypedefType for int32_t
+/// - the equivalent type is VectorType(16, int32_t)
+/// - the canonical type is VectorType(16, int)
+class AttributedType : public Type, public llvm::FoldingSetNode {
+public:
+ using Kind = attr::Kind;
+
+private:
+ friend class ASTContext; // ASTContext creates these
+
+ const Attr *Attribute;
+
+ QualType ModifiedType;
+ QualType EquivalentType;
+
+ AttributedType(QualType canon, attr::Kind attrKind, QualType modified,
+ QualType equivalent)
+ : AttributedType(canon, attrKind, nullptr, modified, equivalent) {}
+
+ AttributedType(QualType canon, const Attr *attr, QualType modified,
+ QualType equivalent);
+
+private:
+ AttributedType(QualType canon, attr::Kind attrKind, const Attr *attr,
+ QualType modified, QualType equivalent);
+
+public:
+ Kind getAttrKind() const {
+ return static_cast<Kind>(AttributedTypeBits.AttrKind);
+ }
+
+ const Attr *getAttr() const { return Attribute; }
+
+ QualType getModifiedType() const { return ModifiedType; }
+ QualType getEquivalentType() const { return EquivalentType; }
+
+ bool isSugared() const { return true; }
+ QualType desugar() const { return getEquivalentType(); }
+
+ /// Does this attribute behave like a type qualifier?
+ ///
+ /// A type qualifier adjusts a type to provide specialized rules for
+ /// a specific object, like the standard const and volatile qualifiers.
+ /// This includes attributes controlling things like nullability,
+ /// address spaces, and ARC ownership. The value of the object is still
+ /// largely described by the modified type.
+ ///
+ /// In contrast, many type attributes "rewrite" their modified type to
+ /// produce a fundamentally different type, not necessarily related in any
+ /// formalizable way to the original type. For example, calling convention
+ /// and vector attributes are not simple type qualifiers.
+ ///
+ /// Type qualifiers are often, but not always, reflected in the canonical
+ /// type.
+ bool isQualifier() const;
+
+ bool isMSTypeSpec() const;
+
+ bool isWebAssemblyFuncrefSpec() const;
+
+ bool isCallingConv() const;
+
+ std::optional<NullabilityKind> getImmediateNullability() const;
+
+ /// Strip off the top-level nullability annotation on the given
+ /// type, if it's there.
+ ///
+ /// \param T The type to strip. If the type is exactly an
+ /// AttributedType specifying nullability (without looking through
+  /// type sugar), the nullability is returned and this type is changed
+  /// to the underlying modified type.
+ ///
+ /// \returns the top-level nullability, if present.
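+  ///
+  /// A minimal sketch (assuming 'QT' initially holds 'int *_Nonnull'):
+  /// \code
+  ///   if (auto K = AttributedType::stripOuterNullability(QT))
+  ///     assert(*K == NullabilityKind::NonNull); // QT is now 'int *'
+  /// \endcode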
+ static std::optional<NullabilityKind> stripOuterNullability(QualType &T);
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getAttrKind(), ModifiedType, EquivalentType, Attribute);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, Kind attrKind,
+ QualType modified, QualType equivalent,
+ const Attr *attr) {
+ ID.AddInteger(attrKind);
+ ID.AddPointer(modified.getAsOpaquePtr());
+ ID.AddPointer(equivalent.getAsOpaquePtr());
+ ID.AddPointer(attr);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Attributed;
+ }
+};
+
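+/// A type wrapped by a BPF 'btf_type_tag' attribute, e.g.:
+/// \code
+///   int __attribute__((btf_type_tag("user"))) *p;
+///   // the pointee of 'p' is a BTFTagAttributedType wrapping 'int'
+/// \endcode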
+class BTFTagAttributedType : public Type, public llvm::FoldingSetNode {
+private:
+ friend class ASTContext; // ASTContext creates these
+
+ QualType WrappedType;
+ const BTFTypeTagAttr *BTFAttr;
+
+ BTFTagAttributedType(QualType Canon, QualType Wrapped,
+ const BTFTypeTagAttr *BTFAttr)
+ : Type(BTFTagAttributed, Canon, Wrapped->getDependence()),
+ WrappedType(Wrapped), BTFAttr(BTFAttr) {}
+
+public:
+ QualType getWrappedType() const { return WrappedType; }
+ const BTFTypeTagAttr *getAttr() const { return BTFAttr; }
+
+ bool isSugared() const { return true; }
+ QualType desugar() const { return getWrappedType(); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, WrappedType, BTFAttr);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Wrapped,
+ const BTFTypeTagAttr *BTFAttr) {
+ ID.AddPointer(Wrapped.getAsOpaquePtr());
+ ID.AddPointer(BTFAttr);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == BTFTagAttributed;
+ }
+};
+
+class HLSLAttributedResourceType : public Type, public llvm::FoldingSetNode {
+public:
+ struct Attributes {
+ // Data gathered from HLSL resource attributes
+ llvm::dxil::ResourceClass ResourceClass;
+
+ LLVM_PREFERRED_TYPE(bool)
+ uint8_t IsROV : 1;
+
+ LLVM_PREFERRED_TYPE(bool)
+ uint8_t RawBuffer : 1;
+
+ Attributes(llvm::dxil::ResourceClass ResourceClass, bool IsROV = false,
+ bool RawBuffer = false)
+ : ResourceClass(ResourceClass), IsROV(IsROV), RawBuffer(RawBuffer) {}
+
+ Attributes() : Attributes(llvm::dxil::ResourceClass::UAV, false, false) {}
+
+ friend bool operator==(const Attributes &LHS, const Attributes &RHS) {
+ return std::tie(LHS.ResourceClass, LHS.IsROV, LHS.RawBuffer) ==
+ std::tie(RHS.ResourceClass, RHS.IsROV, RHS.RawBuffer);
+ }
+ friend bool operator!=(const Attributes &LHS, const Attributes &RHS) {
+ return !(LHS == RHS);
+ }
+ };
+
+private:
+ friend class ASTContext; // ASTContext creates these
+
+ QualType WrappedType;
+ QualType ContainedType;
+ const Attributes Attrs;
+
+ HLSLAttributedResourceType(QualType Wrapped, QualType Contained,
+ const Attributes &Attrs)
+ : Type(HLSLAttributedResource, QualType(),
+ Contained.isNull() ? TypeDependence::None
+ : Contained->getDependence()),
+ WrappedType(Wrapped), ContainedType(Contained), Attrs(Attrs) {}
+
+public:
+ QualType getWrappedType() const { return WrappedType; }
+ QualType getContainedType() const { return ContainedType; }
+ bool hasContainedType() const { return !ContainedType.isNull(); }
+ const Attributes &getAttrs() const { return Attrs; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, WrappedType, ContainedType, Attrs);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Wrapped,
+ QualType Contained, const Attributes &Attrs) {
+ ID.AddPointer(Wrapped.getAsOpaquePtr());
+ ID.AddPointer(Contained.getAsOpaquePtr());
+ ID.AddInteger(static_cast<uint32_t>(Attrs.ResourceClass));
+ ID.AddBoolean(Attrs.IsROV);
+ ID.AddBoolean(Attrs.RawBuffer);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == HLSLAttributedResource;
+ }
+
+  /// Returns the handle type from an HLSL resource, if the type is a resource.
+ static const HLSLAttributedResourceType *
+ findHandleTypeOnResource(const Type *RT);
+};
+
+/// Instances of this class represent operands to a SPIR-V type instruction.
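+///
+/// A minimal construction sketch ('SomeTy' stands in for any QualType):
+/// \code
+///   SpirvOperand Lit = SpirvOperand::createLiteral(llvm::APInt(32, 4));
+///   SpirvOperand Ty = SpirvOperand::createType(SomeTy);
+///   assert(Lit.isLiteral() && Ty.isType());
+/// \endcode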
+class SpirvOperand {
+public:
+ enum SpirvOperandKind : unsigned char {
+ Invalid, ///< Uninitialized.
+ ConstantId, ///< Integral value to represent as a SPIR-V OpConstant
+ ///< instruction ID.
+ Literal, ///< Integral value to represent as an immediate literal.
+ TypeId, ///< Type to represent as a SPIR-V type ID.
+
+ Max,
+ };
+
+private:
+ SpirvOperandKind Kind = Invalid;
+
+ QualType ResultType;
+ llvm::APInt Value; // Signedness of constants is represented by ResultType.
+
+public:
+ SpirvOperand() : Kind(Invalid), ResultType(), Value() {}
+
+ SpirvOperand(SpirvOperandKind Kind, QualType ResultType, llvm::APInt Value)
+ : Kind(Kind), ResultType(ResultType), Value(std::move(Value)) {}
+
+ SpirvOperand(const SpirvOperand &Other) { *this = Other; }
+ ~SpirvOperand() {}
+
+ SpirvOperand &operator=(const SpirvOperand &Other) = default;
+
+ bool operator==(const SpirvOperand &Other) const {
+ return Kind == Other.Kind && ResultType == Other.ResultType &&
+ Value == Other.Value;
+ }
+
+ bool operator!=(const SpirvOperand &Other) const { return !(*this == Other); }
+
+ SpirvOperandKind getKind() const { return Kind; }
+
+ bool isValid() const { return Kind != Invalid && Kind < Max; }
+ bool isConstant() const { return Kind == ConstantId; }
+ bool isLiteral() const { return Kind == Literal; }
+ bool isType() const { return Kind == TypeId; }
+
+ llvm::APInt getValue() const {
+ assert((isConstant() || isLiteral()) &&
+ "This is not an operand with a value!");
+ return Value;
+ }
+
+ QualType getResultType() const {
+ assert((isConstant() || isType()) &&
+ "This is not an operand with a result type!");
+ return ResultType;
+ }
+
+ static SpirvOperand createConstant(QualType ResultType, llvm::APInt Val) {
+ return SpirvOperand(ConstantId, ResultType, std::move(Val));
+ }
+
+ static SpirvOperand createLiteral(llvm::APInt Val) {
+ return SpirvOperand(Literal, QualType(), std::move(Val));
+ }
+
+ static SpirvOperand createType(QualType T) {
+ return SpirvOperand(TypeId, T, llvm::APSInt());
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(Kind);
+ ID.AddPointer(ResultType.getAsOpaquePtr());
+ Value.Profile(ID);
+ }
+};
+
+/// Represents an arbitrary, user-specified SPIR-V type instruction.
+class HLSLInlineSpirvType final
+ : public Type,
+ public llvm::FoldingSetNode,
+ private llvm::TrailingObjects<HLSLInlineSpirvType, SpirvOperand> {
+ friend class ASTContext; // ASTContext creates these
+ friend TrailingObjects;
+
+private:
+ uint32_t Opcode;
+ uint32_t Size;
+ uint32_t Alignment;
+ size_t NumOperands;
+
+ HLSLInlineSpirvType(uint32_t Opcode, uint32_t Size, uint32_t Alignment,
+ ArrayRef<SpirvOperand> Operands)
+ : Type(HLSLInlineSpirv, QualType(), TypeDependence::None), Opcode(Opcode),
+ Size(Size), Alignment(Alignment), NumOperands(Operands.size()) {
+ for (size_t I = 0; I < NumOperands; I++) {
+ // Since Operands are stored as a trailing object, they have not been
+ // initialized yet. Call the constructor manually.
+ auto *Operand = new (&getTrailingObjects()[I]) SpirvOperand();
+ *Operand = Operands[I];
+ }
+ }
+
+public:
+ uint32_t getOpcode() const { return Opcode; }
+ uint32_t getSize() const { return Size; }
+ uint32_t getAlignment() const { return Alignment; }
+ ArrayRef<SpirvOperand> getOperands() const {
+ return getTrailingObjects(NumOperands);
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Opcode, Size, Alignment, getOperands());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, uint32_t Opcode,
+ uint32_t Size, uint32_t Alignment,
+ ArrayRef<SpirvOperand> Operands) {
+ ID.AddInteger(Opcode);
+ ID.AddInteger(Size);
+ ID.AddInteger(Alignment);
+ for (auto &Operand : Operands)
+ Operand.Profile(ID);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == HLSLInlineSpirv;
+ }
+};
+
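+/// Represents a reference to a template type parameter, e.g.:
+/// \code
+///   template <typename T> void f(T x); // the type of 'x' is a
+///                                      // TemplateTypeParmType with
+///                                      // depth 0 and index 0
+/// \endcode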
+class TemplateTypeParmType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these
+
+ // The associated TemplateTypeParmDecl for the non-canonical type.
+ TemplateTypeParmDecl *TTPDecl;
+
+ TemplateTypeParmType(unsigned D, unsigned I, bool PP,
+ TemplateTypeParmDecl *TTPDecl, QualType Canon)
+ : Type(TemplateTypeParm, Canon,
+ TypeDependence::DependentInstantiation |
+ (PP ? TypeDependence::UnexpandedPack : TypeDependence::None)),
+ TTPDecl(TTPDecl) {
+ assert(!TTPDecl == Canon.isNull());
+ TemplateTypeParmTypeBits.Depth = D;
+ TemplateTypeParmTypeBits.Index = I;
+ TemplateTypeParmTypeBits.ParameterPack = PP;
+ }
+
+public:
+ unsigned getDepth() const { return TemplateTypeParmTypeBits.Depth; }
+ unsigned getIndex() const { return TemplateTypeParmTypeBits.Index; }
+ bool isParameterPack() const {
+ return TemplateTypeParmTypeBits.ParameterPack;
+ }
+
+ TemplateTypeParmDecl *getDecl() const { return TTPDecl; }
+
+ IdentifierInfo *getIdentifier() const;
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getDepth(), getIndex(), isParameterPack(), getDecl());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, unsigned Depth,
+ unsigned Index, bool ParameterPack,
+ TemplateTypeParmDecl *TTPDecl) {
+ ID.AddInteger(Depth);
+ ID.AddInteger(Index);
+ ID.AddBoolean(ParameterPack);
+ ID.AddPointer(TTPDecl);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == TemplateTypeParm;
+ }
+};
+
+/// Represents the result of substituting a type for a template
+/// type parameter.
+///
+/// Within an instantiated template, all template type parameters have
+/// been replaced with these. They are used solely to record that a
+/// type was originally written as a template type parameter;
+/// therefore they are never canonical.
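+///
+/// For example:
+/// \code
+///   template <typename T> T g(T x) { return x; }
+///   int y = g(1); // within the instantiation g<int>, each use of 'T'
+///                 // becomes a SubstTemplateTypeParmType sugaring 'int'
+/// \endcode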
+class SubstTemplateTypeParmType final
+ : public Type,
+ public llvm::FoldingSetNode,
+ private llvm::TrailingObjects<SubstTemplateTypeParmType, QualType> {
+ friend class ASTContext;
+ friend class llvm::TrailingObjects<SubstTemplateTypeParmType, QualType>;
+
+ Decl *AssociatedDecl;
+
+ SubstTemplateTypeParmType(QualType Replacement, Decl *AssociatedDecl,
+ unsigned Index, UnsignedOrNone PackIndex,
+ bool Final);
+
+public:
+ /// Gets the type that was substituted for the template
+ /// parameter.
+ QualType getReplacementType() const {
+ return SubstTemplateTypeParmTypeBits.HasNonCanonicalUnderlyingType
+ ? *getTrailingObjects()
+ : getCanonicalTypeInternal();
+ }
+
+ /// A template-like entity which owns the whole pattern being substituted.
+ /// This will usually own a set of template parameters, or in some
+ /// cases might even be a template parameter itself.
+ Decl *getAssociatedDecl() const { return AssociatedDecl; }
+
+ /// Gets the template parameter declaration that was substituted for.
+ const TemplateTypeParmDecl *getReplacedParameter() const;
+
+ /// Returns the index of the replaced parameter in the associated declaration.
+ /// This should match the result of `getReplacedParameter()->getIndex()`.
+ unsigned getIndex() const { return SubstTemplateTypeParmTypeBits.Index; }
+
+ // This substitution is Final, which means the substitution is fully
+ // sugared: it doesn't need to be resugared later.
+ unsigned getFinal() const { return SubstTemplateTypeParmTypeBits.Final; }
+
+ UnsignedOrNone getPackIndex() const {
+ return UnsignedOrNone::fromInternalRepresentation(
+ SubstTemplateTypeParmTypeBits.PackIndex);
+ }
+
+ bool isSugared() const { return true; }
+ QualType desugar() const { return getReplacementType(); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getReplacementType(), getAssociatedDecl(), getIndex(),
+ getPackIndex(), getFinal());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Replacement,
+ const Decl *AssociatedDecl, unsigned Index,
+ UnsignedOrNone PackIndex, bool Final);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == SubstTemplateTypeParm;
+ }
+};
+
+/// Represents the result of substituting a set of types as a template argument
+/// that needs to be expanded later.
+///
+/// These types are always dependent and are produced in one of the following
+/// situations:
+/// - SubstTemplateTypeParmPackType: an expansion that had to be delayed,
+/// - SubstBuiltinTemplatePackType: an expansion produced from a builtin
+///   template.
+class SubstPackType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext;
+
+ /// A pointer to the set of template arguments that this
+ /// parameter pack is instantiated with.
+ const TemplateArgument *Arguments;
+
+protected:
+ SubstPackType(TypeClass Derived, QualType Canon,
+ const TemplateArgument &ArgPack);
+
+public:
+ unsigned getNumArgs() const { return SubstPackTypeBits.NumArgs; }
+
+ TemplateArgument getArgumentPack() const;
+
+ void Profile(llvm::FoldingSetNodeID &ID);
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const TemplateArgument &ArgPack);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == SubstTemplateTypeParmPack ||
+ T->getTypeClass() == SubstBuiltinTemplatePack;
+ }
+};
+
+/// Represents the result of substituting a builtin template as a pack.
+class SubstBuiltinTemplatePackType : public SubstPackType {
+ friend class ASTContext;
+
+ SubstBuiltinTemplatePackType(QualType Canon, const TemplateArgument &ArgPack);
+
+public:
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+  /// Reuse the base class Profile; this type introduces no additional fields.
+ using SubstPackType::Profile;
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == SubstBuiltinTemplatePack;
+ }
+};
+
+/// Represents the result of substituting a set of types for a template
+/// type parameter pack.
+///
+/// When a pack expansion in the source code contains multiple parameter packs
+/// and those parameter packs correspond to different levels of template
+/// parameter lists, this type node is used to represent a template type
+/// parameter pack from an outer level, which has already had its argument pack
+/// substituted but that still lives within a pack expansion that itself
+/// could not be instantiated. When actually performing a substitution into
+/// that pack expansion (e.g., when all template parameters have corresponding
+/// arguments), this type will be replaced with the \c SubstTemplateTypeParmType
+/// at the current pack substitution index.
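+///
+/// For example:
+/// \code
+///   template <typename... Outer> struct X {
+///     template <typename... Inner> void f(std::pair<Outer, Inner>... p);
+///   };
+///   // In X<int, float>, the expansion pair<Outer, Inner>... cannot be
+///   // expanded yet (Inner is still unknown), so 'Outer' within it is
+///   // represented as a SubstTemplateTypeParmPackType holding <int, float>.
+/// \endcode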
+class SubstTemplateTypeParmPackType : public SubstPackType {
+ friend class ASTContext;
+
+ llvm::PointerIntPair<Decl *, 1, bool> AssociatedDeclAndFinal;
+
+ SubstTemplateTypeParmPackType(QualType Canon, Decl *AssociatedDecl,
+ unsigned Index, bool Final,
+ const TemplateArgument &ArgPack);
+
+public:
+ IdentifierInfo *getIdentifier() const;
+
+ /// A template-like entity which owns the whole pattern being substituted.
+ /// This will usually own a set of template parameters, or in some
+ /// cases might even be a template parameter itself.
+ Decl *getAssociatedDecl() const;
+
+ /// Gets the template parameter declaration that was substituted for.
+ const TemplateTypeParmDecl *getReplacedParameter() const;
+
+ /// Returns the index of the replaced parameter in the associated declaration.
+ /// This should match the result of `getReplacedParameter()->getIndex()`.
+ unsigned getIndex() const {
+ return SubstPackTypeBits.SubstTemplTypeParmPackIndex;
+ }
+
+  // This substitution is Final, which means the substitution is fully
+  // sugared: it doesn't need to be resugared later.
+ bool getFinal() const;
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID);
+ static void Profile(llvm::FoldingSetNodeID &ID, const Decl *AssociatedDecl,
+ unsigned Index, bool Final,
+ const TemplateArgument &ArgPack);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == SubstTemplateTypeParmPack;
+ }
+};
+
+/// Common base class for placeholders for types that get replaced by
+/// placeholder type deduction: C++11 auto, C++14 decltype(auto), C++17 deduced
+/// class template types, and constrained type names.
+///
+/// These types are usually a placeholder for a deduced type. However, before
+/// the initializer is attached, or (usually) if the initializer is
+/// type-dependent, there is no deduced type and the type is canonical. In
+/// the latter case, it is also a dependent type.
+class DeducedType : public Type {
+ QualType DeducedAsType;
+
+protected:
+ DeducedType(TypeClass TC, QualType DeducedAsType,
+ TypeDependence ExtraDependence, QualType Canon)
+ : Type(TC, Canon,
+ ExtraDependence | (DeducedAsType.isNull()
+ ? TypeDependence::None
+ : DeducedAsType->getDependence() &
+ ~TypeDependence::VariablyModified)),
+ DeducedAsType(DeducedAsType) {}
+
+public:
+ bool isSugared() const { return !DeducedAsType.isNull(); }
+ QualType desugar() const {
+ return isSugared() ? DeducedAsType : QualType(this, 0);
+ }
+
+ /// Get the type deduced for this placeholder type, or null if it
+ /// has not been deduced.
+ QualType getDeducedType() const { return DeducedAsType; }
+ bool isDeduced() const {
+ return !DeducedAsType.isNull() || isDependentType();
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Auto ||
+ T->getTypeClass() == DeducedTemplateSpecialization;
+ }
+};
+
+/// Represents a C++11 auto or C++14 decltype(auto) type, possibly constrained
+/// by a type-constraint.
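+///
+/// For example:
+/// \code
+///   auto a = 1;               // AutoType, deduced as 'int'
+///   decltype(auto) b = (a);   // decltype(auto), deduced as 'int &'
+///   std::integral auto c = 2; // constrained AutoType; the type-constraint
+///                             // concept is 'std::integral'
+/// \endcode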
+class AutoType : public DeducedType {
+ friend class ASTContext; // ASTContext creates these
+
+ TemplateDecl *TypeConstraintConcept;
+
+ AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword,
+ TypeDependence ExtraDependence, QualType Canon, TemplateDecl *CD,
+ ArrayRef<TemplateArgument> TypeConstraintArgs);
+
+public:
+ ArrayRef<TemplateArgument> getTypeConstraintArguments() const {
+ return {reinterpret_cast<const TemplateArgument *>(this + 1),
+ AutoTypeBits.NumArgs};
+ }
+
+ TemplateDecl *getTypeConstraintConcept() const {
+ return TypeConstraintConcept;
+ }
+
+ bool isConstrained() const {
+ return TypeConstraintConcept != nullptr;
+ }
+
+ bool isDecltypeAuto() const {
+ return getKeyword() == AutoTypeKeyword::DecltypeAuto;
+ }
+
+ bool isGNUAutoType() const {
+ return getKeyword() == AutoTypeKeyword::GNUAutoType;
+ }
+
+ AutoTypeKeyword getKeyword() const {
+ return (AutoTypeKeyword)AutoTypeBits.Keyword;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context);
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ QualType Deduced, AutoTypeKeyword Keyword,
+ bool IsDependent, TemplateDecl *CD,
+ ArrayRef<TemplateArgument> Arguments);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Auto;
+ }
+};
+
+/// Represents a C++17 deduced template specialization type.
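+///
+/// For example:
+/// \code
+///   std::pair p(1, 2.5); // DeducedTemplateSpecializationType naming
+///                        // 'std::pair', deduced by CTAD as
+///                        // 'std::pair<int, double>'
+/// \endcode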
+class DeducedTemplateSpecializationType : public KeywordWrapper<DeducedType>,
+ public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these
+
+ /// The name of the template whose arguments will be deduced.
+ TemplateName Template;
+
+ DeducedTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
+ TemplateName Template,
+ QualType DeducedAsType,
+ bool IsDeducedAsDependent, QualType Canon)
+ : KeywordWrapper(Keyword, DeducedTemplateSpecialization, DeducedAsType,
+ toTypeDependence(Template.getDependence()) |
+ (IsDeducedAsDependent
+ ? TypeDependence::DependentInstantiation
+ : TypeDependence::None),
+ Canon),
+ Template(Template) {}
+
+public:
+ /// Retrieve the name of the template that we are deducing.
+ TemplateName getTemplateName() const { return Template; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, getKeyword(), getTemplateName(), getDeducedType(),
+ isDependentType());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
+ TemplateName Template, QualType Deduced,
+ bool IsDependent) {
+ ID.AddInteger(llvm::to_underlying(Keyword));
+ Template.Profile(ID);
+ Deduced.Profile(ID);
+ ID.AddBoolean(IsDependent || Template.isDependent());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DeducedTemplateSpecialization;
+ }
+};
+
+/// Represents a type template specialization; the template
+/// must be a class template, a type alias template, or a template
+/// template parameter. A template which cannot be resolved to one of
+/// these, e.g. because it is written with a dependent scope
+/// specifier, is instead represented as a
+/// @c DependentTemplateSpecializationType.
+///
+/// A non-dependent template specialization type is always "sugar",
+/// typically for a \c RecordType. For example, a class template
+/// specialization type of \c vector<int> will refer to a tag type for
+/// the instantiation \c std::vector<int, std::allocator<int>>
+///
+/// Template specializations are dependent if either the template or
+/// any of the template arguments are dependent, in which case the
+/// type may also be canonical.
+///
+/// Instances of this type are allocated with a trailing array of
+/// TemplateArguments, followed by a QualType representing the
+/// non-canonical aliased type when the template is a type alias
+/// template.
+class TemplateSpecializationType : public TypeWithKeyword,
+ public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these
+
+ /// The name of the template being specialized. This is
+ /// either a TemplateName::Template (in which case it is a
+ /// ClassTemplateDecl*, a TemplateTemplateParmDecl*, or a
+ /// TypeAliasTemplateDecl*), a
+ /// TemplateName::SubstTemplateTemplateParmPack, or a
+ /// TemplateName::SubstTemplateTemplateParm (in which case the
+ /// replacement must, recursively, be one of these).
+ TemplateName Template;
+
+ TemplateSpecializationType(ElaboratedTypeKeyword Keyword, TemplateName T,
+ bool IsAlias, ArrayRef<TemplateArgument> Args,
+ QualType Underlying);
+
+public:
+ /// Determine whether any of the given template arguments are dependent.
+ ///
+ /// The converted arguments should be supplied when known; whether an
+ /// argument is dependent can depend on the conversions performed on it
+ /// (for example, a 'const int' passed as a template argument might be
+ /// dependent if the parameter is a reference but non-dependent if the
+ /// parameter is an int).
+ ///
+ /// Note that the \p Args parameter is unused: this is intentional, to remind
+ /// the caller that they need to pass in the converted arguments, not the
+ /// specified arguments.
+ static bool
+ anyDependentTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
+ ArrayRef<TemplateArgument> Converted);
+ static bool
+ anyDependentTemplateArguments(const TemplateArgumentListInfo &,
+ ArrayRef<TemplateArgument> Converted);
+ static bool anyInstantiationDependentTemplateArguments(
+ ArrayRef<TemplateArgumentLoc> Args);
+
+ /// True if this template specialization type matches a current
+ /// instantiation in the context in which it is found.
+ bool isCurrentInstantiation() const {
+ return isa<InjectedClassNameType>(getCanonicalTypeInternal());
+ }
+
+ /// Determine if this template specialization type is for a type alias
+ /// template that has been substituted.
+ ///
+ /// Nearly every template specialization type whose template is an alias
+ /// template will be substituted. However, this is not the case when
+ /// the specialization contains a pack expansion but the template alias
+ /// does not have a corresponding parameter pack, e.g.,
+ ///
+ /// \code
+ /// template<typename T, typename U, typename V> struct S;
+ /// template<typename T, typename U> using A = S<T, int, U>;
+ /// template<typename... Ts> struct X {
+ /// typedef A<Ts...> type; // not a type alias
+ /// };
+ /// \endcode
+ bool isTypeAlias() const { return TemplateSpecializationTypeBits.TypeAlias; }
+
+ /// Get the aliased type, if this is a specialization of a type alias
+ /// template.
+ QualType getAliasedType() const;
+
+ /// Retrieve the name of the template that we are specializing.
+ TemplateName getTemplateName() const { return Template; }
+
+ ArrayRef<TemplateArgument> template_arguments() const {
+ return {reinterpret_cast<const TemplateArgument *>(this + 1),
+ TemplateSpecializationTypeBits.NumArgs};
+ }
+
+ bool isSugared() const;
+
+ QualType desugar() const {
+ return isTypeAlias() ? getAliasedType() : getCanonicalTypeInternal();
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx);
+ static void Profile(llvm::FoldingSetNodeID &ID, TemplateName T,
+ ArrayRef<TemplateArgument> Args, QualType Underlying,
+ const ASTContext &Context);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == TemplateSpecialization;
+ }
+};
+
+/// Print a template argument list, including the '<' and '>'
+/// enclosing the template arguments.
+void printTemplateArgumentList(raw_ostream &OS,
+ ArrayRef<TemplateArgument> Args,
+ const PrintingPolicy &Policy,
+ const TemplateParameterList *TPL = nullptr);
+
+void printTemplateArgumentList(raw_ostream &OS,
+ ArrayRef<TemplateArgumentLoc> Args,
+ const PrintingPolicy &Policy,
+ const TemplateParameterList *TPL = nullptr);
+
+void printTemplateArgumentList(raw_ostream &OS,
+ const TemplateArgumentListInfo &Args,
+ const PrintingPolicy &Policy,
+ const TemplateParameterList *TPL = nullptr);
+
+/// Make a best-effort determination of whether the type T can be produced by
+/// substituting Args into the default argument of Param.
+bool isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg,
+ const NamedDecl *Param,
+ ArrayRef<TemplateArgument> Args,
+ unsigned Depth);
+
+/// Represents a qualified type name for which the type name is
+/// dependent.
+///
+/// DependentNameType represents a class of dependent types that involve a
+/// possibly dependent nested-name-specifier (e.g., "T::") followed by a
+/// name of a type. The DependentNameType may start with a "typename" (for a
+/// typename-specifier), "class", "struct", "union", or "enum" (for a
+/// dependent elaborated-type-specifier), or nothing (in contexts where we
+/// know that we must be referring to a type, e.g., in a base class specifier).
+/// Typically the nested-name-specifier is dependent, but in MSVC compatibility
+/// mode, this type is used with non-dependent names to delay name lookup until
+/// instantiation.
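+///
+/// For example:
+/// \code
+///   template <typename T> struct A {
+///     typename T::type m; // DependentNameType: qualifier 'T::',
+///                         // identifier 'type'
+///   };
+/// \endcode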
+class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these
+
+ /// The nested name specifier containing the qualifier.
+ NestedNameSpecifier NNS;
+
+ /// The type that this typename specifier refers to.
+ const IdentifierInfo *Name;
+
+ DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier NNS,
+ const IdentifierInfo *Name, QualType CanonType)
+ : TypeWithKeyword(Keyword, DependentName, CanonType,
+ TypeDependence::DependentInstantiation |
+ (NNS ? toTypeDependence(NNS.getDependence())
+ : TypeDependence::Dependent)),
+ NNS(NNS), Name(Name) {
+ assert(Name);
+ }
+
+public:
+ /// Retrieve the qualification on this type.
+ NestedNameSpecifier getQualifier() const { return NNS; }
+
+ /// Retrieve the identifier that terminates this type name.
+ /// For example, "type" in "typename T::type".
+ const IdentifierInfo *getIdentifier() const {
+ return Name;
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getKeyword(), NNS, Name);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier NNS, const IdentifierInfo *Name) {
+ ID.AddInteger(llvm::to_underlying(Keyword));
+ NNS.Profile(ID);
+ ID.AddPointer(Name);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentName;
+ }
+};
+
+/// Represents a template specialization type whose template cannot be
+/// resolved, e.g.
+/// A<T>::template B<T>
+class DependentTemplateSpecializationType : public TypeWithKeyword {
+ friend class ASTContext; // ASTContext creates these
+
+ DependentTemplateStorage Name;
+
+ DependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
+ const DependentTemplateStorage &Name,
+ ArrayRef<TemplateArgument> Args,
+ QualType Canon);
+
+public:
+ const DependentTemplateStorage &getDependentTemplateName() const {
+ return Name;
+ }
+
+ ArrayRef<TemplateArgument> template_arguments() const {
+ return {reinterpret_cast<const TemplateArgument *>(this + 1),
+ DependentTemplateSpecializationTypeBits.NumArgs};
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
+ Profile(ID, Context, getKeyword(), Name, template_arguments());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ ElaboratedTypeKeyword Keyword,
+ const DependentTemplateStorage &Name,
+ ArrayRef<TemplateArgument> Args);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentTemplateSpecialization;
+ }
+};
+
+/// Represents a pack expansion of types.
+///
+/// Pack expansions are part of C++11 variadic templates. A pack
+/// expansion contains a pattern, which itself contains one or more
+/// "unexpanded" parameter packs. When instantiated, a pack expansion
+/// produces a series of types, each instantiated from the pattern of
+/// the expansion, where the Ith instantiation of the pattern uses the
+/// Ith arguments bound to each of the unexpanded parameter packs. The
+/// pack expansion is considered to "expand" these unexpanded
+/// parameter packs.
+///
+/// \code
+/// template<typename ...Types> struct tuple;
+///
+/// template<typename ...Types>
+/// struct tuple_of_references {
+/// typedef tuple<Types&...> type;
+/// };
+/// \endcode
+///
+/// Here, the pack expansion \c Types&... is represented via a
+/// PackExpansionType whose pattern is Types&.
+class PackExpansionType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these
+
+ /// The pattern of the pack expansion.
+ QualType Pattern;
+
+ PackExpansionType(QualType Pattern, QualType Canon,
+ UnsignedOrNone NumExpansions)
+ : Type(PackExpansion, Canon,
+ (Pattern->getDependence() | TypeDependence::Dependent |
+ TypeDependence::Instantiation) &
+ ~TypeDependence::UnexpandedPack),
+ Pattern(Pattern) {
+ PackExpansionTypeBits.NumExpansions =
+ NumExpansions ? *NumExpansions + 1 : 0;
+ }
+
+public:
+ /// Retrieve the pattern of this pack expansion, which is the
+ /// type that will be repeatedly instantiated when instantiating the
+ /// pack expansion itself.
+ QualType getPattern() const { return Pattern; }
+
+ /// Retrieve the number of expansions that this pack expansion will
+ /// generate, if known.
+ UnsignedOrNone getNumExpansions() const {
+ if (PackExpansionTypeBits.NumExpansions)
+ return PackExpansionTypeBits.NumExpansions - 1;
+ return std::nullopt;
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getPattern(), getNumExpansions());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Pattern,
+ UnsignedOrNone NumExpansions) {
+ ID.AddPointer(Pattern.getAsOpaquePtr());
+ ID.AddInteger(NumExpansions.toInternalRepresentation());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == PackExpansion;
+ }
+};
+
+/// This class wraps the list of protocol qualifiers. Types that can take
+/// ObjC protocol qualifiers can subclass this class.
+template <class T>
+class ObjCProtocolQualifiers {
+protected:
+ ObjCProtocolQualifiers() = default;
+
+ ObjCProtocolDecl * const *getProtocolStorage() const {
+ return const_cast<ObjCProtocolQualifiers*>(this)->getProtocolStorage();
+ }
+
+ ObjCProtocolDecl **getProtocolStorage() {
+ return static_cast<T*>(this)->getProtocolStorageImpl();
+ }
+
+ void setNumProtocols(unsigned N) {
+ static_cast<T*>(this)->setNumProtocolsImpl(N);
+ }
+
+ void initialize(ArrayRef<ObjCProtocolDecl *> protocols) {
+ setNumProtocols(protocols.size());
+ assert(getNumProtocols() == protocols.size() &&
+ "bitfield overflow in protocol count");
+ if (!protocols.empty())
+ memcpy(getProtocolStorage(), protocols.data(),
+ protocols.size() * sizeof(ObjCProtocolDecl*));
+ }
+
+public:
+ using qual_iterator = ObjCProtocolDecl * const *;
+ using qual_range = llvm::iterator_range<qual_iterator>;
+
+ qual_range quals() const { return qual_range(qual_begin(), qual_end()); }
+ qual_iterator qual_begin() const { return getProtocolStorage(); }
+ qual_iterator qual_end() const { return qual_begin() + getNumProtocols(); }
+
+ bool qual_empty() const { return getNumProtocols() == 0; }
+
+ /// Return the number of qualifying protocols in this type, or 0 if
+ /// there are none.
+ unsigned getNumProtocols() const {
+ return static_cast<const T*>(this)->getNumProtocolsImpl();
+ }
+
+ /// Fetch a protocol by index.
+ ObjCProtocolDecl *getProtocol(unsigned I) const {
+ assert(I < getNumProtocols() && "Out-of-range protocol access");
+ return qual_begin()[I];
+ }
+
+ /// Retrieve all of the protocol qualifiers.
+ ArrayRef<ObjCProtocolDecl *> getProtocols() const {
+ return ArrayRef<ObjCProtocolDecl *>(qual_begin(), getNumProtocols());
+ }
+};
+
+/// Represents a type parameter type in Objective C. It can take
+/// a list of protocols.
+class ObjCTypeParamType : public Type,
+ public ObjCProtocolQualifiers<ObjCTypeParamType>,
+ public llvm::FoldingSetNode {
+ friend class ASTContext;
+ friend class ObjCProtocolQualifiers<ObjCTypeParamType>;
+
+ /// The number of protocols stored on this type.
+ unsigned NumProtocols : 6;
+
+ ObjCTypeParamDecl *OTPDecl;
+
+ /// The protocols are stored after the ObjCTypeParamType node. In the
+ /// canonical type, the list of protocols are sorted alphabetically
+ /// and uniqued.
+ ObjCProtocolDecl **getProtocolStorageImpl();
+
+ /// Return the number of qualifying protocols in this interface type,
+ /// or 0 if there are none.
+ unsigned getNumProtocolsImpl() const {
+ return NumProtocols;
+ }
+
+ void setNumProtocolsImpl(unsigned N) {
+ NumProtocols = N;
+ }
+
+ ObjCTypeParamType(const ObjCTypeParamDecl *D,
+ QualType can,
+ ArrayRef<ObjCProtocolDecl *> protocols);
+
+public:
+ bool isSugared() const { return true; }
+ QualType desugar() const { return getCanonicalTypeInternal(); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ObjCTypeParam;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID);
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const ObjCTypeParamDecl *OTPDecl,
+ QualType CanonicalType,
+ ArrayRef<ObjCProtocolDecl *> protocols);
+
+ ObjCTypeParamDecl *getDecl() const { return OTPDecl; }
+};
+
+/// Represents a class type in Objective C.
+///
+/// Every Objective C type is a combination of a base type, a set of
+/// type arguments (optional, for parameterized classes) and a list of
+/// protocols.
+///
+/// Given the following declarations:
+/// \code
+/// \@class C<T>;
+/// \@protocol P;
+/// \endcode
+///
+/// 'C' is an ObjCInterfaceType C. It is sugar for an ObjCObjectType
+/// with base C and no protocols.
+///
+/// 'C<P>' is an unspecialized ObjCObjectType with base C and protocol list [P].
+/// 'C<C*>' is a specialized ObjCObjectType with type arguments 'C*' and no
+/// protocol list.
+/// 'C<C*><P>' is a specialized ObjCObjectType with base C, type arguments 'C*',
+/// and protocol list [P].
+///
+/// 'id' is a TypedefType which is sugar for an ObjCObjectPointerType whose
+/// pointee is an ObjCObjectType with base BuiltinType::ObjCIdType
+/// and no protocols.
+///
+/// 'id<P>' is an ObjCObjectPointerType whose pointee is an ObjCObjectType
+/// with base BuiltinType::ObjCIdType and protocol list [P]. Eventually
+/// this should get its own sugar class to better represent the source.
+class ObjCObjectType : public Type,
+ public ObjCProtocolQualifiers<ObjCObjectType> {
+ friend class ObjCProtocolQualifiers<ObjCObjectType>;
+
+  // ObjCObjectType.NumTypeArgs - the number of type arguments stored
+  // after the ObjCObjectTypeImpl node.
+  // ObjCObjectType.NumProtocols - the number of protocols stored
+  // after the type arguments of the ObjCObjectTypeImpl node.
+ //
+ // These protocols are those written directly on the type. If
+ // protocol qualifiers ever become additive, the iterators will need
+ // to get kindof complicated.
+ //
+ // In the canonical object type, these are sorted alphabetically
+ // and uniqued.
+
+ /// Either a BuiltinType or an InterfaceType or sugar for either.
+ QualType BaseType;
+
+ /// Cached superclass type.
+ mutable llvm::PointerIntPair<const ObjCObjectType *, 1, bool>
+ CachedSuperClassType;
+
+ QualType *getTypeArgStorage();
+ const QualType *getTypeArgStorage() const {
+ return const_cast<ObjCObjectType *>(this)->getTypeArgStorage();
+ }
+
+ ObjCProtocolDecl **getProtocolStorageImpl();
+ /// Return the number of qualifying protocols in this interface type,
+ /// or 0 if there are none.
+ unsigned getNumProtocolsImpl() const {
+ return ObjCObjectTypeBits.NumProtocols;
+ }
+ void setNumProtocolsImpl(unsigned N) {
+ ObjCObjectTypeBits.NumProtocols = N;
+ }
+
+protected:
+ enum Nonce_ObjCInterface { Nonce_ObjCInterface };
+
+ ObjCObjectType(QualType Canonical, QualType Base,
+ ArrayRef<QualType> typeArgs,
+ ArrayRef<ObjCProtocolDecl *> protocols,
+ bool isKindOf);
+
+ ObjCObjectType(enum Nonce_ObjCInterface)
+ : Type(ObjCInterface, QualType(), TypeDependence::None),
+ BaseType(QualType(this_(), 0)) {
+ ObjCObjectTypeBits.NumProtocols = 0;
+ ObjCObjectTypeBits.NumTypeArgs = 0;
+ ObjCObjectTypeBits.IsKindOf = 0;
+ }
+
+ void computeSuperClassTypeSlow() const;
+
+public:
+ /// Gets the base type of this object type. This is always (possibly
+ /// sugar for) one of:
+ /// - the 'id' builtin type (as opposed to the 'id' type visible to the
+ /// user, which is a typedef for an ObjCObjectPointerType)
+ /// - the 'Class' builtin type (same caveat)
+ /// - an ObjCObjectType (currently always an ObjCInterfaceType)
+ QualType getBaseType() const { return BaseType; }
+
+ bool isObjCId() const {
+ return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCId);
+ }
+
+ bool isObjCClass() const {
+ return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCClass);
+ }
+
+ bool isObjCUnqualifiedId() const { return qual_empty() && isObjCId(); }
+ bool isObjCUnqualifiedClass() const { return qual_empty() && isObjCClass(); }
+ bool isObjCUnqualifiedIdOrClass() const {
+ if (!qual_empty()) return false;
+ if (const BuiltinType *T = getBaseType()->getAs<BuiltinType>())
+ return T->getKind() == BuiltinType::ObjCId ||
+ T->getKind() == BuiltinType::ObjCClass;
+ return false;
+ }
+ bool isObjCQualifiedId() const { return !qual_empty() && isObjCId(); }
+ bool isObjCQualifiedClass() const { return !qual_empty() && isObjCClass(); }
+
+ /// Gets the interface declaration for this object type, if the base type
+ /// really is an interface.
+ ObjCInterfaceDecl *getInterface() const;
+
+ /// Determine whether this object type is "specialized", meaning
+ /// that it has type arguments.
+ bool isSpecialized() const;
+
+ /// Determine whether this object type was written with type arguments.
+ bool isSpecializedAsWritten() const {
+ return ObjCObjectTypeBits.NumTypeArgs > 0;
+ }
+
+ /// Determine whether this object type is "unspecialized", meaning
+ /// that it has no type arguments.
+ bool isUnspecialized() const { return !isSpecialized(); }
+
+ /// Determine whether this object type is "unspecialized" as
+ /// written, meaning that it has no type arguments.
+ bool isUnspecializedAsWritten() const { return !isSpecializedAsWritten(); }
+
+ /// Retrieve the type arguments of this object type (semantically).
+ ArrayRef<QualType> getTypeArgs() const;
+
+ /// Retrieve the type arguments of this object type as they were
+ /// written.
+ ArrayRef<QualType> getTypeArgsAsWritten() const {
+ return {getTypeArgStorage(), ObjCObjectTypeBits.NumTypeArgs};
+ }
+
+ /// Whether this is a "__kindof" type as written.
+ bool isKindOfTypeAsWritten() const { return ObjCObjectTypeBits.IsKindOf; }
+
+  /// Whether this is a "__kindof" type (semantically).
+ bool isKindOfType() const;
+
+ /// Retrieve the type of the superclass of this object type.
+ ///
+ /// This operation substitutes any type arguments into the
+ /// superclass of the current class type, potentially producing a
+ /// specialization of the superclass type. Produces a null type if
+ /// there is no superclass.
+ QualType getSuperClassType() const {
+ if (!CachedSuperClassType.getInt())
+ computeSuperClassTypeSlow();
+
+ assert(CachedSuperClassType.getInt() && "Superclass not set?");
+ return QualType(CachedSuperClassType.getPointer(), 0);
+ }
+
+ /// Strip off the Objective-C "kindof" type and (with it) any
+ /// protocol qualifiers.
+ QualType stripObjCKindOfTypeAndQuals(const ASTContext &ctx) const;
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ObjCObject ||
+ T->getTypeClass() == ObjCInterface;
+ }
+};
+
+/// A class providing a concrete implementation
+/// of ObjCObjectType, so as to not increase the footprint of
+/// ObjCInterfaceType. Code outside of ASTContext and the core type
+/// system should not reference this type.
+class ObjCObjectTypeImpl : public ObjCObjectType, public llvm::FoldingSetNode {
+ friend class ASTContext;
+
+ // If anyone adds fields here, ObjCObjectType::getProtocolStorage()
+ // will need to be modified.
+
+ ObjCObjectTypeImpl(QualType Canonical, QualType Base,
+ ArrayRef<QualType> typeArgs,
+ ArrayRef<ObjCProtocolDecl *> protocols,
+ bool isKindOf)
+ : ObjCObjectType(Canonical, Base, typeArgs, protocols, isKindOf) {}
+
+public:
+ void Profile(llvm::FoldingSetNodeID &ID);
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ QualType Base,
+ ArrayRef<QualType> typeArgs,
+ ArrayRef<ObjCProtocolDecl *> protocols,
+ bool isKindOf);
+};
+
+inline QualType *ObjCObjectType::getTypeArgStorage() {
+ return reinterpret_cast<QualType *>(static_cast<ObjCObjectTypeImpl*>(this)+1);
+}
+
+inline ObjCProtocolDecl **ObjCObjectType::getProtocolStorageImpl() {
+ return reinterpret_cast<ObjCProtocolDecl**>(
+ getTypeArgStorage() + ObjCObjectTypeBits.NumTypeArgs);
+}
+
+inline ObjCProtocolDecl **ObjCTypeParamType::getProtocolStorageImpl() {
+ return reinterpret_cast<ObjCProtocolDecl**>(
+ static_cast<ObjCTypeParamType*>(this)+1);
+}
+
+/// Interfaces are the core concept in Objective-C for object-oriented design.
+/// They basically correspond to C++ classes. There are two kinds of interface
+/// types: normal interfaces like `NSString`, and qualified interfaces, which
+/// are qualified with a protocol list like `NSString<NSCopyable, NSAmazing>`.
+///
+/// ObjCInterfaceType guarantees the following properties when considered
+/// as a subtype of its superclass, ObjCObjectType:
+/// - There are no protocol qualifiers. To reinforce this, code which
+/// tries to invoke the protocol methods via an ObjCInterfaceType will
+/// fail to compile.
+/// - It is its own base type. That is, if T is an ObjCInterfaceType*,
+/// T->getBaseType() == QualType(T, 0).
+class ObjCInterfaceType : public ObjCObjectType {
+ friend class ASTContext; // ASTContext creates these.
+ friend class ASTReader;
+ template <class T> friend class serialization::AbstractTypeReader;
+
+ ObjCInterfaceDecl *Decl;
+
+ ObjCInterfaceType(const ObjCInterfaceDecl *D)
+ : ObjCObjectType(Nonce_ObjCInterface),
+ Decl(const_cast<ObjCInterfaceDecl*>(D)) {}
+
+public:
+ /// Get the declaration of this interface.
+ ObjCInterfaceDecl *getDecl() const;
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ObjCInterface;
+ }
+
+ // Nonsense to "hide" certain members of ObjCObjectType within this
+ // class. People asking for protocols on an ObjCInterfaceType are
+ // not going to get what they want: ObjCInterfaceTypes are
+ // guaranteed to have no protocols.
+ enum {
+ qual_iterator,
+ qual_begin,
+ qual_end,
+ getNumProtocols,
+ getProtocol
+ };
+};
+
+inline ObjCInterfaceDecl *ObjCObjectType::getInterface() const {
+ QualType baseType = getBaseType();
+ while (const auto *ObjT = baseType->getAs<ObjCObjectType>()) {
+ if (const auto *T = dyn_cast<ObjCInterfaceType>(ObjT))
+ return T->getDecl();
+
+ baseType = ObjT->getBaseType();
+ }
+
+ return nullptr;
+}
+
+/// Represents a pointer to an Objective C object.
+///
+/// These are constructed from pointer declarators when the pointee type is
+/// an ObjCObjectType (or sugar for one). In addition, the 'id' and 'Class'
+/// types are typedefs for these, and the protocol-qualified types 'id<P>'
+/// and 'Class<P>' are translated into these.
+///
+/// Pointers to pointers to Objective C objects are still PointerTypes;
+/// only the first level of pointer gets its own type implementation.
+class ObjCObjectPointerType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these.
+
+ QualType PointeeType;
+
+ ObjCObjectPointerType(QualType Canonical, QualType Pointee)
+ : Type(ObjCObjectPointer, Canonical, Pointee->getDependence()),
+ PointeeType(Pointee) {}
+
+public:
+ /// Gets the type pointed to by this ObjC pointer.
+ /// The result will always be an ObjCObjectType or sugar thereof.
+ QualType getPointeeType() const { return PointeeType; }
+
+ /// Gets the type pointed to by this ObjC pointer. Always returns non-null.
+ ///
+ /// This method is equivalent to getPointeeType() except that
+ /// it discards any typedefs (or other sugar) between this
+ /// type and the "outermost" object type. So for:
+ /// \code
+ /// \@class A; \@protocol P; \@protocol Q;
+ /// typedef A<P> AP;
+ /// typedef A A1;
+ /// typedef A1<P> A1P;
+ /// typedef A1P<Q> A1PQ;
+ /// \endcode
+ /// For 'A*', getObjectType() will return 'A'.
+ /// For 'A<P>*', getObjectType() will return 'A<P>'.
+ /// For 'AP*', getObjectType() will return 'A<P>'.
+ /// For 'A1*', getObjectType() will return 'A'.
+ /// For 'A1<P>*', getObjectType() will return 'A1<P>'.
+ /// For 'A1P*', getObjectType() will return 'A1<P>'.
+ /// For 'A1PQ*', getObjectType() will return 'A1<Q>', because
+ /// adding protocols to a protocol-qualified base discards the
+ /// old qualifiers (for now). But if it didn't, getObjectType()
+ /// would return 'A1P<Q>' (and we'd have to make iterating over
+ /// qualifiers more complicated).
+ const ObjCObjectType *getObjectType() const {
+ return PointeeType->castAs<ObjCObjectType>();
+ }
+
+ /// If this pointer points to an Objective C
+ /// \@interface type, gets the type for that interface. Any protocol
+ /// qualifiers on the interface are ignored.
+ ///
+ /// \return null if the base type for this pointer is 'id' or 'Class'
+ const ObjCInterfaceType *getInterfaceType() const;
+
+  /// If this pointer points to an Objective-C \@interface
+ /// type, gets the declaration for that interface.
+ ///
+ /// \return null if the base type for this pointer is 'id' or 'Class'
+ ObjCInterfaceDecl *getInterfaceDecl() const {
+ return getObjectType()->getInterface();
+ }
+
+ /// True if this is equivalent to the 'id' type, i.e. if
+ /// its object type is the primitive 'id' type with no protocols.
+ bool isObjCIdType() const {
+ return getObjectType()->isObjCUnqualifiedId();
+ }
+
+  /// True if this is equivalent to the 'Class' type, i.e. if
+  /// its object type is the primitive 'Class' type with no protocols.
+ bool isObjCClassType() const {
+ return getObjectType()->isObjCUnqualifiedClass();
+ }
+
+  /// True if this is equivalent to the 'id' or 'Class' type.
+ bool isObjCIdOrClassType() const {
+ return getObjectType()->isObjCUnqualifiedIdOrClass();
+ }
+
+ /// True if this is equivalent to 'id<P>' for some non-empty set of
+ /// protocols.
+ bool isObjCQualifiedIdType() const {
+ return getObjectType()->isObjCQualifiedId();
+ }
+
+ /// True if this is equivalent to 'Class<P>' for some non-empty set of
+ /// protocols.
+ bool isObjCQualifiedClassType() const {
+ return getObjectType()->isObjCQualifiedClass();
+ }
+
+ /// Whether this is a "__kindof" type.
+ bool isKindOfType() const { return getObjectType()->isKindOfType(); }
+
+ /// Whether this type is specialized, meaning that it has type arguments.
+ bool isSpecialized() const { return getObjectType()->isSpecialized(); }
+
+  /// Whether this type was written with type arguments.
+ bool isSpecializedAsWritten() const {
+ return getObjectType()->isSpecializedAsWritten();
+ }
+
+  /// Whether this type is unspecialized, meaning that it has no type arguments.
+ bool isUnspecialized() const { return getObjectType()->isUnspecialized(); }
+
+ /// Determine whether this object type is "unspecialized" as
+ /// written, meaning that it has no type arguments.
+ bool isUnspecializedAsWritten() const { return !isSpecializedAsWritten(); }
+
+ /// Retrieve the type arguments for this type.
+ ArrayRef<QualType> getTypeArgs() const {
+ return getObjectType()->getTypeArgs();
+ }
+
+ /// Retrieve the type arguments for this type.
+ ArrayRef<QualType> getTypeArgsAsWritten() const {
+ return getObjectType()->getTypeArgsAsWritten();
+ }
+
+ /// An iterator over the qualifiers on the object type. Provided
+ /// for convenience. This will always iterate over the full set of
+ /// protocols on a type, not just those provided directly.
+ using qual_iterator = ObjCObjectType::qual_iterator;
+ using qual_range = llvm::iterator_range<qual_iterator>;
+
+ qual_range quals() const { return qual_range(qual_begin(), qual_end()); }
+
+ qual_iterator qual_begin() const {
+ return getObjectType()->qual_begin();
+ }
+
+ qual_iterator qual_end() const {
+ return getObjectType()->qual_end();
+ }
+
+ bool qual_empty() const { return getObjectType()->qual_empty(); }
+
+ /// Return the number of qualifying protocols on the object type.
+ unsigned getNumProtocols() const {
+ return getObjectType()->getNumProtocols();
+ }
+
+ /// Retrieve a qualifying protocol by index on the object type.
+ ObjCProtocolDecl *getProtocol(unsigned I) const {
+ return getObjectType()->getProtocol(I);
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ /// Retrieve the type of the superclass of this object pointer type.
+ ///
+ /// This operation substitutes any type arguments into the
+ /// superclass of the current class type, potentially producing a
+ /// pointer to a specialization of the superclass type. Produces a
+ /// null type if there is no superclass.
+ QualType getSuperClassType() const;
+
+ /// Strip off the Objective-C "kindof" type and (with it) any
+ /// protocol qualifiers.
+ const ObjCObjectPointerType *stripObjCKindOfTypeAndQuals(
+ const ASTContext &ctx) const;
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getPointeeType());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType T) {
+ ID.AddPointer(T.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ObjCObjectPointer;
+ }
+};
+
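+/// Represents a C11 '_Atomic' type, e.g.:
+/// \code
+///   _Atomic(int) Counter; // AtomicType whose value type is 'int'
+/// \endcode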
+class AtomicType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these.
+
+ QualType ValueType;
+
+ AtomicType(QualType ValTy, QualType Canonical)
+ : Type(Atomic, Canonical, ValTy->getDependence()), ValueType(ValTy) {}
+
+public:
+ /// Gets the type contained by this atomic type, i.e.
+ /// the type returned by performing an atomic load of this atomic type.
+ QualType getValueType() const { return ValueType; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getValueType());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType T) {
+ ID.AddPointer(T.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Atomic;
+ }
+};
+
+/// PipeType - Represents an OpenCL 2.0 pipe type.
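+/// For example:
+/// \code
+///   kernel void k(read_only pipe int p); // PipeType: element type 'int',
+///                                        // isReadOnly() is true
+/// \endcode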
+class PipeType : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext; // ASTContext creates these.
+
+ QualType ElementType;
+ bool isRead;
+
+ PipeType(QualType elemType, QualType CanonicalPtr, bool isRead)
+ : Type(Pipe, CanonicalPtr, elemType->getDependence()),
+ ElementType(elemType), isRead(isRead) {}
+
+public:
+ QualType getElementType() const { return ElementType; }
+
+ bool isSugared() const { return false; }
+
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getElementType(), isReadOnly());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType T, bool isRead) {
+ ID.AddPointer(T.getAsOpaquePtr());
+ ID.AddBoolean(isRead);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Pipe;
+ }
+
+ bool isReadOnly() const { return isRead; }
+};
+
+/// A fixed-width integer type of a specified bit width, as written with the
+/// C23 '_BitInt(N)' specifier.
+class BitIntType final : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsUnsigned : 1;
+ unsigned NumBits : 24;
+
+protected:
+ BitIntType(bool isUnsigned, unsigned NumBits);
+
+public:
+ bool isUnsigned() const { return IsUnsigned; }
+ bool isSigned() const { return !IsUnsigned; }
+ unsigned getNumBits() const { return NumBits; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, isUnsigned(), getNumBits());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, bool IsUnsigned,
+ unsigned NumBits) {
+ ID.AddBoolean(IsUnsigned);
+ ID.AddInteger(NumBits);
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == BitInt; }
+};
+
+class DependentBitIntType final : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext;
+ llvm::PointerIntPair<Expr*, 1, bool> ExprAndUnsigned;
+
+protected:
+ DependentBitIntType(bool IsUnsigned, Expr *NumBits);
+
+public:
+ bool isUnsigned() const;
+ bool isSigned() const { return !isUnsigned(); }
+ Expr *getNumBitsExpr() const;
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
+ Profile(ID, Context, isUnsigned(), getNumBitsExpr());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ bool IsUnsigned, Expr *NumBitsExpr);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentBitInt;
+ }
+};
+
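+/// Sugar over a canonical type, identified by a PredefinedSugarKind and a
+/// name; desugaring yields the canonical type directly.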
+class PredefinedSugarType final : public Type {
+public:
+ friend class ASTContext;
+ using Kind = PredefinedSugarKind;
+
+private:
+ PredefinedSugarType(Kind KD, const IdentifierInfo *IdentName,
+ QualType CanonicalType)
+ : Type(PredefinedSugar, CanonicalType, TypeDependence::None),
+ Name(IdentName) {
+ PredefinedSugarTypeBits.Kind = llvm::to_underlying(KD);
+ }
+
+ static StringRef getName(Kind KD);
+
+ const IdentifierInfo *Name;
+
+public:
+ bool isSugared() const { return true; }
+
+ QualType desugar() const { return getCanonicalTypeInternal(); }
+
+ Kind getKind() const { return Kind(PredefinedSugarTypeBits.Kind); }
+
+ const IdentifierInfo *getIdentifier() const { return Name; }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == PredefinedSugar;
+ }
+};
+
+/// A qualifier collector accumulates the qualifiers stripped from one or
+/// more types so that they can later be applied to another type.
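+///
+/// A typical use (a minimal sketch): strip the qualifiers off a type,
+/// compute a replacement for the unqualified type, then re-apply the
+/// collected qualifiers:
+/// @code
+///   QualifierCollector Quals;
+///   const Type *Ty = Quals.strip(QT);
+///   // ... derive a replacement type 'NewTy' from 'Ty' ...
+///   QualType Result = Quals.apply(Context, NewTy);
+/// @endcode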
+class QualifierCollector : public Qualifiers {
+public:
+ QualifierCollector(Qualifiers Qs = Qualifiers()) : Qualifiers(Qs) {}
+
+ /// Collect any qualifiers on the given type and return an
+ /// unqualified type. The qualifiers are assumed to be consistent
+ /// with those already in the type.
+ const Type *strip(QualType type) {
+ addFastQualifiers(type.getLocalFastQualifiers());
+ if (!type.hasLocalNonFastQualifiers())
+ return type.getTypePtrUnsafe();
+
+ const ExtQuals *extQuals = type.getExtQualsUnsafe();
+ addConsistentQualifiers(extQuals->getQualifiers());
+ return extQuals->getBaseType();
+ }
+
+ /// Apply the collected qualifiers to the given type.
+ QualType apply(const ASTContext &Context, QualType QT) const;
+
+ /// Apply the collected qualifiers to the given type.
+ QualType apply(const ASTContext &Context, const Type* T) const;
+};
+
+/// A container of type source information.
+///
+/// A client can read the relevant info using TypeLoc wrappers, e.g.:
+/// @code
+/// TypeLoc TL = TypeSourceInfo->getTypeLoc();
+/// TL.getBeginLoc().print(OS, SrcMgr);
+/// @endcode
+class alignas(8) TypeSourceInfo {
+ // Contains a memory block after the class, used for type source information,
+ // allocated by ASTContext.
+ friend class ASTContext;
+
+ QualType Ty;
+
+ TypeSourceInfo(QualType ty, size_t DataSize); // implemented in TypeLoc.h
+
+public:
+ /// Return the type wrapped by this type source info.
+ QualType getType() const { return Ty; }
+
+ /// Return the TypeLoc wrapper for the type source info.
+ TypeLoc getTypeLoc() const; // implemented in TypeLoc.h
+
+ /// Override the type stored in this TypeSourceInfo. Use with caution!
+ void overrideType(QualType T) { Ty = T; }
+};
+
+// Inline function definitions.
+
+inline SplitQualType SplitQualType::getSingleStepDesugaredType() const {
+ SplitQualType desugar =
+ Ty->getLocallyUnqualifiedSingleStepDesugaredType().split();
+ desugar.Quals.addConsistentQualifiers(Quals);
+ return desugar;
+}
+
+inline const Type *QualType::getTypePtr() const {
+ return getCommonPtr()->BaseType;
+}
+
+inline const Type *QualType::getTypePtrOrNull() const {
+ return (isNull() ? nullptr : getCommonPtr()->BaseType);
+}
+
+inline bool QualType::isReferenceable() const {
+ // C++ [defns.referenceable]
+ // type that is either an object type, a function type that does not have
+ // cv-qualifiers or a ref-qualifier, or a reference type.
+ const Type &Self = **this;
+ if (Self.isObjectType() || Self.isReferenceType())
+ return true;
+ if (const auto *F = Self.getAs<FunctionProtoType>())
+ return F->getMethodQuals().empty() && F->getRefQualifier() == RQ_None;
+
+ return false;
+}
+
+inline SplitQualType QualType::split() const {
+ if (!hasLocalNonFastQualifiers())
+ return SplitQualType(getTypePtrUnsafe(),
+ Qualifiers::fromFastMask(getLocalFastQualifiers()));
+
+ const ExtQuals *eq = getExtQualsUnsafe();
+ Qualifiers qs = eq->getQualifiers();
+ qs.addFastQualifiers(getLocalFastQualifiers());
+ return SplitQualType(eq->getBaseType(), qs);
+}
+
+inline Qualifiers QualType::getLocalQualifiers() const {
+ Qualifiers Quals;
+ if (hasLocalNonFastQualifiers())
+ Quals = getExtQualsUnsafe()->getQualifiers();
+ Quals.addFastQualifiers(getLocalFastQualifiers());
+ return Quals;
+}
+
+inline Qualifiers QualType::getQualifiers() const {
+ Qualifiers quals = getCommonPtr()->CanonicalType.getLocalQualifiers();
+ quals.addFastQualifiers(getLocalFastQualifiers());
+ return quals;
+}
+
+inline unsigned QualType::getCVRQualifiers() const {
+ unsigned cvr = getCommonPtr()->CanonicalType.getLocalCVRQualifiers();
+ cvr |= getLocalCVRQualifiers();
+ return cvr;
+}
+
+inline QualType QualType::getCanonicalType() const {
+ QualType canon = getCommonPtr()->CanonicalType;
+ return canon.withFastQualifiers(getLocalFastQualifiers());
+}
+
+inline bool QualType::isCanonical() const {
+ return getTypePtr()->isCanonicalUnqualified();
+}
+
+inline bool QualType::isCanonicalAsParam() const {
+ if (!isCanonical()) return false;
+ if (hasLocalQualifiers()) return false;
+
+ const Type *T = getTypePtr();
+ if (T->isVariablyModifiedType() && T->hasSizedVLAType())
+ return false;
+
+ return !isa<FunctionType>(T) &&
+ (!isa<ArrayType>(T) || isa<ArrayParameterType>(T));
+}
+
+inline bool QualType::isConstQualified() const {
+ return isLocalConstQualified() ||
+ getCommonPtr()->CanonicalType.isLocalConstQualified();
+}
+
+inline bool QualType::isRestrictQualified() const {
+ return isLocalRestrictQualified() ||
+ getCommonPtr()->CanonicalType.isLocalRestrictQualified();
+}
+
+inline bool QualType::isVolatileQualified() const {
+ return isLocalVolatileQualified() ||
+ getCommonPtr()->CanonicalType.isLocalVolatileQualified();
+}
+
+inline bool QualType::hasQualifiers() const {
+ return hasLocalQualifiers() ||
+ getCommonPtr()->CanonicalType.hasLocalQualifiers();
+}
+
+inline QualType QualType::getUnqualifiedType() const {
+ if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
+ return QualType(getTypePtr(), 0);
+
+ return QualType(getSplitUnqualifiedTypeImpl(*this).Ty, 0);
+}
+
+inline SplitQualType QualType::getSplitUnqualifiedType() const {
+ if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
+ return split();
+
+ return getSplitUnqualifiedTypeImpl(*this);
+}
+
+inline void QualType::removeLocalConst() {
+ removeLocalFastQualifiers(Qualifiers::Const);
+}
+
+inline void QualType::removeLocalRestrict() {
+ removeLocalFastQualifiers(Qualifiers::Restrict);
+}
+
+inline void QualType::removeLocalVolatile() {
+ removeLocalFastQualifiers(Qualifiers::Volatile);
+}
+
+/// Check if this type has any address space qualifier.
+inline bool QualType::hasAddressSpace() const {
+ return getQualifiers().hasAddressSpace();
+}
+
+/// Return the address space of this type.
+inline LangAS QualType::getAddressSpace() const {
+ return getQualifiers().getAddressSpace();
+}
+
+/// Return the gc attribute of this type.
+inline Qualifiers::GC QualType::getObjCGCAttr() const {
+ return getQualifiers().getObjCGCAttr();
+}
+
+inline FunctionType::ExtInfo getFunctionExtInfo(const Type &t) {
+ if (const auto *PT = t.getAs<PointerType>()) {
+ if (const auto *FT = PT->getPointeeType()->getAs<FunctionType>())
+ return FT->getExtInfo();
+ } else if (const auto *FT = t.getAs<FunctionType>())
+ return FT->getExtInfo();
+
+ return FunctionType::ExtInfo();
+}
+
+inline FunctionType::ExtInfo getFunctionExtInfo(QualType t) {
+ return getFunctionExtInfo(*t);
+}
+
+/// Determine whether this type is more
+/// qualified than the Other type. For example, "const volatile int"
+/// is more qualified than "const int", "volatile int", and
+/// "int". However, it is not more qualified than "const volatile
+/// int".
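+///
+/// A minimal sketch (assuming 'CVInt' and 'CInt' are QualTypes for
+/// 'const volatile int' and 'const int'):
+/// @code
+///   CVInt.isMoreQualifiedThan(CInt, Ctx);  // true
+///   CInt.isMoreQualifiedThan(CVInt, Ctx);  // false
+///   CVInt.isMoreQualifiedThan(CVInt, Ctx); // false: qualifiers are equal
+/// @endcode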
+inline bool QualType::isMoreQualifiedThan(QualType other,
+ const ASTContext &Ctx) const {
+ Qualifiers MyQuals = getQualifiers();
+ Qualifiers OtherQuals = other.getQualifiers();
+ return (MyQuals != OtherQuals && MyQuals.compatiblyIncludes(OtherQuals, Ctx));
+}
+
+/// Determine whether this type is at least
+/// as qualified as the Other type. For example, "const volatile
+/// int" is at least as qualified as "const int", "volatile int",
+/// "int", and "const volatile int".
+inline bool QualType::isAtLeastAsQualifiedAs(QualType other,
+ const ASTContext &Ctx) const {
+ Qualifiers OtherQuals = other.getQualifiers();
+
+ // Ignore __unaligned qualifier if this type is a void.
+ if (getUnqualifiedType()->isVoidType())
+ OtherQuals.removeUnaligned();
+
+ return getQualifiers().compatiblyIncludes(OtherQuals, Ctx);
+}
+
+/// If Type is a reference type (e.g., const
+/// int&), returns the type that the reference refers to ("const
+/// int"). Otherwise, returns the type itself. This routine is used
+/// throughout Sema to implement C++ 5p6:
+///
+/// If an expression initially has the type "reference to T" (8.3.2,
+/// 8.5.3), the type is adjusted to "T" prior to any further
+/// analysis, the expression designates the object or function
+/// denoted by the reference, and the expression is an lvalue.
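+///
+/// A minimal sketch (assuming 'T' is a QualType for 'const int &'):
+/// @code
+///   QualType U = T.getNonReferenceType(); // 'const int'
+///   QualType V = U.getNonReferenceType(); // still 'const int'
+/// @endcode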
+inline QualType QualType::getNonReferenceType() const {
+ if (const auto *RefType = (*this)->getAs<ReferenceType>())
+ return RefType->getPointeeType();
+ else
+ return *this;
+}
+
+inline bool QualType::isCForbiddenLValueType() const {
+ return ((getTypePtr()->isVoidType() && !hasQualifiers()) ||
+ getTypePtr()->isFunctionType());
+}
+
+/// Tests whether the type is categorized as a fundamental type.
+///
+/// \returns True for types specified in C++0x [basic.fundamental].
+inline bool Type::isFundamentalType() const {
+ return isVoidType() ||
+ isNullPtrType() ||
+ // FIXME: It's really annoying that we don't have an
+ // 'isArithmeticType()' which agrees with the standard definition.
+ (isArithmeticType() && !isEnumeralType());
+}
+
+/// Tests whether the type is categorized as a compound type.
+///
+/// \returns True for types specified in C++0x [basic.compound].
+inline bool Type::isCompoundType() const {
+ // C++0x [basic.compound]p1:
+ // Compound types can be constructed in the following ways:
+ // -- arrays of objects of a given type [...];
+ return isArrayType() ||
+ // -- functions, which have parameters of given types [...];
+ isFunctionType() ||
+ // -- pointers to void or objects or functions [...];
+ isPointerType() ||
+ // -- references to objects or functions of a given type. [...]
+ isReferenceType() ||
+ // -- classes containing a sequence of objects of various types, [...];
+ isRecordType() ||
+ // -- unions, which are classes capable of containing objects of different
+ // types at different times;
+ isUnionType() ||
+ // -- enumerations, which comprise a set of named constant values. [...];
+ isEnumeralType() ||
+ // -- pointers to non-static class members, [...].
+ isMemberPointerType();
+}
+
+inline bool Type::isFunctionType() const {
+ return isa<FunctionType>(CanonicalType);
+}
+
+inline bool Type::isPointerType() const {
+ return isa<PointerType>(CanonicalType);
+}
+
+inline bool Type::isPointerOrReferenceType() const {
+ return isPointerType() || isReferenceType();
+}
+
+inline bool Type::isAnyPointerType() const {
+ return isPointerType() || isObjCObjectPointerType();
+}
+
+inline bool Type::isSignableType(const ASTContext &Ctx) const {
+ return isSignablePointerType() || isSignableIntegerType(Ctx);
+}
+
+inline bool Type::isSignablePointerType() const {
+ return isPointerType() || isObjCClassType() || isObjCQualifiedClassType();
+}
+
+inline bool Type::isBlockPointerType() const {
+ return isa<BlockPointerType>(CanonicalType);
+}
+
+inline bool Type::isReferenceType() const {
+ return isa<ReferenceType>(CanonicalType);
+}
+
+inline bool Type::isLValueReferenceType() const {
+ return isa<LValueReferenceType>(CanonicalType);
+}
+
+inline bool Type::isRValueReferenceType() const {
+ return isa<RValueReferenceType>(CanonicalType);
+}
+
+inline bool Type::isObjectPointerType() const {
+ // Note: an "object pointer type" is not the same thing as a pointer to an
+ // object type; rather, it is a pointer to an object type or a pointer to cv
+ // void.
+ if (const auto *T = getAs<PointerType>())
+ return !T->getPointeeType()->isFunctionType();
+ else
+ return false;
+}
+
+inline bool Type::isCFIUncheckedCalleeFunctionType() const {
+ if (const auto *Fn = getAs<FunctionProtoType>())
+ return Fn->hasCFIUncheckedCallee();
+ return false;
+}
+
+inline bool Type::hasPointeeToToCFIUncheckedCalleeFunctionType() const {
+ QualType Pointee;
+ if (const auto *PT = getAs<PointerType>())
+ Pointee = PT->getPointeeType();
+ else if (const auto *RT = getAs<ReferenceType>())
+ Pointee = RT->getPointeeType();
+ else if (const auto *MPT = getAs<MemberPointerType>())
+ Pointee = MPT->getPointeeType();
+ else if (const auto *DT = getAs<DecayedType>())
+ Pointee = DT->getPointeeType();
+ else
+ return false;
+ return Pointee->isCFIUncheckedCalleeFunctionType();
+}
+
+inline bool Type::isFunctionPointerType() const {
+ if (const auto *T = getAs<PointerType>())
+ return T->getPointeeType()->isFunctionType();
+ else
+ return false;
+}
+
+inline bool Type::isFunctionReferenceType() const {
+ if (const auto *T = getAs<ReferenceType>())
+ return T->getPointeeType()->isFunctionType();
+ else
+ return false;
+}
+
+inline bool Type::isMemberPointerType() const {
+ return isa<MemberPointerType>(CanonicalType);
+}
+
+inline bool Type::isMemberFunctionPointerType() const {
+ if (const auto *T = getAs<MemberPointerType>())
+ return T->isMemberFunctionPointer();
+ else
+ return false;
+}
+
+inline bool Type::isMemberDataPointerType() const {
+ if (const auto *T = getAs<MemberPointerType>())
+ return T->isMemberDataPointer();
+ else
+ return false;
+}
+
+inline bool Type::isArrayType() const {
+ return isa<ArrayType>(CanonicalType);
+}
+
+inline bool Type::isConstantArrayType() const {
+ return isa<ConstantArrayType>(CanonicalType);
+}
+
+inline bool Type::isIncompleteArrayType() const {
+ return isa<IncompleteArrayType>(CanonicalType);
+}
+
+inline bool Type::isVariableArrayType() const {
+ return isa<VariableArrayType>(CanonicalType);
+}
+
+inline bool Type::isArrayParameterType() const {
+ return isa<ArrayParameterType>(CanonicalType);
+}
+
+inline bool Type::isDependentSizedArrayType() const {
+ return isa<DependentSizedArrayType>(CanonicalType);
+}
+
+inline bool Type::isBuiltinType() const {
+ return isa<BuiltinType>(CanonicalType);
+}
+
+inline bool Type::isRecordType() const {
+ return isa<RecordType>(CanonicalType);
+}
+
+inline bool Type::isEnumeralType() const {
+ return isa<EnumType>(CanonicalType);
+}
+
+inline bool Type::isAnyComplexType() const {
+ return isa<ComplexType>(CanonicalType);
+}
+
+inline bool Type::isVectorType() const {
+ return isa<VectorType>(CanonicalType);
+}
+
+inline bool Type::isExtVectorType() const {
+ return isa<ExtVectorType>(CanonicalType);
+}
+
+inline bool Type::isExtVectorBoolType() const {
+ if (!isExtVectorType())
+ return false;
+ return cast<ExtVectorType>(CanonicalType)->getElementType()->isBooleanType();
+}
+
+inline bool Type::isSubscriptableVectorType() const {
+ return isVectorType() || isSveVLSBuiltinType();
+}
+
+inline bool Type::isMatrixType() const {
+ return isa<MatrixType>(CanonicalType);
+}
+
+inline bool Type::isConstantMatrixType() const {
+ return isa<ConstantMatrixType>(CanonicalType);
+}
+
+inline bool Type::isDependentAddressSpaceType() const {
+ return isa<DependentAddressSpaceType>(CanonicalType);
+}
+
+inline bool Type::isObjCObjectPointerType() const {
+ return isa<ObjCObjectPointerType>(CanonicalType);
+}
+
+inline bool Type::isObjCObjectType() const {
+ return isa<ObjCObjectType>(CanonicalType);
+}
+
+inline bool Type::isObjCObjectOrInterfaceType() const {
+ return isa<ObjCInterfaceType>(CanonicalType) ||
+ isa<ObjCObjectType>(CanonicalType);
+}
+
+inline bool Type::isAtomicType() const {
+ return isa<AtomicType>(CanonicalType);
+}
+
+inline bool Type::isUndeducedAutoType() const {
+ return isa<AutoType>(CanonicalType);
+}
+
+inline bool Type::isObjCQualifiedIdType() const {
+ if (const auto *OPT = getAs<ObjCObjectPointerType>())
+ return OPT->isObjCQualifiedIdType();
+ return false;
+}
+
+inline bool Type::isObjCQualifiedClassType() const {
+ if (const auto *OPT = getAs<ObjCObjectPointerType>())
+ return OPT->isObjCQualifiedClassType();
+ return false;
+}
+
+inline bool Type::isObjCIdType() const {
+ if (const auto *OPT = getAs<ObjCObjectPointerType>())
+ return OPT->isObjCIdType();
+ return false;
+}
+
+inline bool Type::isObjCClassType() const {
+ if (const auto *OPT = getAs<ObjCObjectPointerType>())
+ return OPT->isObjCClassType();
+ return false;
+}
+
+inline bool Type::isObjCSelType() const {
+ if (const auto *OPT = getAs<PointerType>())
+ return OPT->getPointeeType()->isSpecificBuiltinType(BuiltinType::ObjCSel);
+ return false;
+}
+
+inline bool Type::isObjCBuiltinType() const {
+ return isObjCIdType() || isObjCClassType() || isObjCSelType();
+}
+
+inline bool Type::isDecltypeType() const {
+ return isa<DecltypeType>(this);
+}
+
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
+ inline bool Type::is##Id##Type() const { \
+ return isSpecificBuiltinType(BuiltinType::Id); \
+ }
+#include "clang/Basic/OpenCLImageTypes.def"
+
+inline bool Type::isSamplerT() const {
+ return isSpecificBuiltinType(BuiltinType::OCLSampler);
+}
+
+inline bool Type::isEventT() const {
+ return isSpecificBuiltinType(BuiltinType::OCLEvent);
+}
+
+inline bool Type::isClkEventT() const {
+ return isSpecificBuiltinType(BuiltinType::OCLClkEvent);
+}
+
+inline bool Type::isQueueT() const {
+ return isSpecificBuiltinType(BuiltinType::OCLQueue);
+}
+
+inline bool Type::isReserveIDT() const {
+ return isSpecificBuiltinType(BuiltinType::OCLReserveID);
+}
+
+inline bool Type::isImageType() const {
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) is##Id##Type() ||
+ return
+#include "clang/Basic/OpenCLImageTypes.def"
+ false; // end boolean or operation
+}
+
+inline bool Type::isPipeType() const {
+ return isa<PipeType>(CanonicalType);
+}
+
+inline bool Type::isBitIntType() const {
+ return isa<BitIntType>(CanonicalType);
+}
+
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ inline bool Type::is##Id##Type() const { \
+ return isSpecificBuiltinType(BuiltinType::Id); \
+ }
+#include "clang/Basic/OpenCLExtensionTypes.def"
+
+inline bool Type::isOCLIntelSubgroupAVCType() const {
+#define INTEL_SUBGROUP_AVC_TYPE(ExtType, Id) \
+ isOCLIntelSubgroupAVC##Id##Type() ||
+ return
+#include "clang/Basic/OpenCLExtensionTypes.def"
+ false; // end of boolean or operation
+}
+
+inline bool Type::isOCLExtOpaqueType() const {
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) is##Id##Type() ||
+ return
+#include "clang/Basic/OpenCLExtensionTypes.def"
+ false; // end of boolean or operation
+}
+
+inline bool Type::isOpenCLSpecificType() const {
+ return isSamplerT() || isEventT() || isImageType() || isClkEventT() ||
+ isQueueT() || isReserveIDT() || isPipeType() || isOCLExtOpaqueType();
+}
+
+#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
+ inline bool Type::is##Id##Type() const { \
+ return isSpecificBuiltinType(BuiltinType::Id); \
+ }
+#include "clang/Basic/HLSLIntangibleTypes.def"
+
+inline bool Type::isHLSLBuiltinIntangibleType() const {
+#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) is##Id##Type() ||
+ return
+#include "clang/Basic/HLSLIntangibleTypes.def"
+ false;
+}
+
+inline bool Type::isHLSLSpecificType() const {
+ return isHLSLBuiltinIntangibleType() || isHLSLAttributedResourceType() ||
+ isHLSLInlineSpirvType();
+}
+
+inline bool Type::isHLSLAttributedResourceType() const {
+ return isa<HLSLAttributedResourceType>(this);
+}
+
+inline bool Type::isHLSLInlineSpirvType() const {
+ return isa<HLSLInlineSpirvType>(this);
+}
+
+inline bool Type::isTemplateTypeParmType() const {
+ return isa<TemplateTypeParmType>(CanonicalType);
+}
+
+inline bool Type::isSpecificBuiltinType(unsigned K) const {
+ if (const BuiltinType *BT = getAs<BuiltinType>()) {
+ return BT->getKind() == static_cast<BuiltinType::Kind>(K);
+ }
+ return false;
+}
+
+inline bool Type::isPlaceholderType() const {
+ if (const auto *BT = dyn_cast<BuiltinType>(this))
+ return BT->isPlaceholderType();
+ return false;
+}
+
+inline const BuiltinType *Type::getAsPlaceholderType() const {
+ if (const auto *BT = dyn_cast<BuiltinType>(this))
+ if (BT->isPlaceholderType())
+ return BT;
+ return nullptr;
+}
+
+inline bool Type::isSpecificPlaceholderType(unsigned K) const {
+ assert(BuiltinType::isPlaceholderTypeKind((BuiltinType::Kind) K));
+ return isSpecificBuiltinType(K);
+}
+
+inline bool Type::isNonOverloadPlaceholderType() const {
+ if (const auto *BT = dyn_cast<BuiltinType>(this))
+ return BT->isNonOverloadPlaceholderType();
+ return false;
+}
+
+inline bool Type::isVoidType() const {
+ return isSpecificBuiltinType(BuiltinType::Void);
+}
+
+inline bool Type::isHalfType() const {
+ // FIXME: Should we allow complex __fp16? Probably not.
+ return isSpecificBuiltinType(BuiltinType::Half);
+}
+
+inline bool Type::isFloat16Type() const {
+ return isSpecificBuiltinType(BuiltinType::Float16);
+}
+
+inline bool Type::isFloat32Type() const {
+ return isSpecificBuiltinType(BuiltinType::Float);
+}
+
+inline bool Type::isDoubleType() const {
+ return isSpecificBuiltinType(BuiltinType::Double);
+}
+
+inline bool Type::isBFloat16Type() const {
+ return isSpecificBuiltinType(BuiltinType::BFloat16);
+}
+
+inline bool Type::isMFloat8Type() const {
+ return isSpecificBuiltinType(BuiltinType::MFloat8);
+}
+
+inline bool Type::isFloat128Type() const {
+ return isSpecificBuiltinType(BuiltinType::Float128);
+}
+
+inline bool Type::isIbm128Type() const {
+ return isSpecificBuiltinType(BuiltinType::Ibm128);
+}
+
+inline bool Type::isNullPtrType() const {
+ return isSpecificBuiltinType(BuiltinType::NullPtr);
+}
+
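+// Predicates on EnumDecl, defined where EnumDecl is complete; forward
+// declared here for use by the enum-related type queries below.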
+bool IsEnumDeclComplete(EnumDecl *);
+bool IsEnumDeclScoped(EnumDecl *);
+
+inline bool Type::isIntegerType() const {
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->isInteger();
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ // Incomplete enum types are not treated as integer types.
+ // FIXME: In C++, enum types are never integer types.
+ return IsEnumDeclComplete(ET->getOriginalDecl()) &&
+ !IsEnumDeclScoped(ET->getOriginalDecl());
+ }
+ return isBitIntType();
+}
+
+inline bool Type::isFixedPointType() const {
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::ShortAccum &&
+ BT->getKind() <= BuiltinType::SatULongFract;
+ }
+ return false;
+}
+
+inline bool Type::isFixedPointOrIntegerType() const {
+ return isFixedPointType() || isIntegerType();
+}
+
+inline bool Type::isConvertibleToFixedPointType() const {
+ return isRealFloatingType() || isFixedPointOrIntegerType();
+}
+
+inline bool Type::isSaturatedFixedPointType() const {
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::SatShortAccum &&
+ BT->getKind() <= BuiltinType::SatULongFract;
+ }
+ return false;
+}
+
+inline bool Type::isUnsaturatedFixedPointType() const {
+ return isFixedPointType() && !isSaturatedFixedPointType();
+}
+
+inline bool Type::isSignedFixedPointType() const {
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return ((BT->getKind() >= BuiltinType::ShortAccum &&
+ BT->getKind() <= BuiltinType::LongAccum) ||
+ (BT->getKind() >= BuiltinType::ShortFract &&
+ BT->getKind() <= BuiltinType::LongFract) ||
+ (BT->getKind() >= BuiltinType::SatShortAccum &&
+ BT->getKind() <= BuiltinType::SatLongAccum) ||
+ (BT->getKind() >= BuiltinType::SatShortFract &&
+ BT->getKind() <= BuiltinType::SatLongFract));
+ }
+ return false;
+}
+
+inline bool Type::isUnsignedFixedPointType() const {
+ return isFixedPointType() && !isSignedFixedPointType();
+}
+
+inline bool Type::isScalarType() const {
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() > BuiltinType::Void &&
+ BT->getKind() <= BuiltinType::NullPtr;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ // Enums are scalar types, but only if they are defined. Incomplete enums
+ // are not treated as scalar types.
+ return IsEnumDeclComplete(ET->getOriginalDecl());
+ return isa<PointerType>(CanonicalType) ||
+ isa<BlockPointerType>(CanonicalType) ||
+ isa<MemberPointerType>(CanonicalType) ||
+ isa<ComplexType>(CanonicalType) ||
+ isa<ObjCObjectPointerType>(CanonicalType) ||
+ isBitIntType();
+}
+
+inline bool Type::isIntegralOrEnumerationType() const {
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->isInteger();
+
+ // Check for a complete enum type; incomplete enum types are not properly an
+ // enumeration type in the sense required here.
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
+ return IsEnumDeclComplete(ET->getOriginalDecl());
+
+ return isBitIntType();
+}
+
+inline bool Type::isBooleanType() const {
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Bool;
+ return false;
+}
+
+inline bool Type::isUndeducedType() const {
+ auto *DT = getContainedDeducedType();
+ return DT && !DT->isDeduced();
+}
+
+/// Determines whether this is a type for which one can define
+/// an overloaded operator.
+inline bool Type::isOverloadableType() const {
+ if (!isDependentType())
+ return isRecordType() || isEnumeralType();
+ return !isArrayType() && !isFunctionType() && !isAnyPointerType() &&
+ !isMemberPointerType();
+}
+
+/// Determines whether this type is written as a typedef-name.
+inline bool Type::isTypedefNameType() const {
+ if (getAs<TypedefType>())
+ return true;
+ if (auto *TST = getAs<TemplateSpecializationType>())
+ return TST->isTypeAlias();
+ return false;
+}
+
+/// Determines whether this type can decay to a pointer type.
+inline bool Type::canDecayToPointerType() const {
+ return isFunctionType() || (isArrayType() && !isArrayParameterType());
+}
+
+inline bool Type::hasPointerRepresentation() const {
+ return (isPointerType() || isReferenceType() || isBlockPointerType() ||
+ isObjCObjectPointerType() || isNullPtrType());
+}
+
+inline bool Type::hasObjCPointerRepresentation() const {
+ return isObjCObjectPointerType();
+}
+
+inline const Type *Type::getBaseElementTypeUnsafe() const {
+ const Type *type = this;
+ while (const ArrayType *arrayType = type->getAsArrayTypeUnsafe())
+ type = arrayType->getElementType().getTypePtr();
+ return type;
+}
+
+inline const Type *Type::getPointeeOrArrayElementType() const {
+ const Type *type = this;
+ if (type->isAnyPointerType())
+ return type->getPointeeType().getTypePtr();
+ else if (type->isArrayType())
+ return type->getBaseElementTypeUnsafe();
+ return type;
+}
+
+/// Insertion operator for partial diagnostics. This allows sending address
+/// spaces into a diagnostic with <<.
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
+ LangAS AS) {
+ PD.AddTaggedVal(llvm::to_underlying(AS),
+ DiagnosticsEngine::ArgumentKind::ak_addrspace);
+ return PD;
+}
+
+/// Insertion operator for partial diagnostics. This allows sending Qualifiers
+/// into a diagnostic with <<.
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
+ Qualifiers Q) {
+ PD.AddTaggedVal(Q.getAsOpaqueValue(),
+ DiagnosticsEngine::ArgumentKind::ak_qual);
+ return PD;
+}
+
+/// Insertion operator for partial diagnostics. This allows sending QualTypes
+/// into a diagnostic with <<.
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
+ QualType T) {
+ PD.AddTaggedVal(reinterpret_cast<uint64_t>(T.getAsOpaquePtr()),
+ DiagnosticsEngine::ak_qualtype);
+ return PD;
+}
+
+// Helper class template that is used by Type::getAs to ensure that one does
+// not try to look through a qualified type to get to an array type.
+template <typename T>
+using TypeIsArrayType =
+ std::integral_constant<bool, std::is_same<T, ArrayType>::value ||
+ std::is_base_of<ArrayType, T>::value>;
+
+// Member-template getAs<specific type>.
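+//
+// A minimal usage sketch: getAs looks through sugar such as typedefs, so
+// given 'typedef int *IntPtr', a QualType spelled 'IntPtr' satisfies:
+//   if (const auto *PT = QT->getAs<PointerType>())
+//     (void)PT->getPointeeType(); // the pointee is 'int'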
+template <typename T> const T *Type::getAs() const {
+ static_assert(!TypeIsArrayType<T>::value,
+ "ArrayType cannot be used with getAs!");
+
+ // If this is directly a T type, return it.
+ if (const auto *Ty = dyn_cast<T>(this))
+ return Ty;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<T>(CanonicalType))
+ return nullptr;
+
+ // If this is a typedef for the type, strip the typedef off without
+ // losing all typedef information.
+ return cast<T>(getUnqualifiedDesugaredType());
+}
+
+template <typename T> const T *Type::getAsAdjusted() const {
+ static_assert(!TypeIsArrayType<T>::value,
+ "ArrayType cannot be used with getAsAdjusted!");
+
+ // If this is directly a T type, return it.
+ if (const auto *Ty = dyn_cast<T>(this))
+ return Ty;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<T>(CanonicalType))
+ return nullptr;
+
+ // Strip off type adjustments that do not modify the underlying nature of the
+ // type.
+ const Type *Ty = this;
+ while (Ty) {
+ if (const auto *A = dyn_cast<AttributedType>(Ty))
+ Ty = A->getModifiedType().getTypePtr();
+ else if (const auto *A = dyn_cast<BTFTagAttributedType>(Ty))
+ Ty = A->getWrappedType().getTypePtr();
+ else if (const auto *A = dyn_cast<HLSLAttributedResourceType>(Ty))
+ Ty = A->getWrappedType().getTypePtr();
+ else if (const auto *P = dyn_cast<ParenType>(Ty))
+ Ty = P->desugar().getTypePtr();
+ else if (const auto *A = dyn_cast<AdjustedType>(Ty))
+ Ty = A->desugar().getTypePtr();
+ else if (const auto *M = dyn_cast<MacroQualifiedType>(Ty))
+ Ty = M->desugar().getTypePtr();
+ else
+ break;
+ }
+
+ // Just because the canonical type is correct does not mean we can use cast<>,
+ // since we may not have stripped off all the sugar down to the base type.
+ return dyn_cast<T>(Ty);
+}
+
+inline const ArrayType *Type::getAsArrayTypeUnsafe() const {
+ // If this is directly an array type, return it.
+ if (const auto *arr = dyn_cast<ArrayType>(this))
+ return arr;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<ArrayType>(CanonicalType))
+ return nullptr;
+
+ // If this is a typedef for the type, strip the typedef off without
+ // losing all typedef information.
+ return cast<ArrayType>(getUnqualifiedDesugaredType());
+}
+
+template <typename T> const T *Type::castAs() const {
+ static_assert(!TypeIsArrayType<T>::value,
+ "ArrayType cannot be used with castAs!");
+
+ if (const auto *ty = dyn_cast<T>(this)) return ty;
+ assert(isa<T>(CanonicalType));
+ return cast<T>(getUnqualifiedDesugaredType());
+}
+
+inline const ArrayType *Type::castAsArrayTypeUnsafe() const {
+ assert(isa<ArrayType>(CanonicalType));
+ if (const auto *arr = dyn_cast<ArrayType>(this)) return arr;
+ return cast<ArrayType>(getUnqualifiedDesugaredType());
+}
+
+DecayedType::DecayedType(QualType OriginalType, QualType DecayedPtr,
+ QualType CanonicalPtr)
+ : AdjustedType(Decayed, OriginalType, DecayedPtr, CanonicalPtr) {
+#ifndef NDEBUG
+ QualType Adjusted = getAdjustedType();
+ (void)AttributedType::stripOuterNullability(Adjusted);
+ assert(isa<PointerType>(Adjusted));
+#endif
+}
+
+QualType DecayedType::getPointeeType() const {
+ QualType Decayed = getDecayedType();
+ (void)AttributedType::stripOuterNullability(Decayed);
+ return cast<PointerType>(Decayed)->getPointeeType();
+}
+
+// Get the decimal string representation of a fixed point type, represented
+// as a scaled integer.
+// TODO: At some point, we should change the arguments to accept an
+// APFixedPoint instead of an APSInt and scale.
+void FixedPointValueToString(SmallVectorImpl<char> &Str, llvm::APSInt Val,
+ unsigned Scale);
+
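+// Find the function effects, if any, that apply to a type: walk through
+// pointee types and array element types until a function prototype (or a
+// type with neither) is reached.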
+inline FunctionEffectsRef FunctionEffectsRef::get(QualType QT) {
+ const Type *TypePtr = QT.getTypePtr();
+ while (true) {
+ if (QualType Pointee = TypePtr->getPointeeType(); !Pointee.isNull())
+ TypePtr = Pointee.getTypePtr();
+ else if (TypePtr->isArrayType())
+ TypePtr = TypePtr->getBaseElementTypeUnsafe();
+ else
+ break;
+ }
+ if (const auto *FPT = TypePtr->getAs<FunctionProtoType>())
+ return FPT->getFunctionEffects();
+ return {};
+}
+
+} // namespace clang
+
+#endif // LLVM_CLANG_AST_TYPE_BASE_H
diff --git a/clang/include/clang/AST/TypeLoc.h b/clang/include/clang/AST/TypeLoc.h
index 52ef7ac..d52e104 100644
--- a/clang/include/clang/AST/TypeLoc.h
+++ b/clang/include/clang/AST/TypeLoc.h
@@ -16,9 +16,9 @@
#include "clang/AST/ASTConcept.h"
#include "clang/AST/DeclarationName.h"
-#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
#include "clang/AST/TemplateBase.h"
-#include "clang/AST/Type.h"
+#include "clang/AST/TypeBase.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
@@ -193,6 +193,21 @@ public:
/// Get the SourceLocation of the template keyword (if any).
SourceLocation getTemplateKeywordLoc() const;
+ /// If this type represents a qualified-id, this returns its nested name
+ /// specifier. For example, for the qualified-id "foo::bar::baz", this returns
+ /// "foo::bar". Returns null if this type represents an unqualified-id.
+ NestedNameSpecifierLoc getPrefix() const;
+
+ /// This returns the position of the type after any elaboration, such as the
+ /// 'struct' keyword, and name qualifiers. This will be the 'template'
+ /// keyword if present, or the name location otherwise.
+ SourceLocation getNonPrefixBeginLoc() const;
+
+ /// This returns the position of the type after any elaboration, such as the
+ /// 'struct' keyword. This may be the position of the name qualifiers, the
+ /// 'template' keyword, or the name location, whichever is present first.
+ SourceLocation getNonElaboratedBeginLoc() const;
+
/// Initializes this to state that every location in this
/// type is the given location.
///
@@ -679,62 +694,164 @@ public:
}
};
-/// Wrapper for source info for types used via transparent aliases.
-class UsingTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
- UsingTypeLoc, UsingType> {
-public:
- QualType getUnderlyingType() const {
- return getTypePtr()->getUnderlyingType();
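+/// Source-location storage for type locs written as an (optionally)
+/// elaborated, qualified name: the name location, the elaborated keyword
+/// location, and opaque qualifier location data.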
+struct ElaboratedNameLocInfo {
+ SourceLocation NameLoc;
+ SourceLocation ElaboratedKeywordLoc;
+
+ ElaboratedNameLocInfo() = default;
+ ElaboratedNameLocInfo(SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation NameLoc)
+ : NameLoc(NameLoc), ElaboratedKeywordLoc(ElaboratedKeywordLoc),
+ QualifierData(QualifierLoc.getOpaqueData()) {}
+ ElaboratedNameLocInfo(ASTContext &Context, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, SourceLocation Loc)
+ : NameLoc(Loc),
+ ElaboratedKeywordLoc(
+ Keyword != ElaboratedTypeKeyword::None ? Loc : SourceLocation()),
+ QualifierData(getTrivialQualifierData(Context, Qualifier, Loc)) {}
+
+ NestedNameSpecifierLoc getQualifierLoc(NestedNameSpecifier Qualifier) const {
+ assert(!Qualifier == !QualifierData);
+ return NestedNameSpecifierLoc(Qualifier, QualifierData);
+ }
+
+ SourceRange getLocalSourceRange(NestedNameSpecifier Qualifier) const {
+ SourceLocation BeginLoc = ElaboratedKeywordLoc;
+ if (NestedNameSpecifierLoc QualifierLoc = getQualifierLoc(Qualifier);
+ BeginLoc.isInvalid() && Qualifier)
+ BeginLoc = QualifierLoc.getBeginLoc();
+ if (BeginLoc.isInvalid())
+ BeginLoc = NameLoc;
+ return SourceRange(BeginLoc, NameLoc);
}
- UsingShadowDecl *getFoundDecl() const { return getTypePtr()->getFoundDecl(); }
-};
-/// Wrapper for source info for typedefs.
-class TypedefTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
- TypedefTypeLoc,
- TypedefType> {
-public:
- TypedefNameDecl *getTypedefNameDecl() const {
- return getTypePtr()->getDecl();
+private:
+ void *QualifierData;
+
+ static void *getTrivialQualifierData(ASTContext &Context,
+ NestedNameSpecifier Qualifier,
+ SourceLocation Loc) {
+ if (!Qualifier)
+ return nullptr;
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, Qualifier, Loc);
+ return Builder.getWithLocInContext(Context).getOpaqueData();
}
};
-/// Wrapper for source info for injected class names of class
-/// templates.
-class InjectedClassNameTypeLoc :
- public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
- InjectedClassNameTypeLoc,
- InjectedClassNameType> {
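+/// Common wrapper for source info for types written as an (optionally)
+/// elaborated, qualified name; provides the elaborated keyword,
+/// nested-name-specifier, and name locations.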
+template <class TL, class T>
+class ElaboratedNameTypeLoc
+ : public ConcreteTypeLoc<UnqualTypeLoc, TL, T, ElaboratedNameLocInfo> {
public:
- CXXRecordDecl *getDecl() const {
- return getTypePtr()->getDecl();
+ auto *getDecl() const { return this->getTypePtr()->getDecl(); }
+
+ void set(SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation NameLoc) {
+ assert(QualifierLoc.getNestedNameSpecifier() ==
+ this->getTypePtr()->getQualifier());
+ *this->getLocalData() =
+ ElaboratedNameLocInfo(ElaboratedKeywordLoc, QualifierLoc, NameLoc);
+ }
+
+ SourceLocation getElaboratedKeywordLoc() const {
+ return this->getLocalData()->ElaboratedKeywordLoc;
+ }
+
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ return this->getLocalData()->getQualifierLoc(
+ this->getTypePtr()->getQualifier());
+ }
+
+ SourceLocation getNameLoc() const { return this->getLocalData()->NameLoc; }
+
+ SourceRange getLocalSourceRange() const {
+ return this->getLocalData()->getLocalSourceRange(
+ this->getTypePtr()->getQualifier());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ const auto *Ptr = this->getTypePtr();
+ *this->getLocalData() = ElaboratedNameLocInfo(Context, Ptr->getKeyword(),
+ Ptr->getQualifier(), Loc);
}
};
+/// Wrapper for source info for typedefs.
+class TypedefTypeLoc
+ : public ElaboratedNameTypeLoc<TypedefTypeLoc, TypedefType> {};
+
/// Wrapper for source info for unresolved typename using decls.
-class UnresolvedUsingTypeLoc :
- public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
- UnresolvedUsingTypeLoc,
- UnresolvedUsingType> {
-public:
- UnresolvedUsingTypenameDecl *getDecl() const {
- return getTypePtr()->getDecl();
- }
+class UnresolvedUsingTypeLoc
+ : public ElaboratedNameTypeLoc<UnresolvedUsingTypeLoc,
+ UnresolvedUsingType> {};
+
+/// Wrapper for source info for types used via transparent aliases.
+class UsingTypeLoc : public ElaboratedNameTypeLoc<UsingTypeLoc, UsingType> {};
+
+struct TagTypeLocInfo {
+ SourceLocation NameLoc;
+ SourceLocation ElaboratedKWLoc;
+ void *QualifierData;
};
-/// Wrapper for source info for tag types. Note that this only
-/// records source info for the name itself; a type written 'struct foo'
-/// should be represented as an ElaboratedTypeLoc. We currently
-/// only do that when C++ is enabled because of the expense of
-/// creating an ElaboratedType node for so many type references in C.
-class TagTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
- TagTypeLoc,
- TagType> {
+class TagTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc, TagTypeLoc, TagType,
+ TagTypeLocInfo> {
public:
- TagDecl *getDecl() const { return getTypePtr()->getDecl(); }
+ TagDecl *getOriginalDecl() const { return getTypePtr()->getOriginalDecl(); }
/// True if the tag was defined in this type specifier.
bool isDefinition() const;
+
+ SourceLocation getElaboratedKeywordLoc() const {
+ return getLocalData()->ElaboratedKWLoc;
+ }
+
+ void setElaboratedKeywordLoc(SourceLocation Loc) {
+ getLocalData()->ElaboratedKWLoc = Loc;
+ }
+
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ NestedNameSpecifier Qualifier = getTypePtr()->getQualifier();
+ void *QualifierData = getLocalData()->QualifierData;
+ assert(!Qualifier == !QualifierData);
+ return NestedNameSpecifierLoc(Qualifier, QualifierData);
+ }
+
+ void setQualifierLoc(NestedNameSpecifierLoc QualifierLoc) {
+ assert(QualifierLoc.getNestedNameSpecifier() ==
+ getTypePtr()->getQualifier());
+ getLocalData()->QualifierData = QualifierLoc.getOpaqueData();
+ }
+
+ SourceLocation getNameLoc() const { return getLocalData()->NameLoc; }
+
+ void setNameLoc(SourceLocation Loc) { getLocalData()->NameLoc = Loc; }
+
+ SourceRange getLocalSourceRange() const {
+ SourceLocation BeginLoc = getElaboratedKeywordLoc();
+ if (NestedNameSpecifierLoc Qualifier = getQualifierLoc();
+ BeginLoc.isInvalid() && Qualifier)
+ BeginLoc = Qualifier.getBeginLoc();
+ if (BeginLoc.isInvalid())
+ BeginLoc = getNameLoc();
+ return SourceRange(BeginLoc, getNameLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setElaboratedKeywordLoc(getTypePtr()->getKeyword() !=
+ ElaboratedTypeKeyword::None
+ ? Loc
+ : SourceLocation());
+ if (NestedNameSpecifier Qualifier = getTypePtr()->getQualifier()) {
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, Qualifier, Loc);
+ setQualifierLoc(Builder.getWithLocInContext(Context));
+ } else {
+ getLocalData()->QualifierData = nullptr;
+ }
+ setNameLoc(Loc);
+ }
};
/// Wrapper for source info for record types.
@@ -742,7 +859,9 @@ class RecordTypeLoc : public InheritingConcreteTypeLoc<TagTypeLoc,
RecordTypeLoc,
RecordType> {
public:
- RecordDecl *getDecl() const { return getTypePtr()->getDecl(); }
+ RecordDecl *getOriginalDecl() const {
+ return getTypePtr()->getOriginalDecl();
+ }
};
/// Wrapper for source info for enum types.
@@ -750,7 +869,18 @@ class EnumTypeLoc : public InheritingConcreteTypeLoc<TagTypeLoc,
EnumTypeLoc,
EnumType> {
public:
- EnumDecl *getDecl() const { return getTypePtr()->getDecl(); }
+ EnumDecl *getOriginalDecl() const { return getTypePtr()->getOriginalDecl(); }
+};
+
+/// Wrapper for source info for injected class names of class
+/// templates.
+class InjectedClassNameTypeLoc
+ : public InheritingConcreteTypeLoc<TagTypeLoc, InjectedClassNameTypeLoc,
+ InjectedClassNameType> {
+public:
+ CXXRecordDecl *getOriginalDecl() const {
+ return getTypePtr()->getOriginalDecl();
+ }
};
/// Wrapper for template type parameters.
@@ -859,12 +989,22 @@ class SubstTemplateTypeParmTypeLoc :
SubstTemplateTypeParmType> {
};
- /// Wrapper for substituted template type parameters.
-class SubstTemplateTypeParmPackTypeLoc :
- public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
- SubstTemplateTypeParmPackTypeLoc,
- SubstTemplateTypeParmPackType> {
-};
+/// Wrapper for source info for the abstract type representing delayed
+/// type pack expansions.
+class SubstPackTypeLoc
+ : public InheritingConcreteTypeLoc<TypeSpecTypeLoc, SubstPackTypeLoc,
+ SubstPackType> {};
+
+/// Wrapper for substituted template type parameter packs.
+class SubstTemplateTypeParmPackTypeLoc
+ : public InheritingConcreteTypeLoc<SubstPackTypeLoc,
+ SubstTemplateTypeParmPackTypeLoc,
+ SubstTemplateTypeParmPackType> {};
+
+/// Wrapper for substituted builtin template packs.
+class SubstBuiltinTemplatePackTypeLoc
+ : public InheritingConcreteTypeLoc<SubstPackTypeLoc,
+ SubstBuiltinTemplatePackTypeLoc,
+ SubstBuiltinTemplatePackType> {};
struct AttributedLocInfo {
const Attr *TypeAttr;
@@ -1405,7 +1545,7 @@ public:
void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setSigilLoc(Loc);
- if (auto *Qualifier = getTypePtr()->getQualifier()) {
+ if (NestedNameSpecifier Qualifier = getTypePtr()->getQualifier()) {
NestedNameSpecifierLocBuilder Builder;
Builder.MakeTrivial(Context, Qualifier, Loc);
setQualifierLoc(Builder.getWithLocInContext(Context));
@@ -1701,9 +1841,11 @@ struct TemplateNameLocInfo {
};
struct TemplateSpecializationLocInfo : TemplateNameLocInfo {
+ SourceRange SR;
+ SourceLocation ElaboratedKWLoc;
SourceLocation TemplateKWLoc;
SourceLocation LAngleLoc;
- SourceLocation RAngleLoc;
+ void *QualifierData;
};
class TemplateSpecializationTypeLoc :
@@ -1712,54 +1854,52 @@ class TemplateSpecializationTypeLoc :
TemplateSpecializationType,
TemplateSpecializationLocInfo> {
public:
- SourceLocation getTemplateKeywordLoc() const {
- return getLocalData()->TemplateKWLoc;
- }
+ void set(SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKeywordLoc, SourceLocation NameLoc,
+ SourceLocation LAngleLoc, SourceLocation RAngleLoc);
- void setTemplateKeywordLoc(SourceLocation Loc) {
- getLocalData()->TemplateKWLoc = Loc;
- }
+ void set(SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKeywordLoc, SourceLocation NameLoc,
+ const TemplateArgumentListInfo &TAL);
- SourceLocation getLAngleLoc() const {
- return getLocalData()->LAngleLoc;
+ SourceLocation getElaboratedKeywordLoc() const {
+ return getLocalData()->ElaboratedKWLoc;
}
- void setLAngleLoc(SourceLocation Loc) {
- getLocalData()->LAngleLoc = Loc;
- }
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ if (!getLocalData()->QualifierData)
+ return NestedNameSpecifierLoc();
- SourceLocation getRAngleLoc() const {
- return getLocalData()->RAngleLoc;
+ NestedNameSpecifier Qualifier =
+ getTypePtr()->getTemplateName().getQualifier();
+ assert(Qualifier && "missing qualification");
+ return NestedNameSpecifierLoc(Qualifier, getLocalData()->QualifierData);
}
- void setRAngleLoc(SourceLocation Loc) {
- getLocalData()->RAngleLoc = Loc;
+ SourceLocation getTemplateKeywordLoc() const {
+ return getLocalData()->TemplateKWLoc;
}
+ SourceLocation getTemplateNameLoc() const { return getLocalData()->NameLoc; }
+
+ SourceLocation getLAngleLoc() const { return getLocalData()->LAngleLoc; }
+
unsigned getNumArgs() const {
return getTypePtr()->template_arguments().size();
}
- void setArgLocInfo(unsigned i, TemplateArgumentLocInfo AI) {
- getArgInfos()[i] = AI;
- }
-
- TemplateArgumentLocInfo getArgLocInfo(unsigned i) const {
- return getArgInfos()[i];
+ MutableArrayRef<TemplateArgumentLocInfo> getArgLocInfos() {
+ return {getArgInfos(), getNumArgs()};
}
TemplateArgumentLoc getArgLoc(unsigned i) const {
return TemplateArgumentLoc(getTypePtr()->template_arguments()[i],
- getArgLocInfo(i));
- }
-
- SourceLocation getTemplateNameLoc() const {
- return getLocalData()->NameLoc;
+ getArgInfos()[i]);
}
- void setTemplateNameLoc(SourceLocation Loc) {
- getLocalData()->NameLoc = Loc;
- }
+ SourceLocation getRAngleLoc() const { return getLocalData()->SR.getEnd(); }
/// - Copy the location information from the given info.
void copy(TemplateSpecializationTypeLoc Loc) {
@@ -1773,21 +1913,9 @@ public:
memcpy(Data, Loc.Data, size);
}
- SourceRange getLocalSourceRange() const {
- if (getTemplateKeywordLoc().isValid())
- return SourceRange(getTemplateKeywordLoc(), getRAngleLoc());
- else
- return SourceRange(getTemplateNameLoc(), getRAngleLoc());
- }
+ SourceRange getLocalSourceRange() const { return getLocalData()->SR; }
- void initializeLocal(ASTContext &Context, SourceLocation Loc) {
- setTemplateKeywordLoc(SourceLocation());
- setTemplateNameLoc(Loc);
- setLAngleLoc(Loc);
- setRAngleLoc(Loc);
- initializeArgLocs(Context, getTypePtr()->template_arguments(),
- getArgInfos(), Loc);
- }
+ void initializeLocal(ASTContext &Context, SourceLocation Loc);
static void initializeArgLocs(ASTContext &Context,
ArrayRef<TemplateArgument> Args,
@@ -2346,99 +2474,73 @@ public:
void initializeLocal(ASTContext &Context, SourceLocation Loc);
};
-class DeducedTemplateSpecializationTypeLoc
- : public InheritingConcreteTypeLoc<DeducedTypeLoc,
- DeducedTemplateSpecializationTypeLoc,
- DeducedTemplateSpecializationType> {
-public:
- SourceLocation getTemplateNameLoc() const {
- return getNameLoc();
- }
-
- void setTemplateNameLoc(SourceLocation Loc) {
- setNameLoc(Loc);
- }
-};
-
-struct ElaboratedLocInfo {
+struct DeducedTemplateSpecializationLocInfo : TypeSpecLocInfo {
SourceLocation ElaboratedKWLoc;
-
/// Data associated with the nested-name-specifier location.
void *QualifierData;
};
-class ElaboratedTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
- ElaboratedTypeLoc,
- ElaboratedType,
- ElaboratedLocInfo> {
+class DeducedTemplateSpecializationTypeLoc
+ : public ConcreteTypeLoc<DeducedTypeLoc,
+ DeducedTemplateSpecializationTypeLoc,
+ DeducedTemplateSpecializationType,
+ DeducedTemplateSpecializationLocInfo> {
public:
SourceLocation getElaboratedKeywordLoc() const {
- return !isEmpty() ? getLocalData()->ElaboratedKWLoc : SourceLocation();
+ return getLocalData()->ElaboratedKWLoc;
}
void setElaboratedKeywordLoc(SourceLocation Loc) {
- if (isEmpty()) {
- assert(Loc.isInvalid());
- return;
- }
getLocalData()->ElaboratedKWLoc = Loc;
}
+ SourceLocation getTemplateNameLoc() const { return getNameLoc(); }
+
+ void setTemplateNameLoc(SourceLocation Loc) { setNameLoc(Loc); }
+
NestedNameSpecifierLoc getQualifierLoc() const {
- return !isEmpty() ? NestedNameSpecifierLoc(getTypePtr()->getQualifier(),
- getLocalData()->QualifierData)
- : NestedNameSpecifierLoc();
+ void *Data = getLocalData()->QualifierData;
+ if (!Data)
+ return NestedNameSpecifierLoc();
+ NestedNameSpecifier Qualifier =
+ getTypePtr()->getTemplateName().getQualifier();
+ assert(Qualifier && "missing qualification");
+ return NestedNameSpecifierLoc(Qualifier, Data);
}
void setQualifierLoc(NestedNameSpecifierLoc QualifierLoc) {
- assert(QualifierLoc.getNestedNameSpecifier() ==
- getTypePtr()->getQualifier() &&
- "Inconsistent nested-name-specifier pointer");
- if (isEmpty()) {
- assert(!QualifierLoc.hasQualifier());
+ if (!QualifierLoc) {
+ // Even if we have a nested-name-specifier in the dependent
+ // template specialization type, we won't record the nested-name-specifier
+ // location information when this type-source location information is
+ // part of a nested-name-specifier.
+ getLocalData()->QualifierData = nullptr;
return;
}
+
+ assert(QualifierLoc.getNestedNameSpecifier() ==
+ getTypePtr()->getTemplateName().getQualifier() &&
+ "Inconsistent nested-name-specifier pointer");
getLocalData()->QualifierData = QualifierLoc.getOpaqueData();
}
SourceRange getLocalSourceRange() const {
- if (getElaboratedKeywordLoc().isValid())
- if (getQualifierLoc())
- return SourceRange(getElaboratedKeywordLoc(),
- getQualifierLoc().getEndLoc());
- else
- return SourceRange(getElaboratedKeywordLoc());
- else
- return getQualifierLoc().getSourceRange();
+ SourceLocation BeginLoc = getElaboratedKeywordLoc();
+ if (BeginLoc.isInvalid())
+ BeginLoc = getQualifierLoc().getBeginLoc();
+ if (BeginLoc.isInvalid())
+ BeginLoc = getNameLoc();
+ return {BeginLoc, getNameLoc()};
}
void initializeLocal(ASTContext &Context, SourceLocation Loc);
+};
- TypeLoc getNamedTypeLoc() const { return getInnerTypeLoc(); }
-
- QualType getInnerType() const { return getTypePtr()->getNamedType(); }
-
- bool isEmpty() const {
- return getTypePtr()->getKeyword() == ElaboratedTypeKeyword::None &&
- !getTypePtr()->getQualifier();
- }
-
- unsigned getLocalDataAlignment() const {
- // FIXME: We want to return 1 here in the empty case, but
- // there are bugs in how alignment is handled in TypeLocs
- // that prevent this from working.
- return ConcreteTypeLoc::getLocalDataAlignment();
- }
-
- unsigned getLocalDataSize() const {
- return !isEmpty() ? ConcreteTypeLoc::getLocalDataSize() : 0;
- }
+struct ElaboratedLocInfo {
+ SourceLocation ElaboratedKWLoc;
- void copy(ElaboratedTypeLoc Loc) {
- unsigned size = getFullDataSize();
- assert(size == Loc.getFullDataSize());
- memcpy(Data, Loc.Data, size);
- }
+ /// Data associated with the nested-name-specifier location.
+ void *QualifierData;
};
// This is exactly the structure of an ElaboratedTypeLoc whose inner
@@ -2749,8 +2851,6 @@ inline T TypeLoc::getAsAdjusted() const {
Cur = ATL.getWrappedLoc();
else if (auto ATL = Cur.getAs<HLSLAttributedResourceTypeLoc>())
Cur = ATL.getWrappedLoc();
- else if (auto ETL = Cur.getAs<ElaboratedTypeLoc>())
- Cur = ETL.getNamedTypeLoc();
else if (auto ATL = Cur.getAs<AdjustedTypeLoc>())
Cur = ATL.getOriginalLoc();
else if (auto MQL = Cur.getAs<MacroQualifiedTypeLoc>())
diff --git a/clang/include/clang/AST/TypeProperties.td b/clang/include/clang/AST/TypeProperties.td
index 3373e96..185a968 100644
--- a/clang/include/clang/AST/TypeProperties.td
+++ b/clang/include/clang/AST/TypeProperties.td
@@ -379,38 +379,59 @@ let Class = AtomicType in {
}
let Class = UnresolvedUsingType in {
- def : Property<"declaration", DeclRef> {
- let Read = [{ node->getDecl() }];
+ def : Property<"IsCanonical", Bool> {
+ let Read = [{ node->isCanonicalUnqualified() }];
}
-
+ def : Property<"Keyword", ElaboratedTypeKeyword> {
+ let Conditional = [{ !IsCanonical }];
+ let Read = [{ node->getKeyword() }];
+ }
+ def : Property<"Qualifier", NestedNameSpecifier> {
+ let Conditional = [{ !IsCanonical }];
+ let Read = [{ node->getQualifier() }];
+ }
+ def : Property<"D", DeclRef> { let Read = [{ node->getDecl() }]; }
def : Creator<[{
- return ctx.getUnresolvedUsingType(cast<UnresolvedUsingTypenameDecl>(declaration));
+ auto *UD = cast<UnresolvedUsingTypenameDecl>(D);
+ return IsCanonical ? ctx.getCanonicalUnresolvedUsingType(UD) : ctx.getUnresolvedUsingType(*Keyword, *Qualifier, UD);
}]>;
}
let Class = UsingType in {
- def : Property<"foundDeclaration", UsingShadowDeclRef> {
- let Read = [{ node->getFoundDecl() }];
+ def : Property<"Keyword", ElaboratedTypeKeyword> {
+ let Read = [{ node->getKeyword() }];
}
- def : Property<"underlyingType", QualType> {
- let Read = [{ node->getUnderlyingType() }];
+ def : Property<"Qualifier", NestedNameSpecifier> {
+ let Read = [{ node->getQualifier() }];
+ }
+ def : Property<"D", UsingShadowDeclRef> { let Read = [{ node->getDecl() }]; }
+ def : Property<"UnderlyingType", QualType> {
+ let Read = [{ node->desugar() }];
}
-
def : Creator<[{
- return ctx.getUsingType(foundDeclaration, underlyingType);
+ return ctx.getUsingType(Keyword, Qualifier, D, UnderlyingType);
}]>;
}
let Class = TypedefType in {
+ def : Property<"Keyword", ElaboratedTypeKeyword> {
+ let Read = [{ node->getKeyword() }];
+ }
+ def : Property<"Qualifier", NestedNameSpecifier> {
+ let Read = [{ node->getQualifier() }];
+ }
def : Property<"declaration", DeclRef> {
let Read = [{ node->getDecl() }];
}
- def : Property<"underlyingType", QualType> {
+ def : Property<"UnderlyingType", QualType> {
let Read = [{ node->desugar() }];
}
+ def : Property<"TypeMatchesDecl", Bool> {
+ let Read = [{ node->typeMatchesDecl() }];
+ }
def : Creator<[{
- return ctx.getTypedefType(cast<TypedefNameDecl>(declaration), underlyingType);
+ return ctx.getTypedefType(Keyword, Qualifier, cast<TypedefNameDecl>(declaration), UnderlyingType, TypeMatchesDecl);
}]>;
}
@@ -520,6 +541,9 @@ let Class = AutoType in {
}
let Class = DeducedTemplateSpecializationType in {
+ def : Property<"keyword", ElaboratedTypeKeyword> {
+ let Read = [{ node->getKeyword() }];
+ }
def : Property<"templateName", Optional<TemplateName>> {
let Read = [{ makeOptionalFromNullable(node->getTemplateName()) }];
}
@@ -533,97 +557,42 @@ let Class = DeducedTemplateSpecializationType in {
}
def : Creator<[{
- return ctx.getDeducedTemplateSpecializationType(
+ return ctx.getDeducedTemplateSpecializationType(keyword,
makeNullableFromOptional(templateName),
deducedType, dependent);
}]>;
}
let Class = TagType in {
- def : Property<"dependent", Bool> {
- let Read = [{ node->isDependentType() }];
+ def : Property<"IsCanonical", Bool> {
+ let Read = [{ node->isCanonicalUnqualified() }];
}
- def : Property<"declaration", DeclRef> {
- // We don't know which declaration was originally referenced here, and we
- // cannot reference a declaration that follows the use (because that can
- // introduce deserialization cycles), so conservatively generate a
- // reference to the first declaration.
- // FIXME: If this is a reference to a class template specialization, that
- // can still introduce a deserialization cycle.
- let Read = [{ node->getDecl()->getCanonicalDecl() }];
+ def : Property<"Keyword", ElaboratedTypeKeyword> {
+ let Conditional = [{ !IsCanonical }];
+ let Read = [{ node->getKeyword() }];
}
+ def : Property<"Qualifier", NestedNameSpecifier> {
+ let Conditional = [{ !IsCanonical }];
+ let Read = [{ node->getQualifier() }];
+ }
+ def : Property<"TD", TagDeclRef> { let Read = [{ node->getOriginalDecl() }]; }
}
let Class = EnumType in {
+ def : Property<"OwnsTag", Bool> { let Read = [{ node->isTagOwned() }]; }
def : Creator<[{
- QualType result = ctx.getEnumType(cast<EnumDecl>(declaration));
- if (dependent)
- const_cast<Type *>(result.getTypePtr())
- ->addDependence(TypeDependence::DependentInstantiation);
- return result;
+ return IsCanonical ? ctx.getCanonicalTagType(TD) : ctx.getTagType(*Keyword, *Qualifier, TD, OwnsTag);
}]>;
}
-
let Class = RecordType in {
+ def : Property<"OwnsTag", Bool> { let Read = [{ node->isTagOwned() }]; }
def : Creator<[{
- auto record = cast<RecordDecl>(declaration);
- QualType result = ctx.getRecordType(record);
- if (dependent)
- const_cast<Type *>(result.getTypePtr())
- ->addDependence(TypeDependence::DependentInstantiation);
- return result;
- }]>;
-}
-
-let Class = ElaboratedType in {
- def : Property<"keyword", ElaboratedTypeKeyword> {
- let Read = [{ node->getKeyword() }];
- }
- def : Property<"qualifier", NestedNameSpecifier> {
- let Read = [{ node->getQualifier() }];
- }
- def : Property<"namedType", QualType> {
- let Read = [{ node->getNamedType() }];
- }
- def : Property<"ownedTag", Optional<TagDeclRef>> {
- let Read = [{ makeOptionalFromPointer(
- const_cast<const TagDecl *>(node->getOwnedTagDecl())) }];
- }
-
- def : Creator<[{
- return ctx.getElaboratedType(keyword, qualifier, namedType,
- makePointerFromOptional(ownedTag));
+ return IsCanonical ? ctx.getCanonicalTagType(TD) : ctx.getTagType(*Keyword, *Qualifier, TD, OwnsTag);
}]>;
}
-
let Class = InjectedClassNameType in {
- def : Property<"declaration", DeclRef> {
- // FIXME: drilling down to the canonical declaration is what the
- // existing serialization code was doing, but it's not clear why.
- let Read = [{ node->getDecl()->getCanonicalDecl() }];
- }
- def : Property<"injectedSpecializationType", QualType> {
- let Read = [{ node->getInjectedSpecializationType() }];
- }
-
def : Creator<[{
- // FIXME: ASTContext::getInjectedClassNameType is not currently suitable
- // for AST reading, too much interdependencies.
- const Type *T = nullptr;
- auto typeDecl = cast<CXXRecordDecl>(declaration);
- for (auto *DI = typeDecl; DI; DI = DI->getPreviousDecl()) {
- if (const Type *existing = DI->getTypeForDecl()) {
- T = existing;
- break;
- }
- }
- if (!T) {
- T = new (ctx, TypeAlignment)
- InjectedClassNameType(typeDecl, injectedSpecializationType);
- for (auto *DI = typeDecl; DI; DI = DI->getPreviousDecl())
- DI->setTypeForDecl(T);
- }
- return QualType(T, 0);
+ return IsCanonical ? ctx.getCanonicalTagType(TD) : ctx.getTagType(*Keyword, *Qualifier, TD, /*OwnsTag=*/false);
}]>;
}
@@ -741,6 +710,9 @@ let Class = DependentAddressSpaceType in {
}
let Class = TemplateSpecializationType in {
+ def : Property<"keyword", ElaboratedTypeKeyword> {
+ let Read = [{ node->getKeyword() }];
+ }
def : Property<"templateName", TemplateName> {
let Read = [{ node->getTemplateName() }];
}
@@ -753,7 +725,7 @@ let Class = TemplateSpecializationType in {
}
def : Creator<[{
- return ctx.getTemplateSpecializationType(templateName, args, {}, UnderlyingType);
+ return ctx.getTemplateSpecializationType(keyword, templateName, args, {}, UnderlyingType);
}]>;
}
@@ -848,6 +820,12 @@ let Class = PackExpansionType in {
}]>;
}
+let Class = SubstPackType in {
+ def : Property<"replacementPack", TemplateArgument> {
+ let Read = [{ node->getArgumentPack() }];
+ }
+}
+
let Class = SubstTemplateTypeParmPackType in {
def : Property<"associatedDecl", DeclRef> {
let Read = [{ node->getAssociatedDecl() }];
@@ -855,12 +833,7 @@ let Class = SubstTemplateTypeParmPackType in {
def : Property<"Index", UInt32> {
let Read = [{ node->getIndex() }];
}
- def : Property<"Final", Bool> {
- let Read = [{ node->getFinal() }];
- }
- def : Property<"replacementPack", TemplateArgument> {
- let Read = [{ node->getArgumentPack() }];
- }
+ def : Property<"Final", Bool> { let Read = [{ node->getFinal() }]; }
def : Creator<[{
return ctx.getSubstTemplateTypeParmPackType(
@@ -868,6 +841,12 @@ let Class = SubstTemplateTypeParmPackType in {
}]>;
}
+let Class = SubstBuiltinTemplatePackType in {
+ def : Creator<[{
+ return ctx.getSubstBuiltinTemplatePack(replacementPack);
+ }]>;
+}
+
let Class = BuiltinType in {
def : Property<"kind", BuiltinTypeKind> {
let Read = [{ node->getKind() }];
diff --git a/clang/include/clang/ASTMatchers/ASTMatchFinder.h b/clang/include/clang/ASTMatchers/ASTMatchFinder.h
index 73cbcf1..2d36e8c 100644
--- a/clang/include/clang/ASTMatchers/ASTMatchFinder.h
+++ b/clang/include/clang/ASTMatchers/ASTMatchFinder.h
@@ -135,10 +135,15 @@ public:
llvm::StringMap<llvm::TimeRecord> &Records;
};
+ MatchFinderOptions() {}
+
/// Enables per-check timers.
///
/// It prints a report after match.
std::optional<Profiling> CheckProfiling;
+
+ /// Avoids matching declarations in system headers.
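+ ///
+ /// A minimal usage sketch (assuming the usual MatchFinder setup):
+ /// \code
+ ///   MatchFinderOptions Opts;
+ ///   Opts.IgnoreSystemHeaders = true;
+ ///   MatchFinder Finder(std::move(Opts));
+ /// \endcode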
+ bool IgnoreSystemHeaders{false};
};
MatchFinder(MatchFinderOptions Options = MatchFinderOptions());
diff --git a/clang/include/clang/ASTMatchers/ASTMatchers.h b/clang/include/clang/ASTMatchers/ASTMatchers.h
index 08c898f..f1d88a9 100644
--- a/clang/include/clang/ASTMatchers/ASTMatchers.h
+++ b/clang/include/clang/ASTMatchers/ASTMatchers.h
@@ -222,6 +222,19 @@ extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
+/// Matches shadow declarations introduced into a scope by a
+/// (resolved) using declaration.
+///
+/// Given
+/// \code
+/// namespace n { int f; }
+/// namespace declToImport { using n::f; }
+/// \endcode
+/// usingShadowDecl()
+/// matches \code f \endcode
+extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingShadowDecl>
+ usingShadowDecl;
+
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
@@ -3740,7 +3753,7 @@ extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
-/// Matcher<UnresolvedUsingType>
+/// Matcher<UnresolvedUsingType>, Matcher<UsingType>
inline internal::PolymorphicMatcher<
internal::HasDeclarationMatcher,
void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>>
@@ -4375,7 +4388,13 @@ AST_POLYMORPHIC_MATCHER_P(throughUsingDecl,
AST_POLYMORPHIC_SUPPORTED_TYPES(DeclRefExpr,
UsingType),
internal::Matcher<UsingShadowDecl>, Inner) {
- const NamedDecl *FoundDecl = Node.getFoundDecl();
+ const NamedDecl *FoundDecl;
+ if constexpr (std::is_same_v<NodeType, UsingType>) {
+ FoundDecl = Node.getDecl();
+ } else {
+ static_assert(std::is_same_v<NodeType, DeclRefExpr>);
+ FoundDecl = Node.getFoundDecl();
+ }
if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
return Inner.matches(*UsingDecl, Finder, Builder);
return false;
@@ -5642,8 +5661,8 @@ AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder);
}
-/// Matches the condition expression of an if statement, for loop,
-/// switch statement or conditional operator.
+/// Matches the condition expression of an if statement, for loop, while loop,
+/// do-while loop, switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
@@ -5720,16 +5739,29 @@ AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
return Builder->removeBindings(Predicate);
}
-/// Matches the condition variable statement in an if statement.
+/// Matches a declaration if it declares the same entity as the node previously
+/// bound to \p ID.
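+///
+/// An illustrative sketch (the binding name "fn" is hypothetical): after an
+/// earlier binding such as
+/// \code
+///   functionDecl().bind("fn")
+/// \endcode
+/// a later submatcher can require the same entity:
+/// \code
+///   functionDecl(declaresSameEntityAsBoundNode("fn"))
+/// \endcode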
+AST_MATCHER_P(Decl, declaresSameEntityAsBoundNode, std::string, ID) {
+ return Builder->removeBindings([&](const internal::BoundNodesMap &Nodes) {
+ return !clang::declaresSameEntity(&Node, Nodes.getNodeAs<Decl>(ID));
+ });
+}
+
+/// Matches the condition variable statement in an if statement, for loop,
+/// while loop or switch statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
+/// for (; A* a = GetAPointer(); ) {}
/// \endcode
/// hasConditionVariableStatement(...)
-/// matches 'A* a = GetAPointer()'.
-AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
- internal::Matcher<DeclStmt>, InnerMatcher) {
+/// matches 'A* a = GetAPointer()' in both statements.
+AST_POLYMORPHIC_MATCHER_P(hasConditionVariableStatement,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt,
+ WhileStmt,
+ SwitchStmt),
+ internal::Matcher<DeclStmt>, InnerMatcher) {
const DeclStmt* const DeclarationStatement =
Node.getConditionVariableDeclStmt();
return DeclarationStatement != nullptr &&
@@ -7004,37 +7036,6 @@ AST_POLYMORPHIC_MATCHER_P2(
InnerMatcher.matches(Args[Index], Finder, Builder);
}
-/// Matches C or C++ elaborated `TypeLoc`s.
-///
-/// Given
-/// \code
-/// struct s {};
-/// struct s ss;
-/// \endcode
-/// elaboratedTypeLoc()
-/// matches the `TypeLoc` of the variable declaration of `ss`.
-extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, ElaboratedTypeLoc>
- elaboratedTypeLoc;
-
-/// Matches elaborated `TypeLoc`s that have a named `TypeLoc` matching
-/// `InnerMatcher`.
-///
-/// Given
-/// \code
-/// template <typename T>
-/// class C {};
-/// class C<int> c;
-///
-/// class D {};
-/// class D d;
-/// \endcode
-/// elaboratedTypeLoc(hasNamedTypeLoc(templateSpecializationTypeLoc()));
-/// matches the `TypeLoc` of the variable declaration of `c`, but not `d`.
-AST_MATCHER_P(ElaboratedTypeLoc, hasNamedTypeLoc, internal::Matcher<TypeLoc>,
- InnerMatcher) {
- return InnerMatcher.matches(Node.getNamedTypeLoc(), Finder, Builder);
-}
-
/// Matches type \c bool.
///
/// Given
@@ -7301,7 +7302,7 @@ extern const AstTypeMatcher<DecltypeType> decltypeType;
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
-/// Matches \c DecltypeType or \c UsingType nodes to find the underlying type.
+/// Matches \c QualType nodes to find the underlying type.
///
/// Given
/// \code
@@ -7311,10 +7312,13 @@ AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
-/// Usable as: Matcher<DecltypeType>, Matcher<UsingType>
-AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
- AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType,
- UsingType));
+/// Usable as: Matcher<QualType>
+AST_MATCHER_P(Type, hasUnderlyingType, internal::Matcher<QualType>, Inner) {
+ QualType QT = Node.getLocallyUnqualifiedSingleStepDesugaredType();
+ if (QT == QualType(&Node, 0))
+ return false;
+ return Inner.matches(QT, Finder, Builder);
+}
/// Matches \c FunctionType nodes.
///
@@ -7593,27 +7597,7 @@ extern const AstTypeMatcher<RecordType> recordType;
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
-/// Matches types specified with an elaborated type keyword or with a
-/// qualified name.
-///
-/// Given
-/// \code
-/// namespace N {
-/// namespace M {
-/// class D {};
-/// }
-/// }
-/// class C {};
-///
-/// class C c;
-/// N::M::D d;
-/// \endcode
-///
-/// \c elaboratedType() matches the type of the variable declarations of both
-/// \c c and \c d.
-extern const AstTypeMatcher<ElaboratedType> elaboratedType;
-
-/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
+/// Matches Types whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
@@ -7628,34 +7612,14 @@ extern const AstTypeMatcher<ElaboratedType> elaboratedType;
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
-AST_MATCHER_P(ElaboratedType, hasQualifier,
- internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
- if (const NestedNameSpecifier *Qualifier = Node.getQualifier())
- return InnerMatcher.matches(*Qualifier, Finder, Builder);
+AST_MATCHER_P(Type, hasQualifier, internal::Matcher<NestedNameSpecifier>,
+ InnerMatcher) {
+ if (NestedNameSpecifier Qualifier = Node.getPrefix())
+ return InnerMatcher.matches(Qualifier, Finder, Builder);
return false;
}
-/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
-///
-/// Given
-/// \code
-/// namespace N {
-/// namespace M {
-/// class D {};
-/// }
-/// }
-/// N::M::D d;
-/// \endcode
-///
-/// \c elaboratedType(namesType(recordType(
-/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
-/// declaration of \c d.
-AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
- InnerMatcher) {
- return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
-}
-
/// Matches types specified through a using declaration.
///
/// Given
@@ -7824,7 +7788,7 @@ AST_MATCHER_FUNCTION_P_OVERLOAD(
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
internal::Matcher<QualType>, InnerMatcher) {
- if (!Node.getAsType())
+ if (Node.getKind() != NestedNameSpecifier::Kind::Type)
return false;
return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder);
}
@@ -7842,8 +7806,12 @@ AST_MATCHER_P(NestedNameSpecifier, specifiesType,
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
internal::Matcher<TypeLoc>, InnerMatcher) {
- return Node && Node.getNestedNameSpecifier()->getAsType() &&
- InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
+ if (!Node)
+ return false;
+ TypeLoc TL = Node.getAsTypeLoc();
+ if (!TL)
+ return false;
+ return InnerMatcher.matches(TL, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
@@ -7858,10 +7826,21 @@ AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
internal::Matcher<NestedNameSpecifier>, InnerMatcher,
0) {
- const NestedNameSpecifier *NextNode = Node.getPrefix();
+ NestedNameSpecifier NextNode = std::nullopt;
+ switch (Node.getKind()) {
+ case NestedNameSpecifier::Kind::Namespace:
+ NextNode = Node.getAsNamespaceAndPrefix().Prefix;
+ break;
+ case NestedNameSpecifier::Kind::Type:
+ NextNode = Node.getAsType()->getPrefix();
+ break;
+ default:
+ break;
+ }
+
if (!NextNode)
return false;
- return InnerMatcher.matches(*NextNode, Finder, Builder);
+ return InnerMatcher.matches(NextNode, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
@@ -7876,7 +7855,12 @@ AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
1) {
- NestedNameSpecifierLoc NextNode = Node.getPrefix();
+ NestedNameSpecifierLoc NextNode;
+ if (TypeLoc TL = Node.getAsTypeLoc())
+ NextNode = TL.getPrefix();
+ else
+ NextNode = Node.getAsNamespaceAndPrefix().Prefix;
+
if (!NextNode)
return false;
return InnerMatcher.matches(NextNode, Finder, Builder);
@@ -7894,9 +7878,13 @@ AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
internal::Matcher<NamespaceDecl>, InnerMatcher) {
- if (auto *NS = dyn_cast_if_present<NamespaceDecl>(Node.getAsNamespace()))
- return InnerMatcher.matches(*NS, Finder, Builder);
- return false;
+ if (Node.getKind() != NestedNameSpecifier::Kind::Namespace)
+ return false;
+ const auto *Namespace =
+ dyn_cast<NamespaceDecl>(Node.getAsNamespaceAndPrefix().Namespace);
+ if (!Namespace)
+ return false;
+ return InnerMatcher.matches(*Namespace, Finder, Builder);
}
/// Matches attributes.
@@ -8835,7 +8823,7 @@ AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) {
/// #pragma omp for
/// \endcode
///
-/// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches
+/// ``ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches
/// ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, ``OpenMPClauseKind`` parameter
diff --git a/clang/include/clang/ASTMatchers/ASTMatchersInternal.h b/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
index 5df2294..1ab6f11 100644
--- a/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
+++ b/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
@@ -1017,10 +1017,7 @@ private:
// First, for any types that have a declaration, extract the declaration and
// match on it.
if (const auto *S = dyn_cast<TagType>(&Node)) {
- return matchesDecl(S->getDecl(), Finder, Builder);
- }
- if (const auto *S = dyn_cast<InjectedClassNameType>(&Node)) {
- return matchesDecl(S->getDecl(), Finder, Builder);
+ return matchesDecl(S->getOriginalDecl(), Finder, Builder);
}
if (const auto *S = dyn_cast<TemplateTypeParmType>(&Node)) {
return matchesDecl(S->getDecl(), Finder, Builder);
@@ -1031,6 +1028,9 @@ private:
if (const auto *S = dyn_cast<UnresolvedUsingType>(&Node)) {
return matchesDecl(S->getDecl(), Finder, Builder);
}
+ if (const auto *S = dyn_cast<UsingType>(&Node)) {
+ return matchesDecl(S->getDecl(), Finder, Builder);
+ }
if (const auto *S = dyn_cast<ObjCObjectType>(&Node)) {
return matchesDecl(S->getInterface(), Finder, Builder);
}
@@ -1066,12 +1066,6 @@ private:
Builder);
}
- // FIXME: We desugar elaborated types. This makes the assumption that users
- // do never want to match on whether a type is elaborated - there are
- // arguments for both sides; for now, continue desugaring.
- if (const auto *S = dyn_cast<ElaboratedType>(&Node)) {
- return matchesSpecialized(S->desugar(), Finder, Builder);
- }
// Similarly types found via using declarations.
// These are *usually* meaningless sugar, and this matches the historical
// behavior prior to the introduction of UsingType.
@@ -1211,8 +1205,8 @@ using AdaptativeDefaultToTypes =
/// All types that are supported by HasDeclarationMatcher above.
using HasDeclarationSupportedTypes =
TypeList<CallExpr, CXXConstructExpr, CXXNewExpr, DeclRefExpr, EnumType,
- ElaboratedType, InjectedClassNameType, LabelStmt, AddrLabelExpr,
- MemberExpr, QualType, RecordType, TagType,
+ InjectedClassNameType, LabelStmt, AddrLabelExpr, MemberExpr,
+ QualType, RecordType, TagType, UsingType,
TemplateSpecializationType, TemplateTypeParmType, TypedefType,
UnresolvedUsingType, ObjCIvarRefExpr, ObjCInterfaceDecl>;
@@ -1789,7 +1783,7 @@ public:
private:
static DynTypedNode extract(const NestedNameSpecifierLoc &Loc) {
- return DynTypedNode::create(*Loc.getNestedNameSpecifier());
+ return DynTypedNode::create(Loc.getNestedNameSpecifier());
}
};
diff --git a/clang/include/clang/Analysis/Analyses/LifetimeSafety.h b/clang/include/clang/Analysis/Analyses/LifetimeSafety.h
index 1c00558..7e1bfc9 100644
--- a/clang/include/clang/Analysis/Analyses/LifetimeSafety.h
+++ b/clang/include/clang/Analysis/Analyses/LifetimeSafety.h
@@ -19,14 +19,35 @@
#define LLVM_CLANG_ANALYSIS_ANALYSES_LIFETIMESAFETY_H
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/ImmutableSet.h"
#include "llvm/ADT/StringMap.h"
#include <memory>
namespace clang::lifetimes {
+/// Enum to track the confidence level of a potential error.
+enum class Confidence {
+ None,
+ Maybe, // Reported as a potential error (-Wlifetime-safety-strict)
+ Definite // Reported as a definite error (-Wlifetime-safety-permissive)
+};
+
+class LifetimeSafetyReporter {
+public:
+ LifetimeSafetyReporter() = default;
+ virtual ~LifetimeSafetyReporter() = default;
+
+ virtual void reportUseAfterFree(const Expr *IssueExpr, const Expr *UseExpr,
+ SourceLocation FreeLoc,
+ Confidence Confidence) {}
+};
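+
+// A minimal reporter sketch (illustrative; the diagnostic wiring is assumed):
+//
+// class CollectingReporter : public LifetimeSafetyReporter {
+// public:
+//   void reportUseAfterFree(const Expr *IssueExpr, const Expr *UseExpr,
+//                           SourceLocation FreeLoc, Confidence C) override {
+//     // Record the report; C distinguishes strict (Maybe) from definite.
+//   }
+// };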
+
/// The main entry point for the analysis.
-void runLifetimeSafetyAnalysis(AnalysisDeclContext &AC);
+void runLifetimeSafetyAnalysis(AnalysisDeclContext &AC,
+ LifetimeSafetyReporter *Reporter);
namespace internal {
// Forward declarations of internal types.
@@ -53,6 +74,7 @@ template <typename Tag> struct ID {
IDBuilder.AddInteger(Value);
}
};
+
template <typename Tag>
inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, ID<Tag> ID) {
return OS << ID.Value;
@@ -78,7 +100,8 @@ using ProgramPoint = const Fact *;
/// encapsulates the various dataflow analyses.
class LifetimeSafetyAnalysis {
public:
- LifetimeSafetyAnalysis(AnalysisDeclContext &AC);
+ LifetimeSafetyAnalysis(AnalysisDeclContext &AC,
+ LifetimeSafetyReporter *Reporter);
~LifetimeSafetyAnalysis();
void run();
@@ -87,7 +110,7 @@ public:
LoanSet getLoansAtPoint(OriginID OID, ProgramPoint PP) const;
/// Returns the set of loans that have expired at a specific program point.
- LoanSet getExpiredLoansAtPoint(ProgramPoint PP) const;
+ std::vector<LoanID> getExpiredLoansAtPoint(ProgramPoint PP) const;
/// Finds the OriginID for a given declaration.
/// Returns a null optional if not found.
@@ -110,6 +133,7 @@ public:
private:
AnalysisDeclContext &AC;
+ LifetimeSafetyReporter *Reporter;
std::unique_ptr<LifetimeFactory> Factory;
std::unique_ptr<FactManager> FactMgr;
std::unique_ptr<LoanPropagationAnalysis> LoanPropagation;
@@ -118,4 +142,25 @@ private:
} // namespace internal
} // namespace clang::lifetimes
+namespace llvm {
+template <typename Tag>
+struct DenseMapInfo<clang::lifetimes::internal::ID<Tag>> {
+ using ID = clang::lifetimes::internal::ID<Tag>;
+
+ static inline ID getEmptyKey() {
+ return {DenseMapInfo<uint32_t>::getEmptyKey()};
+ }
+
+ static inline ID getTombstoneKey() {
+ return {DenseMapInfo<uint32_t>::getTombstoneKey()};
+ }
+
+ static unsigned getHashValue(const ID &Val) {
+ return DenseMapInfo<uint32_t>::getHashValue(Val.Value);
+ }
+
+ static bool isEqual(const ID &LHS, const ID &RHS) { return LHS == RHS; }
+};
+} // namespace llvm
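+
+// With this DenseMapInfo specialization, the strongly-typed IDs can key
+// DenseMaps directly, e.g. (a sketch):
+//   llvm::DenseMap<clang::lifetimes::internal::LoanID, unsigned> UseCounts;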
+
#endif // LLVM_CLANG_ANALYSIS_ANALYSES_LIFETIMESAFETY_H
diff --git a/clang/include/clang/Analysis/FlowSensitive/ASTOps.h b/clang/include/clang/Analysis/FlowSensitive/ASTOps.h
index 8c7ee86..a404b06 100644
--- a/clang/include/clang/Analysis/FlowSensitive/ASTOps.h
+++ b/clang/include/clang/Analysis/FlowSensitive/ASTOps.h
@@ -112,8 +112,14 @@ public:
// fields that are only used in these.
// Note: The operand of the `noexcept` operator is an unevaluated operand, but
// nevertheless it appears in the Clang CFG, so we don't exclude it here.
- bool TraverseDecltypeTypeLoc(DecltypeTypeLoc) override { return true; }
- bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc) override { return true; }
+ bool TraverseDecltypeTypeLoc(DecltypeTypeLoc,
+ bool TraverseQualifier) override {
+ return true;
+ }
+ bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc,
+ bool TraverseQualifier) override {
+ return true;
+ }
bool TraverseCXXTypeidExpr(CXXTypeidExpr *TIE) override {
if (TIE->isPotentiallyEvaluated())
return DynamicRecursiveASTVisitor::TraverseCXXTypeidExpr(TIE);
diff --git a/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h b/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
index 5be4a11..11042e8 100644
--- a/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
+++ b/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
@@ -42,6 +42,18 @@ struct ContextSensitiveOptions {
unsigned Depth = 2;
};
+/// A simple representation of essential elements of the logical context used in
+/// environments. Designed for import/export in applications that require
+/// serialization support.
+struct SimpleLogicalContext {
+ // Global invariant that applies for all definitions in the context.
+ const Formula *Invariant;
+ // Flow-condition tokens in the context.
+ llvm::DenseMap<Atom, const Formula *> TokenDefs;
+ // Dependencies between flow-condition definitions.
+ llvm::DenseMap<Atom, llvm::DenseSet<Atom>> TokenDeps;
+};
+
/// Owns objects that encompass the state of a program and stores context that
/// is used during dataflow analysis.
class DataflowAnalysisContext {
@@ -140,6 +152,15 @@ public:
/// Adds `Constraint` to the flow condition identified by `Token`.
void addFlowConditionConstraint(Atom Token, const Formula &Constraint);
+ /// Adds `Deps` to the dependencies of the flow condition identified by
+ /// `Token`. Intended for use in deserializing contexts. The formula alone
+ /// doesn't have enough information to indicate its deps.
+ void addFlowConditionDeps(Atom Token, const llvm::DenseSet<Atom> &Deps) {
+ // Avoid creating an entry for `Token` with an empty set.
+ if (!Deps.empty())
+ FlowConditionDeps[Token].insert(Deps.begin(), Deps.end());
+ }
+
/// Creates a new flow condition with the same constraints as the flow
/// condition identified by `Token` and returns its token.
Atom forkFlowCondition(Atom Token);
@@ -207,6 +228,14 @@ public:
return {};
}
+ /// Export the logical-context portions of `AC`, limited to the given target
+ /// flow-condition tokens.
+ SimpleLogicalContext
+ exportLogicalContext(llvm::DenseSet<dataflow::Atom> TargetTokens) const;
+
+ /// Initializes this context's "logical" components with `LC`.
+ void initLogicalContext(SimpleLogicalContext LC);
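+
+ // Round-trip sketch (names are assumptions; `Env` is an existing
+ // Environment whose flow condition should be preserved):
+ //   SimpleLogicalContext LC =
+ //       SrcCtx.exportLogicalContext({Env.getFlowConditionToken()});
+ //   DstCtx.initLogicalContext(std::move(LC));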
+
private:
friend class Environment;
@@ -228,6 +257,11 @@ private:
DataflowAnalysisContext(Solver &S, std::unique_ptr<Solver> &&OwnedSolver,
Options Opts);
+ /// Computes the transitive closure of dependencies of (flow-condition)
+ /// `Tokens`. That is, the set of flow-condition tokens reachable from
+ /// `Tokens` in the dependency graph.
+ llvm::DenseSet<Atom> collectDependencies(llvm::DenseSet<Atom> Tokens) const;
+
// Extends the set of modeled field declarations.
void addModeledFields(const FieldSet &Fields);
diff --git a/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h b/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
index 097ff2b..0767144 100644
--- a/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
+++ b/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
@@ -157,10 +157,18 @@ public:
};
/// Creates an environment that uses `DACtx` to store objects that encompass
- /// the state of a program.
+ /// the state of a program. `FlowConditionToken` sets the flow condition
+ /// associated with the environment. Generally, new environments should be
+ /// initialized with a fresh token, by using one of the other
+ /// constructors. This constructor is for specialized use, including
+ /// deserialization and delegation from other constructors.
+ Environment(DataflowAnalysisContext &DACtx, Atom FlowConditionToken)
+ : DACtx(&DACtx), FlowConditionToken(FlowConditionToken) {}
+
+ /// Creates an environment that uses `DACtx` to store objects that encompass
+ /// the state of a program. Populates a fresh atom as flow condition token.
explicit Environment(DataflowAnalysisContext &DACtx)
- : DACtx(&DACtx),
- FlowConditionToken(DACtx.arena().makeFlowConditionToken()) {}
+ : Environment(DACtx, DACtx.arena().makeFlowConditionToken()) {}
/// Creates an environment that uses `DACtx` to store objects that encompass
/// the state of a program, with `S` as the statement to analyze.
diff --git a/clang/include/clang/Analysis/FlowSensitive/Formula.h b/clang/include/clang/Analysis/FlowSensitive/Formula.h
index 0e63524..3959bc9 100644
--- a/clang/include/clang/Analysis/FlowSensitive/Formula.h
+++ b/clang/include/clang/Analysis/FlowSensitive/Formula.h
@@ -85,21 +85,17 @@ public:
}
using AtomNames = llvm::DenseMap<Atom, std::string>;
- // Produce a stable human-readable representation of this formula.
- // For example: (V3 | !(V1 & V2))
- // If AtomNames is provided, these override the default V0, V1... names.
+ /// Produces a stable human-readable representation of this formula.
+ /// For example: (V3 | !(V1 & V2))
+ /// If AtomNames is provided, these override the default V0, V1... names.
void print(llvm::raw_ostream &OS, const AtomNames * = nullptr) const;
- // Allocate Formulas using Arena rather than calling this function directly.
+ /// Allocate Formulas using Arena rather than calling this function directly.
static const Formula &create(llvm::BumpPtrAllocator &Alloc, Kind K,
ArrayRef<const Formula *> Operands,
unsigned Value = 0);
-private:
- Formula() = default;
- Formula(const Formula &) = delete;
- Formula &operator=(const Formula &) = delete;
-
+ /// Count of operands (sub-formulas) associated with Formulas of kind `K`.
static unsigned numOperands(Kind K) {
switch (K) {
case AtomRef:
@@ -116,6 +112,11 @@ private:
llvm_unreachable("Unhandled Formula::Kind enum");
}
+private:
+ Formula() = default;
+ Formula(const Formula &) = delete;
+ Formula &operator=(const Formula &) = delete;
+
Kind FormulaKind;
// Some kinds of formula have scalar values, e.g. AtomRef's atom number.
unsigned Value;
diff --git a/clang/include/clang/Analysis/FlowSensitive/FormulaSerialization.h b/clang/include/clang/Analysis/FlowSensitive/FormulaSerialization.h
new file mode 100644
index 0000000..119f93e
--- /dev/null
+++ b/clang/include/clang/Analysis/FlowSensitive/FormulaSerialization.h
@@ -0,0 +1,40 @@
+//=== FormulaSerialization.h - Formula De/Serialization support -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_FORMULA_SERIALIZATION_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_FORMULA_SERIALIZATION_H
+
+#include "clang/Analysis/FlowSensitive/Arena.h"
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <string>
+
+namespace clang::dataflow {
+
+/// Prints `F` to `OS` in a compact format, optimized for easy parsing
+/// (deserialization) rather than human use.
+void serializeFormula(const Formula &F, llvm::raw_ostream &OS);
+
+/// Parses `Str` to build a serialized Formula.
+/// @returns error on parse failure or if parsing does not fully consume `Str`.
+/// @param A used to construct the formula components.
+/// @param AtomMap maps serialized Atom identifiers (unsigned ints) to Atoms.
+/// This map is provided by the caller to enable consistency across
+/// multiple formulas in a single file.
+llvm::Expected<const Formula *>
+parseFormula(llvm::StringRef Str, Arena &A,
+ llvm::DenseMap<unsigned, Atom> &AtomMap);
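+
+// Round-trip sketch using the two entry points above (illustrative):
+//   std::string Buf;
+//   llvm::raw_string_ostream OS(Buf);
+//   serializeFormula(F, OS);
+//   llvm::DenseMap<unsigned, Atom> AtomMap;
+//   llvm::Expected<const Formula *> Parsed = parseFormula(Buf, A, AtomMap);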
+
+} // namespace clang::dataflow
+#endif
diff --git a/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h b/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h
index 8fcc6a4..534b9a0 100644
--- a/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h
+++ b/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h
@@ -17,6 +17,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Debug.h"
#include <cassert>
@@ -152,6 +153,11 @@ public:
return {SyntheticFields.begin(), SyntheticFields.end()};
}
+ /// Add a synthetic field, if none by that name is already present.
+ void addSyntheticField(llvm::StringRef Name, StorageLocation &Loc) {
+ SyntheticFields.insert({Name, &Loc});
+ }
+
/// Changes the child storage location for a field `D` of reference type.
/// All other fields cannot change their storage location and always retain
/// the storage location passed to the `RecordStorageLocation` constructor.
@@ -164,6 +170,11 @@ public:
Children[&D] = Loc;
}
+ /// Add a child storage location for a field `D`, if not already present.
+ void addChild(const ValueDecl &D, StorageLocation *Loc) {
+ Children.insert({&D, Loc});
+ }
+
llvm::iterator_range<FieldToLoc::const_iterator> children() const {
return {Children.begin(), Children.end()};
}
diff --git a/clang/include/clang/Basic/ABIVersions.def b/clang/include/clang/Basic/ABIVersions.def
new file mode 100644
index 0000000..f6524bc
--- /dev/null
+++ b/clang/include/clang/Basic/ABIVersions.def
@@ -0,0 +1,135 @@
+//===--- ABIVersions.def - Clang ABI Versions Database ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file enumerates Clang ABI versions.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file ABIVersions.def
+///
+/// In this file, each of the Clang ABI Versions is enumerated with the
+/// ABI_VER_MAJOR_MINOR, ABI_VER_MAJOR, or ABI_VER_LATEST macro.
+///
+/// ABI_VER_MAJOR is used when the minor version is 0 or can be omitted.
+///
+/// The first argument of ABI_VER_MAJOR_MINOR and ABI_VER_MAJOR is the major
+/// version.
+///
+/// The second argument of ABI_VER_MAJOR_MINOR is the minor version.
+///
+/// The first argument of ABI_VER_LATEST is an identifier `Latest`.
+
+#if defined(ABI_VER_MAJOR_MINOR) != defined(ABI_VER_MAJOR) || \
+ defined(ABI_VER_MAJOR) != defined(ABI_VER_LATEST)
+# error ABI_VER_MAJOR_MINOR, ABI_VER_MAJOR and ABI_VER_LATEST should be defined simultaneously
+#endif
+
+#ifndef ABI_VER_MAJOR_MINOR
+# define ABI_VER_MAJOR_MINOR(Major, Minor)
+#endif
+
+#ifndef ABI_VER_MAJOR
+# define ABI_VER_MAJOR(Major)
+#endif
+
+#ifndef ABI_VER_LATEST
+# define ABI_VER_LATEST(Latest)
+#endif
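+
+/// A typical consumer defines these macros before including this file; a
+/// sketch (the enumerator naming scheme is an assumption):
+///
+///   #define ABI_VER_MAJOR_MINOR(Major, Minor) Ver##Major##_##Minor,
+///   #define ABI_VER_MAJOR(Major) Ver##Major,
+///   #define ABI_VER_LATEST(Latest) Latest
+///   #include "clang/Basic/ABIVersions.def"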
+
+/// Attempt to be ABI-compatible with code generated by Clang 3.8.x
+/// (SVN r257626). This causes <1 x long long> to be passed in an integer
+/// register instead of an SSE register on x86_64.
+ABI_VER_MAJOR_MINOR(3, 8)
+
+/// Attempt to be ABI-compatible with code generated by Clang 4.0.x
+/// (SVN r291814). This causes move operations to be ignored when determining
+/// whether a class type can be passed or returned directly.
+ABI_VER_MAJOR(4)
+
+/// Attempt to be ABI-compatible with code generated by Clang 6.0.x
+/// (SVN r321711). This causes determination of whether a type is
+/// standard-layout to ignore collisions between empty base classes and between
+/// base classes and member subobjects, which affects whether we reuse base
+/// class tail padding in some ABIs.
+ABI_VER_MAJOR(6)
+
+/// Attempt to be ABI-compatible with code generated by Clang 7.0.x
+/// (SVN r338536). This causes alignof (C++) and _Alignof (C11) to be compatible
+/// with __alignof (i.e., return the preferred alignment) rather than returning
+/// the required alignment.
+ABI_VER_MAJOR(7)
+
+/// Attempt to be ABI-compatible with code generated by Clang 9.0.x
+/// (SVN r351319). This causes vectors of __int128 to be passed in memory
+/// instead of passing in multiple scalar registers on x86_64 on Linux and
+/// NetBSD.
+ABI_VER_MAJOR(9)
+
+/// Attempt to be ABI-compatible with code generated by Clang 11.0.x
+/// (git 2e10b7a39b93). This causes clang to pass unions with a 256-bit vector
+/// member on the stack instead of using registers, to not properly mangle
+/// substitutions for template names in some cases, and to mangle declaration
+/// template arguments without a cast to the parameter type even when that can
+/// lead to mangling collisions.
+ABI_VER_MAJOR(11)
+
+/// Attempt to be ABI-compatible with code generated by Clang 12.0.x
+/// (git 8e464dd76bef). This causes clang to mangle lambdas within global-scope
+/// inline variables incorrectly.
+ABI_VER_MAJOR(12)
+
+/// Attempt to be ABI-compatible with code generated by Clang 14.0.x.
+/// This causes clang to:
+/// - mangle dependent nested names incorrectly.
+/// - make trivial only those defaulted copy constructors with a
+/// parameter-type-list equivalent to the parameter-type-list of an implicit
+/// declaration.
+ABI_VER_MAJOR(14)
+
+/// Attempt to be ABI-compatible with code generated by Clang 15.0.x.
+/// This causes clang to:
+/// - Reverse the implementation for CWG692, CWG1395 and CWG1432.
+/// - pack non-POD members of packed structs.
+/// - consider classes with defaulted special member functions non-POD.
+ABI_VER_MAJOR(15)
+
+/// Attempt to be ABI-compatible with code generated by Clang 17.0.x.
+/// This causes clang to revert some fixes to its implementation of the Itanium
+/// name mangling scheme, with the consequence that overloaded function
+/// templates are mangled the same if they differ only by:
+/// - constraints
+/// - whether a non-type template parameter has a deduced type
+/// - the parameter list of a template template parameter
+ABI_VER_MAJOR(17)
+
+/// Attempt to be ABI-compatible with code generated by Clang 18.0.x.
+/// This causes clang to revert some fixes to the mangling of lambdas in the
+/// initializers of members of local classes.
+ABI_VER_MAJOR(18)
+
+/// Attempt to be ABI-compatible with code generated by Clang 19.0.x.
+/// This causes clang to:
+/// - Incorrectly mangle the 'base type' substitutions of the CXX construction
+/// vtable because it hasn't added 'type' as a substitution.
+/// - Skip mangling enclosing class templates of member-like friend function
+/// templates.
+/// - Ignore empty struct arguments in C++ mode for ARM, instead of passing
+/// them as if they had a size of 1 byte.
+ABI_VER_MAJOR(19)
+
+/// Attempt to be ABI-compatible with code generated by Clang 20.0.x.
+/// This causes clang to:
+/// - Incorrectly return C++ records in AVX registers on x86_64.
+ABI_VER_MAJOR(20)
+
+/// Conform to the underlying platform's C and C++ ABIs as closely as we can.
+ABI_VER_LATEST(Latest)
+
+#undef ABI_VER_MAJOR_MINOR
+#undef ABI_VER_MAJOR
+#undef ABI_VER_LATEST
diff --git a/clang/include/clang/Basic/AllDiagnosticKinds.inc b/clang/include/clang/Basic/AllDiagnosticKinds.inc
index a946b4a..2d08bb0 100644
--- a/clang/include/clang/Basic/AllDiagnosticKinds.inc
+++ b/clang/include/clang/Basic/AllDiagnosticKinds.inc
@@ -30,4 +30,5 @@
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
#include "clang/Basic/DiagnosticRefactoringKinds.inc"
#include "clang/Basic/DiagnosticInstallAPIKinds.inc"
+#include "clang/Basic/DiagnosticTrapKinds.inc"
// clang-format on
diff --git a/clang/include/clang/Basic/AllDiagnostics.h b/clang/include/clang/Basic/AllDiagnostics.h
index e64634c..78e5428 100644
--- a/clang/include/clang/Basic/AllDiagnostics.h
+++ b/clang/include/clang/Basic/AllDiagnostics.h
@@ -23,20 +23,21 @@
#include "clang/Basic/DiagnosticInstallAPI.h"
#include "clang/Basic/DiagnosticLex.h"
#include "clang/Basic/DiagnosticParse.h"
+#include "clang/Basic/DiagnosticRefactoring.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/DiagnosticSerialization.h"
-#include "clang/Basic/DiagnosticRefactoring.h"
+#include "clang/Basic/DiagnosticTrap.h"
namespace clang {
-template <size_t SizeOfStr, typename FieldType>
-class StringSizerHelper {
+template <size_t SizeOfStr, typename FieldType> class StringSizerHelper {
static_assert(SizeOfStr <= FieldType(~0U), "Field too small!");
+
public:
enum { Size = SizeOfStr };
};
} // end namespace clang
-#define STR_SIZE(str, fieldTy) clang::StringSizerHelper<sizeof(str)-1, \
- fieldTy>::Size
+#define STR_SIZE(str, fieldTy) \
+ clang::StringSizerHelper<sizeof(str) - 1, fieldTy>::Size
#endif
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index 30efb9f..29364c5 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -1632,6 +1632,13 @@ def DeviceKernel : DeclOrTypeAttr {
}];
}
+def SYCLExternal : InheritableAttr {
+ let Spellings = [CXX11<"clang", "sycl_external">];
+ let Subjects = SubjectList<[Function], ErrorDiag>;
+ let LangOpts = [SYCLHost, SYCLDevice];
+ let Documentation = [SYCLExternalDocs];
+}
+
def SYCLKernelEntryPoint : InheritableAttr {
let Spellings = [CXX11<"clang", "sycl_kernel_entry_point">];
let Args = [
@@ -3922,6 +3929,14 @@ def CFICanonicalJumpTable : InheritableAttr {
let SimpleHandler = 1;
}
+def CFISalt : TypeAttr {
+ let Spellings = [Clang<"cfi_salt">];
+ let Args = [StringArgument<"Salt">];
+ let Subjects = SubjectList<[FunctionLike], ErrorDiag>;
+ let Documentation = [CFISaltDocs];
+ let LangOpts = [COnly];
+}
+
// C/C++ Thread safety attributes (e.g. for deadlock, data race checking)
// Not all of these attributes will be given a [[]] spelling. The attributes
// which require access to function parameter names cannot use the [[]] spelling
@@ -4932,6 +4947,7 @@ def HLSLResourceBinding: InheritableAttr {
return SpaceNumber;
}
void setImplicitBindingOrderID(uint32_t Value) {
+ assert(!hasImplicitBindingOrderID() && "attribute already has implicit binding order id");
ImplicitBindingOrderID = Value;
}
bool hasImplicitBindingOrderID() const {
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td
index 2b095ab..2504841 100644
--- a/clang/include/clang/Basic/AttrDocs.td
+++ b/clang/include/clang/Basic/AttrDocs.td
@@ -476,6 +476,47 @@ The SYCL kernel in the previous code sample meets these expectations.
}];
}
+def SYCLExternalDocs : Documentation {
+ let Category = DocCatFunction;
+ let Heading = "sycl_external";
+ let Content = [{
+The ``sycl_external`` attribute indicates that a function defined in another
+translation unit may be called by a device function defined in the current
+translation unit or, if defined in the current translation unit, the function
+may be called by device functions defined in other translation units.
+The attribute is intended for use in the implementation of the ``SYCL_EXTERNAL``
+macro as specified in section 5.10.1, "SYCL functions and member functions
+linkage", of the SYCL 2020 specification.
+
+The attribute only appertains to functions and only those that meet the
+following requirements:
+
+* Has external linkage
+* Is not explicitly defined as deleted (an explicitly defaulted function
+ that is implicitly defined as deleted is permitted)
+
+The attribute shall be present on the first declaration of a function and
+may optionally be present on subsequent declarations.
+
+When compiling for a SYCL device target that does not support the generic
+address space, the function shall not specify a raw pointer or reference type
+as the return type or as a parameter type.
+See section 5.10, "SYCL offline linking", of the SYCL 2020 specification.
+The following examples demonstrate the use of this attribute:
+
+.. code-block:: c++
+
+ [[clang::sycl_external]] void Foo(); // Ok.
+
+ [[clang::sycl_external]] void Bar() { /* ... */ } // Ok.
+
+ [[clang::sycl_external]] extern void Baz(); // Ok.
+
+ [[clang::sycl_external]] static void Quux() { /* ... */ } // error: Quux() has internal linkage.
+
+ }];
+}
+
def SYCLKernelEntryPointDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
@@ -932,6 +973,21 @@ An example of how to use ``alloc_size``
assert(__builtin_object_size(a, 0) == 100);
}
+When ``-Walloc-size`` is enabled, this attribute allows the compiler to
+diagnose cases when the allocated memory is insufficient for the size of the
+type the returned pointer is cast to.
+
+.. code-block:: c
+
+ void *my_malloc(int a) __attribute__((alloc_size(1)));
+ void consumer_func(int *);
+
+ int main() {
+ int *ptr = my_malloc(sizeof(int)); // no warning
+ int *w = my_malloc(1); // warning: allocation of insufficient size '1' for type 'int' with size '4'
+ consumer_func(my_malloc(1)); // warning: allocation of insufficient size '1' for type 'int' with size '4'
+ }
+
.. Note:: This attribute works differently in clang than it does in GCC.
Specifically, clang will only trace ``const`` pointers (as above); we give up
on pointers that are not marked as ``const``. In the vast majority of cases,
@@ -3646,6 +3702,99 @@ make the function's CFI jump table canonical. See :ref:`the CFI documentation
}];
}
+def CFISaltDocs : Documentation {
+ let Category = DocCatFunction;
+ let Heading = "cfi_salt";
+ let Label = "langext-cfi_salt";
+ let Content = [{
+The ``cfi_salt`` attribute specifies a string literal that is used as a salt
+for Control-Flow Integrity (CFI) checks to distinguish between functions with
+the same type signature. This attribute can be applied to function declarations,
+function definitions, and function pointer typedefs.
+
+The attribute prevents function pointers from being replaced with pointers to
+functions that merely have a compatible type, a known CFI bypass vector.
+
+**Syntax:**
+
+* GNU-style: ``__attribute__((cfi_salt("<salt_string>")))``
+* C++11-style: ``[[clang::cfi_salt("<salt_string>")]]``
+
+**Usage:**
+
+The attribute takes a single string literal argument that serves as the salt.
+Functions or function types with different salt values will have different CFI
+hashes, even if they have identical type signatures.
+
+**Motivation:**
+
+In large codebases like the Linux kernel, there are often hundreds of functions
+with identical type signatures that are called indirectly:
+
+.. code-block::
+
+ 1662 functions with void (*)(void)
+ 1179 functions with int (*)(void)
+ ...
+
+By salting the CFI hashes, you can make CFI more robust by ensuring that
+functions intended for different purposes have distinct CFI identities.
+
+**Type Compatibility:**
+
+* Functions with different salt values are considered to have incompatible types
+* Function pointers with different salt values cannot be assigned to each other
+* All declarations of the same function must use the same salt value
+
+**Example:**
+
+.. code-block:: c
+
+ // Header file - define convenience macros
+ #define __cfi_salt(s) __attribute__((cfi_salt(s)))
+
+ // Typedef for regular function pointers
+ typedef int (*fptr_t)(void);
+
+ // Typedef for salted function pointers
+ typedef int (*fptr_salted_t)(void) __cfi_salt("pepper");
+
+ struct widget_ops {
+ fptr_t init; // Regular CFI
+ fptr_salted_t exec; // Salted CFI
+ fptr_t cleanup; // Regular CFI
+ };
+
+ // Function implementations
+ static int widget_init(void) { return 0; }
+ static int widget_exec(void) __cfi_salt("pepper") { return 1; }
+ static int widget_cleanup(void) { return 0; }
+
+ static struct widget_ops ops = {
+ .init = widget_init, // OK - compatible types
+ .exec = widget_exec, // OK - both use "pepper" salt
+ .cleanup = widget_cleanup // OK - compatible types
+ };
+
+ // Using C++11 attribute syntax
+ void secure_callback(void) [[clang::cfi_salt("secure")]];
+
+ // This would cause a compilation error:
+ // fptr_t bad_ptr = widget_exec; // Error: incompatible types
+
+**Notes:**
+
+* The salt string can contain any non-NUL ASCII characters, including spaces
+ and quotes
+* This attribute only applies to function types; using it on non-function
+ types will generate a warning
+* All declarations and definitions of the same function must use identical
+ salt values
+* The attribute affects type compatibility during compilation and CFI hash
+ generation during code generation
+ }];
+}
+
def DocCatTypeSafety : DocumentationCategory<"Type Safety Checking"> {
let Content = [{
Clang supports additional attributes to enable checking type safety properties
diff --git a/clang/include/clang/Basic/BuiltinTemplates.td b/clang/include/clang/Basic/BuiltinTemplates.td
index 5b9672b..504405a 100644
--- a/clang/include/clang/Basic/BuiltinTemplates.td
+++ b/clang/include/clang/Basic/BuiltinTemplates.td
@@ -62,3 +62,7 @@ def __builtin_common_type : CPlusPlusBuiltinTemplate<
// typename ...Operands>
def __hlsl_spirv_type : HLSLBuiltinTemplate<
[Uint32T, Uint32T, Uint32T, Class<"Operands", /*is_variadic=*/1>]>;
+
+// template <class ...Args>
+def __builtin_dedup_pack
+ : CPlusPlusBuiltinTemplate<[Class<"Args", /*is_variadic=*/1>]>;
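+
+// Illustrative semantics (an assumption, not normative):
+// __builtin_dedup_pack<int, int, float> yields the pack <int, float>,
+// dropping duplicate types while preserving first occurrences.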
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index c81714e..af0e824 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -1232,6 +1232,30 @@ def ConvertVector : Builtin {
let Prototype = "void(...)";
}
+def MaskedLoad : Builtin {
+ let Spellings = ["__builtin_masked_load"];
+ let Attributes = [NoThrow, CustomTypeChecking];
+ let Prototype = "void(...)";
+}
+
+def MaskedStore : Builtin {
+ let Spellings = ["__builtin_masked_store"];
+ let Attributes = [NoThrow, CustomTypeChecking];
+ let Prototype = "void(...)";
+}
+
+def MaskedExpandLoad : Builtin {
+ let Spellings = ["__builtin_masked_expand_load"];
+ let Attributes = [NoThrow, CustomTypeChecking];
+ let Prototype = "void(...)";
+}
+
+def MaskedCompressStore : Builtin {
+ let Spellings = ["__builtin_masked_compress_store"];
+ let Attributes = [NoThrow, CustomTypeChecking];
+ let Prototype = "void(...)";
+}
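+
+// Usage sketch (argument order is an assumption here; these builtins use
+// custom type checking over vector/mask operands, with v8b/v8i as 8-wide
+// bool/int vectors):
+//   v8i v = __builtin_masked_load(mask, ptr);
+//   __builtin_masked_store(mask, v, ptr);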
+
def AllocaUninitialized : Builtin {
let Spellings = ["__builtin_alloca_uninitialized"];
let Attributes = [FunctionWithBuiltinPrefix, NoThrow];
@@ -1264,7 +1288,7 @@ def NondetermenisticValue : Builtin {
def ElementwiseAbs : Builtin {
let Spellings = ["__builtin_elementwise_abs"];
- let Attributes = [NoThrow, Const, CustomTypeChecking];
+ let Attributes = [NoThrow, Const, CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
@@ -1300,13 +1324,13 @@ def ElementwiseBitreverse : Builtin {
def ElementwiseMax : Builtin {
let Spellings = ["__builtin_elementwise_max"];
- let Attributes = [NoThrow, Const, CustomTypeChecking];
+ let Attributes = [NoThrow, Const, CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
def ElementwiseMin : Builtin {
let Spellings = ["__builtin_elementwise_min"];
- let Attributes = [NoThrow, Const, CustomTypeChecking];
+ let Attributes = [NoThrow, Const, CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
@@ -1498,7 +1522,7 @@ def ElementwiseCopysign : Builtin {
def ElementwiseFma : Builtin {
let Spellings = ["__builtin_elementwise_fma"];
- let Attributes = [NoThrow, Const, CustomTypeChecking];
+ let Attributes = [NoThrow, Const, CustomTypeChecking, Constexpr];
let Prototype = "void(...)";
}
@@ -1514,6 +1538,30 @@ def ElementwiseSubSat : Builtin {
let Prototype = "void(...)";
}
+def ElementwiseFshl : Builtin {
+ let Spellings = ["__builtin_elementwise_fshl"];
+ let Attributes = [NoThrow, Const, CustomTypeChecking];
+ let Prototype = "void(...)";
+}
+
+def ElementwiseFshr : Builtin {
+ let Spellings = ["__builtin_elementwise_fshr"];
+ let Attributes = [NoThrow, Const, CustomTypeChecking];
+ let Prototype = "void(...)";
+}
+
+def ElementwiseCtlz : Builtin {
+ let Spellings = ["__builtin_elementwise_ctlz"];
+ let Attributes = [NoThrow, Const, CustomTypeChecking, Constexpr];
+ let Prototype = "void(...)";
+}
+
+def ElementwiseCttz : Builtin {
+ let Spellings = ["__builtin_elementwise_cttz"];
+ let Attributes = [NoThrow, Const, CustomTypeChecking, Constexpr];
+ let Prototype = "void(...)";
+}
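+
+// Illustrative element-wise uses (semantics assumed to mirror llvm.fshl and
+// llvm.ctlz):
+//   v4u rot = __builtin_elementwise_fshl(x, x, amt); // rotate left
+//   v4u lz = __builtin_elementwise_ctlz(x);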
+
def ReduceMax : Builtin {
let Spellings = ["__builtin_reduce_max"];
let Attributes = [NoThrow, Const, CustomTypeChecking, Constexpr];
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index b16d4a2..6f5d1e0 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -183,6 +183,9 @@ TARGET_BUILTIN(__builtin_amdgcn_struct_ptr_buffer_load_lds, "vQbv*3IUiiiiIiIi",
TARGET_BUILTIN(__builtin_amdgcn_ballot_w32, "ZUib", "nc", "wavefrontsize32")
BUILTIN(__builtin_amdgcn_ballot_w64, "WUib", "nc")
+TARGET_BUILTIN(__builtin_amdgcn_inverse_ballot_w32, "bZUi", "nc", "wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_inverse_ballot_w64, "bWUi", "nc", "wavefrontsize64")
+
// Deprecated intrinsics in favor of __builtin_amdgn_ballot_{w32|w64}
BUILTIN(__builtin_amdgcn_uicmp, "WUiUiUiIi", "nc")
BUILTIN(__builtin_amdgcn_uicmpl, "WUiWUiWUiIi", "nc")
@@ -503,6 +506,9 @@ TARGET_BUILTIN(__builtin_amdgcn_s_barrier_signal, "vIi", "n", "gfx12-insts")
TARGET_BUILTIN(__builtin_amdgcn_s_barrier_signal_var, "vv*i", "n", "gfx12-insts")
TARGET_BUILTIN(__builtin_amdgcn_s_barrier_wait, "vIs", "n", "gfx12-insts")
TARGET_BUILTIN(__builtin_amdgcn_s_barrier_signal_isfirst, "bIi", "n", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_barrier_init, "vv*i", "n", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_barrier_join, "vv*", "n", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_barrier_leave, "vIs", "n", "gfx12-insts")
TARGET_BUILTIN(__builtin_amdgcn_s_get_barrier_state, "Uii", "n", "gfx12-insts")
TARGET_BUILTIN(__builtin_amdgcn_s_get_named_barrier_state, "Uiv*", "n", "gfx12-insts")
TARGET_BUILTIN(__builtin_amdgcn_s_prefetch_data, "vvC*Ui", "nc", "gfx12-insts")
diff --git a/clang/include/clang/Basic/BuiltinsPPC.def b/clang/include/clang/Basic/BuiltinsPPC.def
index e7d6741..22926b6 100644
--- a/clang/include/clang/Basic/BuiltinsPPC.def
+++ b/clang/include/clang/Basic/BuiltinsPPC.def
@@ -580,6 +580,8 @@ TARGET_BUILTIN(__builtin_ppc_bcdsub_p, "iiV16UcV16Uc", "",
"isa-v207-instructions")
// P9 Binary-coded decimal (BCD) builtins.
+TARGET_BUILTIN(__builtin_ppc_bcdcopysign, "V16UcV16UcV16Uc", "", "power9-vector")
+TARGET_BUILTIN(__builtin_ppc_bcdsetsign, "V16UcV16UcUc", "t", "power9-vector")
TARGET_BUILTIN(__builtin_ppc_national2packed, "V16UcV16UcUc", "t", "power9-vector")
TARGET_BUILTIN(__builtin_ppc_packed2national, "V16UcV16Uc", "", "power9-vector")
TARGET_BUILTIN(__builtin_ppc_packed2zoned, "V16UcV16UcUc", "t", "power9-vector")
@@ -1098,6 +1100,10 @@ UNALIASED_CUSTOM_BUILTIN(mma_dmmr, "vW1024*W1024*", false,
"mma,isa-future-instructions")
UNALIASED_CUSTOM_BUILTIN(mma_dmxor, "vW1024*W1024*", true,
"mma,isa-future-instructions")
+UNALIASED_CUSTOM_BUILTIN(mma_disassemble_dmr, "vv*W1024*", false,
+ "mma,isa-future-instructions")
+UNALIASED_CUSTOM_BUILTIN(mma_build_dmr, "vW1024*VVVVVVVV", false,
+ "mma,isa-future-instructions")
// MMA builtins with positive/negative multiply/accumulate.
UNALIASED_CUSTOM_MMA_BUILTIN(mma_xvf16ger2, "vW512*VV",
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index a4acc72..acd8f70 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -93,13 +93,11 @@ let Attributes = [Const, NoThrow, RequiredVectorWidth<128>] in {
}
let Features = "sse2" in {
- def pmulhw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def pavgb128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>)">;
def pavgw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def packsswb128 : X86Builtin<"_Vector<16, char>(_Vector<8, short>, _Vector<8, short>)">;
def packssdw128 : X86Builtin<"_Vector<8, short>(_Vector<4, int>, _Vector<4, int>)">;
def packuswb128 : X86Builtin<"_Vector<16, char>(_Vector<8, short>, _Vector<8, short>)">;
- def pmulhuw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def vec_ext_v2di : X86Builtin<"long long int(_Vector<2, long long int>, _Constant int)">;
def vec_ext_v4si : X86Builtin<"int(_Vector<4, int>, _Constant int)">;
def vec_ext_v4sf : X86Builtin<"float(_Vector<4, float>, _Constant int)">;
@@ -107,6 +105,11 @@ let Attributes = [Const, NoThrow, RequiredVectorWidth<128>] in {
def vec_set_v8hi : X86Builtin<"_Vector<8, short>(_Vector<8, short>, short, _Constant int)">;
}
+ let Features = "sse2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+ def pmulhw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
+ def pmulhuw128 : X86Builtin<"_Vector<8, unsigned short>(_Vector<8, unsigned short>, _Vector<8, unsigned short>)">;
+ }
+
let Features = "sse3" in {
foreach Op = ["addsub", "hadd", "hsub"] in {
def Op#ps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>)">;
@@ -265,7 +268,6 @@ let Header = "emmintrin.h", Attributes = [NoThrow, RequireDeclaration] in {
}
let Features = "sse2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def pmuludq128 : X86Builtin<"_Vector<2, long long int>(_Vector<4, int>, _Vector<4, int>)">;
def psraw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def psrad128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
def psrlw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
@@ -274,17 +276,25 @@ let Features = "sse2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] i
def psllw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def pslld128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
def psllq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">;
+ def pmaddwd128 : X86Builtin<"_Vector<4, int>(_Vector<8, short>, _Vector<8, short>)">;
+ def pslldqi128_byteshift : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Constant int)">;
+ def psrldqi128_byteshift : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Constant int)">;
+}
+
+let Features = "sse2",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+ def pmuludq128 : X86Builtin<"_Vector<2, long long int>(_Vector<4, int>, _Vector<4, int>)">;
+
def psllwi128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, int)">;
def pslldi128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, int)">;
def psllqi128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, int)">;
+
def psrlwi128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, int)">;
def psrldi128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, int)">;
def psrlqi128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, int)">;
+
def psrawi128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, int)">;
def psradi128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, int)">;
- def pmaddwd128 : X86Builtin<"_Vector<4, int>(_Vector<8, short>, _Vector<8, short>)">;
- def pslldqi128_byteshift : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Constant int)">;
- def psrldqi128_byteshift : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Constant int)">;
}
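The immediate-count shifts join the same constexpr group, so lane-wise shifts can be checked at compile time. A small sketch under the same SSE2 assumption:

    typedef short v8s __attribute__((vector_size(16)));
    constexpr v8s v = {1, 2, 3, 4, 5, 6, 7, 8};
    constexpr v8s r = __builtin_ia32_psllwi128(v, 3);  // each lane shifted left by 3
    static_assert(r[0] == 8 && r[7] == 64, "");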
let Features = "sse3", Attributes = [NoThrow] in {
@@ -309,7 +319,6 @@ let Features = "sse4.1", Attributes = [NoThrow, Const, RequiredVectorWidth<128>]
def blendvpd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, double>, _Vector<2, double>)">;
def blendvps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Vector<4, float>)">;
def packusdw128 : X86Builtin<"_Vector<8, short>(_Vector<4, int>, _Vector<4, int>)">;
- def pmuldq128 : X86Builtin<"_Vector<2, long long int>(_Vector<4, int>, _Vector<4, int>)">;
def roundps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Constant int)">;
def roundss : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Constant int)">;
def roundsd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, double>, _Constant int)">;
@@ -326,6 +335,10 @@ let Features = "sse4.1", Attributes = [NoThrow, Const, RequiredVectorWidth<128>]
def vec_set_v4si : X86Builtin<"_Vector<4, int>(_Vector<4, int>, int, _Constant int)">;
}
+let Features = "sse4.1", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+ def pmuldq128 : X86Builtin<"_Vector<2, long long int>(_Vector<4, int>, _Vector<4, int>)">;
+}
+
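pmuldq128 multiplies the sign-extended even-indexed 32-bit lanes into 64-bit products; now that it is Constexpr, that behavior can be asserted directly. A sketch, assuming -msse4.1:

    typedef int v4i __attribute__((vector_size(16)));
    typedef long long v2ll __attribute__((vector_size(16)));
    constexpr v4i a = {-2, 99, 3, 99};  // odd lanes are ignored
    constexpr v4i b = {5, 99, 7, 99};
    constexpr v2ll p = __builtin_ia32_pmuldq128(a, b);
    static_assert(p[0] == -10 && p[1] == 21, "sign-extended products of even lanes");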
let Features = "sse4.2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
def pcmpistrm128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>, _Constant char)">;
def pcmpistri128 : X86Builtin<"int(_Vector<16, char>, _Vector<16, char>, _Constant char)">;
@@ -526,28 +539,22 @@ let Features = "avx", Attributes = [NoThrow] in {
let Features = "avx", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
def lddqu256 : X86Builtin<"_Vector<32, char>(char const *)">;
-}
-let Features = "avx", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
- def maskloadpd : X86Builtin<"_Vector<2, double>(_Vector<2, double const *>, _Vector<2, long long int>)">;
- def maskloadps : X86Builtin<"_Vector<4, float>(_Vector<4, float const *>, _Vector<4, int>)">;
-}
-
-let Features = "avx", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
def maskloadpd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double const *>, _Vector<4, long long int>)">;
def maskloadps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float const *>, _Vector<8, int>)">;
+
+ def maskstorepd256 : X86Builtin<"void(_Vector<4, double *>, _Vector<4, long long int>, _Vector<4, double>)">;
+ def maskstoreps256 : X86Builtin<"void(_Vector<8, float *>, _Vector<8, int>, _Vector<8, float>)">;
}
let Features = "avx", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
+ def maskloadpd : X86Builtin<"_Vector<2, double>(_Vector<2, double const *>, _Vector<2, long long int>)">;
+ def maskloadps : X86Builtin<"_Vector<4, float>(_Vector<4, float const *>, _Vector<4, int>)">;
+
def maskstorepd : X86Builtin<"void(_Vector<2, double *>, _Vector<2, long long int>, _Vector<2, double>)">;
def maskstoreps : X86Builtin<"void(_Vector<4, float *>, _Vector<4, int>, _Vector<4, float>)">;
}
-let Features = "avx", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
- def maskstorepd256 : X86Builtin<"void(_Vector<4, double *>, _Vector<4, long long int>, _Vector<4, double>)">;
- def maskstoreps256 : X86Builtin<"void(_Vector<8, float *>, _Vector<8, int>, _Vector<8, float>)">;
-}
-
let Features = "avx", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
def vec_ext_v32qi : X86Builtin<"char(_Vector<32, char>, _Constant int)">;
def vec_ext_v16hi : X86Builtin<"short(_Vector<16, short>, _Constant int)">;
@@ -577,11 +584,7 @@ let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i
def pmaddubsw256 : X86Builtin<"_Vector<16, short>(_Vector<32, char>, _Vector<32, char>)">;
def pmaddwd256 : X86Builtin<"_Vector<8, int>(_Vector<16, short>, _Vector<16, short>)">;
def pmovmskb256 : X86Builtin<"int(_Vector<32, char>)">;
- def pmuldq256 : X86Builtin<"_Vector<4, long long int>(_Vector<8, int>, _Vector<8, int>)">;
def pmulhrsw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
- def pmulhuw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
- def pmulhw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
- def pmuludq256 : X86Builtin<"_Vector<4, long long int>(_Vector<8, int>, _Vector<8, int>)">;
def psadbw256 : X86Builtin<"_Vector<4, long long int>(_Vector<32, char>, _Vector<32, char>)">;
def pshufb256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>)">;
def pshufd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Constant int)">;
@@ -590,23 +593,15 @@ let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i
def psignb256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>)">;
def psignw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
def psignd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>)">;
- def psllwi256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, int)">;
def psllw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<8, short>)">;
def pslldqi256_byteshift : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Constant int)">;
- def pslldi256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int)">;
def pslld256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<4, int>)">;
- def psllqi256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, int)">;
def psllq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>)">;
- def psrawi256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, int)">;
def psraw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<8, short>)">;
- def psradi256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int)">;
def psrad256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<4, int>)">;
def psrldqi256_byteshift : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Constant int)">;
- def psrlwi256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, int)">;
def psrlw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<8, short>)">;
- def psrldi256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int)">;
def psrld256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<4, int>)">;
- def psrlqi256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, int)">;
def psrlq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>)">;
def pblendd128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>, _Constant int)">;
def pblendd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>, _Constant int)">;
@@ -619,128 +614,73 @@ let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i
def insert128i256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>, _Constant int)">;
}
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
- def maskloadd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int const *>, _Vector<8, int>)">;
- def maskloadq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int const *>, _Vector<4, long long int>)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
- def maskloadd : X86Builtin<"_Vector<4, int>(_Vector<4, int const *>, _Vector<4, int>)">;
- def maskloadq : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int const *>, _Vector<2, long long int>)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
- def maskstored256 : X86Builtin<"void(_Vector<8, int *>, _Vector<8, int>, _Vector<8, int>)">;
- def maskstoreq256 : X86Builtin<"void(_Vector<4, long long int *>, _Vector<4, long long int>, _Vector<4, long long int>)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
- def maskstored : X86Builtin<"void(_Vector<4, int *>, _Vector<4, int>, _Vector<4, int>)">;
- def maskstoreq : X86Builtin<"void(_Vector<2, long long int *>, _Vector<2, long long int>, _Vector<2, long long int>)">;
-}
+let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
+ def pmuldq256 : X86Builtin<"_Vector<4, long long int>(_Vector<8, int>, _Vector<8, int>)">;
+ def pmuludq256 : X86Builtin<"_Vector<4, long long int>(_Vector<8, int>, _Vector<8, int>)">;
-let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def psllv8si : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>)">;
-}
+ def psllwi256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, int)">;
+ def pslldi256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int)">;
+ def psllqi256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, int)">;
-let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def psllv4si : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
-}
+ def psrlwi256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, int)">;
+ def psrldi256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int)">;
+ def psrlqi256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, int)">;
-let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def psllv4di : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>)">;
-}
+ def psrawi256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, int)">;
+ def psradi256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int)">;
-let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def psllv2di : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">;
-}
+ def pmulhuw256 : X86Builtin<"_Vector<16, unsigned short>(_Vector<16, unsigned short>, _Vector<16, unsigned short>)">;
+ def pmulhw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
-let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+ def psllv8si : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>)">;
def psrav8si : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def psrav4si : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
def psrlv8si : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def psrlv4si : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+ def psllv4di : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>)">;
def psrlv4di : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>)">;
}
-let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+ def psllv4si : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
+ def psrav4si : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
+ def psrlv4si : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
+ def psllv2di : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">;
def psrlv2di : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">;
}
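The AVX2 variable-count shifts (psllv/psrav/psrlv) are likewise constexpr now, each lane taking its own shift count. A sketch, assuming -mavx2:

    typedef int v4i __attribute__((vector_size(16)));
    constexpr v4i ones = {1, 1, 1, 1};
    constexpr v4i counts = {0, 1, 2, 3};
    constexpr v4i r = __builtin_ia32_psllv4si(ones, counts);
    static_assert(r[0] == 1 && r[3] == 8, "each lane uses its own count");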
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
- def gatherd_pd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, double const *, _Vector<4, int>, _Vector<2, double>, _Constant char)">;
-}
-
let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
- def gatherd_pd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, double const *, _Vector<4, int>, _Vector<4, double>, _Constant char)">;
-}
+ def maskloadd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int const *>, _Vector<8, int>)">;
+ def maskloadq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int const *>, _Vector<4, long long int>)">;
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
- def gatherq_pd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, double const *, _Vector<2, long long int>, _Vector<2, double>, _Constant char)">;
-}
+ def maskstored256 : X86Builtin<"void(_Vector<8, int *>, _Vector<8, int>, _Vector<8, int>)">;
+ def maskstoreq256 : X86Builtin<"void(_Vector<4, long long int *>, _Vector<4, long long int>, _Vector<4, long long int>)">;
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
+ def gatherd_pd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, double const *, _Vector<4, int>, _Vector<4, double>, _Constant char)">;
def gatherq_pd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, double const *, _Vector<4, long long int>, _Vector<4, double>, _Constant char)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
- def gatherd_ps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, float const *, _Vector<4, int>, _Vector<4, float>, _Constant char)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
def gatherd_ps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, float const *, _Vector<8, int>, _Vector<8, float>, _Constant char)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
- def gatherq_ps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, float const *, _Vector<2, long long int>, _Vector<4, float>, _Constant char)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
def gatherq_ps256 : X86Builtin<"_Vector<4, float>(_Vector<4, float>, float const *, _Vector<4, long long int>, _Vector<4, float>, _Constant char)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
- def gatherd_q : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, long long int const *, _Vector<4, int>, _Vector<2, long long int>, _Constant char)">;
-}
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
def gatherd_q256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, long long int const *, _Vector<4, int>, _Vector<4, long long int>, _Constant char)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
- def gatherq_q : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, long long int const *, _Vector<2, long long int>, _Vector<2, long long int>, _Constant char)">;
-}
-
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
def gatherq_q256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, long long int const *, _Vector<4, long long int>, _Vector<4, long long int>, _Constant char)">;
+ def gatherd_d256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int const *, _Vector<8, int>, _Vector<8, int>, _Constant char)">;
+ def gatherq_d256 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, int const *, _Vector<4, long long int>, _Vector<4, int>, _Constant char)">;
}
let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
- def gatherd_d : X86Builtin<"_Vector<4, int>(_Vector<4, int>, int const *, _Vector<4, int>, _Vector<4, int>, _Constant char)">;
-}
+ def maskloadd : X86Builtin<"_Vector<4, int>(_Vector<4, int const *>, _Vector<4, int>)">;
+ def maskloadq : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int const *>, _Vector<2, long long int>)">;
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
- def gatherd_d256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int const *, _Vector<8, int>, _Vector<8, int>, _Constant char)">;
-}
+ def maskstored : X86Builtin<"void(_Vector<4, int *>, _Vector<4, int>, _Vector<4, int>)">;
+ def maskstoreq : X86Builtin<"void(_Vector<2, long long int *>, _Vector<2, long long int>, _Vector<2, long long int>)">;
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
- def gatherq_d : X86Builtin<"_Vector<4, int>(_Vector<4, int>, int const *, _Vector<2, long long int>, _Vector<4, int>, _Constant char)">;
-}
+ def gatherd_pd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, double const *, _Vector<4, int>, _Vector<2, double>, _Constant char)">;
+ def gatherq_pd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, double const *, _Vector<2, long long int>, _Vector<2, double>, _Constant char)">;
+ def gatherd_ps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, float const *, _Vector<4, int>, _Vector<4, float>, _Constant char)">;
+ def gatherq_ps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, float const *, _Vector<2, long long int>, _Vector<4, float>, _Constant char)">;
-let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
- def gatherq_d256 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, int const *, _Vector<4, long long int>, _Vector<4, int>, _Constant char)">;
+ def gatherd_q : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, long long int const *, _Vector<4, int>, _Vector<2, long long int>, _Constant char)">;
+ def gatherq_q : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, long long int const *, _Vector<2, long long int>, _Vector<2, long long int>, _Constant char)">;
+ def gatherd_d : X86Builtin<"_Vector<4, int>(_Vector<4, int>, int const *, _Vector<4, int>, _Vector<4, int>, _Constant char)">;
+ def gatherq_d : X86Builtin<"_Vector<4, int>(_Vector<4, int>, int const *, _Vector<2, long long int>, _Vector<4, int>, _Constant char)">;
}
let Features = "f16c", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
@@ -751,14 +691,6 @@ let Features = "f16c", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i
def vcvtps2ph256 : X86Builtin<"_Vector<8, short>(_Vector<8, float>, _Constant int)">;
}
-let Features = "f16c", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vcvtph2ps : X86Builtin<"_Vector<4, float>(_Vector<8, short>)">;
-}
-
-let Features = "f16c", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vcvtph2ps256 : X86Builtin<"_Vector<8, float>(_Vector<8, short>)">;
-}
-
let Features = "rdrnd", Attributes = [NoThrow] in {
def rdrand16_step : X86Builtin<"unsigned int(unsigned short *)">;
def rdrand32_step : X86Builtin<"unsigned int(unsigned int *)">;
@@ -878,11 +810,6 @@ let Features = "sha", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in
def sha256msg2 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
}
-let Features = "fma|fma4", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vfmaddps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Vector<4, float>)">;
- def vfmaddpd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, double>, _Vector<2, double>)">;
-}
-
let Features = "fma", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
def vfmaddss3 : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Vector<4, float>)">;
def vfmaddsd3 : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, double>, _Vector<2, double>)">;
@@ -899,8 +826,6 @@ let Features = "fma|fma4", Attributes = [NoThrow, Const, RequiredVectorWidth<128
}
let Features = "fma|fma4", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vfmaddps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, float>, _Vector<8, float>)">;
- def vfmaddpd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, _Vector<4, double>, _Vector<4, double>)">;
def vfmaddsubps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, float>, _Vector<8, float>)">;
def vfmaddsubpd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, _Vector<4, double>, _Vector<4, double>)">;
}
@@ -953,14 +878,6 @@ let Features = "xop", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in
def vphsubwd : X86Builtin<"_Vector<4, int>(_Vector<8, short>)">;
def vphsubdq : X86Builtin<"_Vector<2, long long int>(_Vector<4, int>)">;
def vpperm : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>, _Vector<16, char>)">;
- def vprotb : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>)">;
- def vprotw : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
- def vprotd : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
- def vprotq : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">;
- def vprotbi : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Constant char)">;
- def vprotwi : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Constant char)">;
- def vprotdi : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Constant char)">;
- def vprotqi : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Constant char)">;
def vpshlb : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>)">;
def vpshlw : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def vpshld : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
@@ -978,28 +895,23 @@ let Features = "xop", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in
def vpcomd : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>, _Constant char)">;
def vpcomq : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>, _Constant char)">;
def vpermil2pd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, double>, _Vector<2, long long int>, _Constant char)">;
-}
-
-let Features = "xop", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vpermil2pd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, _Vector<4, double>, _Vector<4, long long int>, _Constant char)">;
-}
-
-let Features = "xop", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
def vpermil2ps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Vector<4, int>, _Constant char)">;
-}
-
-let Features = "xop", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vpermil2ps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, float>, _Vector<8, int>, _Constant char)">;
-}
-
-let Features = "xop", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
def vfrczss : X86Builtin<"_Vector<4, float>(_Vector<4, float>)">;
def vfrczsd : X86Builtin<"_Vector<2, double>(_Vector<2, double>)">;
def vfrczps : X86Builtin<"_Vector<4, float>(_Vector<4, float>)">;
def vfrczpd : X86Builtin<"_Vector<2, double>(_Vector<2, double>)">;
}
+let Features = "xop", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+ def vprotbi : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Constant char)">;
+ def vprotwi : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Constant char)">;
+ def vprotdi : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Constant char)">;
+ def vprotqi : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Constant char)">;
+}
+
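The XOP immediate rotates are split out so they can carry Constexpr. A sketch of a compile-time byte rotate, assuming -mxop:

    typedef char v16c __attribute__((vector_size(16)));
    constexpr v16c v = {(char)0x81};  // remaining lanes zero-filled
    constexpr v16c r = __builtin_ia32_vprotbi(v, 1);
    static_assert((unsigned char)r[0] == 0x03, "0x81 rotated left by 1");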
let Features = "xop", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+ def vpermil2pd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, _Vector<4, double>, _Vector<4, long long int>, _Constant char)">;
+ def vpermil2ps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, float>, _Vector<8, int>, _Constant char)">;
def vfrczps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>)">;
def vfrczpd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>)">;
}
@@ -1090,6 +1002,9 @@ let Features = "avx512f,evex512", Attributes = [NoThrow, Const, RequiredVectorWi
def cvtpd2ps512_mask : X86Builtin<"_Vector<8, float>(_Vector<8, double>, _Vector<8, float>, unsigned char, _Constant int)">;
def vcvtps2ph512_mask : X86Builtin<"_Vector<16, short>(_Vector<16, float>, _Constant int, _Vector<16, short>, unsigned short)">;
def vcvtph2ps512_mask : X86Builtin<"_Vector<16, float>(_Vector<16, short>, _Vector<16, float>, unsigned short, _Constant int)">;
+}
+
+let Features = "avx512f,evex512", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def pmuldq512 : X86Builtin<"_Vector<8, long long int>(_Vector<16, int>, _Vector<16, int>)">;
def pmuludq512 : X86Builtin<"_Vector<8, long long int>(_Vector<16, int>, _Vector<16, int>)">;
}
@@ -1411,8 +1326,6 @@ let Features = "avx512cd,avx512vl", Attributes = [NoThrow, Const, RequiredVector
let Features = "avx512cd,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
def vpconflictdi_512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>)">;
def vpconflictsi_512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>)">;
- def vplzcntd_512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>)">;
- def vplzcntq_512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>)">;
}
let Features = "avx512vl,avx512bitalg", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
@@ -1429,7 +1342,10 @@ let Features = "avx512bitalg,evex512", Attributes = [NoThrow, Const, RequiredVec
let Features = "avx512bw,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
def pmulhrsw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
- def pmulhuw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
+}
+
+let Features = "avx512bw,evex512", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
+ def pmulhuw512 : X86Builtin<"_Vector<32, unsigned short>(_Vector<32, unsigned short>, _Vector<32, unsigned short>)">;
def pmulhw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
}
@@ -1888,78 +1804,6 @@ let Features = "avx512vbmi2,evex512", Attributes = [NoThrow, Const, RequiredVect
}
let Features = "avx512vl,avx512vbmi2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vpshldvd128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>, _Vector<4, int>)">;
-}
-
-let Features = "avx512vl,avx512vbmi2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vpshldvd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>, _Vector<8, int>)">;
-}
-
-let Features = "avx512vbmi2,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
- def vpshldvd512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>, _Vector<16, int>)">;
-}
-
-let Features = "avx512vl,avx512vbmi2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vpshldvq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>, _Vector<2, long long int>)">;
-}
-
-let Features = "avx512vl,avx512vbmi2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vpshldvq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>, _Vector<4, long long int>)">;
-}
-
-let Features = "avx512vbmi2,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
- def vpshldvq512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Vector<8, long long int>, _Vector<8, long long int>)">;
-}
-
-let Features = "avx512vl,avx512vbmi2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vpshldvw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>, _Vector<8, short>)">;
-}
-
-let Features = "avx512vl,avx512vbmi2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vpshldvw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>, _Vector<16, short>)">;
-}
-
-let Features = "avx512vbmi2,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
- def vpshldvw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>, _Vector<32, short>)">;
-}
-
-let Features = "avx512vl,avx512vbmi2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vpshrdvd128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>, _Vector<4, int>)">;
-}
-
-let Features = "avx512vl,avx512vbmi2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vpshrdvd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>, _Vector<8, int>)">;
-}
-
-let Features = "avx512vbmi2,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
- def vpshrdvd512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>, _Vector<16, int>)">;
-}
-
-let Features = "avx512vl,avx512vbmi2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vpshrdvq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>, _Vector<2, long long int>)">;
-}
-
-let Features = "avx512vl,avx512vbmi2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vpshrdvq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>, _Vector<4, long long int>)">;
-}
-
-let Features = "avx512vbmi2,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
- def vpshrdvq512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Vector<8, long long int>, _Vector<8, long long int>)">;
-}
-
-let Features = "avx512vl,avx512vbmi2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vpshrdvw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>, _Vector<8, short>)">;
-}
-
-let Features = "avx512vl,avx512vbmi2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vpshrdvw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>, _Vector<16, short>)">;
-}
-
-let Features = "avx512vbmi2,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
- def vpshrdvw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>, _Vector<32, short>)">;
-}
-
-let Features = "avx512vl,avx512vbmi2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
def vpshrdd128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>, _Constant int)">;
}
@@ -2148,93 +1992,32 @@ let Features = "avx512dq,evex512", Attributes = [NoThrow, Const, RequiredVectorW
def reduceps512_mask : X86Builtin<"_Vector<16, float>(_Vector<16, float>, _Constant int, _Vector<16, float>, unsigned short, _Constant int)">;
}
-let Features = "avx512f,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f,evex512", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def prold512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Constant int)">;
- def prolq512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Constant int)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def prold128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Constant int)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def prold256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Constant int)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def prolq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Constant int)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def prolq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Constant int)">;
-}
-
-let Features = "avx512f,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
- def prolvd512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>)">;
- def prolvq512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Vector<8, long long int>)">;
def prord512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Constant int)">;
+ def prolq512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Constant int)">;
def prorq512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Constant int)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def prolvd128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def prolvd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def prolvq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def prolvq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+ def prold128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Constant int)">;
def prord128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Constant int)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def prord256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Constant int)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+ def prolq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Constant int)">;
def prorq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Constant int)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
+ def prold256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Constant int)">;
+ def prord256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Constant int)">;
+ def prolq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Constant int)">;
def prorq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Constant int)">;
}
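The AVX-512 immediate rotates get the same treatment at all three widths. A sketch for the 128-bit form, assuming -mavx512vl:

    typedef int v4i __attribute__((vector_size(16)));
    constexpr v4i v = {(int)0x80000000, 1, 2, 3};
    constexpr v4i r = __builtin_ia32_prold128(v, 1);
    static_assert(r[0] == 1 && r[1] == 2, "32-bit rotate left by 1");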
-let Features = "avx512f,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
- def prorvd512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>)">;
- def prorvq512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Vector<8, long long int>)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def prorvd128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def prorvd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def prorvq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">;
-}
-
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def prorvq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>)">;
-}
-
let Features = "avx512bw,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
def pshufhw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Constant int)">;
def pshuflw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Constant int)">;
def psllv32hi : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
def psllw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<8, short>)">;
- def psllwi512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, int)">;
}
let Features = "avx512bw,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
@@ -2245,7 +2028,9 @@ let Features = "avx512bw,avx512vl", Attributes = [NoThrow, Const, RequiredVector
def psllv8hi : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
}
-let Features = "avx512f,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f,evex512",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
+ def psllwi512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, int)">;
def pslldi512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, int)">;
def psllqi512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, int)">;
}
@@ -2262,7 +2047,9 @@ let Features = "avx512bw,avx512vl", Attributes = [NoThrow, Const, RequiredVector
def psrlv8hi : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
}
-let Features = "avx512f,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f,evex512",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
+ def psrlwi512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, int)">;
def psrldi512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, int)">;
def psrlqi512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, int)">;
}
@@ -2288,10 +2075,10 @@ let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256
}
let Features = "avx512bw,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
- def psraw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<8, short>)">;
- def psrawi512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, int)">;
- def psrlw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<8, short>)">;
- def psrlwi512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, int)">;
+ def psraw512
+ : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<8, short>)">;
+ def psrlw512
+ : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<8, short>)">;
def pslldqi512_byteshift : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Constant int)">;
def psrldqi512_byteshift : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Constant int)">;
}
@@ -2574,22 +2361,6 @@ let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256
def rcp14ps256_mask : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, float>, unsigned char)">;
}
-let Features = "avx512cd,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vplzcntd_128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>)">;
-}
-
-let Features = "avx512cd,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vplzcntd_256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>)">;
-}
-
-let Features = "avx512cd,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vplzcntq_128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>)">;
-}
-
-let Features = "avx512cd,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vplzcntq_256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>)">;
-}
-
let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
def vcvtsd2si32 : X86Builtin<"int(_Vector<2, double>, _Constant int)">;
def vcvtsd2usi32 : X86Builtin<"unsigned int(_Vector<2, double>, _Constant int)">;
@@ -2623,7 +2394,9 @@ let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<128>
def scalefss_round_mask : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Vector<4, float>, unsigned char, _Constant int)">;
}
-let Features = "avx512f,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f,evex512",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
+ def psrawi512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, int)">;
def psradi512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, int)">;
def psraqi512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, int)">;
}
@@ -2636,11 +2409,13 @@ let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256
def psraq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def psraqi128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, int)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def psraqi256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, int)">;
}
@@ -4140,14 +3915,6 @@ let Features = "avx512fp16,evex512", Attributes = [NoThrow, Const, RequiredVecto
def vcvtps2phx512_mask : X86Builtin<"_Vector<16, _Float16>(_Vector<16, float>, _Vector<16, _Float16>, unsigned short, _Constant int)">;
}
-let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vfmaddph : X86Builtin<"_Vector<8, _Float16>(_Vector<8, _Float16>, _Vector<8, _Float16>, _Vector<8, _Float16>)">;
-}
-
-let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vfmaddph256 : X86Builtin<"_Vector<16, _Float16>(_Vector<16, _Float16>, _Vector<16, _Float16>, _Vector<16, _Float16>)">;
-}
-
let Features = "avx512fp16,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
def vfmaddph512_mask : X86Builtin<"_Vector<32, _Float16>(_Vector<32, _Float16>, _Vector<32, _Float16>, _Vector<32, _Float16>, unsigned int, _Constant int)">;
def vfmaddph512_mask3 : X86Builtin<"_Vector<32, _Float16>(_Vector<32, _Float16>, _Vector<32, _Float16>, _Vector<32, _Float16>, unsigned int, _Constant int)">;
@@ -4246,99 +4013,99 @@ let Features = "avx512fp16,evex512", Attributes = [NoThrow, Const, RequiredVecto
def vfcmulcph512_mask : X86Builtin<"_Vector<16, float>(_Vector<16, float>, _Vector<16, float>, _Vector<16, float>, unsigned short, _Constant int)">;
}
-let Features = "avx512bw,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512bw,avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def selectb_128 : X86Builtin<"_Vector<16, char>(unsigned short, _Vector<16, char>, _Vector<16, char>)">;
}
-let Features = "avx512bw,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512bw,avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def selectb_256 : X86Builtin<"_Vector<32, char>(unsigned int, _Vector<32, char>, _Vector<32, char>)">;
}
-let Features = "avx512bw,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512bw,evex512", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def selectb_512 : X86Builtin<"_Vector<64, char>(unsigned long long int, _Vector<64, char>, _Vector<64, char>)">;
}
-let Features = "avx512bw,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512bw,avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def selectw_128 : X86Builtin<"_Vector<8, short>(unsigned char, _Vector<8, short>, _Vector<8, short>)">;
}
-let Features = "avx512bw,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512bw,avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def selectw_256 : X86Builtin<"_Vector<16, short>(unsigned short, _Vector<16, short>, _Vector<16, short>)">;
}
-let Features = "avx512bw,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512bw,evex512", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def selectw_512 : X86Builtin<"_Vector<32, short>(unsigned int, _Vector<32, short>, _Vector<32, short>)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def selectd_128 : X86Builtin<"_Vector<4, int>(unsigned char, _Vector<4, int>, _Vector<4, int>)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def selectd_256 : X86Builtin<"_Vector<8, int>(unsigned char, _Vector<8, int>, _Vector<8, int>)">;
}
-let Features = "avx512f,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f,evex512", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def selectd_512 : X86Builtin<"_Vector<16, int>(unsigned short, _Vector<16, int>, _Vector<16, int>)">;
}
-let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def selectph_128 : X86Builtin<"_Vector<8, _Float16>(unsigned char, _Vector<8, _Float16>, _Vector<8, _Float16>)">;
}
-let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def selectph_256 : X86Builtin<"_Vector<16, _Float16>(unsigned short, _Vector<16, _Float16>, _Vector<16, _Float16>)">;
}
-let Features = "avx512fp16,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512fp16,evex512", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def selectph_512 : X86Builtin<"_Vector<32, _Float16>(unsigned int, _Vector<32, _Float16>, _Vector<32, _Float16>)">;
}
-let Features = "avx512bf16,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512bf16,avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def selectpbf_128 : X86Builtin<"_Vector<8, __bf16>(unsigned char, _Vector<8, __bf16>, _Vector<8, __bf16>)">;
}
-let Features = "avx512bf16,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512bf16,avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def selectpbf_256 : X86Builtin<"_Vector<16, __bf16>(unsigned short, _Vector<16, __bf16>, _Vector<16, __bf16>)">;
}
-let Features = "avx512bf16,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512bf16,evex512", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def selectpbf_512 : X86Builtin<"_Vector<32, __bf16>(unsigned int, _Vector<32, __bf16>, _Vector<32, __bf16>)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def selectq_128 : X86Builtin<"_Vector<2, long long int>(unsigned char, _Vector<2, long long int>, _Vector<2, long long int>)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def selectq_256 : X86Builtin<"_Vector<4, long long int>(unsigned char, _Vector<4, long long int>, _Vector<4, long long int>)">;
}
-let Features = "avx512f,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f,evex512", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def selectq_512 : X86Builtin<"_Vector<8, long long int>(unsigned char, _Vector<8, long long int>, _Vector<8, long long int>)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def selectps_128 : X86Builtin<"_Vector<4, float>(unsigned char, _Vector<4, float>, _Vector<4, float>)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def selectps_256 : X86Builtin<"_Vector<8, float>(unsigned char, _Vector<8, float>, _Vector<8, float>)">;
}
-let Features = "avx512f,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f,evex512", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def selectps_512 : X86Builtin<"_Vector<16, float>(unsigned short, _Vector<16, float>, _Vector<16, float>)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def selectpd_128 : X86Builtin<"_Vector<2, double>(unsigned char, _Vector<2, double>, _Vector<2, double>)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def selectpd_256 : X86Builtin<"_Vector<4, double>(unsigned char, _Vector<4, double>, _Vector<4, double>)">;
}
-let Features = "avx512f,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f,evex512", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def selectpd_512 : X86Builtin<"_Vector<8, double>(unsigned char, _Vector<8, double>, _Vector<8, double>)">;
}
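All of the mask-select builtins become constexpr here; bit i of the mask picks lane i from the first vector, otherwise from the second. A sketch for the 32-bit/128-bit case, assuming -mavx512vl:

    typedef int v4i __attribute__((vector_size(16)));
    constexpr v4i a = {1, 2, 3, 4};
    constexpr v4i b = {10, 20, 30, 40};
    constexpr v4i r = __builtin_ia32_selectd_128(0x5, a, b);  // 0b0101: lanes 0, 2 from a
    static_assert(r[0] == 1 && r[1] == 20 && r[2] == 3 && r[3] == 40, "");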
@@ -5373,13 +5140,4 @@ let Features = "avx10.2-256", Attributes = [NoThrow, Const, RequiredVectorWidth<
let Features = "avx10.2-512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
def vsqrtbf16512 : X86Builtin<"_Vector<32, __bf16>(_Vector<32, __bf16>)">;
- def vfmaddbf16512 : X86Builtin<"_Vector<32, __bf16>(_Vector<32, __bf16>, _Vector<32, __bf16>, _Vector<32, __bf16>)">;
-}
-
-let Features = "avx10.2-256", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vfmaddbf16256 : X86Builtin<"_Vector<16, __bf16>(_Vector<16, __bf16>, _Vector<16, __bf16>, _Vector<16, __bf16>)">;
-}
-
-let Features = "avx10.2-256", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vfmaddbf16128 : X86Builtin<"_Vector<8, __bf16>(_Vector<8, __bf16>, _Vector<8, __bf16>, _Vector<8, __bf16>)">;
}
diff --git a/clang/include/clang/Basic/CMakeLists.txt b/clang/include/clang/Basic/CMakeLists.txt
index 0cf661a..8173600 100644
--- a/clang/include/clang/Basic/CMakeLists.txt
+++ b/clang/include/clang/Basic/CMakeLists.txt
@@ -33,6 +33,7 @@ clang_diag_gen(Parse)
clang_diag_gen(Refactoring)
clang_diag_gen(Sema)
clang_diag_gen(Serialization)
+clang_diag_gen(Trap)
clang_tablegen(DiagnosticGroups.inc -gen-clang-diag-groups
SOURCE Diagnostic.td
TARGET ClangDiagnosticGroups)
diff --git a/clang/include/clang/Basic/CodeGenOptions.def b/clang/include/clang/Basic/CodeGenOptions.def
index 423b696..fda0da9 100644
--- a/clang/include/clang/Basic/CodeGenOptions.def
+++ b/clang/include/clang/Basic/CodeGenOptions.def
@@ -58,7 +58,7 @@ ENUM_CODEGENOPT(FramePointer, FramePointerKind, 2, FramePointerKind::None, Benig
ENUM_CODEGENOPT(ExceptionHandling, ExceptionHandlingKind, 3, ExceptionHandlingKind::None, NotCompatible)
-CODEGENOPT(ClearASTBeforeBackend , 1, 0, Benign) ///< Free the AST before running backend code generation. Only works with -disable-free.
+CODEGENOPT(ClearASTBeforeBackend , 1, 0, Benign) ///< Free the AST before running backend code generation.
CODEGENOPT(DisableFree , 1, 0, Benign) ///< Don't free memory.
CODEGENOPT(DiscardValueNames , 1, 0, Benign) ///< Discard Value Names from the IR (LLVMContext flag)
CODEGENOPT(DisableLLVMPasses , 1, 0, Benign) ///< Don't run any LLVM IR passes to get
@@ -307,7 +307,7 @@ CODEGENOPT(SanitizeBinaryMetadataAtomics, 1, 0, Benign) ///< Emit PCs for atomic
CODEGENOPT(SanitizeBinaryMetadataUAR, 1, 0, Benign) ///< Emit PCs for start of functions
///< that are subject for use-after-return checking.
CODEGENOPT(SanitizeStats , 1, 0, Benign) ///< Collect statistics for sanitizers.
-CODEGENOPT(SanitizeDebugTrapReasons, 1, 1 , Benign) ///< Enable UBSan trapping messages
+ENUM_CODEGENOPT(SanitizeDebugTrapReasons, SanitizeDebugTrapReasonKind, 2, SanitizeDebugTrapReasonKind::Detailed, Benign) ///< Control how "trap reasons" are emitted in debug info.
CODEGENOPT(SimplifyLibCalls , 1, 1, Benign) ///< Set when -fbuiltin is enabled.
CODEGENOPT(SoftFloat , 1, 0, Benign) ///< -soft-float.
CODEGENOPT(SpeculativeLoadHardening, 1, 0, Benign) ///< Enable speculative load hardening.
diff --git a/clang/include/clang/Basic/CodeGenOptions.h b/clang/include/clang/Basic/CodeGenOptions.h
index cdeedd5..5d5cf25 100644
--- a/clang/include/clang/Basic/CodeGenOptions.h
+++ b/clang/include/clang/Basic/CodeGenOptions.h
@@ -198,6 +198,16 @@ public:
Forced,
};
+  enum SanitizeDebugTrapReasonKind {
+    None, ///< Trap messages are omitted. This offers the smallest debug info
+          ///< size but at the cost of making traps hard to debug.
+    Basic, ///< The trap message is fixed per SanitizerKind. Produces smaller
+           ///< debug info than `Detailed` but is less helpful for debugging.
+    Detailed, ///< The trap message includes more context (e.g. the expression
+              ///< being overflowed). This is more helpful for debugging but
+              ///< produces larger debug info than `Basic`.
+  };
+
/// The code model to use (-mcmodel).
std::string CodeModel;
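A sketch of how a consumer might branch on the new three-state option; pickTrapMessage is a hypothetical helper, not part of the patch:

    #include "clang/Basic/CodeGenOptions.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/ErrorHandling.h"

    // Hypothetical: choose how much trap-reason detail goes into debug info.
    static llvm::StringRef
    pickTrapMessage(clang::CodeGenOptions::SanitizeDebugTrapReasonKind K,
                    llvm::StringRef Fixed, llvm::StringRef WithContext) {
      switch (K) {
      case clang::CodeGenOptions::None:
        return "";           // omit the message entirely
      case clang::CodeGenOptions::Basic:
        return Fixed;        // one fixed message per SanitizerKind
      case clang::CodeGenOptions::Detailed:
        return WithContext;  // include e.g. the overflowing expression
      }
      llvm_unreachable("unknown SanitizeDebugTrapReasonKind");
    }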
diff --git a/clang/include/clang/Basic/Diagnostic.h b/clang/include/clang/Basic/Diagnostic.h
index cee5bed..af26a04 100644
--- a/clang/include/clang/Basic/Diagnostic.h
+++ b/clang/include/clang/Basic/Diagnostic.h
@@ -23,6 +23,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Compiler.h"
@@ -1259,10 +1260,13 @@ class DiagnosticBuilder : public StreamingDiagnostic {
DiagnosticBuilder() = default;
+protected:
DiagnosticBuilder(DiagnosticsEngine *DiagObj, SourceLocation DiagLoc,
unsigned DiagID);
-protected:
+ DiagnosticsEngine *getDiagnosticsEngine() const { return DiagObj; }
+ unsigned getDiagID() const { return DiagID; }
+
/// Clear out the current diagnostic.
void Clear() const {
DiagObj = nullptr;
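Making the constructor protected and exposing getDiagnosticsEngine()/getDiagID() lets subclasses build specialized builders. A hypothetical sketch (the class name is invented):

    class TrapReasonBuilder : public clang::DiagnosticBuilder {
    public:
      TrapReasonBuilder(clang::DiagnosticsEngine *Diags,
                        clang::SourceLocation Loc, unsigned DiagID)
          : DiagnosticBuilder(Diags, Loc, DiagID) {}
      // Derived code can now recover what it is building.
      unsigned trapDiagID() const { return getDiagID(); }
    };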
diff --git a/clang/include/clang/Basic/Diagnostic.td b/clang/include/clang/Basic/Diagnostic.td
index 65b19f3..53b1db2 100644
--- a/clang/include/clang/Basic/Diagnostic.td
+++ b/clang/include/clang/Basic/Diagnostic.td
@@ -30,6 +30,7 @@ def CLASS_REMARK : DiagClass;
def CLASS_WARNING : DiagClass;
def CLASS_EXTENSION : DiagClass;
def CLASS_ERROR : DiagClass;
+def CLASS_TRAP : DiagClass;
// Responses to a diagnostic in a SFINAE context.
class SFINAEResponse;
@@ -144,7 +145,8 @@ class Extension<string str> : Diagnostic<str, CLASS_EXTENSION, SEV_Ignored>;
class ExtWarn<string str> : Diagnostic<str, CLASS_EXTENSION, SEV_Warning>;
// Notes can provide supplementary information on errors, warnings, and remarks.
class Note<string str> : Diagnostic<str, CLASS_NOTE, SEV_Fatal/*ignored*/>;
-
+// Trap messages provide the text attached to traps in debug info.
+class Trap<string str> : Diagnostic<str, CLASS_TRAP, SEV_Fatal/*ignored*/>;
class DefaultIgnore { Severity DefaultSeverity = SEV_Ignored; }
class DefaultWarn { Severity DefaultSeverity = SEV_Warning; }
@@ -235,3 +237,4 @@ include "DiagnosticParseKinds.td"
include "DiagnosticRefactoringKinds.td"
include "DiagnosticSemaKinds.td"
include "DiagnosticSerializationKinds.td"
+include "DiagnosticTrapKinds.td"
diff --git a/clang/include/clang/Basic/DiagnosticASTKinds.td b/clang/include/clang/Basic/DiagnosticASTKinds.td
index 46d04b0..a63bd80 100644
--- a/clang/include/clang/Basic/DiagnosticASTKinds.td
+++ b/clang/include/clang/Basic/DiagnosticASTKinds.td
@@ -400,6 +400,9 @@ def note_constexpr_non_const_vectorelements : Note<
"cannot determine number of elements for sizeless vectors in a constant expression">;
def note_constexpr_assumption_failed : Note<
"assumption evaluated to false">;
+def note_constexpr_countzeroes_zero : Note<
+ "evaluation of %select{__builtin_elementwise_ctlz|__builtin_elementwise_cttz}0 "
+ "with a zero value is undefined">;
def err_experimental_clang_interp_failed : Error<
"the experimental clang interpreter failed to evaluate an expression">;
diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td
index 0f17f4a..b8c7c6e 100644
--- a/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -581,6 +581,13 @@ def err_drv_reduced_module_output_overrided : Warning<
"please consider use '-fmodule-output=' to specify the output file for reduced BMI explicitly">,
InGroup<DiagGroup<"reduced-bmi-output-overrided">>;
+def remark_found_cxx20_module_usage : Remark<
+ "found C++20 module usage in file '%0'">,
+ InGroup<ModulesDriver>;
+def remark_performing_driver_managed_module_build : Remark<
+ "performing driver managed module build">,
+ InGroup<ModulesDriver>;
+
def warn_drv_delayed_template_parsing_after_cxx20 : Warning<
"-fdelayed-template-parsing is deprecated after C++20">,
InGroup<DiagGroup<"delayed-template-parsing-in-cxx20">>;
@@ -878,4 +885,9 @@ def warn_drv_openacc_without_cir
: Warning<"OpenACC directives will result in no runtime behavior; use "
"-fclangir to enable runtime effect">,
InGroup<SourceUsesOpenACC>;
+
+def warn_drv_gcc_install_dir_libstdcxx : Warning<
+ "future releases of the clang compiler will prefer GCC installations "
+ "containing libstdc++ include directories; '%0' would be chosen over '%1'">,
+ InGroup<DiagGroup<"gcc-install-dir-libstdcxx">>;
}
diff --git a/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/clang/include/clang/Basic/DiagnosticFrontendKinds.td
index 8a8db27..b7e27d8 100644
--- a/clang/include/clang/Basic/DiagnosticFrontendKinds.td
+++ b/clang/include/clang/Basic/DiagnosticFrontendKinds.td
@@ -287,6 +287,10 @@ def err_function_needs_feature : Error<
"always_inline function %1 requires target feature '%2', but would "
"be inlined into function %0 that is compiled without support for '%2'">;
+def err_flatten_function_needs_feature : Error<
+ "flatten function %0 calls %1 which requires target feature '%2', but the "
+ "caller is compiled without support for '%2'">;
+
let CategoryName = "Codegen ABI Check" in {
def err_function_always_inline_attribute_mismatch : Error<
"always_inline function %1 and its caller %0 have mismatching %2 attributes">;
diff --git a/clang/include/clang/Basic/DiagnosticGroups.td b/clang/include/clang/Basic/DiagnosticGroups.td
index ccb18aa..0c994e0 100644
--- a/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/clang/include/clang/Basic/DiagnosticGroups.td
@@ -533,7 +533,14 @@ def Dangling : DiagGroup<"dangling", [DanglingAssignment,
DanglingGsl,
ReturnStackAddress]>;
-def LifetimeSafety : DiagGroup<"experimental-lifetime-safety">;
+def LifetimeSafetyPermissive : DiagGroup<"experimental-lifetime-safety-permissive">;
+def LifetimeSafetyStrict : DiagGroup<"experimental-lifetime-safety-strict">;
+def LifetimeSafety : DiagGroup<"experimental-lifetime-safety",
+ [LifetimeSafetyPermissive, LifetimeSafetyStrict]> {
+ code Documentation = [{
+ Experimental warnings to detect use-after-free and related temporal safety bugs based on lifetime safety analysis.
+ }];
+}
def DistributedObjectModifiers : DiagGroup<"distributed-object-modifiers">;
def DllexportExplicitInstantiationDecl : DiagGroup<"dllexport-explicit-instantiation-decl">;
@@ -628,6 +635,7 @@ def ModuleConflict : DiagGroup<"module-conflict">;
def ModuleFileExtension : DiagGroup<"module-file-extension">;
def ModuleIncludeDirectiveTranslation : DiagGroup<"module-include-translation">;
def ModuleMap : DiagGroup<"module-map">;
+def ModulesDriver : DiagGroup<"modules-driver">;
def RoundTripCC1Args : DiagGroup<"round-trip-cc1-args">;
def NewlineEOF : DiagGroup<"newline-eof">;
def Nullability : DiagGroup<"nullability">;
@@ -644,6 +652,7 @@ def NonNull : DiagGroup<"nonnull">;
def NonPODVarargs : DiagGroup<"non-pod-varargs">;
def ClassVarargs : DiagGroup<"class-varargs", [NonPODVarargs]>;
def : DiagGroup<"nonportable-cfstrings">;
+def NonPortableSYCL : DiagGroup<"nonportable-sycl">;
def NonVirtualDtor : DiagGroup<"non-virtual-dtor">;
def GNUNullPointerArithmetic : DiagGroup<"gnu-null-pointer-arithmetic">;
def NullPointerArithmetic
diff --git a/clang/include/clang/Basic/DiagnosticIDs.h b/clang/include/clang/Basic/DiagnosticIDs.h
index b21a3b6..06446cf 100644
--- a/clang/include/clang/Basic/DiagnosticIDs.h
+++ b/clang/include/clang/Basic/DiagnosticIDs.h
@@ -23,76 +23,80 @@
#include <vector>
namespace clang {
- class DiagnosticsEngine;
- class DiagnosticBuilder;
- class LangOptions;
- class SourceLocation;
-
- // Import the diagnostic enums themselves.
- namespace diag {
- enum class Group;
-
- // Size of each of the diagnostic categories.
- enum {
- DIAG_SIZE_COMMON = 300,
- DIAG_SIZE_DRIVER = 400,
- DIAG_SIZE_FRONTEND = 200,
- DIAG_SIZE_SERIALIZATION = 120,
- DIAG_SIZE_LEX = 500,
- DIAG_SIZE_PARSE = 800,
- DIAG_SIZE_AST = 300,
- DIAG_SIZE_COMMENT = 100,
- DIAG_SIZE_CROSSTU = 100,
- DIAG_SIZE_SEMA = 5000,
- DIAG_SIZE_ANALYSIS = 100,
- DIAG_SIZE_REFACTORING = 1000,
- DIAG_SIZE_INSTALLAPI = 100,
- };
- // Start position for diagnostics.
- enum {
- DIAG_START_COMMON = 0,
- DIAG_START_DRIVER = DIAG_START_COMMON + static_cast<int>(DIAG_SIZE_COMMON),
- DIAG_START_FRONTEND = DIAG_START_DRIVER + static_cast<int>(DIAG_SIZE_DRIVER),
- DIAG_START_SERIALIZATION = DIAG_START_FRONTEND + static_cast<int>(DIAG_SIZE_FRONTEND),
- DIAG_START_LEX = DIAG_START_SERIALIZATION + static_cast<int>(DIAG_SIZE_SERIALIZATION),
- DIAG_START_PARSE = DIAG_START_LEX + static_cast<int>(DIAG_SIZE_LEX),
- DIAG_START_AST = DIAG_START_PARSE + static_cast<int>(DIAG_SIZE_PARSE),
- DIAG_START_COMMENT = DIAG_START_AST + static_cast<int>(DIAG_SIZE_AST),
- DIAG_START_CROSSTU = DIAG_START_COMMENT + static_cast<int>(DIAG_SIZE_COMMENT),
- DIAG_START_SEMA = DIAG_START_CROSSTU + static_cast<int>(DIAG_SIZE_CROSSTU),
- DIAG_START_ANALYSIS = DIAG_START_SEMA + static_cast<int>(DIAG_SIZE_SEMA),
- DIAG_START_REFACTORING = DIAG_START_ANALYSIS + static_cast<int>(DIAG_SIZE_ANALYSIS),
- DIAG_START_INSTALLAPI = DIAG_START_REFACTORING + static_cast<int>(DIAG_SIZE_REFACTORING),
- DIAG_UPPER_LIMIT = DIAG_START_INSTALLAPI + static_cast<int>(DIAG_SIZE_INSTALLAPI)
- };
-
- class CustomDiagInfo;
-
- /// All of the diagnostics that can be emitted by the frontend.
- typedef unsigned kind;
-
- /// Enum values that allow the client to map NOTEs, WARNINGs, and EXTENSIONs
- /// to either Ignore (nothing), Remark (emit a remark), Warning
- /// (emit a warning) or Error (emit as an error). It allows clients to
- /// map ERRORs to Error or Fatal (stop emitting diagnostics after this one).
- enum class Severity : uint8_t {
- // NOTE: 0 means "uncomputed".
- Ignored = 1, ///< Do not present this diagnostic, ignore it.
- Remark = 2, ///< Present this diagnostic as a remark.
- Warning = 3, ///< Present this diagnostic as a warning.
- Error = 4, ///< Present this diagnostic as an error.
- Fatal = 5 ///< Present this diagnostic as a fatal error.
- };
-
- /// Flavors of diagnostics we can emit. Used to filter for a particular
- /// kind of diagnostic (for instance, for -W/-R flags).
- enum class Flavor {
- WarningOrError, ///< A diagnostic that indicates a problem or potential
- ///< problem. Can be made fatal by -Werror.
- Remark ///< A diagnostic that indicates normal progress through
- ///< compilation.
- };
- } // end namespace diag
+class DiagnosticsEngine;
+class DiagnosticBuilder;
+class LangOptions;
+class SourceLocation;
+
+// Import the diagnostic enums themselves.
+namespace diag {
+enum class Group;
+
+// Size of each of the diagnostic categories.
+enum {
+ DIAG_SIZE_COMMON = 300,
+ DIAG_SIZE_DRIVER = 400,
+ DIAG_SIZE_FRONTEND = 200,
+ DIAG_SIZE_SERIALIZATION = 120,
+ DIAG_SIZE_LEX = 500,
+ DIAG_SIZE_PARSE = 800,
+ DIAG_SIZE_AST = 300,
+ DIAG_SIZE_COMMENT = 100,
+ DIAG_SIZE_CROSSTU = 100,
+ DIAG_SIZE_SEMA = 5000,
+ DIAG_SIZE_ANALYSIS = 100,
+ DIAG_SIZE_REFACTORING = 1000,
+ DIAG_SIZE_INSTALLAPI = 100,
+ DIAG_SIZE_TRAP = 100,
+};
+// Start position for diagnostics.
+// clang-format off
+enum {
+ DIAG_START_COMMON = 0,
+ DIAG_START_DRIVER = DIAG_START_COMMON + static_cast<int>(DIAG_SIZE_COMMON),
+ DIAG_START_FRONTEND = DIAG_START_DRIVER + static_cast<int>(DIAG_SIZE_DRIVER),
+ DIAG_START_SERIALIZATION = DIAG_START_FRONTEND + static_cast<int>(DIAG_SIZE_FRONTEND),
+ DIAG_START_LEX = DIAG_START_SERIALIZATION + static_cast<int>(DIAG_SIZE_SERIALIZATION),
+ DIAG_START_PARSE = DIAG_START_LEX + static_cast<int>(DIAG_SIZE_LEX),
+ DIAG_START_AST = DIAG_START_PARSE + static_cast<int>(DIAG_SIZE_PARSE),
+ DIAG_START_COMMENT = DIAG_START_AST + static_cast<int>(DIAG_SIZE_AST),
+ DIAG_START_CROSSTU = DIAG_START_COMMENT + static_cast<int>(DIAG_SIZE_COMMENT),
+ DIAG_START_SEMA = DIAG_START_CROSSTU + static_cast<int>(DIAG_SIZE_CROSSTU),
+ DIAG_START_ANALYSIS = DIAG_START_SEMA + static_cast<int>(DIAG_SIZE_SEMA),
+ DIAG_START_REFACTORING = DIAG_START_ANALYSIS + static_cast<int>(DIAG_SIZE_ANALYSIS),
+ DIAG_START_INSTALLAPI = DIAG_START_REFACTORING + static_cast<int>(DIAG_SIZE_REFACTORING),
+ DIAG_START_TRAP = DIAG_START_INSTALLAPI + static_cast<int>(DIAG_SIZE_INSTALLAPI),
+ DIAG_UPPER_LIMIT = DIAG_START_TRAP + static_cast<int>(DIAG_SIZE_TRAP)
+};
+// clang-format on
+
+class CustomDiagInfo;
+
+/// All of the diagnostics that can be emitted by the frontend.
+typedef unsigned kind;
+
+/// Enum values that allow the client to map NOTEs, WARNINGs, and EXTENSIONs
+/// to either Ignore (nothing), Remark (emit a remark), Warning
+/// (emit a warning) or Error (emit as an error). It allows clients to
+/// map ERRORs to Error or Fatal (stop emitting diagnostics after this one).
+enum class Severity : uint8_t {
+ // NOTE: 0 means "uncomputed".
+ Ignored = 1, ///< Do not present this diagnostic, ignore it.
+ Remark = 2, ///< Present this diagnostic as a remark.
+ Warning = 3, ///< Present this diagnostic as a warning.
+ Error = 4, ///< Present this diagnostic as an error.
+ Fatal = 5 ///< Present this diagnostic as a fatal error.
+};
+
+/// Flavors of diagnostics we can emit. Used to filter for a particular
+/// kind of diagnostic (for instance, for -W/-R flags).
+enum class Flavor {
+ WarningOrError, ///< A diagnostic that indicates a problem or potential
+ ///< problem. Can be made fatal by -Werror.
+ Remark ///< A diagnostic that indicates normal progress through
+ ///< compilation.
+};
+} // end namespace diag
} // end namespace clang
// This has to be included *after* the DIAG_START_ enums above are defined.
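With the sizes above, the new trap block occupies a fixed ID range; the arithmetic works out to [9020, 9120). A sanity-check sketch, valid only while none of the other block sizes change (assumes clang/Basic/DiagnosticIDs.h is included):

// 300+400+200+120+500+800+300+100+100+5000+100+1000+100 = 9020
static_assert(clang::diag::DIAG_START_TRAP == 9020, "trap block moved");
static_assert(clang::diag::DIAG_UPPER_LIMIT == 9120, "diagnostic ID space grew");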
@@ -173,7 +177,8 @@ public:
/// Used for handling and querying diagnostic IDs.
///
-/// Can be used and shared by multiple Diagnostics for multiple translation units.
+/// Can be used and shared by multiple Diagnostics for multiple translation
+/// units.
class DiagnosticIDs : public RefCountedBase<DiagnosticIDs> {
public:
/// The level of the diagnostic, after it has been through mapping.
@@ -186,7 +191,8 @@ public:
CLASS_REMARK = 0x02,
CLASS_WARNING = 0x03,
CLASS_EXTENSION = 0x04,
- CLASS_ERROR = 0x05
+ CLASS_ERROR = 0x05,
+ CLASS_TRAP = 0x06
};
static bool IsCustomDiag(diag::kind Diag) {
@@ -360,6 +366,10 @@ public:
///
bool isExtensionDiag(unsigned DiagID, bool &EnabledByDefault) const;
+ bool isTrapDiag(unsigned DiagID) const {
+ return getDiagClass(DiagID) == CLASS_TRAP;
+ }
+
/// Given a group ID, returns the flag that toggles the group.
/// For example, for Group::DeprecatedDeclarations, returns
/// "deprecated-declarations".
@@ -498,6 +508,6 @@ private:
friend class DiagnosticsEngine;
};
-} // end namespace clang
+} // end namespace clang
#endif
diff --git a/clang/include/clang/Basic/DiagnosticLexKinds.td b/clang/include/clang/Basic/DiagnosticLexKinds.td
index c7fe6e1d..c03c403 100644
--- a/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -912,6 +912,11 @@ def err_mmap_nested_submodule_id : Error<
"qualified module name can only be used to define modules at the top level">;
def err_mmap_expected_feature : Error<"expected a feature name">;
def err_mmap_expected_attribute : Error<"expected an attribute name">;
+def warn_mmap_link_redeclaration : Warning<"redeclaration of link library '%0'">,
+ InGroup<DiagGroup<"module-link-redeclaration">>, DefaultError;
+def note_mmap_prev_link_declaration : Note<"previously declared here">;
+def err_mmap_submodule_link_decl
+ : Error<"link declaration is not allowed in submodules">;
def warn_mmap_unknown_attribute : Warning<"unknown attribute '%0'">,
InGroup<IgnoredAttributes>;
def warn_mmap_mismatched_private_submodule : Warning<
diff --git a/clang/include/clang/Basic/DiagnosticParseKinds.td b/clang/include/clang/Basic/DiagnosticParseKinds.td
index 0042afc..ff506fb 100644
--- a/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -830,6 +830,9 @@ def err_ms_property_expected_comma_or_rparen : Error<
"expected ',' or ')' at end of property accessor list">;
def err_ms_property_initializer : Error<
"property declaration cannot have a default member initializer">;
+def ext_invalid_attribute_argument
+ : Extension<"'%0' is not allowed in an attribute argument list">,
+ InGroup<DiagGroup<"attribute-preprocessor-tokens">>;
def err_assume_attr_expects_cond_expr : Error<
"use of this expression in an %0 attribute requires parentheses">;
@@ -1503,8 +1506,8 @@ def err_omp_unexpected_directive : Error<
"unexpected OpenMP directive %select{|'#pragma omp %1'}0">;
def err_omp_expected_punc : Error<
"expected ',' or ')' in '%0' %select{clause|directive}1">;
-def warn_clause_expected_string : Warning<
- "expected string literal in 'clause %0' - ignoring">, InGroup<IgnoredPragmas>;
+def warn_clause_expected_string : Warning<
+  "expected string %select{|literal }1in 'clause %0' - ignoring">, InGroup<IgnoredPragmas>;
def err_omp_unexpected_clause : Error<
"unexpected OpenMP clause '%0' in directive '#pragma omp %1'">;
def err_omp_unexpected_clause_extension_only : Error<
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index cf23594..c934fed 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -1772,7 +1772,8 @@ def note_unsatisfied_trait
"%Replaceable{replaceable}|"
"%TriviallyCopyable{trivially copyable}|"
"%Empty{empty}|"
- "%StandardLayout{standard-layout}"
+ "%StandardLayout{standard-layout}|"
+ "%Final{final}"
"}1">;
def note_unsatisfied_trait_reason
@@ -1815,7 +1816,9 @@ def note_unsatisfied_trait_reason
"%sub{select_special_member_kind}1}|"
"%FunctionType{is a function type}|"
"%CVVoidType{is a cv void type}|"
- "%IncompleteArrayType{is an incomplete array type}"
+ "%IncompleteArrayType{is an incomplete array type}|"
+ "%NotClassOrUnion{is not a class or union type}|"
+ "%NotMarkedFinal{is not marked 'final'}"
"}0">;
def warn_consteval_if_always_true : Warning<
@@ -3684,6 +3687,10 @@ def warn_alloca_align_alignof : Warning<
"second argument to __builtin_alloca_with_align is supposed to be in bits">,
InGroup<DiagGroup<"alloca-with-align-alignof">>;
+def warn_alloc_size : Warning<
+ "allocation of insufficient size '%0' for type %1 with size '%2'">,
+ InGroup<DiagGroup<"alloc-size">>;
+
def err_alignment_too_small : Error<
"requested alignment must be %0 or greater">;
def err_alignment_too_big : Error<
@@ -4404,6 +4411,11 @@ def warn_impcast_different_enum_types : Warning<
def warn_impcast_int_to_enum : Warning<
"implicit conversion from %0 to enumeration type %1 is invalid in C++">,
InGroup<ImplicitIntToEnumCast>, DefaultIgnore;
+
+def note_no_implicit_conversion_for_scoped_enum
+ : Note<"no implicit conversion for scoped enum; consider casting to "
+ "underlying type">;
+
def warn_impcast_bool_to_null_pointer : Warning<
"initialization of pointer of type %0 to null from a constant boolean "
"expression">, InGroup<BoolConversion>;
@@ -6074,6 +6086,13 @@ def warn_cxx23_pack_indexing : Warning<
def err_pack_outside_template : Error<
"pack declaration outside of template">;
+def err_builtin_pack_outside_template
+ : Error<"%0 cannot be used outside of template">;
+
+def err_unsupported_builtin_template_pack_expansion
+    : Error<"expansions of %0 are not supported here; only expansions in "
+            "template arguments and class bases are supported">;
+
def err_fold_expression_packs_both_sides : Error<
"binary fold expression has unexpanded parameter packs in both operands">;
def err_fold_expression_empty : Error<
@@ -10423,9 +10442,10 @@ def warn_format_conversion_argument_type_mismatch : Warning<
def warn_format_conversion_argument_type_mismatch_pedantic : Extension<
warn_format_conversion_argument_type_mismatch.Summary>,
InGroup<FormatPedantic>;
-def warn_format_conversion_argument_type_mismatch_signedness : Warning<
- warn_format_conversion_argument_type_mismatch.Summary>,
- InGroup<FormatSignedness>, DefaultIgnore;
+def warn_format_conversion_argument_type_mismatch_signedness : Warning<
+    "format specifies type %0 but the argument has %select{type|underlying "
+    "type}2 %1, which differs in signedness">,
+    InGroup<FormatSignedness>, DefaultIgnore;
def warn_format_conversion_argument_type_mismatch_confusion : Warning<
warn_format_conversion_argument_type_mismatch.Summary>,
InGroup<FormatTypeConfusion>, DefaultIgnore;
@@ -10537,8 +10557,10 @@ def warn_format_cmp_sensitivity_mismatch : Warning<
"it should be %select{unspecified|private|public|sensitive}1">, InGroup<Format>;
def warn_format_cmp_specifier_mismatch : Warning<
"format specifier '%0' is incompatible with '%1'">, InGroup<Format>;
-def warn_format_cmp_specifier_sign_mismatch : Warning<
- "signedness of format specifier '%0' is incompatible with '%1'">, InGroup<Format>;
+def warn_format_cmp_specifier_sign_mismatch
+ : Warning<"signedness of format specifier '%0' is incompatible with '%1'">,
+ InGroup<FormatSignedness>,
+ DefaultIgnore;
def warn_format_cmp_specifier_mismatch_pedantic : Extension<
warn_format_cmp_specifier_sign_mismatch.Summary>, InGroup<FormatPedantic>;
def note_format_cmp_with : Note<
@@ -10668,9 +10690,15 @@ def warn_dangling_reference_captured_by_unknown : Warning<
"object whose reference is captured will be destroyed at the end of "
"the full-expression">, InGroup<DanglingCapture>;
-def warn_experimental_lifetime_safety_dummy_warning : Warning<
- "todo: remove this warning after we have atleast one warning based on the lifetime analysis">,
- InGroup<LifetimeSafety>, DefaultIgnore;
+// Diagnostics based on the Lifetime safety analysis.
+def warn_lifetime_safety_loan_expires_permissive : Warning<
+ "object whose reference is captured does not live long enough">,
+ InGroup<LifetimeSafetyPermissive>, DefaultIgnore;
+def warn_lifetime_safety_loan_expires_strict : Warning<
+ "object whose reference is captured may not live long enough">,
+ InGroup<LifetimeSafetyStrict>, DefaultIgnore;
+def note_lifetime_safety_used_here : Note<"later used here">;
+def note_lifetime_safety_destroyed_here : Note<"destroyed here">;
// For non-floating point, expressions of the form x == x or x != x
// should result in a warning, since these always evaluate to a constant.
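The permissive/strict pair differs only in confidence wording ("does not" vs. "may not live long enough"), with the two notes pointing at the later use and the destruction. An illustrative example of the bug class these diagnostics target (sample code, not from the patch):

#include <string>
#include <string_view>
std::string_view SV;
void capture() {
  std::string S = "temp";
  SV = S;          // the loan to 'S' is captured in a longer-lived view
}                  // 'S' destroyed here -> note_lifetime_safety_destroyed_here
void use() {
  (void)SV.size(); // later used here  -> note_lifetime_safety_used_here
}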
@@ -10985,10 +11013,15 @@ def err_block_on_vm : Error<
def err_sizeless_nonlocal : Error<
"non-local variable with sizeless type %0">;
+def err_vec_masked_load_store_ptr : Error<
+ "%ordinal0 argument must be a %1">;
+def err_vec_masked_load_store_size : Error<
+  "all arguments to %0 must have the same number of elements (got %1 and %2)">;
+
def err_vec_builtin_non_vector : Error<
"%select{first two|all}1 arguments to %0 must be vectors">;
def err_vec_builtin_incompatible_vector : Error<
- "%select{first two|all}1 arguments to %0 must have the same type">;
+ "%select{first two|all|last two}1 arguments to %0 must have the same type">;
def err_vsx_builtin_nonconstant_argument : Error<
"argument %0 to %1 must be a 2-bit unsigned literal (i.e. 0, 1, 2 or 3)">;
@@ -12585,7 +12618,7 @@ def warn_zero_as_null_pointer_constant : Warning<
InGroup<DiagGroup<"zero-as-null-pointer-constant">>, DefaultIgnore;
def warn_not_eliding_copy_on_return : Warning<
- "not eliding copy on return">,
+ "not eliding copy on return">,
InGroup<DiagGroup<"nrvo">>, DefaultIgnore;
def err_nullability_cs_multilevel : Error<
@@ -12850,7 +12883,7 @@ def err_builtin_invalid_arg_type: Error<
"%plural{0:|: }1"
// Second component: integer-like types
"%select{|integer|signed integer|unsigned integer|'int'|"
- "pointer to a valid matrix element}2"
+ "pointer to a valid matrix element|boolean}2"
// A space after a non-empty second component
"%plural{0:|: }2"
// An 'or' if non-empty second and third components are combined
@@ -12942,6 +12975,17 @@ def err_sycl_special_type_num_init_method : Error<
"types with 'sycl_special_class' attribute must have one and only one '__init' "
"method defined">;
+// SYCL external attribute diagnostics
+def err_sycl_external_invalid_linkage : Error<
+ "%0 can only be applied to functions with external linkage">;
+def err_sycl_external_invalid_main : Error<
+ "%0 cannot be applied to the 'main' function">;
+def err_sycl_external_invalid_deleted_function : Error<
+ "%0 cannot be applied to an explicitly deleted function">;
+def warn_sycl_external_missing_on_first_decl : Warning<
+ "%0 attribute does not appear on the first declaration">,
+ InGroup<NonPortableSYCL>;
+
// SYCL kernel entry point diagnostics
def err_sycl_entry_point_invalid : Error<
"the %0 attribute cannot be applied to a"
@@ -12956,7 +13000,7 @@ def err_sycl_kernel_name_conflict : Error<
"the %0 kernel name argument conflicts with a previous declaration">;
def warn_sycl_kernel_name_not_a_class_type : Warning<
"%0 is not a valid SYCL kernel name type; a non-union class type is required">,
- InGroup<DiagGroup<"nonportable-sycl">>, DefaultError;
+ InGroup<NonPortableSYCL>, DefaultError;
def warn_sycl_entry_point_redundant_declaration : Warning<
"redundant %0 attribute">, InGroup<RedundantAttribute>;
def err_sycl_entry_point_after_definition : Error<
@@ -13234,9 +13278,9 @@ def err_wasm_builtin_arg_must_match_table_element_type : Error <
"%ordinal0 argument must match the element type of the WebAssembly table in the %ordinal1 argument">;
def err_wasm_builtin_arg_must_be_integer_type : Error <
"%ordinal0 argument must be an integer">;
-def err_wasm_builtin_test_fp_sig_cannot_include_reference_type
- : Error<"not supported for "
- "function pointers with a reference type %select{return "
+def err_wasm_builtin_test_fp_sig_cannot_include_struct_or_union
+ : Error<"not supported with the multivalue ABI for "
+ "function pointers with a struct/union as %select{return "
"value|parameter}0">;
// OpenACC diagnostics.
@@ -13359,16 +13403,23 @@ def err_acc_reduction_num_gangs_conflict
"appear on a '%2' construct "
"with a '%3' clause%select{ with more than 1 argument|}0">;
def err_acc_reduction_type
- : Error<"OpenACC 'reduction' variable must be of scalar type, aggregate, "
- "sub-array, or a composite of scalar types;%select{| sub-array "
- "base}1 type is %0">;
-def err_acc_reduction_composite_type
- : Error<"OpenACC 'reduction' variable must be a composite of scalar types; "
- "%1 %select{is not a class or struct|is incomplete|is not an "
- "aggregate}0">;
-def err_acc_reduction_composite_member_type :Error<
- "OpenACC 'reduction' composite variable must not have non-scalar field">;
-def note_acc_reduction_composite_member_loc : Note<"invalid field is here">;
+ : Error<"invalid type %0 used in OpenACC 'reduction' variable reference; "
+ "type is %enum_select<OACCReductionTy>{%NotScalar{not a scalar "
+ "value, or array of scalars, or composite of "
+ "scalars}|%MemberNotScalar{not a scalar value}|%NotAgg{not an "
+ "aggregate}|%NotComplete{not a complete type}|%NotClassStruct{not "
+ "a class or struct}}1">;
+def note_acc_reduction_array
+    : Note<"used as element type of "
+           "%enum_select<OACCReductionArray>{%Section{sub-array}|"
+           "%Subscript{array}|%ArrayTy{array}}0 type %1">;
+def note_acc_reduction_member_of_composite
+ : Note<"used as field '%0' of composite '%1'">;
+def note_acc_reduction_type_summary
+    : Note<"OpenACC 'reduction' variable reference must be a scalar variable, "
+           "a composite of scalars, or an array, sub-array, or element of "
+           "scalar types">;
def err_acc_loop_not_for_loop
: Error<"OpenACC '%0' construct can only be applied to a 'for' loop">;
def note_acc_construct_here : Note<"'%0' construct is here">;
@@ -13529,7 +13580,7 @@ def err_acc_invalid_modifier
def err_acc_invalid_default_type
: Error<"invalid value %0 in '%1' clause; valid values are %2">;
def err_acc_device_type_multiple_archs
- : Error<"OpenACC 'device_type' clause on a 'set' construct only permits "
+ : Error<"OpenACC 'device_type' clause on a '%0' construct only permits "
"one architecture">;
def warn_acc_var_referenced_non_const_array
: Warning<"variable of array type %0 referenced in OpenACC '%1' clause "
diff --git a/clang/include/clang/Basic/DiagnosticTrap.h b/clang/include/clang/Basic/DiagnosticTrap.h
new file mode 100644
index 0000000..da8bd25
--- /dev/null
+++ b/clang/include/clang/Basic/DiagnosticTrap.h
@@ -0,0 +1,14 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_BASIC_DIAGNOSTICTRAP_H
+#define LLVM_CLANG_BASIC_DIAGNOSTICTRAP_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticTrapInterface.inc"
+
+#endif
diff --git a/clang/include/clang/Basic/DiagnosticTrapKinds.td b/clang/include/clang/Basic/DiagnosticTrapKinds.td
new file mode 100644
index 0000000..c17a88d
--- /dev/null
+++ b/clang/include/clang/Basic/DiagnosticTrapKinds.td
@@ -0,0 +1,30 @@
+//===--- DiagnosticTrapKinds.td -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Trap Diagnostics
+//
+// These are diagnostics that are emitted into `TrapReason` objects using the
+// `TrapReasonBuilder` class. These `TrapReason` objects are then encoded into
+// debug info during codegen, rather than to the traditional diagnostic
+// consumers like the terminal. Their primary purpose is to make debugging traps
+// (e.g. `-fsanitize-trap=undefined`) easier by attaching a trap category and
+// message to the trap instruction that tools like a debugger can show.
+//
+//===----------------------------------------------------------------------===//
+let Component = "Trap" in {
+let CategoryName = "Undefined Behavior Sanitizer" in {
+
+def trap_ubsan_arith_overflow : Trap<
+ "%select{unsigned|signed}0 integer "
+ "%enum_select<UBSanArithKind>{"
+ "%Add{addition}|"
+ "%Sub{subtraction}|"
+ "%Mul{multiplication}"
+ "}1 overflow in %2">;
+
+}
+}
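For concreteness, the %select/%enum_select machinery expands the definition above as follows (illustrative rendering):

// %0 = 1 (signed), %1 = UBSanArithKind::Mul, %2 = the offending expression
//   -> "signed integer multiplication overflow in 'a * b'"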
diff --git a/clang/include/clang/Basic/Features.def b/clang/include/clang/Basic/Features.def
index 72f2361..0e91b42a 100644
--- a/clang/include/clang/Basic/Features.def
+++ b/clang/include/clang/Basic/Features.def
@@ -128,6 +128,7 @@ FEATURE(attribute_overloadable, true)
FEATURE(attribute_unavailable_with_message, true)
FEATURE(attribute_unused_on_fields, true)
FEATURE(attribute_diagnose_if_objc, true)
+FEATURE(ext_vector_type_boolean, true)
FEATURE(blocks, LangOpts.Blocks)
FEATURE(c_thread_safety_attributes, true)
FEATURE(cxx_exceptions, LangOpts.CXXExceptions)
@@ -147,14 +148,17 @@ FEATURE(type_sanitizer, LangOpts.Sanitize.has(SanitizerKind::Type))
FEATURE(thread_sanitizer, LangOpts.Sanitize.has(SanitizerKind::Thread))
FEATURE(dataflow_sanitizer, LangOpts.Sanitize.has(SanitizerKind::DataFlow))
FEATURE(scudo, LangOpts.Sanitize.hasOneOf(SanitizerKind::Scudo))
-FEATURE(ptrauth_intrinsics, LangOpts.PointerAuthIntrinsics)
-EXTENSION(ptrauth_qualifier, LangOpts.PointerAuthIntrinsics)
+FEATURE(ptrauth_intrinsics, LangOpts.PointerAuthIntrinsics &&
+ PP.getTargetInfo().getTriple().isOSDarwin())
+FEATURE(ptrauth_qualifier, LangOpts.PointerAuthIntrinsics &&
+ PP.getTargetInfo().getTriple().isOSDarwin())
FEATURE(ptrauth_calls, LangOpts.PointerAuthCalls)
FEATURE(ptrauth_returns, LangOpts.PointerAuthReturns)
FEATURE(ptrauth_vtable_pointer_address_discrimination, LangOpts.PointerAuthVTPtrAddressDiscrimination)
FEATURE(ptrauth_vtable_pointer_type_discrimination, LangOpts.PointerAuthVTPtrTypeDiscrimination)
FEATURE(ptrauth_type_info_vtable_pointer_discrimination, LangOpts.PointerAuthTypeInfoVTPtrDiscrimination)
FEATURE(ptrauth_member_function_pointer_type_discrimination, LangOpts.PointerAuthCalls)
+FEATURE(ptrauth_signed_block_descriptors, LangOpts.PointerAuthBlockDescriptorPointers)
FEATURE(ptrauth_function_pointer_type_discrimination, LangOpts.PointerAuthFunctionTypeDiscrimination)
FEATURE(ptrauth_indirect_gotos, LangOpts.PointerAuthIndirectGotos)
FEATURE(ptrauth_init_fini, LangOpts.PointerAuthInitFini)
@@ -163,7 +167,7 @@ FEATURE(ptrauth_elf_got, LangOpts.PointerAuthELFGOT)
FEATURE(ptrauth_objc_isa, LangOpts.PointerAuthObjcIsa)
FEATURE(ptrauth_objc_interface_sel, LangOpts.PointerAuthObjcInterfaceSel)
-FEATURE(ptrauth_objc_signable_class, true)
+FEATURE(ptrauth_objc_signable_class, LangOpts.PointerAuthIntrinsics)
FEATURE(ptrauth_objc_method_list_pointer, LangOpts.PointerAuthCalls)
EXTENSION(swiftcc,
@@ -303,6 +307,14 @@ FEATURE(is_trivially_assignable, LangOpts.CPlusPlus)
FEATURE(is_trivially_constructible, LangOpts.CPlusPlus)
FEATURE(is_trivially_copyable, LangOpts.CPlusPlus)
FEATURE(is_union, LangOpts.CPlusPlus)
+FEATURE(cfi_sanitizer, LangOpts.Sanitize.hasOneOf(SanitizerKind::CFI))
+FEATURE(cfi_cast_strict_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFICastStrict))
+FEATURE(cfi_derived_cast_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFIDerivedCast))
+FEATURE(cfi_icall_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFIICall))
+FEATURE(cfi_mfcall_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFIMFCall))
+FEATURE(cfi_unrelated_cast_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFIUnrelatedCast))
+FEATURE(cfi_nvcall_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFINVCall))
+FEATURE(cfi_vcall_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFIVCall))
FEATURE(kcfi, LangOpts.Sanitize.has(SanitizerKind::KCFI))
FEATURE(kcfi_arity, LangOpts.Sanitize.has(SanitizerKind::KCFI))
FEATURE(modules, LangOpts.Modules)
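Each new FEATURE entry is queryable through __has_feature, so code can detect which CFI checks are active in the current TU, e.g.:

#if __has_feature(cfi_icall_sanitizer)
// Only compiled when -fsanitize=cfi-icall is in effect for this TU.
#endif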
diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def
index 08d98a7..e0a5351 100644
--- a/clang/include/clang/Basic/LangOptions.def
+++ b/clang/include/clang/Basic/LangOptions.def
@@ -136,6 +136,8 @@ LANGOPT(PointerAuthObjcInterfaceSel, 1, 0, NotCompatible, "authentication of SEL
LANGOPT(PointerAuthObjcInterfaceSelKey, 16, 0, NotCompatible, "authentication key for SEL fields of ObjC interfaces")
LANGOPT(PointerAuthObjcClassROPointers, 1, 0, Benign, "class_ro_t pointer authentication")
+LANGOPT(PointerAuthBlockDescriptorPointers, 1, 0, NotCompatible, "enable signed block descriptors")
+
LANGOPT(DoubleSquareBracketAttributes, 1, 0, NotCompatible, "'[[]]' attributes extension for all language standard modes")
LANGOPT(ExperimentalLateParseAttributes, 1, 0, NotCompatible, "experimental late parsing of attributes")
@@ -239,6 +241,7 @@ LANGOPT(HLSL, 1, 0, NotCompatible, "HLSL")
ENUM_LANGOPT(HLSLVersion, HLSLLangStd, 16, HLSL_Unset, NotCompatible, "HLSL Version")
LANGOPT(HLSLStrictAvailability, 1, 0, NotCompatible,
"Strict availability diagnostic mode for HLSL built-in functions.")
+LANGOPT(HLSLSpvUseUnknownImageFormat, 1, 0, NotCompatible, "For storage images and texel buffers, sets the default format to 'Unknown' when not specified via the `vk::image_format` attribute. If this option is not used, the format is inferred from the resource's data type.")
LANGOPT(CUDAIsDevice , 1, 0, NotCompatible, "compiling for CUDA device")
LANGOPT(CUDAAllowVariadicFunctions, 1, 0, NotCompatible, "allowing variadic functions in CUDA device code")
diff --git a/clang/include/clang/Basic/LangOptions.h b/clang/include/clang/Basic/LangOptions.h
index 0407897..a8943df 100644
--- a/clang/include/clang/Basic/LangOptions.h
+++ b/clang/include/clang/Basic/LangOptions.h
@@ -186,95 +186,10 @@ public:
/// Clang versions with different platform ABI conformance.
enum class ClangABI {
- /// Attempt to be ABI-compatible with code generated by Clang 3.8.x
- /// (SVN r257626). This causes <1 x long long> to be passed in an
- /// integer register instead of an SSE register on x64_64.
- Ver3_8,
-
- /// Attempt to be ABI-compatible with code generated by Clang 4.0.x
- /// (SVN r291814). This causes move operations to be ignored when
- /// determining whether a class type can be passed or returned directly.
- Ver4,
-
- /// Attempt to be ABI-compatible with code generated by Clang 6.0.x
- /// (SVN r321711). This causes determination of whether a type is
- /// standard-layout to ignore collisions between empty base classes
- /// and between base classes and member subobjects, which affects
- /// whether we reuse base class tail padding in some ABIs.
- Ver6,
-
- /// Attempt to be ABI-compatible with code generated by Clang 7.0.x
- /// (SVN r338536). This causes alignof (C++) and _Alignof (C11) to be
- /// compatible with __alignof (i.e., return the preferred alignment)
- /// rather than returning the required alignment.
- Ver7,
-
- /// Attempt to be ABI-compatible with code generated by Clang 9.0.x
- /// (SVN r351319). This causes vectors of __int128 to be passed in memory
- /// instead of passing in multiple scalar registers on x86_64 on Linux and
- /// NetBSD.
- Ver9,
-
- /// Attempt to be ABI-compatible with code generated by Clang 11.0.x
- /// (git 2e10b7a39b93). This causes clang to pass unions with a 256-bit
- /// vector member on the stack instead of using registers, to not properly
- /// mangle substitutions for template names in some cases, and to mangle
- /// declaration template arguments without a cast to the parameter type
- /// even when that can lead to mangling collisions.
- Ver11,
-
- /// Attempt to be ABI-compatible with code generated by Clang 12.0.x
- /// (git 8e464dd76bef). This causes clang to mangle lambdas within
- /// global-scope inline variables incorrectly.
- Ver12,
-
- /// Attempt to be ABI-compatible with code generated by Clang 14.0.x.
- /// This causes clang to:
- /// - mangle dependent nested names incorrectly.
- /// - make trivial only those defaulted copy constructors with a
- /// parameter-type-list equivalent to the parameter-type-list of an
- /// implicit declaration.
- Ver14,
-
- /// Attempt to be ABI-compatible with code generated by Clang 15.0.x.
- /// This causes clang to:
- /// - Reverse the implementation for DR692, DR1395 and DR1432.
- /// - pack non-POD members of packed structs.
- /// - consider classes with defaulted special member functions non-pod.
- Ver15,
-
- /// Attempt to be ABI-compatible with code generated by Clang 17.0.x.
- /// This causes clang to revert some fixes to its implementation of the
- /// Itanium name mangling scheme, with the consequence that overloaded
- /// function templates are mangled the same if they differ only by:
- /// - constraints
- /// - whether a non-type template parameter has a deduced type
- /// - the parameter list of a template template parameter
- Ver17,
-
- /// Attempt to be ABI-compatible with code generated by Clang 18.0.x.
- /// This causes clang to revert some fixes to the mangling of lambdas
- /// in the initializers of members of local classes.
- Ver18,
-
- /// Attempt to be ABI-compatible with code generated by Clang 19.0.x.
- /// This causes clang to:
- /// - Incorrectly mangle the 'base type' substitutions of the CXX
- /// construction vtable because it hasn't added 'type' as a substitution.
- /// - Skip mangling enclosing class templates of member-like friend
- /// function templates.
- /// - Ignore empty struct arguments in C++ mode for ARM, instead of
- /// passing them as if they had a size of 1 byte.
- Ver19,
-
- /// Attempt to be ABI-compatible with code generated by Clang 20.0.x.
- /// This causes clang to:
- /// - Incorrectly return C++ records in AVX registers on x86_64.
- Ver20,
-
- /// Conform to the underlying platform's C and C++ ABIs as closely
- /// as we can.
- Latest
+#define ABI_VER_MAJOR_MINOR(Major, Minor) Ver##Major##_##Minor,
+#define ABI_VER_MAJOR(Major) Ver##Major,
+#define ABI_VER_LATEST(Latest) Latest
+#include "clang/Basic/ABIVersions.def"
};
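The hand-written enumerators are replaced by an X-macro include. A plausible shape for ABIVersions.def (the file itself is not part of this hunk, so the entries shown are assumptions):

// Each entry expands to one ClangABI enumerator via the macros defined above.
ABI_VER_MAJOR_MINOR(3, 8)  // -> Ver3_8
ABI_VER_MAJOR(4)           // -> Ver4
ABI_VER_MAJOR(20)          // -> Ver20
ABI_VER_LATEST(Latest)     // -> Latest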
enum class CoreFoundationABI {
@@ -637,6 +552,10 @@ public:
llvm::dxbc::RootSignatureVersion HLSLRootSigVer =
llvm::dxbc::RootSignatureVersion::V1_1;
+  /// The HLSL root signature that will be used to override the root signature
+ /// used for the shader entry point.
+ std::string HLSLRootSigOverride;
+
// Indicates if the wasm-opt binary must be ignored in the case of a
// WebAssembly target.
bool NoWasmOpt = false;
diff --git a/clang/include/clang/Basic/PointerAuthOptions.h b/clang/include/clang/Basic/PointerAuthOptions.h
index fb6dddf..2b92025 100644
--- a/clang/include/clang/Basic/PointerAuthOptions.h
+++ b/clang/include/clang/Basic/PointerAuthOptions.h
@@ -23,6 +23,10 @@
namespace clang {
+/// Constant discriminator to be used with block descriptor pointers. The value
+/// is ptrauth_string_discriminator("block_descriptor")
+constexpr uint16_t BlockDescriptorConstantDiscriminator = 0xC0BB;
+
/// Constant discriminator to be used with function pointers in .init_array and
/// .fini_array. The value is ptrauth_string_discriminator("init_fini")
constexpr uint16_t InitFiniPointerConstantDiscriminator = 0xD9D4;
@@ -223,6 +227,18 @@ struct PointerAuthOptions {
/// The ABI for function addresses in .init_array and .fini_array
PointerAuthSchema InitFiniPointers;
+ /// The ABI for block invocation function pointers.
+ PointerAuthSchema BlockInvocationFunctionPointers;
+
+ /// The ABI for block object copy/destroy function pointers.
+ PointerAuthSchema BlockHelperFunctionPointers;
+
+ /// The ABI for __block variable copy/destroy function pointers.
+ PointerAuthSchema BlockByrefHelperFunctionPointers;
+
+ /// The ABI for pointers to block descriptors.
+ PointerAuthSchema BlockDescriptorPointers;
+
/// The ABI for Objective-C method lists.
PointerAuthSchema ObjCMethodListFunctionPointers;
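The documented derivation of the constant can be checked directly against the intrinsic (sketch; requires a target where __has_feature(ptrauth_intrinsics) holds):

#if __has_feature(ptrauth_intrinsics)
#include <ptrauth.h>
static_assert(ptrauth_string_discriminator("block_descriptor") == 0xC0BB,
              "discriminator drifted from the documented value");
#endif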
diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index ce4677e..25b6862 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -233,8 +233,9 @@ protected:
bool TLSSupported;
bool VLASupported;
bool NoAsmVariants; // True if {|} are normal characters.
- bool HasLegalHalfType; // True if the backend supports operations on the half
- // LLVM IR type.
+  bool HasFastHalfType; // True if the backend has native half float support,
+                        // such that performing calculations in float instead
+                        // offers no performance advantage.
bool HalfArgsAndReturns; // OpenCL 6.1.1.1, NEON (IEEE 754-2008 half) type.
bool HasFloat128;
bool HasFloat16;
@@ -700,8 +701,9 @@ public:
return 128;
}
- /// Determine whether _Float16 is supported on this target.
- virtual bool hasLegalHalfType() const { return HasLegalHalfType; }
+ /// Determine whether the target has fast native support for operations
+ /// on half types.
+ virtual bool hasFastHalfType() const { return HasFastHalfType; }
/// Whether half args and returns are supported.
virtual bool allowHalfArgsAndReturns() const { return HalfArgsAndReturns; }
diff --git a/clang/include/clang/Basic/TokenKinds.def b/clang/include/clang/Basic/TokenKinds.def
index 94e72fe..9d1a23d 100644
--- a/clang/include/clang/Basic/TokenKinds.def
+++ b/clang/include/clang/Basic/TokenKinds.def
@@ -552,6 +552,10 @@ TYPE_TRAIT_1(__can_pass_in_regs, CanPassInRegs, KEYCXX)
TYPE_TRAIT_2(__reference_binds_to_temporary, ReferenceBindsToTemporary, KEYCXX)
TYPE_TRAIT_2(__reference_constructs_from_temporary, ReferenceConstructsFromTemporary, KEYCXX)
TYPE_TRAIT_2(__reference_converts_from_temporary, ReferenceConvertsFromTemporary, KEYCXX)
+TYPE_TRAIT_2(__builtin_lt_synthesises_from_spaceship, LtSynthesisesFromSpaceship, KEYCXX)
+TYPE_TRAIT_2(__builtin_le_synthesises_from_spaceship, LeSynthesisesFromSpaceship, KEYCXX)
+TYPE_TRAIT_2(__builtin_gt_synthesises_from_spaceship, GtSynthesisesFromSpaceship, KEYCXX)
+TYPE_TRAIT_2(__builtin_ge_synthesises_from_spaceship, GeSynthesisesFromSpaceship, KEYCXX)
// IsDeducible is only used internally by clang for CTAD implementation and
// is not exposed to users.
TYPE_TRAIT_2(/*EmptySpellingName*/, IsDeducible, KEYCXX)
diff --git a/clang/include/clang/Basic/TokenKinds.h b/clang/include/clang/Basic/TokenKinds.h
index 1b133dd..d84f359 100644
--- a/clang/include/clang/Basic/TokenKinds.h
+++ b/clang/include/clang/Basic/TokenKinds.h
@@ -95,10 +95,20 @@ inline bool isStringLiteral(TokenKind K) {
/// Return true if this is a "literal" kind, like a numeric
/// constant, string, etc.
inline bool isLiteral(TokenKind K) {
- return K == tok::numeric_constant || K == tok::char_constant ||
- K == tok::wide_char_constant || K == tok::utf8_char_constant ||
- K == tok::utf16_char_constant || K == tok::utf32_char_constant ||
- isStringLiteral(K) || K == tok::header_name || K == tok::binary_data;
+  const bool IsInLiteralRange =
+      K >= tok::numeric_constant && K <= tok::utf32_string_literal;
+
+#ifndef NDEBUG
+  const bool IsLiteralExplicit =
+      K == tok::numeric_constant || K == tok::char_constant ||
+      K == tok::wide_char_constant || K == tok::utf8_char_constant ||
+      K == tok::utf16_char_constant || K == tok::utf32_char_constant ||
+      isStringLiteral(K) || K == tok::header_name || K == tok::binary_data;
+  assert(IsInLiteralRange == IsLiteralExplicit &&
+         "TokenKind literals should be contiguous");
+#endif
+
+  return IsInLiteralRange;
}
/// Return true if this is any of tok::annot_* kinds.
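The fast path relies on TokenKinds.def keeping the literal kinds contiguous, which the debug-only cross-check above guards. The same pattern in miniature (illustrative):

enum Tok { KwIf, NumLit, CharLit, StrLit, Ident }; // literals are contiguous
constexpr bool isLit(Tok T) { return T >= NumLit && T <= StrLit; }
static_assert(isLit(CharLit) && !isLit(Ident), "range membership check");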
diff --git a/clang/include/clang/Basic/TypeNodes.td b/clang/include/clang/Basic/TypeNodes.td
index 971ce54..fb6862b 100644
--- a/clang/include/clang/Basic/TypeNodes.td
+++ b/clang/include/clang/Basic/TypeNodes.td
@@ -37,21 +37,12 @@ class NeverCanonical {}
/// canonical types can ignore these nodes.
class NeverCanonicalUnlessDependent {}
-/// A type node which never has component type structure. Some code may be
-/// able to operate on leaf types faster than they can on non-leaf types.
-///
-/// For example, the function type `void (int)` is not a leaf type because it
-/// is structurally composed of component types (`void` and `int`).
-///
-/// A struct type is a leaf type because its field types are not part of its
-/// type-expression.
-///
-/// Nodes like `TypedefType` which are syntactically leaves but can desugar
-/// to types that may not be leaves should not declare this.
-class LeafType {}
+/// A type node which is always a canonical type, that is, one for which
+/// `T.getCanonicalType() == T` always holds.
+class AlwaysCanonical {}
def Type : TypeNode<?, 1>;
-def BuiltinType : TypeNode<Type>, LeafType;
+def BuiltinType : TypeNode<Type>, AlwaysCanonical;
def ComplexType : TypeNode<Type>;
def PointerType : TypeNode<Type>;
def BlockPointerType : TypeNode<Type>;
@@ -88,28 +79,29 @@ def TypeOfType : TypeNode<Type>, NeverCanonicalUnlessDependent;
def DecltypeType : TypeNode<Type>, NeverCanonicalUnlessDependent;
def UnaryTransformType : TypeNode<Type>, NeverCanonicalUnlessDependent;
def TagType : TypeNode<Type, 1>;
-def RecordType : TypeNode<TagType>, LeafType;
-def EnumType : TypeNode<TagType>, LeafType;
-def ElaboratedType : TypeNode<Type>, NeverCanonical;
+def RecordType : TypeNode<TagType>;
+def EnumType : TypeNode<TagType>;
+def InjectedClassNameType : TypeNode<TagType>, AlwaysDependent;
def AttributedType : TypeNode<Type>, NeverCanonical;
def BTFTagAttributedType : TypeNode<Type>, NeverCanonical;
def HLSLAttributedResourceType : TypeNode<Type>;
def HLSLInlineSpirvType : TypeNode<Type>;
-def TemplateTypeParmType : TypeNode<Type>, AlwaysDependent, LeafType;
+def TemplateTypeParmType : TypeNode<Type>, AlwaysDependent;
def SubstTemplateTypeParmType : TypeNode<Type>, NeverCanonical;
-def SubstTemplateTypeParmPackType : TypeNode<Type>, AlwaysDependent;
+def SubstPackType : TypeNode<Type, 1>;
+def SubstTemplateTypeParmPackType : TypeNode<SubstPackType>, AlwaysDependent;
+def SubstBuiltinTemplatePackType : TypeNode<SubstPackType>, AlwaysDependent;
def TemplateSpecializationType : TypeNode<Type>, NeverCanonicalUnlessDependent;
def DeducedType : TypeNode<Type, 1>;
def AutoType : TypeNode<DeducedType>;
def DeducedTemplateSpecializationType : TypeNode<DeducedType>;
-def InjectedClassNameType : TypeNode<Type>, AlwaysDependent, LeafType;
def DependentNameType : TypeNode<Type>, AlwaysDependent;
def DependentTemplateSpecializationType : TypeNode<Type>, AlwaysDependent;
def PackExpansionType : TypeNode<Type>, AlwaysDependent;
def PackIndexingType : TypeNode<Type>, NeverCanonicalUnlessDependent;
def ObjCTypeParamType : TypeNode<Type>, NeverCanonical;
def ObjCObjectType : TypeNode<Type>;
-def ObjCInterfaceType : TypeNode<ObjCObjectType>, LeafType;
+def ObjCInterfaceType : TypeNode<ObjCObjectType>, AlwaysCanonical;
def ObjCObjectPointerType : TypeNode<Type>;
def BoundsAttributedType : TypeNode<Type, 1>;
def CountAttributedType : TypeNode<BoundsAttributedType>, NeverCanonical;
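AlwaysCanonical records an invariant that holds by construction for these nodes; for a builtin type, for example (sketch against the usual ASTContext API):

clang::QualType T = Ctx.IntTy; // Ctx is an ASTContext
assert(T.getCanonicalType() == T && "builtins are their own canonical type");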
diff --git a/clang/include/clang/Basic/arm_sme.td b/clang/include/clang/Basic/arm_sme.td
index c491eb0..a4eb92e 100644
--- a/clang/include/clang/Basic/arm_sme.td
+++ b/clang/include/clang/Basic/arm_sme.td
@@ -21,23 +21,21 @@ let SVETargetGuard = InvalidMode in {
// Loads
multiclass ZALoad<string n_suffix, string t, string i_prefix, list<ImmCheck> ch> {
- let SMETargetGuard = "sme" in {
- def NAME # _H : MInst<"svld1_hor_" # n_suffix, "vimPQ", t,
- [IsLoad, IsOverloadNone, IsStreaming, IsInOutZA],
- MemEltTyDefault, i_prefix # "_horiz", ch>;
-
- def NAME # _H_VNUM : MInst<"svld1_hor_vnum_" # n_suffix, "vimPQl", t,
- [IsLoad, IsOverloadNone, IsStreaming, IsInOutZA],
- MemEltTyDefault, i_prefix # "_horiz", ch>;
-
- def NAME # _V : MInst<"svld1_ver_" # n_suffix, "vimPQ", t,
- [IsLoad, IsOverloadNone, IsStreaming, IsInOutZA],
- MemEltTyDefault, i_prefix # "_vert", ch>;
-
- def NAME # _V_VNUM : MInst<"svld1_ver_vnum_" # n_suffix, "vimPQl", t,
- [IsLoad, IsOverloadNone, IsStreaming, IsInOutZA],
- MemEltTyDefault, i_prefix # "_vert", ch>;
- }
+ def NAME # _H : MInst<"svld1_hor_" # n_suffix, "vimPQ", t,
+ [IsLoad, IsOverloadNone, IsStreaming, IsInOutZA],
+ MemEltTyDefault, i_prefix # "_horiz", ch>;
+
+ def NAME # _H_VNUM : MInst<"svld1_hor_vnum_" # n_suffix, "vimPQl", t,
+ [IsLoad, IsOverloadNone, IsStreaming, IsInOutZA],
+ MemEltTyDefault, i_prefix # "_horiz", ch>;
+
+ def NAME # _V : MInst<"svld1_ver_" # n_suffix, "vimPQ", t,
+ [IsLoad, IsOverloadNone, IsStreaming, IsInOutZA],
+ MemEltTyDefault, i_prefix # "_vert", ch>;
+
+ def NAME # _V_VNUM : MInst<"svld1_ver_vnum_" # n_suffix, "vimPQl", t,
+ [IsLoad, IsOverloadNone, IsStreaming, IsInOutZA],
+ MemEltTyDefault, i_prefix # "_vert", ch>;
}
defm SVLD1_ZA8 : ZALoad<"za8", "c", "aarch64_sme_ld1b", [ImmCheck<0, ImmCheck0_0>]>;
@@ -46,7 +44,6 @@ defm SVLD1_ZA32 : ZALoad<"za32", "i", "aarch64_sme_ld1w", [ImmCheck<0, ImmCheck0
defm SVLD1_ZA64 : ZALoad<"za64", "l", "aarch64_sme_ld1d", [ImmCheck<0, ImmCheck0_7>]>;
defm SVLD1_ZA128 : ZALoad<"za128", "q", "aarch64_sme_ld1q", [ImmCheck<0, ImmCheck0_15>]>;
-let SMETargetGuard = "sme" in {
def SVLDR_VNUM_ZA : MInst<"svldr_vnum_za", "vmQl", "",
[IsOverloadNone, IsStreamingCompatible, IsInOutZA],
MemEltTyDefault, "aarch64_sme_ldr">;
@@ -54,29 +51,26 @@ def SVLDR_VNUM_ZA : MInst<"svldr_vnum_za", "vmQl", "",
def SVLDR_ZA : MInst<"svldr_za", "vmQ", "",
[IsOverloadNone, IsStreamingCompatible, IsInOutZA],
MemEltTyDefault, "aarch64_sme_ldr", []>;
-}
////////////////////////////////////////////////////////////////////////////////
// Stores
multiclass ZAStore<string n_suffix, string t, string i_prefix, list<ImmCheck> ch> {
- let SMETargetGuard = "sme" in {
- def NAME # _H : MInst<"svst1_hor_" # n_suffix, "vimP%", t,
- [IsStore, IsOverloadNone, IsStreaming, IsInZA],
- MemEltTyDefault, i_prefix # "_horiz", ch>;
-
- def NAME # _H_VNUM : MInst<"svst1_hor_vnum_" # n_suffix, "vimP%l", t,
- [IsStore, IsOverloadNone, IsStreaming, IsInZA],
- MemEltTyDefault, i_prefix # "_horiz", ch>;
-
- def NAME # _V : MInst<"svst1_ver_" # n_suffix, "vimP%", t,
- [IsStore, IsOverloadNone, IsStreaming, IsInZA],
- MemEltTyDefault, i_prefix # "_vert", ch>;
-
- def NAME # _V_VNUM : MInst<"svst1_ver_vnum_" # n_suffix, "vimP%l", t,
- [IsStore, IsOverloadNone, IsStreaming, IsInZA],
- MemEltTyDefault, i_prefix # "_vert", ch>;
- }
+ def NAME # _H : MInst<"svst1_hor_" # n_suffix, "vimP%", t,
+ [IsStore, IsOverloadNone, IsStreaming, IsInZA],
+ MemEltTyDefault, i_prefix # "_horiz", ch>;
+
+ def NAME # _H_VNUM : MInst<"svst1_hor_vnum_" # n_suffix, "vimP%l", t,
+ [IsStore, IsOverloadNone, IsStreaming, IsInZA],
+ MemEltTyDefault, i_prefix # "_horiz", ch>;
+
+ def NAME # _V : MInst<"svst1_ver_" # n_suffix, "vimP%", t,
+ [IsStore, IsOverloadNone, IsStreaming, IsInZA],
+ MemEltTyDefault, i_prefix # "_vert", ch>;
+
+ def NAME # _V_VNUM : MInst<"svst1_ver_vnum_" # n_suffix, "vimP%l", t,
+ [IsStore, IsOverloadNone, IsStreaming, IsInZA],
+ MemEltTyDefault, i_prefix # "_vert", ch>;
}
defm SVST1_ZA8 : ZAStore<"za8", "c", "aarch64_sme_st1b", [ImmCheck<0, ImmCheck0_0>]>;
@@ -85,7 +79,6 @@ defm SVST1_ZA32 : ZAStore<"za32", "i", "aarch64_sme_st1w", [ImmCheck<0, ImmCheck
defm SVST1_ZA64 : ZAStore<"za64", "l", "aarch64_sme_st1d", [ImmCheck<0, ImmCheck0_7>]>;
defm SVST1_ZA128 : ZAStore<"za128", "q", "aarch64_sme_st1q", [ImmCheck<0, ImmCheck0_15>]>;
-let SMETargetGuard = "sme" in {
def SVSTR_VNUM_ZA : MInst<"svstr_vnum_za", "vm%l", "",
[IsOverloadNone, IsStreamingCompatible, IsInZA],
MemEltTyDefault, "aarch64_sme_str">;
@@ -93,21 +86,18 @@ def SVSTR_VNUM_ZA : MInst<"svstr_vnum_za", "vm%l", "",
def SVSTR_ZA : MInst<"svstr_za", "vm%", "",
[IsOverloadNone, IsStreamingCompatible, IsInZA],
MemEltTyDefault, "aarch64_sme_str", []>;
-}
////////////////////////////////////////////////////////////////////////////////
// Read horizontal/vertical ZA slices
multiclass ZARead<string n_suffix, string t, string i_prefix, list<ImmCheck> ch> {
- let SMETargetGuard = "sme" in {
- def NAME # _H : SInst<"svread_hor_" # n_suffix # "[_{d}]", "ddPim", t,
- MergeOp1, i_prefix # "_horiz",
- [IsReadZA, IsStreaming, IsInZA], ch>;
-
- def NAME # _V : SInst<"svread_ver_" # n_suffix # "[_{d}]", "ddPim", t,
- MergeOp1, i_prefix # "_vert",
- [IsReadZA, IsStreaming, IsInZA], ch>;
- }
+ def NAME # _H : SInst<"svread_hor_" # n_suffix # "[_{d}]", "ddPim", t,
+ MergeOp1, i_prefix # "_horiz",
+ [IsReadZA, IsStreaming, IsInZA], ch>;
+
+ def NAME # _V : SInst<"svread_ver_" # n_suffix # "[_{d}]", "ddPim", t,
+ MergeOp1, i_prefix # "_vert",
+ [IsReadZA, IsStreaming, IsInZA], ch>;
}
defm SVREAD_ZA8 : ZARead<"za8", "cUcm", "aarch64_sme_read", [ImmCheck<2, ImmCheck0_0>]>;
@@ -120,15 +110,13 @@ defm SVREAD_ZA128 : ZARead<"za128", "csilUcUsUiUlmhbfd", "aarch64_sme_readq", [I
// Write horizontal/vertical ZA slices
multiclass ZAWrite<string n_suffix, string t, string i_prefix, list<ImmCheck> ch> {
- let SMETargetGuard = "sme" in {
- def NAME # _H : SInst<"svwrite_hor_" # n_suffix # "[_{d}]", "vimPd", t,
- MergeOp1, i_prefix # "_horiz",
- [IsWriteZA, IsStreaming, IsInOutZA], ch>;
-
- def NAME # _V : SInst<"svwrite_ver_" # n_suffix # "[_{d}]", "vimPd", t,
- MergeOp1, i_prefix # "_vert",
- [IsWriteZA, IsStreaming, IsInOutZA], ch>;
- }
+ def NAME # _H : SInst<"svwrite_hor_" # n_suffix # "[_{d}]", "vimPd", t,
+ MergeOp1, i_prefix # "_horiz",
+ [IsWriteZA, IsStreaming, IsInOutZA], ch>;
+
+ def NAME # _V : SInst<"svwrite_ver_" # n_suffix # "[_{d}]", "vimPd", t,
+ MergeOp1, i_prefix # "_vert",
+ [IsWriteZA, IsStreaming, IsInOutZA], ch>;
}
defm SVWRITE_ZA8 : ZAWrite<"za8", "cUcm", "aarch64_sme_write", [ImmCheck<0, ImmCheck0_0>]>;
@@ -140,13 +128,11 @@ defm SVWRITE_ZA128 : ZAWrite<"za128", "csilUcUsUiUlmhbfd", "aarch64_sme_writeq",
////////////////////////////////////////////////////////////////////////////////
// SME - Zero
-let SMETargetGuard = "sme" in {
- def SVZERO_MASK_ZA : SInst<"svzero_mask_za", "vi", "", MergeNone, "aarch64_sme_zero",
- [IsOverloadNone, IsStreamingCompatible, IsInOutZA],
- [ImmCheck<0, ImmCheck0_255>]>;
- def SVZERO_ZA : SInst<"svzero_za", "vv", "", MergeNone, "aarch64_sme_zero",
- [IsOverloadNone, IsStreamingCompatible, IsOutZA]>;
-}
+def SVZERO_MASK_ZA : SInst<"svzero_mask_za", "vi", "", MergeNone, "aarch64_sme_zero",
+ [IsOverloadNone, IsStreamingCompatible, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_255>]>;
+def SVZERO_ZA : SInst<"svzero_za", "vv", "", MergeNone, "aarch64_sme_zero",
+ [IsOverloadNone, IsStreamingCompatible, IsOutZA]>;
let SMETargetGuard = "sme2p1" in {
def SVZERO_ZA64_VG1x2 : SInst<"svzero_za64_vg1x2", "vm", "", MergeNone, "aarch64_sme_zero_za64_vg1x2",
@@ -171,11 +157,9 @@ let SMETargetGuard = "sme2p1" in {
// SME - Counting elements in a streaming vector
multiclass ZACount<string n_suffix> {
- let SMETargetGuard = "sme" in {
- def NAME : SInst<"sv" # n_suffix, "nv", "", MergeNone,
- "aarch64_sme_" # n_suffix,
- [IsOverloadNone, IsStreamingCompatible]>;
- }
+ def NAME : SInst<"sv" # n_suffix, "nv", "", MergeNone,
+ "aarch64_sme_" # n_suffix,
+ [IsOverloadNone, IsStreamingCompatible]>;
}
defm SVCNTSB : ZACount<"cntsb">;
@@ -187,11 +171,9 @@ defm SVCNTSD : ZACount<"cntsd">;
// SME - ADDHA/ADDVA
multiclass ZAAdd<string n_suffix> {
- let SMETargetGuard = "sme" in {
- def NAME # _ZA32: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPd", "iUi", MergeOp1,
- "aarch64_sme_" # n_suffix, [IsStreaming, IsInOutZA],
- [ImmCheck<0, ImmCheck0_3>]>;
- }
+ def NAME # _ZA32: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPd", "iUi", MergeOp1,
+ "aarch64_sme_" # n_suffix, [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
let SMETargetGuard = "sme-i16i64" in {
def NAME # _ZA64: SInst<"sv" # n_suffix # "_za64[_{d}]", "viPPd", "lUl", MergeOp1,
@@ -207,13 +189,11 @@ defm SVADDVA : ZAAdd<"addva">;
// SME - SMOPA, SMOPS, UMOPA, UMOPS
multiclass ZAIntOuterProd<string n_suffix1, string n_suffix2> {
- let SMETargetGuard = "sme" in {
- def NAME # _ZA32_B: SInst<"sv" # n_suffix2 # "_za32[_{d}]",
- "viPPdd", !cond(!eq(n_suffix1, "s") : "", true: "U") # "c",
- MergeOp1, "aarch64_sme_" # n_suffix1 # n_suffix2 # "_wide",
- [IsStreaming, IsInOutZA],
- [ImmCheck<0, ImmCheck0_3>]>;
- }
+ def NAME # _ZA32_B: SInst<"sv" # n_suffix2 # "_za32[_{d}]",
+ "viPPdd", !cond(!eq(n_suffix1, "s") : "", true: "U") # "c",
+ MergeOp1, "aarch64_sme_" # n_suffix1 # n_suffix2 # "_wide",
+ [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
let SMETargetGuard = "sme-i16i64" in {
def NAME # _ZA64_H: SInst<"sv" # n_suffix2 # "_za64[_{d}]",
@@ -233,14 +213,12 @@ defm SVUMOPS : ZAIntOuterProd<"u", "mops">;
// SME - SUMOPA, SUMOPS, USMOPA, USMOPS
multiclass ZAIntOuterProdMixedSigns<string n_suffix1, string n_suffix2> {
- let SMETargetGuard = "sme" in {
- def NAME # _ZA32_B: SInst<"sv" # n_suffix1 # n_suffix2 # "_za32[_{d}]",
- "viPPd" # !cond(!eq(n_suffix1, "su") : "u", true: "x"),
- !cond(!eq(n_suffix1, "su") : "", true: "U") # "c",
- MergeOp1, "aarch64_sme_" # n_suffix1 # n_suffix2 # "_wide",
- [IsStreaming, IsInOutZA],
- [ImmCheck<0, ImmCheck0_3>]>;
- }
+ def NAME # _ZA32_B: SInst<"sv" # n_suffix1 # n_suffix2 # "_za32[_{d}]",
+ "viPPd" # !cond(!eq(n_suffix1, "su") : "u", true: "x"),
+ !cond(!eq(n_suffix1, "su") : "", true: "U") # "c",
+ MergeOp1, "aarch64_sme_" # n_suffix1 # n_suffix2 # "_wide",
+ [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
let SMETargetGuard = "sme-i16i64" in {
def NAME # _ZA64_H: SInst<"sv" # n_suffix1 # n_suffix2 # "_za64[_{d}]",
@@ -261,22 +239,20 @@ defm SVUSMOPS : ZAIntOuterProdMixedSigns<"us", "mops">;
// SME - FMOPA, FMOPS
multiclass ZAFPOuterProd<string n_suffix> {
- let SMETargetGuard = "sme" in {
- def NAME # _ZA32_B: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPdd", "h",
- MergeOp1, "aarch64_sme_" # n_suffix # "_wide",
- [IsStreaming, IsInOutZA],
- [ImmCheck<0, ImmCheck0_3>]>;
+ def NAME # _ZA32_B: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPdd", "h",
+ MergeOp1, "aarch64_sme_" # n_suffix # "_wide",
+ [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
- def NAME # _ZA32_H: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPdd", "b",
- MergeOp1, "aarch64_sme_" # n_suffix # "_wide",
- [IsStreaming, IsInOutZA],
- [ImmCheck<0, ImmCheck0_3>]>;
+ def NAME # _ZA32_H: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPdd", "b",
+ MergeOp1, "aarch64_sme_" # n_suffix # "_wide",
+ [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
- def NAME # _ZA32_S: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPdd", "f",
- MergeOp1, "aarch64_sme_" # n_suffix,
- [IsStreaming, IsInOutZA],
- [ImmCheck<0, ImmCheck0_3>]>;
- }
+ def NAME # _ZA32_S: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPdd", "f",
+ MergeOp1, "aarch64_sme_" # n_suffix,
+ [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
let SMETargetGuard = "sme-f64f64" in {
def NAME # _ZA64_D: SInst<"sv" # n_suffix # "_za64[_{d}]", "viPPdd", "d",
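Every SME hunk above deletes an enclosing `let SMETargetGuard = "sme" in { ... }` block while leaving the definitions themselves intact, which only makes sense if "sme" is now the file-level default guard; only the non-default guards ("sme2p1", "sme-i16i64", "sme-f64f64") keep an explicit `let`. The user-visible contract should be unchanged. A minimal usage sketch, assuming an SME-enabled toolchain (the function name is illustrative, not from this patch):

```c++
#include <arm_sme.h>

// Illustrative only: svaddha_za32_s32_m stays gated on +sme after this
// change; the tile immediate must be 0-3, matching ImmCheck0_3 above.
void accumulate_rows(svbool_t pn, svbool_t pm, svint32_t zn)
    __arm_streaming __arm_inout("za") {
  svaddha_za32_s32_m(/*tile=*/0, pn, pm, zn);
}
```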
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index 07786c6..b8b0f7f 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -36,7 +36,7 @@ def SVLD1UH_VNUM : MInst<"svld1uh_vnum_{d}", "dPXl", "ilUiUl", [IsLoa
def SVLD1SW_VNUM : MInst<"svld1sw_vnum_{d}", "dPUl", "lUl", [IsLoad, VerifyRuntimeMode], MemEltTyInt32, "aarch64_sve_ld1">;
def SVLD1UW_VNUM : MInst<"svld1uw_vnum_{d}", "dPYl", "lUl", [IsLoad, IsZExtReturn, VerifyRuntimeMode], MemEltTyInt32, "aarch64_sve_ld1">;
-let SVETargetGuard = "sve", SMETargetGuard = InvalidMode in {
+let SMETargetGuard = InvalidMode in {
// Load one vector (vector base)
def SVLD1_GATHER_BASES_U : MInst<"svld1_gather[_{2}base]_{d}", "dPu", "ilUiUlfd", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ld1_gather_scalar_offset">;
def SVLD1SB_GATHER_BASES_U : MInst<"svld1sb_gather[_{2}base]_{d}", "dPu", "ilUiUl", [IsGatherLoad], MemEltTyInt8, "aarch64_sve_ld1_gather_scalar_offset">;
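This hunk and the later gather/scatter, prefetch, FFR, and address-calculation hunks drop the explicit `SVETargetGuard = "sve"` in the same way (again pointing at a new file-level default) while keeping `SMETargetGuard = InvalidMode`, so these operations remain unavailable in streaming mode. A hedged sketch of what that means at the source level:

```c++
#include <arm_sve.h>

// Fine in ordinary (non-streaming) SVE code:
svfloat64_t gather(svbool_t pg, svuint64_t bases) {
  return svld1_gather_u64base_f64(pg, bases);
}

// Still rejected in streaming code, since the builtin keeps
// SMETargetGuard = InvalidMode:
//
//   svfloat64_t bad(svbool_t pg, svuint64_t bases) __arm_streaming {
//     return svld1_gather_u64base_f64(pg, bases); // error in streaming mode
//   }
```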
@@ -134,7 +134,7 @@ def SVLDFF1SW_VNUM : MInst<"svldff1sw_vnum_{d}", "dPUl", "lUl", [I
def SVLDFF1UW_VNUM : MInst<"svldff1uw_vnum_{d}", "dPYl", "lUl", [IsLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldff1">;
}
-let SVETargetGuard = "sve", SMETargetGuard = InvalidMode in {
+let SMETargetGuard = InvalidMode in {
// First-faulting load one vector (vector base)
def SVLDFF1_GATHER_BASES_U : MInst<"svldff1_gather[_{2}base]_{d}", "dPu", "ilUiUlfd", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ldff1_gather_scalar_offset">;
def SVLDFF1SB_GATHER_BASES_U : MInst<"svldff1sb_gather[_{2}base]_{d}", "dPu", "ilUiUl", [IsGatherLoad], MemEltTyInt8, "aarch64_sve_ldff1_gather_scalar_offset">;
@@ -251,15 +251,15 @@ def SVLD3_VNUM : SInst<"svld3_vnum[_{2}]", "3Pcl", "csilUcUsUiUlhfdbm", MergeNon
def SVLD4_VNUM : SInst<"svld4_vnum[_{2}]", "4Pcl", "csilUcUsUiUlhfdbm", MergeNone, "aarch64_sve_ld4_sret", [IsStructLoad, VerifyRuntimeMode]>;
// Load one octoword and replicate (scalar base)
-let SVETargetGuard = "sve,f64mm", SMETargetGuard = InvalidMode in {
+let SVETargetGuard = "f64mm", SMETargetGuard = InvalidMode in {
def SVLD1RO : SInst<"svld1ro[_{2}]", "dPc", "csilUcUsUiUlhfdbm", MergeNone, "aarch64_sve_ld1ro">;
}
-let SVETargetGuard = "sve,bf16", SMETargetGuard = InvalidMode in {
+let SVETargetGuard = "bf16", SMETargetGuard = InvalidMode in {
def SVBFMMLA : SInst<"svbfmmla[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfmmla", [IsOverloadNone]>;
}
-let SVETargetGuard = "sve,bf16", SMETargetGuard = "sme,bf16" in {
+let SVETargetGuard = "bf16", SMETargetGuard = "bf16" in {
def SVBFDOT : SInst<"svbfdot[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfdot", [IsOverloadNone, VerifyRuntimeMode]>;
def SVBFMLALB : SInst<"svbfmlalb[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfmlalb", [IsOverloadNone, VerifyRuntimeMode]>;
def SVBFMLALT : SInst<"svbfmlalt[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfmlalt", [IsOverloadNone, VerifyRuntimeMode]>;
@@ -326,7 +326,7 @@ def SVST1H_VNUM_U : MInst<"svst1h_vnum[_{d}]", "vPFld", "UiUl", [Is
def SVST1W_VNUM_S : MInst<"svst1w_vnum[_{d}]", "vPCld", "l", [IsStore, VerifyRuntimeMode], MemEltTyInt32, "aarch64_sve_st1">;
def SVST1W_VNUM_U : MInst<"svst1w_vnum[_{d}]", "vPGld", "Ul", [IsStore, VerifyRuntimeMode], MemEltTyInt32, "aarch64_sve_st1">;
-let SVETargetGuard = "sve", SMETargetGuard = InvalidMode in {
+let SMETargetGuard = InvalidMode in {
// Store one vector (vector base)
def SVST1_SCATTER_BASES_U : MInst<"svst1_scatter[_{2}base_{d}]", "vPud", "ilUiUlfd", [IsScatterStore], MemEltTyDefault, "aarch64_sve_st1_scatter_scalar_offset">;
def SVST1B_SCATTER_BASES_U : MInst<"svst1b_scatter[_{2}base_{d}]", "vPud", "ilUiUl", [IsScatterStore], MemEltTyInt8, "aarch64_sve_st1_scatter_scalar_offset">;
@@ -464,7 +464,7 @@ def SVPRFH_VNUM : MInst<"svprfh_vnum", "vPQlJ", "s", [IsPrefetch, VerifyRuntimeM
def SVPRFW_VNUM : MInst<"svprfw_vnum", "vPQlJ", "i", [IsPrefetch, VerifyRuntimeMode], MemEltTyInt32, "aarch64_sve_prf">;
def SVPRFD_VNUM : MInst<"svprfd_vnum", "vPQlJ", "l", [IsPrefetch, VerifyRuntimeMode], MemEltTyInt64, "aarch64_sve_prf">;
-let SVETargetGuard = "sve", SMETargetGuard = InvalidMode in {
+let SMETargetGuard = InvalidMode in {
// Prefetch (Vector bases)
def SVPRFB_GATHER_BASES : MInst<"svprfb_gather[_{2}base]", "vPdJ", "UiUl", [IsGatherPrefetch], MemEltTyInt8, "aarch64_sve_prfb_gather_scalar_offset">;
def SVPRFH_GATHER_BASES : MInst<"svprfh_gather[_{2}base]", "vPdJ", "UiUl", [IsGatherPrefetch], MemEltTyInt16, "aarch64_sve_prfh_gather_scalar_offset">;
@@ -502,7 +502,7 @@ def SVPRFD_GATHER_BASES_OFFSET : MInst<"svprfd_gather[_{2}base]_index", "vPdlJ"
////////////////////////////////////////////////////////////////////////////////
// Address calculations
-let SVETargetGuard = "sve", SMETargetGuard = InvalidMode in {
+let SMETargetGuard = InvalidMode in {
def SVADRB : SInst<"svadrb[_{0}base]_[{2}]offset", "uud", "ilUiUl", MergeNone, "aarch64_sve_adrb">;
def SVADRH : SInst<"svadrh[_{0}base]_[{2}]index", "uud", "ilUiUl", MergeNone, "aarch64_sve_adrh">;
def SVADRW : SInst<"svadrw[_{0}base]_[{2}]index", "uud", "ilUiUl", MergeNone, "aarch64_sve_adrw">;
@@ -778,11 +778,11 @@ defm SVRINTX : SInstZPZ<"svrintx", "hfd", "aarch64_sve_frintx">;
defm SVRINTZ : SInstZPZ<"svrintz", "hfd", "aarch64_sve_frintz">;
defm SVSQRT : SInstZPZ<"svsqrt", "hfd", "aarch64_sve_fsqrt">;
-let SVETargetGuard = "sve", SMETargetGuard = "sme2,ssve-fexpa" in {
+let SMETargetGuard = "sme2,ssve-fexpa" in {
def SVEXPA : SInst<"svexpa[_{d}]", "du", "hfd", MergeNone, "aarch64_sve_fexpa_x", [VerifyRuntimeMode]>;
}
-let SVETargetGuard = "sve", SMETargetGuard = InvalidMode in {
+let SMETargetGuard = InvalidMode in {
def SVTMAD : SInst<"svtmad[_{d}]", "dddi", "hfd", MergeNone, "aarch64_sve_ftmad_x", [], [ImmCheck<2, ImmCheck0_7>]>;
def SVTSMUL : SInst<"svtsmul[_{d}]", "ddu", "hfd", MergeNone, "aarch64_sve_ftsmul_x">;
def SVTSSEL : SInst<"svtssel[_{d}]", "ddu", "hfd", MergeNone, "aarch64_sve_ftssel_x">;
@@ -825,7 +825,7 @@ def SVRSQRTS : SInst<"svrsqrts[_{d}]", "ddd", "hfd", MergeNone, "aarch64_sve_frs
////////////////////////////////////////////////////////////////////////////////
// Floating-point reductions
-let SVETargetGuard = "sve", SMETargetGuard = InvalidMode in {
+let SMETargetGuard = InvalidMode in {
def SVFADDA : SInst<"svadda[_{d}]", "sPsd", "hfd", MergeNone, "aarch64_sve_fadda">;
}
@@ -946,14 +946,14 @@ defm SVFCVT_F32_F64 : SInstCvtMXZ<"svcvt_f32[_f64]", "MMPd", "MPd", "d", "aarc
defm SVFCVT_F64_F16 : SInstCvtMXZ<"svcvt_f64[_f16]", "ddPO", "dPO", "d", "aarch64_sve_fcvt_f64f16">;
defm SVFCVT_F64_F32 : SInstCvtMXZ<"svcvt_f64[_f32]", "ddPM", "dPM", "d", "aarch64_sve_fcvt_f64f32">;
-let SVETargetGuard = "sve,bf16", SMETargetGuard = "sme,bf16" in {
+let SVETargetGuard = "bf16", SMETargetGuard = "bf16" in {
defm SVCVT_BF16_F32 : SInstCvtMXZ<"svcvt_bf16[_f32]", "$$Pd", "$Pd", "f", "aarch64_sve_fcvt_bf16f32_v2">;
def SVCVTNT_BF16_F32 : SInst<"svcvtnt_bf16[_f32]", "$$Pd", "f", MergeOp1, "aarch64_sve_fcvtnt_bf16f32_v2", [IsOverloadNone, VerifyRuntimeMode]>;
// SVCVTNT_X_BF16_F32 : Implemented as macro by SveEmitter.cpp
}
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
defm SVCVTLT_F32_F16 : SInstCvtMX<"svcvtlt_f32[_f16]", "ddPh", "dPh", "f", "aarch64_sve_fcvtlt_f32f16">;
defm SVCVTLT_F64_F32 : SInstCvtMX<"svcvtlt_f64[_f32]", "ddPh", "dPh", "d", "aarch64_sve_fcvtlt_f64f32">;
@@ -980,8 +980,8 @@ defm SVCLASTA_N : SVEPerm<"svclasta[_n_{d}]", "sPsd", "aarch64_sve_clasta_n">;
defm SVCLASTB : SVEPerm<"svclastb[_{d}]", "dPdd", "aarch64_sve_clastb">;
defm SVCLASTB_N : SVEPerm<"svclastb[_n_{d}]", "sPsd", "aarch64_sve_clastb_n">;
-let SVETargetGuard = "sve", SMETargetGuard = InvalidMode in {
-def SVCOMPACT : SInst<"svcompact[_{d}]", "dPd", "ilUiUlfd", MergeNone, "aarch64_sve_compact">;
+let SMETargetGuard = "sme2p2" in {
+def SVCOMPACT : SInst<"svcompact[_{d}]", "dPd", "ilUiUlfd", MergeNone, "aarch64_sve_compact", [VerifyRuntimeMode]>;
}
// Note: svdup_lane is implemented using the intrinsic for TBL to represent a
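SVCOMPACT is the one definition in this stretch that gains rather than loses availability: it moves from `SMETargetGuard = InvalidMode` to "sme2p2" and picks up VerifyRuntimeMode, so a single definition now serves both execution modes. A minimal sketch, assuming a target with +sve for the non-streaming case and +sme2p2 for the streaming one:

```c++
#include <arm_sve.h>

// With this patch the same definition also serves streaming mode when the
// target provides sme2p2; plain +sve still suffices outside streaming code.
svint32_t keep_active(svbool_t pg, svint32_t v) __arm_streaming {
  return svcompact_s32(pg, v);
}
```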
@@ -1088,7 +1088,7 @@ def SVPTEST_LAST : SInst<"svptest_last", "sPP", "Pc", MergeNone, "aarch64_sve_
////////////////////////////////////////////////////////////////////////////////
// FFR manipulation
-let SVETargetGuard = "sve", SMETargetGuard = InvalidMode in {
+let SMETargetGuard = InvalidMode in {
def SVRDFFR : SInst<"svrdffr", "Pv", "Pc", MergeNone, "", [IsOverloadNone]>;
def SVRDFFR_Z : SInst<"svrdffr_z", "PP", "Pc", MergeNone, "", [IsOverloadNone]>;
def SVSETFFR : SInst<"svsetffr", "vv", "", MergeNone, "", [IsOverloadNone]>;
@@ -1173,13 +1173,13 @@ def SVQINCP_N_S64 : SInst<"svqincp[_n_s64]_{d}", "llP", "PcPsPiPl", MergeNone, "
def SVQINCP_N_U32 : SInst<"svqincp[_n_u32]_{d}", "mmP", "PcPsPiPl", MergeNone, "aarch64_sve_uqincp_n32", [VerifyRuntimeMode]>;
def SVQINCP_N_U64 : SInst<"svqincp[_n_u64]_{d}", "nnP", "PcPsPiPl", MergeNone, "aarch64_sve_uqincp_n64", [VerifyRuntimeMode]>;
-let SVETargetGuard = "sve,i8mm", SMETargetGuard = InvalidMode in {
+let SVETargetGuard = "i8mm", SMETargetGuard = InvalidMode in {
def SVMLLA_S32 : SInst<"svmmla[_s32]", "ddqq","i", MergeNone, "aarch64_sve_smmla">;
def SVMLLA_U32 : SInst<"svmmla[_u32]", "ddqq","Ui", MergeNone, "aarch64_sve_ummla">;
def SVUSMLLA_S32 : SInst<"svusmmla[_s32]", "ddbq","i", MergeNone, "aarch64_sve_usmmla">;
}
-let SVETargetGuard = "sve,i8mm", SMETargetGuard = "sme,i8mm"in {
+let SVETargetGuard = "i8mm", SMETargetGuard = "i8mm"in {
def SVUSDOT_S : SInst<"svusdot[_s32]", "ddbq", "i", MergeNone, "aarch64_sve_usdot", [VerifyRuntimeMode]>;
def SVUSDOT_N_S : SInst<"svusdot[_n_s32]", "ddbr", "i", MergeNone, "aarch64_sve_usdot", [VerifyRuntimeMode]>;
def SVSUDOT_S : SInst<"svsudot[_s32]", "ddqb", "i", MergeNone, "aarch64_sve_usdot", [ReverseUSDOT, VerifyRuntimeMode]>;
@@ -1189,11 +1189,11 @@ def SVUSDOT_LANE_S : SInst<"svusdot_lane[_s32]", "ddbqi", "i", MergeNone, "aarc
def SVSUDOT_LANE_S : SInst<"svsudot_lane[_s32]", "ddqbi", "i", MergeNone, "aarch64_sve_sudot_lane", [VerifyRuntimeMode], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
}
-let SVETargetGuard = "sve,f32mm", SMETargetGuard = InvalidMode in {
+let SVETargetGuard = "f32mm", SMETargetGuard = InvalidMode in {
def SVMLLA_F32 : SInst<"svmmla[_f32]", "dddd","f", MergeNone, "aarch64_sve_fmmla">;
}
-let SVETargetGuard = "sve,f64mm", SMETargetGuard = InvalidMode in {
+let SVETargetGuard = "f64mm", SMETargetGuard = InvalidMode in {
def SVMLLA_F64 : SInst<"svmmla[_f64]", "dddd", "d", MergeNone, "aarch64_sve_fmmla">;
def SVTRN1Q : SInst<"svtrn1q[_{d}]", "ddd", "csilUcUsUiUlhfdb", MergeNone, "aarch64_sve_trn1q">;
@@ -1243,7 +1243,7 @@ let SVETargetGuard = "sve2p1|sme2", SMETargetGuard = "sve2p1|sme2" in {
////////////////////////////////////////////////////////////////////////////////
// SVE2 WhileGE/GT
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVWHILEGE_S32 : SInst<"svwhilege_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilege", [IsOverloadWhileOrMultiVecCvt, VerifyRuntimeMode]>;
def SVWHILEGE_S64 : SInst<"svwhilege_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilege", [IsOverloadWhileOrMultiVecCvt, VerifyRuntimeMode]>;
def SVWHILEGT_S32 : SInst<"svwhilegt_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilegt", [IsOverloadWhileOrMultiVecCvt, VerifyRuntimeMode]>;
@@ -1268,7 +1268,7 @@ let SVETargetGuard = "sve2p1|sme2", SMETargetGuard = "sve2p1|sme2" in {
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Uniform DSP operations
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
defm SVQADD_S : SInstZPZZ<"svqadd", "csli", "aarch64_sve_sqadd", "aarch64_sve_sqadd">;
defm SVQADD_U : SInstZPZZ<"svqadd", "UcUsUiUl", "aarch64_sve_uqadd", "aarch64_sve_uqadd">;
defm SVHADD_S : SInstZPZZ<"svhadd", "csli", "aarch64_sve_shadd", "aarch64_sve_shadd">;
@@ -1303,7 +1303,7 @@ multiclass SInstZPZxZ<string name, string types, string pat_v, string pat_n, str
def _N_Z : SInst<name # "[_n_{d}]", pat_n, types, MergeZero, intrinsic, flags>;
}
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
defm SVQRSHL_S : SInstZPZxZ<"svqrshl", "csil", "dPdx", "dPdK", "aarch64_sve_sqrshl", [VerifyRuntimeMode]>;
defm SVQRSHL_U : SInstZPZxZ<"svqrshl", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_uqrshl", [VerifyRuntimeMode]>;
defm SVQSHL_S : SInstZPZxZ<"svqshl", "csil", "dPdx", "dPdK", "aarch64_sve_sqshl", [VerifyRuntimeMode]>;
@@ -1357,7 +1357,7 @@ multiclass SInstPairwise<string name, string types, string intrinsic, list<FlagT
def _X : SInst<name # "[_{d}]", "dPdd", types, MergeAny, intrinsic, flags>;
}
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
defm SVADDP : SInstPairwise<"svaddp", "csliUcUsUiUl", "aarch64_sve_addp", [VerifyRuntimeMode]>;
defm SVADDP_F : SInstPairwise<"svaddp", "hfd", "aarch64_sve_faddp", [VerifyRuntimeMode]>;
defm SVMAXNMP : SInstPairwise<"svmaxnmp", "hfd", "aarch64_sve_fmaxnmp", [VerifyRuntimeMode]>;
@@ -1373,7 +1373,7 @@ defm SVMINP_U : SInstPairwise<"svminp", "UcUsUiUl", "aarch64_sve_uminp", [
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Widening pairwise arithmetic
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVADALP_S_M : SInst<"svadalp[_{d}]", "dPdh", "sil", MergeOp1, "aarch64_sve_sadalp", [VerifyRuntimeMode]>;
def SVADALP_S_X : SInst<"svadalp[_{d}]", "dPdh", "sil", MergeAny, "aarch64_sve_sadalp", [VerifyRuntimeMode]>;
def SVADALP_S_Z : SInst<"svadalp[_{d}]", "dPdh", "sil", MergeZero, "aarch64_sve_sadalp", [VerifyRuntimeMode]>;
@@ -1387,7 +1387,7 @@ def SVADALP_U_Z : SInst<"svadalp[_{d}]", "dPdh", "UsUiUl", MergeZero, "aarch64_s
// SVE2 - Bitwise ternary logical instructions
//
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVBCAX : SInst<"svbcax[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bcax", [VerifyRuntimeMode]>;
def SVBSL : SInst<"svbsl[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl", [VerifyRuntimeMode]>;
def SVBSL1N : SInst<"svbsl1n[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl1n", [VerifyRuntimeMode]>;
@@ -1407,7 +1407,7 @@ def SVXAR_N : SInst<"svxar[_n_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aar
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Large integer arithmetic
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVADCLB : SInst<"svadclb[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_adclb", [VerifyRuntimeMode]>;
def SVADCLT : SInst<"svadclt[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_adclt", [VerifyRuntimeMode]>;
def SVSBCLB : SInst<"svsbclb[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_sbclb", [VerifyRuntimeMode]>;
@@ -1422,7 +1422,7 @@ def SVSBCLT_N : SInst<"svsbclt[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Multiplication by indexed elements
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVMLA_LANE_2 : SInst<"svmla_lane[_{d}]", "ddddi", "silUsUiUl", MergeNone, "aarch64_sve_mla_lane", [VerifyRuntimeMode], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
def SVMLS_LANE_2 : SInst<"svmls_lane[_{d}]", "ddddi", "silUsUiUl", MergeNone, "aarch64_sve_mls_lane", [VerifyRuntimeMode], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
def SVMUL_LANE_2 : SInst<"svmul_lane[_{d}]", "dddi", "silUsUiUl", MergeNone, "aarch64_sve_mul_lane", [VerifyRuntimeMode], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
@@ -1430,7 +1430,7 @@ def SVMUL_LANE_2 : SInst<"svmul_lane[_{d}]", "dddi", "silUsUiUl", MergeNone, "a
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Uniform complex integer arithmetic
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVCADD : SInst<"svcadd[_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_cadd_x", [VerifyRuntimeMode], [ImmCheck<2, ImmCheckComplexRot90_270>]>;
def SVSQCADD : SInst<"svqcadd[_{d}]", "dddi", "csil", MergeNone, "aarch64_sve_sqcadd_x", [VerifyRuntimeMode], [ImmCheck<2, ImmCheckComplexRot90_270>]>;
def SVCMLA : SInst<"svcmla[_{d}]", "ddddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmla_x", [VerifyRuntimeMode], [ImmCheck<3, ImmCheckComplexRotAll90>]>;
@@ -1457,7 +1457,7 @@ multiclass SInstWideDSPWide<string name, string types, string intrinsic> {
def _N : SInst<name # "[_n_{d}]", "ddR", types, MergeNone, intrinsic, [VerifyRuntimeMode]>;
}
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
defm SVABALB_S : SInstWideDSPAcc<"svabalb", "sil", "aarch64_sve_sabalb">;
defm SVABALB_U : SInstWideDSPAcc<"svabalb", "UsUiUl", "aarch64_sve_uabalb">;
defm SVABALT_S : SInstWideDSPAcc<"svabalt", "sil", "aarch64_sve_sabalt">;
@@ -1536,7 +1536,7 @@ def SVQDMULLT_LANE : SInst<"svqdmullt_lane[_{d}]", "dhhi", "il", MergeNone, "
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Narrowing DSP operations
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVADDHNB : SInst<"svaddhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_addhnb", [VerifyRuntimeMode]>;
def SVADDHNT : SInst<"svaddhnt[_{d}]", "hhdd", "silUsUiUl", MergeNone, "aarch64_sve_addhnt", [VerifyRuntimeMode]>;
def SVRADDHNB : SInst<"svraddhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_raddhnb", [VerifyRuntimeMode]>;
@@ -1576,7 +1576,7 @@ def SVQRSHRNT_U : SInst<"svqrshrnt[_n_{d}]", "hhdi", "UsUiUl", MergeNone, "a
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Unary narrowing operations
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVQXTNB_S : SInst<"svqxtnb[_{d}]", "hd", "sil", MergeNone, "aarch64_sve_sqxtnb", [VerifyRuntimeMode]>;
def SVQXTNB_U : SInst<"svqxtnb[_{d}]", "hd", "UsUiUl", MergeNone, "aarch64_sve_uqxtnb", [VerifyRuntimeMode]>;
def SVQXTUNB_S : SInst<"svqxtunb[_{d}]", "ed", "sil", MergeNone, "aarch64_sve_sqxtunb", [VerifyRuntimeMode]>;
@@ -1589,7 +1589,7 @@ def SVQXTUNT_S : SInst<"svqxtunt[_{d}]", "eed", "sil", MergeNone, "aarch64_sv
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Widening complex integer arithmetic
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
defm SVADDLBT : SInstWideDSPLong<"svaddlbt", "sil", "aarch64_sve_saddlbt">;
defm SVSUBLBT : SInstWideDSPLong<"svsublbt", "sil", "aarch64_sve_ssublbt">;
defm SVSUBLTB : SInstWideDSPLong<"svsubltb", "sil", "aarch64_sve_ssubltb">;
@@ -1723,7 +1723,7 @@ def SVSTNT1W_SCATTER_INDEX_S : MInst<"svstnt1w_scatter[_{2}base]_index[_{d}]", "
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Polynomial arithmetic
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVEORBT : SInst<"sveorbt[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eorbt", [VerifyRuntimeMode]>;
def SVEORBT_N : SInst<"sveorbt[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_eorbt", [VerifyRuntimeMode]>;
def SVEORTB : SInst<"sveortb[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eortb", [VerifyRuntimeMode]>;
@@ -1744,7 +1744,7 @@ def SVPMULLT_PAIR_N : SInst<"svpmullt_pair[_n_{d}]", "dda", "UcUi", MergeNone,
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Complex integer dot product
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVCDOT : SInst<"svcdot[_{d}]", "ddqqi", "il", MergeNone, "aarch64_sve_cdot", [VerifyRuntimeMode], [ImmCheck<3, ImmCheckComplexRotAll90>]>;
def SVCDOT_LANE : SInst<"svcdot_lane[_{d}]", "ddqqii", "il", MergeNone, "aarch64_sve_cdot_lane", [VerifyRuntimeMode], [ImmCheck<4, ImmCheckComplexRotAll90>, ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
}
@@ -1752,7 +1752,7 @@ def SVCDOT_LANE : SInst<"svcdot_lane[_{d}]", "ddqqii", "il", MergeNone, "aarch
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Floating-point widening multiply-accumulate
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVMLALB_F : SInst<"svmlalb[_{d}]", "ddhh", "f", MergeNone, "aarch64_sve_fmlalb", [VerifyRuntimeMode]>;
def SVMLALB_F_N : SInst<"svmlalb[_n_{d}]", "ddhR", "f", MergeNone, "aarch64_sve_fmlalb", [VerifyRuntimeMode]>;
def SVMLALB_F_LANE : SInst<"svmlalb_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch64_sve_fmlalb_lane", [VerifyRuntimeMode], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
@@ -1770,7 +1770,7 @@ def SVMLSLT_F_LANE : SInst<"svmlslt_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Floating-point integer binary logarithm
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVLOGB_M : SInst<"svlogb[_{d}]", "xxPd", "hfd", MergeOp1, "aarch64_sve_flogb", [VerifyRuntimeMode]>;
def SVLOGB_X : SInst<"svlogb[_{d}]", "xPd", "hfd", MergeAnyExp, "aarch64_sve_flogb", [VerifyRuntimeMode]>;
def SVLOGB_Z : SInst<"svlogb[_{d}]", "xPd", "hfd", MergeZeroExp, "aarch64_sve_flogb", [VerifyRuntimeMode]>;
@@ -1794,7 +1794,7 @@ def SVNMATCH : SInst<"svnmatch[_{d}]", "PPdd", "csUcUs", MergeNone, "aarch64_sve
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Contiguous conflict detection
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVWHILERW_B : SInst<"svwhilerw[_{1}]", "Pcc", "cUc", MergeNone, "aarch64_sve_whilerw_b", [IsOverloadWhileRW, VerifyRuntimeMode]>;
def SVWHILERW_H : SInst<"svwhilerw[_{1}]", "Pcc", "sUshb", MergeNone, "aarch64_sve_whilerw_h", [IsOverloadWhileRW, VerifyRuntimeMode]>;
def SVWHILERW_S : SInst<"svwhilerw[_{1}]", "Pcc", "iUif", MergeNone, "aarch64_sve_whilerw_s", [IsOverloadWhileRW, VerifyRuntimeMode]>;
@@ -1808,7 +1808,7 @@ def SVWHILEWR_D : SInst<"svwhilewr[_{1}]", "Pcc", "lUld", MergeNone, "aarch64_sv
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Extended table lookup/permute
-let SVETargetGuard = "sve2", SMETargetGuard = "sme" in {
+let SVETargetGuard = "sve2" in {
def SVTBL2 : SInst<"svtbl2[_{d}]", "d2u", "csilUcUsUiUlhfdb", MergeNone, "", [VerifyRuntimeMode]>;
def SVTBX : SInst<"svtbx[_{d}]", "dddu", "csilUcUsUiUlhfdb", MergeNone, "aarch64_sve_tbx", [VerifyRuntimeMode]>;
}
@@ -1828,17 +1828,17 @@ let SVETargetGuard = "sve2,lut", SMETargetGuard = "sme2,lut" in {
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Optional
-let SVETargetGuard = "sve2,sve-aes", SMETargetGuard = InvalidMode in {
-def SVAESD : SInst<"svaesd[_{d}]", "ddd", "Uc", MergeNone, "aarch64_sve_aesd", [IsOverloadNone]>;
-def SVAESIMC : SInst<"svaesimc[_{d}]", "dd", "Uc", MergeNone, "aarch64_sve_aesimc", [IsOverloadNone]>;
-def SVAESE : SInst<"svaese[_{d}]", "ddd", "Uc", MergeNone, "aarch64_sve_aese", [IsOverloadNone]>;
-def SVAESMC : SInst<"svaesmc[_{d}]", "dd", "Uc", MergeNone, "aarch64_sve_aesmc", [IsOverloadNone]>;
+let SVETargetGuard = "sve2,sve-aes", SMETargetGuard = "ssve-aes" in {
+def SVAESD : SInst<"svaesd[_{d}]", "ddd", "Uc", MergeNone, "aarch64_sve_aesd", [IsOverloadNone, VerifyRuntimeMode]>;
+def SVAESIMC : SInst<"svaesimc[_{d}]", "dd", "Uc", MergeNone, "aarch64_sve_aesimc", [IsOverloadNone, VerifyRuntimeMode]>;
+def SVAESE : SInst<"svaese[_{d}]", "ddd", "Uc", MergeNone, "aarch64_sve_aese", [IsOverloadNone, VerifyRuntimeMode]>;
+def SVAESMC : SInst<"svaesmc[_{d}]", "dd", "Uc", MergeNone, "aarch64_sve_aesmc", [IsOverloadNone, VerifyRuntimeMode]>;
-def SVPMULLB_PAIR_U64 : SInst<"svpmullb_pair[_{d}]", "ddd", "Ul", MergeNone, "aarch64_sve_pmullb_pair">;
-def SVPMULLB_PAIR_N_U64 : SInst<"svpmullb_pair[_n_{d}]", "dda", "Ul", MergeNone, "aarch64_sve_pmullb_pair">;
+def SVPMULLB_PAIR_U64 : SInst<"svpmullb_pair[_{d}]", "ddd", "Ul", MergeNone, "aarch64_sve_pmullb_pair", [VerifyRuntimeMode]>;
+def SVPMULLB_PAIR_N_U64 : SInst<"svpmullb_pair[_n_{d}]", "dda", "Ul", MergeNone, "aarch64_sve_pmullb_pair", [VerifyRuntimeMode]>;
-def SVPMULLT_PAIR_U64 : SInst<"svpmullt_pair[_{d}]", "ddd", "Ul", MergeNone, "aarch64_sve_pmullt_pair">;
-def SVPMULLT_PAIR_N_U64 : SInst<"svpmullt_pair[_n_{d}]", "dda", "Ul", MergeNone, "aarch64_sve_pmullt_pair">;
+def SVPMULLT_PAIR_U64 : SInst<"svpmullt_pair[_{d}]", "ddd", "Ul", MergeNone, "aarch64_sve_pmullt_pair", [VerifyRuntimeMode]>;
+def SVPMULLT_PAIR_N_U64 : SInst<"svpmullt_pair[_n_{d}]", "dda", "Ul", MergeNone, "aarch64_sve_pmullt_pair", [VerifyRuntimeMode]>;
}
let SVETargetGuard = "sve-sha3", SMETargetGuard = "sve-sha3,sme2p1" in {
@@ -1850,7 +1850,7 @@ def SVSM4E : SInst<"svsm4e[_{d}]", "ddd", "Ui", MergeNone, "aarch64_sve_sm
def SVSM4EKEY : SInst<"svsm4ekey[_{d}]", "ddd", "Ui", MergeNone, "aarch64_sve_sm4ekey", [IsOverloadNone]>;
}
-let SVETargetGuard = "sve2,sve-bitperm", SMETargetGuard = "sme,ssve-bitperm" in {
+let SVETargetGuard = "sve2,sve-bitperm", SMETargetGuard = "ssve-bitperm" in {
def SVBDEP : SInst<"svbdep[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_bdep_x", [VerifyRuntimeMode]>;
def SVBDEP_N : SInst<"svbdep[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_bdep_x", [VerifyRuntimeMode]>;
def SVBEXT : SInst<"svbext[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_bext_x", [VerifyRuntimeMode]>;
@@ -1859,7 +1859,7 @@ def SVBGRP : SInst<"svbgrp[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sv
def SVBGRP_N : SInst<"svbgrp[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_bgrp_x", [VerifyRuntimeMode]>;
}
-let SVETargetGuard = "sve2p1|sme", SMETargetGuard = "sve2p1|sme" in {
+let SVETargetGuard = "sve2p1|sme" in {
def SVPSEL_B : SInst<"svpsel_lane_b8", "PPPm", "Pc", MergeNone, "", [VerifyRuntimeMode], []>;
def SVPSEL_H : SInst<"svpsel_lane_b16", "PPPm", "Ps", MergeNone, "", [VerifyRuntimeMode], []>;
def SVPSEL_S : SInst<"svpsel_lane_b32", "PPPm", "Pi", MergeNone, "", [VerifyRuntimeMode], []>;
@@ -1965,7 +1965,7 @@ def SVDOT_LANE_X2_F : SInst<"svdot_lane[_{d}_{2}]", "ddhhi", "f", MergeNone, "a
def SVFCLAMP : SInst<"svclamp[_{d}]", "dddd", "hfd", MergeNone, "aarch64_sve_fclamp", [VerifyRuntimeMode], []>;
}
-let SVETargetGuard = "sve2p1|sme", SMETargetGuard = "sve2p1|sme" in {
+let SVETargetGuard = "sve2p1|sme" in {
def SVSCLAMP : SInst<"svclamp[_{d}]", "dddd", "csil", MergeNone, "aarch64_sve_sclamp", [VerifyRuntimeMode], []>;
def SVUCLAMP : SInst<"svclamp[_{d}]", "dddd", "UcUsUiUl", MergeNone, "aarch64_sve_uclamp", [VerifyRuntimeMode], []>;
@@ -2340,7 +2340,7 @@ let SVETargetGuard = "sve2,fp8", SMETargetGuard = "sme2,fp8" in {
def SVFCVTNT : SInst<"svcvtnt_mf8[_f32_x2]", "~~2>", "f", MergeNone, "aarch64_sve_fp8_cvtnt", [VerifyRuntimeMode]>;
}
-let SVETargetGuard = "sve2,fp8dot2", SMETargetGuard ="sme,ssve-fp8dot2" in {
+let SVETargetGuard = "sve2,fp8dot2", SMETargetGuard ="ssve-fp8dot2" in {
// 8-bit floating-point dot product to half-precision (vectors)
def SVFDOT_2WAY : SInst<"svdot[_f16_mf8]", "dd~~>", "h", MergeNone, "aarch64_sve_fp8_fdot", [VerifyRuntimeMode]>;
def SVFDOT_N_2WAY : SInst<"svdot[_n_f16_mf8]", "dd~!>", "h", MergeNone, "aarch64_sve_fp8_fdot", [VerifyRuntimeMode]>;
@@ -2349,7 +2349,7 @@ let SVETargetGuard = "sve2,fp8dot2", SMETargetGuard ="sme,ssve-fp8dot2" in {
def SVFDOT_LANE_2WAY : SInst<"svdot_lane[_f16_mf8]", "dd~~i>", "h", MergeNone, "aarch64_sve_fp8_fdot_lane", [VerifyRuntimeMode], [ImmCheck<3, ImmCheck0_7>]>;
}
-let SVETargetGuard = "sve2,fp8dot4", SMETargetGuard ="sme,ssve-fp8dot4" in {
+let SVETargetGuard = "sve2,fp8dot4", SMETargetGuard ="ssve-fp8dot4" in {
// 8-bit floating-point dot product to single-precision (vectors)
def SVFDOT_4WAY : SInst<"svdot[_f32_mf8]", "dd~~>", "f", MergeNone, "aarch64_sve_fp8_fdot", [VerifyRuntimeMode]>;
def SVFDOT_N_4WAY : SInst<"svdot[_n_f32_mf8]", "dd~!>", "f", MergeNone, "aarch64_sve_fp8_fdot", [VerifyRuntimeMode]>;
@@ -2358,7 +2358,7 @@ let SVETargetGuard = "sve2,fp8dot4", SMETargetGuard ="sme,ssve-fp8dot4" in {
def SVFDOT_LANE_4WAY : SInst<"svdot_lane[_f32_mf8]", "dd~~i>", "f", MergeNone, "aarch64_sve_fp8_fdot_lane", [VerifyRuntimeMode], [ImmCheck<3, ImmCheck0_3>]>;
}
-let SVETargetGuard = "sve2,fp8fma", SMETargetGuard = "sme,ssve-fp8fma" in {
+let SVETargetGuard = "sve2,fp8fma", SMETargetGuard = "ssve-fp8fma" in {
// 8-bit floating-point multiply-add long to half-precision (bottom)
def SVFMLALB : SInst<"svmlalb[_f16_mf8]", "dd~~>", "h", MergeNone, "aarch64_sve_fp8_fmlalb", [VerifyRuntimeMode]>;
def SVFMLALB_N : SInst<"svmlalb[_n_f16_mf8]", "dd~!>", "h", MergeNone, "aarch64_sve_fp8_fmlalb", [VerifyRuntimeMode]>;
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 275bb2b..cc5ab38 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -56,34 +56,8 @@ multiclass RVVVLEFFBuiltin<list<string> types> {
SupportOverloading = false,
UnMaskedPolicyScheme = HasPassthruOperand,
ManualCodegen = [{
- {
- if (IsMasked) {
- // Move mask to right before vl.
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
- Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
- Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
- IntrinsicTypes = {ResultType, Ops[4]->getType(), Ops[2]->getType()};
- } else {
- if (PolicyAttrs & RVV_VTA)
- Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
- IntrinsicTypes = {ResultType, Ops[3]->getType(), Ops[1]->getType()};
- }
- Value *NewVL = Ops[2];
- Ops.erase(Ops.begin() + 2);
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
- llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
- // Store new_vl.
- clang::CharUnits Align;
- if (IsMasked)
- Align = CGM.getNaturalPointeeTypeAlignment(E->getArg(E->getNumArgs()-2)->getType());
- else
- Align = CGM.getNaturalPointeeTypeAlignment(E->getArg(1)->getType());
- llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {1});
- Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
- return V;
- }
+ return emitRVVVLEFFBuiltin(this, E, ReturnValue, ResultType, ID, Ops,
+ PolicyAttrs, IsMasked, SegInstSEW);
}] in {
foreach type = types in {
def : RVVBuiltin<"v", "vPCePz", type>;
@@ -139,17 +113,8 @@ multiclass RVVIndexedLoad<string op> {
let HasMaskedOffOperand = false,
MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- if (IsMasked) {
- // Builtin: (mask, ptr, value, vl). Intrinsic: (value, ptr, mask, vl)
- std::swap(Ops[0], Ops[2]);
- } else {
- // Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl)
- std::swap(Ops[0], Ops[1]);
- }
- if (IsMasked)
- IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
- else
- IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType()};
+ return emitRVVVSEMaskBuiltin(this, E, ReturnValue, ResultType, ID, Ops,
+ PolicyAttrs, IsMasked, SegInstSEW);
}] in {
class RVVVSEMaskBuiltin : RVVBuiltin<"m", "0PUem", "c"> {
let Name = "vsm_v";
@@ -177,17 +142,8 @@ multiclass RVVVSSEBuiltin<list<string> types> {
HasMaskedOffOperand = false,
MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- if (IsMasked) {
- // Builtin: (mask, ptr, stride, value, vl). Intrinsic: (value, ptr, stride, mask, vl)
- std::swap(Ops[0], Ops[3]);
- } else {
- // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
- std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
- }
- if (IsMasked)
- IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[4]->getType()};
- else
- IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
+ return emitRVVVSSEBuiltin(this, E, ReturnValue, ResultType, ID, Ops,
+ PolicyAttrs, IsMasked, SegInstSEW);
}] in {
foreach type = types in {
def : RVVBuiltin<"v", "0Petv", type>;
@@ -202,17 +158,8 @@ multiclass RVVIndexedStore<string op> {
let HasMaskedOffOperand = false,
MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- if (IsMasked) {
- // Builtin: (mask, ptr, index, value, vl). Intrinsic: (value, ptr, index, mask, vl)
- std::swap(Ops[0], Ops[3]);
- } else {
- // Builtin: (ptr, index, value, vl). Intrinsic: (value, ptr, index, vl)
- std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
- }
- if (IsMasked)
- IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(), Ops[4]->getType()};
- else
- IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(), Ops[3]->getType()};
+ return emitRVVIndexedStoreBuiltin(this, E, ReturnValue, ResultType, ID,
+ Ops, PolicyAttrs, IsMasked, SegInstSEW);
}] in {
foreach type = TypeList in {
foreach eew_list = EEWList[0-2] in {
@@ -367,28 +314,8 @@ multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> {
MaskedIRName = IR # "_mask",
UnMaskedPolicyScheme = HasPassthruOperand,
ManualCodegen = [{
- {
- if (IsMasked) {
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
- Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
- } else {
- if (PolicyAttrs & RVV_VTA)
- Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
- }
- auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
- Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
-
- if (IsMasked) {
- Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
- // maskedoff, op1, op2, mask, vl, policy
- IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()};
- } else {
- // passthru, op1, op2, vl
- IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()};
- }
- break;
- }
+ return emitRVVPseudoUnaryBuiltin(this, E, ReturnValue, ResultType, ID,
+ Ops, PolicyAttrs, IsMasked, SegInstSEW);
}] in {
def : RVVBuiltin<"v", "vv", type_range>;
}
@@ -400,32 +327,8 @@ multiclass RVVPseudoVNotBuiltin<string IR, string type_range> {
MaskedIRName = IR # "_mask",
UnMaskedPolicyScheme = HasPassthruOperand,
ManualCodegen = [{
- {
- if (IsMasked) {
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
- Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
- } else {
- if (PolicyAttrs & RVV_VTA)
- Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
- }
- auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
- Ops.insert(Ops.begin() + 2,
- llvm::Constant::getAllOnesValue(ElemTy));
- if (IsMasked) {
- Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
- // maskedoff, op1, po2, mask, vl, policy
- IntrinsicTypes = {ResultType,
- ElemTy,
- Ops[4]->getType()};
- } else {
- // passthru, op1, op2, vl
- IntrinsicTypes = {ResultType,
- ElemTy,
- Ops[3]->getType()};
- }
- break;
- }
+ return emitRVVPseudoVNotBuiltin(this, E, ReturnValue, ResultType, ID,
+ Ops, PolicyAttrs, IsMasked, SegInstSEW);
}] in {
def : RVVBuiltin<"v", "vv", type_range>;
def : RVVBuiltin<"Uv", "UvUv", type_range>;
@@ -437,13 +340,8 @@ multiclass RVVPseudoMaskBuiltin<string IR, string type_range> {
IRName = IR,
HasMasked = false,
ManualCodegen = [{
- {
- // op1, vl
- IntrinsicTypes = {ResultType,
- Ops[1]->getType()};
- Ops.insert(Ops.begin() + 1, Ops[0]);
- break;
- }
+ return emitRVVPseudoMaskBuiltin(this, E, ReturnValue, ResultType, ID,
+ Ops, PolicyAttrs, IsMasked, SegInstSEW);
}] in {
def : RVVBuiltin<"m", "mm", type_range>;
}
@@ -455,28 +353,8 @@ multiclass RVVPseudoVFUnaryBuiltin<string IR, string type_range> {
MaskedIRName = IR # "_mask",
UnMaskedPolicyScheme = HasPassthruOperand,
ManualCodegen = [{
- {
- if (IsMasked) {
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
- Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
- Ops.insert(Ops.begin() + 2, Ops[1]);
- Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
- // maskedoff, op1, op2, mask, vl
- IntrinsicTypes = {ResultType,
- Ops[2]->getType(),
- Ops.back()->getType()};
- } else {
- if (PolicyAttrs & RVV_VTA)
- Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
- // op1, po2, vl
- IntrinsicTypes = {ResultType,
- Ops[1]->getType(), Ops[2]->getType()};
- Ops.insert(Ops.begin() + 2, Ops[1]);
- break;
- }
- break;
- }
+ return emitRVVPseudoVFUnaryBuiltin(this, E, ReturnValue, ResultType, ID,
+ Ops, PolicyAttrs, IsMasked, SegInstSEW);
}] in {
def : RVVBuiltin<"v", "vv", type_range>;
}
@@ -490,33 +368,8 @@ multiclass RVVPseudoVWCVTBuiltin<string IR, string MName, string type_range,
MaskedIRName = IR # "_mask",
UnMaskedPolicyScheme = HasPassthruOperand,
ManualCodegen = [{
- {
- if (IsMasked) {
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
- Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
- } else {
- if (PolicyAttrs & RVV_VTA)
- Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
- }
- auto ElemTy = cast<llvm::VectorType>(Ops[1]->getType())->getElementType();
- Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
- if (IsMasked) {
- Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
- // maskedoff, op1, op2, mask, vl, policy
- IntrinsicTypes = {ResultType,
- Ops[1]->getType(),
- ElemTy,
- Ops[4]->getType()};
- } else {
- // passtru, op1, op2, vl
- IntrinsicTypes = {ResultType,
- Ops[1]->getType(),
- ElemTy,
- Ops[3]->getType()};
- }
- break;
- }
+ return emitRVVPseudoVWCVTBuiltin(this, E, ReturnValue, ResultType, ID,
+ Ops, PolicyAttrs, IsMasked, SegInstSEW);
}] in {
foreach s_p = suffixes_prototypes in {
def : RVVBuiltin<s_p[0], s_p[1], type_range>;
@@ -532,32 +385,8 @@ multiclass RVVPseudoVNCVTBuiltin<string IR, string MName, string type_range,
MaskedIRName = IR # "_mask",
UnMaskedPolicyScheme = HasPassthruOperand,
ManualCodegen = [{
- {
- if (IsMasked) {
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
- Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
- } else {
- if (PolicyAttrs & RVV_VTA)
- Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
- }
- Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(Ops.back()->getType()));
- if (IsMasked) {
- Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
- // maskedoff, op1, xlen, mask, vl
- IntrinsicTypes = {ResultType,
- Ops[1]->getType(),
- Ops[4]->getType(),
- Ops[4]->getType()};
- } else {
- // passthru, op1, xlen, vl
- IntrinsicTypes = {ResultType,
- Ops[1]->getType(),
- Ops[3]->getType(),
- Ops[3]->getType()};
- }
- break;
- }
+ return emitRVVPseudoVNCVTBuiltin(this, E, ReturnValue, ResultType, ID,
+ Ops, PolicyAttrs, IsMasked, SegInstSEW);
}] in {
foreach s_p = suffixes_prototypes in {
def : RVVBuiltin<s_p[0], s_p[1], type_range>;
@@ -575,17 +404,8 @@ let HasBuiltinAlias = false, HasVL = false, HasMasked = false,
UnMaskedPolicyScheme = NonePolicy, MaskedPolicyScheme = NonePolicy,
Log2LMUL = [0], IRName = "",
ManualCodegen = [{
- {
- LLVMContext &Context = CGM.getLLVMContext();
- llvm::MDBuilder MDHelper(Context);
-
- llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "vlenb")};
- llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
- llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
- llvm::Function *F =
- CGM.getIntrinsic(llvm::Intrinsic::read_register, {SizeTy});
- return Builder.CreateCall(F, Metadata);
- }
+ return emitRVVVlenbBuiltin(this, E, ReturnValue, ResultType, ID, Ops,
+ PolicyAttrs, IsMasked, SegInstSEW);
}] in
{
def vlenb : RVVBuiltin<"", "u", "i">;
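The deleted vlenb body stands apart from the rest: instead of reordering operands it emitted a read of the RISC-V `vlenb` register through the generic read_register intrinsic. A reconstruction of that emission as a free function; the name and signature here are assumptions, but the calls mirror the deleted lines (which went through CGM.getIntrinsic rather than a raw module lookup):

```c++
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"

// Hypothetical free-function form of the deleted codegen: read the vlenb
// register via the generic read_register intrinsic.
llvm::Value *emitReadVlenb(llvm::IRBuilder<> &Builder, llvm::Module &M,
                           llvm::Type *SizeTy) {
  llvm::LLVMContext &Ctx = M.getContext();
  llvm::Metadata *RegStr[] = {llvm::MDString::get(Ctx, "vlenb")};
  llvm::MDNode *RegName = llvm::MDNode::get(Ctx, RegStr);
  llvm::Value *Reg = llvm::MetadataAsValue::get(Ctx, RegName);
  llvm::Function *F = llvm::Intrinsic::getDeclaration(
      &M, llvm::Intrinsic::read_register, {SizeTy});
  return Builder.CreateCall(F, Reg);
}
```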
@@ -660,7 +480,10 @@ let HasBuiltinAlias = false,
HasMasked = false,
MaskedPolicyScheme = NonePolicy,
Log2LMUL = [0],
- ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type
+ ManualCodegen = [{
+ return emitRVVVsetvliBuiltin(this, E, ReturnValue, ResultType, ID, Ops,
+ PolicyAttrs, IsMasked, SegInstSEW);
+ }] in // Set XLEN type
{
def vsetvli : RVVBuiltin<"", "zzKzKz", "i">;
def vsetvlimax : RVVBuiltin<"", "zKzKz", "i">;
@@ -720,43 +543,10 @@ multiclass RVVUnitStridedSegLoadTuple<string op> {
MaskedIRName = op # nf # "_mask",
NF = nf,
ManualCodegen = [{
- {
- SmallVector<llvm::Value*, 6> Operands;
-
- bool NoPassthru =
- (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
- (!IsMasked && (PolicyAttrs & RVV_VTA));
- unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
-
- if (IsMasked)
- IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[0]->getType(), Ops.back()->getType()};
- else
- IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops.back()->getType()};
-
- if (NoPassthru) { // Push poison into passthru
- Operands.push_back(llvm::PoisonValue::get(ResultType));
- } else { // Push intrinsics operands into passthru
- llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
- Operands.push_back(PassthruOperand);
- }
-
- Operands.push_back(Ops[Offset]); // Ptr
- if (IsMasked)
- Operands.push_back(Ops[0]);
- Operands.push_back(Ops[Offset + 1]); // VL
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
-
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
-
- llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- if (ReturnValue.isNull())
- return LoadValue;
- else
- return Builder.CreateStore(LoadValue, ReturnValue.getValue());
- }
- }] in {
+ return emitRVVUnitStridedSegLoadTupleBuiltin(
+ this, E, ReturnValue, ResultType, ID, Ops, PolicyAttrs,
+ IsMasked, SegInstSEW);
+ }] in {
defvar T = "(Tuple:" # nf # ")";
def : RVVBuiltin<T # "v", T # "vPCe", type>;
if !not(IsFloat<type>.val) then {
@@ -784,31 +574,10 @@ multiclass RVVUnitStridedSegStoreTuple<string op> {
NF = nf,
HasMaskedOffOperand = false,
ManualCodegen = [{
- {
- // Masked
- // Builtin: (mask, ptr, v_tuple, vl)
- // Intrinsic: (tuple, ptr, mask, vl)
- // Unmasked
- // Builtin: (ptr, v_tuple, vl)
- // Intrinsic: (tuple, ptr, vl)
- unsigned Offset = IsMasked ? 1 : 0;
-
- SmallVector<llvm::Value*, 5> Operands;
- Operands.push_back(Ops[Offset + 1]); // tuple
- Operands.push_back(Ops[Offset]); // Ptr
- if (IsMasked)
- Operands.push_back(Ops[0]);
- Operands.push_back(Ops[Offset + 2]); // VL
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
-
- if (IsMasked)
- IntrinsicTypes = {Operands[0]->getType(), Ops[Offset]->getType(), Ops[0]->getType(), Operands.back()->getType()};
- else
- IntrinsicTypes = {Operands[0]->getType(), Ops[Offset]->getType(), Operands.back()->getType()};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- return Builder.CreateCall(F, Operands, "");
- }
- }] in {
+ return emitRVVUnitStridedSegStoreTupleBuiltin(
+ this, E, ReturnValue, ResultType, ID, Ops, PolicyAttrs,
+ IsMasked, SegInstSEW);
+ }] in {
defvar T = "(Tuple:" # nf # ")";
def : RVVBuiltin<T # "v", "0Pe" # T # "v", type>;
if !not(IsFloat<type>.val) then {
@@ -835,52 +604,9 @@ multiclass RVVUnitStridedSegLoadFFTuple<string op> {
MaskedIRName = op # nf # "ff_mask",
NF = nf,
ManualCodegen = [{
- {
- SmallVector<llvm::Value*, 6> Operands;
-
- bool NoPassthru =
- (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
- (!IsMasked && (PolicyAttrs & RVV_VTA));
- unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
-
- if (IsMasked)
- IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[Offset]->getType(), Ops[0]->getType()};
- else
- IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[Offset]->getType()};
-
- if (NoPassthru) { // Push poison into passthru
- Operands.push_back(llvm::PoisonValue::get(ResultType));
- } else { // Push intrinsics operands into passthru
- llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
- Operands.push_back(PassthruOperand);
- }
-
- Operands.push_back(Ops[Offset]); // Ptr
- if (IsMasked)
- Operands.push_back(Ops[0]);
- Operands.push_back(Ops[Offset + 2]); // vl
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
-
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
-
- llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- // Get alignment from the new vl operand
- clang::CharUnits Align =
- CGM.getNaturalPointeeTypeAlignment(E->getArg(Offset + 1)->getType());
-
- llvm::Value *ReturnTuple = Builder.CreateExtractValue(LoadValue, 0);
-
- // Store new_vl
- llvm::Value *V = Builder.CreateExtractValue(LoadValue, 1);
- Builder.CreateStore(V, Address(Ops[Offset + 1], V->getType(), Align));
-
- if (ReturnValue.isNull())
- return ReturnTuple;
- else
- return Builder.CreateStore(ReturnTuple, ReturnValue.getValue());
- }
+ return emitRVVUnitStridedSegLoadFFTupleBuiltin(
+ this, E, ReturnValue, ResultType, ID, Ops, PolicyAttrs, IsMasked,
+ SegInstSEW);
}] in {
defvar T = "(Tuple:" # nf # ")";
def : RVVBuiltin<T # "v", T # "vPCePz", type>;
@@ -908,43 +634,9 @@ multiclass RVVStridedSegLoadTuple<string op> {
MaskedIRName = op # nf # "_mask",
NF = nf,
ManualCodegen = [{
- {
- SmallVector<llvm::Value*, 7> Operands;
-
- bool NoPassthru =
- (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
- (!IsMasked && (PolicyAttrs & RVV_VTA));
- unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
-
- if (IsMasked)
- IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops.back()->getType(), Ops[0]->getType()};
- else
- IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops.back()->getType()};
-
- if (NoPassthru) { // Push poison into passthru
- Operands.push_back(llvm::PoisonValue::get(ResultType));
- } else { // Push intrinsics operands into passthru
- llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
- Operands.push_back(PassthruOperand);
- }
-
- Operands.push_back(Ops[Offset]); // Ptr
- Operands.push_back(Ops[Offset + 1]); // Stride
- if (IsMasked)
- Operands.push_back(Ops[0]);
- Operands.push_back(Ops[Offset + 2]); // VL
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
-
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
-
- if (ReturnValue.isNull())
- return LoadValue;
- else
- return Builder.CreateStore(LoadValue, ReturnValue.getValue());
- }
+ return emitRVVStridedSegLoadTupleBuiltin(
+ this, E, ReturnValue, ResultType, ID, Ops, PolicyAttrs, IsMasked,
+ SegInstSEW);
}] in {
defvar T = "(Tuple:" # nf # ")";
def : RVVBuiltin<T # "v", T # "vPCet", type>;
@@ -974,31 +666,9 @@ multiclass RVVStridedSegStoreTuple<string op> {
HasMaskedOffOperand = false,
MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- {
- // Masked
- // Builtin: (mask, ptr, stride, v_tuple, vl)
- // Intrinsic: (tuple, ptr, stride, mask, vl)
- // Unmasked
- // Builtin: (ptr, stride, v_tuple, vl)
- // Intrinsic: (tuple, ptr, stride, vl)
- unsigned Offset = IsMasked ? 1 : 0;
-
- SmallVector<llvm::Value*, 6> Operands;
- Operands.push_back(Ops[Offset + 2]); // tuple
- Operands.push_back(Ops[Offset]); // Ptr
- Operands.push_back(Ops[Offset + 1]); // Stride
- if (IsMasked)
- Operands.push_back(Ops[0]);
- Operands.push_back(Ops[Offset + 3]); // VL
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
-
- if (IsMasked)
- IntrinsicTypes = {Operands[0]->getType(), Operands[1]->getType(), Operands.back()->getType(), Ops[0]->getType()};
- else
- IntrinsicTypes = {Operands[0]->getType(), Operands[1]->getType(), Operands.back()->getType()};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- return Builder.CreateCall(F, Operands, "");
- }
+ return emitRVVStridedSegStoreTupleBuiltin(
+ this, E, ReturnValue, ResultType, ID, Ops, PolicyAttrs, IsMasked,
+ SegInstSEW);
}] in {
defvar T = "(Tuple:" # nf # ")";
def : RVVBuiltin<T # "v", "0Pet" # T # "v", type>;
@@ -1021,47 +691,9 @@ multiclass RVVIndexedSegLoadTuple<string op> {
MaskedIRName = op # nf # "_mask",
NF = nf,
ManualCodegen = [{
- {
- SmallVector<llvm::Value*, 7> Operands;
-
- bool NoPassthru =
- (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
- (!IsMasked && (PolicyAttrs & RVV_VTA));
- unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
-
- if (NoPassthru) { // Push poison into passthru
- Operands.push_back(llvm::PoisonValue::get(ResultType));
- } else { // Push intrinsics operands into passthru
- llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
- Operands.push_back(PassthruOperand);
- }
-
- Operands.push_back(Ops[Offset]); // Ptr
- Operands.push_back(Ops[Offset + 1]); // Idx
- if (IsMasked)
- Operands.push_back(Ops[0]);
- Operands.push_back(Ops[Offset + 2]); // VL
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
-
- if (IsMasked)
- IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
- Ops[Offset + 1]->getType(),
- Ops[0]->getType(),
- Ops.back()->getType()};
- else
- IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
- Ops[Offset + 1]->getType(),
- Ops.back()->getType()};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
-
- if (ReturnValue.isNull())
- return LoadValue;
- else
- return Builder.CreateStore(LoadValue, ReturnValue.getValue());
- }
+ return emitRVVIndexedSegLoadTupleBuiltin(
+ this, E, ReturnValue, ResultType, ID, Ops, PolicyAttrs, IsMasked,
+ SegInstSEW);
}] in {
defvar T = "(Tuple:" # nf # ")";
def : RVVBuiltin<T # "v", T # "vPCe" # eew_type # "Uv", type>;
@@ -1087,34 +719,9 @@ multiclass RVVIndexedSegStoreTuple<string op> {
HasMaskedOffOperand = false,
MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- {
- // Masked
- // Builtin: (mask, ptr, index, v_tuple, vl)
- // Intrinsic: (tuple, ptr, index, mask, vl)
- // Unmasked
- // Builtin: (ptr, index, v_tuple, vl)
- // Intrinsic: (tuple, ptr, index, vl)
- unsigned Offset = IsMasked ? 1 : 0;
-
- SmallVector<llvm::Value*, 6> Operands;
- Operands.push_back(Ops[Offset + 2]); // tuple
- Operands.push_back(Ops[Offset]); // Ptr
- Operands.push_back(Ops[Offset + 1]); // Idx
- if (IsMasked)
- Operands.push_back(Ops[0]);
- Operands.push_back(Ops[Offset + 3]); // VL
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
-
- if (IsMasked)
- IntrinsicTypes = {Operands[0]->getType(), Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
- Ops[0]->getType(),
- Operands.back()->getType()};
- else
- IntrinsicTypes = {Operands[0]->getType(), Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
- Operands.back()->getType()};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- return Builder.CreateCall(F, Operands, "");
- }
+ return emitRVVIndexedSegStoreTupleBuiltin(
+ this, E, ReturnValue, ResultType, ID, Ops, PolicyAttrs, IsMasked,
+ SegInstSEW);
}] in {
defvar T = "(Tuple:" # nf # ")";
def : RVVBuiltin<T # "v", "0Pe" # eew_type # "Uv" # T # "v", type>;
@@ -1355,37 +962,8 @@ defm vssub : RVVSignedBinBuiltinSet;
let ManualCodegen = [{
{
- // LLVM intrinsic
- // Unmasked: (passthru, op0, op1, round_mode, vl)
- // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
-
- SmallVector<llvm::Value*, 7> Operands;
- bool HasMaskedOff = !(
- (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
- (!IsMasked && PolicyAttrs & RVV_VTA));
- unsigned Offset = IsMasked ?
- (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
-
- if (!HasMaskedOff)
- Operands.push_back(llvm::PoisonValue::get(ResultType));
- else
- Operands.push_back(Ops[IsMasked ? 1 : 0]);
-
- Operands.push_back(Ops[Offset]); // op0
- Operands.push_back(Ops[Offset + 1]); // op1
-
- if (IsMasked)
- Operands.push_back(Ops[0]); // mask
-
- Operands.push_back(Ops[Offset + 2]); // vxrm
- Operands.push_back(Ops[Offset + 3]); // vl
-
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-
- IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), Ops.back()->getType()};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- return Builder.CreateCall(F, Operands, "");
+ return emitRVVAveragingBuiltin(this, E, ReturnValue, ResultType, ID, Ops,
+ PolicyAttrs, IsMasked, SegInstSEW);
}
}] in {
// 12.2. Vector Single-Width Averaging Add and Subtract
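The fixed-point and floating-point hunks in the remainder of the patch all opened with the same two computations before assembling the intrinsic call: whether a maskedoff (passthru) operand is present, and where op0 therefore sits in the builtin's operand list. Isolated, with stand-in values for the policy bits, that logic is:

```c++
// Stand-ins for the real RVV policy bits; the values are illustrative.
constexpr unsigned RVV_VTA = 0x1;
constexpr unsigned RVV_VMA = 0x2;

struct OperandLayout {
  bool HasMaskedOff; // a passthru (maskedoff) operand is present in Ops
  unsigned Offset;   // index of op0 within the builtin's operand list
};

// Same computation the deleted bodies repeated at their top.
OperandLayout classifyOperands(bool IsMasked, unsigned PolicyAttrs) {
  bool HasMaskedOff =
      !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
        (!IsMasked && (PolicyAttrs & RVV_VTA)));
  unsigned Offset =
      IsMasked ? (HasMaskedOff ? 2u : 1u) : (HasMaskedOff ? 1u : 0u);
  return {HasMaskedOff, Offset};
}
```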
@@ -1404,38 +982,8 @@ let ManualCodegen = [{
let ManualCodegen = [{
{
- // LLVM intrinsic
- // Unmasked: (passthru, op0, op1, round_mode, vl)
- // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
-
- SmallVector<llvm::Value*, 7> Operands;
- bool HasMaskedOff = !(
- (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
- (!IsMasked && PolicyAttrs & RVV_VTA));
- unsigned Offset = IsMasked ?
- (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
-
- if (!HasMaskedOff)
- Operands.push_back(llvm::PoisonValue::get(ResultType));
- else
- Operands.push_back(Ops[IsMasked ? 1 : 0]);
-
- Operands.push_back(Ops[Offset]); // op0
- Operands.push_back(Ops[Offset + 1]); // op1
-
- if (IsMasked)
- Operands.push_back(Ops[0]); // mask
-
- Operands.push_back(Ops[Offset + 2]); // vxrm
- Operands.push_back(Ops[Offset + 3]); // vl
-
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-
- IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
- Ops.back()->getType()};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- return Builder.CreateCall(F, Operands, "");
+ return emitRVVNarrowingClipBuiltin(this, E, ReturnValue, ResultType, ID,
+ Ops, PolicyAttrs, IsMasked, SegInstSEW);
}
}] in {
// 12.5. Vector Narrowing Fixed-Point Clip Instructions
@@ -1459,47 +1007,8 @@ enum __RISCV_FRM {
let UnMaskedPolicyScheme = HasPassthruOperand in {
let ManualCodegen = [{
{
- // LLVM intrinsic
- // Unmasked: (passthru, op0, op1, round_mode, vl)
- // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
-
- SmallVector<llvm::Value*, 7> Operands;
- bool HasMaskedOff = !(
- (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
- (!IsMasked && PolicyAttrs & RVV_VTA));
- bool HasRoundModeOp = IsMasked ?
- (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
- (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
-
- unsigned Offset = IsMasked ?
- (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
-
- if (!HasMaskedOff)
- Operands.push_back(llvm::PoisonValue::get(ResultType));
- else
- Operands.push_back(Ops[IsMasked ? 1 : 0]);
-
- Operands.push_back(Ops[Offset]); // op0
- Operands.push_back(Ops[Offset + 1]); // op1
-
- if (IsMasked)
- Operands.push_back(Ops[0]); // mask
-
- if (HasRoundModeOp) {
- Operands.push_back(Ops[Offset + 2]); // frm
- Operands.push_back(Ops[Offset + 3]); // vl
- } else {
- Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
- Operands.push_back(Ops[Offset + 2]); // vl
- }
-
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-
- IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(),
- Operands.back()->getType()};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- return Builder.CreateCall(F, Operands, "");
+ return emitRVVFloatingPointBuiltin(this, E, ReturnValue, ResultType, ID,
+ Ops, PolicyAttrs, IsMasked, SegInstSEW);
}
}] in {
let HasFRMRoundModeOp = true in {
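One further shared detail in the floating-point hunks: when a builtin variant carries no explicit rounding-mode operand (HasRoundModeOp is false), the deleted bodies inserted the constant 7 before vl, which is FRM_DYN (use the frm CSR) per the __RISCV_FRM enum referenced in the hunk header above. A minimal sketch of that defaulting with stand-in types:

```c++
#include <vector>

using Value = long; // stand-in for llvm::Value *

// Append frm then vl, defaulting frm to 7 (FRM_DYN, i.e. use the frm CSR)
// when the source-level builtin did not pass an explicit rounding mode.
void pushRoundModeAndVL(std::vector<Value> &Operands,
                        const std::vector<Value> &Ops, unsigned Offset,
                        bool HasRoundModeOp) {
  if (HasRoundModeOp) {
    Operands.push_back(Ops[Offset + 2]); // frm
    Operands.push_back(Ops[Offset + 3]); // vl
  } else {
    Operands.push_back(7);               // frm = FRM_DYN
    Operands.push_back(Ops[Offset + 2]); // vl
  }
}
```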
@@ -1536,47 +1045,9 @@ let ManualCodegen = [{
let ManualCodegen = [{
{
- // LLVM intrinsic
- // Unmasked: (passthru, op0, op1, round_mode, vl)
- // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
-
- SmallVector<llvm::Value*, 7> Operands;
- bool HasMaskedOff = !(
- (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
- (!IsMasked && PolicyAttrs & RVV_VTA));
- bool HasRoundModeOp = IsMasked ?
- (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
- (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
-
- unsigned Offset = IsMasked ?
- (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
-
- if (!HasMaskedOff)
- Operands.push_back(llvm::PoisonValue::get(ResultType));
- else
- Operands.push_back(Ops[IsMasked ? 1 : 0]);
-
- Operands.push_back(Ops[Offset]); // op0
- Operands.push_back(Ops[Offset + 1]); // op1
-
- if (IsMasked)
- Operands.push_back(Ops[0]); // mask
-
- if (HasRoundModeOp) {
- Operands.push_back(Ops[Offset + 2]); // frm
- Operands.push_back(Ops[Offset + 3]); // vl
- } else {
- Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
- Operands.push_back(Ops[Offset + 2]); // vl
- }
-
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-
- IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
- Ops.back()->getType()};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- return Builder.CreateCall(F, Operands, "");
+ return emitRVVWideningFloatingPointBuiltin(
+ this, E, ReturnValue, ResultType, ID, Ops, PolicyAttrs, IsMasked,
+ SegInstSEW);
}
}] in {
let HasFRMRoundModeOp = true in {
@@ -1618,39 +1089,8 @@ let ManualCodegen = [{
let UnMaskedPolicyScheme = HasPolicyOperand in {
let ManualCodegen = [{
{
- // LLVM intrinsic
- // Unmasked: (passthru, op0, op1, round_mode, vl)
- // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
-
- SmallVector<llvm::Value*, 7> Operands;
- bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
-
- unsigned Offset = IsMasked ? 2 : 1;
-
- Operands.push_back(Ops[IsMasked ? 1 : 0]); // passthrough
-
- Operands.push_back(Ops[Offset]); // op0
- Operands.push_back(Ops[Offset + 1]); // op1
-
- if (IsMasked)
- Operands.push_back(Ops[0]); // mask
-
- if (HasRoundModeOp) {
- Operands.push_back(Ops[Offset + 2]); // frm
- Operands.push_back(Ops[Offset + 3]); // vl
- } else {
- Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
- Operands.push_back(Ops[Offset + 2]); // vl
- }
-
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-
- IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
- Operands.back()->getType()};
-
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
-
- return Builder.CreateCall(F, Operands, "");
+ return emitRVVFMABuiltin(this, E, ReturnValue, ResultType, ID, Ops,
+ PolicyAttrs, IsMasked, SegInstSEW);
}
}] in {
let HasFRMRoundModeOp = 1 in {
@@ -1677,39 +1117,8 @@ let ManualCodegen = [{
let ManualCodegen = [{
{
- // LLVM intrinsic
- // Unmasked: (passthru, op0, op1, round_mode, vl)
- // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
-
- SmallVector<llvm::Value*, 7> Operands;
- bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
-
- unsigned Offset = IsMasked ? 2 : 1;
-
- Operands.push_back(Ops[IsMasked ? 1 : 0]); // passthrough
-
- Operands.push_back(Ops[Offset]); // op0
- Operands.push_back(Ops[Offset + 1]); // op1
-
- if (IsMasked)
- Operands.push_back(Ops[0]); // mask
-
- if (HasRoundModeOp) {
- Operands.push_back(Ops[Offset + 2]); // frm
- Operands.push_back(Ops[Offset + 3]); // vl
- } else {
- Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
- Operands.push_back(Ops[Offset + 2]); // vl
- }
-
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-
- IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
- Operands.back()->getType()};
-
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
-
- return Builder.CreateCall(F, Operands, "");
+ return emitRVVWideningFMABuiltin(this, E, ReturnValue, ResultType, ID,
+ Ops, PolicyAttrs, IsMasked, SegInstSEW);
}
}] in {
let HasFRMRoundModeOp = 1 in {
@@ -1747,45 +1156,8 @@ let ManualCodegen = [{
let UnMaskedPolicyScheme = HasPassthruOperand in {
let ManualCodegen = [{
{
- // LLVM intrinsic
- // Unmasked: (passthru, op0, round_mode, vl)
- // Masked: (passthru, op0, mask, frm, vl, policy)
-
- SmallVector<llvm::Value*, 6> Operands;
- bool HasMaskedOff = !(
- (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
- (!IsMasked && PolicyAttrs & RVV_VTA));
- bool HasRoundModeOp = IsMasked ?
- (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4) :
- (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
-
- unsigned Offset = IsMasked ?
- (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
-
- if (!HasMaskedOff)
- Operands.push_back(llvm::PoisonValue::get(ResultType));
- else
- Operands.push_back(Ops[IsMasked ? 1 : 0]);
-
- Operands.push_back(Ops[Offset]); // op0
-
- if (IsMasked)
- Operands.push_back(Ops[0]); // mask
-
- if (HasRoundModeOp) {
- Operands.push_back(Ops[Offset + 1]); // frm
- Operands.push_back(Ops[Offset + 2]); // vl
- } else {
- Operands.push_back(ConstantInt::get(Ops[Offset + 1]->getType(), 7)); // frm
- Operands.push_back(Ops[Offset + 1]); // vl
- }
-
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-
- IntrinsicTypes = {ResultType, Operands.back()->getType()};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- return Builder.CreateCall(F, Operands, "");
+ return emitRVVFloatingUnaryBuiltin(this, E, ReturnValue, ResultType, ID,
+ Ops, PolicyAttrs, IsMasked, SegInstSEW);
}
}] in {
let HasFRMRoundModeOp = 1 in {
@@ -1947,45 +1319,8 @@ def vfwcvtbf16_f_f_v : RVVConvBuiltin<"Fw", "Fwv", "y", "vfwcvtbf16_f">;
let ManualCodegen = [{
{
- // LLVM intrinsic
- // Unmasked: (passthru, op0, frm, vl)
- // Masked: (passthru, op0, mask, frm, vl, policy)
- SmallVector<llvm::Value*, 6> Operands;
- bool HasMaskedOff = !(
- (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
- (!IsMasked && PolicyAttrs & RVV_VTA));
- bool HasRoundModeOp = IsMasked ?
- (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4) :
- (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
-
- unsigned Offset = IsMasked ?
- (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
-
- if (!HasMaskedOff)
- Operands.push_back(llvm::PoisonValue::get(ResultType));
- else
- Operands.push_back(Ops[IsMasked ? 1 : 0]);
-
- Operands.push_back(Ops[Offset]); // op0
-
- if (IsMasked)
- Operands.push_back(Ops[0]); // mask
-
- if (HasRoundModeOp) {
- Operands.push_back(Ops[Offset + 1]); // frm
- Operands.push_back(Ops[Offset + 2]); // vl
- } else {
- Operands.push_back(ConstantInt::get(Ops[Offset + 1]->getType(), 7)); // frm
- Operands.push_back(Ops[Offset + 1]); // vl
- }
-
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-
- IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
- Operands.back()->getType()};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- return Builder.CreateCall(F, Operands, "");
+ return emitRVVFloatingConvBuiltin(this, E, ReturnValue, ResultType, ID,
+ Ops, PolicyAttrs, IsMasked, SegInstSEW);
}
}] in {
let HasFRMRoundModeOp = 1 in {
@@ -2151,44 +1486,9 @@ defm vfredmax : RVVFloatingReductionBuiltin;
defm vfredmin : RVVFloatingReductionBuiltin;
let ManualCodegen = [{
{
- // LLVM intrinsic
- // Unmasked: (passthru, op0, op1, round_mode, vl)
- // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
-
- SmallVector<llvm::Value*, 6> Operands;
- bool HasMaskedOff = !(
- (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
- (!IsMasked && PolicyAttrs & RVV_VTA));
- bool HasRoundModeOp = IsMasked ?
- (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
- (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
-
- unsigned Offset = IsMasked ?
- (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
-
- if (!HasMaskedOff)
- Operands.push_back(llvm::PoisonValue::get(ResultType));
- else
- Operands.push_back(Ops[IsMasked ? 1 : 0]);
-
- Operands.push_back(Ops[Offset]); // op0
- Operands.push_back(Ops[Offset + 1]); // op1
-
- if (IsMasked)
- Operands.push_back(Ops[0]); // mask
-
- if (HasRoundModeOp) {
- Operands.push_back(Ops[Offset + 2]); // frm
- Operands.push_back(Ops[Offset + 3]); // vl
- } else {
- Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
- Operands.push_back(Ops[Offset + 2]); // vl
- }
-
- IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
- Ops.back()->getType()};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- return Builder.CreateCall(F, Operands, "");
+ return emitRVVFloatingReductionBuiltin(
+ this, E, ReturnValue, ResultType, ID, Ops, PolicyAttrs, IsMasked,
+ SegInstSEW);
}
}] in {
let HasFRMRoundModeOp = 1 in {
@@ -2346,37 +1646,8 @@ let HasMasked = false,
let HasMasked = false, HasVL = false, IRName = "" in {
let Name = "vreinterpret_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- if (ResultType->isIntOrIntVectorTy(1) ||
- Ops[0]->getType()->isIntOrIntVectorTy(1)) {
- assert(isa<ScalableVectorType>(ResultType) &&
- isa<ScalableVectorType>(Ops[0]->getType()));
-
- LLVMContext &Context = CGM.getLLVMContext();
- ScalableVectorType *Boolean64Ty =
- ScalableVectorType::get(llvm::Type::getInt1Ty(Context), 64);
-
- if (ResultType->isIntOrIntVectorTy(1)) {
- // Casting from m1 vector integer -> vector boolean
- // Ex: <vscale x 8 x i8>
- // --(bitcast)--------> <vscale x 64 x i1>
- // --(vector_extract)-> <vscale x 8 x i1>
- llvm::Value *BitCast = Builder.CreateBitCast(Ops[0], Boolean64Ty);
- return Builder.CreateExtractVector(ResultType, BitCast,
- ConstantInt::get(Int64Ty, 0));
- } else {
- // Casting from vector boolean -> m1 vector integer
- // Ex: <vscale x 1 x i1>
- // --(vector_insert)-> <vscale x 64 x i1>
- // --(bitcast)-------> <vscale x 8 x i8>
- llvm::Value *Boolean64Val =
- Builder.CreateInsertVector(Boolean64Ty,
- llvm::PoisonValue::get(Boolean64Ty),
- Ops[0],
- ConstantInt::get(Int64Ty, 0));
- return Builder.CreateBitCast(Boolean64Val, ResultType);
- }
- }
- return Builder.CreateBitCast(Ops[0], ResultType);
+ return emitRVVReinterpretBuiltin(this, E, ReturnValue, ResultType, ID,
+ Ops, PolicyAttrs, IsMasked, SegInstSEW);
}] in {
// Reinterpret between different type under the same SEW and LMUL
def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil", "v">;
@@ -2502,25 +1773,8 @@ let HasMasked = false, HasVL = false, IRName = "" in {
let Name = "vget_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- {
- auto *VecTy = cast<ScalableVectorType>(ResultType);
- if (auto *OpVecTy = dyn_cast<ScalableVectorType>(Ops[0]->getType())) {
- unsigned MaxIndex = OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
- assert(isPowerOf2_32(MaxIndex));
- // Mask to only valid indices.
- Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
- Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
- Ops[1] = Builder.CreateMul(Ops[1],
- ConstantInt::get(Ops[1]->getType(),
- VecTy->getMinNumElements()));
- return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
- }
-
- return Builder.CreateIntrinsic(Intrinsic::riscv_tuple_extract,
- {ResultType, Ops[0]->getType()},
- {Ops[0], Builder.CreateTrunc(Ops[1],
- Builder.getInt32Ty())});
- }
+ return emitRVVGetBuiltin(this, E, ReturnValue, ResultType, ID, Ops,
+ PolicyAttrs, IsMasked, SegInstSEW);
}] in {
foreach dst_lmul = ["(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "csilxfdy", dst_lmul # "v">;
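The index arithmetic the deleted `vget_v` body performed (and which `emitRVVGetBuiltin` presumably preserves) is easy to miss in the diff: the user index is masked to the number of sub-vectors, then scaled to an element offset. A standalone sketch with worked numbers:

```C++
#include <cassert>

// Assumed scenario: extracting an m1 result (<vscale x 8 x i8>) from an m4
// source (<vscale x 32 x i8>), mirroring the deleted codegen's clamping.
unsigned vgetElementOffset(unsigned Idx, unsigned SrcMinElts,
                           unsigned ResMinElts) {
  unsigned MaxIndex = SrcMinElts / ResMinElts; // 32 / 8 = 4 sub-vectors
  assert((MaxIndex & (MaxIndex - 1)) == 0 && "MaxIndex must be a power of 2");
  return (Idx & (MaxIndex - 1)) * ResMinElts;  // Idx = 5 -> 1 -> offset 8
}
```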
@@ -2535,25 +1789,8 @@ let HasMasked = false, HasVL = false, IRName = "" in {
let Name = "vset_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- {
- if (auto *ResVecTy = dyn_cast<ScalableVectorType>(ResultType)) {
- auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
- unsigned MaxIndex = ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
- assert(isPowerOf2_32(MaxIndex));
- // Mask to only valid indices.
- Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
- Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
- Ops[1] = Builder.CreateMul(Ops[1],
- ConstantInt::get(Ops[1]->getType(),
- VecTy->getMinNumElements()));
- return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
- }
-
- return Builder.CreateIntrinsic(Intrinsic::riscv_tuple_insert,
- {ResultType, Ops[2]->getType()},
- {Ops[0], Ops[2],
- Builder.CreateTrunc(Ops[1],Builder.getInt32Ty())});
- }
+ return emitRVVSetBuiltin(this, E, ReturnValue, ResultType, ID, Ops,
+ PolicyAttrs, IsMasked, SegInstSEW);
}] in {
foreach dst_lmul = ["(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "v" # dst_lmul # "vKzv", "csilxfdy">;
@@ -2571,26 +1808,8 @@ let HasMasked = false, HasVL = false, IRName = "" in {
MaskedPolicyScheme = NonePolicy,
SupportOverloading = false,
ManualCodegen = [{
- {
- llvm::Value *ReturnVector = llvm::PoisonValue::get(ResultType);
- auto *VecTy = cast<ScalableVectorType>(Ops[0]->getType());
- for (unsigned I = 0, N = Ops.size(); I < N; ++I) {
- if (isa<ScalableVectorType>(ResultType)) {
- llvm::Value *Idx = ConstantInt::get(Builder.getInt64Ty(),
- VecTy->getMinNumElements() * I);
- ReturnVector =
- Builder.CreateInsertVector(ResultType, ReturnVector, Ops[I], Idx);
- } else {
- llvm::Value *Idx = ConstantInt::get(Builder.getInt32Ty(), I);
- ReturnVector =
- Builder.CreateIntrinsic(Intrinsic::riscv_tuple_insert,
- {ResultType, Ops[I]->getType()},
- {ReturnVector, Ops[I], Idx});
- }
-
- }
- return ReturnVector;
- }
+ return emitRVVCreateBuiltin(this, E, ReturnValue, ResultType, ID, Ops,
+ PolicyAttrs, IsMasked, SegInstSEW);
}] in {
// Since the vcreate_v uses LFixedLog2LMUL, setting the Log2LMUL to [-3] can
diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index b26e558..96f55f5 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -19,7 +19,6 @@
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
-#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Types.h"
@@ -63,11 +62,11 @@ public:
mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ,
const llvm::APInt &val) {
- return create<cir::ConstantOp>(loc, cir::IntAttr::get(typ, val));
+ return cir::ConstantOp::create(*this, loc, cir::IntAttr::get(typ, val));
}
cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr) {
- return create<cir::ConstantOp>(loc, attr);
+ return cir::ConstantOp::create(*this, loc, attr);
}
cir::ConstantOp getConstantInt(mlir::Location loc, mlir::Type ty,
@@ -119,7 +118,7 @@ public:
}
cir::ConstantOp getBool(bool state, mlir::Location loc) {
- return create<cir::ConstantOp>(loc, getCIRBoolAttr(state));
+ return cir::ConstantOp::create(*this, loc, getCIRBoolAttr(state));
}
cir::ConstantOp getFalse(mlir::Location loc) { return getBool(false, loc); }
cir::ConstantOp getTrue(mlir::Location loc) { return getBool(true, loc); }
@@ -144,21 +143,37 @@ public:
mlir::Value createComplexCreate(mlir::Location loc, mlir::Value real,
mlir::Value imag) {
auto resultComplexTy = cir::ComplexType::get(real.getType());
- return create<cir::ComplexCreateOp>(loc, resultComplexTy, real, imag);
+ return cir::ComplexCreateOp::create(*this, loc, resultComplexTy, real,
+ imag);
}
mlir::Value createComplexReal(mlir::Location loc, mlir::Value operand) {
auto operandTy = mlir::cast<cir::ComplexType>(operand.getType());
- return create<cir::ComplexRealOp>(loc, operandTy.getElementType(), operand);
+ return cir::ComplexRealOp::create(*this, loc, operandTy.getElementType(),
+ operand);
}
mlir::Value createComplexImag(mlir::Location loc, mlir::Value operand) {
auto operandTy = mlir::cast<cir::ComplexType>(operand.getType());
- return create<cir::ComplexImagOp>(loc, operandTy.getElementType(), operand);
+ return cir::ComplexImagOp::create(*this, loc, operandTy.getElementType(),
+ operand);
+ }
+
+ cir::LoadOp createLoad(mlir::Location loc, mlir::Value ptr,
+ uint64_t alignment = 0) {
+ mlir::IntegerAttr alignmentAttr = getAlignmentAttr(alignment);
+ assert(!cir::MissingFeatures::opLoadStoreVolatile());
+ return cir::LoadOp::create(*this, loc, ptr, /*isDeref=*/false,
+ alignmentAttr, cir::MemOrderAttr{});
+ }
+
+ mlir::Value createAlignedLoad(mlir::Location loc, mlir::Value ptr,
+ uint64_t alignment) {
+ return createLoad(loc, ptr, alignment);
}
mlir::Value createNot(mlir::Value value) {
- return create<cir::UnaryOp>(value.getLoc(), value.getType(),
+ return cir::UnaryOp::create(*this, value.getLoc(), value.getType(),
cir::UnaryOpKind::Not, value);
}
@@ -167,7 +182,7 @@ public:
mlir::Location loc,
llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> condBuilder,
llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> bodyBuilder) {
- return create<cir::DoWhileOp>(loc, condBuilder, bodyBuilder);
+ return cir::DoWhileOp::create(*this, loc, condBuilder, bodyBuilder);
}
/// Create a while operation.
@@ -175,7 +190,7 @@ public:
mlir::Location loc,
llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> condBuilder,
llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> bodyBuilder) {
- return create<cir::WhileOp>(loc, condBuilder, bodyBuilder);
+ return cir::WhileOp::create(*this, loc, condBuilder, bodyBuilder);
}
/// Create a for operation.
@@ -184,22 +199,23 @@ public:
llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> condBuilder,
llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> bodyBuilder,
llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> stepBuilder) {
- return create<cir::ForOp>(loc, condBuilder, bodyBuilder, stepBuilder);
+ return cir::ForOp::create(*this, loc, condBuilder, bodyBuilder,
+ stepBuilder);
}
/// Create a break operation.
cir::BreakOp createBreak(mlir::Location loc) {
- return create<cir::BreakOp>(loc);
+ return cir::BreakOp::create(*this, loc);
}
/// Create a continue operation.
cir::ContinueOp createContinue(mlir::Location loc) {
- return create<cir::ContinueOp>(loc);
+ return cir::ContinueOp::create(*this, loc);
}
mlir::Value createUnaryOp(mlir::Location loc, cir::UnaryOpKind kind,
mlir::Value operand) {
- return create<cir::UnaryOp>(loc, kind, operand);
+ return cir::UnaryOp::create(*this, loc, kind, operand);
}
mlir::TypedAttr getConstPtrAttr(mlir::Type type, int64_t value) {
@@ -209,13 +225,21 @@ public:
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType,
mlir::Type type, llvm::StringRef name,
mlir::IntegerAttr alignment) {
- return create<cir::AllocaOp>(loc, addrType, type, name, alignment);
+ return cir::AllocaOp::create(*this, loc, addrType, type, name, alignment);
+ }
+
+ /// Get constant address of a global variable as an MLIR attribute.
+ cir::GlobalViewAttr getGlobalViewAttr(cir::PointerType type,
+ cir::GlobalOp globalOp,
+ mlir::ArrayAttr indices = {}) {
+ auto symbol = mlir::FlatSymbolRefAttr::get(globalOp.getSymNameAttr());
+ return cir::GlobalViewAttr::get(type, symbol, indices);
}
mlir::Value createGetGlobal(mlir::Location loc, cir::GlobalOp global) {
assert(!cir::MissingFeatures::addressSpace());
- return create<cir::GetGlobalOp>(loc, getPointerTo(global.getSymType()),
- global.getSymName());
+ return cir::GetGlobalOp::create(
+ *this, loc, getPointerTo(global.getSymType()), global.getSymName());
}
mlir::Value createGetGlobal(cir::GlobalOp global) {
@@ -223,36 +247,39 @@ public:
}
cir::StoreOp createStore(mlir::Location loc, mlir::Value val, mlir::Value dst,
- mlir::IntegerAttr align = {}) {
- return create<cir::StoreOp>(loc, val, dst, align);
+ bool isVolatile = false,
+ mlir::IntegerAttr align = {},
+ cir::MemOrderAttr order = {}) {
+ return cir::StoreOp::create(*this, loc, val, dst, align, order);
}
[[nodiscard]] cir::GlobalOp createGlobal(mlir::ModuleOp mlirModule,
mlir::Location loc,
mlir::StringRef name,
- mlir::Type type,
+ mlir::Type type, bool isConstant,
cir::GlobalLinkageKind linkage) {
mlir::OpBuilder::InsertionGuard guard(*this);
setInsertionPointToStart(mlirModule.getBody());
- return create<cir::GlobalOp>(loc, name, type, linkage);
+ return cir::GlobalOp::create(*this, loc, name, type, isConstant, linkage);
}
cir::GetMemberOp createGetMember(mlir::Location loc, mlir::Type resultTy,
mlir::Value base, llvm::StringRef name,
unsigned index) {
- return create<cir::GetMemberOp>(loc, resultTy, base, name, index);
+ return cir::GetMemberOp::create(*this, loc, resultTy, base, name, index);
}
mlir::Value createDummyValue(mlir::Location loc, mlir::Type type,
clang::CharUnits alignment) {
mlir::IntegerAttr alignmentAttr = getAlignmentAttr(alignment);
auto addr = createAlloca(loc, getPointerTo(type), type, {}, alignmentAttr);
- return create<cir::LoadOp>(loc, addr, /*isDeref=*/false, alignmentAttr);
+ return cir::LoadOp::create(*this, loc, addr, /*isDeref=*/false,
+ alignmentAttr, /*mem_order=*/{});
}
cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base,
mlir::Value stride) {
- return create<cir::PtrStrideOp>(loc, base.getType(), base, stride);
+ return cir::PtrStrideOp::create(*this, loc, base.getType(), base, stride);
}
//===--------------------------------------------------------------------===//
@@ -262,7 +289,7 @@ public:
cir::CallOp createCallOp(mlir::Location loc, mlir::SymbolRefAttr callee,
mlir::Type returnType, mlir::ValueRange operands,
llvm::ArrayRef<mlir::NamedAttribute> attrs = {}) {
- auto op = create<cir::CallOp>(loc, callee, returnType, operands);
+ auto op = cir::CallOp::create(*this, loc, callee, returnType, operands);
op->setAttrs(attrs);
return op;
}
@@ -285,6 +312,25 @@ public:
resOperands, attrs);
}
+ cir::CallOp createTryCallOp(
+ mlir::Location loc, mlir::SymbolRefAttr callee = mlir::SymbolRefAttr(),
+ mlir::Type returnType = cir::VoidType(),
+ mlir::ValueRange operands = mlir::ValueRange(),
+ [[maybe_unused]] cir::SideEffect sideEffect = cir::SideEffect::All) {
+ assert(!cir::MissingFeatures::opCallCallConv());
+ assert(!cir::MissingFeatures::opCallSideEffect());
+ return createCallOp(loc, callee, returnType, operands);
+ }
+
+ cir::CallOp createTryCallOp(
+ mlir::Location loc, cir::FuncOp callee, mlir::ValueRange operands,
+ [[maybe_unused]] cir::SideEffect sideEffect = cir::SideEffect::All) {
+ assert(!cir::MissingFeatures::opCallCallConv());
+ assert(!cir::MissingFeatures::opCallSideEffect());
+ return createTryCallOp(loc, mlir::SymbolRefAttr::get(callee),
+ callee.getFunctionType().getReturnType(), operands);
+ }
+
//===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
@@ -293,7 +339,7 @@ public:
mlir::Value src, mlir::Type newTy) {
if (newTy == src.getType())
return src;
- return create<cir::CastOp>(loc, newTy, kind, src);
+ return cir::CastOp::create(*this, loc, newTy, kind, src);
}
mlir::Value createCast(cir::CastKind kind, mlir::Value src,
@@ -343,7 +389,7 @@ public:
mlir::Value createBinop(mlir::Location loc, mlir::Value lhs,
cir::BinOpKind kind, mlir::Value rhs) {
- return create<cir::BinOp>(loc, lhs.getType(), kind, lhs, rhs);
+ return cir::BinOp::create(*this, loc, lhs.getType(), kind, lhs, rhs);
}
mlir::Value createLowBitsSet(mlir::Location loc, unsigned size,
@@ -365,8 +411,8 @@ public:
mlir::Value trueValue, mlir::Value falseValue) {
assert(trueValue.getType() == falseValue.getType() &&
"trueValue and falseValue should have the same type");
- return create<cir::SelectOp>(loc, trueValue.getType(), condition, trueValue,
- falseValue);
+ return cir::SelectOp::create(*this, loc, trueValue.getType(), condition,
+ trueValue, falseValue);
}
mlir::Value createLogicalAnd(mlir::Location loc, mlir::Value lhs,
@@ -381,8 +427,8 @@ public:
mlir::Value createMul(mlir::Location loc, mlir::Value lhs, mlir::Value rhs,
OverflowBehavior ob = OverflowBehavior::None) {
- auto op =
- create<cir::BinOp>(loc, lhs.getType(), cir::BinOpKind::Mul, lhs, rhs);
+ auto op = cir::BinOp::create(*this, loc, lhs.getType(), cir::BinOpKind::Mul,
+ lhs, rhs);
op.setNoUnsignedWrap(
llvm::to_underlying(ob & OverflowBehavior::NoUnsignedWrap));
op.setNoSignedWrap(
@@ -400,8 +446,8 @@ public:
mlir::Value createSub(mlir::Location loc, mlir::Value lhs, mlir::Value rhs,
OverflowBehavior ob = OverflowBehavior::Saturated) {
- auto op =
- create<cir::BinOp>(loc, lhs.getType(), cir::BinOpKind::Sub, lhs, rhs);
+ auto op = cir::BinOp::create(*this, loc, lhs.getType(), cir::BinOpKind::Sub,
+ lhs, rhs);
op.setNoUnsignedWrap(
llvm::to_underlying(ob & OverflowBehavior::NoUnsignedWrap));
op.setNoSignedWrap(
@@ -422,8 +468,8 @@ public:
mlir::Value createAdd(mlir::Location loc, mlir::Value lhs, mlir::Value rhs,
OverflowBehavior ob = OverflowBehavior::None) {
- auto op =
- create<cir::BinOp>(loc, lhs.getType(), cir::BinOpKind::Add, lhs, rhs);
+ auto op = cir::BinOp::create(*this, loc, lhs.getType(), cir::BinOpKind::Add,
+ lhs, rhs);
op.setNoUnsignedWrap(
llvm::to_underlying(ob & OverflowBehavior::NoUnsignedWrap));
op.setNoSignedWrap(
@@ -444,7 +490,7 @@ public:
cir::CmpOp createCompare(mlir::Location loc, cir::CmpOpKind kind,
mlir::Value lhs, mlir::Value rhs) {
- return create<cir::CmpOp>(loc, getBoolTy(), kind, lhs, rhs);
+ return cir::CmpOp::create(*this, loc, getBoolTy(), kind, lhs, rhs);
}
mlir::Value createIsNaN(mlir::Location loc, mlir::Value operand) {
@@ -453,7 +499,8 @@ public:
mlir::Value createShift(mlir::Location loc, mlir::Value lhs, mlir::Value rhs,
bool isShiftLeft) {
- return create<cir::ShiftOp>(loc, lhs.getType(), lhs, rhs, isShiftLeft);
+ return cir::ShiftOp::create(*this, loc, lhs.getType(), lhs, rhs,
+ isShiftLeft);
}
mlir::Value createShift(mlir::Location loc, mlir::Value lhs,
@@ -496,8 +543,7 @@ public:
static OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block) {
auto last =
std::find_if(block->rbegin(), block->rend(), [](mlir::Operation &op) {
- // TODO: Add LabelOp missing feature here
- return mlir::isa<cir::AllocaOp>(&op);
+ return mlir::isa<cir::AllocaOp, cir::LabelOp>(&op);
});
if (last != block->rend())
@@ -532,12 +578,12 @@ public:
/// Create a loop condition.
cir::ConditionOp createCondition(mlir::Value condition) {
- return create<cir::ConditionOp>(condition.getLoc(), condition);
+ return cir::ConditionOp::create(*this, condition.getLoc(), condition);
}
/// Create a yield operation.
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value = {}) {
- return create<cir::YieldOp>(loc, value);
+ return cir::YieldOp::create(*this, loc, value);
}
};
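The change running through this whole header is mechanical: op construction migrates from the `create<Op>(...)` builder template to the static `Op::create(*this, ...)` form. A minimal caller-side sketch, assuming the enclosing class is the usual `CIRBaseBuilderTy`:

```C++
// Hypothetical caller; only the op name and createLoad come from the patch,
// the rest (function name, CIRBaseBuilderTy spelling) are assumptions.
mlir::Value emitFlagLoad(cir::CIRBaseBuilderTy &b, mlir::Location loc,
                         mlir::Value flagPtr) {
  // Old style: b.create<cir::LoadOp>(loc, flagPtr, ...);
  // New style used below through the helper, which itself now calls
  // cir::LoadOp::create(b, loc, ...).
  return b.createLoad(loc, flagPtr, /*alignment=*/1);
}
```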
diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td
index 588fb0d..16b818f 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td
@@ -51,6 +51,45 @@ class CIR_UnitAttr<string name, string attrMnemonic, list<Trait> traits = []>
}
//===----------------------------------------------------------------------===//
+// SourceLanguageAttr
+//===----------------------------------------------------------------------===//
+
+// TODO: Add cases for other languages that Clang supports.
+
+def CIR_SourceLanguage : CIR_I32EnumAttr<"SourceLanguage", "source language", [
+ I32EnumAttrCase<"C", 1, "c">,
+ I32EnumAttrCase<"CXX", 2, "cxx">
+]> {
+ // The enum attr class is defined in `CIR_SourceLanguageAttr` below,
+ // so that it can define extra class methods.
+ let genSpecializedAttr = 0;
+}
+
+def CIR_SourceLanguageAttr : CIR_EnumAttr<CIR_SourceLanguage, "lang"> {
+
+ let summary = "Module source language";
+ let description = [{
+ Represents the source language used to generate the module.
+
+ Example:
+ ```
+ // Module compiled from C.
+ module attributes {cir.lang = cir.lang<c>} {}
+ // Module compiled from C++.
+ module attributes {cir.lang = cir.lang<cxx>} {}
+ ```
+
+ The module source language attribute name, `cir.lang`, is defined by the
+ `getSourceLanguageAttrName` method in the CIRDialect class.
+ }];
+
+ let extraClassDeclaration = [{
+ bool isC() const { return getValue() == SourceLanguage::C; }
+ bool isCXX() const { return getValue() == SourceLanguage::CXX; }
+ }];
+}
+
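A sketch of how a consumer might use the helpers declared here, under the assumption that the dialect class is `cir::CIRDialect` and that the attribute is looked up through the `getSourceLanguageAttrName` accessor added later in this patch:

```C++
// Hypothetical consumer; the lookup spelling is an assumption.
bool moduleIsCxx(mlir::ModuleOp mod) {
  auto lang = mod->getAttrOfType<cir::SourceLanguageAttr>(
      cir::CIRDialect::getSourceLanguageAttrName());
  return lang && lang.isCXX();
}
```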
+//===----------------------------------------------------------------------===//
// OptInfoAttr
//===----------------------------------------------------------------------===//
@@ -342,6 +381,44 @@ def CIR_ConstVectorAttr : CIR_Attr<"ConstVector", "const_vector", [
}
//===----------------------------------------------------------------------===//
+// ConstRecordAttr
+//===----------------------------------------------------------------------===//
+
+def CIR_ConstRecordAttr : CIR_Attr<"ConstRecord", "const_record", [
+ TypedAttrInterface
+]> {
+ let summary = "Represents a constant record";
+ let description = [{
+ Effectively supports "struct-like" constants. It must be built from
+ an `mlir::ArrayAttr` instance where each element is a typed attribute
+ (`mlir::TypedAttr`).
+
+ Example:
+ ```
+ cir.global external @rgb2 = #cir.const_record<{0 : i8,
+ 5 : i64, #cir.null : !cir.ptr<i8>
+ }> : !cir.record<"", i8, i64, !cir.ptr<i8>>
+ ```
+ }];
+
+ let parameters = (ins AttributeSelfTypeParameter<"">:$type,
+ "mlir::ArrayAttr":$members);
+
+ let builders = [
+ AttrBuilderWithInferredContext<(ins "cir::RecordType":$type,
+ "mlir::ArrayAttr":$members), [{
+ return $_get(type.getContext(), type, members);
+ }]>
+ ];
+
+ let assemblyFormat = [{
+ `<` custom<RecordMembers>($members) `>`
+ }];
+
+ let genVerifyDecl = 1;
+}
+
+//===----------------------------------------------------------------------===//
// ConstPtrAttr
//===----------------------------------------------------------------------===//
@@ -371,6 +448,160 @@ def CIR_ConstPtrAttr : CIR_Attr<"ConstPtr", "ptr", [TypedAttrInterface]> {
}
//===----------------------------------------------------------------------===//
+// GlobalViewAttr
+//===----------------------------------------------------------------------===//
+
+def CIR_GlobalViewAttr : CIR_Attr<"GlobalView", "global_view", [
+ TypedAttrInterface
+]> {
+ let summary = "Provides constant access to a global address";
+ let description = [{
+ Get constant address of global `symbol` and optionally apply offsets to
+ access existing subelements. It provides a way to access globals from other
+ globals and always produces a pointer.
+
+ The type of the input symbol can be different from `#cir.global_view`
+ output type, since a given view of the global might require a static
+ cast for initializing other globals.
+
+ A list of indices can be optionally passed and each element subsequently
+ indexes underlying types. For `symbol` types like `!cir.array`
+ and `!cir.record`, it leads to the constant address of sub-elements, while
+ for `!cir.ptr`, an offset is applied. The first index is relative to the
+ original symbol type, not the produced one.
+
+ The result type of this attribute may be an integer type. In such a case,
+ the pointer to the referenced global is cast to an integer, and this
+ attribute represents the result of that cast.
+
+ Example:
+
+ ```
+ cir.global external @s = @".str2": !cir.ptr<i8>
+ cir.global external @x = #cir.global_view<@s> : !cir.ptr<i8>
+ cir.global external @s_addr = #cir.global_view<@s> : !s64i
+
+ cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8]
+ : !cir.array<i8 x 3>>
+ cir.global external @elt_ptr = #cir.global_view<@rgb, [1]> : !cir.ptr<i8>
+ ```
+
+ Note that, unlike LLVM IR's `gep` instruction, CIR doesn't add the leading
+ zero index when it's known to be constant zero (e.g. for pointers): indices
+ are used exactly to access sub-elements or to apply the offset. The leading
+ zero index is added later, during lowering.
+
+ Example:
+ ```
+ struct A {
+ int a;
+ };
+
+ struct B: virtual A {
+ int b;
+ };
+ ```
+ VTT for B in CIR:
+ ```
+ cir.global linkonce_odr @_ZTT1B = #cir.const_array<[
+ #cir.global_view<@_ZTV1B, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>]>
+ : !cir.array<!cir.ptr<!u8i> x 1>
+ ```
+ VTT for B in LLVM IR:
+ ```
+ @_ZTT1B = linkonce_odr global [1 x ptr] [ptr getelementptr inbounds
+ ({ [3 x ptr] }, ptr @_ZTV1B, i32 0, i32 0, i32 3)], align 8
+ ```
+ }];
+
+ let parameters = (ins AttributeSelfTypeParameter<"">:$type,
+ "mlir::FlatSymbolRefAttr":$symbol,
+ OptionalParameter<"mlir::ArrayAttr">:$indices);
+
+ let builders = [
+ AttrBuilderWithInferredContext<(ins "mlir::Type":$type,
+ "mlir::FlatSymbolRefAttr":$symbol,
+ CArg<"mlir::ArrayAttr", "{}">:$indices), [{
+ return $_get(type.getContext(), type, symbol, indices);
+ }]>
+ ];
+
+ // let genVerifyDecl = 1;
+ let assemblyFormat = [{
+ `<`
+ $symbol
+ (`,` $indices^)?
+ `>`
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// VTableAttr
+//===----------------------------------------------------------------------===//
+
+def CIR_VTableAttr : CIR_Attr<"VTable", "vtable", [TypedAttrInterface]> {
+ let summary = "Represents a C++ vtable";
+ let description = [{
+ Wraps a #cir.const_record containing one or more vtable arrays.
+
+ In most cases, the anonymous record type wrapped by this attribute will
+ contain a single array corresponding to the vtable for one class. However,
+ in the case of multiple inheritance, the anonymous record may contain
+ multiple arrays, each of which is a vtable.
+
+ Example 1 (single vtable):
+ ```mlir
+ cir.global linkonce_odr @_ZTV6Mother =
+ #cir.vtable<{
+ #cir.const_array<[
+ #cir.ptr<null> : !cir.ptr<!u8i>,
+ #cir.global_view<@_ZTI6Mother> : !cir.ptr<!u8i>,
+ #cir.global_view<@_ZN6Mother9MotherFooEv> : !cir.ptr<!u8i>,
+ #cir.global_view<@_ZN6Mother10MotherFoo2Ev> : !cir.ptr<!u8i>
+ ]> : !cir.array<!cir.ptr<!u8i> x 4>
+ }> : !rec_anon_struct1
+ ```
+
+ Example 2 (multiple vtables):
+ ```mlir
+ cir.global linkonce_odr @_ZTV5Child =
+ #cir.vtable<{
+ #cir.const_array<[
+ #cir.ptr<null> : !cir.ptr<!u8i>,
+ #cir.global_view<@_ZTI5Child> : !cir.ptr<!u8i>,
+ #cir.global_view<@_ZN5Child9MotherFooEv> : !cir.ptr<!u8i>,
+ #cir.global_view<@_ZN6Mother10MotherFoo2Ev> : !cir.ptr<!u8i>
+ ]> : !cir.array<!cir.ptr<!u8i> x 4>,
+ #cir.const_array<[
+ #cir.ptr<-8 : i64> : !cir.ptr<!u8i>,
+ #cir.global_view<@_ZTI5Child> : !cir.ptr<!u8i>,
+ #cir.global_view<@_ZN6Father9FatherFooEv> : !cir.ptr<!u8i>
+ ]> : !cir.array<!cir.ptr<!u8i> x 3>
+ }> : !rec_anon_struct2
+ ```
+ }];
+
+ // `data` is a const record with one element, containing an array of
+ // vtable information.
+ let parameters = (ins
+ AttributeSelfTypeParameter<"">:$type,
+ "mlir::ArrayAttr":$data
+ );
+
+ let builders = [
+ AttrBuilderWithInferredContext<(ins "mlir::Type":$type,
+ "mlir::ArrayAttr":$data), [{
+ return $_get(type.getContext(), type, data);
+ }]>
+ ];
+
+ let genVerifyDecl = 1;
+ let assemblyFormat = [{
+ `<` custom<RecordMembers>($data) `>`
+ }];
+}
+
+//===----------------------------------------------------------------------===//
// ConstComplexAttr
//===----------------------------------------------------------------------===//
@@ -516,4 +747,36 @@ def CIR_BitfieldInfoAttr : CIR_Attr<"BitfieldInfo", "bitfield_info"> {
];
}
+//===----------------------------------------------------------------------===//
+// AddressPointAttr
+//===----------------------------------------------------------------------===//
+
+def CIR_AddressPointAttr : CIR_Attr<"AddressPoint", "address_point"> {
+ let summary = "Address point attribute";
+
+ let description = [{
+ Attribute specifying the address point within a C++ virtual table (vtable).
+
+ The `index` (vtable index) parameter identifies which vtable to use within a
+ vtable group, while the `offset` (address point index) specifies the offset
+ within that vtable where the address begins.
+
+ Example:
+ ```mlir
+ cir.global linkonce_odr @_ZTV1B = ...
+ ...
+ %3 = cir.vtable.address_point(@_ZTV1B,
+ address_point = <index = 0, offset = 2>)
+ : !cir.vptr
+ ```
+ }];
+
+ let parameters = (ins "int32_t":$index,
+ "int32_t":$offset);
+
+ let assemblyFormat = [{
+ `<` struct($index, $offset) `>`
+ }];
+}
+
#endif // CLANG_CIR_DIALECT_IR_CIRATTRS_TD
diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h
index 8ef565d..ecc681e 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h
+++ b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h
@@ -34,6 +34,41 @@ public:
void reset(mlir::DataLayoutSpecInterface spec);
bool isBigEndian() const { return bigEndian; }
+
+ /// Internal helper method that returns requested alignment for type.
+ llvm::Align getAlignment(mlir::Type ty, bool abiOrPref) const;
+
+ llvm::Align getABITypeAlign(mlir::Type ty) const {
+ return getAlignment(ty, true);
+ }
+
+ /// Returns the maximum number of bytes that may be overwritten by
+ /// storing the specified type.
+ ///
+ /// If Ty is a scalable vector type, the scalable property will be set and
+ /// the runtime size will be a positive integer multiple of the base size.
+ ///
+ /// For example, returns 5 for i36 and 10 for x86_fp80.
+ llvm::TypeSize getTypeStoreSize(mlir::Type ty) const {
+ llvm::TypeSize baseSize = getTypeSizeInBits(ty);
+ return {llvm::divideCeil(baseSize.getKnownMinValue(), 8),
+ baseSize.isScalable()};
+ }
+
+ /// Returns the offset in bytes between successive objects of the
+ /// specified type, including alignment padding.
+ ///
+ /// If Ty is a scalable vector type, the scalable property will be set and
+ /// the runtime size will be a positive integer multiple of the base size.
+ ///
+ /// This is the amount that alloca reserves for this type. For example,
+ /// returns 12 or 16 for x86_fp80, depending on alignment.
+ llvm::TypeSize getTypeAllocSize(mlir::Type ty) const {
+ // Round up to the next alignment boundary.
+ return llvm::alignTo(getTypeStoreSize(ty), getABITypeAlign(ty).value());
+ }
+
+ llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const;
};
} // namespace cir
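A worked instance of the two sizes the new methods distinguish, using the x86_fp80 case from the comments and assuming a 16-byte ABI alignment (the x86-64 case; a 4-byte-aligned 32-bit target gives the 12-byte figure mentioned above):

```C++
#include <cstdint>

// x86_fp80: 80 value bits; ABI alignment assumed to be 16 bytes here.
void fp80Sizes(uint64_t &storeSize, uint64_t &allocSize) {
  storeSize = (80 + 7) / 8;               // ceil(80 / 8)    = 10 bytes
  allocSize = (storeSize + 15) / 16 * 16; // alignTo(10, 16) = 16 bytes
}
```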
diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td
index 3fdbf65..15d5fa0 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td
@@ -35,11 +35,13 @@ def CIR_Dialect : Dialect {
let hasConstantMaterializer = 1;
let extraClassDeclaration = [{
+ static llvm::StringRef getSourceLanguageAttrName() { return "cir.lang"; }
static llvm::StringRef getTripleAttrName() { return "cir.triple"; }
static llvm::StringRef getOptInfoAttrName() { return "cir.opt_info"; }
static llvm::StringRef getCalleeAttrName() { return "callee"; }
static llvm::StringRef getNoThrowAttrName() { return "nothrow"; }
static llvm::StringRef getSideEffectAttrName() { return "side_effect"; }
+ static llvm::StringRef getModuleLevelAsmAttrName() { return "cir.module_asm"; }
void registerAttributes();
void registerTypes();
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 72841a1..982533f 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -300,6 +300,20 @@ def CIR_ConstantOp : CIR_Op<"const", [
}
//===----------------------------------------------------------------------===//
+// C/C++ memory order definitions
+//===----------------------------------------------------------------------===//
+
+def CIR_MemOrder : CIR_I32EnumAttr<
+ "MemOrder", "Memory order according to C++11 memory model", [
+ I32EnumAttrCase<"Relaxed", 0, "relaxed">,
+ I32EnumAttrCase<"Consume", 1, "consume">,
+ I32EnumAttrCase<"Acquire", 2, "acquire">,
+ I32EnumAttrCase<"Release", 3, "release">,
+ I32EnumAttrCase<"AcquireRelease", 4, "acq_rel">,
+ I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">
+]>;
+
+//===----------------------------------------------------------------------===//
// AllocaOp
//===----------------------------------------------------------------------===//
@@ -408,13 +422,14 @@ def CIR_LoadOp : CIR_Op<"load", [
let arguments = (ins Arg<CIR_PointerType, "the address to load from",
[MemRead]>:$addr,
UnitAttr:$isDeref,
- OptionalAttr<I64Attr>:$alignment
- );
+ OptionalAttr<I64Attr>:$alignment,
+ OptionalAttr<CIR_MemOrder>:$mem_order);
let results = (outs CIR_AnyType:$result);
let assemblyFormat = [{
(`deref` $isDeref^)?
(`align` `(` $alignment^ `)`)?
+ (`atomic` `(` $mem_order^ `)`)?
$addr `:` qualified(type($addr)) `,` type($result) attr-dict
}];
@@ -451,10 +466,12 @@ def CIR_StoreOp : CIR_Op<"store", [
let arguments = (ins CIR_AnyType:$value,
Arg<CIR_PointerType, "the address to store the value",
[MemWrite]>:$addr,
- OptionalAttr<I64Attr>:$alignment);
+ OptionalAttr<I64Attr>:$alignment,
+ OptionalAttr<CIR_MemOrder>:$mem_order);
let assemblyFormat = [{
(`align` `(` $alignment^ `)`)?
+ (`atomic` `(` $mem_order^ `)`)?
$value `,` $addr attr-dict `:` type($value) `,` qualified(type($addr))
}];
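With `mem_order` now threaded through both ops, here is a builder-side sketch of constructing an atomic release store via the widened `createStore`; `cir::MemOrderAttr` is the enum-attr class TableGen should generate for `CIR_MemOrder`, so treat that exact spelling as an assumption:

```C++
// Hedged sketch: emit a 4-byte-aligned release store.
void emitReleaseStore(cir::CIRBaseBuilderTy &b, mlir::Location loc,
                      mlir::Value val, mlir::Value addr) {
  auto order =
      cir::MemOrderAttr::get(b.getContext(), cir::MemOrder::Release);
  b.createStore(loc, val, addr, /*isVolatile=*/false,
                b.getI64IntegerAttr(4), order);
}
```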
@@ -1061,6 +1078,77 @@ def CIR_BrOp : CIR_Op<"br",[
}
//===----------------------------------------------------------------------===//
+// GotoOp
+//===----------------------------------------------------------------------===//
+
+def CIR_GotoOp : CIR_Op<"goto", [Terminator]> {
+ let description = [{
+
+ Transfers control to the specified `label`. This requires a corresponding
+ `cir.label` to exist and is used to represent source-level `goto`s
+ that jump across region boundaries. Alternatively, `cir.br` is used to
+ construct gotos that don't cross such boundaries.
+
+ `cir.goto` is completely symbolic (i.e. it "jumps" on a label that isn't
+ yet materialized) and should be taken into account by passes and analyses
+ when deciding if it's safe to make some assumptions about a given region
+ or basic block.
+
+ Example:
+ ```C++
+ int test(int x) {
+ if (x)
+ goto label;
+ {
+ x = 10;
+ label:
+ return x;
+ }
+ }
+ ```
+
+ ```mlir
+ cir.scope { // REGION #1
+ %2 = cir.load %0 : !cir.ptr<!s32i>, !s32i
+ %3 = cir.cast(int_to_bool, %2 : !s32i), !cir.bool
+ cir.if %3 {
+ cir.goto "label"
+ }
+ }
+ cir.scope { // REGION #2
+ %2 = cir.const #cir.int<10> : !s32i
+ cir.store %2, %0 : !s32i, !cir.ptr<!s32i>
+ cir.br ^bb1
+ ^bb1: // pred: ^bb0
+ cir.label "label"
+ %3 = cir.load %0 : !cir.ptr<!s32i>, !s32i
+ cir.store %3, %1 : !s32i, !cir.ptr<!s32i>
+ %4 = cir.load %1 : !cir.ptr<!s32i>, !s32i
+ cir.return %4 : !s32i
+ }
+ cir.unreachable
+ ```
+ }];
+ let arguments = (ins StrAttr:$label);
+ let assemblyFormat = [{ $label attr-dict }];
+}
+
+//===----------------------------------------------------------------------===//
+// LabelOp
+//===----------------------------------------------------------------------===//
+
+// The LabelOp has the AlwaysSpeculatable trait so that it is not swept
+// away by the canonicalizer.
+def CIR_LabelOp : CIR_Op<"label", [AlwaysSpeculatable]> {
+ let description = [{
+ An identifier that may be referred to by a `cir.goto` operation.
+ }];
+ let arguments = (ins StrAttr:$label);
+ let assemblyFormat = [{ $label attr-dict }];
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
// UnaryOp
//===----------------------------------------------------------------------===//
@@ -1631,12 +1719,14 @@ def CIR_GlobalOp : CIR_Op<"global", [
CIR_GlobalLinkageKind:$linkage,
OptionalAttr<AnyAttr>:$initial_value,
UnitAttr:$comdat,
+ UnitAttr:$constant,
UnitAttr:$dso_local,
OptionalAttr<I64Attr>:$alignment);
let assemblyFormat = [{
($sym_visibility^)?
(`` $global_visibility^)?
+ (`constant` $constant^)?
$linkage
(`comdat` $comdat^)?
(`dso_local` $dso_local^)?
@@ -1655,6 +1745,7 @@ def CIR_GlobalOp : CIR_Op<"global", [
let builders = [OpBuilder<(ins
"llvm::StringRef":$sym_name,
"mlir::Type":$sym_type,
+ CArg<"bool", "false">:$isConstant,
// CIR defaults to external linkage.
CArg<"cir::GlobalLinkageKind",
"cir::GlobalLinkageKind::ExternalLinkage">:$linkage)>];
@@ -1692,6 +1783,194 @@ def CIR_GetGlobalOp : CIR_Op<"get_global", [
}
//===----------------------------------------------------------------------===//
+// VTableAddrPointOp
+//===----------------------------------------------------------------------===//
+
+def CIR_VTableAddrPointOp : CIR_Op<"vtable.address_point", [
+ Pure, DeclareOpInterfaceMethods<SymbolUserOpInterface>
+]> {
+ let summary = "Get the vtable (global variable) address point";
+ let description = [{
+ The `vtable.address_point` operation retrieves the "effective" address
+ (address point) of a C++ virtual table. An object's internal `__vptr`
+ is initialized with the value returned by this operation.
+
+ `address_point.index` (vtable index) provides the appropriate vtable within
+ the vtable group (as specified by Itanium ABI), and `address_point.offset`
+ (address point index) selects the actual address point within that vtable.
+
+ The return type is always `!cir.vptr`.
+
+ Example:
+ ```mlir
+ cir.global linkonce_odr @_ZTV1B = ...
+ ...
+ %3 = cir.vtable.address_point(@_ZTV1B,
+ address_point = <index = 0, offset = 2>) : !cir.vptr
+ ```
+ }];
+
+ let arguments = (ins
+ FlatSymbolRefAttr:$name,
+ CIR_AddressPointAttr:$address_point
+ );
+
+ let results = (outs Res<CIR_VPtrType, "", []>:$addr);
+
+ let assemblyFormat = [{
+ `(`
+ $name `,` `address_point` `=` $address_point
+ `)`
+ `:` qualified(type($addr)) attr-dict
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// VTableGetVPtr
+//===----------------------------------------------------------------------===//
+
+def CIR_VTableGetVPtrOp : CIR_Op<"vtable.get_vptr", [Pure]> {
+ let summary = "Get a the address of the vtable pointer for an object";
+ let description = [{
+ The `vtable.get_vptr` operation retrieves the address of the vptr for a
+ C++ object. This operation requires that the object pointer points to
+ the start of a complete object. (TODO: Describe how we get that).
+ The vptr will always be at offset zero in the object, but this operation
+ is more explicit about what is being retrieved than a direct bitcast.
+
+ The return type is always `!cir.ptr<!cir.vptr>`.
+
+ Example:
+ ```mlir
+ %2 = cir.load %0 : !cir.ptr<!cir.ptr<!rec_C>>, !cir.ptr<!rec_C>
+ %3 = cir.vtable.get_vptr %2 : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
+ ```
+ }];
+
+ let arguments = (ins
+ Arg<CIR_PointerType, "the vptr address", [MemRead]>:$src
+ );
+
+ let results = (outs CIR_PtrToVPtr:$result);
+
+ let assemblyFormat = [{
+ $src `:` qualified(type($src)) `->` qualified(type($result)) attr-dict
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// VTableGetVirtualFnAddrOp
+//===----------------------------------------------------------------------===//
+
+def CIR_VTableGetVirtualFnAddrOp : CIR_Op<"vtable.get_virtual_fn_addr", [
+ Pure
+]> {
+ let summary = "Get a the address of a virtual function pointer";
+ let description = [{
+ The `vtable.get_virtual_fn_addr` operation retrieves the address of a
+ virtual function pointer from an object's vtable (__vptr).
+ This is an abstraction to perform the basic pointer arithmetic to get
+ the address of the virtual function pointer, which can then be loaded and
+ called.
+
+ The `vptr` operand must be a `!cir.vptr` value, which would have been
+ loaded from the address returned by a previous call to
+ `cir.vtable.get_vptr`. The `index` operand is the index of the virtual
+ function in the vtable.
+
+ The return type is a pointer-to-pointer to the function type.
+
+ Example:
+ ```mlir
+ %2 = cir.load %0 : !cir.ptr<!cir.ptr<!rec_C>>, !cir.ptr<!rec_C>
+ %3 = cir.vtable.get_vptr %2 : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
+ %4 = cir.load %3 : !cir.ptr<!cir.vptr>, !cir.vptr
+ %5 = cir.vtable.get_virtual_fn_addr %4[2] : !cir.vptr
+ -> !cir.ptr<!cir.ptr<!cir.func<(!cir.ptr<!rec_C>) -> !s32i>>>
+ %6 = cir.load align(8) %5 : !cir.ptr<!cir.ptr<!cir.func<(!cir.ptr<!rec_C>)
+ -> !s32i>>>,
+ !cir.ptr<!cir.func<(!cir.ptr<!rec_C>) -> !s32i>>
+ %7 = cir.call %6(%2) : (!cir.ptr<!cir.func<(!cir.ptr<!rec_C>) -> !s32i>>,
+ !cir.ptr<!rec_C>) -> !s32i
+ ```
+ }];
+
+ let arguments = (ins
+ Arg<CIR_VPtrType, "vptr", [MemRead]>:$vptr,
+ I64Attr:$index);
+
+ let results = (outs CIR_PointerType:$result);
+
+ let assemblyFormat = [{
+ $vptr `[` $index `]` attr-dict
+ `:` qualified(type($vptr)) `->` qualified(type($result))
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// VTTAddrPointOp
+//===----------------------------------------------------------------------===//
+
+def CIR_VTTAddrPointOp : CIR_Op<"vtt.address_point", [
+ Pure, DeclareOpInterfaceMethods<SymbolUserOpInterface>
+]> {
+ let summary = "Get the VTT address point";
+ let description = [{
+ The `vtt.address_point` operation retrieves an element from the virtual
+ table table (VTT); the element is the address point of a C++ vtable. In virtual
+ inheritance, a set of internal `__vptr` members for an object are
+ initialized by this operation, which assigns an element from the VTT. The
+ initialization order is as follows:
+
+ The complete object constructors and destructors find the VTT
+ via the mangled name of the VTT global variable. They pass the address of
+ the subobject's sub-VTT entry in the VTT as a second parameter
+ when calling the base object constructors and destructors.
+ The base object constructors and destructors use the address passed to
+ initialize the primary virtual pointer and virtual pointers that point to
+ the classes which either have virtual bases or override virtual functions
+ with a virtual step.
+
+ The first parameter is either the mangled name of the VTT global variable
+ or the address of the subobject's sub-VTT entry in the VTT.
+ The second parameter `offset` provides a virtual step to adjust to
+ the actual address point of the vtable.
+
+ The return type is always a `!cir.ptr<!cir.ptr<!void>>`.
+
+ Example:
+ ```mlir
+ cir.global linkonce_odr @_ZTV1B = ...
+ ...
+ %3 = cir.base_class_addr(%1 : !cir.ptr<!rec_D> nonnull) [0]
+ -> !cir.ptr<!rec_B>
+ %4 = cir.vtt.address_point @_ZTT1D, offset = 1
+ -> !cir.ptr<!cir.ptr<!void>>
+ cir.call @_ZN1BC2Ev(%3, %4)
+ ```
+ Or:
+ ```mlir
+ %7 = cir.vtt.address_point %3 : !cir.ptr<!cir.ptr<!void>>, offset = 1
+ -> !cir.ptr<!cir.ptr<!void>>
+ ```
+ }];
+
+ let arguments = (ins OptionalAttr<FlatSymbolRefAttr>:$name,
+ Optional<CIR_AnyType>:$sym_addr,
+ I32Attr:$offset);
+ let results = (outs CIR_PointerType:$addr);
+
+ let assemblyFormat = [{
+ ($name^)?
+ ($sym_addr^ `:` type($sym_addr))?
+ `,`
+ `offset` `=` $offset
+ `->` qualified(type($addr)) attr-dict
+ }];
+
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
// SetBitfieldOp
//===----------------------------------------------------------------------===//
@@ -2153,6 +2432,68 @@ def CIR_CallOp : CIR_CallOpBase<"call", [NoRegionArguments]> {
}
//===----------------------------------------------------------------------===//
+// ReturnAddrOp and FrameAddrOp
+//===----------------------------------------------------------------------===//
+
+class CIR_FuncAddrBuiltinOp<string mnemonic> : CIR_Op<mnemonic, []> {
+ let arguments = (ins CIR_UInt32:$level);
+ let results = (outs CIR_VoidPtrType:$result);
+ let assemblyFormat = [{
+ `(` $level `)` attr-dict
+ }];
+}
+
+def CIR_ReturnAddrOp : CIR_FuncAddrBuiltinOp<"return_address"> {
+ let summary =
+ "The return address of the current function, or of one of its callers";
+
+ let description = [{
+ Represents a call to the builtin function `__builtin_return_address` in CIR.
+ This builtin function returns the return address of the current function,
+ or of one of its callers.
+
+ The `level` argument is the number of frames to scan up the call stack.
+ For instance, a value of 0 yields the return address of the current
+ function, a value of 1 yields the return address of the caller of the
+ current function,
+ and so forth.
+
+ Examples:
+
+ ```mlir
+ %p = return_address(%level) -> !cir.ptr<!void>
+ ```
+ }];
+}
+
+def CIR_FrameAddrOp : CIR_FuncAddrBuiltinOp<"frame_address"> {
+ let summary =
+ "The frame address of the current function, or of one of its callers";
+
+ let description = [{
+ Represents a call to the builtin function `__builtin_frame_address` in CIR.
+ This builtin function returns the frame address of the current function,
+ or of one of its callers. The frame is the area on the stack that holds
+ local variables and saved registers. The frame address is normally the
+ address of the first word pushed on to the stack by the function.
+ However, the exact definition depends upon the processor and the calling
+ convention. If the processor has a dedicated frame pointer register, and
+ the function has a frame, then __builtin_frame_address returns the value of
+ the frame pointer register.
+
+ The `level` argument is the number of frames to scan up the call stack.
+ For instance, a value of 0 yields the frame address of the current
+ function, a value of 1 yields the frame address of the caller of the
+ current function,
+ and so forth.
+
+ Examples:
+
+ ```mlir
+ %p = frame_address(%level) -> !cir.ptr<!void>
+ ```
+ }];
+}
+
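Both ops model the GCC-style builtins directly; a minimal sketch of the source-level forms they are emitted from:

```C++
// The level argument counts frames upward from the current function.
void *callerFrame() {
  void *ra = __builtin_return_address(0); // this function's return address
  (void)ra;
  return __builtin_frame_address(1);      // the caller's frame address
}
```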
+//===----------------------------------------------------------------------===//
// StackSaveOp & StackRestoreOp
//===----------------------------------------------------------------------===//
@@ -2197,6 +2538,87 @@ def CIR_StackRestoreOp : CIR_Op<"stackrestore"> {
}
//===----------------------------------------------------------------------===//
+// InlineAsmOp
+//===----------------------------------------------------------------------===//
+
+def CIR_AsmFlavor : CIR_I32EnumAttr<"AsmFlavor", "ATT or Intel",
+ [I32EnumAttrCase<"x86_att", 0>,
+ I32EnumAttrCase<"x86_intel", 1>]>;
+
+def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> {
+ let description = [{
+ The `cir.asm` operation represents C/C++ inline assembly.
+
+ CIR constraints strings follow the same rules that are established for
+ the C level assembler constraints with several differences caused by
+ clang::AsmStmt processing.
+
+ Thus, numbers that appear in the constraint string may also refer to:
+ - the output variable index referenced by the input operands.
+ - the index of an early-clobber operand.
+
+ Operand attributes are stored as a list where each element corresponds to the
+ operand with the same index. The first index relates to the operation
+ result (if any).
+ The operands themselves are stored as VariadicOfVariadic in the following
+ order: output, input and then in/out operands. When several output operands
+ are present, the result type may be represented as an anonymous record type.
+
+ Example:
+ ```C++
+ __asm__("foo" : : : );
+ __asm__("bar $42 %[val]" : [val] "=r" (x), "+&r"(x));
+ __asm__("baz $42 %[val]" : [val] "=r" (x), "+&r"(x) : "[val]"(y));
+ ```
+
+ ```mlir
+ !rec_22anon2E022 = !cir.record<struct "anon.0" {!cir.int<s, 32>, !cir.int<s, 32>}>
+ !rec_22anon2E122 = !cir.record<struct "anon.1" {!cir.int<s, 32>, !cir.int<s, 32>}>
+ ...
+ %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init]
+ %1 = cir.alloca !s32i, !cir.ptr<!s32i>, ["y", init]
+ ...
+ %2 = cir.load %0 : !cir.ptr<!s32i>, !s32i
+ %3 = cir.load %1 : !cir.ptr<!s32i>, !s32i
+
+ cir.asm(x86_att,
+ out = [],
+ in = [],
+ in_out = [],
+ {"foo" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+
+ cir.asm(x86_att,
+ out = [],
+ in = [],
+ in_out = [%2 : !s32i],
+ {"bar $$42 $0" "=r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) -> !rec_22anon2E022
+
+ cir.asm(x86_att,
+ out = [],
+ in = [%3 : !s32i],
+ in_out = [%2 : !s32i],
+ {"baz $$42 $0" "=r,=&r,0,1,~{dirflag},~{fpsr},~{flags}"}) -> !rec_22anon2E122
+ ```
+ }];
+
+ let results = (outs Optional<CIR_AnyType>:$res);
+
+ let arguments =
+ (ins VariadicOfVariadic<AnyType, "operands_segments">:$asm_operands,
+ StrAttr:$asm_string, StrAttr:$constraints, UnitAttr:$side_effects,
+ CIR_AsmFlavor:$asm_flavor, ArrayAttr:$operand_attrs,
+ DenseI32ArrayAttr:$operands_segments);
+
+ let builders = [OpBuilder<(ins
+ "llvm::ArrayRef<mlir::ValueRange>":$asmOperands,
+ "llvm::StringRef":$asmString, "llvm::StringRef":$constraints,
+ "bool":$sideEffects, "AsmFlavor":$asmFlavor,
+ "llvm::ArrayRef<mlir::Attribute>":$operandAttrs)>];
+
+ let hasCustomAssemblyFormat = 1;
+}
+
+//===----------------------------------------------------------------------===//
// UnreachableOp
//===----------------------------------------------------------------------===//
@@ -2827,7 +3249,7 @@ def CIR_ComplexSubOp : CIR_Op<"complex.sub", [
}
//===----------------------------------------------------------------------===//
-// ComplexMulOp
+// ComplexMulOp & ComplexDivOp
//===----------------------------------------------------------------------===//
def CIR_ComplexRangeKind : CIR_I32EnumAttr<
@@ -2846,12 +3268,13 @@ def CIR_ComplexMulOp : CIR_Op<"complex.mul", [
The `cir.complex.mul` operation takes two complex numbers and returns
their product.
- Range is used to select the implementation used when the operation
- is lowered to the LLVM dialect. For multiplication, 'improved',
- 'promoted', and 'basic' are all handled equivalently, producing the
- algebraic formula with no special handling for NaN value. If 'full' is
- used, a runtime-library function is called if one of the intermediate
- calculations produced a NaN value.
+    For complex types with floating-point components, the `range` attribute
+    specifies the algorithm to be used when the operation is lowered to
+    the LLVM dialect. For multiplication, 'improved', 'promoted', and 'basic'
+    are all handled equivalently, producing the algebraic formula with no
+    special handling for NaN values. If 'full' is used, a runtime-library
+    function is called if one of the intermediate calculations produces
+    a NaN value.
Example:
@@ -2874,6 +3297,48 @@ def CIR_ComplexMulOp : CIR_Op<"complex.mul", [
}];
}
+def CIR_ComplexDivOp : CIR_Op<"complex.div", [
+ Pure, SameOperandsAndResultType
+]> {
+ let summary = "Complex division";
+ let description = [{
+ The `cir.complex.div` operation takes two complex numbers and returns
+ their quotient.
+
+    For complex types with floating-point components, the `range` attribute
+    specifies the algorithm to be used when the operation is lowered to
+    the LLVM dialect. For division, 'improved' produces Smith's algorithm for
+    complex division, with no additional handling for NaN values. If 'promoted'
+    is used, the values are promoted to a higher-precision type, if possible,
+    and the calculation is performed using the algebraic formula, with
+    no additional handling for NaN values; we fall back on Smith's algorithm
+    when the target doesn't support a higher-precision type. If 'full' is used,
+    a runtime-library function is called if one of the intermediate
+    calculations produces a NaN value. If 'basic' is used, the algebraic
+    formula is applied with no additional handling for NaN values. For integer
+    types, the `range` attribute is ignored.
+
+ Example:
+
+ ```mlir
+ %2 = cir.complex.div %0, %1 range(basic) : !cir.complex<!cir.float>
+    %3 = cir.complex.div %0, %1 range(full) : !cir.complex<!cir.float>
+ ```
+ }];
+
+ let arguments = (ins
+ CIR_ComplexType:$lhs,
+ CIR_ComplexType:$rhs,
+ CIR_ComplexRangeKind:$range
+ );
+
+ let results = (outs CIR_ComplexType:$result);
+
+ let assemblyFormat = [{
+ $lhs `,` $rhs `range` `(` $range `)` `:` qualified(type($result)) attr-dict
+ }];
+}
+
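For readers unfamiliar with the algorithm named above, here is a minimal C++ sketch of Smith's method, the behavior the 'improved' range kind lowers to; the function name is illustrative and not part of this patch:

```c++
#include <cmath>
#include <complex>

// Smith's algorithm: scale by the larger-magnitude component of the divisor
// so the denominator never overflows or underflows prematurely the way a
// naive c*c + d*d would. As described above, no extra NaN handling is done.
std::complex<double> smithDiv(std::complex<double> lhs,
                              std::complex<double> rhs) {
  double a = lhs.real(), b = lhs.imag();
  double c = rhs.real(), d = rhs.imag();
  if (std::fabs(c) >= std::fabs(d)) {
    double r = d / c, t = 1.0 / (c + d * r);
    return {(a + b * r) * t, (b - a * r) * t};
  }
  double r = c / d, t = 1.0 / (c * r + d);
  return {(a * r + b) * t, (b * r - a) * t};
}
```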
//===----------------------------------------------------------------------===//
// Bit Manipulation Operations
//===----------------------------------------------------------------------===//
@@ -3143,6 +3608,56 @@ def CIR_AssumeOp : CIR_Op<"assume"> {
}];
}
+def CIR_AssumeAlignedOp : CIR_Op<"assume_aligned", [
+ Pure, AllTypesMatch<["pointer", "result"]>
+]> {
+ let summary = "Tell the optimizer that a pointer is aligned";
+ let description = [{
+    The `cir.assume_aligned` operation takes two or three arguments. The first
+    argument `pointer` gives the pointer value whose alignment is to be assumed,
+    and the second argument `alignment` is an integer attribute that gives the
+    assumed alignment.
+
+    The `offset` argument is optional. If given, it represents the misalignment
+    offset. When it's present, this operation tells the optimizer that the
+    pointer is always misaligned from the alignment by `offset` bytes, i.e. the
+    pointer yielded by `(char *)pointer - offset` is aligned to the specified
+    alignment. Note that the `offset` argument is an SSA value rather than an
+    attribute, which means that a dynamically determined value may be passed
+    as the misalignment offset.
+
+ The result of this operation has the same value as the `pointer` argument,
+ but it additionally carries any alignment information indicated by this
+ operation.
+
+ This operation corresponds to the `__builtin_assume_aligned` builtin
+ function.
+
+ Example:
+
+ ```mlir
+ // Assume that %0 is a CIR pointer value of type !cir.ptr<!s32i>
+ %1 = cir.assume_aligned %0 alignment 16 : !cir.ptr<!s32i>
+
+ // With a misalignment offset of 4 bytes:
+ %2 = cir.const #cir.int<4> : !u64i
+ %3 = cir.assume_aligned %0 alignment 16 [offset %2 : !u64i] : !cir.ptr<!s32i>
+ ```
+ }];
+
+ let arguments = (ins CIR_PointerType:$pointer,
+ I64Attr:$alignment,
+ Optional<CIR_IntType>:$offset);
+ let results = (outs CIR_PointerType:$result);
+
+ let assemblyFormat = [{
+ $pointer
+ `alignment` $alignment
+ (`[` `offset` $offset^ `:` type($offset) `]`)?
+ `:` qualified(type($pointer)) attr-dict
+ }];
+}
+
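Since the op mirrors `__builtin_assume_aligned`, the following C++ sketch (function names invented for illustration) shows the two source forms that would give rise to the two MLIR lines in the example above:

```c++
#include <cstddef>

// Two-argument form: assume p is 16-byte aligned.
int loadAligned(const int *p) {
  auto *q = static_cast<const int *>(__builtin_assume_aligned(p, 16));
  return q[0];
}

// Three-argument form: (char *)p - off is 16-byte aligned. The offset may
// be a runtime value, matching the SSA `offset` operand described above.
int loadMisaligned(const int *p, size_t off) {
  auto *q = static_cast<const int *>(__builtin_assume_aligned(p, 16, off));
  return q[0];
}
```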
def CIR_AssumeSepStorageOp : CIR_Op<"assume_separate_storage", [
SameTypeOperands
]> {
@@ -3202,4 +3717,210 @@ def CIR_ExpectOp : CIR_Op<"expect", [
}];
}
+//===----------------------------------------------------------------------===//
+// Floating Point Ops
+//===----------------------------------------------------------------------===//
+
+class CIR_UnaryFPToFPBuiltinOp<string mnemonic, string llvmOpName>
+ : CIR_Op<mnemonic, [Pure, SameOperandsAndResultType]>
+{
+ let arguments = (ins CIR_AnyFloatOrVecOfFloatType:$src);
+ let results = (outs CIR_AnyFloatOrVecOfFloatType:$result);
+
+ let assemblyFormat = "$src `:` type($src) attr-dict";
+
+ let llvmOp = llvmOpName;
+}
+
+def CIR_FAbsOp : CIR_UnaryFPToFPBuiltinOp<"fabs", "FAbsOp"> {
+ let summary = "Computes the floating-point absolute value";
+ let description = [{
+ `cir.fabs` computes the absolute value of a floating-point operand
+ and returns a result of the same type, ignoring floating-point
+ exceptions. It does not set `errno`.
+ }];
+}
+
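As a small illustration (an assumption about CIRGen's mapping, not spelled out in this patch), source like the following would be a natural producer of a single `cir.fabs`:

```c++
// No errno is set and no FP exceptions are modeled, per the op description.
double magnitude(double x) {
  return __builtin_fabs(x);
}
```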
+//===----------------------------------------------------------------------===//
+// Variadic Operations
+//===----------------------------------------------------------------------===//
+
+def CIR_VAStartOp : CIR_Op<"va_start"> {
+ let summary = "Starts a variable argument list";
+ let description = [{
+    The `cir.va_start` operation models the C/C++ `va_start` macro by
+    initializing a variable argument list at the given `va_list` storage
+    location.
+
+ The first operand must be a pointer to the target's `va_list`
+ representation. This operation has no results and produces its effect by
+ mutating the storage referenced by the pointer operand. The second operand
+ must be an integer value that contains the expected number of arguments in
+ that list.
+
+ Each `cir.va_start` must be paired with a corresponding `cir.va_end`
+ on the same logical `va_list` object along all control-flow paths. After
+ `cir.va_end`, the `va_list` must not be accessed unless reinitialized
+ with another `cir.va_start`.
+
+ Lowering maps this to the LLVM intrinsic `llvm.va_start`, passing the
+ appropriately decayed pointer to the underlying `va_list` storage.
+
+ Example:
+
+ ```mlir
+ // %args : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>
+ %p = cir.cast(array_to_ptrdecay, %args
+ : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>),
+ !cir.ptr<!rec___va_list_tag>
+    // %0 : !cir.ptr<!s32i> holds the expected argument count
+    %count = cir.load %0 : !cir.ptr<!s32i>, !s32i
+ cir.va_start %p %count : !cir.ptr<!rec___va_list_tag>, !s32i
+ ```
+ }];
+ let arguments = (ins
+ CIR_PointerType:$arg_list,
+ CIR_AnyFundamentalIntType:$count
+ );
+
+ let assemblyFormat = [{
+ $arg_list $count attr-dict `:` type(operands)
+ }];
+}
+
+def CIR_VAEndOp : CIR_Op<"va_end"> {
+ let summary = "Ends a variable argument list";
+ let description = [{
+ The `cir.va_end` operation models the C/C++ va_end macro by finalizing
+ and cleaning up a variable argument list previously initialized with
+ `cir.va_start`.
+
+ The operand must be a pointer to the target's `va_list` representation.
+ This operation has no results and produces its effect by mutating the
+ storage referenced by the pointer operand.
+
+ `cir.va_end` must only be called after a matching `cir.va_start` on the
+ same `va_list` along all control-flow paths. After `cir.va_end`, the
+ `va_list` is invalid and must not be accessed unless reinitialized.
+
+ Lowering typically maps this to the LLVM intrinsic `llvm.va_end`,
+ passing the appropriately decayed pointer to the underlying `va_list`
+ storage.
+
+ Example:
+ ```mlir
+ // %args : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>
+ %p = cir.cast(array_to_ptrdecay, %args
+ : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>),
+ !cir.ptr<!rec___va_list_tag>
+ cir.va_end %p : !cir.ptr<!rec___va_list_tag>
+ ```
+ }];
+
+ let arguments = (ins CIR_PointerType:$arg_list);
+
+ let assemblyFormat = [{
+ $arg_list attr-dict `:` type(operands)
+ }];
+}
+
+def CIR_VAArgOp : CIR_Op<"va_arg"> {
+ let summary = "Fetches next variadic element as a given type";
+ let description = [{
+ The `cir.va_arg` operation models the C/C++ `va_arg` macro by reading the
+ next argument from an active variable argument list and producing it as a
+ value of a specified result type.
+
+ The operand must be a pointer to the target's `va_list` representation.
+ The operation advances the `va_list` state as a side effect and returns
+ the fetched value as the result, whose type is chosen by the user of the
+ operation.
+
+ A `cir.va_arg` must only be used on a `va_list` that has been initialized
+    with `cir.va_start` and not yet finalized by `cir.va_end`. The semantics
+ (including alignment and promotion rules) follow the platform ABI; the
+ frontend is responsible for providing a `va_list` pointer that matches the
+ target representation.
+
+ Example:
+ ```mlir
+ // %args : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>
+ %p = cir.cast(array_to_ptrdecay, %args
+ : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>),
+ !cir.ptr<!rec___va_list_tag>
+    %count = cir.const #cir.int<1> : !s32i
+    cir.va_start %p %count : !cir.ptr<!rec___va_list_tag>, !s32i
+
+ // Fetch an `int` from the vararg list.
+ %v = cir.va_arg %p : (!cir.ptr<!rec___va_list_tag>) -> !s32i
+
+    cir.va_end %p : !cir.ptr<!rec___va_list_tag>
+ ```
+ }];
+
+ let arguments = (ins CIR_PointerType:$arg_list);
+ let results = (outs CIR_AnyType:$result);
+
+ let assemblyFormat = [{
+ $arg_list attr-dict `:` functional-type(operands, $result)
+ }];
+}
+
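The three ops above model the standard C/C++ varargs protocol one-for-one; a conventional source-level counterpart (a sketch, not part of the patch) looks like this:

```c++
#include <cstdarg>

// Each macro below corresponds to one of the CIR ops described above.
int sum(int count, ...) {
  va_list args;
  va_start(args, count);        // -> cir.va_start
  int total = 0;
  for (int i = 0; i < count; ++i)
    total += va_arg(args, int); // -> cir.va_arg
  va_end(args);                 // -> cir.va_end
  return total;
}
```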
+//===----------------------------------------------------------------------===//
+// ThrowOp
+//===----------------------------------------------------------------------===//
+
+def CIR_ThrowOp : CIR_Op<"throw"> {
+ let summary = "(Re)Throws an exception";
+ let description = [{
+    This operation is equivalent to either `__cxa_throw` or `__cxa_rethrow`,
+    depending on the arguments.
+
+    The absence of arguments for `cir.throw` means it rethrows.
+
+    For the non-rethrow version, it must have at least two arguments: a pointer
+    to the exception object (likely allocated via `cir.alloc_exception`), the
+    RTTI information, and finally an optional dtor, which might run as part of
+    this operation.
+
+ Example:
+
+ ```mlir
+ // re-throw;
+ cir.throw
+
+ // if (b == 0)
+ // throw "Division by zero condition!";
+
+ // Type info for char const*
+ cir.global "private" constant external @_ZTIPKc : !cir.ptr<!u8i>
+ cir.if %cond {
+ %exception_addr = cir.alloc_exception 8 -> !cir.ptr<!void>
+ ...
+ // Store string addr for "Division by zero condition!"
+ cir.store %string_addr, %exception_addr : !cir.ptr<!s8i>,
+ !cir.ptr<!cir.ptr<!s8i>>
+ cir.throw %exception_addr : !cir.ptr<!cir.ptr<!u8i>>,
+ @_ZTIPKc
+ ```
+ }];
+
+ let arguments = (ins
+ Optional<CIR_PointerType>:$exception_ptr,
+ OptionalAttr<FlatSymbolRefAttr>:$type_info,
+ OptionalAttr<FlatSymbolRefAttr>:$dtor
+ );
+
+ let assemblyFormat = [{
+ ($exception_ptr^ `:` type($exception_ptr))?
+ (`,` $type_info^)?
+ (`,` $dtor^)?
+ attr-dict
+ }];
+
+ let extraClassDeclaration = [{
+ bool rethrows() { return getNumOperands() == 0; }
+ }];
+
+ let hasVerifier = 1;
+}
+
#endif // CLANG_CIR_DIALECT_IR_CIROPS_TD
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
index fead572..17fddae 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
+++ b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
@@ -113,6 +113,18 @@ LLVM_ATTRIBUTE_UNUSED static bool isValidLinkage(GlobalLinkageKind gl) {
isLinkOnceLinkage(gl);
}
+bool operator<(cir::MemOrder, cir::MemOrder) = delete;
+bool operator>(cir::MemOrder, cir::MemOrder) = delete;
+bool operator<=(cir::MemOrder, cir::MemOrder) = delete;
+bool operator>=(cir::MemOrder, cir::MemOrder) = delete;
+
+// Validate that an integral value, which isn't known to fit within the
+// enum's range, is a valid AtomicOrderingCABI.
+template <typename Int> inline bool isValidCIRAtomicOrderingCABI(Int value) {
+ return static_cast<Int>(cir::MemOrder::Relaxed) <= value &&
+ value <= static_cast<Int>(cir::MemOrder::SequentiallyConsistent);
+}
+
} // namespace cir
#endif // CLANG_CIR_DIALECT_IR_CIROPSENUMS_H
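A plausible use of the new helper (the wrapper below is hypothetical, assuming the enums header above is included) is guarding a raw integer before casting it to `cir::MemOrder`, e.g. when the memory-order argument of an atomic builtin is not a constant in the expected range:

```c++
#include <cstdint>
#include <optional>

// Sketch: only cast to cir::MemOrder after validating the raw value.
std::optional<cir::MemOrder> toMemOrder(std::int64_t raw) {
  if (!cir::isValidCIRAtomicOrderingCABI(raw))
    return std::nullopt;
  return static_cast<cir::MemOrder>(raw);
}
```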
diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td b/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td
index d7d55df..82f6e1d 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td
@@ -290,6 +290,14 @@ def CIR_AnyFloatOrVecOfFloatType
}
//===----------------------------------------------------------------------===//
+// VPtr type predicates
+//===----------------------------------------------------------------------===//
+
+def CIR_AnyVPtrType : CIR_TypeBase<"::cir::VPtrType", "vptr type">;
+
+def CIR_PtrToVPtr : CIR_PtrToType<CIR_AnyVPtrType>;
+
+//===----------------------------------------------------------------------===//
// Scalar Type predicates
//===----------------------------------------------------------------------===//
diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td
index a258df7..312d0a9 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td
@@ -296,10 +296,10 @@ def CIR_VPtrType : CIR_Type<"VPtr", "vptr", [
access to the vptr.
This type will be the element type of the 'vptr' member of structures that
- require a vtable pointer. A pointer to this type is returned by the
- `cir.vtable.address_point` and `cir.vtable.get_vptr` operations, and this
- pointer may be passed to the `cir.vtable.get_virtual_fn_addr` operation to
- get the address of a virtual function pointer.
+ require a vtable pointer. The `cir.vtable.address_point` operation returns
+ this type. The `cir.vtable.get_vptr` operation returns a pointer to this
+ type. This pointer may be passed to the `cir.vtable.get_virtual_fn_addr`
+ operation to get the address of a virtual function pointer.
The pointer may also be cast to other pointer types in order to perform
pointer arithmetic based on information encoded in the AST layout to get
diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h
index 7a202b1..32c3e27 100644
--- a/clang/include/clang/CIR/Dialect/Passes.h
+++ b/clang/include/clang/CIR/Dialect/Passes.h
@@ -26,6 +26,7 @@ std::unique_ptr<Pass> createCIRSimplifyPass();
std::unique_ptr<Pass> createHoistAllocasPass();
std::unique_ptr<Pass> createLoweringPreparePass();
std::unique_ptr<Pass> createLoweringPreparePass(clang::ASTContext *astCtx);
+std::unique_ptr<Pass> createGotoSolverPass();
void populateCIRPreLoweringPasses(mlir::OpPassManager &pm);
diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td
index 7d5ec2f..0f57839 100644
--- a/clang/include/clang/CIR/Dialect/Passes.td
+++ b/clang/include/clang/CIR/Dialect/Passes.td
@@ -72,6 +72,16 @@ def CIRFlattenCFG : Pass<"cir-flatten-cfg"> {
let dependentDialects = ["cir::CIRDialect"];
}
+def GotoSolver : Pass<"cir-goto-solver"> {
+ let summary = "Replaces goto operations with branches";
+ let description = [{
+    This pass transforms CIR by replacing goto operations with branch
+    operations to the proper blocks.
+ }];
+ let constructor = "mlir::createGotoSolverPass()";
+ let dependentDialects = ["cir::CIRDialect"];
+}
+
def LoweringPrepare : Pass<"cir-lowering-prepare"> {
let summary = "Lower to more fine-grained CIR operations before lowering to "
"other dialects";
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 27dd181..6a8bab2 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -27,9 +27,6 @@ struct MissingFeatures {
// Address space related
static bool addressSpace() { return false; }
- // CIRGenFunction implementation details
- static bool cgfSymbolTable() { return false; }
-
// Unhandled global/linkage information.
static bool opGlobalThreadLocal() { return false; }
static bool opGlobalConstant() { return false; }
@@ -52,7 +49,6 @@ struct MissingFeatures {
static bool opLoadEmitScalarRangeCheck() { return false; }
static bool opLoadBooleanRepresentation() { return false; }
static bool opLoadStoreTbaa() { return false; }
- static bool opLoadStoreMemOrder() { return false; }
static bool opLoadStoreVolatile() { return false; }
static bool opLoadStoreAtomic() { return false; }
static bool opLoadStoreObjC() { return false; }
@@ -87,7 +83,6 @@ struct MissingFeatures {
static bool setFunctionAttributes() { return false; }
// CallOp handling
- static bool opCallPseudoDtor() { return false; }
static bool opCallAggregateArgs() { return false; }
static bool opCallPaddingArgs() { return false; }
static bool opCallABIExtendArg() { return false; }
@@ -98,8 +93,8 @@ struct MissingFeatures {
static bool opCallReturn() { return false; }
static bool opCallArgEvaluationOrder() { return false; }
static bool opCallCallConv() { return false; }
+ static bool opCallSideEffect() { return false; }
static bool opCallMustTail() { return false; }
- static bool opCallVirtual() { return false; }
static bool opCallInAlloca() { return false; }
static bool opCallAttrs() { return false; }
static bool opCallSurroundingTry() { return false; }
@@ -162,6 +157,15 @@ struct MissingFeatures {
static bool addressIsKnownNonNull() { return false; }
static bool addressPointerAuthInfo() { return false; }
+ // Atomic
+ static bool atomicExpr() { return false; }
+ static bool atomicInfo() { return false; }
+ static bool atomicInfoGetAtomicPointer() { return false; }
+ static bool atomicInfoGetAtomicAddress() { return false; }
+ static bool atomicUseLibCall() { return false; }
+ static bool atomicScope() { return false; }
+ static bool atomicSyncScopeID() { return false; }
+
// Misc
static bool abiArgInfo() { return false; }
static bool addHeapAllocSiteMetadata() { return false; }
@@ -173,7 +177,12 @@ struct MissingFeatures {
static bool aggValueSlotVolatile() { return false; }
static bool alignCXXRecordDecl() { return false; }
static bool armComputeVolatileBitfields() { return false; }
+ static bool asmGoto() { return false; }
+ static bool asmInputOperands() { return false; }
static bool asmLabelAttr() { return false; }
+ static bool asmMemoryEffects() { return false; }
+ static bool asmOutputOperands() { return false; }
+ static bool asmUnwindClobber() { return false; }
static bool assignMemcpyizer() { return false; }
static bool astVarDeclInterface() { return false; }
static bool attributeBuiltin() { return false; }
@@ -187,18 +196,26 @@ struct MissingFeatures {
static bool cirgenABIInfo() { return false; }
static bool cleanupAfterErrorDiags() { return false; }
static bool cleanupsToDeactivate() { return false; }
+ static bool constEmitterAggILE() { return false; }
static bool constEmitterArrayILE() { return false; }
static bool constEmitterVectorILE() { return false; }
static bool constantFoldSwitchStatement() { return false; }
static bool constructABIArgDirectExtend() { return false; }
static bool coverageMapping() { return false; }
+ static bool createInvariantGroup() { return false; }
static bool createProfileWeightsForLoop() { return false; }
static bool ctorMemcpyizer() { return false; }
static bool cudaSupport() { return false; }
static bool cxxRecordStaticMembers() { return false; }
+ static bool dataLayoutTypeIsSized() { return false; }
static bool dataLayoutTypeAllocSize() { return false; }
+ static bool dataLayoutTypeStoreSize() { return false; }
static bool deferredCXXGlobalInit() { return false; }
+ static bool devirtualizeMemberFunction() { return false; }
static bool ehCleanupFlags() { return false; }
+ static bool ehCleanupScope() { return false; }
+ static bool ehCleanupScopeRequiresEHCleanup() { return false; }
+ static bool ehCleanupBranchFixups() { return false; }
static bool ehstackBranches() { return false; }
static bool emitCheckedInBoundsGEP() { return false; }
static bool emitCondLikelihoodViaExpectIntrinsic() { return false; }
@@ -206,12 +223,16 @@ struct MissingFeatures {
static bool emitLValueAlignmentAssumption() { return false; }
static bool emitNullabilityCheck() { return false; }
static bool emitTypeCheck() { return false; }
+ static bool emitTypeMetadataCodeForVCall() { return false; }
static bool fastMathFlags() { return false; }
static bool fpConstraints() { return false; }
static bool generateDebugInfo() { return false; }
+ static bool globalViewIndices() { return false; }
+ static bool globalViewIntLowering() { return false; }
static bool hip() { return false; }
static bool implicitConstructorArgs() { return false; }
static bool incrementProfileCounter() { return false; }
+ static bool innermostEHScope() { return false; }
static bool insertBuiltinUnpredictable() { return false; }
static bool instrumentation() { return false; }
static bool intrinsics() { return false; }
@@ -231,8 +252,8 @@ struct MissingFeatures {
static bool objCBlocks() { return false; }
static bool objCGC() { return false; }
static bool objCLifetime() { return false; }
+ static bool openCL() { return false; }
static bool openMP() { return false; }
- static bool opGlobalViewAttr() { return false; }
static bool opTBAA() { return false; }
static bool peepholeProtection() { return false; }
static bool pgoUse() { return false; }
@@ -245,6 +266,7 @@ struct MissingFeatures {
static bool setNonGC() { return false; }
static bool setObjCGCLValueClass() { return false; }
static bool setTargetAttributes() { return false; }
+ static bool sourceLanguageCases() { return false; }
static bool stackBase() { return false; }
static bool stackSaveOp() { return false; }
static bool targetCIRGenInfoArch() { return false; }
@@ -258,7 +280,10 @@ struct MissingFeatures {
static bool appleKext() { return false; }
static bool dtorCleanups() { return false; }
static bool vtableInitialization() { return false; }
+ static bool vtableEmitMetadata() { return false; }
+ static bool vtableRelativeLayout() { return false; }
static bool msvcBuiltins() { return false; }
+ static bool vaArgABILowering() { return false; }
static bool vlas() { return false; }
// Missing types
diff --git a/clang/include/clang/CodeGen/CGFunctionInfo.h b/clang/include/clang/CodeGen/CGFunctionInfo.h
index 50be517..713b52a 100644
--- a/clang/include/clang/CodeGen/CGFunctionInfo.h
+++ b/clang/include/clang/CodeGen/CGFunctionInfo.h
@@ -77,6 +77,11 @@ public:
/// Array elements in the type are assumed to be padding and skipped.
CoerceAndExpand,
+  /// TargetSpecific - Some argument types are passed as target-specific types
+  /// such as RISC-V's tuple type; these need to be handled in the target
+  /// hook.
+ TargetSpecific,
+
/// InAlloca - Pass the argument directly using the LLVM inalloca attribute.
/// This is similar to indirect with byval, except it only applies to
/// arguments stored in memory and forbids any implicit copies. When
@@ -120,7 +125,7 @@ private:
bool canHavePaddingType() const {
return isDirect() || isExtend() || isIndirect() || isIndirectAliased() ||
- isExpand();
+ isExpand() || isTargetSpecific();
}
void setPaddingType(llvm::Type *T) {
assert(canHavePaddingType());
@@ -291,6 +296,20 @@ public:
return AI;
}
+ static ABIArgInfo getTargetSpecific(llvm::Type *T = nullptr,
+ unsigned Offset = 0,
+ llvm::Type *Padding = nullptr,
+ bool CanBeFlattened = true,
+ unsigned Align = 0) {
+ auto AI = ABIArgInfo(TargetSpecific);
+ AI.setCoerceToType(T);
+ AI.setPaddingType(Padding);
+ AI.setDirectOffset(Offset);
+ AI.setDirectAlign(Align);
+ AI.setCanBeFlattened(CanBeFlattened);
+ return AI;
+ }
+
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType) {
return eltType->isArrayTy() &&
eltType->getArrayElementType()->isIntegerTy(8);
@@ -305,27 +324,33 @@ public:
bool isIndirectAliased() const { return TheKind == IndirectAliased; }
bool isExpand() const { return TheKind == Expand; }
bool isCoerceAndExpand() const { return TheKind == CoerceAndExpand; }
+ bool isTargetSpecific() const { return TheKind == TargetSpecific; }
bool canHaveCoerceToType() const {
- return isDirect() || isExtend() || isCoerceAndExpand();
+ return isDirect() || isExtend() || isCoerceAndExpand() ||
+ isTargetSpecific();
}
// Direct/Extend accessors
unsigned getDirectOffset() const {
- assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+ assert((isDirect() || isExtend() || isTargetSpecific()) &&
+ "Not a direct or extend or target specific kind");
return DirectAttr.Offset;
}
void setDirectOffset(unsigned Offset) {
- assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+ assert((isDirect() || isExtend() || isTargetSpecific()) &&
+ "Not a direct or extend or target specific kind");
DirectAttr.Offset = Offset;
}
unsigned getDirectAlign() const {
- assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+ assert((isDirect() || isExtend() || isTargetSpecific()) &&
+ "Not a direct or extend or target specific kind");
return DirectAttr.Align;
}
void setDirectAlign(unsigned Align) {
- assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+ assert((isDirect() || isExtend() || isTargetSpecific()) &&
+ "Not a direct or extend or target specific kind");
DirectAttr.Align = Align;
}
@@ -394,12 +419,14 @@ public:
}
bool getInReg() const {
- assert((isDirect() || isExtend() || isIndirect()) && "Invalid kind!");
+ assert((isDirect() || isExtend() || isIndirect() || isTargetSpecific()) &&
+ "Invalid kind!");
return InReg;
}
void setInReg(bool IR) {
- assert((isDirect() || isExtend() || isIndirect()) && "Invalid kind!");
+ assert((isDirect() || isExtend() || isIndirect() || isTargetSpecific()) &&
+ "Invalid kind!");
InReg = IR;
}
@@ -481,12 +508,12 @@ public:
}
bool getCanBeFlattened() const {
- assert(isDirect() && "Invalid kind!");
+ assert((isDirect() || isTargetSpecific()) && "Invalid kind!");
return CanBeFlattened;
}
void setCanBeFlattened(bool Flatten) {
- assert(isDirect() && "Invalid kind!");
+ assert((isDirect() || isTargetSpecific()) && "Invalid kind!");
CanBeFlattened = Flatten;
}
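A hypothetical target hook (all names here are invented for illustration) would construct the new kind much like Direct, with the padding/offset/flattening accessors now permitted by the relaxed assertions above:

```c++
// Sketch: classify an argument passed as a target-only IR type, e.g. a
// RISC-V vector tuple. `TupleTy` is whatever type the target hook computed.
clang::CodeGen::ABIArgInfo classifyTupleArg(llvm::Type *TupleTy) {
  return clang::CodeGen::ABIArgInfo::getTargetSpecific(TupleTy);
}
```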
diff --git a/clang/include/clang/CodeGen/CodeGenABITypes.h b/clang/include/clang/CodeGen/CodeGenABITypes.h
index 836fdd7..16f39b991 100644
--- a/clang/include/clang/CodeGen/CodeGenABITypes.h
+++ b/clang/include/clang/CodeGen/CodeGenABITypes.h
@@ -32,6 +32,7 @@
namespace llvm {
class AttrBuilder;
class Constant;
+class ConstantInt;
class Function;
class FunctionType;
class Type;
@@ -126,6 +127,12 @@ uint16_t getPointerAuthDeclDiscriminator(CodeGenModule &CGM, GlobalDecl GD);
uint16_t getPointerAuthTypeDiscriminator(CodeGenModule &CGM,
QualType FunctionType);
+/// Return a signed constant pointer.
+llvm::Constant *getConstantSignedPointer(CodeGenModule &CGM,
+ llvm::Constant *Pointer, unsigned Key,
+ llvm::Constant *StorageAddress,
+ llvm::ConstantInt *OtherDiscriminator);
+
/// Given the language and code-generation options that Clang was configured
/// with, set the default LLVM IR attributes for a function definition.
/// The attributes set here are mostly global target-configuration and
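A minimal call-site sketch for the new entry point, assuming a `CodeGenModule` is in hand (the key value and the null discriminators here are placeholders, not a recommendation):

```c++
// Sketch: sign a constant pointer with key 0, no address diversity, and no
// extra discriminator.
llvm::Constant *signPointer(clang::CodeGen::CodeGenModule &CGM,
                            llvm::Constant *Ptr) {
  return clang::CodeGen::getConstantSignedPointer(
      CGM, Ptr, /*Key=*/0, /*StorageAddress=*/nullptr,
      /*OtherDiscriminator=*/nullptr);
}
```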
diff --git a/clang/include/clang/Driver/Action.h b/clang/include/clang/Driver/Action.h
index 7aecfd8..dbf1187 100644
--- a/clang/include/clang/Driver/Action.h
+++ b/clang/include/clang/Driver/Action.h
@@ -76,9 +76,10 @@ public:
StaticLibJobClass,
BinaryAnalyzeJobClass,
BinaryTranslatorJobClass,
+ ObjcopyJobClass,
JobClassFirst = PreprocessJobClass,
- JobClassLast = BinaryTranslatorJobClass
+ JobClassLast = ObjcopyJobClass
};
// The offloading kind determines if this action is bound to a particular
@@ -687,6 +688,17 @@ public:
}
};
+class ObjcopyJobAction : public JobAction {
+ void anchor() override;
+
+public:
+ ObjcopyJobAction(Action *Input, types::ID Type);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == ObjcopyJobClass;
+ }
+};
+
} // namespace driver
} // namespace clang
diff --git a/clang/include/clang/Driver/CommonArgs.h b/clang/include/clang/Driver/CommonArgs.h
index 9802962d..1464ce4 100644
--- a/clang/include/clang/Driver/CommonArgs.h
+++ b/clang/include/clang/Driver/CommonArgs.h
@@ -85,6 +85,8 @@ const char *RelocationModelName(llvm::Reloc::Model Model);
std::tuple<llvm::Reloc::Model, unsigned, bool>
ParsePICArgs(const ToolChain &ToolChain, const llvm::opt::ArgList &Args);
+bool getStaticPIE(const llvm::opt::ArgList &Args, const ToolChain &TC);
+
unsigned ParseFunctionAlignment(const ToolChain &TC,
const llvm::opt::ArgList &Args);
diff --git a/clang/include/clang/Driver/Driver.h b/clang/include/clang/Driver/Driver.h
index 4d32552..b9b187a 100644
--- a/clang/include/clang/Driver/Driver.h
+++ b/clang/include/clang/Driver/Driver.h
@@ -512,6 +512,9 @@ public:
/// BuildActions - Construct the list of actions to perform for the
/// given arguments, which are only done for a single architecture.
+  /// If the compilation is an explicit module build, this delegates to
+ /// BuildDriverManagedModuleBuildActions. Otherwise, BuildDefaultActions is
+ /// used.
///
/// \param C - The compilation that is being built.
/// \param Args - The input arguments.
@@ -796,6 +799,35 @@ private:
/// compilation based on which -f(no-)?lto(=.*)? option occurs last.
void setLTOMode(const llvm::opt::ArgList &Args);
+ /// BuildDefaultActions - Constructs the list of actions to perform
+ /// for the provided arguments, which are only done for a single architecture.
+ ///
+ /// \param C - The compilation that is being built.
+ /// \param Args - The input arguments.
+ /// \param Actions - The list to store the resulting actions onto.
+ void BuildDefaultActions(Compilation &C, llvm::opt::DerivedArgList &Args,
+ const InputList &Inputs, ActionList &Actions) const;
+
+ /// BuildDriverManagedModuleBuildActions - Performs a dependency
+  /// scan and constructs the list of actions, in dependency order, for the
+  /// provided arguments. This is only done for a single architecture.
+ ///
+ /// \param C - The compilation that is being built.
+ /// \param Args - The input arguments.
+ /// \param Actions - The list to store the resulting actions onto.
+ void BuildDriverManagedModuleBuildActions(Compilation &C,
+ llvm::opt::DerivedArgList &Args,
+ const InputList &Inputs,
+ ActionList &Actions) const;
+
+ /// Scans the leading lines of the C++ source inputs to detect C++20 module
+ /// usage.
+ ///
+ /// \returns True if module usage is detected, false otherwise, or an error on
+ /// read failure.
+ llvm::ErrorOr<bool>
+ ScanInputsForCXX20ModulesUsage(const InputList &Inputs) const;
+
/// Retrieves a ToolChain for a particular \p Target triple.
///
/// Will cache ToolChains for the life of the driver object, and create them
diff --git a/clang/include/clang/Driver/OffloadBundler.h b/clang/include/clang/Driver/OffloadBundler.h
index 667156a..e7306ce 100644
--- a/clang/include/clang/Driver/OffloadBundler.h
+++ b/clang/include/clang/Driver/OffloadBundler.h
@@ -120,7 +120,7 @@ public:
static llvm::Expected<CompressedBundleHeader> tryParse(llvm::StringRef);
};
- static inline const uint16_t DefaultVersion = 2;
+ static inline const uint16_t DefaultVersion = 3;
static llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
compress(llvm::compression::Params P, const llvm::MemoryBuffer &Input,
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 6aab43c..f507968d 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -1851,7 +1851,7 @@ defm pseudo_probe_for_profiling
CodeGenOpts<"PseudoProbeForProfiling">, DefaultFalse,
PosFlag<SetTrue, [], [ClangOption], "Emit">,
NegFlag<SetFalse, [], [ClangOption], "Do not emit">,
- BothFlags<[], [ClangOption, CC1Option],
+ BothFlags<[], [ClangOption, CC1Option, CLOption],
" pseudo probes for sample profiling">>;
def fprofile_list_EQ : Joined<["-"], "fprofile-list=">,
Group<f_Group>, Visibility<[ClangOption, CC1Option, CLOption]>,
@@ -2612,16 +2612,27 @@ def fsanitize_undefined_trap_on_error
def fno_sanitize_undefined_trap_on_error
: Flag<["-"], "fno-sanitize-undefined-trap-on-error">, Group<f_clang_Group>,
Alias<fno_sanitize_trap_EQ>, AliasArgs<["undefined"]>;
-defm sanitize_debug_trap_reasons
- : BoolFOption<
- "sanitize-debug-trap-reasons",
- CodeGenOpts<"SanitizeDebugTrapReasons">, DefaultTrue,
- PosFlag<SetTrue, [], [ClangOption, CC1Option],
- "Annotate trap blocks in debug info with UBSan trap reasons">,
- NegFlag<SetFalse, [], [ClangOption, CC1Option],
- "Do not annotate trap blocks in debug info with UBSan trap "
- "reasons">>;
-
+def fsanitize_debug_trap_reasons_EQ
+ : Joined<["-"], "fsanitize-debug-trap-reasons=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Set how trap reasons are emitted. "
+ "`none` - Not emitted. This gives the smallest debug info; "
+ "`basic` - Emit a fixed trap message per check type. This increases the "
+ "debug info size but not as much as `detailed`; "
+ "`detailed` - Emit a more detailed trap message. This increases the "
+ "debug info size the most. Default is `detailed`.">,
+ Values<"none,basic,detailed">,
+ NormalizedValuesScope<"CodeGenOptions::SanitizeDebugTrapReasonKind">,
+ NormalizedValues<["None", "Basic", "Detailed"]>,
+ MarshallingInfoEnum<CodeGenOpts<"SanitizeDebugTrapReasons">, "Detailed">;
+def fsanitize_debug_trap_reasons
+ : Flag<["-"], "fsanitize-debug-trap-reasons">, Group<f_clang_Group>,
+ Alias<fsanitize_debug_trap_reasons_EQ>, AliasArgs<["detailed"]>,
+ HelpText<"Alias for -fsanitize-debug-trap-reasons=detailed">;
+def fno_sanitize_debug_trap_reasons
+ : Flag<["-"], "fno-sanitize-debug-trap-reasons">, Group<f_clang_Group>,
+ Alias<fsanitize_debug_trap_reasons_EQ>, AliasArgs<["none"]>,
+ HelpText<"Alias for -fsanitize-debug-trap-reasons=none">;
defm sanitize_minimal_runtime : BoolOption<"f", "sanitize-minimal-runtime",
CodeGenOpts<"SanitizeMinimalRuntime">, DefaultFalse,
PosFlag<SetTrue>,
@@ -3296,6 +3307,13 @@ defm modules_reduced_bmi : BoolOption<"f", "modules-reduced-bmi",
PosFlag<SetTrue, [], [ClangOption, CC1Option],
"Generate the reduced BMI">>;
+def fmodules_driver : Flag<["-"], "fmodules-driver">,
+ Group<f_Group>, Visibility<[ClangOption]>,
+ HelpText<"Enable support for driver managed module builds (experimental)">;
+def fno_modules_driver : Flag<["-"], "fno-modules-driver">,
+ Group<f_Group>, Visibility<[ClangOption]>,
+ HelpText<"Disable support for driver managed module builds (experimental)">;
+
def experimental_modules_reduced_bmi : Flag<["-"], "fexperimental-modules-reduced-bmi">,
Group<f_Group>, Visibility<[ClangOption, CC1Option]>, Alias<fmodules_reduced_bmi>;
@@ -3731,14 +3749,20 @@ def fopenmp_relocatable_target : Flag<["-"], "fopenmp-relocatable-target">,
def fnoopenmp_relocatable_target : Flag<["-"], "fnoopenmp-relocatable-target">,
Group<f_Group>, Flags<[NoArgumentUnused, HelpHidden]>,
Visibility<[ClangOption, CC1Option]>;
-def fopenmp_simd : Flag<["-"], "fopenmp-simd">, Group<f_Group>,
- Flags<[NoArgumentUnused]>, Visibility<[ClangOption, CC1Option]>,
- HelpText<"Emit OpenMP code only for SIMD-based constructs.">;
+def fopenmp_simd : Flag<["-"], "fopenmp-simd">,
+ Group<f_Group>,
+ Flags<[NoArgumentUnused]>,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
+ HelpText<"Emit OpenMP code only for SIMD-based constructs.">;
def fopenmp_enable_irbuilder : Flag<["-"], "fopenmp-enable-irbuilder">, Group<f_Group>,
Flags<[NoArgumentUnused, HelpHidden]>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Use the experimental OpenMP-IR-Builder codegen path.">;
-def fno_openmp_simd : Flag<["-"], "fno-openmp-simd">, Group<f_Group>,
- Flags<[NoArgumentUnused]>, Visibility<[ClangOption, CC1Option]>;
+def fno_openmp_simd
+ : Flag<["-"], "fno-openmp-simd">,
+ Group<f_Group>,
+ Flags<[NoArgumentUnused]>,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
+ HelpText<"Do not emit code for any OpenMP constructs.">;
def fopenmp_cuda_mode : Flag<["-"], "fopenmp-cuda-mode">, Group<f_Group>,
Flags<[NoArgumentUnused, HelpHidden]>, Visibility<[ClangOption, CC1Option]>;
def fno_openmp_cuda_mode : Flag<["-"], "fno-openmp-cuda-mode">, Group<f_Group>,
@@ -4540,6 +4564,7 @@ defm aarch64_jump_table_hardening: OptInCC1FFlag<"aarch64-jump-table-hardening",
defm ptrauth_objc_isa : OptInCC1FFlag<"ptrauth-objc-isa", "Enable signing and authentication of Objective-C object's 'isa' field">;
defm ptrauth_objc_interface_sel : OptInCC1FFlag<"ptrauth-objc-interface-sel", "Enable signing and authentication of Objective-C object's 'SEL' fields">;
defm ptrauth_objc_class_ro : OptInCC1FFlag<"ptrauth-objc-class-ro", "Enable signing and authentication for ObjC class_ro pointers">;
+defm ptrauth_block_descriptor_pointers : OptInCC1FFlag<"ptrauth-block-descriptor-pointers", "Enable signing and authentication of block descriptors">;
}
def fenable_matrix : Flag<["-"], "fenable-matrix">, Group<f_Group>,
@@ -5589,7 +5614,8 @@ def mno_outline_atomics : Flag<["-"], "mno-outline-atomics">, Group<f_clang_Grou
HelpText<"Don't generate local calls to out-of-line atomic operations">;
def mno_implicit_float : Flag<["-"], "mno-implicit-float">, Group<m_Group>,
HelpText<"Don't generate implicit floating point or vector instructions">;
-def mimplicit_float : Flag<["-"], "mimplicit-float">, Group<m_Group>;
+def mimplicit_float : Flag<["-"], "mimplicit-float">, Group<m_Group>,
+ HelpText<"Generate implicit floating point or vector instructions">;
def mrecip : Flag<["-"], "mrecip">, Group<m_Group>,
Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
HelpText<"Equivalent to '-mrecip=all'">;
@@ -6805,10 +6831,10 @@ def mapx_features_EQ : CommaJoined<["-"], "mapx-features=">, Group<m_x86_Feature
def mno_apx_features_EQ : CommaJoined<["-"], "mno-apx-features=">, Group<m_x86_Features_Group>,
HelpText<"Disable features of APX">, Values<"egpr,push2pop2,ppx,ndd,ccmp,nf,cf,zu">, Visibility<[ClangOption, CLOption, FlangOption]>;
def mapxf : Flag<["-"], "mapxf">, Alias<mapx_features_EQ>,
- AliasArgs<["egpr","push2pop2","ppx","ndd","ccmp","nf","cf","zu"]>,
+ AliasArgs<["egpr","push2pop2","ppx","ndd","ccmp","nf","zu"]>,
Group<m_x86_Features_Group>;
def mno_apxf : Flag<["-"], "mno-apxf">, Alias<mno_apx_features_EQ>,
- AliasArgs<["egpr","push2pop2","ppx","ndd","ccmp","nf","cf","zu"]>,
+ AliasArgs<["egpr","push2pop2","ppx","ndd","ccmp","nf","zu"]>,
Group<m_x86_Features_Group>;
def mapx_inline_asm_use_gpr32 : Flag<["-"], "mapx-inline-asm-use-gpr32">, Group<m_Group>,
HelpText<"Enable use of GPR32 in inline assembly for APX">;
@@ -6979,7 +7005,6 @@ def static_libgfortran : Flag<["-"], "static-libgfortran">, Group<gfortran_Group
// "f" options with values for gfortran.
def fblas_matmul_limit_EQ : Joined<["-"], "fblas-matmul-limit=">, Group<gfortran_Group>;
def fcheck_EQ : Joined<["-"], "fcheck=">, Group<gfortran_Group>;
-def fcoarray_EQ : Joined<["-"], "fcoarray=">, Group<gfortran_Group>;
def ffpe_trap_EQ : Joined<["-"], "ffpe-trap=">, Group<gfortran_Group>;
def ffree_line_length_VALUE : Joined<["-"], "ffree-line-length-">, Group<gfortran_Group>;
def finit_character_EQ : Joined<["-"], "finit-character=">, Group<gfortran_Group>;
@@ -8688,6 +8713,15 @@ def fopenmp_host_ir_file_path : Separate<["-"], "fopenmp-host-ir-file-path">,
} // let Visibility = [CC1Option, FC1Option]
//===----------------------------------------------------------------------===//
+// Coarray Options
+//===----------------------------------------------------------------------===//
+
+def fcoarray : Flag<["-"], "fcoarray">,
+ Group<f_Group>,
+ Visibility<[FlangOption, FC1Option]>,
+ HelpText<"Enable Coarray features">;
+
+//===----------------------------------------------------------------------===//
// SYCL Options
//===----------------------------------------------------------------------===//
@@ -9381,6 +9415,11 @@ def res_may_alias : Option<["/", "-"], "res-may-alias", KIND_FLAG>,
Visibility<[DXCOption, ClangOption, CC1Option]>,
HelpText<"Assume that UAVs/SRVs may alias">,
MarshallingInfoFlag<CodeGenOpts<"ResMayAlias">>;
+def dxc_strip_rootsignature :
+ Option<["/", "-"], "Qstrip-rootsignature", KIND_FLAG>,
+ Group<dxc_Group>,
+ Visibility<[DXCOption]>,
+ HelpText<"Omit the root signature from produced DXContainer">;
def target_profile : DXCJoinedOrSeparate<"T">, MetaVarName<"<profile>">,
HelpText<"Set target profile">,
Values<"ps_6_0, ps_6_1, ps_6_2, ps_6_3, ps_6_4, ps_6_5, ps_6_6, ps_6_7,"
@@ -9413,6 +9452,18 @@ def dxc_rootsig_ver :
Alias<fdx_rootsignature_version>,
Group<dxc_Group>,
Visibility<[DXCOption]>;
+def fdx_rootsignature_define :
+ Joined<["-"], "fdx-rootsignature-define=">,
+ Group<dxc_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ MarshallingInfoString<LangOpts<"HLSLRootSigOverride">, "\"\"">,
+ HelpText<"Override entry function root signature with root signature at "
+ "given macro name.">;
+def dxc_rootsig_define :
+ Separate<["-"], "rootsig-define">,
+ Alias<fdx_rootsignature_define>,
+ Group<dxc_Group>,
+ Visibility<[DXCOption]>;
def hlsl_entrypoint : Option<["-"], "hlsl-entry", KIND_SEPARATE>,
Group<dxc_Group>,
Visibility<[ClangOption, CC1Option]>,
@@ -9445,8 +9496,12 @@ def fspv_target_env_EQ : Joined<["-"], "fspv-target-env=">, Group<dxc_Group>,
def fspv_extension_EQ
: Joined<["-"], "fspv-extension=">,
Group<dxc_Group>,
- HelpText<"Specify the available SPIR-V extensions. If this option is not "
- "specified, then all extensions are available.">;
+ HelpText<
+ "Specify the available SPIR-V extensions. If this option is not "
+ "specified, then all extensions are available. If KHR is specified, "
+ "then all KHR extensions will be available. If DXC is specifided, "
+ "then all extensions implemented by the DirectX Shader compiler will "
+ "be available. This option is useful for moving from DXC to Clang.">;
def fvk_use_dx_layout
: DXCFlag<"fvk-use-dx-layout">,
HelpText<"Use DirectX memory layout for Vulkan resources.">;
@@ -9457,6 +9512,16 @@ def fvk_use_scalar_layout
: DXCFlag<"fvk-use-scalar-layout">,
HelpText<"Use scalar memory layout for Vulkan resources.">;
+def fhlsl_spv_use_unknown_image_format
+ : Flag<["-"], "fspv-use-unknown-image-format">,
+ Group<dxc_Group>,
+ Visibility<[CC1Option, DXCOption]>,
+ HelpText<"For storage images and texel buffers, sets the default format "
+ "to 'Unknown' when not specified via the `vk::image_format` "
+ "attribute. If this option is not used, the format is inferred "
+ "from the resource's data type.">,
+ MarshallingInfoFlag<LangOpts<"HLSLSpvUseUnknownImageFormat">>;
+
def no_wasm_opt : Flag<["--"], "no-wasm-opt">,
Group<m_Group>,
HelpText<"Disable the wasm-opt optimizer">,
diff --git a/clang/include/clang/Driver/ToolChain.h b/clang/include/clang/Driver/ToolChain.h
index 2430563..1425714 100644
--- a/clang/include/clang/Driver/ToolChain.h
+++ b/clang/include/clang/Driver/ToolChain.h
@@ -224,9 +224,6 @@ protected:
static void addSystemFrameworkInclude(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
const Twine &Path);
- static void addSystemInclude(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- const Twine &Path);
static void addExternCSystemInclude(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
const Twine &Path);
@@ -246,6 +243,9 @@ protected:
///@}
public:
+ static void addSystemInclude(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ const Twine &Path);
virtual ~ToolChain();
// Accessors
diff --git a/clang/include/clang/ExtractAPI/DeclarationFragments.h b/clang/include/clang/ExtractAPI/DeclarationFragments.h
index 4ac7444..4859225 100644
--- a/clang/include/clang/ExtractAPI/DeclarationFragments.h
+++ b/clang/include/clang/ExtractAPI/DeclarationFragments.h
@@ -440,9 +440,8 @@ private:
DeclarationFragments &);
/// Build DeclarationFragments for a NestedNameSpecifier.
- static DeclarationFragments getFragmentsForNNS(const NestedNameSpecifier *,
- ASTContext &,
- DeclarationFragments &);
+ static DeclarationFragments
+ getFragmentsForNNS(NestedNameSpecifier, ASTContext &, DeclarationFragments &);
/// Build DeclarationFragments for Qualifiers.
static DeclarationFragments getFragmentsForQualifiers(const Qualifiers quals);
diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h
index 31582a4..5dfdb23 100644
--- a/clang/include/clang/Format/Format.h
+++ b/clang/include/clang/Format/Format.h
@@ -4813,14 +4813,45 @@ struct FormatStyle {
/// \version 7
bool SpaceBeforeRangeBasedForLoopColon;
- /// If ``true``, spaces will be inserted into ``{}``.
- /// \code
- /// true: false:
- /// void f() { } vs. void f() {}
- /// while (true) { } while (true) {}
- /// \endcode
+ /// This option is **deprecated**. See ``Block`` of ``SpaceInEmptyBraces``.
/// \version 10
- bool SpaceInEmptyBlock;
+ // bool SpaceInEmptyBlock;
+
+ /// Style of when to insert a space in empty braces.
+ enum SpaceInEmptyBracesStyle : int8_t {
+ /// Always insert a space in empty braces.
+ /// \code
+ /// void f() { }
+ /// class Unit { };
+ /// auto a = [] { };
+ /// int x{ };
+ /// \endcode
+ SIEB_Always,
+ /// Only insert a space in empty blocks.
+ /// \code
+ /// void f() { }
+ /// class Unit { };
+ /// auto a = [] { };
+ /// int x{};
+ /// \endcode
+ SIEB_Block,
+ /// Never insert a space in empty braces.
+ /// \code
+ /// void f() {}
+ /// class Unit {};
+ /// auto a = [] {};
+ /// int x{};
+ /// \endcode
+ SIEB_Never
+ };
+
+ /// Specifies when to insert a space in empty braces.
+ /// \note
+ /// This option doesn't apply to initializer braces if
+ /// ``Cpp11BracedListStyle`` is set to ``true``.
+ /// \endnote
+ /// \version 22
+ SpaceInEmptyBracesStyle SpaceInEmptyBraces;
/// If ``true``, spaces may be inserted into ``()``.
/// This option is **deprecated**. See ``InEmptyParentheses`` of
@@ -5494,7 +5525,7 @@ struct FormatStyle {
SpaceBeforeRangeBasedForLoopColon ==
R.SpaceBeforeRangeBasedForLoopColon &&
SpaceBeforeSquareBrackets == R.SpaceBeforeSquareBrackets &&
- SpaceInEmptyBlock == R.SpaceInEmptyBlock &&
+ SpaceInEmptyBraces == R.SpaceInEmptyBraces &&
SpacesBeforeTrailingComments == R.SpacesBeforeTrailingComments &&
SpacesInAngles == R.SpacesInAngles &&
SpacesInContainerLiterals == R.SpacesInContainerLiterals &&
diff --git a/clang/include/clang/Frontend/ASTUnit.h b/clang/include/clang/Frontend/ASTUnit.h
index ad54016..27bba8e 100644
--- a/clang/include/clang/Frontend/ASTUnit.h
+++ b/clang/include/clang/Frontend/ASTUnit.h
@@ -629,6 +629,17 @@ public:
return StoredDiagnostics.end();
}
+ using diags_range = llvm::iterator_range<stored_diag_iterator>;
+ using const_diags_range = llvm::iterator_range<stored_diag_const_iterator>;
+
+ diags_range storedDiagnostics() {
+ return {stored_diag_begin(), stored_diag_end()};
+ }
+
+ const_diags_range storedDiagnostics() const {
+ return {stored_diag_begin(), stored_diag_end()};
+ }
+
unsigned stored_diag_size() const { return StoredDiagnostics.size(); }
stored_diag_iterator stored_diag_afterDriver_begin() {
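The new range accessors let callers replace begin/end iterator pairs with a range-for; a minimal sketch (the helper function is invented for illustration):

```c++
#include "clang/Frontend/ASTUnit.h"
#include "llvm/Support/raw_ostream.h"

// Sketch: print every stored diagnostic of an ASTUnit.
void dumpStoredDiags(const clang::ASTUnit &AST) {
  for (const clang::StoredDiagnostic &D : AST.storedDiagnostics())
    llvm::errs() << D.getMessage() << "\n";
}
```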
diff --git a/clang/include/clang/Frontend/FrontendActions.h b/clang/include/clang/Frontend/FrontendActions.h
index a5dfb77..73308c0 100644
--- a/clang/include/clang/Frontend/FrontendActions.h
+++ b/clang/include/clang/Frontend/FrontendActions.h
@@ -329,6 +329,18 @@ public:
: ModuleName(ModuleName) {}
};
+//===----------------------------------------------------------------------===//
+// HLSL Specific Actions
+//===----------------------------------------------------------------------===//
+
+class HLSLFrontendAction : public WrapperFrontendAction {
+protected:
+ void ExecuteAction() override;
+
+public:
+ HLSLFrontendAction(std::unique_ptr<FrontendAction> WrappedAction);
+};
+
} // end namespace clang
#endif
diff --git a/clang/include/clang/Index/IndexSymbol.h b/clang/include/clang/Index/IndexSymbol.h
index 59e90fc..deb9337 100644
--- a/clang/include/clang/Index/IndexSymbol.h
+++ b/clang/include/clang/Index/IndexSymbol.h
@@ -27,6 +27,7 @@ enum class SymbolKind : uint8_t {
Namespace,
NamespaceAlias,
Macro,
+ IncludeDirective,
Enum,
Struct,
diff --git a/clang/include/clang/Interpreter/Interpreter.h b/clang/include/clang/Interpreter/Interpreter.h
index 83d2962..8c124aa 100644
--- a/clang/include/clang/Interpreter/Interpreter.h
+++ b/clang/include/clang/Interpreter/Interpreter.h
@@ -37,7 +37,6 @@ class ThreadSafeContext;
namespace clang {
class CompilerInstance;
-class CodeGenerator;
class CXXRecordDecl;
class Decl;
class IncrementalExecutor;
@@ -110,10 +109,6 @@ class Interpreter {
// printing happens, it's in an invalid state.
Value LastValue;
- /// When CodeGen is created the first llvm::Module gets cached in many places
- /// and we must keep it alive.
- std::unique_ptr<llvm::Module> CachedInCodeGenModule;
-
/// Compiler instance performing the incremental compilation.
std::unique_ptr<CompilerInstance> CI;
@@ -175,15 +170,9 @@ public:
llvm::Expected<llvm::orc::ExecutorAddr>
getSymbolAddressFromLinkerName(llvm::StringRef LinkerName) const;
- std::unique_ptr<llvm::Module> GenModule(IncrementalAction *Action = nullptr);
- PartialTranslationUnit &RegisterPTU(TranslationUnitDecl *TU,
- std::unique_ptr<llvm::Module> M = {},
- IncrementalAction *Action = nullptr);
-
private:
size_t getEffectivePTUSize() const;
void markUserCodeStart();
- llvm::Expected<Expr *> ExtractValueFromExpr(Expr *E);
// A cache for the compiled destructors used to for de-allocation of managed
// clang::Values.
@@ -206,11 +195,6 @@ private:
// This function forces emission of the needed dtor.
llvm::Expected<llvm::orc::ExecutorAddr>
CompileDtorCall(CXXRecordDecl *CXXRD) const;
-
- /// @}
- /// @name Code generation
- /// @{
- CodeGenerator *getCodeGen(IncrementalAction *Action = nullptr) const;
};
} // namespace clang
diff --git a/clang/include/clang/Lex/DependencyDirectivesScanner.h b/clang/include/clang/Lex/DependencyDirectivesScanner.h
index f9fec39..c0b742d 100644
--- a/clang/include/clang/Lex/DependencyDirectivesScanner.h
+++ b/clang/include/clang/Lex/DependencyDirectivesScanner.h
@@ -135,6 +135,13 @@ void printDependencyDirectivesAsSource(
ArrayRef<dependency_directives_scan::Directive> Directives,
llvm::raw_ostream &OS);
+/// Scan an input source buffer for C++20 named module usage.
+///
+/// \param Source The input source buffer.
+///
+/// \returns true if any C++20 named modules related directive was found.
+bool scanInputForCXX20ModulesUsage(StringRef Source);
+
/// Functor that returns the dependency directives for a given file.
class DependencyDirectivesGetter {
public:
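A plausible caller, mirroring what the driver-side scan described earlier would do (the wrapper function is hypothetical), reads a file and pre-scans its buffer:

```c++
#include "clang/Lex/DependencyDirectivesScanner.h"
#include "llvm/Support/MemoryBuffer.h"

// Sketch: decide whether an input looks like a C++20 module unit before
// setting up a driver-managed module build.
bool usesNamedModules(llvm::StringRef Path) {
  auto Buf = llvm::MemoryBuffer::getFile(Path);
  return Buf && clang::scanInputForCXX20ModulesUsage((*Buf)->getBuffer());
}
```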
diff --git a/clang/include/clang/Lex/Lexer.h b/clang/include/clang/Lex/Lexer.h
index 06971ff..423f2ff 100644
--- a/clang/include/clang/Lex/Lexer.h
+++ b/clang/include/clang/Lex/Lexer.h
@@ -143,9 +143,6 @@ class Lexer : public PreprocessorLexer {
/// True if this is the first time we're lexing the input file.
bool IsFirstTimeLexingFile;
- /// True if current lexing token is the first pp-token.
- bool IsFirstPPToken;
-
// NewLinePtr - A pointer to new line character '\n' being lexed. For '\r\n',
// it also points to '\n.'
const char *NewLinePtr;
diff --git a/clang/include/clang/Lex/NoTrivialPPDirectiveTracer.h b/clang/include/clang/Lex/NoTrivialPPDirectiveTracer.h
new file mode 100644
index 0000000..9ab3c6a
--- /dev/null
+++ b/clang/include/clang/Lex/NoTrivialPPDirectiveTracer.h
@@ -0,0 +1,310 @@
+//===--- NoTrivialPPDirectiveTracer.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the NoTrivialPPDirectiveTracer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LEX_NO_TRIVIAL_PPDIRECTIVE_TRACER_H
+#define LLVM_CLANG_LEX_NO_TRIVIAL_PPDIRECTIVE_TRACER_H
+
+#include "clang/Lex/PPCallbacks.h"
+
+namespace clang {
+class Preprocessor;
+
+/// Consider the following code:
+///
+/// # 1 __FILE__ 1 3
+/// export module a;
+///
+/// According to the wording in
+/// [P1857R3](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1857r3.html):
+///
+/// A module directive may only appear as the first preprocessing tokens in a
+/// file (excluding the global module fragment.)
+///
+/// and the wording in
+/// [[cpp.pre]](https://eel.is/c++draft/cpp.pre#nt:module-file):
+/// module-file:
+/// pp-global-module-fragment[opt] pp-module group[opt]
+/// pp-private-module-fragment[opt]
+///
+/// Here `#` is the first pp-token in the translation unit, so clang rejected
+/// the code, but such directives really should be exempted from this rule. The
+/// goal is to disallow any preprocessor conditionals and most state changes,
+/// but these directives don't fit that description.
+///
+/// State change would mean most semantically observable preprocessor state,
+/// particularly anything that is order dependent. Global flags like being a
+/// system header/module shouldn't matter.
+///
+/// We should exempt a bunch of directives, even though doing so violates the
+/// current standard wording.
+///
+/// This class is used to trace 'no-trivial' pp-directives in the main file,
+/// i.e. those that may change the preprocessing state.
+///
+/// FIXME: Once the wording of the standard is revised, we need to follow the
+/// wording of the standard. Currently this is just a workaround.
+class NoTrivialPPDirectiveTracer : public PPCallbacks {
+ Preprocessor &PP;
+
+  /// Whether we are currently preprocessing the main file; only the main file
+  /// is of interest.
+ bool InMainFile = true;
+
+  /// Whether one or more conditional, include, or other 'no-trivial'
+  /// pp-directives have been seen before.
+ bool SeenNoTrivialPPDirective = false;
+
+ void setSeenNoTrivialPPDirective();
+
+public:
+ NoTrivialPPDirectiveTracer(Preprocessor &P) : PP(P) {}
+
+ bool hasSeenNoTrivialPPDirective() const;
+
+ /// Callback invoked whenever the \p Lexer moves to a different file for
+ /// lexing. Unlike \p FileChanged line number directives and other related
+ /// pragmas do not trigger callbacks to \p LexedFileChanged.
+ ///
+ /// \param FID The \p FileID that the \p Lexer moved to.
+ ///
+ /// \param Reason Whether the \p Lexer entered a new file or exited one.
+ ///
+ /// \param FileType The \p CharacteristicKind of the file the \p Lexer moved
+ /// to.
+ ///
+ /// \param PrevFID The \p FileID the \p Lexer was using before the change.
+ ///
+ /// \param Loc The location where the \p Lexer entered a new file from or the
+ /// location that the \p Lexer moved into after exiting a file.
+ void LexedFileChanged(FileID FID, LexedFileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType, FileID PrevFID,
+ SourceLocation Loc) override;
+
+ /// Callback invoked whenever an embed directive has been processed,
+ /// regardless of whether the embed will actually find a file.
+ ///
+ /// \param HashLoc The location of the '#' that starts the embed directive.
+ ///
+ /// \param FileName The name of the file being included, as written in the
+ /// source code.
+ ///
+ /// \param IsAngled Whether the file name was enclosed in angle brackets;
+ /// otherwise, it was enclosed in quotes.
+ ///
+ /// \param File The actual file that may be included by this embed directive.
+ ///
+ /// \param Params The parameters used by the directive.
+ void EmbedDirective(SourceLocation HashLoc, StringRef FileName, bool IsAngled,
+ OptionalFileEntryRef File,
+ const LexEmbedParametersResult &Params) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Callback invoked whenever an inclusion directive of
+ /// any kind (\c \#include, \c \#import, etc.) has been processed, regardless
+ /// of whether the inclusion will actually result in an inclusion.
+ ///
+ /// \param HashLoc The location of the '#' that starts the inclusion
+ /// directive.
+ ///
+ /// \param IncludeTok The token that indicates the kind of inclusion
+ /// directive, e.g., 'include' or 'import'.
+ ///
+ /// \param FileName The name of the file being included, as written in the
+ /// source code.
+ ///
+ /// \param IsAngled Whether the file name was enclosed in angle brackets;
+ /// otherwise, it was enclosed in quotes.
+ ///
+ /// \param FilenameRange The character range of the quotes or angle brackets
+ /// for the written file name.
+ ///
+ /// \param File The actual file that may be included by this inclusion
+ /// directive.
+ ///
+ /// \param SearchPath Contains the search path which was used to find the file
+ /// in the file system. If the file was found via an absolute include path,
+ /// SearchPath will be empty. For framework includes, the SearchPath and
+ /// RelativePath will be split up. For example, if an include of "Some/Some.h"
+ /// is found via the framework path
+ /// "path/to/Frameworks/Some.framework/Headers/Some.h", SearchPath will be
+ /// "path/to/Frameworks/Some.framework/Headers" and RelativePath will be
+ /// "Some.h".
+ ///
+ /// \param RelativePath The path relative to SearchPath, at which the include
+ /// file was found. This is equal to FileName except for framework includes.
+ ///
+ /// \param SuggestedModule The module suggested for this header, if any.
+ ///
+ /// \param ModuleImported Whether this include was translated into import of
+ /// \p SuggestedModule.
+ ///
+ /// \param FileType The characteristic kind, indicating whether a file or
+ /// directory holds normal user code, system code, or system code which is
+ /// implicitly 'extern "C"' in C++ mode.
+ ///
+ void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
+ StringRef FileName, bool IsAngled,
+ CharSourceRange FilenameRange,
+ OptionalFileEntryRef File, StringRef SearchPath,
+ StringRef RelativePath, const Module *SuggestedModule,
+ bool ModuleImported,
+ SrcMgr::CharacteristicKind FileType) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Callback invoked whenever there was an explicit module-import
+ /// syntax.
+ ///
+ /// \param ImportLoc The location of the import directive token.
+ ///
+ /// \param Path The identifiers (and their locations) of the module
+ /// "path", e.g., "std.vector" would be split into "std" and "vector".
+ ///
+ /// \param Imported The imported module; can be null if importing failed.
+ ///
+ void moduleImport(SourceLocation ImportLoc, ModuleIdPath Path,
+ const Module *Imported) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Callback invoked when the end of the main file is reached.
+ ///
+ /// No subsequent callbacks will be made.
+ void EndOfMainFile() override { setSeenNoTrivialPPDirective(); }
+
+ /// Callback invoked when starting to read any pragma directive.
+ void PragmaDirective(SourceLocation Loc,
+ PragmaIntroducerKind Introducer) override {}
+
+ /// Called by Preprocessor::HandleMacroExpandedIdentifier when a
+ /// macro invocation is found.
+ void MacroExpands(const Token &MacroNameTok, const MacroDefinition &MD,
+ SourceRange Range, const MacroArgs *Args) override;
+
+ /// Hook called whenever a macro definition is seen.
+ void MacroDefined(const Token &MacroNameTok,
+ const MacroDirective *MD) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Hook called whenever a macro \#undef is seen.
+ /// \param MacroNameTok The active Token
+ /// \param MD A MacroDefinition for the named macro.
+ /// \param Undef New MacroDirective if the macro was defined, null otherwise.
+ ///
+ /// MD is released immediately following this callback.
+ void MacroUndefined(const Token &MacroNameTok, const MacroDefinition &MD,
+ const MacroDirective *Undef) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Hook called whenever the 'defined' operator is seen.
+ /// \param MD The MacroDirective if the name was a macro, null otherwise.
+ void Defined(const Token &MacroNameTok, const MacroDefinition &MD,
+ SourceRange Range) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Hook called whenever an \#if is seen.
+ /// \param Loc the source location of the directive.
+ /// \param ConditionRange The SourceRange of the expression being tested.
+ /// \param ConditionValue The evaluated value of the condition.
+ ///
+ // FIXME: better to pass in a list (or tree!) of Tokens.
+ void If(SourceLocation Loc, SourceRange ConditionRange,
+ ConditionValueKind ConditionValue) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Hook called whenever an \#elif is seen.
+ /// \param Loc the source location of the directive.
+ /// \param ConditionRange The SourceRange of the expression being tested.
+ /// \param ConditionValue The evaluated value of the condition.
+ /// \param IfLoc the source location of the \#if/\#ifdef/\#ifndef directive.
+ // FIXME: better to pass in a list (or tree!) of Tokens.
+ void Elif(SourceLocation Loc, SourceRange ConditionRange,
+ ConditionValueKind ConditionValue, SourceLocation IfLoc) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Hook called whenever an \#ifdef is seen.
+ /// \param Loc the source location of the directive.
+ /// \param MacroNameTok Information on the token being tested.
+ /// \param MD The MacroDefinition if the name was a macro, null otherwise.
+ void Ifdef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Hook called whenever an \#elifdef branch is taken.
+ /// \param Loc the source location of the directive.
+ /// \param MacroNameTok Information on the token being tested.
+ /// \param MD The MacroDefinition if the name was a macro, null otherwise.
+ void Elifdef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Hook called whenever an \#elifdef is skipped.
+ /// \param Loc the source location of the directive.
+ /// \param ConditionRange The SourceRange of the expression being tested.
+ /// \param IfLoc the source location of the \#if/\#ifdef/\#ifndef directive.
+ // FIXME: better to pass in a list (or tree!) of Tokens.
+ void Elifdef(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Hook called whenever an \#ifndef is seen.
+ /// \param Loc the source location of the directive.
+ /// \param MacroNameTok Information on the token being tested.
+ /// \param MD The MacroDefinition if the name was a macro, null otherwise.
+ void Ifndef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Hook called whenever an \#elifndef branch is taken.
+ /// \param Loc the source location of the directive.
+ /// \param MacroNameTok Information on the token being tested.
+ /// \param MD The MacroDefinition if the name was a macro, null otherwise.
+ void Elifndef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Hook called whenever an \#elifndef is skipped.
+ /// \param Loc the source location of the directive.
+ /// \param ConditionRange The SourceRange of the expression being tested.
+ /// \param IfLoc the source location of the \#if/\#ifdef/\#ifndef directive.
+ // FIXME: better to pass in a list (or tree!) of Tokens.
+ void Elifndef(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Hook called whenever an \#else is seen.
+ /// \param Loc the source location of the directive.
+ /// \param IfLoc the source location of the \#if/\#ifdef/\#ifndef directive.
+ void Else(SourceLocation Loc, SourceLocation IfLoc) override {
+ setSeenNoTrivialPPDirective();
+ }
+
+ /// Hook called whenever an \#endif is seen.
+ /// \param Loc the source location of the directive.
+ /// \param IfLoc the source location of the \#if/\#ifdef/\#ifndef directive.
+ void Endif(SourceLocation Loc, SourceLocation IfLoc) override {
+ setSeenNoTrivialPPDirective();
+ }
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_LEX_NO_TRIVIAL_PPDIRECTIVE_TRACER_H
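[Editor's note] To illustrate how a tracer like this plugs into the preprocessor: PPCallbacks subclasses are registered with Preprocessor::addPPCallbacks, and each overridden hook fires as the corresponding directive is processed. The following is a minimal sketch, not the wiring in this patch (which stores the tracer inside the Preprocessor itself); `DirectiveSpy` and `attachSpy` are hypothetical names.

#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include <memory>

// A tiny PPCallbacks subclass that records whether any macro definition
// was seen, mirroring one of the hooks NoTrivialPPDirectiveTracer overrides.
class DirectiveSpy : public clang::PPCallbacks {
public:
  bool SeenMacroDefinition = false;
  void MacroDefined(const clang::Token &MacroNameTok,
                    const clang::MacroDirective *MD) override {
    SeenMacroDefinition = true;
  }
};

void attachSpy(clang::Preprocessor &PP, DirectiveSpy *&Out) {
  auto Spy = std::make_unique<DirectiveSpy>();
  Out = Spy.get();                   // keep a handle; PP takes ownership
  PP.addPPCallbacks(std::move(Spy)); // hooks fire as directives are lexed
}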
diff --git a/clang/include/clang/Lex/Preprocessor.h b/clang/include/clang/Lex/Preprocessor.h
index 71b0f8e..3975484 100644
--- a/clang/include/clang/Lex/Preprocessor.h
+++ b/clang/include/clang/Lex/Preprocessor.h
@@ -82,6 +82,7 @@ class PreprocessorLexer;
class PreprocessorOptions;
class ScratchBuffer;
class TargetInfo;
+class NoTrivialPPDirectiveTracer;
namespace Builtin {
class Context;
@@ -353,6 +354,11 @@ private:
/// First pp-token source location in current translation unit.
SourceLocation FirstPPTokenLoc;
+ /// A preprocessor directive tracer used to detect whether the preprocessing
+ /// state has changed. Such changes cover most semantically observable
+ /// preprocessor state, particularly anything that is order dependent.
+ NoTrivialPPDirectiveTracer *DirTracer = nullptr;
+
/// A position within a C++20 import-seq.
class StdCXXImportSeq {
public:
@@ -609,6 +615,8 @@ private:
return State == NamedModuleImplementation && !getName().contains(':');
}
+ bool isNotAModuleDecl() const { return State == NotAModuleDecl; }
+
StringRef getName() const {
assert(isNamedModule() && "Can't get name from a non named module");
return Name;
@@ -3091,6 +3099,10 @@ public:
bool setDeserializedSafeBufferOptOutMap(
const SmallVectorImpl<SourceLocation> &SrcLocSeqs);
+ /// Whether we've seen pp-directives which may have changed the preprocessing
+ /// state.
+ bool hasSeenNoTrivialPPDirective() const;
+
private:
/// Helper functions to forward lexing to the actual lexer. They all share the
/// same signature.
diff --git a/clang/include/clang/Lex/Token.h b/clang/include/clang/Lex/Token.h
index fc43e72..d9dc5a5 100644
--- a/clang/include/clang/Lex/Token.h
+++ b/clang/include/clang/Lex/Token.h
@@ -86,12 +86,12 @@ public:
// macro stringizing or charizing operator.
CommaAfterElided = 0x200, // The comma following this token was elided (MS).
IsEditorPlaceholder = 0x400, // This identifier is a placeholder.
-
- IsReinjected = 0x800, // A phase 4 token that was produced before and
- // re-added, e.g. via EnterTokenStream. Annotation
- // tokens are *not* reinjected.
- FirstPPToken = 0x1000, // This token is the first pp token in the
- // translation unit.
+ IsReinjected = 0x800, // A phase 4 token that was produced before and
+ // re-added, e.g. via EnterTokenStream. Annotation
+ // tokens are *not* reinjected.
+ HasSeenNoTrivialPPDirective =
+ 0x1000, // Whether we've seen any 'no-trivial' pp-directives before
+ // current position.
};
tok::TokenKind getKind() const { return Kind; }
@@ -321,8 +321,9 @@ public:
/// lexer uses identifier tokens to represent placeholders.
bool isEditorPlaceholder() const { return getFlag(IsEditorPlaceholder); }
- /// Returns true if this token is the first pp-token.
- bool isFirstPPToken() const { return getFlag(FirstPPToken); }
+ bool hasSeenNoTrivialPPDirective() const {
+ return getFlag(HasSeenNoTrivialPPDirective);
+ }
};
/// Information about the conditional stack (\#if directives)
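[Editor's note] A consumer that previously asked Token::isFirstPPToken() would now ask whether any 'no-trivial' directive preceded the token. A short sketch using the flag accessor added above; `handleToken` is a hypothetical name:

#include "clang/Lex/Token.h"

void handleToken(const clang::Token &Tok) {
  if (Tok.hasSeenNoTrivialPPDirective()) {
    // A conditional, include, or macro directive appeared earlier in the
    // main file, so order-dependent preprocessor state may have changed.
  }
}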
diff --git a/clang/include/clang/Parse/ParseHLSLRootSignature.h b/clang/include/clang/Parse/ParseHLSLRootSignature.h
index a49bdfd..c87e6637c 100644
--- a/clang/include/clang/Parse/ParseHLSLRootSignature.h
+++ b/clang/include/clang/Parse/ParseHLSLRootSignature.h
@@ -236,6 +236,10 @@ private:
RootSignatureToken CurToken;
};
+IdentifierInfo *ParseHLSLRootSignature(Sema &Actions,
+ llvm::dxbc::RootSignatureVersion Version,
+ StringLiteral *Signature);
+
} // namespace hlsl
} // namespace clang
diff --git a/clang/include/clang/Sema/CodeCompleteConsumer.h b/clang/include/clang/Sema/CodeCompleteConsumer.h
index 2dd2759..c26f4e3 100644
--- a/clang/include/clang/Sema/CodeCompleteConsumer.h
+++ b/clang/include/clang/Sema/CodeCompleteConsumer.h
@@ -162,7 +162,8 @@ SimplifiedTypeClass getSimplifiedTypeClass(CanQualType T);
/// Determine the type that this declaration will have if it is used
/// as a type or in an expression.
-QualType getDeclUsageType(ASTContext &C, const NamedDecl *ND);
+QualType getDeclUsageType(ASTContext &C, NestedNameSpecifier Qualifier,
+ const NamedDecl *ND);
/// Determine the priority to be given to a macro code completion result
/// with the given name.
@@ -867,7 +868,7 @@ public:
/// If the result should have a nested-name-specifier, this is it.
/// When \c QualifierIsInformative, the nested-name-specifier is
/// informative rather than required.
- NestedNameSpecifier *Qualifier = nullptr;
+ NestedNameSpecifier Qualifier = std::nullopt;
/// If this Decl was unshadowed by using declaration, this can store a
/// pointer to the UsingShadowDecl which was used in the unshadowing process.
@@ -882,7 +883,7 @@ public:
/// Build a result that refers to a declaration.
CodeCompletionResult(const NamedDecl *Declaration, unsigned Priority,
- NestedNameSpecifier *Qualifier = nullptr,
+ NestedNameSpecifier Qualifier = std::nullopt,
bool QualifierIsInformative = false,
bool Accessible = true,
std::vector<FixItHint> FixIts = std::vector<FixItHint>())
diff --git a/clang/include/clang/Sema/DeclSpec.h b/clang/include/clang/Sema/DeclSpec.h
index e568081..c1a99a1 100644
--- a/clang/include/clang/Sema/DeclSpec.h
+++ b/clang/include/clang/Sema/DeclSpec.h
@@ -91,12 +91,11 @@ public:
}
/// Retrieve the representation of the nested-name-specifier.
- NestedNameSpecifier *getScopeRep() const {
+ NestedNameSpecifier getScopeRep() const {
return Builder.getRepresentation();
}
- /// Extend the current nested-name-specifier by another
- /// nested-name-specifier component of the form 'type::'.
+ /// Make a nested-name-specifier of the form 'type::'.
///
/// \param Context The AST context in which this nested-name-specifier
/// resides.
@@ -106,21 +105,7 @@ public:
/// \param TL The TypeLoc that describes the type preceding the '::'.
///
/// \param ColonColonLoc The location of the trailing '::'.
- void Extend(ASTContext &Context, TypeLoc TL, SourceLocation ColonColonLoc);
-
- /// Extend the current nested-name-specifier by another
- /// nested-name-specifier component of the form 'identifier::'.
- ///
- /// \param Context The AST context in which this nested-name-specifier
- /// resides.
- ///
- /// \param Identifier The identifier.
- ///
- /// \param IdentifierLoc The location of the identifier.
- ///
- /// \param ColonColonLoc The location of the trailing '::'.
- void Extend(ASTContext &Context, IdentifierInfo *Identifier,
- SourceLocation IdentifierLoc, SourceLocation ColonColonLoc);
+ void Make(ASTContext &Context, TypeLoc TL, SourceLocation ColonColonLoc);
/// Extend the current nested-name-specifier by another
/// nested-name-specifier component of the form 'namespace::'.
@@ -154,8 +139,9 @@ public:
/// name.
///
/// \param ColonColonLoc The location of the trailing '::'.
- void MakeSuper(ASTContext &Context, CXXRecordDecl *RD,
- SourceLocation SuperLoc, SourceLocation ColonColonLoc);
+ void MakeMicrosoftSuper(ASTContext &Context, CXXRecordDecl *RD,
+ SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc);
/// Make a new nested-name-specifier from incomplete source-location
/// information.
@@ -163,7 +149,7 @@ public:
/// FIXME: This routine should be used very, very rarely, in cases where we
/// need to synthesize a nested-name-specifier. Most code should instead use
/// \c Adopt() with a proper \c NestedNameSpecifierLoc.
- void MakeTrivial(ASTContext &Context, NestedNameSpecifier *Qualifier,
+ void MakeTrivial(ASTContext &Context, NestedNameSpecifier Qualifier,
SourceRange R);
/// Adopt an existing nested-name-specifier (with source-range
@@ -189,14 +175,14 @@ public:
SourceLocation getLastQualifierNameLoc() const;
/// No scope specifier.
- bool isEmpty() const { return Range.isInvalid() && getScopeRep() == nullptr; }
+ bool isEmpty() const { return Range.isInvalid() && !getScopeRep(); }
/// A scope specifier is present, but may be valid or invalid.
bool isNotEmpty() const { return !isEmpty(); }
/// An error occurred during parsing of the scope specifier.
- bool isInvalid() const { return Range.isValid() && getScopeRep() == nullptr; }
+ bool isInvalid() const { return Range.isValid() && !getScopeRep(); }
/// A scope specifier is present, and it refers to a real scope.
- bool isValid() const { return getScopeRep() != nullptr; }
+ bool isValid() const { return bool(getScopeRep()); }
/// Indicate that this nested-name-specifier is invalid.
void SetInvalid(SourceRange R) {
@@ -209,7 +195,7 @@ public:
/// Deprecated. Some call sites intend isNotEmpty() while others intend
/// isValid().
- bool isSet() const { return getScopeRep() != nullptr; }
+ bool isSet() const { return bool(getScopeRep()); }
void clear() {
Range = SourceRange();
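[Editor's note] With NestedNameSpecifier now a value type, the scope specifier's three states are distinguished by the truthiness of the representation plus range validity, exactly as the predicates above encode. A minimal sketch of the resulting state table; `classify` is a hypothetical name:

#include "clang/Sema/DeclSpec.h"
#include <cassert>

void classify(const clang::CXXScopeSpec &SS) {
  if (SS.isEmpty()) {
    // No scope specifier at all: invalid range, no representation.
  } else if (SS.isInvalid()) {
    // A specifier was written but failed to parse: valid range, no rep.
  } else {
    assert(SS.isValid()); // bool(SS.getScopeRep()) is true
  }
}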
diff --git a/clang/include/clang/Sema/HeuristicResolver.h b/clang/include/clang/Sema/HeuristicResolver.h
index e193c0b..71588be 100644
--- a/clang/include/clang/Sema/HeuristicResolver.h
+++ b/clang/include/clang/Sema/HeuristicResolver.h
@@ -67,8 +67,7 @@ public:
// Try to heuristically resolve a dependent nested name specifier
// to the type it likely denotes. Note that *dependent* name specifiers always
// denote types, not namespaces.
- QualType
- resolveNestedNameSpecifierToType(const NestedNameSpecifier *NNS) const;
+ QualType resolveNestedNameSpecifierToType(NestedNameSpecifier NNS) const;
// Perform an imprecise lookup of a dependent name in `RD`.
// This function does not follow strict semantic rules and should be used
diff --git a/clang/include/clang/Sema/ParsedTemplate.h b/clang/include/clang/Sema/ParsedTemplate.h
index 3a8050f..4a3df78 100644
--- a/clang/include/clang/Sema/ParsedTemplate.h
+++ b/clang/include/clang/Sema/ParsedTemplate.h
@@ -48,8 +48,8 @@ namespace clang {
///
/// \param Arg the template type argument or non-type template argument.
/// \param Loc the location of the type.
- ParsedTemplateArgument(KindType Kind, void *Arg, SourceLocation Loc)
- : Kind(Kind), Arg(Arg), Loc(Loc) { }
+ ParsedTemplateArgument(KindType Kind, void *Arg, SourceLocation NameLoc)
+ : Kind(Kind), Arg(Arg), NameLoc(NameLoc) {}
/// Create a template template argument.
///
@@ -60,11 +60,11 @@ namespace clang {
/// argument refers.
///
/// \param TemplateLoc the location of the template name.
- ParsedTemplateArgument(const CXXScopeSpec &SS,
- ParsedTemplateTy Template,
- SourceLocation TemplateLoc)
- : Kind(ParsedTemplateArgument::Template),
- Arg(Template.getAsOpaquePtr()), SS(SS), Loc(TemplateLoc) {}
+ ParsedTemplateArgument(SourceLocation TemplateKwLoc, const CXXScopeSpec &SS,
+ ParsedTemplateTy Template, SourceLocation NameLoc)
+ : Kind(ParsedTemplateArgument::Template),
+ Arg(Template.getAsOpaquePtr()), SS(SS), TemplateKwLoc(TemplateKwLoc),
+ NameLoc(NameLoc) {}
/// Determine whether the given template argument is invalid.
bool isInvalid() const { return Arg == nullptr; }
@@ -91,7 +91,10 @@ namespace clang {
}
- /// Retrieve the location of the template argument.
+ /// Retrieve the location of the template keyword.
- SourceLocation getLocation() const { return Loc; }
+ SourceLocation getTemplateKwLoc() const { return TemplateKwLoc; }
+
+ /// Retrieve the location of the template name.
+ SourceLocation getNameLoc() const { return NameLoc; }
/// Retrieve the nested-name-specifier that precedes the template
/// name in a template template argument.
@@ -128,8 +131,11 @@ namespace clang {
/// argument.
CXXScopeSpec SS;
- /// the location of the template argument.
- SourceLocation Loc;
+ /// the location of the template keyword.
+ SourceLocation TemplateKwLoc;
+
+ /// the location of the template name.
+ SourceLocation NameLoc;
/// The ellipsis location that can accompany a template template
/// argument (turning it into a template template argument expansion).
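[Editor's note] Callers building template template arguments now thread the template keyword location through the constructor and read the two locations back separately. A hedged sketch; `makeArg` and all parameters are placeholders:

#include "clang/Sema/ParsedTemplate.h"

clang::ParsedTemplateArgument
makeArg(clang::SourceLocation TemplateKwLoc, const clang::CXXScopeSpec &SS,
        clang::ParsedTemplateTy Template, clang::SourceLocation NameLoc) {
  clang::ParsedTemplateArgument Arg(TemplateKwLoc, SS, Template, NameLoc);
  // Previously a single getLocation(); now two distinct accessors:
  (void)Arg.getTemplateKwLoc();
  (void)Arg.getNameLoc();
  return Arg;
}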
diff --git a/clang/include/clang/Sema/ScopeInfo.h b/clang/include/clang/Sema/ScopeInfo.h
index 94b247a..4f4d38c 100644
--- a/clang/include/clang/Sema/ScopeInfo.h
+++ b/clang/include/clang/Sema/ScopeInfo.h
@@ -933,7 +933,7 @@ public:
/// to local variables that are usable as constant expressions and
/// do not involve an odr-use (they may still need to be captured
/// if the enclosing full-expression is instantiation dependent).
- llvm::SmallSet<Expr *, 8> NonODRUsedCapturingExprs;
+ llvm::SmallPtrSet<Expr *, 8> NonODRUsedCapturingExprs;
/// A map of explicit capture indices to their introducer source ranges.
llvm::DenseMap<unsigned, SourceRange> ExplicitCaptureRanges;
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 5211373..c3fb57774 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -228,7 +228,9 @@ void threadSafetyCleanup(BeforeSet *Cache);
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
-typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType *, NamedDecl *>,
+typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType *, NamedDecl *,
+ const TemplateSpecializationType *,
+ const SubstBuiltinTemplatePackType *>,
SourceLocation>
UnexpandedParameterPack;
@@ -2659,9 +2661,9 @@ public:
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
- /// Diagnoses the current set of gathered accesses. This typically
- /// happens at full expression level. The set is cleared after emitting the
- /// diagnostics.
+ /// Diagnoses the current set of gathered accesses. This happens at the end of
+ /// each expression evaluation context. Diagnostics are emitted only for
+ /// accesses gathered in the current evaluation context.
void DiagnoseMisalignedMembers();
 /// This function checks if the expression is in the set of potentially
@@ -3117,9 +3119,6 @@ private:
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
- /// Small set of gathered accesses to potentially misaligned members
- /// due to the packed attribute.
- SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
@@ -3225,7 +3224,7 @@ public:
/// current instantiation (C++0x [temp.dep.type]p1).
///
/// \param NNS a dependent nested name specifier.
- CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
+ CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
@@ -3262,7 +3261,7 @@ public:
/// (e.g., Base::), perform name lookup for that identifier as a
/// nested-name-specifier within the given scope, and return the result of
/// that name lookup.
- NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
+ NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
@@ -3581,8 +3580,8 @@ public:
/// Returns the TypeDeclType for the given type declaration,
/// as ASTContext::getTypeDeclType would, but
/// performs the required semantic checks for name lookup of said entity.
- QualType getTypeDeclType(DeclContext *LookupCtx, DiagCtorKind DCK,
- TypeDecl *TD, SourceLocation NameLoc);
+ void checkTypeDeclType(DeclContext *LookupCtx, DiagCtorKind DCK, TypeDecl *TD,
+ SourceLocation NameLoc);
/// If the identifier refers to a type name within this scope,
/// return the declaration of that type.
@@ -4179,8 +4178,15 @@ public:
/// return statement in the scope of a variable has the same NRVO candidate,
/// that candidate is an NRVO variable.
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
- Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
- Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
+
+ /// Performs semantic analysis at the end of a function body.
+ ///
+ /// \param RetainFunctionScopeInfo If \c true, the client is responsible for
+ /// releasing the associated \p FunctionScopeInfo. This is useful when
+ /// building e.g. LambdaExprs.
+ Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body,
+ bool IsInstantiation = false,
+ bool RetainFunctionScopeInfo = false);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
@@ -5398,7 +5404,7 @@ public:
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
- void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
+ void FinalizeVarWithDestructor(VarDecl *VD, CXXRecordDecl *DeclInit);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
@@ -6765,6 +6771,10 @@ public:
/// InLifetimeExtendingContext is true.
SmallVector<MaterializeTemporaryExpr *, 8> ForRangeLifetimeExtendTemps;
+ /// Small set of gathered accesses to potentially misaligned members
+ /// due to the packed attribute.
+ SmallVector<MisalignedMember, 4> MisalignedMembers;
+
 /// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
@@ -6873,23 +6883,23 @@ public:
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back();
- };
+ }
ExpressionEvaluationContextRecord &currentEvaluationContext() {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back();
- };
+ }
ExpressionEvaluationContextRecord &parentEvaluationContext() {
assert(ExprEvalContexts.size() >= 2 &&
"Must be in an expression evaluation context");
return ExprEvalContexts[ExprEvalContexts.size() - 2];
- };
+ }
const ExpressionEvaluationContextRecord &parentEvaluationContext() const {
return const_cast<Sema *>(this)->parentEvaluationContext();
- };
+ }
bool isAttrContext() const {
return ExprEvalContexts.back().ExprContext ==
@@ -7618,7 +7628,7 @@ public:
/// "real" base class is checked as appropriate when checking the access of
/// the member name.
ExprResult PerformObjectMemberConversion(Expr *From,
- NestedNameSpecifier *Qualifier,
+ NestedNameSpecifier Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
@@ -8055,8 +8065,8 @@ public:
ExprResult &RHS);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
- ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
- bool IsDivide);
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
+ BinaryOperatorKind Opc);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
@@ -8065,7 +8075,7 @@ public:
BinaryOperatorKind Opc, QualType *CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
- QualType *CompLHSTy = nullptr);
+ BinaryOperatorKind Opc, QualType *CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
@@ -9139,8 +9149,7 @@ public:
/// Complete a lambda-expression having processed and attached the
/// lambda body.
- ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
- sema::LambdaScopeInfo *LSI);
+ ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
@@ -9836,7 +9845,7 @@ public:
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, ModuleIdPath Partition,
ModuleImportState &ImportState,
- bool IntroducerIsFirstPPToken);
+ bool SeenNoTrivialPPDirective);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
@@ -10210,7 +10219,7 @@ public:
ExprResult InitializeExplicitObjectArgument(Sema &S, Expr *Obj,
FunctionDecl *Fun);
ExprResult PerformImplicitObjectArgumentInitialization(
- Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl,
+ Expr *From, NestedNameSpecifier Qualifier, NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// PerformContextuallyConvertToBool - Perform a contextual conversion
@@ -11618,13 +11627,16 @@ public:
void NoteAllFoundTemplates(TemplateName Name);
- QualType CheckTemplateIdType(TemplateName Template,
+ QualType CheckTemplateIdType(ElaboratedTypeKeyword Keyword,
+ TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
- ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
- TemplateTy Template, const IdentifierInfo *TemplateII,
+ ActOnTemplateIdType(Scope *S, ElaboratedTypeKeyword ElaboratedKeyword,
+ SourceLocation ElaboratedKeywordLoc, CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc, TemplateTy Template,
+ const IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false,
@@ -11858,8 +11870,8 @@ public:
/// argument, substitute into that default template argument and
/// return the corresponding template argument.
TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(
- TemplateDecl *Template, SourceLocation TemplateLoc,
- SourceLocation RAngleLoc, Decl *Param,
+ TemplateDecl *Template, SourceLocation TemplateKWLoc,
+ SourceLocation TemplateNameLoc, SourceLocation RAngleLoc, Decl *Param,
ArrayRef<TemplateArgument> SugaredConverted,
ArrayRef<TemplateArgument> CanonicalConverted, bool &HasDefaultArg);
@@ -13487,8 +13499,6 @@ public:
~ArgPackSubstIndexRAII() { Self.ArgPackSubstIndex = OldSubstIndex; }
};
- friend class ArgumentPackSubstitutionRAII;
-
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
@@ -13762,8 +13772,9 @@ public:
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
- SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
- SourceLocation Loc,
+ SubstTemplateName(SourceLocation TemplateKWLoc,
+ NestedNameSpecifierLoc &QualifierLoc, TemplateName Name,
+ SourceLocation NameLoc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC,
@@ -14417,6 +14428,15 @@ public:
static void collectUnexpandedParameterPacks(
Expr *E, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+ /// Invoked when parsing a template argument.
+ ///
+ /// \param Arg the template argument, which may already be invalid.
+ ///
+ /// If it is followed by ellipsis, this function is called before
+ /// `ActOnPackExpansion`.
+ ParsedTemplateArgument
+ ActOnTemplateTemplateArgument(const ParsedTemplateArgument &Arg);
+
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
@@ -14504,7 +14524,8 @@ public:
bool CheckParameterPacksForExpansion(
SourceLocation EllipsisLoc, SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
- const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool FailOnPackProducingTemplates, bool &ShouldExpand,
bool &RetainExpansion, UnsignedOrNone &NumExpansions);
/// Determine the number of arguments in the given pack expansion
@@ -15179,14 +15200,6 @@ public:
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
- /// Retrieve a version of the type 'T' that is elaborated by Keyword,
- /// qualified by the nested-name-specifier contained in SS, and that is
- /// (re)declared by OwnedTagDecl, which is nullptr if this is not a
- /// (re)declaration.
- QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
- const CXXScopeSpec &SS, QualType T,
- TagDecl *OwnedTagDecl = nullptr);
-
// Returns the underlying type of a decltype with the given expression.
QualType getDecltypeForExpr(Expr *E);
@@ -15331,6 +15344,16 @@ public:
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl *>(D), &Hidden);
}
+ /// Determine if \p D has a definition which allows us to redefine it in the
+ /// current TU. \p Suggested is the definition that should be made visible to
+ /// expose the definition.
+ bool isRedefinitionAllowedFor(NamedDecl *D, NamedDecl **Suggested,
+ bool &Visible);
+ bool isRedefinitionAllowedFor(const NamedDecl *D, bool &Visible) {
+ NamedDecl *Hidden;
+ return isRedefinitionAllowedFor(const_cast<NamedDecl *>(D), &Hidden,
+ Visible);
+ }
/// Determine if \p D has a reachable definition. If not, suggest a
/// declaration that should be made reachable to expose the definition.
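[Editor's note] The convenience overload mirrors hasVisibleDefinition: it discards the suggested declaration and reports only whether redefinition is permitted and whether the existing definition is visible. A usage sketch, assuming a Sema and a NamedDecl in scope; `mayRedefine` is a hypothetical name:

#include "clang/Sema/Sema.h"

bool mayRedefine(clang::Sema &S, const clang::NamedDecl *D) {
  bool Visible = false;
  bool Allowed = S.isRedefinitionAllowedFor(D, Visible);
  // `Visible` reports whether the prior definition is visible; a caller
  // would typically diagnose a redefinition only when !Allowed.
  return Allowed;
}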
diff --git a/clang/include/clang/Sema/SemaHLSL.h b/clang/include/clang/Sema/SemaHLSL.h
index 085c9ed..5cbe1b6 100644
--- a/clang/include/clang/Sema/SemaHLSL.h
+++ b/clang/include/clang/Sema/SemaHLSL.h
@@ -153,6 +153,10 @@ public:
ActOnFinishRootSignatureDecl(SourceLocation Loc, IdentifierInfo *DeclIdent,
ArrayRef<hlsl::RootSignatureElement> Elements);
+ void SetRootSignatureOverride(IdentifierInfo *DeclIdent) {
+ RootSigOverrideIdent = DeclIdent;
+ }
+
// Returns true if any RootSignatureElement is invalid and a diagnostic was
// produced
bool
@@ -221,6 +225,8 @@ private:
uint32_t ImplicitBindingNextOrderID = 0;
+ IdentifierInfo *RootSigOverrideIdent = nullptr;
+
private:
void collectResourceBindingsOnVarDecl(VarDecl *D);
void collectResourceBindingsOnUserRecordDecl(const VarDecl *VD,
@@ -229,10 +235,17 @@ private:
void diagnoseAvailabilityViolations(TranslationUnitDecl *TU);
- bool initGlobalResourceDecl(VarDecl *VD);
uint32_t getNextImplicitBindingOrderID() {
return ImplicitBindingNextOrderID++;
}
+
+ bool initGlobalResourceDecl(VarDecl *VD);
+ bool initGlobalResourceArrayDecl(VarDecl *VD);
+ void createResourceRecordCtorArgs(const Type *ResourceTy, StringRef VarName,
+ HLSLResourceBindingAttr *RBA,
+ HLSLVkBindingAttr *VkBinding,
+ uint32_t ArrayIndex,
+ llvm::SmallVectorImpl<Expr *> &Args);
};
} // namespace clang
diff --git a/clang/include/clang/Sema/SemaInternal.h b/clang/include/clang/Sema/SemaInternal.h
index 42c9469..8f6041b 100644
--- a/clang/include/clang/Sema/SemaInternal.h
+++ b/clang/include/clang/Sema/SemaInternal.h
@@ -71,12 +71,17 @@ inline std::pair<unsigned, unsigned> getDepthAndIndex(const NamedDecl *ND) {
}
/// Retrieve the depth and index of an unexpanded parameter pack.
-inline std::pair<unsigned, unsigned>
+/// Returns nullopt when the unexpanded packs do not correspond to template
+/// parameters, e.g. __builtin_dedup_types.
+inline std::optional<std::pair<unsigned, unsigned>>
getDepthAndIndex(UnexpandedParameterPack UPP) {
if (const auto *TTP = dyn_cast<const TemplateTypeParmType *>(UPP.first))
return std::make_pair(TTP->getDepth(), TTP->getIndex());
-
- return getDepthAndIndex(cast<NamedDecl *>(UPP.first));
+ if (isa<NamedDecl *>(UPP.first))
+ return getDepthAndIndex(cast<NamedDecl *>(UPP.first));
+ assert((isa<const TemplateSpecializationType *,
+ const SubstBuiltinTemplatePackType *>(UPP.first)));
+ return std::nullopt;
}
class TypoCorrectionConsumer : public VisibleDeclConsumer {
@@ -209,7 +214,7 @@ private:
class NamespaceSpecifierSet {
struct SpecifierInfo {
DeclContext* DeclCtx;
- NestedNameSpecifier* NameSpecifier;
+ NestedNameSpecifier NameSpecifier;
unsigned EditDistance;
};
@@ -229,9 +234,9 @@ private:
static DeclContextList buildContextChain(DeclContext *Start);
unsigned buildNestedNameSpecifier(DeclContextList &DeclChain,
- NestedNameSpecifier *&NNS);
+ NestedNameSpecifier &NNS);
- public:
+ public:
NamespaceSpecifierSet(ASTContext &Context, DeclContext *CurContext,
CXXScopeSpec *CurScopeSpec);
@@ -276,7 +281,7 @@ private:
};
void addName(StringRef Name, NamedDecl *ND,
- NestedNameSpecifier *NNS = nullptr, bool isKeyword = false);
+ NestedNameSpecifier NNS = std::nullopt, bool isKeyword = false);
/// Find any visible decls for the given typo correction candidate.
 /// If none are found, add it to the set of candidates for which qualified lookups
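[Editor's note] The optional return of getDepthAndIndex above means call sites change from destructuring a pair to unwrapping an optional; packs produced by pack-producing builtins such as __builtin_dedup_types yield std::nullopt. A sketch of an adapted caller; `visitPack` is a hypothetical name:

#include "clang/Sema/SemaInternal.h"
#include <optional>
#include <utility>

void visitPack(clang::UnexpandedParameterPack UPP) {
  if (std::optional<std::pair<unsigned, unsigned>> DI =
          clang::getDepthAndIndex(UPP)) {
    unsigned Depth = DI->first, Index = DI->second;
    (void)Depth; (void)Index; // look up via the template argument lists
  } else {
    // The pack does not correspond to a template parameter
    // (e.g. __builtin_dedup_types); it must be handled separately.
  }
}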
diff --git a/clang/include/clang/Sema/SemaOpenACC.h b/clang/include/clang/Sema/SemaOpenACC.h
index d078de5..42e8658 100644
--- a/clang/include/clang/Sema/SemaOpenACC.h
+++ b/clang/include/clang/Sema/SemaOpenACC.h
@@ -244,7 +244,14 @@ public:
// 'temporary' created for the init (in the case of a copy), such as with
// firstprivate.
std::pair<VarDecl *, VarDecl *> CreateInitRecipe(OpenACCClauseKind CK,
- const Expr *VarExpr);
+ const Expr *VarExpr) {
+ assert(CK != OpenACCClauseKind::Reduction);
+ return CreateInitRecipe(CK, OpenACCReductionOperator::Invalid, VarExpr);
+ }
+ std::pair<VarDecl *, VarDecl *>
+ CreateInitRecipe(OpenACCClauseKind CK,
+ OpenACCReductionOperator ReductionOperator,
+ const Expr *VarExpr);
public:
ComputeConstructInfo &getActiveComputeConstructInfo() {
@@ -947,12 +954,12 @@ public:
ArrayRef<Expr *> IntExprs, SourceLocation EndLoc);
// Does the checking for a 'reduction ' clause that needs to be done in
// dependent and not dependent cases.
- OpenACCClause *
- CheckReductionClause(ArrayRef<const OpenACCClause *> ExistingClauses,
- OpenACCDirectiveKind DirectiveKind,
- SourceLocation BeginLoc, SourceLocation LParenLoc,
- OpenACCReductionOperator ReductionOp,
- ArrayRef<Expr *> Vars, SourceLocation EndLoc);
+ OpenACCClause *CheckReductionClause(
+ ArrayRef<const OpenACCClause *> ExistingClauses,
+ OpenACCDirectiveKind DirectiveKind, SourceLocation BeginLoc,
+ SourceLocation LParenLoc, OpenACCReductionOperator ReductionOp,
+ ArrayRef<Expr *> Vars, ArrayRef<OpenACCReductionRecipe> Recipes,
+ SourceLocation EndLoc);
ExprResult BuildOpenACCAsteriskSizeExpr(SourceLocation AsteriskLoc);
ExprResult ActOnOpenACCAsteriskSizeExpr(SourceLocation AsteriskLoc);
diff --git a/clang/include/clang/Sema/SemaSYCL.h b/clang/include/clang/Sema/SemaSYCL.h
index b47b2f15..7ae556d 100644
--- a/clang/include/clang/Sema/SemaSYCL.h
+++ b/clang/include/clang/Sema/SemaSYCL.h
@@ -64,6 +64,7 @@ public:
void handleKernelAttr(Decl *D, const ParsedAttr &AL);
void handleKernelEntryPointAttr(Decl *D, const ParsedAttr &AL);
+ void CheckSYCLExternalFunctionDecl(FunctionDecl *FD);
void CheckSYCLEntryPointFunctionDecl(FunctionDecl *FD);
StmtResult BuildSYCLKernelCallStmt(FunctionDecl *FD, CompoundStmt *Body);
};
diff --git a/clang/include/clang/Sema/SemaWasm.h b/clang/include/clang/Sema/SemaWasm.h
index 8c0639f..f825907 100644
--- a/clang/include/clang/Sema/SemaWasm.h
+++ b/clang/include/clang/Sema/SemaWasm.h
@@ -37,7 +37,8 @@ public:
bool BuiltinWasmTableGrow(CallExpr *TheCall);
bool BuiltinWasmTableFill(CallExpr *TheCall);
bool BuiltinWasmTableCopy(CallExpr *TheCall);
- bool BuiltinWasmTestFunctionPointerSignature(CallExpr *TheCall);
+ bool BuiltinWasmTestFunctionPointerSignature(const TargetInfo &TI,
+ CallExpr *TheCall);
WebAssemblyImportNameAttr *
mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL);
diff --git a/clang/include/clang/Sema/TypoCorrection.h b/clang/include/clang/Sema/TypoCorrection.h
index 09de164..1d780c4 100644
--- a/clang/include/clang/Sema/TypoCorrection.h
+++ b/clang/include/clang/Sema/TypoCorrection.h
@@ -57,15 +57,15 @@ public:
static const unsigned CallbackDistanceWeight = 150U;
TypoCorrection(const DeclarationName &Name, NamedDecl *NameDecl,
- NestedNameSpecifier *NNS = nullptr, unsigned CharDistance = 0,
- unsigned QualifierDistance = 0)
+ NestedNameSpecifier NNS = std::nullopt,
+ unsigned CharDistance = 0, unsigned QualifierDistance = 0)
: CorrectionName(Name), CorrectionNameSpec(NNS),
CharDistance(CharDistance), QualifierDistance(QualifierDistance) {
if (NameDecl)
CorrectionDecls.push_back(NameDecl);
}
- TypoCorrection(NamedDecl *Name, NestedNameSpecifier *NNS = nullptr,
+ TypoCorrection(NamedDecl *Name, NestedNameSpecifier NNS = std::nullopt,
unsigned CharDistance = 0)
: CorrectionName(Name->getDeclName()), CorrectionNameSpec(NNS),
CharDistance(CharDistance) {
@@ -73,7 +73,7 @@ public:
CorrectionDecls.push_back(Name);
}
- TypoCorrection(DeclarationName Name, NestedNameSpecifier *NNS = nullptr,
+ TypoCorrection(DeclarationName Name, NestedNameSpecifier NNS = std::nullopt,
unsigned CharDistance = 0)
: CorrectionName(Name), CorrectionNameSpec(NNS),
CharDistance(CharDistance) {}
@@ -88,13 +88,13 @@ public:
}
/// Gets the NestedNameSpecifier needed to use the typo correction
- NestedNameSpecifier *getCorrectionSpecifier() const {
+ NestedNameSpecifier getCorrectionSpecifier() const {
return CorrectionNameSpec;
}
- void setCorrectionSpecifier(NestedNameSpecifier *NNS) {
+ void setCorrectionSpecifier(NestedNameSpecifier NNS) {
CorrectionNameSpec = NNS;
- ForceSpecifierReplacement = (NNS != nullptr);
+ ForceSpecifierReplacement = !!NNS;
}
void WillReplaceSpecifier(bool ForceReplacement) {
@@ -264,7 +264,7 @@ private:
// Results.
DeclarationName CorrectionName;
- NestedNameSpecifier *CorrectionNameSpec = nullptr;
+ NestedNameSpecifier CorrectionNameSpec = std::nullopt;
SmallVector<NamedDecl *, 1> CorrectionDecls;
unsigned CharDistance = 0;
unsigned QualifierDistance = 0;
@@ -282,8 +282,9 @@ class CorrectionCandidateCallback {
public:
static const unsigned InvalidDistance = TypoCorrection::InvalidDistance;
- explicit CorrectionCandidateCallback(const IdentifierInfo *Typo = nullptr,
- NestedNameSpecifier *TypoNNS = nullptr)
+ explicit CorrectionCandidateCallback(
+ const IdentifierInfo *Typo = nullptr,
+ NestedNameSpecifier TypoNNS = std::nullopt)
: Typo(Typo), TypoNNS(TypoNNS) {}
virtual ~CorrectionCandidateCallback() = default;
@@ -320,7 +321,7 @@ public:
virtual std::unique_ptr<CorrectionCandidateCallback> clone() = 0;
void setTypoName(const IdentifierInfo *II) { Typo = II; }
- void setTypoNNS(NestedNameSpecifier *NNS) { TypoNNS = NNS; }
+ void setTypoNNS(NestedNameSpecifier NNS) { TypoNNS = NNS; }
// Flags for context-dependent keywords. WantFunctionLikeCasts is only
// used/meaningful when WantCXXNamedCasts is false.
@@ -346,13 +347,13 @@ protected:
}
const IdentifierInfo *Typo;
- NestedNameSpecifier *TypoNNS;
+ NestedNameSpecifier TypoNNS;
};
class DefaultFilterCCC final : public CorrectionCandidateCallback {
public:
explicit DefaultFilterCCC(const IdentifierInfo *Typo = nullptr,
- NestedNameSpecifier *TypoNNS = nullptr)
+ NestedNameSpecifier TypoNNS = std::nullopt)
: CorrectionCandidateCallback(Typo, TypoNNS) {}
std::unique_ptr<CorrectionCandidateCallback> clone() override {
@@ -366,7 +367,7 @@ template <class C>
class DeclFilterCCC final : public CorrectionCandidateCallback {
public:
explicit DeclFilterCCC(const IdentifierInfo *Typo = nullptr,
- NestedNameSpecifier *TypoNNS = nullptr)
+ NestedNameSpecifier TypoNNS = std::nullopt)
: CorrectionCandidateCallback(Typo, TypoNNS) {}
bool ValidateCandidate(const TypoCorrection &candidate) override {
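[Editor's note] Under value semantics an absent qualifier is spelled std::nullopt, and truthiness replaces null-pointer checks throughout this header (see the `!!NNS` above). A small sketch, assuming NestedNameSpecifier's contextual bool conversion; `example` is a hypothetical name:

#include "clang/Sema/TypoCorrection.h"

void example(clang::NamedDecl *ND, clang::NestedNameSpecifier NNS) {
  clang::TypoCorrection TC(ND);   // qualifier defaults to std::nullopt
  TC.setCorrectionSpecifier(NNS); // forces replacement iff NNS is truthy
  if (TC.getCorrectionSpecifier()) {
    // The correction must be spelled with a leading qualifier.
  }
}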
diff --git a/clang/include/clang/Serialization/ASTReader.h b/clang/include/clang/Serialization/ASTReader.h
index 7ce626c..3ede092 100644
--- a/clang/include/clang/Serialization/ASTReader.h
+++ b/clang/include/clang/Serialization/ASTReader.h
@@ -526,6 +526,9 @@ private:
/// A timer used to track the time spent deserializing.
std::unique_ptr<llvm::Timer> ReadTimer;
+ // A TimeRegion used to start and stop ReadTimer via RAII.
+ std::optional<llvm::TimeRegion> ReadTimeRegion;
+
/// The location where the module file will be considered as
/// imported from. For non-module AST types it should be invalid.
SourceLocation CurrentImportLoc;
diff --git a/clang/include/clang/Serialization/ASTRecordReader.h b/clang/include/clang/Serialization/ASTRecordReader.h
index 1472497..aed1b7d 100644
--- a/clang/include/clang/Serialization/ASTRecordReader.h
+++ b/clang/include/clang/Serialization/ASTRecordReader.h
@@ -223,7 +223,7 @@ public:
void readQualifierInfo(QualifierInfo &Info);
/// Return a nested name specifier, advancing Idx.
- // NestedNameSpecifier *readNestedNameSpecifier(); (inherited)
+ // NestedNameSpecifier readNestedNameSpecifier(); (inherited)
NestedNameSpecifierLoc readNestedNameSpecifierLoc();
diff --git a/clang/include/clang/Serialization/ASTRecordWriter.h b/clang/include/clang/Serialization/ASTRecordWriter.h
index ee005ec..9849ea6 100644
--- a/clang/include/clang/Serialization/ASTRecordWriter.h
+++ b/clang/include/clang/Serialization/ASTRecordWriter.h
@@ -247,8 +247,7 @@ public:
void AddTypeLoc(TypeLoc TL);
/// Emits a template argument location info.
- void AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
- const TemplateArgumentLocInfo &Arg);
+ void AddTemplateArgumentLocInfo(const TemplateArgumentLoc &Arg);
/// Emits a template argument location.
void AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg);
@@ -280,7 +279,7 @@ public:
void AddQualifierInfo(const QualifierInfo &Info);
/// Emit a nested name specifier.
- void AddNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ void AddNestedNameSpecifier(NestedNameSpecifier NNS) {
writeNestedNameSpecifier(NNS);
}
diff --git a/clang/include/clang/Serialization/TypeBitCodes.def b/clang/include/clang/Serialization/TypeBitCodes.def
index 613eb6a..bea1525 100644
--- a/clang/include/clang/Serialization/TypeBitCodes.def
+++ b/clang/include/clang/Serialization/TypeBitCodes.def
@@ -32,7 +32,6 @@ TYPE_BIT_CODE(Enum, ENUM, 20)
TYPE_BIT_CODE(ObjCInterface, OBJC_INTERFACE, 21)
TYPE_BIT_CODE(ObjCObjectPointer, OBJC_OBJECT_POINTER, 22)
TYPE_BIT_CODE(Decltype, DECLTYPE, 23)
-TYPE_BIT_CODE(Elaborated, ELABORATED, 24)
TYPE_BIT_CODE(SubstTemplateTypeParm, SUBST_TEMPLATE_TYPE_PARM, 25)
TYPE_BIT_CODE(UnresolvedUsing, UNRESOLVED_USING, 26)
TYPE_BIT_CODE(InjectedClassName, INJECTED_CLASS_NAME, 27)
@@ -70,5 +69,6 @@ TYPE_BIT_CODE(ArrayParameter, ARRAY_PARAMETER, 58)
TYPE_BIT_CODE(HLSLAttributedResource, HLSLRESOURCE_ATTRIBUTED, 59)
TYPE_BIT_CODE(HLSLInlineSpirv, HLSL_INLINE_SPIRV, 60)
TYPE_BIT_CODE(PredefinedSugar, PREDEFINED_SUGAR, 61)
+TYPE_BIT_CODE(SubstBuiltinTemplatePack, SUBST_BUILTIN_TEMPLATE_PACK, 62)
#undef TYPE_BIT_CODE
diff --git a/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h b/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
index 8696fce..f6a0233 100644
--- a/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
+++ b/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
@@ -320,7 +320,7 @@ protected:
 /// A set of location contexts that correspond to call sites which should be
/// considered "interesting".
- llvm::SmallSet<const LocationContext *, 2> InterestingLocationContexts;
+ llvm::SmallPtrSet<const LocationContext *, 2> InterestingLocationContexts;
/// A set of custom visitors which generate "event" diagnostics at
/// interesting points in the path.
@@ -348,7 +348,7 @@ protected:
llvm::SmallSet<InvalidationRecord, 4> Invalidations;
/// Conditions we're already tracking.
- llvm::SmallSet<const ExplodedNode *, 4> TrackedConditions;
+ llvm::SmallPtrSet<const ExplodedNode *, 4> TrackedConditions;
/// Reports with different uniqueing locations are considered to be different
/// for the purposes of deduplication.
@@ -623,10 +623,12 @@ public:
ASTContext &getContext() { return D.getASTContext(); }
const SourceManager &getSourceManager() { return D.getSourceManager(); }
+ const SourceManager &getSourceManager() const { return D.getSourceManager(); }
const AnalyzerOptions &getAnalyzerOptions() { return D.getAnalyzerOptions(); }
Preprocessor &getPreprocessor() { return D.getPreprocessor(); }
+ const Preprocessor &getPreprocessor() const { return D.getPreprocessor(); }
/// Get the top-level entry point for the issue to be reported.
const Decl *getAnalysisEntryPoint() const { return AnalysisEntryPoint; }
diff --git a/clang/include/clang/StaticAnalyzer/Core/Checker.h b/clang/include/clang/StaticAnalyzer/Core/Checker.h
index 31cc095c..d9a7c00 100644
--- a/clang/include/clang/StaticAnalyzer/Core/Checker.h
+++ b/clang/include/clang/StaticAnalyzer/Core/Checker.h
@@ -209,8 +209,8 @@ public:
class Bind {
template <typename CHECKER>
static void _checkBind(void *checker, SVal location, SVal val, const Stmt *S,
- CheckerContext &C) {
- ((const CHECKER *)checker)->checkBind(location, val, S, C);
+ bool AtDeclInit, CheckerContext &C) {
+ ((const CHECKER *)checker)->checkBind(location, val, S, AtDeclInit, C);
}
public:
diff --git a/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h b/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
index c8e6f12..bf33ce6 100644
--- a/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
+++ b/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
@@ -338,10 +338,9 @@ public:
ExprEngine &Eng);
/// Run checkers for binding of a value to a location.
- void runCheckersForBind(ExplodedNodeSet &Dst,
- const ExplodedNodeSet &Src,
- SVal location, SVal val,
- const Stmt *S, ExprEngine &Eng,
+ void runCheckersForBind(ExplodedNodeSet &Dst, const ExplodedNodeSet &Src,
+ SVal location, SVal val, const Stmt *S,
+ bool AtDeclInit, ExprEngine &Eng,
const ProgramPoint &PP);
/// Run checkers after taking a control flow edge.
@@ -499,8 +498,8 @@ public:
using CheckLocationFunc = CheckerFn<void(SVal location, bool isLoad,
const Stmt *S, CheckerContext &)>;
- using CheckBindFunc =
- CheckerFn<void(SVal location, SVal val, const Stmt *S, CheckerContext &)>;
+ using CheckBindFunc = CheckerFn<void(SVal location, SVal val, const Stmt *S,
+ bool AtDeclInit, CheckerContext &)>;
using CheckBlockEntranceFunc =
CheckerFn<void(const BlockEntrance &, CheckerContext &)>;
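[Editor's note] Checkers opting into check::Bind gain the AtDeclInit flag, distinguishing a bind performed by a declaration's initializer from an ordinary store. A hedged sketch matching the new CheckBindFunc signature; `BindLogger` is a hypothetical checker name:

#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

using namespace clang;
using namespace ento;

class BindLogger : public Checker<check::Bind> {
public:
  void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
                 CheckerContext &C) const {
    if (AtDeclInit) {
      // The bind comes from a variable's initializer (e.g. `int x = f();`)
      // rather than a later assignment; checks can now tell these apart.
    }
  }
};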
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
index f20b003..cf035a9 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
@@ -58,25 +58,34 @@ public:
AnalysisManager &getAnalysisManager() {
return Eng.getAnalysisManager();
}
+ const AnalysisManager &getAnalysisManager() const {
+ return Eng.getAnalysisManager();
+ }
ConstraintManager &getConstraintManager() {
return Eng.getConstraintManager();
}
+ const ConstraintManager &getConstraintManager() const {
+ return Eng.getConstraintManager();
+ }
StoreManager &getStoreManager() {
return Eng.getStoreManager();
}
+ const StoreManager &getStoreManager() const { return Eng.getStoreManager(); }
/// Returns the previous node in the exploded graph, which includes
/// the state of the program before the checker ran. Note, checkers should
/// not retain the node in their state since the nodes might get invalidated.
ExplodedNode *getPredecessor() { return Pred; }
+ const ExplodedNode *getPredecessor() const { return Pred; }
const ProgramPoint getLocation() const { return Location; }
const ProgramStateRef &getState() const { return Pred->getState(); }
/// Check if the checker changed the state of the execution; ex: added
/// a new transition or a bug report.
bool isDifferent() { return Changed; }
+ bool isDifferent() const { return Changed; }
/// Returns the number of times the current block has been visited
/// along the analyzed path.
@@ -108,24 +117,38 @@ public:
BugReporter &getBugReporter() {
return Eng.getBugReporter();
}
+ const BugReporter &getBugReporter() const { return Eng.getBugReporter(); }
const SourceManager &getSourceManager() {
return getBugReporter().getSourceManager();
}
+ const SourceManager &getSourceManager() const {
+ return getBugReporter().getSourceManager();
+ }
Preprocessor &getPreprocessor() { return getBugReporter().getPreprocessor(); }
+ const Preprocessor &getPreprocessor() const {
+ return getBugReporter().getPreprocessor();
+ }
SValBuilder &getSValBuilder() {
return Eng.getSValBuilder();
}
+ const SValBuilder &getSValBuilder() const { return Eng.getSValBuilder(); }
SymbolManager &getSymbolManager() {
return getSValBuilder().getSymbolManager();
}
+ const SymbolManager &getSymbolManager() const {
+ return getSValBuilder().getSymbolManager();
+ }
ProgramStateManager &getStateManager() {
return Eng.getStateManager();
}
+ const ProgramStateManager &getStateManager() const {
+ return Eng.getStateManager();
+ }
AnalysisDeclContext *getCurrentAnalysisDeclContext() const {
return Pred->getLocationContext()->getAnalysisDeclContext();
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index fbb3434..d184986cd 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -196,6 +196,7 @@ public:
ASTContext &getContext() const { return AMgr.getASTContext(); }
AnalysisManager &getAnalysisManager() { return AMgr; }
+ const AnalysisManager &getAnalysisManager() const { return AMgr; }
AnalysisDeclContextManager &getAnalysisDeclContextManager() {
return AMgr.getAnalysisDeclContextManager();
@@ -206,8 +207,10 @@ public:
}
SValBuilder &getSValBuilder() { return svalBuilder; }
+ const SValBuilder &getSValBuilder() const { return svalBuilder; }
BugReporter &getBugReporter() { return BR; }
+ const BugReporter &getBugReporter() const { return BR; }
cross_tu::CrossTranslationUnitContext *
getCrossTranslationUnitContext() {
@@ -416,12 +419,19 @@ public:
unsigned int Space, bool IsDot) const;
ProgramStateManager &getStateManager() { return StateMgr; }
+ const ProgramStateManager &getStateManager() const { return StateMgr; }
StoreManager &getStoreManager() { return StateMgr.getStoreManager(); }
+ const StoreManager &getStoreManager() const {
+ return StateMgr.getStoreManager();
+ }
ConstraintManager &getConstraintManager() {
return StateMgr.getConstraintManager();
}
+ const ConstraintManager &getConstraintManager() const {
+ return StateMgr.getConstraintManager();
+ }
// FIXME: Remove when we migrate over to just using SValBuilder.
BasicValueFactory &getBasicVals() {
@@ -429,6 +439,7 @@ public:
}
SymbolManager &getSymbolManager() { return SymMgr; }
+ const SymbolManager &getSymbolManager() const { return SymMgr; }
MemRegionManager &getRegionManager() { return MRMgr; }
DataTag::Factory &getDataTags() { return Engine.getDataTags(); }
@@ -660,7 +671,7 @@ private:
/// evalBind - Handle the semantics of binding a value to a specific location.
/// This method is used by evalStore, VisitDeclStmt, and others.
void evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE, ExplodedNode *Pred,
- SVal location, SVal Val, bool atDeclInit = false,
+ SVal location, SVal Val, bool AtDeclInit = false,
const ProgramPoint *PP = nullptr);
ProgramStateRef
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
index 5271453..12487a3 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
@@ -572,7 +572,11 @@ public:
CallEventManager &getCallEventManager() { return *CallEventMgr; }
StoreManager &getStoreManager() { return *StoreMgr; }
+ const StoreManager &getStoreManager() const { return *StoreMgr; }
ConstraintManager &getConstraintManager() { return *ConstraintMgr; }
+ const ConstraintManager &getConstraintManager() const {
+ return *ConstraintMgr;
+ }
ExprEngine &getOwningEngine() { return *Eng; }
ProgramStateRef
diff --git a/clang/include/clang/Tooling/Refactoring/Lookup.h b/clang/include/clang/Tooling/Refactoring/Lookup.h
index dcb40b7e..fe0df86 100644
--- a/clang/include/clang/Tooling/Refactoring/Lookup.h
+++ b/clang/include/clang/Tooling/Refactoring/Lookup.h
@@ -38,8 +38,7 @@ namespace tooling {
/// \param ReplacementString The replacement nested name. Must be fully
/// qualified including a leading "::".
/// \returns The new name to be inserted in place of the current nested name.
-std::string replaceNestedName(const NestedNameSpecifier *Use,
- SourceLocation UseLoc,
+std::string replaceNestedName(NestedNameSpecifier Use, SourceLocation UseLoc,
const DeclContext *UseContext,
const NamedDecl *FromDecl,
StringRef ReplacementString);
diff --git a/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h b/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
index 271232e..319569f 100644
--- a/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
+++ b/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
@@ -108,19 +108,21 @@ public:
bool VisitTypedefTypeLoc(TypedefTypeLoc TL) {
const SourceLocation TypeEndLoc =
Lexer::getLocForEndOfToken(TL.getBeginLoc(), 0, SM, LangOpts);
- return visit(TL.getTypedefNameDecl(), TL.getBeginLoc(), TypeEndLoc);
+ return visit(TL.getDecl(), TL.getBeginLoc(), TypeEndLoc);
}
- bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) {
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc QualifierLoc) {
// The base visitor will visit NNSL prefixes, so we should only look at
// the current NNS.
- if (NNS) {
- const auto *ND = dyn_cast_if_present<NamespaceDecl>(
- NNS.getNestedNameSpecifier()->getAsNamespace());
- if (!visit(ND, NNS.getLocalBeginLoc(), NNS.getLocalEndLoc()))
+ if (NestedNameSpecifier Qualifier = QualifierLoc.getNestedNameSpecifier();
+ Qualifier.getKind() == NestedNameSpecifier::Kind::Namespace) {
+ const auto *ND = dyn_cast<NamespaceDecl>(
+ Qualifier.getAsNamespaceAndPrefix().Namespace);
+ if (!visit(ND, QualifierLoc.getLocalBeginLoc(),
+ QualifierLoc.getLocalEndLoc()))
return false;
}
- return BaseType::TraverseNestedNameSpecifierLoc(NNS);
+ return BaseType::TraverseNestedNameSpecifierLoc(QualifierLoc);
}
bool VisitDesignatedInitExpr(const DesignatedInitExpr *E) {
diff --git a/clang/include/clang/Tooling/Tooling.h b/clang/include/clang/Tooling/Tooling.h
index 200fb30..9909394 100644
--- a/clang/include/clang/Tooling/Tooling.h
+++ b/clang/include/clang/Tooling/Tooling.h
@@ -32,6 +32,7 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/FrontendAction.h"
#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Tooling/ArgumentsAdjusters.h"
@@ -239,7 +240,8 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
const FileContentMappings &VirtualMappedFiles = FileContentMappings(),
DiagnosticConsumer *DiagConsumer = nullptr,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS =
- llvm::vfs::getRealFileSystem());
+ llvm::vfs::getRealFileSystem(),
+ CaptureDiagsKind CaptureKind = CaptureDiagsKind::None);
/// Utility to run a FrontendAction in a single clang invocation.
class ToolInvocation {
diff --git a/clang/include/module.modulemap b/clang/include/module.modulemap
index 42ee34f..c553526 100644
--- a/clang/include/module.modulemap
+++ b/clang/include/module.modulemap
@@ -37,6 +37,7 @@ module Clang_Basic {
umbrella "clang/Basic"
textual header "clang/Basic/AArch64ACLETypes.def"
+ textual header "clang/Basic/ABIVersions.def"
textual header "clang/Basic/AMDGPUTypes.def"
textual header "clang/Basic/BuiltinHeaders.def"
textual header "clang/Basic/BuiltinsAArch64.def"
diff --git a/clang/lib/APINotes/APINotesFormat.h b/clang/lib/APINotes/APINotesFormat.h
index bb0c276..69d180e 100644
--- a/clang/lib/APINotes/APINotesFormat.h
+++ b/clang/lib/APINotes/APINotesFormat.h
@@ -24,7 +24,7 @@ const uint16_t VERSION_MAJOR = 0;
/// API notes file minor version number.
///
/// When the format changes IN ANY WAY, this number should be incremented.
-const uint16_t VERSION_MINOR = 35; // SwiftDefaultOwnership
+const uint16_t VERSION_MINOR = 37; // SwiftDestroyOp
const uint8_t kSwiftConforms = 1;
const uint8_t kSwiftDoesNotConform = 2;
diff --git a/clang/lib/APINotes/APINotesReader.cpp b/clang/lib/APINotes/APINotesReader.cpp
index 7cc4df2..573356f 100644
--- a/clang/lib/APINotes/APINotesReader.cpp
+++ b/clang/lib/APINotes/APINotesReader.cpp
@@ -134,6 +134,13 @@ void ReadCommonTypeInfo(const uint8_t *&Data, CommonTypeInfo &Info) {
reinterpret_cast<const char *>(Data), ErrorDomainLength - 1)));
Data += ErrorDomainLength - 1;
}
+
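+ // A zero length marks an absent conformance string; otherwise the stored
+ // length is the string size plus one, and the payload carries no
+ // terminating NUL.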
+ if (unsigned ConformanceLength =
+ endian::readNext<uint16_t, llvm::endianness::little>(Data)) {
+ Info.setSwiftConformance(std::string(reinterpret_cast<const char *>(Data),
+ ConformanceLength - 1));
+ Data += ConformanceLength - 1;
+ }
}
/// Used to deserialize the on-disk identifier table.
@@ -629,11 +636,12 @@ public:
reinterpret_cast<const char *>(Data), DefaultOwnershipLength - 1);
Data += DefaultOwnershipLength - 1;
}
- if (unsigned ConformanceLength =
- endian::readNext<uint16_t, llvm::endianness::little>(Data)) {
- Info.SwiftConformance = std::string(reinterpret_cast<const char *>(Data),
- ConformanceLength - 1);
- Data += ConformanceLength - 1;
+ unsigned DestroyOpLength =
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
+ if (DestroyOpLength > 0) {
+ Info.SwiftDestroyOp = std::string(reinterpret_cast<const char *>(Data),
+ DestroyOpLength - 1);
+ Data += DestroyOpLength - 1;
}
ReadCommonTypeInfo(Data, Info);
diff --git a/clang/lib/APINotes/APINotesWriter.cpp b/clang/lib/APINotes/APINotesWriter.cpp
index ffc5473..cf88d118 100644
--- a/clang/lib/APINotes/APINotesWriter.cpp
+++ b/clang/lib/APINotes/APINotesWriter.cpp
@@ -536,7 +536,8 @@ unsigned getCommonEntityInfoSize(const CommonEntityInfo &CEI) {
// in on-disk hash tables.
unsigned getCommonTypeInfoSize(const CommonTypeInfo &CTI) {
return 2 + (CTI.getSwiftBridge() ? CTI.getSwiftBridge()->size() : 0) + 2 +
- (CTI.getNSErrorDomain() ? CTI.getNSErrorDomain()->size() : 0) +
+ (CTI.getNSErrorDomain() ? CTI.getNSErrorDomain()->size() : 0) + 2 +
+ (CTI.getSwiftConformance() ? CTI.getSwiftConformance()->size() : 0) +
getCommonEntityInfoSize(CTI);
}
@@ -557,6 +558,12 @@ void emitCommonTypeInfo(raw_ostream &OS, const CommonTypeInfo &CTI) {
} else {
writer.write<uint16_t>(0);
}
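+ // Mirrors ReadCommonTypeInfo: emit size+1 as the length prefix (0 means
+ // absent), followed by the raw bytes without a terminating NUL.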
+ if (auto conformance = CTI.getSwiftConformance()) {
+ writer.write<uint16_t>(conformance->size() + 1);
+ OS.write(conformance->c_str(), conformance->size());
+ } else {
+ writer.write<uint16_t>(0);
+ }
}
/// Used to serialize the on-disk Objective-C property table.
@@ -1273,8 +1280,8 @@ public:
return 2 + (TI.SwiftImportAs ? TI.SwiftImportAs->size() : 0) +
2 + (TI.SwiftRetainOp ? TI.SwiftRetainOp->size() : 0) +
2 + (TI.SwiftReleaseOp ? TI.SwiftReleaseOp->size() : 0) +
+ 2 + (TI.SwiftDestroyOp ? TI.SwiftDestroyOp->size() : 0) +
2 + (TI.SwiftDefaultOwnership ? TI.SwiftDefaultOwnership->size() : 0) +
- 2 + (TI.SwiftConformance ? TI.SwiftConformance->size() : 0) +
3 + getCommonTypeInfoSize(TI);
// clang-format on
}
@@ -1328,9 +1335,9 @@ public:
} else {
writer.write<uint16_t>(0);
}
- if (auto Conformance = TI.SwiftConformance) {
- writer.write<uint16_t>(Conformance->size() + 1);
- OS.write(Conformance->c_str(), Conformance->size());
+ if (auto DestroyOp = TI.SwiftDestroyOp) {
+ writer.write<uint16_t>(DestroyOp->size() + 1);
+ OS.write(DestroyOp->c_str(), DestroyOp->size());
} else {
writer.write<uint16_t>(0);
}
diff --git a/clang/lib/APINotes/APINotesYAMLCompiler.cpp b/clang/lib/APINotes/APINotesYAMLCompiler.cpp
index 803410c..a91a1ee 100644
--- a/clang/lib/APINotes/APINotesYAMLCompiler.cpp
+++ b/clang/lib/APINotes/APINotesYAMLCompiler.cpp
@@ -251,6 +251,7 @@ struct Class {
std::optional<StringRef> NSErrorDomain;
std::optional<bool> SwiftImportAsNonGeneric;
std::optional<bool> SwiftObjCMembers;
+ std::optional<std::string> SwiftConformance;
MethodsSeq Methods;
PropertiesSeq Properties;
};
@@ -275,6 +276,7 @@ template <> struct MappingTraits<Class> {
IO.mapOptional("NSErrorDomain", C.NSErrorDomain);
IO.mapOptional("SwiftImportAsNonGeneric", C.SwiftImportAsNonGeneric);
IO.mapOptional("SwiftObjCMembers", C.SwiftObjCMembers);
+ IO.mapOptional("SwiftConformsTo", C.SwiftConformance);
IO.mapOptional("Methods", C.Methods);
IO.mapOptional("Properties", C.Properties);
}
@@ -460,6 +462,7 @@ struct Tag {
std::optional<std::string> SwiftImportAs;
std::optional<std::string> SwiftRetainOp;
std::optional<std::string> SwiftReleaseOp;
+ std::optional<std::string> SwiftDestroyOp;
std::optional<std::string> SwiftDefaultOwnership;
std::optional<std::string> SwiftConformance;
std::optional<EnumExtensibilityKind> EnumExtensibility;
@@ -501,6 +504,7 @@ template <> struct MappingTraits<Tag> {
IO.mapOptional("SwiftImportAs", T.SwiftImportAs);
IO.mapOptional("SwiftReleaseOp", T.SwiftReleaseOp);
IO.mapOptional("SwiftRetainOp", T.SwiftRetainOp);
+ IO.mapOptional("SwiftDestroyOp", T.SwiftDestroyOp);
IO.mapOptional("SwiftDefaultOwnership", T.SwiftDefaultOwnership);
IO.mapOptional("SwiftConformsTo", T.SwiftConformance);
IO.mapOptional("EnumExtensibility", T.EnumExtensibility);
@@ -525,6 +529,7 @@ struct Typedef {
std::optional<StringRef> SwiftBridge;
std::optional<StringRef> NSErrorDomain;
std::optional<SwiftNewTypeKind> SwiftType;
+ std::optional<std::string> SwiftConformance;
};
typedef std::vector<Typedef> TypedefsSeq;
@@ -553,6 +558,7 @@ template <> struct MappingTraits<Typedef> {
IO.mapOptional("SwiftBridge", T.SwiftBridge);
IO.mapOptional("NSErrorDomain", T.NSErrorDomain);
IO.mapOptional("SwiftWrapper", T.SwiftType);
+ IO.mapOptional("SwiftConformsTo", T.SwiftConformance);
}
};
} // namespace yaml
@@ -802,6 +808,8 @@ public:
if (Common.SwiftBridge)
Info.setSwiftBridge(std::string(*Common.SwiftBridge));
Info.setNSErrorDomain(Common.NSErrorDomain);
+ if (auto conformance = Common.SwiftConformance)
+ Info.setSwiftConformance(conformance);
}
// Translate from Method into ObjCMethodInfo and write it out.
@@ -990,8 +998,8 @@ public:
TI.SwiftRetainOp = T.SwiftRetainOp;
if (T.SwiftReleaseOp)
TI.SwiftReleaseOp = T.SwiftReleaseOp;
- if (T.SwiftConformance)
- TI.SwiftConformance = T.SwiftConformance;
+ if (T.SwiftDestroyOp)
+ TI.SwiftDestroyOp = T.SwiftDestroyOp;
if (T.SwiftDefaultOwnership)
TI.SwiftDefaultOwnership = T.SwiftDefaultOwnership;
diff --git a/clang/lib/AST/APValue.cpp b/clang/lib/AST/APValue.cpp
index ee3dc84..7173c2a 100644
--- a/clang/lib/AST/APValue.cpp
+++ b/clang/lib/AST/APValue.cpp
@@ -902,8 +902,8 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy,
}
case APValue::Struct: {
Out << '{';
- const RecordDecl *RD = Ty->castAs<RecordType>()->getDecl();
bool First = true;
+ const auto *RD = Ty->castAsRecordDecl();
if (unsigned N = getStructNumBases()) {
const CXXRecordDecl *CD = cast<CXXRecordDecl>(RD);
CXXRecordDecl::base_class_const_iterator BI = CD->bases_begin();
diff --git a/clang/lib/AST/ASTConcept.cpp b/clang/lib/AST/ASTConcept.cpp
index 2243ac0..d658890 100644
--- a/clang/lib/AST/ASTConcept.cpp
+++ b/clang/lib/AST/ASTConcept.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ExprConcepts.h"
+#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/PrettyPrinter.h"
#include "llvm/ADT/StringExtras.h"
@@ -92,10 +93,16 @@ ConceptReference::Create(const ASTContext &C, NestedNameSpecifierLoc NNS,
FoundDecl, NamedConcept, ArgsAsWritten);
}
+SourceLocation ConceptReference::getBeginLoc() const {
+ // Note that if the qualifier is null, the template keyword must also be null.
+ if (auto QualifierLoc = getNestedNameSpecifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return getConceptNameInfo().getBeginLoc();
+}
+
void ConceptReference::print(llvm::raw_ostream &OS,
const PrintingPolicy &Policy) const {
- if (NestedNameSpec)
- NestedNameSpec.getNestedNameSpecifier()->print(OS, Policy);
+ NestedNameSpec.getNestedNameSpecifier().print(OS, Policy);
ConceptName.printName(OS, Policy);
if (hasExplicitTemplateArgs()) {
OS << "<";
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 3a16111..dca05b4 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -654,9 +654,9 @@ comments::FullComment *ASTContext::getCommentForDecl(
// does not have one of its own.
QualType QT = TD->getUnderlyingType();
if (const auto *TT = QT->getAs<TagType>())
- if (const Decl *TD = TT->getDecl())
- if (comments::FullComment *FC = getCommentForDecl(TD, PP))
- return cloneFullComment(FC, D);
+ if (comments::FullComment *FC =
+ getCommentForDecl(TT->getOriginalDecl(), PP))
+ return cloneFullComment(FC, D);
}
else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
while (IC->getSuperClass()) {
@@ -1708,7 +1708,7 @@ void ASTContext::setRelocationInfoForCXXRecord(
}
static bool primaryBaseHaseAddressDiscriminatedVTableAuthentication(
- ASTContext &Context, const CXXRecordDecl *Class) {
+ const ASTContext &Context, const CXXRecordDecl *Class) {
if (!Class->isPolymorphic())
return false;
const CXXRecordDecl *BaseType = Context.baseForVTableAuthentication(Class);
@@ -1723,7 +1723,8 @@ static bool primaryBaseHaseAddressDiscriminatedVTableAuthentication(
return AddressDiscrimination == AuthAttr::AddressDiscrimination;
}
-ASTContext::PointerAuthContent ASTContext::findPointerAuthContent(QualType T) {
+ASTContext::PointerAuthContent
+ASTContext::findPointerAuthContent(QualType T) const {
assert(isPointerAuthenticationAvailable());
T = T.getCanonicalType();
@@ -1933,9 +1934,8 @@ TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
// of a base-class subobject. We decide whether that's possible
// during class layout, so here we can just trust the layout results.
if (getLangOpts().CPlusPlus) {
- if (const auto *RT = T->getAs<RecordType>();
- RT && !RT->getDecl()->isInvalidDecl()) {
- const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
+ if (const auto *RD = T->getAsCXXRecordDecl(); RD && !RD->isInvalidDecl()) {
+ const ASTRecordLayout &layout = getASTRecordLayout(RD);
Info.Width = layout.getDataSize();
}
}
@@ -2002,9 +2002,9 @@ bool ASTContext::isPromotableIntegerType(QualType T) const {
// Enumerated types are promotable to their compatible integer types
// (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
- if (const auto *ET = T->getAs<EnumType>()) {
- if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() ||
- ET->getDecl()->isScoped())
+ if (const auto *ED = T->getAsEnumDecl()) {
+ if (T->isDependentType() || ED->getPromotionType().isNull() ||
+ ED->isScoped())
return false;
return true;
@@ -2040,8 +2040,8 @@ unsigned ASTContext::getTypeAlignIfKnown(QualType T,
return Align;
// Otherwise, see if the declaration of the type had an attribute.
- if (const auto *TT = T->getAs<TagType>())
- return TT->getDecl()->getMaxAlignment();
+ if (const auto *TD = T->getAsTagDecl())
+ return TD->getMaxAlignment();
return 0;
}
@@ -2472,15 +2472,16 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::Record:
case Type::Enum: {
const auto *TT = cast<TagType>(T);
+ const TagDecl *TD = TT->getOriginalDecl()->getDefinitionOrSelf();
- if (TT->getDecl()->isInvalidDecl()) {
+ if (TD->isInvalidDecl()) {
Width = 8;
Align = 8;
break;
}
- if (const auto *ET = dyn_cast<EnumType>(TT)) {
- const EnumDecl *ED = ET->getDecl();
+ if (isa<EnumType>(TT)) {
+ const EnumDecl *ED = cast<EnumDecl>(TD);
TypeInfo Info =
getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
if (unsigned AttrAlign = ED->getMaxAlignment()) {
@@ -2490,8 +2491,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
return Info;
}
- const auto *RT = cast<RecordType>(TT);
- const RecordDecl *RD = RT->getDecl();
+ const auto *RD = cast<RecordDecl>(TD);
const ASTRecordLayout &Layout = getASTRecordLayout(RD);
Width = toBits(Layout.getSize());
Align = toBits(Layout.getAlignment());
@@ -2543,9 +2543,6 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
break;
}
- case Type::Elaborated:
- return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
-
case Type::Attributed:
return getTypeInfo(
cast<AttributedType>(T)->getEquivalentType().getTypePtr());
@@ -2618,11 +2615,10 @@ unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
return I->second;
unsigned UnadjustedAlign;
- if (const auto *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- const ASTRecordLayout &Layout = getASTRecordLayout(RD);
+ if (const auto *RT = T->getAsCanonical<RecordType>()) {
+ const ASTRecordLayout &Layout = getASTRecordLayout(RT->getOriginalDecl());
UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
- } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
+ } else if (const auto *ObjCI = T->getAsCanonical<ObjCInterfaceType>()) {
const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
} else {
@@ -2695,9 +2691,7 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
if (!Target->allowsLargerPreferedTypeAlignment())
return ABIAlign;
- if (const auto *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
-
+ if (const auto *RD = T->getAsRecordDecl()) {
// When used as part of a typedef, or together with a 'packed' attribute,
// the 'aligned' attribute can be used to decrease alignment. Note that the
// 'packed' case is already taken into consideration when computing the
@@ -2718,8 +2712,8 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
// possible.
if (const auto *CT = T->getAs<ComplexType>())
T = CT->getElementType().getTypePtr();
- if (const auto *ET = T->getAs<EnumType>())
- T = ET->getDecl()->getIntegerType().getTypePtr();
+ if (const auto *ED = T->getAsEnumDecl())
+ T = ED->getIntegerType().getTypePtr();
if (T->isSpecificBuiltinType(BuiltinType::Double) ||
T->isSpecificBuiltinType(BuiltinType::LongLong) ||
T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
@@ -2851,7 +2845,8 @@ static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
const RecordDecl *RD,
bool CheckIfTriviallyCopyable) {
assert(RD->isUnion() && "Must be union type");
- CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
+ CharUnits UnionSize =
+ Context.getTypeSizeInChars(Context.getCanonicalTagType(RD));
for (const auto *Field : RD->fields()) {
if (!Context.hasUniqueObjectRepresentations(Field->getType(),
@@ -2884,12 +2879,10 @@ structHasUniqueObjectRepresentations(const ASTContext &Context,
static std::optional<int64_t>
getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
bool CheckIfTriviallyCopyable) {
- if (Field->getType()->isRecordType()) {
- const RecordDecl *RD = Field->getType()->getAsRecordDecl();
- if (!RD->isUnion())
- return structHasUniqueObjectRepresentations(Context, RD,
- CheckIfTriviallyCopyable);
- }
+ if (const auto *RD = Field->getType()->getAsRecordDecl();
+ RD && !RD->isUnion())
+ return structHasUniqueObjectRepresentations(Context, RD,
+ CheckIfTriviallyCopyable);
// A _BitInt type may not be unique if it has padding bits
// but if it is a bitfield the padding bits are not used.
@@ -3037,16 +3030,14 @@ bool ASTContext::hasUniqueObjectRepresentations(
return true;
}
- // All other pointers (except __ptrauth pointers) are unique.
+ // All other pointers are unique.
if (Ty->isPointerType())
return !Ty.hasAddressDiscriminatedPointerAuth();
if (const auto *MPT = Ty->getAs<MemberPointerType>())
return !ABI->getMemberPointerInfo(MPT).HasPadding;
- if (Ty->isRecordType()) {
- const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();
-
+ if (const auto *Record = Ty->getAsRecordDecl()) {
if (Record->isInvalidDecl())
return false;
@@ -3418,7 +3409,7 @@ static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
// type, or an unsigned integer type.
//
// So we have to treat enum types as integers.
- QualType UnderlyingType = cast<EnumType>(T)->getDecl()->getIntegerType();
+ QualType UnderlyingType = T->castAsEnumDecl()->getIntegerType();
return encodeTypeForFunctionPointerAuth(
Ctx, OS, UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType);
}
@@ -3456,7 +3447,7 @@ static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
OS << "M";
const auto *MPT = T->castAs<MemberPointerType>();
encodeTypeForFunctionPointerAuth(
- Ctx, OS, QualType(MPT->getQualifier()->getAsType(), 0));
+ Ctx, OS, QualType(MPT->getQualifier().getAsType(), 0));
encodeTypeForFunctionPointerAuth(Ctx, OS, MPT->getPointeeType());
return;
}
@@ -3562,7 +3553,7 @@ static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
llvm_unreachable("should never get here");
}
case Type::Record: {
- const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD = T->castAsCanonical<RecordType>()->getOriginalDecl();
const IdentifierInfo *II = RD->getIdentifier();
// In C++, an immediate typedef of an anonymous struct or union
@@ -3740,12 +3731,6 @@ ASTContext::adjustType(QualType Orig,
adjustType(BTFT->getWrappedType(), Adjust));
}
- case Type::Elaborated: {
- const auto *ET = cast<ElaboratedType>(Orig);
- return getElaboratedType(ET->getKeyword(), ET->getQualifier(),
- adjustType(ET->getNamedType(), Adjust));
- }
-
case Type::Paren:
return getParenType(
adjustType(cast<ParenType>(Orig)->getInnerType(), Adjust));
@@ -4163,14 +4148,13 @@ QualType ASTContext::getRValueReferenceType(QualType T) const {
}
QualType ASTContext::getMemberPointerType(QualType T,
- NestedNameSpecifier *Qualifier,
+ NestedNameSpecifier Qualifier,
const CXXRecordDecl *Cls) const {
if (!Qualifier) {
assert(Cls && "At least one of Qualifier or Cls must be provided");
- Qualifier = NestedNameSpecifier::Create(*this, /*Prefix=*/nullptr,
- getTypeDeclType(Cls).getTypePtr());
+ Qualifier = NestedNameSpecifier(getCanonicalTagType(Cls).getTypePtr());
} else if (!Cls) {
- Cls = Qualifier->getAsRecordDecl();
+ Cls = Qualifier.getAsRecordDecl();
}
// Unique pointers, to guarantee there is only one pointer of a particular
// structure.
@@ -4182,12 +4166,11 @@ QualType ASTContext::getMemberPointerType(QualType T,
MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(PT, 0);
- NestedNameSpecifier *CanonicalQualifier = [&] {
+ NestedNameSpecifier CanonicalQualifier = [&] {
if (!Cls)
- return getCanonicalNestedNameSpecifier(Qualifier);
- NestedNameSpecifier *R = NestedNameSpecifier::Create(
- *this, /*Prefix=*/nullptr, Cls->getCanonicalDecl()->getTypeForDecl());
- assert(R == getCanonicalNestedNameSpecifier(R));
+ return Qualifier.getCanonical();
+ NestedNameSpecifier R(getCanonicalTagType(Cls).getTypePtr());
+ assert(R.isCanonical());
return R;
}();
// If the pointee or class type isn't canonical, this won't be a canonical
@@ -4306,6 +4289,7 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
case Type::DependentTemplateSpecialization:
case Type::TemplateTypeParm:
case Type::SubstTemplateTypeParmPack:
+ case Type::SubstBuiltinTemplatePack:
case Type::Auto:
case Type::DeducedTemplateSpecialization:
case Type::PackExpansion:
@@ -5128,10 +5112,12 @@ QualType ASTContext::getFunctionTypeInternal(
EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
size_t Size = FunctionProtoType::totalSizeToAlloc<
QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
+ FunctionType::FunctionTypeExtraAttributeInfo,
FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers,
FunctionEffect, EffectConditionExpr>(
NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(),
+ EPI.requiresFunctionProtoTypeExtraAttributeInfo(),
EPI.requiresFunctionProtoTypeArmAttributes(), ESH.NumExceptionType,
ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
EPI.ExtParameterInfos ? NumArgs : 0,
@@ -5245,7 +5231,6 @@ ASTContext::getPredefinedSugarType(PredefinedSugarType::Kind KD) const {
}
llvm_unreachable("unexpected kind");
};
-
auto *New = new (*this, alignof(PredefinedSugarType))
PredefinedSugarType(KD, &Idents.get(PredefinedSugarType::getName(KD)),
getCanonicalType(*this, static_cast<Kind>(KD)));
@@ -5254,153 +5239,296 @@ ASTContext::getPredefinedSugarType(PredefinedSugarType::Kind KD) const {
return QualType(New, 0);
}
-#ifndef NDEBUG
-static bool NeedsInjectedClassNameType(const RecordDecl *D) {
- if (!isa<CXXRecordDecl>(D)) return false;
- const auto *RD = cast<CXXRecordDecl>(D);
- if (isa<ClassTemplatePartialSpecializationDecl>(RD))
- return true;
- if (RD->getDescribedClassTemplate() &&
- !isa<ClassTemplateSpecializationDecl>(RD))
- return true;
- return false;
-}
-#endif
+QualType ASTContext::getTypeDeclType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TypeDecl *Decl) const {
+ if (auto *Tag = dyn_cast<TagDecl>(Decl))
+ return getTagType(Keyword, Qualifier, Tag,
+ /*OwnsTag=*/false);
+ if (auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
+ return getTypedefType(Keyword, Qualifier, Typedef);
+ if (auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(Decl))
+ return getUnresolvedUsingType(Keyword, Qualifier, UD);
-/// getInjectedClassNameType - Return the unique reference to the
-/// injected class name type for the specified templated declaration.
-QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
- QualType TST) const {
- assert(NeedsInjectedClassNameType(Decl));
- if (Decl->TypeForDecl) {
- assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
- } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
- assert(PrevDecl->TypeForDecl && "previous declaration has no type");
- Decl->TypeForDecl = PrevDecl->TypeForDecl;
- assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
- } else {
- Type *newType = new (*this, alignof(InjectedClassNameType))
- InjectedClassNameType(Decl, TST);
- Decl->TypeForDecl = newType;
- Types.push_back(newType);
- }
+ assert(Keyword == ElaboratedTypeKeyword::None);
+ assert(!Qualifier);
return QualType(Decl->TypeForDecl, 0);
}
-/// getTypeDeclType - Return the unique reference to the type for the
-/// specified type declaration.
-QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
- assert(Decl && "Passed null for Decl param");
- assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
-
- if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
- return getTypedefType(Typedef);
-
- assert(!isa<TemplateTypeParmDecl>(Decl) &&
- "Template type parameter types are always available.");
-
- if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
- assert(Record->isFirstDecl() && "struct/union has previous declaration");
- assert(!NeedsInjectedClassNameType(Record));
- return getRecordType(Record);
- } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
- assert(Enum->isFirstDecl() && "enum has previous declaration");
- return getEnumType(Enum);
- } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
- return getUnresolvedUsingType(Using);
- } else
- llvm_unreachable("TypeDecl without a type?");
-
+CanQualType ASTContext::getCanonicalTypeDeclType(const TypeDecl *TD) const {
+ if (auto *Tag = dyn_cast<TagDecl>(TD))
+ return getCanonicalTagType(Tag);
+ if (auto *TN = dyn_cast<TypedefNameDecl>(TD))
+ return getCanonicalType(TN->getUnderlyingType());
+ if (const auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(TD))
+ return getCanonicalUnresolvedUsingType(UD);
+ assert(TD->TypeForDecl);
+ return TD->TypeForDecl->getCanonicalTypeUnqualified();
+}
+
+QualType ASTContext::getTypeDeclType(const TypeDecl *Decl) const {
+ if (const auto *TD = dyn_cast<TagDecl>(Decl))
+ return getCanonicalTagType(TD);
+ if (const auto *TD = dyn_cast<TypedefNameDecl>(Decl);
+ isa_and_nonnull<TypedefDecl, TypeAliasDecl>(TD))
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD);
+ if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl))
+ return getCanonicalUnresolvedUsingType(Using);
+
+ assert(Decl->TypeForDecl);
return QualType(Decl->TypeForDecl, 0);
}
/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl.
-QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
- QualType Underlying) const {
- if (!Decl->TypeForDecl) {
- if (Underlying.isNull())
- Underlying = Decl->getUnderlyingType();
- auto *NewType = new (*this, alignof(TypedefType)) TypedefType(
- Type::Typedef, Decl, Underlying, /*HasTypeDifferentFromDecl=*/false);
- Decl->TypeForDecl = NewType;
+QualType
+ASTContext::getTypedefType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TypedefNameDecl *Decl, QualType UnderlyingType,
+ std::optional<bool> TypeMatchesDeclOrNone) const {
+ if (!TypeMatchesDeclOrNone) {
+ QualType DeclUnderlyingType = Decl->getUnderlyingType();
+ assert(!DeclUnderlyingType.isNull());
+ if (UnderlyingType.isNull())
+ UnderlyingType = DeclUnderlyingType;
+ else
+ assert(hasSameType(UnderlyingType, DeclUnderlyingType));
+ TypeMatchesDeclOrNone = UnderlyingType == DeclUnderlyingType;
+ } else {
+ // FIXME: This is a workaround for a serialization cycle: assume the decl's
+ // underlying type is not available; don't touch it.
+ assert(!UnderlyingType.isNull());
+ }
+
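+ // Fast path: an unadorned reference to the typedef (no keyword, no
+ // qualifier, underlying type matching the declaration) is unique, so it is
+ // cached directly on the declaration.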
+ if (Keyword == ElaboratedTypeKeyword::None && !Qualifier &&
+ *TypeMatchesDeclOrNone) {
+ if (Decl->TypeForDecl)
+ return QualType(Decl->TypeForDecl, 0);
+
+ auto *NewType = new (*this, alignof(TypedefType))
+ TypedefType(Type::Typedef, Keyword, Qualifier, Decl, UnderlyingType,
+ !*TypeMatchesDeclOrNone);
+
Types.push_back(NewType);
+ Decl->TypeForDecl = NewType;
return QualType(NewType, 0);
}
- if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying)
- return QualType(Decl->TypeForDecl, 0);
- assert(hasSameType(Decl->getUnderlyingType(), Underlying));
llvm::FoldingSetNodeID ID;
- TypedefType::Profile(ID, Decl, Underlying);
+ TypedefType::Profile(ID, Keyword, Qualifier, Decl, UnderlyingType);
void *InsertPos = nullptr;
- if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) {
- assert(!T->typeMatchesDecl() &&
- "non-divergent case should be handled with TypeDecl");
- return QualType(T, 0);
- }
+ if (FoldingSetPlaceholder<TypedefType> *Placeholder =
+ TypedefTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(Placeholder->getType(), 0);
- void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true),
- alignof(TypedefType));
- auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying,
- /*HasTypeDifferentFromDecl=*/true);
- TypedefTypes.InsertNode(NewType, InsertPos);
+ void *Mem =
+ Allocate(TypedefType::totalSizeToAlloc<FoldingSetPlaceholder<TypedefType>,
+ NestedNameSpecifier, QualType>(
+ 1, !!Qualifier, !*TypeMatchesDeclOrNone),
+ alignof(TypedefType));
+ auto *NewType =
+ new (Mem) TypedefType(Type::Typedef, Keyword, Qualifier, Decl,
+ UnderlyingType, !*TypeMatchesDeclOrNone);
+ auto *Placeholder = new (NewType->getFoldingSetPlaceholder())
+ FoldingSetPlaceholder<TypedefType>();
+ TypedefTypes.InsertNode(Placeholder, InsertPos);
Types.push_back(NewType);
return QualType(NewType, 0);
}
-QualType ASTContext::getUsingType(const UsingShadowDecl *Found,
- QualType Underlying) const {
+QualType ASTContext::getUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const UsingShadowDecl *D,
+ QualType UnderlyingType) const {
+ // FIXME: This is expensive to compute every time!
+ if (UnderlyingType.isNull()) {
+ const auto *UD = cast<UsingDecl>(D->getIntroducer());
+ UnderlyingType =
+ getTypeDeclType(UD->hasTypename() ? ElaboratedTypeKeyword::Typename
+ : ElaboratedTypeKeyword::None,
+ UD->getQualifier(), cast<TypeDecl>(D->getTargetDecl()));
+ }
+
llvm::FoldingSetNodeID ID;
- UsingType::Profile(ID, Found, Underlying);
+ UsingType::Profile(ID, Keyword, Qualifier, D, UnderlyingType);
void *InsertPos = nullptr;
- if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
+ if (const UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(T, 0);
- const Type *TypeForDecl =
- cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl();
+ assert(!UnderlyingType.hasLocalQualifiers());
- assert(!Underlying.hasLocalQualifiers());
- QualType Canon = Underlying->getCanonicalTypeInternal();
- assert(TypeForDecl->getCanonicalTypeInternal() == Canon);
+ assert(
+ hasSameType(getCanonicalTypeDeclType(cast<TypeDecl>(D->getTargetDecl())),
+ UnderlyingType));
- if (Underlying.getTypePtr() == TypeForDecl)
- Underlying = QualType();
void *Mem =
- Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()),
+ Allocate(UsingType::totalSizeToAlloc<NestedNameSpecifier>(!!Qualifier),
alignof(UsingType));
- UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon);
- Types.push_back(NewType);
- UsingTypes.InsertNode(NewType, InsertPos);
- return QualType(NewType, 0);
+ UsingType *T = new (Mem) UsingType(Keyword, Qualifier, D, UnderlyingType);
+ Types.push_back(T);
+ UsingTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
}
-QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
- if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+TagType *ASTContext::getTagTypeInternal(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TagDecl *TD, bool OwnsTag,
+ bool IsInjected,
+ const Type *CanonicalType,
+ bool WithFoldingSetNode) const {
+ auto [TC, Size] = [&] {
+ switch (TD->getDeclKind()) {
+ case Decl::Enum:
+ static_assert(alignof(EnumType) == alignof(TagType));
+ return std::make_tuple(Type::Enum, sizeof(EnumType));
+ case Decl::ClassTemplatePartialSpecialization:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::CXXRecord:
+ static_assert(alignof(RecordType) == alignof(TagType));
+ static_assert(alignof(InjectedClassNameType) == alignof(TagType));
+ if (cast<CXXRecordDecl>(TD)->hasInjectedClassType())
+ return std::make_tuple(Type::InjectedClassName,
+ sizeof(InjectedClassNameType));
+ [[fallthrough]];
+ case Decl::Record:
+ return std::make_tuple(Type::Record, sizeof(RecordType));
+ default:
+ llvm_unreachable("unexpected decl kind");
+ }
+ }();
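+ // If present, the qualifier is tail-allocated after the type node, so
+ // extend the allocation accordingly.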
- if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
- if (PrevDecl->TypeForDecl)
- return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+ if (Qualifier) {
+ static_assert(alignof(NestedNameSpecifier) <= alignof(TagType));
+ Size = llvm::alignTo(Size, alignof(NestedNameSpecifier)) +
+ sizeof(NestedNameSpecifier);
+ }
+ void *Mem;
+ if (WithFoldingSetNode) {
+ // FIXME: It would be more profitable to tail allocate the folding set node
+ // from the type, instead of the other way around, due to the greater
+ // alignment requirements of the type. But this makes it harder to deal with
+ // the different type node sizes. This would require either uniquing from
+ // different folding sets, or having the folding set accept a
+ // contextual parameter which is not fixed at construction.
+ Mem = Allocate(
+ sizeof(TagTypeFoldingSetPlaceholder) +
+ TagTypeFoldingSetPlaceholder::getOffset() + Size,
+ std::max(alignof(TagTypeFoldingSetPlaceholder), alignof(TagType)));
+ auto *T = new (Mem) TagTypeFoldingSetPlaceholder();
+ Mem = T->getTagType();
+ } else {
+ Mem = Allocate(Size, alignof(TagType));
+ }
+
+ auto *T = [&, TC = TC]() -> TagType * {
+ switch (TC) {
+ case Type::Enum: {
+ assert(isa<EnumDecl>(TD));
+ auto *T = new (Mem) EnumType(TC, Keyword, Qualifier, TD, OwnsTag,
+ IsInjected, CanonicalType);
+ assert(reinterpret_cast<void *>(T) ==
+ reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
+ "TagType must be the first base of EnumType");
+ return T;
+ }
+ case Type::Record: {
+ assert(isa<RecordDecl>(TD));
+ auto *T = new (Mem) RecordType(TC, Keyword, Qualifier, TD, OwnsTag,
+ IsInjected, CanonicalType);
+ assert(reinterpret_cast<void *>(T) ==
+ reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
+ "TagType must be the first base of RecordType");
+ return T;
+ }
+ case Type::InjectedClassName: {
+ auto *T = new (Mem) InjectedClassNameType(Keyword, Qualifier, TD,
+ IsInjected, CanonicalType);
+ assert(reinterpret_cast<void *>(T) ==
+ reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
+ "TagType must be the first base of InjectedClassNameType");
+ return T;
+ }
+ default:
+ llvm_unreachable("unexpected type class");
+ }
+ }();
+ assert(T->getKeyword() == Keyword);
+ assert(T->getQualifier() == Qualifier);
+ assert(T->getOriginalDecl() == TD);
+ assert(T->isInjected() == IsInjected);
+ assert(T->isTagOwned() == OwnsTag);
+ assert((T->isCanonicalUnqualified()
+ ? QualType()
+ : T->getCanonicalTypeInternal()) == QualType(CanonicalType, 0));
+ Types.push_back(T);
+ return T;
+}
- auto *newType = new (*this, alignof(RecordType)) RecordType(Decl);
- Decl->TypeForDecl = newType;
- Types.push_back(newType);
- return QualType(newType, 0);
+static const TagDecl *getNonInjectedClassName(const TagDecl *TD) {
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(TD);
+ RD && RD->isInjectedClassName())
+ return cast<TagDecl>(RD->getDeclContext());
+ return TD;
}
-QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
- if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+CanQualType ASTContext::getCanonicalTagType(const TagDecl *TD) const {
+ TD = ::getNonInjectedClassName(TD)->getCanonicalDecl();
+ if (TD->TypeForDecl)
+ return TD->TypeForDecl->getCanonicalTypeUnqualified();
- if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
- if (PrevDecl->TypeForDecl)
- return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+ const Type *CanonicalType = getTagTypeInternal(
+ ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD,
+ /*OwnsTag=*/false, /*IsInjected=*/false, /*CanonicalType=*/nullptr,
+ /*WithFoldingSetNode=*/false);
+ TD->TypeForDecl = CanonicalType;
+ return CanQualType::CreateUnsafe(QualType(CanonicalType, 0));
+}
- auto *newType = new (*this, alignof(EnumType)) EnumType(Decl);
- Decl->TypeForDecl = newType;
- Types.push_back(newType);
- return QualType(newType, 0);
+QualType ASTContext::getTagType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TagDecl *TD, bool OwnsTag) const {
+
+ const TagDecl *NonInjectedTD = ::getNonInjectedClassName(TD);
+ bool IsInjected = TD != NonInjectedTD;
+
+ ElaboratedTypeKeyword PreferredKeyword =
+ getLangOpts().CPlusPlus ? ElaboratedTypeKeyword::None
+ : KeywordHelpers::getKeywordForTagTypeKind(
+ NonInjectedTD->getTagKind());
+
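+ // Fast path: the preferred form (no tag ownership, no qualifier, default
+ // keyword) is cached on the declaration; all other forms are uniqued
+ // through the TagTypes folding set below.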
+ if (Keyword == PreferredKeyword && !Qualifier && !OwnsTag) {
+ if (const Type *T = TD->TypeForDecl; T && !T->isCanonicalUnqualified())
+ return QualType(T, 0);
+
+ const Type *CanonicalType = getCanonicalTagType(NonInjectedTD).getTypePtr();
+ const Type *T =
+ getTagTypeInternal(Keyword,
+ /*Qualifier=*/std::nullopt, NonInjectedTD,
+ /*OwnsTag=*/false, IsInjected, CanonicalType,
+ /*WithFoldingSetNode=*/false);
+ TD->TypeForDecl = T;
+ return QualType(T, 0);
+ }
+
+ llvm::FoldingSetNodeID ID;
+ TagTypeFoldingSetPlaceholder::Profile(ID, Keyword, Qualifier, NonInjectedTD,
+ OwnsTag, IsInjected);
+
+ void *InsertPos = nullptr;
+ if (TagTypeFoldingSetPlaceholder *T =
+ TagTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(T->getTagType(), 0);
+
+ const Type *CanonicalType = getCanonicalTagType(NonInjectedTD).getTypePtr();
+ TagType *T =
+ getTagTypeInternal(Keyword, Qualifier, NonInjectedTD, OwnsTag, IsInjected,
+ CanonicalType, /*WithFoldingSetNode=*/true);
+ TagTypes.InsertNode(TagTypeFoldingSetPlaceholder::fromTagType(T), InsertPos);
+ return QualType(T, 0);
}
bool ASTContext::computeBestEnumTypes(bool IsPacked, unsigned NumNegativeBits,
@@ -5495,21 +5623,69 @@ bool ASTContext::isRepresentableIntegerValue(llvm::APSInt &Value, QualType T) {
return Value.getSignificantBits() <= BitWidth;
}
-QualType ASTContext::getUnresolvedUsingType(
- const UnresolvedUsingTypenameDecl *Decl) const {
- if (Decl->TypeForDecl)
- return QualType(Decl->TypeForDecl, 0);
+UnresolvedUsingType *ASTContext::getUnresolvedUsingTypeInternal(
+ ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D, void *InsertPos,
+ const Type *CanonicalType) const {
+ void *Mem = Allocate(
+ UnresolvedUsingType::totalSizeToAlloc<
+ FoldingSetPlaceholder<UnresolvedUsingType>, NestedNameSpecifier>(
+ !!InsertPos, !!Qualifier),
+ alignof(UnresolvedUsingType));
+ auto *T = new (Mem) UnresolvedUsingType(Keyword, Qualifier, D, CanonicalType);
+ if (InsertPos) {
+ auto *Placeholder = new (T->getFoldingSetPlaceholder())
+ FoldingSetPlaceholder<UnresolvedUsingType>();
+ UnresolvedUsingTypes.InsertNode(Placeholder, InsertPos);
+ }
+ Types.push_back(T);
+ return T;
+}
- if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
- Decl->getCanonicalDecl())
- if (CanonicalDecl->TypeForDecl)
- return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);
+CanQualType ASTContext::getCanonicalUnresolvedUsingType(
+ const UnresolvedUsingTypenameDecl *D) const {
+ D = D->getCanonicalDecl();
+ if (D->TypeForDecl)
+ return D->TypeForDecl->getCanonicalTypeUnqualified();
- Type *newType =
- new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl);
- Decl->TypeForDecl = newType;
- Types.push_back(newType);
- return QualType(newType, 0);
+ const Type *CanonicalType = getUnresolvedUsingTypeInternal(
+ ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, D,
+ /*InsertPos=*/nullptr, /*CanonicalType=*/nullptr);
+ D->TypeForDecl = CanonicalType;
+ return CanQualType::CreateUnsafe(QualType(CanonicalType, 0));
+}
+
+QualType
+ASTContext::getUnresolvedUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D) const {
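+ // As with typedefs and tag types, the unadorned form is cached on the
+ // declaration itself; qualified or keyword-elaborated forms are uniqued
+ // through the folding set.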
+ if (Keyword == ElaboratedTypeKeyword::None && !Qualifier) {
+ if (const Type *T = D->TypeForDecl; T && !T->isCanonicalUnqualified())
+ return QualType(T, 0);
+
+ const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
+ const Type *T =
+ getUnresolvedUsingTypeInternal(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, D,
+ /*InsertPos=*/nullptr, CanonicalType);
+ D->TypeForDecl = T;
+ return QualType(T, 0);
+ }
+
+ llvm::FoldingSetNodeID ID;
+ UnresolvedUsingType::Profile(ID, Keyword, Qualifier, D);
+
+ void *InsertPos = nullptr;
+ if (FoldingSetPlaceholder<UnresolvedUsingType> *Placeholder =
+ UnresolvedUsingTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(Placeholder->getType(), 0);
+ assert(InsertPos);
+
+ const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
+ const Type *T = getUnresolvedUsingTypeInternal(Keyword, Qualifier, D,
+ InsertPos, CanonicalType);
+ return QualType(T, 0);
}
QualType ASTContext::getAttributedType(attr::Kind attrKind,
@@ -5655,7 +5831,6 @@ QualType ASTContext::getSubstTemplateTypeParmType(QualType Replacement,
return QualType(SubstParm, 0);
}
-/// Retrieve a
QualType
ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
unsigned Index, bool Final,
@@ -5694,6 +5869,34 @@ ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
return QualType(SubstParm, 0);
}
+QualType
+ASTContext::getSubstBuiltinTemplatePack(const TemplateArgument &ArgPack) {
+ assert(llvm::all_of(ArgPack.pack_elements(),
+ [](const auto &P) {
+ return P.getKind() == TemplateArgument::Type;
+ }) &&
+ "Pack contains a non-type");
+
+ llvm::FoldingSetNodeID ID;
+ SubstBuiltinTemplatePackType::Profile(ID, ArgPack);
+
+ void *InsertPos = nullptr;
+ if (auto *T =
+ SubstBuiltinTemplatePackTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(T, 0);
+
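+ // If the argument pack is not itself canonical, build (or find) the
+ // canonical node first and use it as this node's canonical type.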
+ QualType Canon;
+ TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack);
+ if (!CanonArgPack.structurallyEquals(ArgPack))
+ Canon = getSubstBuiltinTemplatePack(CanonArgPack);
+
+ auto *PackType = new (*this, alignof(SubstBuiltinTemplatePackType))
+ SubstBuiltinTemplatePackType(Canon, ArgPack);
+ Types.push_back(PackType);
+ SubstBuiltinTemplatePackTypes.InsertNode(PackType, InsertPos);
+ return QualType(PackType, 0);
+}
+
/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
@@ -5729,34 +5932,32 @@ QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
}
TypeSourceInfo *ASTContext::getTemplateSpecializationTypeInfo(
+ ElaboratedTypeKeyword Keyword, SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKeywordLoc,
TemplateName Name, SourceLocation NameLoc,
const TemplateArgumentListInfo &SpecifiedArgs,
ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
- QualType TST = getTemplateSpecializationType(Name, SpecifiedArgs.arguments(),
- CanonicalArgs, Underlying);
+ QualType TST = getTemplateSpecializationType(
+ Keyword, Name, SpecifiedArgs.arguments(), CanonicalArgs, Underlying);
TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
- TemplateSpecializationTypeLoc TL =
- DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
- TL.setTemplateKeywordLoc(SourceLocation());
- TL.setTemplateNameLoc(NameLoc);
- TL.setLAngleLoc(SpecifiedArgs.getLAngleLoc());
- TL.setRAngleLoc(SpecifiedArgs.getRAngleLoc());
- for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
- TL.setArgLocInfo(i, SpecifiedArgs[i].getLocInfo());
+ DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>().set(
+ ElaboratedKeywordLoc, QualifierLoc, TemplateKeywordLoc, NameLoc,
+ SpecifiedArgs);
return DI;
}
QualType ASTContext::getTemplateSpecializationType(
- TemplateName Template, ArrayRef<TemplateArgumentLoc> SpecifiedArgs,
+ ElaboratedTypeKeyword Keyword, TemplateName Template,
+ ArrayRef<TemplateArgumentLoc> SpecifiedArgs,
ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
SmallVector<TemplateArgument, 4> SpecifiedArgVec;
SpecifiedArgVec.reserve(SpecifiedArgs.size());
for (const TemplateArgumentLoc &Arg : SpecifiedArgs)
SpecifiedArgVec.push_back(Arg.getArgument());
- return getTemplateSpecializationType(Template, SpecifiedArgVec, CanonicalArgs,
- Underlying);
+ return getTemplateSpecializationType(Keyword, Template, SpecifiedArgVec,
+ CanonicalArgs, Underlying);
}
[[maybe_unused]] static bool
@@ -5787,7 +5988,8 @@ QualType ASTContext::getCanonicalTemplateSpecializationType(
sizeof(TemplateArgument) * Args.size(),
alignof(TemplateSpecializationType));
auto *Spec = new (Mem)
- TemplateSpecializationType(Template, /*IsAlias=*/false, Args, QualType());
+ TemplateSpecializationType(ElaboratedTypeKeyword::None, Template,
+ /*IsAlias=*/false, Args, QualType());
assert(Spec->isDependentType() &&
"canonical template specialization must be dependent");
Types.push_back(Spec);
@@ -5796,7 +5998,8 @@ QualType ASTContext::getCanonicalTemplateSpecializationType(
}
QualType ASTContext::getTemplateSpecializationType(
- TemplateName Template, ArrayRef<TemplateArgument> SpecifiedArgs,
+ ElaboratedTypeKeyword Keyword, TemplateName Template,
+ ArrayRef<TemplateArgument> SpecifiedArgs,
ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
assert(!Template.getUnderlying().getAsDependentTemplateName() &&
"No dependent template names here!");
@@ -5806,7 +6009,8 @@ QualType ASTContext::getTemplateSpecializationType(
if (Underlying.isNull()) {
TemplateName CanonTemplate =
getCanonicalTemplateName(Template, /*IgnoreDeduced=*/true);
- bool NonCanonical = Template != CanonTemplate;
+ bool NonCanonical =
+ Template != CanonTemplate || Keyword != ElaboratedTypeKeyword::None;
SmallVector<TemplateArgument, 4> CanonArgsVec;
if (CanonicalArgs.empty()) {
CanonArgsVec = SmallVector<TemplateArgument, 4>(SpecifiedArgs);
@@ -5837,42 +6041,12 @@ QualType ASTContext::getTemplateSpecializationType(
sizeof(TemplateArgument) * SpecifiedArgs.size() +
(IsTypeAlias ? sizeof(QualType) : 0),
alignof(TemplateSpecializationType));
- auto *Spec = new (Mem) TemplateSpecializationType(Template, IsTypeAlias,
- SpecifiedArgs, Underlying);
+ auto *Spec = new (Mem) TemplateSpecializationType(
+ Keyword, Template, IsTypeAlias, SpecifiedArgs, Underlying);
Types.push_back(Spec);
return QualType(Spec, 0);
}
-QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS,
- QualType NamedType,
- TagDecl *OwnedTagDecl) const {
- llvm::FoldingSetNodeID ID;
- ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);
-
- void *InsertPos = nullptr;
- ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
- if (T)
- return QualType(T, 0);
-
- QualType Canon = NamedType;
- if (!Canon.isCanonical()) {
- Canon = getCanonicalType(NamedType);
- ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(!CheckT && "Elaborated canonical type broken");
- (void)CheckT;
- }
-
- void *Mem =
- Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
- alignof(ElaboratedType));
- T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
-
- Types.push_back(T);
- ElaboratedTypes.InsertNode(T, InsertPos);
- return QualType(T, 0);
-}
-
QualType
ASTContext::getParenType(QualType InnerType) const {
llvm::FoldingSetNodeID ID;
@@ -5935,7 +6109,7 @@ getCanonicalElaboratedTypeKeyword(ElaboratedTypeKeyword Keyword) {
}
QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS,
+ NestedNameSpecifier NNS,
const IdentifierInfo *Name) const {
llvm::FoldingSetNodeID ID;
DependentNameType::Profile(ID, Keyword, NNS, Name);
@@ -5947,7 +6121,7 @@ QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
ElaboratedTypeKeyword CanonKeyword =
getCanonicalElaboratedTypeKeyword(Keyword);
- NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ NestedNameSpecifier CanonNNS = NNS.getCanonical();
QualType Canon;
if (CanonKeyword != Keyword || CanonNNS != NNS) {
@@ -5985,13 +6159,13 @@ QualType ASTContext::getDependentTemplateSpecializationType(
T_iter != DependentTemplateSpecializationTypes.end())
return QualType(T_iter->getSecond(), 0);
- NestedNameSpecifier *NNS = Name.getQualifier();
+ NestedNameSpecifier NNS = Name.getQualifier();
QualType Canon;
if (!IsCanonical) {
ElaboratedTypeKeyword CanonKeyword =
getCanonicalElaboratedTypeKeyword(Keyword);
- NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ NestedNameSpecifier CanonNNS = NNS.getCanonical();
bool AnyNonCanonArgs = false;
auto CanonArgs =
::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs);
@@ -6006,7 +6180,7 @@ QualType ASTContext::getDependentTemplateSpecializationType(
} else {
assert(Keyword == getCanonicalElaboratedTypeKeyword(Keyword));
assert(Name.hasTemplateKeyword());
- assert(NNS == getCanonicalNestedNameSpecifier(NNS));
+ assert(NNS.isCanonical());
#ifndef NDEBUG
for (const auto &Arg : Args)
assert(Arg.structurallyEquals(getCanonicalTemplateArgument(Arg)));
@@ -6062,7 +6236,8 @@ TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) const {
} else {
auto *TTP = cast<TemplateTemplateParmDecl>(Param);
TemplateName Name = getQualifiedTemplateName(
- nullptr, /*TemplateKeyword=*/false, TemplateName(TTP));
+ /*Qualifier=*/std::nullopt, /*TemplateKeyword=*/false,
+ TemplateName(TTP));
if (TTP->isParameterPack())
Arg = TemplateArgument(Name, /*NumExpansions=*/std::nullopt);
else
@@ -6334,11 +6509,11 @@ void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
ObjCTypeParamDecl *New) const {
New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType()));
// Update TypeForDecl after updating TypeSourceInfo.
- auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl());
+ auto *NewTypeParamTy = cast<ObjCTypeParamType>(New->TypeForDecl);
SmallVector<ObjCProtocolDecl *, 8> protocols;
protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end());
QualType UpdatedTy = getObjCTypeParamType(New, protocols);
- New->setTypeForDecl(UpdatedTy.getTypePtr());
+ New->TypeForDecl = UpdatedTy.getTypePtr();
}
/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
@@ -6737,20 +6912,20 @@ QualType ASTContext::getUnconstrainedType(QualType T) const {
}
QualType ASTContext::getDeducedTemplateSpecializationTypeInternal(
- TemplateName Template, QualType DeducedType, bool IsDependent,
- QualType Canon) const {
+ ElaboratedTypeKeyword Keyword, TemplateName Template, QualType DeducedType,
+ bool IsDependent, QualType Canon) const {
// Look in the folding set for an existing type.
void *InsertPos = nullptr;
llvm::FoldingSetNodeID ID;
- DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType,
+ DeducedTemplateSpecializationType::Profile(ID, Keyword, Template, DeducedType,
IsDependent);
if (DeducedTemplateSpecializationType *DTST =
DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(DTST, 0);
auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType))
- DeducedTemplateSpecializationType(Template, DeducedType, IsDependent,
- Canon);
+ DeducedTemplateSpecializationType(Keyword, Template, DeducedType,
+ IsDependent, Canon);
#ifndef NDEBUG
llvm::FoldingSetNodeID TempID;
@@ -6766,14 +6941,20 @@ QualType ASTContext::getDeducedTemplateSpecializationTypeInternal(
/// which has been deduced to the given type, or to the canonical undeduced
/// such type, or the canonical deduced-but-dependent such type.
QualType ASTContext::getDeducedTemplateSpecializationType(
- TemplateName Template, QualType DeducedType, bool IsDependent) const {
- QualType Canon = DeducedType.isNull()
- ? getDeducedTemplateSpecializationTypeInternal(
- getCanonicalTemplateName(Template), QualType(),
- IsDependent, QualType())
- : DeducedType.getCanonicalType();
- return getDeducedTemplateSpecializationTypeInternal(Template, DeducedType,
- IsDependent, Canon);
+ ElaboratedTypeKeyword Keyword, TemplateName Template, QualType DeducedType,
+ bool IsDependent) const {
+ // FIXME: This could save an extra hash table lookup if it handled all the
+ // parameters already being canonical.
+ // FIXME: Can this be formed from a DependentTemplateName, such that the
+ // keyword should be part of the canonical type?
+ QualType Canon =
+ DeducedType.isNull()
+ ? getDeducedTemplateSpecializationTypeInternal(
+ ElaboratedTypeKeyword::None, getCanonicalTemplateName(Template),
+ QualType(), IsDependent, QualType())
+ : DeducedType.getCanonicalType();
+ return getDeducedTemplateSpecializationTypeInternal(
+ Keyword, Template, DeducedType, IsDependent, Canon);
}
/// getAtomicType - Return the uniqued reference to the atomic type for
@@ -6823,15 +7004,6 @@ QualType ASTContext::getAutoRRefDeductType() const {
return AutoRRefDeductTy;
}
-/// getTagDeclType - Return the unique reference to the type for the
-/// specified TagDecl (struct/union/class/enum) decl.
-QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
- assert(Decl);
- // FIXME: What is the design on getTagDeclType when it requires casting
- // away const? mutable?
- return getTypeDeclType(const_cast<TagDecl*>(Decl));
-}
-
/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
/// needs to agree with the definition in <stddef.h>.
@@ -7053,8 +7225,8 @@ bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2,
*RD2 = T2MPType->getMostRecentCXXRecordDecl();
RD1 != RD2 && RD1->getCanonicalDecl() != RD2->getCanonicalDecl())
return false;
- if (getCanonicalNestedNameSpecifier(T1MPType->getQualifier()) !=
- getCanonicalNestedNameSpecifier(T2MPType->getQualifier()))
+ if (T1MPType->getQualifier().getCanonical() !=
+ T2MPType->getQualifier().getCanonical())
return false;
T1 = T1MPType->getPointeeType();
T2 = T2MPType->getPointeeType();
@@ -7211,9 +7383,8 @@ TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name,
case TemplateName::DependentTemplate: {
DependentTemplateName *DTN = Name.getAsDependentTemplateName();
assert(DTN && "Non-dependent template names must refer to template decls.");
- NestedNameSpecifier *Qualifier = DTN->getQualifier();
- NestedNameSpecifier *CanonQualifier =
- getCanonicalNestedNameSpecifier(Qualifier);
+ NestedNameSpecifier Qualifier = DTN->getQualifier();
+ NestedNameSpecifier CanonQualifier = Qualifier.getCanonical();
if (Qualifier != CanonQualifier || !DTN->hasTemplateKeyword())
return getDependentTemplateName({CanonQualifier, DTN->getName(),
/*HasTemplateKeyword=*/true});
@@ -7430,38 +7601,40 @@ bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate());
}
-static bool isSameQualifier(const NestedNameSpecifier *X,
- const NestedNameSpecifier *Y) {
- if (X->getKind() != Y->getKind())
+static bool isSameQualifier(const NestedNameSpecifier X,
+ const NestedNameSpecifier Y) {
+ if (X == Y)
+ return true;
+ if (!X || !Y)
+ return false;
+
+ auto Kind = X.getKind();
+ if (Kind != Y.getKind())
return false;
// FIXME: For namespaces and types, we're permitted to check that the entity
// is named via the same tokens. We should probably do so.
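+ // Prefixes are compared recursively within each kind, since a prefix is
+ // now carried by the namespace or type form itself rather than stored
+ // separately on the specifier.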
- switch (X->getKind()) {
- case NestedNameSpecifier::Identifier:
- if (X->getAsIdentifier() != Y->getAsIdentifier())
- return false;
- break;
- case NestedNameSpecifier::Namespace:
- if (!declaresSameEntity(X->getAsNamespace(), Y->getAsNamespace()))
+ switch (Kind) {
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [NamespaceX, PrefixX] = X.getAsNamespaceAndPrefix();
+ auto [NamespaceY, PrefixY] = Y.getAsNamespaceAndPrefix();
+ if (!declaresSameEntity(NamespaceX->getNamespace(),
+ NamespaceY->getNamespace()))
return false;
- break;
- case NestedNameSpecifier::TypeSpec:
- if (X->getAsType()->getCanonicalTypeInternal() !=
- Y->getAsType()->getCanonicalTypeInternal())
+ return isSameQualifier(PrefixX, PrefixY);
+ }
+ case NestedNameSpecifier::Kind::Type: {
+ const auto *TX = X.getAsType(), *TY = Y.getAsType();
+ if (TX->getCanonicalTypeInternal() != TY->getCanonicalTypeInternal())
return false;
- break;
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ return isSameQualifier(TX->getPrefix(), TY->getPrefix());
+ }
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
return true;
}
-
- // Recurse into earlier portion of NNS, if any.
- auto *PX = X->getPrefix();
- auto *PY = Y->getPrefix();
- if (PX && PY)
- return isSameQualifier(PX, PY);
- return !PX && !PY;
+ llvm_unreachable("unhandled qualifier kind");
}
static bool hasSameCudaAttrs(const FunctionDecl *A, const FunctionDecl *B) {
@@ -7854,63 +8027,6 @@ bool ASTContext::isSameTemplateArgument(const TemplateArgument &Arg1,
llvm_unreachable("Unhandled template argument kind");
}
-NestedNameSpecifier *
-ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
- if (!NNS)
- return nullptr;
-
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- // Canonicalize the prefix but keep the identifier the same.
- return NestedNameSpecifier::Create(*this,
- getCanonicalNestedNameSpecifier(NNS->getPrefix()),
- NNS->getAsIdentifier());
-
- case NestedNameSpecifier::Namespace:
- // A namespace is canonical; build a nested-name-specifier with
- // this namespace and no prefix.
- return NestedNameSpecifier::Create(
- *this, nullptr, NNS->getAsNamespace()->getNamespace()->getFirstDecl());
-
- // The difference between TypeSpec and TypeSpecWithTemplate is that the
- // latter will have the 'template' keyword when printed.
- case NestedNameSpecifier::TypeSpec: {
- const Type *T = getCanonicalType(NNS->getAsType());
-
- // If we have some kind of dependent-named type (e.g., "typename T::type"),
- // break it apart into its prefix and identifier, then reconstitute those
- // as the canonical nested-name-specifier. This is required to canonicalize
- // a dependent nested-name-specifier involving typedefs of dependent-name
- // types, e.g.,
- // typedef typename T::type T1;
- // typedef typename T1::type T2;
- if (const auto *DNT = T->getAs<DependentNameType>())
- return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
- DNT->getIdentifier());
- if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>()) {
- const DependentTemplateStorage &DTN = DTST->getDependentTemplateName();
- QualType NewT = getDependentTemplateSpecializationType(
- ElaboratedTypeKeyword::None,
- {/*NNS=*/nullptr, DTN.getName(), /*HasTemplateKeyword=*/true},
- DTST->template_arguments(), /*IsCanonical=*/true);
- assert(NewT.isCanonical());
- NestedNameSpecifier *Prefix = DTN.getQualifier();
- if (!Prefix)
- Prefix = getCanonicalNestedNameSpecifier(NNS->getPrefix());
- return NestedNameSpecifier::Create(*this, Prefix, NewT.getTypePtr());
- }
- return NestedNameSpecifier::Create(*this, nullptr, T);
- }
-
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
- // The global specifier and __super specifier are canonical and unique.
- return NNS;
- }
-
- llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
-}
-
const ArrayType *ASTContext::getAsArrayType(QualType T) const {
// Handle the non-qualified case efficiently.
if (!T.hasLocalQualifiers()) {
@@ -8228,8 +8344,8 @@ QualType ASTContext::isPromotableBitField(Expr *E) const {
QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
assert(!Promotable.isNull());
assert(isPromotableIntegerType(Promotable));
- if (const auto *ET = Promotable->getAs<EnumType>())
- return ET->getDecl()->getPromotionType();
+ if (const auto *ED = Promotable->getAsEnumDecl())
+ return ED->getPromotionType();
if (const auto *BT = Promotable->getAs<BuiltinType>()) {
// C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
@@ -8288,8 +8404,9 @@ Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
static const Type *getIntegerTypeForEnum(const EnumType *ET) {
// Incomplete enum types are not treated as integer types.
// FIXME: In C++, enum types are never integer types.
- if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
- return ET->getDecl()->getIntegerType().getTypePtr();
+ const EnumDecl *ED = ET->getOriginalDecl()->getDefinitionOrSelf();
+ if (ED->isComplete() && !ED->isScoped())
+ return ED->getIntegerType().getTypePtr();
return nullptr;
}
@@ -8418,7 +8535,7 @@ TypedefDecl *ASTContext::getCFConstantStringDecl() const {
CFConstantStringTagDecl->completeDefinition();
// This type is designed to be compatible with NSConstantString, but cannot
// use the same name, since NSConstantString is an interface.
- auto tagType = getTagDeclType(CFConstantStringTagDecl);
+ CanQualType tagType = getCanonicalTagType(CFConstantStringTagDecl);
CFConstantStringTypeDecl =
buildImplicitTypedef(tagType, "__NSConstantString");
@@ -8433,28 +8550,28 @@ RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
// getCFConstantStringType - Return the type used for constant CFStrings.
QualType ASTContext::getCFConstantStringType() const {
- return getTypedefType(getCFConstantStringDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
+ getCFConstantStringDecl());
}
QualType ASTContext::getObjCSuperType() const {
if (ObjCSuperType.isNull()) {
RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super");
getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl);
- ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl);
+ ObjCSuperType = getCanonicalTagType(ObjCSuperTypeDecl);
}
return ObjCSuperType;
}
void ASTContext::setCFConstantStringType(QualType T) {
- const auto *TD = T->castAs<TypedefType>();
- CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl());
- const auto *TagType = TD->castAs<RecordType>();
- CFConstantStringTagDecl = TagType->getDecl();
+ const auto *TT = T->castAs<TypedefType>();
+ CFConstantStringTypeDecl = cast<TypedefDecl>(TT->getDecl());
+ CFConstantStringTagDecl = TT->castAsRecordDecl();
}
QualType ASTContext::getBlockDescriptorType() const {
if (BlockDescriptorType)
- return getTagDeclType(BlockDescriptorType);
+ return getCanonicalTagType(BlockDescriptorType);
RecordDecl *RD;
// FIXME: Needs the FlagAppleBlock bit.
@@ -8484,12 +8601,12 @@ QualType ASTContext::getBlockDescriptorType() const {
BlockDescriptorType = RD;
- return getTagDeclType(BlockDescriptorType);
+ return getCanonicalTagType(BlockDescriptorType);
}
QualType ASTContext::getBlockDescriptorExtendedType() const {
if (BlockDescriptorExtendedType)
- return getTagDeclType(BlockDescriptorExtendedType);
+ return getCanonicalTagType(BlockDescriptorExtendedType);
RecordDecl *RD;
// FIXME: Needs the FlagAppleBlock bit.
@@ -8523,7 +8640,7 @@ QualType ASTContext::getBlockDescriptorExtendedType() const {
RD->completeDefinition();
BlockDescriptorExtendedType = RD;
- return getTagDeclType(BlockDescriptorExtendedType);
+ return getCanonicalTagType(BlockDescriptorExtendedType);
}
OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
@@ -9160,8 +9277,8 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C,
llvm_unreachable("invalid BuiltinType::Kind value");
}
-static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
- EnumDecl *Enum = ET->getDecl();
+static char ObjCEncodingForEnumDecl(const ASTContext *C, const EnumDecl *ED) {
+ EnumDecl *Enum = ED->getDefinitionOrSelf();
// The encoding of a non-fixed enum type is always 'i', regardless of size.
if (!Enum->isFixed())
@@ -9204,8 +9321,8 @@ static void EncodeBitField(const ASTContext *Ctx, std::string& S,
S += llvm::utostr(Offset);
- if (const auto *ET = T->getAs<EnumType>())
- S += ObjCEncodingForEnumType(Ctx, ET);
+ if (const auto *ET = T->getAsCanonical<EnumType>())
+ S += ObjCEncodingForEnumDecl(Ctx, ET->getOriginalDecl());
else {
const auto *BT = T->castAs<BuiltinType>();
S += getObjCEncodingForPrimitiveType(Ctx, BT);
@@ -9262,7 +9379,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
if (const auto *BT = dyn_cast<BuiltinType>(CT))
S += getObjCEncodingForPrimitiveType(this, BT);
else
- S += ObjCEncodingForEnumType(this, cast<EnumType>(CT));
+ S += ObjCEncodingForEnumDecl(this, cast<EnumType>(CT)->getOriginalDecl());
return;
case Type::Complex:
@@ -9329,14 +9446,15 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
S += '*';
return;
}
- } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) {
+ } else if (const auto *RTy = PointeeTy->getAsCanonical<RecordType>()) {
+ const IdentifierInfo *II = RTy->getOriginalDecl()->getIdentifier();
// GCC binary compat: Need to convert "struct objc_class *" to "#".
- if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) {
+ if (II == &Idents.get("objc_class")) {
S += '#';
return;
}
// GCC binary compat: Need to convert "struct objc_object *" to "@".
- if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) {
+ if (II == &Idents.get("objc_object")) {
S += '@';
return;
}
@@ -9401,7 +9519,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
return;
case Type::Record: {
- RecordDecl *RDecl = cast<RecordType>(CT)->getDecl();
+ RecordDecl *RDecl = cast<RecordType>(CT)->getOriginalDecl();
S += RDecl->isUnion() ? '(' : '{';
// Anonymous structures print as '?'
if (const IdentifierInfo *II = RDecl->getIdentifier()) {
@@ -9900,7 +10018,7 @@ CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
}
VaListTagDecl->completeDefinition();
Context->VaListTagDecl = VaListTagDecl;
- QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
// } __builtin_va_list;
return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list");
@@ -9952,14 +10070,15 @@ static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
}
VaListTagDecl->completeDefinition();
Context->VaListTagDecl = VaListTagDecl;
- QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
// } __va_list_tag;
TypedefDecl *VaListTagTypedefDecl =
Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
QualType VaListTagTypedefType =
- Context->getTypedefType(VaListTagTypedefDecl);
+ Context->getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, VaListTagTypedefDecl);
// typedef __va_list_tag __builtin_va_list[1];
llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
@@ -10011,7 +10130,7 @@ CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
}
VaListTagDecl->completeDefinition();
Context->VaListTagDecl = VaListTagDecl;
- QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
// };
@@ -10059,7 +10178,7 @@ CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
Context->VaListTagDecl = VaListDecl;
// typedef struct __va_list __builtin_va_list;
- QualType T = Context->getRecordType(VaListDecl);
+ CanQualType T = Context->getCanonicalTagType(VaListDecl);
return Context->buildImplicitTypedef(T, "__builtin_va_list");
}
@@ -10106,7 +10225,7 @@ CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
}
VaListTagDecl->completeDefinition();
Context->VaListTagDecl = VaListTagDecl;
- QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
// };
@@ -10153,13 +10272,15 @@ static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
}
VaListTagDecl->completeDefinition();
Context->VaListTagDecl = VaListTagDecl;
- QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
// } __va_list_tag;
TypedefDecl *VaListTagTypedefDecl =
Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
- QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl);
+ QualType VaListTagTypedefType =
+ Context->getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, VaListTagTypedefDecl);
// typedef __va_list_tag __builtin_va_list[1];
llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
@@ -10197,7 +10318,7 @@ CreateXtensaABIBuiltinVaListDecl(const ASTContext *Context) {
}
VaListTagDecl->completeDefinition();
Context->VaListTagDecl = VaListTagDecl;
- QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
// } __va_list_tag;
TypedefDecl *VaListTagTypedefDecl =
@@ -10311,22 +10432,28 @@ TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
/// Retrieve the template name that represents a qualified
/// template name such as \c std::vector.
-TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
+TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier Qualifier,
bool TemplateKeyword,
TemplateName Template) const {
assert(Template.getKind() == TemplateName::Template ||
Template.getKind() == TemplateName::UsingTemplate);
+ if (Template.getAsTemplateDecl()->getKind() == Decl::TemplateTemplateParm) {
+ assert(!Qualifier && "unexpected qualified template template parameter");
+ assert(TemplateKeyword == false);
+ return Template;
+ }
+
// FIXME: Canonicalization?
llvm::FoldingSetNodeID ID;
- QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template);
+ QualifiedTemplateName::Profile(ID, Qualifier, TemplateKeyword, Template);
void *InsertPos = nullptr;
QualifiedTemplateName *QTN =
- QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+ QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
if (!QTN) {
QTN = new (*this, alignof(QualifiedTemplateName))
- QualifiedTemplateName(NNS, TemplateKeyword, Template);
+ QualifiedTemplateName(Qualifier, TemplateKeyword, Template);
QualifiedTemplateNames.InsertNode(QTN, InsertPos);
}
@@ -11296,7 +11423,7 @@ QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
bool OfBlockPointer,
bool Unqualified) {
if (const RecordType *UT = T->getAsUnionType()) {
- RecordDecl *UD = UT->getDecl();
+ RecordDecl *UD = UT->getOriginalDecl()->getMostRecentDecl();
if (UD->hasAttr<TransparentUnionAttr>()) {
for (const auto *I : UD->fields()) {
QualType ET = I->getType().getUnqualifiedType();
@@ -11442,6 +11569,11 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
if (lproto->getMethodQuals() != rproto->getMethodQuals())
return {};
+ // Function protos with different 'cfi_salt' values aren't compatible.
+ if (lproto->getExtraAttributeInfo().CFISalt !=
+ rproto->getExtraAttributeInfo().CFISalt)
+ return {};
+
// Function effects are handled similarly to noreturn, see above.
FunctionEffectsRef LHSFX = lproto->getFunctionEffects();
FunctionEffectsRef RHSFX = rproto->getFunctionEffects();
@@ -11527,8 +11659,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
// Look at the converted type of enum types, since that is the type used
// to pass enum values.
- if (const auto *Enum = paramTy->getAs<EnumType>()) {
- paramTy = Enum->getDecl()->getIntegerType();
+ if (const auto *ED = paramTy->getAsEnumDecl()) {
+ paramTy = ED->getIntegerType();
if (paramTy.isNull())
return {};
}
@@ -11560,7 +11692,8 @@ static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
// a signed integer type, or an unsigned integer type.
// Compatibility is based on the underlying type, not the promotion
// type.
- QualType underlyingType = ET->getDecl()->getIntegerType();
+ QualType underlyingType =
+ ET->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (underlyingType.isNull())
return {};
if (Context.hasSameType(underlyingType, other))
@@ -11685,10 +11818,10 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
if (LHSClass != RHSClass) {
// Note that we only have special rules for turning block enum
// returns into block int returns, not vice-versa.
- if (const auto *ETy = LHS->getAs<EnumType>()) {
+ if (const auto *ETy = LHS->getAsCanonical<EnumType>()) {
return mergeEnumWithInteger(*this, ETy, RHS, false);
}
- if (const EnumType* ETy = RHS->getAs<EnumType>()) {
+ if (const EnumType *ETy = RHS->getAsCanonical<EnumType>()) {
return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType);
}
// allow block pointer type to match an 'id' type.
@@ -12118,8 +12251,8 @@ QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
//===----------------------------------------------------------------------===//
unsigned ASTContext::getIntWidth(QualType T) const {
- if (const auto *ET = T->getAs<EnumType>())
- T = ET->getDecl()->getIntegerType();
+ if (const auto *ED = T->getAsEnumDecl())
+ T = ED->getIntegerType();
if (T->isBooleanType())
return 1;
if (const auto *EIT = T->getAs<BitIntType>())
@@ -12144,8 +12277,8 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
// For enums, get the underlying integer type of the enum, and let the general
// integer type sign-changing code handle it.
- if (const auto *ETy = T->getAs<EnumType>())
- T = ETy->getDecl()->getIntegerType();
+ if (const auto *ED = T->getAsEnumDecl())
+ T = ED->getIntegerType();
switch (T->castAs<BuiltinType>()->getKind()) {
case BuiltinType::Char_U:
@@ -12218,8 +12351,8 @@ QualType ASTContext::getCorrespondingSignedType(QualType T) const {
// For enums, get the underlying integer type of the enum, and let the general
// integer type sign-changing code handle it.
- if (const auto *ETy = T->getAs<EnumType>())
- T = ETy->getDecl()->getIntegerType();
+ if (const auto *ED = T->getAsEnumDecl())
+ T = ED->getIntegerType();
switch (T->castAs<BuiltinType>()->getKind()) {
case BuiltinType::Char_S:
@@ -12980,6 +13113,14 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
if (D->hasAttr<WeakRefAttr>())
return false;
+ // SYCL device compilation requires that functions defined with the
+ // sycl_kernel_entry_point or sycl_external attributes be emitted. All
+ // other entities are emitted only if they are used by a function
+ // defined with one of those attributes.
+ if (LangOpts.SYCLIsDevice)
+ return isa<FunctionDecl>(D) && (D->hasAttr<SYCLKernelEntryPointAttr>() ||
+ D->hasAttr<SYCLExternalAttr>());
+
// Aliases and used decls are required.
if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
return true;
@@ -12989,15 +13130,6 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
if (!FD->doesThisDeclarationHaveABody())
return FD->doesDeclarationForceExternallyVisibleDefinition();
- // Function definitions with the sycl_kernel_entry_point attribute are
- // required during device compilation so that SYCL kernel caller offload
- // entry points are emitted.
- if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelEntryPointAttr>())
- return true;
-
- // FIXME: Functions declared with SYCL_EXTERNAL are required during
- // device compilation.
-
// Constructors and destructors are required.
if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
return true;
@@ -13573,7 +13705,7 @@ static T *getCommonDeclChecked(T *X, T *Y) {
const_cast<Decl *>(cast<Decl>(Y))));
}
-static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X,
+static TemplateName getCommonTemplateName(const ASTContext &Ctx, TemplateName X,
TemplateName Y,
bool IgnoreDeduced = false) {
if (X.getAsVoidPointer() == Y.getAsVoidPointer())
@@ -13588,7 +13720,7 @@ static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X,
return CX;
}
-static TemplateName getCommonTemplateNameChecked(ASTContext &Ctx,
+static TemplateName getCommonTemplateNameChecked(const ASTContext &Ctx,
TemplateName X, TemplateName Y,
bool IgnoreDeduced) {
TemplateName R = getCommonTemplateName(Ctx, X, Y, IgnoreDeduced);
@@ -13596,7 +13728,7 @@ static TemplateName getCommonTemplateNameChecked(ASTContext &Ctx,
return R;
}
-static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs,
+static auto getCommonTypes(const ASTContext &Ctx, ArrayRef<QualType> Xs,
ArrayRef<QualType> Ys, bool Unqualified = false) {
assert(Xs.size() == Ys.size());
SmallVector<QualType, 8> Rs(Xs.size());
@@ -13611,7 +13743,7 @@ static SourceLocation getCommonAttrLoc(const T *X, const T *Y) {
: SourceLocation();
}
-static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx,
+static TemplateArgument getCommonTemplateArgument(const ASTContext &Ctx,
const TemplateArgument &X,
const TemplateArgument &Y) {
if (X.getKind() != Y.getKind())
@@ -13657,7 +13789,7 @@ static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx,
}
}
-static bool getCommonTemplateArguments(ASTContext &Ctx,
+static bool getCommonTemplateArguments(const ASTContext &Ctx,
SmallVectorImpl<TemplateArgument> &R,
ArrayRef<TemplateArgument> Xs,
ArrayRef<TemplateArgument> Ys) {
@@ -13672,7 +13804,7 @@ static bool getCommonTemplateArguments(ASTContext &Ctx,
return false;
}
-static auto getCommonTemplateArguments(ASTContext &Ctx,
+static auto getCommonTemplateArguments(const ASTContext &Ctx,
ArrayRef<TemplateArgument> Xs,
ArrayRef<TemplateArgument> Ys) {
SmallVector<TemplateArgument, 8> R;
@@ -13683,163 +13815,101 @@ static auto getCommonTemplateArguments(ASTContext &Ctx,
}
template <class T>
-static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) {
- return X->getKeyword() == Y->getKeyword() ? X->getKeyword()
- : ElaboratedTypeKeyword::None;
+static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y,
+ bool IsSame) {
+ ElaboratedTypeKeyword KX = X->getKeyword(), KY = Y->getKeyword();
+ if (KX == KY)
+ return KX;
+ KX = getCanonicalElaboratedTypeKeyword(KX);
+ assert(!IsSame || KX == getCanonicalElaboratedTypeKeyword(KY));
+ return KX;
}
/// Returns a NestedNameSpecifier which has only the common sugar
/// present in both NNS1 and NNS2.
-static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx,
- NestedNameSpecifier *NNS1,
- NestedNameSpecifier *NNS2,
- bool IsSame) {
+static NestedNameSpecifier getCommonNNS(const ASTContext &Ctx,
+ NestedNameSpecifier NNS1,
+ NestedNameSpecifier NNS2, bool IsSame) {
// If they are identical, all sugar is common.
if (NNS1 == NNS2)
return NNS1;
- // IsSame implies both NNSes are equivalent.
- NestedNameSpecifier *Canon = Ctx.getCanonicalNestedNameSpecifier(NNS1);
- if (Canon != Ctx.getCanonicalNestedNameSpecifier(NNS2)) {
+ // IsSame implies both Qualifiers are equivalent.
+ NestedNameSpecifier Canon = NNS1.getCanonical();
+ if (Canon != NNS2.getCanonical()) {
assert(!IsSame && "Should be the same NestedNameSpecifier");
// If they are not the same, there is nothing to unify.
- // FIXME: It would be useful here if we could represent a canonically
- // empty NNS, which is not identical to an empty-as-written NNS.
- return nullptr;
+ return std::nullopt;
}
- NestedNameSpecifier *R = nullptr;
- NestedNameSpecifier::SpecifierKind K1 = NNS1->getKind(), K2 = NNS2->getKind();
- switch (K1) {
- case NestedNameSpecifier::SpecifierKind::Identifier: {
- assert(K2 == NestedNameSpecifier::SpecifierKind::Identifier);
- IdentifierInfo *II = NNS1->getAsIdentifier();
- assert(II == NNS2->getAsIdentifier());
- // For an identifier, the prefixes are significant, so they must be the
- // same.
- NestedNameSpecifier *P = ::getCommonNNS(Ctx, NNS1->getPrefix(),
- NNS2->getPrefix(), /*IsSame=*/true);
- R = NestedNameSpecifier::Create(Ctx, P, II);
- break;
- }
- case NestedNameSpecifier::SpecifierKind::Namespace: {
- assert(K2 == NestedNameSpecifier::SpecifierKind::Namespace);
- // The prefixes for namespaces are not significant; a namespace's
- // declaration identifies it uniquely.
- NestedNameSpecifier *P =
- ::getCommonNNS(Ctx, NNS1->getPrefix(), NNS2->getPrefix(),
- /*IsSame=*/false);
- NamespaceBaseDecl *Namespace1 = NNS1->getAsNamespace(),
- *Namespace2 = NNS2->getAsNamespace();
+ NestedNameSpecifier R = std::nullopt;
+ NestedNameSpecifier::Kind Kind = NNS1.getKind();
+ assert(Kind == NNS2.getKind());
+ switch (Kind) {
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace1, Prefix1] = NNS1.getAsNamespaceAndPrefix();
+ auto [Namespace2, Prefix2] = NNS2.getAsNamespaceAndPrefix();
auto Kind = Namespace1->getKind();
if (Kind != Namespace2->getKind() ||
(Kind == Decl::NamespaceAlias &&
!declaresSameEntity(Namespace1, Namespace2))) {
- R = NestedNameSpecifier::Create(
- Ctx, P,
+ R = NestedNameSpecifier(
+ Ctx,
::getCommonDeclChecked(Namespace1->getNamespace(),
- Namespace2->getNamespace()));
+ Namespace2->getNamespace()),
+ /*Prefix=*/std::nullopt);
break;
}
- R = NestedNameSpecifier::Create(
- Ctx, P, ::getCommonDeclChecked(Namespace1, Namespace2));
+ // The prefixes for namespaces are not significant; a namespace's
+ // declaration identifies it uniquely.
+ NestedNameSpecifier Prefix = ::getCommonNNS(Ctx, Prefix1, Prefix2,
+ /*IsSame=*/false);
+ R = NestedNameSpecifier(Ctx, ::getCommonDeclChecked(Namespace1, Namespace2),
+ Prefix);
break;
}
- case NestedNameSpecifier::SpecifierKind::TypeSpec: {
- // FIXME: See comment below, on Super case.
- if (K2 == NestedNameSpecifier::SpecifierKind::Super)
- return Ctx.getCanonicalNestedNameSpecifier(NNS1);
-
- assert(K2 == NestedNameSpecifier::SpecifierKind::TypeSpec);
-
- const Type *T1 = NNS1->getAsType(), *T2 = NNS2->getAsType();
- if (T1 == T2) {
- // If the types are identical, then only the prefixes differ.
- // A well-formed NNS never has these types, as they have
- // special normalized forms.
- assert((!isa<DependentNameType, ElaboratedType>(T1)));
- // Only for a DependentTemplateSpecializationType is the prefix
- // actually significant. A DependentName, which would be another
- // plausible case, cannot occur here, as explained above.
- bool IsSame = isa<DependentTemplateSpecializationType>(T1);
- NestedNameSpecifier *P =
- ::getCommonNNS(Ctx, NNS1->getPrefix(), NNS2->getPrefix(), IsSame);
- R = NestedNameSpecifier::Create(Ctx, P, T1);
- break;
- }
- // TODO: Try to salvage the original prefix.
- // If getCommonSugaredType removed any top level sugar, the original prefix
- // is not applicable anymore.
+ case NestedNameSpecifier::Kind::Type: {
+ const Type *T1 = NNS1.getAsType(), *T2 = NNS2.getAsType();
const Type *T = Ctx.getCommonSugaredType(QualType(T1, 0), QualType(T2, 0),
/*Unqualified=*/true)
.getTypePtr();
-
- // A NestedNameSpecifier has special normalization rules for certain types.
- switch (T->getTypeClass()) {
- case Type::Elaborated: {
- // An ElaboratedType is stripped off; its Qualifier becomes the prefix.
- auto *ET = cast<ElaboratedType>(T);
- R = NestedNameSpecifier::Create(Ctx, ET->getQualifier(),
- ET->getNamedType().getTypePtr());
- break;
- }
- case Type::DependentName: {
- // A DependentName is turned into an Identifier NNS.
- auto *DN = cast<DependentNameType>(T);
- R = NestedNameSpecifier::Create(Ctx, DN->getQualifier(),
- DN->getIdentifier());
- break;
- }
- case Type::DependentTemplateSpecialization: {
- // A DependentTemplateSpecializationType loses its Qualifier, which
- // is turned into the prefix.
- auto *DTST = cast<DependentTemplateSpecializationType>(T);
- const DependentTemplateStorage &DTN = DTST->getDependentTemplateName();
- DependentTemplateStorage NewDTN(/*Qualifier=*/nullptr, DTN.getName(),
- DTN.hasTemplateKeyword());
- T = Ctx.getDependentTemplateSpecializationType(DTST->getKeyword(), NewDTN,
- DTST->template_arguments())
- .getTypePtr();
- R = NestedNameSpecifier::Create(Ctx, DTN.getQualifier(), T);
- break;
- }
- default:
- R = NestedNameSpecifier::Create(Ctx, /*Prefix=*/nullptr, T);
- break;
- }
+ R = NestedNameSpecifier(T);
break;
}
- case NestedNameSpecifier::SpecifierKind::Super:
+ case NestedNameSpecifier::Kind::MicrosoftSuper: {
// FIXME: Can __super even be used with data members?
// If it's only usable in functions, we will never see it here,
// unless we save the qualifiers used in function types.
// In that case, it is possible that NNS2 is a type,
// in which case we should degrade the result to
// a CXXRecordType.
- return Ctx.getCanonicalNestedNameSpecifier(NNS1);
- case NestedNameSpecifier::SpecifierKind::Global:
- // The global NNS is a singleton.
- assert(K2 == NestedNameSpecifier::SpecifierKind::Global &&
- "Global NNS cannot be equivalent to any other kind");
- llvm_unreachable("Global NestedNameSpecifiers did not compare equal");
- }
- assert(Ctx.getCanonicalNestedNameSpecifier(R) == Canon);
+ R = NestedNameSpecifier(getCommonDeclChecked(NNS1.getAsMicrosoftSuper(),
+ NNS2.getAsMicrosoftSuper()));
+ break;
+ }
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ // These are singletons.
+ llvm_unreachable("singletons did not compare equal");
+ }
+ assert(R.getCanonical() == Canon);
return R;
}
template <class T>
-static NestedNameSpecifier *getCommonQualifier(ASTContext &Ctx, const T *X,
- const T *Y, bool IsSame) {
+static NestedNameSpecifier getCommonQualifier(const ASTContext &Ctx, const T *X,
+ const T *Y, bool IsSame) {
return ::getCommonNNS(Ctx, X->getQualifier(), Y->getQualifier(), IsSame);
}
template <class T>
-static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) {
+static QualType getCommonElementType(const ASTContext &Ctx, const T *X,
+ const T *Y) {
return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType());
}
template <class T>
-static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X,
+static QualType getCommonArrayElementType(const ASTContext &Ctx, const T *X,
Qualifiers &QX, const T *Y,
Qualifiers &QY) {
QualType EX = X->getElementType(), EY = Y->getElementType();
@@ -13856,11 +13926,13 @@ static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X,
}
template <class T>
-static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) {
+static QualType getCommonPointeeType(const ASTContext &Ctx, const T *X,
+ const T *Y) {
return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType());
}
-template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) {
+template <class T>
+static auto *getCommonSizeExpr(const ASTContext &Ctx, T *X, T *Y) {
assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr()));
return X->getSizeExpr();
}
@@ -13880,8 +13952,9 @@ static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X,
// each type (in a canonical sense) only once, in the order they appear
// from X to Y. If they occur in both X and Y, the result will contain
// the common sugared type between them.
-static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out,
- ArrayRef<QualType> X, ArrayRef<QualType> Y) {
+static void mergeTypeLists(const ASTContext &Ctx,
+ SmallVectorImpl<QualType> &Out, ArrayRef<QualType> X,
+ ArrayRef<QualType> Y) {
llvm::DenseMap<QualType, unsigned> Found;
for (auto Ts : {X, Y}) {
for (QualType T : Ts) {
@@ -13900,7 +13973,7 @@ FunctionProtoType::ExceptionSpecInfo
ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
FunctionProtoType::ExceptionSpecInfo ESI2,
SmallVectorImpl<QualType> &ExceptionTypeStorage,
- bool AcceptDependent) {
+ bool AcceptDependent) const {
ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type;
// If either of them can throw anything, that is the result.
@@ -13964,7 +14037,7 @@ ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
llvm_unreachable("invalid ExceptionSpecificationType");
}
-static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
+static QualType getCommonNonSugarTypeNode(const ASTContext &Ctx, const Type *X,
Qualifiers &QX, const Type *Y,
Qualifiers &QY) {
Type::TypeClass TC = X->getTypeClass();
@@ -13982,11 +14055,10 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
SUGAR_FREE_TYPE(Builtin)
SUGAR_FREE_TYPE(DeducedTemplateSpecialization)
SUGAR_FREE_TYPE(DependentBitInt)
- SUGAR_FREE_TYPE(Enum)
SUGAR_FREE_TYPE(BitInt)
SUGAR_FREE_TYPE(ObjCInterface)
- SUGAR_FREE_TYPE(Record)
SUGAR_FREE_TYPE(SubstTemplateTypeParmPack)
+ SUGAR_FREE_TYPE(SubstBuiltinTemplatePack)
SUGAR_FREE_TYPE(UnresolvedUsing)
SUGAR_FREE_TYPE(HLSLAttributedResource)
SUGAR_FREE_TYPE(HLSLInlineSpirv)
@@ -14203,13 +14275,15 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY),
getCommonAttrLoc(VX, VY), VX->getVectorKind());
}
+ case Type::Enum:
+ case Type::Record:
case Type::InjectedClassName: {
- const auto *IX = cast<InjectedClassNameType>(X),
- *IY = cast<InjectedClassNameType>(Y);
- return Ctx.getInjectedClassNameType(
- getCommonDeclChecked(IX->getDecl(), IY->getDecl()),
- Ctx.getCommonSugaredType(IX->getInjectedSpecializationType(),
- IY->getInjectedSpecializationType()));
+ const auto *TX = cast<TagType>(X), *TY = cast<TagType>(Y);
+ return Ctx.getTagType(
+ ::getCommonTypeKeyword(TX, TY, /*IsSame=*/false),
+ ::getCommonQualifier(Ctx, TX, TY, /*IsSame=*/false),
+ ::getCommonDeclChecked(TX->getOriginalDecl(), TY->getOriginalDecl()),
+ /*OwnedTag=*/false);
}
case Type::TemplateSpecialization: {
const auto *TX = cast<TemplateSpecializationType>(X),
@@ -14217,6 +14291,7 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(),
TY->template_arguments());
return Ctx.getTemplateSpecializationType(
+ getCommonTypeKeyword(TX, TY, /*IsSame=*/false),
::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(),
TY->getTemplateName(),
/*IgnoreDeduced=*/true),
@@ -14244,7 +14319,7 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
*NY = cast<DependentNameType>(Y);
assert(NX->getIdentifier() == NY->getIdentifier());
return Ctx.getDependentNameType(
- getCommonTypeKeyword(NX, NY),
+ getCommonTypeKeyword(NX, NY, /*IsSame=*/true),
getCommonQualifier(Ctx, NX, NY, /*IsSame=*/true), NX->getIdentifier());
}
case Type::DependentTemplateSpecialization: {
@@ -14260,7 +14335,7 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
/*IsSame=*/true),
SX.getName(), SX.hasTemplateKeyword() || SY.hasTemplateKeyword());
return Ctx.getDependentTemplateSpecializationType(
- getCommonTypeKeyword(TX, TY), Name, As);
+ getCommonTypeKeyword(TX, TY, /*IsSame=*/true), Name, As);
}
case Type::UnaryTransform: {
const auto *TX = cast<UnaryTransformType>(X),
@@ -14301,7 +14376,7 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
llvm_unreachable("Unknown Type Class");
}
-static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
+static QualType getCommonSugarTypeNode(const ASTContext &Ctx, const Type *X,
const Type *Y,
SplitQualType Underlying) {
Type::TypeClass TC = X->getTypeClass();
@@ -14413,15 +14488,6 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
case Type::DeducedTemplateSpecialization:
// FIXME: Try to merge these.
return QualType();
-
- case Type::Elaborated: {
- const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y);
- return Ctx.getElaboratedType(
- ::getCommonTypeKeyword(EX, EY),
- ::getCommonQualifier(Ctx, EX, EY, /*IsSame=*/false),
- Ctx.getQualifiedType(Underlying),
- ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl()));
- }
case Type::MacroQualified: {
const auto *MX = cast<MacroQualifiedType>(X),
*MY = cast<MacroQualifiedType>(Y);
@@ -14465,16 +14531,19 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
if (getCommonTemplateArguments(Ctx, As, TX->template_arguments(),
TY->template_arguments()))
return QualType();
- return Ctx.getTemplateSpecializationType(CTN, As,
- /*CanonicalArgs=*/{},
- Ctx.getQualifiedType(Underlying));
+ return Ctx.getTemplateSpecializationType(
+ getCommonTypeKeyword(TX, TY, /*IsSame=*/false), CTN, As,
+ /*CanonicalArgs=*/{}, Ctx.getQualifiedType(Underlying));
}
case Type::Typedef: {
const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y);
const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl());
if (!CD)
return QualType();
- return Ctx.getTypedefType(CD, Ctx.getQualifiedType(Underlying));
+ return Ctx.getTypedefType(
+ ::getCommonTypeKeyword(TX, TY, /*IsSame=*/false),
+ ::getCommonQualifier(Ctx, TX, TY, /*IsSame=*/false), CD,
+ Ctx.getQualifiedType(Underlying));
}
case Type::TypeOf: {
// The common sugar between two typeof expressions, where one is
@@ -14505,11 +14574,12 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
}
case Type::Using: {
const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y);
- const UsingShadowDecl *CD =
- ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl());
+ const UsingShadowDecl *CD = ::getCommonDecl(UX->getDecl(), UY->getDecl());
if (!CD)
return QualType();
- return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying));
+ return Ctx.getUsingType(::getCommonTypeKeyword(UX, UY, /*IsSame=*/false),
+ ::getCommonQualifier(Ctx, UX, UY, /*IsSame=*/false),
+ CD, Ctx.getQualifiedType(Underlying));
}
case Type::MemberPointer: {
const auto *PX = cast<MemberPointerType>(X),
@@ -14568,7 +14638,7 @@ static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
}
QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
- bool Unqualified) {
+ bool Unqualified) const {
assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y));
if (X == Y)
return X;
@@ -15119,7 +15189,7 @@ StringRef ASTContext::getCUIDHash() const {
}
const CXXRecordDecl *
-ASTContext::baseForVTableAuthentication(const CXXRecordDecl *ThisClass) {
+ASTContext::baseForVTableAuthentication(const CXXRecordDecl *ThisClass) const {
assert(ThisClass);
assert(ThisClass->isPolymorphic());
const CXXRecordDecl *PrimaryBase = ThisClass;
diff --git a/clang/lib/AST/ASTDiagnostic.cpp b/clang/lib/AST/ASTDiagnostic.cpp
index 2ef0c31..d7fd411 100644
--- a/clang/lib/AST/ASTDiagnostic.cpp
+++ b/clang/lib/AST/ASTDiagnostic.cpp
@@ -36,11 +36,6 @@ QualType clang::desugarForDiagnostic(ASTContext &Context, QualType QT,
while (true) {
const Type *Ty = QC.strip(QT);
- // Don't aka just because we saw an elaborated type...
- if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(Ty)) {
- QT = ET->desugar();
- continue;
- }
// ... or a using type ...
if (const UsingType *UT = dyn_cast<UsingType>(Ty)) {
QT = UT->desugar();
@@ -130,7 +125,8 @@ QualType clang::desugarForDiagnostic(ASTContext &Context, QualType QT,
if (DesugarArgument) {
ShouldAKA = true;
QT = Context.getTemplateSpecializationType(
- TST->getTemplateName(), Args, /*CanonicalArgs=*/{}, QT);
+ TST->getKeyword(), TST->getTemplateName(), Args,
+ /*CanonicalArgs=*/{}, QT);
}
break;
}
@@ -200,7 +196,8 @@ break; \
// Don't desugar through the primary typedef of an anonymous type.
if (const TagType *UTT = Underlying->getAs<TagType>())
if (const TypedefType *QTT = dyn_cast<TypedefType>(QT))
- if (UTT->getDecl()->getTypedefNameForAnonDecl() == QTT->getDecl())
+ if (UTT->getOriginalDecl()->getTypedefNameForAnonDecl() ==
+ QTT->getDecl())
break;
// Record that we actually looked through an opaque type here.
@@ -461,13 +458,12 @@ void clang::FormatASTNodeDiagnosticArgument(
ND->getNameForDiagnostic(OS, Context.getPrintingPolicy(), Qualified);
break;
}
- case DiagnosticsEngine::ak_nestednamespec: {
- NestedNameSpecifier *NNS = reinterpret_cast<NestedNameSpecifier*>(Val);
- NNS->print(OS, Context.getPrintingPolicy(),
+ case DiagnosticsEngine::ak_nestednamespec:
+ NestedNameSpecifier::getFromVoidPointer(reinterpret_cast<void *>(Val))
+ .print(OS, Context.getPrintingPolicy(),
/*ResolveTemplateArguments=*/false,
/*PrintFinalScopeResOp=*/false);
break;
- }
case DiagnosticsEngine::ak_declcontext: {
DeclContext *DC = reinterpret_cast<DeclContext *> (Val);
assert(DC && "Should never have a null declaration context");
@@ -484,9 +480,8 @@ void clang::FormatASTNodeDiagnosticArgument(
} else if (isLambdaCallOperator(DC)) {
OS << "lambda expression";
} else if (TypeDecl *Type = dyn_cast<TypeDecl>(DC)) {
- OS << ConvertTypeToDiagnosticString(Context,
- Context.getTypeDeclType(Type),
- PrevArgs, QualTypeVals);
+ OS << ConvertTypeToDiagnosticString(
+ Context, Context.getTypeDeclType(Type), PrevArgs, QualTypeVals);
} else {
assert(isa<NamedDecl>(DC) && "Expected a NamedDecl");
NamedDecl *ND = cast<NamedDecl>(DC);
@@ -1158,12 +1153,13 @@ class TemplateDiff {
return nullptr;
const ClassTemplateSpecializationDecl *CTSD =
- dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getOriginalDecl());
if (!CTSD)
return nullptr;
Ty = Context.getTemplateSpecializationType(
+ ElaboratedTypeKeyword::None,
TemplateName(CTSD->getSpecializedTemplate()),
CTSD->getTemplateArgs().asArray(), /*CanonicalArgs=*/{},
Ty.getLocalUnqualifiedType().getCanonicalType());
@@ -1743,25 +1739,10 @@ class TemplateDiff {
std::string FromTypeStr = FromType.isNull() ? "(no argument)"
: FromType.getAsString(Policy);
- std::string ToTypeStr = ToType.isNull() ? "(no argument)"
- : ToType.getAsString(Policy);
- // Print without ElaboratedType sugar if it is better.
+ std::string ToTypeStr =
+ ToType.isNull() ? "(no argument)" : ToType.getAsString(Policy);
// TODO: merge this with other aka printing above.
if (FromTypeStr == ToTypeStr) {
- const auto *FromElTy = dyn_cast<ElaboratedType>(FromType),
- *ToElTy = dyn_cast<ElaboratedType>(ToType);
- if (FromElTy || ToElTy) {
- std::string FromNamedTypeStr =
- FromElTy ? FromElTy->getNamedType().getAsString(Policy)
- : FromTypeStr;
- std::string ToNamedTypeStr =
- ToElTy ? ToElTy->getNamedType().getAsString(Policy) : ToTypeStr;
- if (FromNamedTypeStr != ToNamedTypeStr) {
- FromTypeStr = FromNamedTypeStr;
- ToTypeStr = ToNamedTypeStr;
- goto PrintTypes;
- }
- }
// Switch to canonical typename if it is better.
std::string FromCanTypeStr =
FromType.getCanonicalType().getAsString(Policy);
@@ -1772,8 +1753,8 @@ class TemplateDiff {
}
}
- PrintTypes:
- if (PrintTree) OS << '[';
+ if (PrintTree)
+ OS << '[';
OS << (FromDefault ? "(default) " : "");
Bold();
OS << FromTypeStr;
diff --git a/clang/lib/AST/ASTDumper.cpp b/clang/lib/AST/ASTDumper.cpp
index 5e4487e..8e651a0 100644
--- a/clang/lib/AST/ASTDumper.cpp
+++ b/clang/lib/AST/ASTDumper.cpp
@@ -117,7 +117,9 @@ void ASTDumper::dumpTemplateDeclSpecialization(const SpecializationDecl *D,
// FIXME: The redecls() range sometimes has elements of a less-specific
// type. (In particular, ClassTemplateSpecializationDecl::redecls() gives
// us TagDecls, and should give CXXRecordDecls).
- auto *Redecl = cast<SpecializationDecl>(RedeclWithBadType);
+ auto *Redecl = dyn_cast<SpecializationDecl>(RedeclWithBadType);
+ if (!Redecl)
+ continue;
switch (Redecl->getTemplateSpecializationKind()) {
case TSK_ExplicitInstantiationDeclaration:
case TSK_ExplicitInstantiationDefinition:
diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp
index 8e2927b..6299efa 100644
--- a/clang/lib/AST/ASTImporter.cpp
+++ b/clang/lib/AST/ASTImporter.cpp
@@ -949,8 +949,10 @@ ASTNodeImporter::import(const TemplateArgumentLoc &TALoc) {
else
return TSIOrErr.takeError();
} else {
- auto ToTemplateQualifierLocOrErr =
- import(FromInfo.getTemplateQualifierLoc());
+ auto ToTemplateKWLocOrErr = import(FromInfo.getTemplateKwLoc());
+ if (!ToTemplateKWLocOrErr)
+ return ToTemplateKWLocOrErr.takeError();
+ auto ToTemplateQualifierLocOrErr = import(TALoc.getTemplateQualifierLoc());
if (!ToTemplateQualifierLocOrErr)
return ToTemplateQualifierLocOrErr.takeError();
auto ToTemplateNameLocOrErr = import(FromInfo.getTemplateNameLoc());
@@ -961,8 +963,9 @@ ASTNodeImporter::import(const TemplateArgumentLoc &TALoc) {
if (!ToTemplateEllipsisLocOrErr)
return ToTemplateEllipsisLocOrErr.takeError();
ToInfo = TemplateArgumentLocInfo(
- Importer.getToContext(), *ToTemplateQualifierLocOrErr,
- *ToTemplateNameLocOrErr, *ToTemplateEllipsisLocOrErr);
+ Importer.getToContext(), *ToTemplateKWLocOrErr,
+ *ToTemplateQualifierLocOrErr, *ToTemplateNameLocOrErr,
+ *ToTemplateEllipsisLocOrErr);
}
return TemplateArgumentLoc(Arg, ToInfo);
@@ -1597,13 +1600,15 @@ ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
ExpectedType ASTNodeImporter::VisitUnresolvedUsingType(
const UnresolvedUsingType *T) {
Error Err = Error::success();
- auto ToD = importChecked(Err, T->getDecl());
- auto ToPrevD = importChecked(Err, T->getDecl()->getPreviousDecl());
+ auto ToQualifier = importChecked(Err, T->getQualifier());
+ auto *ToD = importChecked(Err, T->getDecl());
if (Err)
return std::move(Err);
- return Importer.getToContext().getTypeDeclType(
- ToD, cast_or_null<TypeDecl>(ToPrevD));
+ if (T->isCanonicalUnqualified())
+ return Importer.getToContext().getCanonicalUnresolvedUsingType(ToD);
+ return Importer.getToContext().getUnresolvedUsingType(T->getKeyword(),
+ ToQualifier, ToD);
}
ExpectedType ASTNodeImporter::VisitParenType(const ParenType *T) {
@@ -1631,15 +1636,17 @@ ExpectedType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
if (!ToDeclOrErr)
return ToDeclOrErr.takeError();
- TypedefNameDecl *ToDecl = *ToDeclOrErr;
- if (ToDecl->getTypeForDecl())
- return QualType(ToDecl->getTypeForDecl(), 0);
+ auto ToQualifierOrErr = import(T->getQualifier());
+ if (!ToQualifierOrErr)
+ return ToQualifierOrErr.takeError();
- ExpectedType ToUnderlyingTypeOrErr = import(T->desugar());
+ ExpectedType ToUnderlyingTypeOrErr =
+ T->typeMatchesDecl() ? QualType() : import(T->desugar());
if (!ToUnderlyingTypeOrErr)
return ToUnderlyingTypeOrErr.takeError();
- return Importer.getToContext().getTypedefType(ToDecl, *ToUnderlyingTypeOrErr);
+ return Importer.getToContext().getTypedefType(
+ T->getKeyword(), *ToQualifierOrErr, *ToDeclOrErr, *ToUnderlyingTypeOrErr);
}
ExpectedType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
@@ -1658,14 +1665,14 @@ ExpectedType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
}
ExpectedType ASTNodeImporter::VisitUsingType(const UsingType *T) {
- Expected<UsingShadowDecl *> FoundOrErr = import(T->getFoundDecl());
- if (!FoundOrErr)
- return FoundOrErr.takeError();
- Expected<QualType> UnderlyingOrErr = import(T->getUnderlyingType());
- if (!UnderlyingOrErr)
- return UnderlyingOrErr.takeError();
-
- return Importer.getToContext().getUsingType(*FoundOrErr, *UnderlyingOrErr);
+ Error Err = Error::success();
+ auto ToQualifier = importChecked(Err, T->getQualifier());
+ auto *ToD = importChecked(Err, T->getDecl());
+ QualType ToT = importChecked(Err, T->desugar());
+ if (Err)
+ return std::move(Err);
+ return Importer.getToContext().getUsingType(T->getKeyword(), ToQualifier, ToD,
+ ToT);
}
ExpectedType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
@@ -1728,36 +1735,37 @@ ExpectedType ASTNodeImporter::VisitDeducedTemplateSpecializationType(
return ToDeducedTypeOrErr.takeError();
return Importer.getToContext().getDeducedTemplateSpecializationType(
- *ToTemplateNameOrErr, *ToDeducedTypeOrErr, T->isDependentType());
+ T->getKeyword(), *ToTemplateNameOrErr, *ToDeducedTypeOrErr,
+ T->isDependentType());
}
-ExpectedType ASTNodeImporter::VisitInjectedClassNameType(
- const InjectedClassNameType *T) {
- Expected<CXXRecordDecl *> ToDeclOrErr = import(T->getDecl());
+ExpectedType ASTNodeImporter::VisitTagType(const TagType *T) {
+ Expected<TagDecl *> ToDeclOrErr = import(T->getOriginalDecl());
if (!ToDeclOrErr)
return ToDeclOrErr.takeError();
- // The InjectedClassNameType is created in VisitRecordDecl when the
- // T->getDecl() is imported. Here we can return the existing type.
- const Type *Ty = (*ToDeclOrErr)->getTypeForDecl();
- assert(isa_and_nonnull<InjectedClassNameType>(Ty));
- return QualType(Ty, 0);
-}
+ if (T->isCanonicalUnqualified())
+ return Importer.getToContext().getCanonicalTagType(*ToDeclOrErr);
-ExpectedType ASTNodeImporter::VisitRecordType(const RecordType *T) {
- Expected<RecordDecl *> ToDeclOrErr = import(T->getDecl());
- if (!ToDeclOrErr)
- return ToDeclOrErr.takeError();
+ auto ToQualifierOrErr = import(T->getQualifier());
+ if (!ToQualifierOrErr)
+ return ToQualifierOrErr.takeError();
- return Importer.getToContext().getTagDeclType(*ToDeclOrErr);
+ return Importer.getToContext().getTagType(T->getKeyword(), *ToQualifierOrErr,
+ *ToDeclOrErr, T->isTagOwned());
}
ExpectedType ASTNodeImporter::VisitEnumType(const EnumType *T) {
- Expected<EnumDecl *> ToDeclOrErr = import(T->getDecl());
- if (!ToDeclOrErr)
- return ToDeclOrErr.takeError();
+ return VisitTagType(T);
+}
- return Importer.getToContext().getTagDeclType(*ToDeclOrErr);
+ExpectedType ASTNodeImporter::VisitRecordType(const RecordType *T) {
+ return VisitTagType(T);
+}
+
+ExpectedType
+ASTNodeImporter::VisitInjectedClassNameType(const InjectedClassNameType *T) {
+ return VisitTagType(T);
}
ExpectedType ASTNodeImporter::VisitAttributedType(const AttributedType *T) {
@@ -1834,6 +1842,14 @@ ExpectedType ASTNodeImporter::VisitSubstTemplateTypeParmPackType(
*ReplacedOrErr, T->getIndex(), T->getFinal(), *ToArgumentPack);
}
+ExpectedType ASTNodeImporter::VisitSubstBuiltinTemplatePackType(
+ const SubstBuiltinTemplatePackType *T) {
+ Expected<TemplateArgument> ToArgumentPack = import(T->getArgumentPack());
+ if (!ToArgumentPack)
+ return ToArgumentPack.takeError();
+ return Importer.getToContext().getSubstBuiltinTemplatePack(*ToArgumentPack);
+}
+
ExpectedType ASTNodeImporter::VisitTemplateSpecializationType(
const TemplateSpecializationType *T) {
auto ToTemplateOrErr = import(T->getTemplateName());
@@ -1850,27 +1866,8 @@ ExpectedType ASTNodeImporter::VisitTemplateSpecializationType(
if (!ToUnderlyingOrErr)
return ToUnderlyingOrErr.takeError();
return Importer.getToContext().getTemplateSpecializationType(
- *ToTemplateOrErr, ToTemplateArgs, {}, *ToUnderlyingOrErr);
-}
-
-ExpectedType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) {
- // Note: the qualifier in an ElaboratedType is optional.
- auto ToQualifierOrErr = import(T->getQualifier());
- if (!ToQualifierOrErr)
- return ToQualifierOrErr.takeError();
-
- ExpectedType ToNamedTypeOrErr = import(T->getNamedType());
- if (!ToNamedTypeOrErr)
- return ToNamedTypeOrErr.takeError();
-
- Expected<TagDecl *> ToOwnedTagDeclOrErr = import(T->getOwnedTagDecl());
- if (!ToOwnedTagDeclOrErr)
- return ToOwnedTagDeclOrErr.takeError();
-
- return Importer.getToContext().getElaboratedType(T->getKeyword(),
- *ToQualifierOrErr,
- *ToNamedTypeOrErr,
- *ToOwnedTagDeclOrErr);
+ T->getKeyword(), *ToTemplateOrErr, ToTemplateArgs, {},
+ *ToUnderlyingOrErr);
}
ExpectedType
@@ -2168,7 +2165,7 @@ Error ASTNodeImporter::ImportDeclParts(
const Type *LeafT =
getLeafPointeeType(P->getType().getCanonicalType().getTypePtr());
auto *RT = dyn_cast<RecordType>(LeafT);
- if (RT && RT->getDecl() == D) {
+ if (RT && RT->getOriginalDecl() == D) {
Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
<< D->getDeclKindName();
return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
@@ -2421,8 +2418,8 @@ Error ASTNodeImporter::ImportFieldDeclDefinition(const FieldDecl *From,
const RecordType *RecordTo = ToType->getAs<RecordType>();
if (RecordFrom && RecordTo) {
- FromRecordDecl = RecordFrom->getDecl();
- ToRecordDecl = RecordTo->getDecl();
+ FromRecordDecl = RecordFrom->getOriginalDecl();
+ ToRecordDecl = RecordTo->getOriginalDecl();
}
}
@@ -2643,7 +2640,7 @@ Error ASTNodeImporter::ImportDefinition(
return Err;
ExpectedType ToTypeOrErr =
- import(Importer.getFromContext().getTypeDeclType(From));
+ import(QualType(Importer.getFromContext().getCanonicalTagType(From)));
if (!ToTypeOrErr)
return ToTypeOrErr.takeError();
@@ -3218,7 +3215,7 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
if (auto *Typedef = dyn_cast<TypedefNameDecl>(FoundDecl)) {
if (const auto *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
- FoundDecl = Tag->getDecl();
+ FoundDecl = Tag->getOriginalDecl();
}
if (auto *FoundEnum = dyn_cast<EnumDecl>(FoundDecl)) {
@@ -3349,7 +3346,7 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
Decl *Found = FoundDecl;
if (auto *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
if (const auto *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
- Found = Tag->getDecl();
+ Found = Tag->getOriginalDecl();
}
if (auto *FoundRecord = dyn_cast<RecordDecl>(Found)) {
@@ -3428,17 +3425,6 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
return CDeclOrErr.takeError();
Numbering.ContextDecl = *CDeclOrErr;
D2CXX->setLambdaNumbering(Numbering);
- } else if (DCXX->isInjectedClassName()) {
- // We have to be careful to do a similar dance to the one in
- // Sema::ActOnStartCXXMemberDeclarations
- const bool DelayTypeCreation = true;
- if (GetImportedOrCreateDecl(
- D2CXX, D, Importer.getToContext(), D->getTagKind(), DC,
- *BeginLocOrErr, Loc, Name.getAsIdentifierInfo(),
- cast_or_null<CXXRecordDecl>(PrevDecl), DelayTypeCreation))
- return D2CXX;
- Importer.getToContext().getTypeDeclType(
- D2CXX, dyn_cast<CXXRecordDecl>(DC));
} else {
if (GetImportedOrCreateDecl(D2CXX, D, Importer.getToContext(),
D->getTagKind(), DC, *BeginLocOrErr, Loc,
@@ -3458,51 +3444,6 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (Error Err = importInto(ToDescribed, FromDescribed))
return std::move(Err);
D2CXX->setDescribedClassTemplate(ToDescribed);
- if (!DCXX->isInjectedClassName() && !IsFriendTemplate) {
- // In a record describing a template the type should be an
- // InjectedClassNameType (see Sema::CheckClassTemplate). Update the
- // previously set type to the correct value here (ToDescribed is not
- // available at record creation).
- CXXRecordDecl *Injected = nullptr;
- for (NamedDecl *Found : D2CXX->noload_lookup(Name)) {
- auto *Record = dyn_cast<CXXRecordDecl>(Found);
- if (Record && Record->isInjectedClassName()) {
- Injected = Record;
- break;
- }
- }
- // Create an injected type for the whole redecl chain.
- // The chain may contain an already existing injected type at the start,
- // if so, it should be reused. We must ensure that only one type
- // object exists for the injected type (including the injected record
- // declaration); ASTContext does not check this.
- SmallVector<Decl *, 2> Redecls =
- getCanonicalForwardRedeclChain(D2CXX);
- const Type *FrontTy =
- cast<CXXRecordDecl>(Redecls.front())->getTypeForDecl();
- QualType InjSpec;
- if (auto *InjTy = FrontTy->getAs<InjectedClassNameType>())
- InjSpec = InjTy->getInjectedSpecializationType();
- else
- InjSpec = ToDescribed->getInjectedClassNameSpecialization();
- for (auto *R : Redecls) {
- auto *RI = cast<CXXRecordDecl>(R);
- if (R != Redecls.front() ||
- !isa<InjectedClassNameType>(RI->getTypeForDecl()))
- RI->setTypeForDecl(nullptr);
- // This function tries to get the injected type from getTypeForDecl,
- // then from the previous declaration if possible. If not, it creates
- // a new type.
- Importer.getToContext().getInjectedClassNameType(RI, InjSpec);
- }
- // Set the new type for the injected decl too.
- if (Injected) {
- Injected->setTypeForDecl(nullptr);
- // This function will copy the injected type from D2CXX into Injected.
- // The injected decl does not have a previous decl to copy from.
- Importer.getToContext().getTypeDeclType(Injected, D2CXX);
- }
- }
} else if (MemberSpecializationInfo *MemberInfo =
DCXX->getMemberSpecializationInfo()) {
TemplateSpecializationKind SK =
@@ -3826,11 +3767,12 @@ public:
}
std::optional<bool> VisitTagType(const TagType *T) {
- if (auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(T->getDecl()))
+ if (auto *Spec =
+ dyn_cast<ClassTemplateSpecializationDecl>(T->getOriginalDecl()))
for (const auto &Arg : Spec->getTemplateArgs().asArray())
if (checkTemplateArgument(Arg))
return true;
- return isAncestorDeclContextOf(ParentDC, T->getDecl());
+ return isAncestorDeclContextOf(ParentDC, T->getOriginalDecl());
}
std::optional<bool> VisitPointerType(const PointerType *T) {
@@ -3842,17 +3784,11 @@ public:
}
std::optional<bool> VisitTypedefType(const TypedefType *T) {
- const TypedefNameDecl *TD = T->getDecl();
- assert(TD);
- return isAncestorDeclContextOf(ParentDC, TD);
+ return isAncestorDeclContextOf(ParentDC, T->getDecl());
}
std::optional<bool> VisitUsingType(const UsingType *T) {
- if (T->getFoundDecl() &&
- isAncestorDeclContextOf(ParentDC, T->getFoundDecl()))
- return true;
-
- return {};
+ return isAncestorDeclContextOf(ParentDC, T->getDecl());
}
std::optional<bool>
@@ -6521,16 +6457,10 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
// Create the specialization.
ClassTemplateSpecializationDecl *D2 = nullptr;
if (PartialSpec) {
- QualType CanonInjType;
- if (Error Err = importInto(
- CanonInjType, PartialSpec->getInjectedSpecializationType()))
- return std::move(Err);
- CanonInjType = CanonInjType.getCanonicalType();
-
if (GetImportedOrCreateDecl<ClassTemplatePartialSpecializationDecl>(
D2, D, Importer.getToContext(), D->getTagKind(), DC, *BeginLocOrErr,
*IdLocOrErr, ToTPList, ClassTemplate, ArrayRef(TemplateArgs),
- CanonInjType,
+ /*CanonInjectedTST=*/CanQualType(),
cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl)))
return D2;
@@ -10050,46 +9980,34 @@ Expected<Stmt *> ASTImporter::Import(Stmt *FromS) {
return ToSOrErr;
}
-Expected<NestedNameSpecifier *>
-ASTImporter::Import(NestedNameSpecifier *FromNNS) {
- if (!FromNNS)
- return nullptr;
-
- NestedNameSpecifier *Prefix = nullptr;
- if (Error Err = importInto(Prefix, FromNNS->getPrefix()))
- return std::move(Err);
-
- switch (FromNNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- assert(FromNNS->getAsIdentifier() && "NNS should contain identifier.");
- return NestedNameSpecifier::Create(ToContext, Prefix,
- Import(FromNNS->getAsIdentifier()));
-
- case NestedNameSpecifier::Namespace:
- if (ExpectedDecl NSOrErr = Import(FromNNS->getAsNamespace())) {
- return NestedNameSpecifier::Create(ToContext, Prefix,
- cast<NamespaceBaseDecl>(*NSOrErr));
- } else
+Expected<NestedNameSpecifier> ASTImporter::Import(NestedNameSpecifier FromNNS) {
+ switch (FromNNS.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ return FromNNS;
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = FromNNS.getAsNamespaceAndPrefix();
+ auto NSOrErr = Import(Namespace);
+ if (!NSOrErr)
return NSOrErr.takeError();
-
- case NestedNameSpecifier::Global:
- return NestedNameSpecifier::GlobalSpecifier(ToContext);
-
- case NestedNameSpecifier::Super:
- if (ExpectedDecl RDOrErr = Import(FromNNS->getAsRecordDecl()))
- return NestedNameSpecifier::SuperSpecifier(ToContext,
- cast<CXXRecordDecl>(*RDOrErr));
+ auto PrefixOrErr = Import(Prefix);
+ if (!PrefixOrErr)
+ return PrefixOrErr.takeError();
+ return NestedNameSpecifier(ToContext, cast<NamespaceBaseDecl>(*NSOrErr),
+ *PrefixOrErr);
+ }
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ if (ExpectedDecl RDOrErr = Import(FromNNS.getAsMicrosoftSuper()))
+ return NestedNameSpecifier(cast<CXXRecordDecl>(*RDOrErr));
else
return RDOrErr.takeError();
-
- case NestedNameSpecifier::TypeSpec:
- if (ExpectedTypePtr TyOrErr = Import(FromNNS->getAsType())) {
- return NestedNameSpecifier::Create(ToContext, Prefix, *TyOrErr);
+ case NestedNameSpecifier::Kind::Type:
+ if (ExpectedTypePtr TyOrErr = Import(FromNNS.getAsType())) {
+ return NestedNameSpecifier(*TyOrErr);
} else {
return TyOrErr.takeError();
}
}
-
llvm_unreachable("Invalid nested name specifier kind");
}
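
Import now traffics in NestedNameSpecifier by value: the null and global kinds carry no declarations and are returned unchanged, while a namespace specifier is a (declaration, prefix) pair that gets imported component-wise. A minimal sketch of consuming this value API, using only the accessors visible in the hunk above:

    // Count the namespace components of a specifier chain (sketch only).
    static unsigned countNamespaceComponents(NestedNameSpecifier NNS) {
      unsigned N = 0;
      while (NNS.getKind() == NestedNameSpecifier::Kind::Namespace) {
        auto [Namespace, Prefix] = NNS.getAsNamespaceAndPrefix();
        (void)Namespace;
        ++N;
        NNS = Prefix; // the prefix is again a value, eventually Null or Global
      }
      return N;
    }
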
@@ -10103,64 +10021,62 @@ ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
// serialization in reverse order.
while (NNS) {
NestedNames.push_back(NNS);
- NNS = NNS.getPrefix();
+ NNS = NNS.getAsNamespaceAndPrefix().Prefix;
}
NestedNameSpecifierLocBuilder Builder;
while (!NestedNames.empty()) {
NNS = NestedNames.pop_back_val();
- NestedNameSpecifier *Spec = nullptr;
+ NestedNameSpecifier Spec = std::nullopt;
if (Error Err = importInto(Spec, NNS.getNestedNameSpecifier()))
return std::move(Err);
- NestedNameSpecifier::SpecifierKind Kind = Spec->getKind();
+ NestedNameSpecifier::Kind Kind = Spec.getKind();
SourceLocation ToLocalBeginLoc, ToLocalEndLoc;
- if (Kind != NestedNameSpecifier::Super) {
+ if (Kind != NestedNameSpecifier::Kind::MicrosoftSuper) {
if (Error Err = importInto(ToLocalBeginLoc, NNS.getLocalBeginLoc()))
return std::move(Err);
- if (Kind != NestedNameSpecifier::Global)
+ if (Kind != NestedNameSpecifier::Kind::Global)
if (Error Err = importInto(ToLocalEndLoc, NNS.getLocalEndLoc()))
return std::move(Err);
}
switch (Kind) {
- case NestedNameSpecifier::Identifier:
- Builder.Extend(getToContext(), Spec->getAsIdentifier(), ToLocalBeginLoc,
- ToLocalEndLoc);
- break;
-
- case NestedNameSpecifier::Namespace:
- Builder.Extend(getToContext(), Spec->getAsNamespace(), ToLocalBeginLoc,
- ToLocalEndLoc);
+ case NestedNameSpecifier::Kind::Namespace:
+ Builder.Extend(getToContext(), Spec.getAsNamespaceAndPrefix().Namespace,
+ ToLocalBeginLoc, ToLocalEndLoc);
break;
- case NestedNameSpecifier::TypeSpec: {
+ case NestedNameSpecifier::Kind::Type: {
SourceLocation ToTLoc;
- if (Error Err = importInto(ToTLoc, NNS.getTypeLoc().getBeginLoc()))
+ if (Error Err = importInto(ToTLoc, NNS.castAsTypeLoc().getBeginLoc()))
return std::move(Err);
TypeSourceInfo *TSI = getToContext().getTrivialTypeSourceInfo(
- QualType(Spec->getAsType(), 0), ToTLoc);
- Builder.Extend(getToContext(), TSI->getTypeLoc(), ToLocalEndLoc);
+ QualType(Spec.getAsType(), 0), ToTLoc);
+ Builder.Make(getToContext(), TSI->getTypeLoc(), ToLocalEndLoc);
break;
}
- case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Kind::Global:
Builder.MakeGlobal(getToContext(), ToLocalBeginLoc);
break;
- case NestedNameSpecifier::Super: {
+ case NestedNameSpecifier::Kind::MicrosoftSuper: {
auto ToSourceRangeOrErr = Import(NNS.getSourceRange());
if (!ToSourceRangeOrErr)
return ToSourceRangeOrErr.takeError();
- Builder.MakeSuper(getToContext(), Spec->getAsRecordDecl(),
- ToSourceRangeOrErr->getBegin(),
- ToSourceRangeOrErr->getEnd());
+ Builder.MakeMicrosoftSuper(getToContext(), Spec.getAsMicrosoftSuper(),
+ ToSourceRangeOrErr->getBegin(),
+ ToSourceRangeOrErr->getEnd());
+ break;
+ }
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
- }
}
return Builder.getWithLocInContext(getToContext());
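
The loc import still flattens the chain and rebuilds it innermost-first through NestedNameSpecifierLocBuilder; what changed is how the prefix is reached (getAsNamespaceAndPrefix().Prefix) and how a type component exposes its location (castAsTypeLoc). The flatten step, condensed into a sketch using the same accessors as the loop above:

    // Collect a NestedNameSpecifierLoc chain from outermost to innermost.
    llvm::SmallVector<NestedNameSpecifierLoc, 8>
    flattenChain(NestedNameSpecifierLoc NNS) {
      llvm::SmallVector<NestedNameSpecifierLoc, 8> Chain;
      for (; NNS; NNS = NNS.getAsNamespaceAndPrefix().Prefix)
        Chain.push_back(NNS);
      return Chain;
    }
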
@@ -10740,7 +10656,7 @@ ASTNodeImporter::ImportAPValue(const APValue &FromValue) {
if (Err)
return std::move(Err);
if (auto *RD = dyn_cast<CXXRecordDecl>(FromDecl))
- FromElemTy = Importer.FromContext.getRecordType(RD);
+ FromElemTy = Importer.FromContext.getCanonicalTagType(RD);
else
FromElemTy = cast<ValueDecl>(FromDecl)->getType();
ToPath[LoopIdx] = APValue::LValuePathEntry(APValue::BaseOrMemberType(
diff --git a/clang/lib/AST/ASTImporterLookupTable.cpp b/clang/lib/AST/ASTImporterLookupTable.cpp
index 4ed3198..29c3af1 100644
--- a/clang/lib/AST/ASTImporterLookupTable.cpp
+++ b/clang/lib/AST/ASTImporterLookupTable.cpp
@@ -49,8 +49,6 @@ struct Builder : RecursiveASTVisitor<Builder> {
bool VisitFriendDecl(FriendDecl *D) {
if (D->getFriendType()) {
QualType Ty = D->getFriendType()->getType();
- if (isa<ElaboratedType>(Ty))
- Ty = cast<ElaboratedType>(Ty)->getNamedType();
// A FriendDecl with a dependent type (e.g. ClassTemplateSpecialization)
// always has that decl as child node.
// However, there are non-dependent cases which do not have the
@@ -64,13 +62,15 @@ struct Builder : RecursiveASTVisitor<Builder> {
dyn_cast<SubstTemplateTypeParmType>(Ty)) {
if (SubstTy->getAsCXXRecordDecl())
LT.add(SubstTy->getAsCXXRecordDecl());
- } else if (isa<TypedefType>(Ty)) {
- // We do not put friend typedefs to the lookup table because
- // ASTImporter does not organize typedefs into redecl chains.
- } else if (isa<UsingType>(Ty)) {
- // Similar to TypedefType, not putting into lookup table.
} else {
- llvm_unreachable("Unhandled type of friend class");
+ if (isa<TypedefType>(Ty)) {
+ // We do not put friend typedefs into the lookup table because
+ // ASTImporter does not organize typedefs into redecl chains.
+ } else if (isa<UsingType>(Ty)) {
+ // Similar to TypedefType, not putting into lookup table.
+ } else {
+ llvm_unreachable("Unhandled type of friend class");
+ }
}
}
}
diff --git a/clang/lib/AST/ASTStructuralEquivalence.cpp b/clang/lib/AST/ASTStructuralEquivalence.cpp
index 0e2023f..1292c30 100644
--- a/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -110,8 +110,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
const TemplateArgumentLoc &Arg1,
const TemplateArgumentLoc &Arg2);
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
- NestedNameSpecifier *NNS1,
- NestedNameSpecifier *NNS2);
+ NestedNameSpecifier NNS1,
+ NestedNameSpecifier NNS2);
static bool IsStructurallyEquivalent(const IdentifierInfo *Name1,
const IdentifierInfo *Name2);
@@ -579,35 +579,30 @@ static bool IsStructurallyEquivalent(const IdentifierInfo *Name1,
/// Determine whether two nested-name-specifiers are equivalent.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
- NestedNameSpecifier *NNS1,
- NestedNameSpecifier *NNS2) {
- if (NNS1->getKind() != NNS2->getKind())
- return false;
-
- NestedNameSpecifier *Prefix1 = NNS1->getPrefix(),
- *Prefix2 = NNS2->getPrefix();
- if ((bool)Prefix1 != (bool)Prefix2)
- return false;
-
- if (Prefix1)
- if (!IsStructurallyEquivalent(Context, Prefix1, Prefix2))
- return false;
-
- switch (NNS1->getKind()) {
- case NestedNameSpecifier::Identifier:
- return IsStructurallyEquivalent(NNS1->getAsIdentifier(),
- NNS2->getAsIdentifier());
- case NestedNameSpecifier::Namespace:
- return IsStructurallyEquivalent(Context, NNS1->getAsNamespace(),
- NNS2->getAsNamespace());
- case NestedNameSpecifier::TypeSpec:
- return IsStructurallyEquivalent(Context, QualType(NNS1->getAsType(), 0),
- QualType(NNS2->getAsType(), 0));
- case NestedNameSpecifier::Global:
+ NestedNameSpecifier NNS1,
+ NestedNameSpecifier NNS2) {
+ auto Kind = NNS1.getKind();
+ if (Kind != NNS2.getKind())
+ return false;
+ switch (Kind) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
return true;
- case NestedNameSpecifier::Super:
- return IsStructurallyEquivalent(Context, NNS1->getAsRecordDecl(),
- NNS2->getAsRecordDecl());
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace1, Prefix1] = NNS1.getAsNamespaceAndPrefix();
+ auto [Namespace2, Prefix2] = NNS2.getAsNamespaceAndPrefix();
+ if (!IsStructurallyEquivalent(Context,
+ const_cast<NamespaceBaseDecl *>(Namespace1),
+ const_cast<NamespaceBaseDecl *>(Namespace2)))
+ return false;
+ return IsStructurallyEquivalent(Context, Prefix1, Prefix2);
+ }
+ case NestedNameSpecifier::Kind::Type:
+ return IsStructurallyEquivalent(Context, QualType(NNS1.getAsType(), 0),
+ QualType(NNS2.getAsType(), 0));
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ return IsStructurallyEquivalent(Context, NNS1.getAsMicrosoftSuper(),
+ NNS2.getAsMicrosoftSuper());
}
return false;
}
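
Prefix handling is folded into the Namespace case: the namespace decls must match first, then the comparison recurses on the two prefixes, and the recursion bottoms out at the Null and Global kinds, which are equivalent whenever their kinds match. The shape of that recursion, as a self-contained toy over a cons-style qualifier chain:

    #include <cassert>
    #include <memory>
    #include <string>

    // Toy qualifier chain compared the same way as the Namespace case
    // above: head first, then recurse on the prefix; null ends the chain.
    struct Qual {
      std::string Name;
      std::shared_ptr<Qual> Prefix;
    };

    static bool equivalent(const Qual *A, const Qual *B) {
      if (!A || !B)
        return A == B; // both null: equivalent; only one null: not
      if (A->Name != B->Name)
        return false;
      return equivalent(A->Prefix.get(), B->Prefix.get());
    }

    int main() {
      auto Outer = std::make_shared<Qual>(Qual{"outer", nullptr});
      Qual A{"inner", Outer}, B{"inner", Outer};
      assert(equivalent(&A, &B));
    }
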
@@ -615,9 +610,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
const DependentTemplateStorage &S1,
const DependentTemplateStorage &S2) {
- if (NestedNameSpecifier *NNS1 = S1.getQualifier(), *NNS2 = S2.getQualifier();
- !NNS1 != !NNS2 ||
- (NNS1 && !IsStructurallyEquivalent(Context, NNS1, NNS2)))
+ if (!IsStructurallyEquivalent(Context, S1.getQualifier(), S2.getQualifier()))
return false;
IdentifierOrOverloadedOperator IO1 = S1.getName(), IO2 = S2.getName();
@@ -885,10 +878,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
// Treat the enumeration as its underlying type and use the builtin type
// class comparison.
if (T1->getTypeClass() == Type::Enum) {
- T1 = T1->getAs<EnumType>()->getDecl()->getIntegerType();
+ T1 = cast<EnumType>(T1)->getOriginalDecl()->getIntegerType();
assert(T2->isBuiltinType() && !T1.isNull()); // Sanity check
} else if (T2->getTypeClass() == Type::Enum) {
- T2 = T2->getAs<EnumType>()->getDecl()->getIntegerType();
+ T2 = cast<EnumType>(T2)->getOriginalDecl()->getIntegerType();
assert(T1->isBuiltinType() && !T2.isNull()); // Sanity check
}
TC = Type::Builtin;
@@ -1206,23 +1199,35 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
- case Type::Using:
- if (!IsStructurallyEquivalent(Context, cast<UsingType>(T1)->getFoundDecl(),
- cast<UsingType>(T2)->getFoundDecl()))
+ case Type::Using: {
+ auto *U1 = cast<UsingType>(T1), *U2 = cast<UsingType>(T2);
+ if (U1->getKeyword() != U2->getKeyword())
return false;
- if (!IsStructurallyEquivalent(Context,
- cast<UsingType>(T1)->getUnderlyingType(),
- cast<UsingType>(T2)->getUnderlyingType()))
+ if (!IsStructurallyEquivalent(Context, U1->getQualifier(),
+ U2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Context, U1->getDecl(), U2->getDecl()))
+ return false;
+ if (!IsStructurallyEquivalent(Context, U1->desugar(), U2->desugar()))
return false;
break;
-
- case Type::Typedef:
- if (!IsStructurallyEquivalent(Context, cast<TypedefType>(T1)->getDecl(),
- cast<TypedefType>(T2)->getDecl()) ||
- !IsStructurallyEquivalent(Context, cast<TypedefType>(T1)->desugar(),
- cast<TypedefType>(T2)->desugar()))
+ }
+ case Type::Typedef: {
+ auto *U1 = cast<TypedefType>(T1), *U2 = cast<TypedefType>(T2);
+ if (U1->getKeyword() != U2->getKeyword())
+ return false;
+ if (!IsStructurallyEquivalent(Context, U1->getQualifier(),
+ U2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Context, U1->getDecl(), U2->getDecl()))
+ return false;
+ if (U1->typeMatchesDecl() != U2->typeMatchesDecl())
+ return false;
+ if (!U1->typeMatchesDecl() &&
+ !IsStructurallyEquivalent(Context, U1->desugar(), U2->desugar()))
return false;
break;
+ }
case Type::TypeOfExpr:
if (!IsStructurallyEquivalent(
@@ -1286,10 +1291,20 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
case Type::Record:
case Type::Enum:
- if (!IsStructurallyEquivalent(Context, cast<TagType>(T1)->getDecl(),
- cast<TagType>(T2)->getDecl()))
+ case Type::InjectedClassName: {
+ const auto *TT1 = cast<TagType>(T1), *TT2 = cast<TagType>(T2);
+ if (TT1->getKeyword() != TT2->getKeyword())
+ return false;
+ if (TT1->isTagOwned() != TT2->isTagOwned())
+ return false;
+ if (!IsStructurallyEquivalent(Context, TT1->getQualifier(),
+ TT2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Context, TT1->getOriginalDecl(),
+ TT2->getOriginalDecl()))
return false;
break;
+ }
case Type::TemplateTypeParm: {
const auto *Parm1 = cast<TemplateTypeParmType>(T1);
@@ -1322,6 +1337,14 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
break;
}
+ case Type::SubstBuiltinTemplatePack: {
+ const auto *Subst1 = cast<SubstBuiltinTemplatePackType>(T1);
+ const auto *Subst2 = cast<SubstBuiltinTemplatePackType>(T2);
+ if (!IsStructurallyEquivalent(Context, Subst1->getArgumentPack(),
+ Subst2->getArgumentPack()))
+ return false;
+ break;
+ }
case Type::SubstTemplateTypeParmPack: {
const auto *Subst1 = cast<SubstTemplateTypeParmPackType>(T1);
const auto *Subst2 = cast<SubstTemplateTypeParmPackType>(T2);
@@ -1348,33 +1371,6 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
break;
}
- case Type::Elaborated: {
- const auto *Elab1 = cast<ElaboratedType>(T1);
- const auto *Elab2 = cast<ElaboratedType>(T2);
- // CHECKME: what if a keyword is ElaboratedTypeKeyword::None or
- // ElaboratedTypeKeyword::Typename
- // ?
- if (Elab1->getKeyword() != Elab2->getKeyword())
- return false;
- if (!IsStructurallyEquivalent(Context, Elab1->getQualifier(),
- Elab2->getQualifier()))
- return false;
- if (!IsStructurallyEquivalent(Context, Elab1->getNamedType(),
- Elab2->getNamedType()))
- return false;
- break;
- }
-
- case Type::InjectedClassName: {
- const auto *Inj1 = cast<InjectedClassNameType>(T1);
- const auto *Inj2 = cast<InjectedClassNameType>(T2);
- if (!IsStructurallyEquivalent(Context,
- Inj1->getInjectedSpecializationType(),
- Inj2->getInjectedSpecializationType()))
- return false;
- break;
- }
-
case Type::DependentName: {
const auto *Typename1 = cast<DependentNameType>(T1);
const auto *Typename2 = cast<DependentNameType>(T2);
@@ -1549,8 +1545,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
// types
if (Field1->isAnonymousStructOrUnion() &&
Field2->isAnonymousStructOrUnion()) {
- RecordDecl *D1 = Field1->getType()->castAs<RecordType>()->getDecl();
- RecordDecl *D2 = Field2->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *D1 = Field1->getType()->castAs<RecordType>()->getOriginalDecl();
+ RecordDecl *D2 = Field2->getType()->castAs<RecordType>()->getOriginalDecl();
return IsStructurallyEquivalent(Context, D1, D2);
}
@@ -1628,7 +1624,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
FieldDecl *Field1, FieldDecl *Field2) {
const auto *Owner2 = cast<RecordDecl>(Field2->getDeclContext());
return IsStructurallyEquivalent(Context, Field1, Field2,
- Context.ToCtx.getTypeDeclType(Owner2));
+ Context.ToCtx.getCanonicalTagType(Owner2));
}
/// Determine structural equivalence of two methods.
@@ -1801,7 +1797,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Context.Complain) {
Context.Diag2(D2->getLocation(), Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag1(D1->getLocation(), diag::note_odr_tag_kind_here)
<< D1->getDeclName() << (unsigned)D1->getTagKind();
@@ -1903,7 +1899,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2(D2->getLocation(), diag::note_odr_number_of_bases)
<< D2CXX->getNumBases();
@@ -1924,7 +1920,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2(Base2->getBeginLoc(), diag::note_odr_base)
<< Base2->getType() << Base2->getSourceRange();
@@ -1940,7 +1936,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2(Base2->getBeginLoc(), diag::note_odr_virtual_base)
<< Base2->isVirtual() << Base2->getSourceRange();
@@ -1962,7 +1958,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2CXX)
+ << Context.ToCtx.getCanonicalTagType(D2CXX)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag1((*Friend1)->getFriendLoc(), diag::note_odr_friend);
Context.Diag2(D2->getLocation(), diag::note_odr_missing_friend);
@@ -1975,7 +1971,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2CXX)
+ << Context.ToCtx.getCanonicalTagType(D2CXX)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag1((*Friend1)->getFriendLoc(), diag::note_odr_friend);
Context.Diag2((*Friend2)->getFriendLoc(), diag::note_odr_friend);
@@ -1989,7 +1985,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2((*Friend2)->getFriendLoc(), diag::note_odr_friend);
Context.Diag1(D1->getLocation(), diag::note_odr_missing_friend);
@@ -2001,7 +1997,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
const CXXBaseSpecifier *Base1 = D1CXX->bases_begin();
Context.Diag1(Base1->getBeginLoc(), diag::note_odr_base)
@@ -2013,7 +2009,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
// Check the fields for consistency.
- QualType D2Type = Context.ToCtx.getTypeDeclType(D2);
+ CanQualType D2Type = Context.ToCtx.getCanonicalTagType(D2);
RecordDecl::field_iterator Field2 = D2->field_begin(),
Field2End = D2->field_end();
for (RecordDecl::field_iterator Field1 = D1->field_begin(),
@@ -2024,7 +2020,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag1(Field1->getLocation(), diag::note_odr_field)
<< Field1->getDeclName() << Field1->getType();
@@ -2041,7 +2037,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Context.Complain) {
Context.Diag2(D2->getLocation(), Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2(Field2->getLocation(), diag::note_odr_field)
<< Field2->getDeclName() << Field2->getType();
@@ -2101,7 +2097,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag1(D1->getLocation(),
D1->isFixed()
@@ -2124,7 +2120,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2(D2->getLocation(),
diag::note_odr_incompatible_fixed_underlying_type)
@@ -2162,7 +2158,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag1((*EC1)->getLocation(), diag::note_odr_enumerator)
<< (*EC1)->getDeclName() << toString((*EC1)->getInitVal(), 10);
@@ -2180,7 +2176,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2((*EC2)->getLocation(), diag::note_odr_enumerator)
<< (*EC2)->getDeclName() << toString((*EC2)->getInitVal(), 10);
@@ -2198,7 +2194,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Context.Complain) {
Context.Diag2(D2->getLocation(), Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2((*EC2)->getLocation(), diag::note_odr_enumerator)
<< (*EC2)->getDeclName() << toString((*EC2)->getInitVal(), 10);
@@ -2595,7 +2591,7 @@ DiagnosticBuilder StructuralEquivalenceContext::Diag2(SourceLocation Loc,
UnsignedOrNone
StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(RecordDecl *Anon) {
ASTContext &Context = Anon->getASTContext();
- QualType AnonTy = Context.getRecordType(Anon);
+ CanQualType AnonTy = Context.getCanonicalTagType(Anon);
const auto *Owner = dyn_cast<RecordDecl>(Anon->getDeclContext());
if (!Owner)
@@ -2617,12 +2613,8 @@ StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(RecordDecl *Anon) {
// If the field looks like this:
// struct { ... } A;
QualType FieldType = F->getType();
- // In case of nested structs.
- while (const auto *ElabType = dyn_cast<ElaboratedType>(FieldType))
- FieldType = ElabType->getNamedType();
-
if (const auto *RecType = dyn_cast<RecordType>(FieldType)) {
- const RecordDecl *RecDecl = RecType->getDecl();
+ const RecordDecl *RecDecl = RecType->getOriginalDecl();
if (RecDecl->getDeclContext() == Owner && !RecDecl->getIdentifier()) {
if (Context.hasSameType(FieldType, AnonTy))
break;
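
Two recurring substitutions in this file follow from retiring ElaboratedType as a separate node: sugar no longer needs to be peeled off before a dyn_cast<RecordType>, and diagnostics that used getTypeDeclType/getRecordType now ask for the canonical tag type. A sketch of the new shape, with FieldType, Ctx, and TagD standing in for the locals at each call site:

    // No ElaboratedType unwrapping loop is needed anymore:
    if (const auto *RT = dyn_cast<RecordType>(FieldType)) {
      const RecordDecl *RD = RT->getOriginalDecl();
      // ...
    }
    // And a printable type for a tag decl comes from the context:
    CanQualType T = Ctx.getCanonicalTagType(TagD);
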
diff --git a/clang/lib/AST/ASTTypeTraits.cpp b/clang/lib/AST/ASTTypeTraits.cpp
index 99916f5..d2f7fdb 100644
--- a/clang/lib/AST/ASTTypeTraits.cpp
+++ b/clang/lib/AST/ASTTypeTraits.cpp
@@ -194,8 +194,8 @@ void DynTypedNode::print(llvm::raw_ostream &OS,
else if (const NestedNameSpecifier *NNS = get<NestedNameSpecifier>())
NNS->print(OS, PP);
else if (const NestedNameSpecifierLoc *NNSL = get<NestedNameSpecifierLoc>()) {
- if (const NestedNameSpecifier *NNS = NNSL->getNestedNameSpecifier())
- NNS->print(OS, PP);
+ if (NestedNameSpecifier NNS = NNSL->getNestedNameSpecifier())
+ NNS.print(OS, PP);
else
OS << "(empty NestedNameSpecifierLoc)";
} else if (const QualType *QT = get<QualType>())
@@ -234,13 +234,39 @@ void DynTypedNode::dump(llvm::raw_ostream &OS,
OS << "Unable to dump values of type " << NodeKind.asStringRef() << "\n";
}
-SourceRange DynTypedNode::getSourceRange() const {
+SourceRange DynTypedNode::getSourceRange(bool IncludeQualifier) const {
if (const CXXCtorInitializer *CCI = get<CXXCtorInitializer>())
return CCI->getSourceRange();
if (const NestedNameSpecifierLoc *NNSL = get<NestedNameSpecifierLoc>())
return NNSL->getSourceRange();
- if (const TypeLoc *TL = get<TypeLoc>())
- return TL->getSourceRange();
+ if (const TypeLoc *TL = get<TypeLoc>()) {
+ if (IncludeQualifier)
+ return TL->getSourceRange();
+ switch (TL->getTypeLocClass()) {
+ case TypeLoc::DependentName:
+ return TL->castAs<DependentNameTypeLoc>().getNameLoc();
+ case TypeLoc::TemplateSpecialization: {
+ auto T = TL->castAs<TemplateSpecializationTypeLoc>();
+ return SourceRange(T.getTemplateNameLoc(), T.getEndLoc());
+ }
+ case TypeLoc::DependentTemplateSpecialization: {
+ auto T = TL->castAs<DependentTemplateSpecializationTypeLoc>();
+ return SourceRange(T.getTemplateNameLoc(), T.getEndLoc());
+ }
+ case TypeLoc::Enum:
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName:
+ return TL->castAs<TagTypeLoc>().getNameLoc();
+ case TypeLoc::Typedef:
+ return TL->castAs<TypedefTypeLoc>().getNameLoc();
+ case TypeLoc::UnresolvedUsing:
+ return TL->castAs<UnresolvedUsingTypeLoc>().getNameLoc();
+ case TypeLoc::Using:
+ return TL->castAs<UsingTypeLoc>().getNameLoc();
+ default:
+ return TL->getSourceRange();
+ }
+ }
if (const Decl *D = get<Decl>())
return D->getSourceRange();
if (const Stmt *S = get<Stmt>())
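
Since qualifiers now live on the type node itself, a TypeLoc's full range covers the `ns::` part as well; the new IncludeQualifier parameter lets callers opt out and get just the name's range for the listed classes. Sketch of the distinction for a hypothetical DynTypedNode `Node` wrapping a TypeLoc spelled `ns::Foo`:

    SourceRange Full = Node.getSourceRange(/*IncludeQualifier=*/true);  // ns::Foo
    SourceRange Name = Node.getSourceRange(/*IncludeQualifier=*/false); // Foo
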
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index cc99efa..56552f3 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -60,16 +60,18 @@ template <class Emitter> class OptionScope final {
public:
/// Root constructor, compiling or discarding primitives.
OptionScope(Compiler<Emitter> *Ctx, bool NewDiscardResult,
- bool NewInitializing)
+ bool NewInitializing, bool NewToLValue)
: Ctx(Ctx), OldDiscardResult(Ctx->DiscardResult),
- OldInitializing(Ctx->Initializing) {
+ OldInitializing(Ctx->Initializing), OldToLValue(Ctx->ToLValue) {
Ctx->DiscardResult = NewDiscardResult;
Ctx->Initializing = NewInitializing;
+ Ctx->ToLValue = NewToLValue;
}
~OptionScope() {
Ctx->DiscardResult = OldDiscardResult;
Ctx->Initializing = OldInitializing;
+ Ctx->ToLValue = OldToLValue;
}
private:
@@ -78,6 +80,7 @@ private:
/// Old discard flag to restore.
bool OldDiscardResult;
bool OldInitializing;
+ bool OldToLValue;
};
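
OptionScope is a plain RAII save/restore of the compiler's per-visit flags; this patch only threads one more flag (ToLValue) through it. The pattern in isolation, as a runnable toy:

    #include <cassert>

    struct Flags { bool Discard = false, Init = false, ToLValue = false; };

    // Snapshot the flags, install new values, restore on scope exit,
    // so nested visits cannot leak flag state into their callers.
    class FlagScope {
    public:
      FlagScope(Flags &F, Flags New) : F(F), Old(F) { F = New; }
      ~FlagScope() { F = Old; }

    private:
      Flags &F;
      Flags Old;
    };

    int main() {
      Flags F;
      {
        FlagScope S(F, {/*Discard=*/true, /*Init=*/false, /*ToLValue=*/true});
        assert(F.Discard && F.ToLValue);
      }
      assert(!F.Discard && !F.ToLValue); // restored on scope exit
    }
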
template <class Emitter>
@@ -222,6 +225,9 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
switch (CE->getCastKind()) {
case CK_LValueToRValue: {
+ if (ToLValue && CE->getType()->isPointerType())
+ return this->delegate(SubExpr);
+
if (SubExpr->getType().isVolatileQualified())
return this->emitInvalidCast(CastKind::Volatile, /*Fatal=*/true, CE);
@@ -250,7 +256,7 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
// Prepare storage for the result.
if (!Initializing && !SubExprT) {
- std::optional<unsigned> LocalIndex = allocateLocal(SubExpr);
+ UnsignedOrNone LocalIndex = allocateLocal(SubExpr);
if (!LocalIndex)
return false;
if (!this->emitGetPtrLocal(*LocalIndex, CE))
@@ -553,9 +559,9 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
// Possibly diagnose casts to enum types if the target type does not
// have a fixed size.
if (Ctx.getLangOpts().CPlusPlus && CE->getType()->isEnumeralType()) {
- if (const auto *ET = CE->getType().getCanonicalType()->castAs<EnumType>();
- !ET->getDecl()->isFixed()) {
- if (!this->emitCheckEnumValue(*FromT, ET->getDecl(), CE))
+ const auto *ED = CE->getType()->castAsEnumDecl();
+ if (!ED->isFixed()) {
+ if (!this->emitCheckEnumValue(*FromT, ED, CE))
return false;
}
}
@@ -602,7 +608,7 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
// We're creating a complex value here, so we need to
// allocate storage for it.
if (!Initializing) {
- std::optional<unsigned> LocalIndex = allocateTemporary(CE);
+ UnsignedOrNone LocalIndex = allocateTemporary(CE);
if (!LocalIndex)
return false;
if (!this->emitGetPtrLocal(*LocalIndex, CE))
@@ -626,7 +632,7 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
assert(CE->getType()->isAnyComplexType());
assert(SubExpr->getType()->isAnyComplexType());
if (!Initializing) {
- std::optional<unsigned> LocalIndex = allocateLocal(CE);
+ UnsignedOrNone LocalIndex = allocateLocal(CE);
if (!LocalIndex)
return false;
if (!this->emitGetPtrLocal(*LocalIndex, CE))
@@ -666,12 +672,12 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
}
case CK_VectorSplat: {
- assert(!classify(CE->getType()));
- assert(classify(SubExpr->getType()));
+ assert(!canClassify(CE->getType()));
+ assert(canClassify(SubExpr->getType()));
assert(CE->getType()->isVectorType());
if (!Initializing) {
- std::optional<unsigned> LocalIndex = allocateLocal(CE);
+ UnsignedOrNone LocalIndex = allocateLocal(CE);
if (!LocalIndex)
return false;
if (!this->emitGetPtrLocal(*LocalIndex, CE))
@@ -715,7 +721,7 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
assert(CE->getType()->isVectorType());
if (!Initializing) {
- std::optional<unsigned> LocalIndex = allocateTemporary(CE);
+ UnsignedOrNone LocalIndex = allocateTemporary(CE);
if (!LocalIndex)
return false;
if (!this->emitGetPtrLocal(*LocalIndex, CE))
@@ -803,7 +809,7 @@ bool Compiler<Emitter>::VisitImaginaryLiteral(const ImaginaryLiteral *E) {
return true;
if (!Initializing) {
- std::optional<unsigned> LocalIndex = allocateTemporary(E);
+ UnsignedOrNone LocalIndex = allocateTemporary(E);
if (!LocalIndex)
return false;
if (!this->emitGetPtrLocal(*LocalIndex, E))
@@ -904,7 +910,7 @@ bool Compiler<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) {
// We need a temporary variable holding our return value.
if (!Initializing) {
- std::optional<unsigned> ResultIndex = this->allocateLocal(BO);
+ UnsignedOrNone ResultIndex = this->allocateLocal(BO);
if (!this->emitGetPtrLocal(*ResultIndex, BO))
return false;
}
@@ -936,7 +942,7 @@ bool Compiler<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) {
if (!Result)
return false;
if (DiscardResult)
- return this->emitPop(*T, BO);
+ return this->emitPopBool(BO);
if (T != PT_Bool)
return this->emitCast(PT_Bool, *T, BO);
return true;
@@ -1144,7 +1150,7 @@ template <class Emitter>
bool Compiler<Emitter>::VisitComplexBinOp(const BinaryOperator *E) {
// Prepare storage for result.
if (!Initializing) {
- std::optional<unsigned> LocalIndex = allocateTemporary(E);
+ UnsignedOrNone LocalIndex = allocateTemporary(E);
if (!LocalIndex)
return false;
if (!this->emitGetPtrLocal(*LocalIndex, E))
@@ -1203,7 +1209,7 @@ bool Compiler<Emitter>::VisitComplexBinOp(const BinaryOperator *E) {
if (!LHSIsComplex) {
// This is using the RHS type for the fake-complex LHS.
- std::optional<unsigned> LocalIndex = allocateTemporary(RHS);
+ UnsignedOrNone LocalIndex = allocateTemporary(RHS);
if (!LocalIndex)
return false;
LHSOffset = *LocalIndex;
@@ -1370,23 +1376,27 @@ bool Compiler<Emitter>::VisitComplexBinOp(const BinaryOperator *E) {
template <class Emitter>
bool Compiler<Emitter>::VisitVectorBinOp(const BinaryOperator *E) {
+ const Expr *LHS = E->getLHS();
+ const Expr *RHS = E->getRHS();
assert(!E->isCommaOp() &&
"Comma op should be handled in VisitBinaryOperator");
assert(E->getType()->isVectorType());
- assert(E->getLHS()->getType()->isVectorType());
- assert(E->getRHS()->getType()->isVectorType());
+ assert(LHS->getType()->isVectorType());
+ assert(RHS->getType()->isVectorType());
+
+ // We can only handle vectors with primitive element types.
+ if (!canClassify(LHS->getType()->castAs<VectorType>()->getElementType()))
+ return false;
// Prepare storage for result.
- if (!Initializing && !E->isCompoundAssignmentOp()) {
- std::optional<unsigned> LocalIndex = allocateTemporary(E);
+ if (!Initializing && !E->isCompoundAssignmentOp() && !E->isAssignmentOp()) {
+ UnsignedOrNone LocalIndex = allocateTemporary(E);
if (!LocalIndex)
return false;
if (!this->emitGetPtrLocal(*LocalIndex, E))
return false;
}
- const Expr *LHS = E->getLHS();
- const Expr *RHS = E->getRHS();
const auto *VecTy = E->getType()->getAs<VectorType>();
auto Op = E->isCompoundAssignmentOp()
? BinaryOperator::getOpForCompoundAssignment(E->getOpcode())
@@ -1396,6 +1406,21 @@ bool Compiler<Emitter>::VisitVectorBinOp(const BinaryOperator *E) {
PrimType RHSElemT = this->classifyVectorElementType(RHS->getType());
PrimType ResultElemT = this->classifyVectorElementType(E->getType());
+ if (E->getOpcode() == BO_Assign) {
+ assert(Ctx.getASTContext().hasSameUnqualifiedType(
+ LHS->getType()->castAs<VectorType>()->getElementType(),
+ RHS->getType()->castAs<VectorType>()->getElementType()));
+ if (!this->visit(LHS))
+ return false;
+ if (!this->visit(RHS))
+ return false;
+ if (!this->emitCopyArray(ElemT, 0, 0, VecTy->getNumElements(), E))
+ return false;
+ if (DiscardResult)
+ return this->emitPopPtr(E);
+ return true;
+ }
+
// Evaluate LHS and save value to LHSOffset.
unsigned LHSOffset =
this->allocateLocalPrimitive(LHS, PT_Ptr, /*IsConst=*/true);
@@ -1931,15 +1956,19 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
PrimType TargetT = classifyPrim(Init->getType());
- auto Eval = [&](const Expr *Init, unsigned ElemIndex) {
- PrimType InitT = classifyPrim(Init->getType());
- if (!this->visit(Init))
- return false;
- if (InitT != TargetT) {
- if (!this->emitCast(InitT, TargetT, E))
+ auto Eval = [&](const IntegerLiteral *IL, unsigned ElemIndex) {
+ if (TargetT == PT_Float) {
+ if (!this->emitConst(IL->getValue(), classifyPrim(IL), Init))
+ return false;
+ const auto *Sem = &Ctx.getFloatSemantics(CAT->getElementType());
+ if (!this->emitCastIntegralFloating(classifyPrim(IL), Sem,
+ getFPOptions(E), E))
+ return false;
+ } else {
+ if (!this->emitConst(IL->getValue(), TargetT, Init))
return false;
}
- return this->emitInitElem(TargetT, ElemIndex, Init);
+ return this->emitInitElem(TargetT, ElemIndex, IL);
};
if (!EmbedS->doForEachDataElement(Eval, ElementIndex))
return false;
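
The Eval callback now receives the IntegerLiteral each data element is modeled as and, for floating targets, emits the integer constant followed by an explicit integral-to-floating cast rather than going through the generic cast helper. The construct this serves is C23 #embed feeding a floating array; a sketch, with a hypothetical data file:

    // Each embedded byte arrives as an IntegerLiteral; initializing a
    // float array takes the PT_Float branch of Eval above.
    const float samples[] = {
    #embed "samples.bin"
    };
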
@@ -2061,24 +2090,36 @@ bool Compiler<Emitter>::visitArrayElemInit(unsigned ElemIndex, const Expr *Init,
template <class Emitter>
bool Compiler<Emitter>::visitCallArgs(ArrayRef<const Expr *> Args,
const FunctionDecl *FuncDecl,
- bool Activate) {
+ bool Activate, bool IsOperatorCall) {
assert(VarScope->getKind() == ScopeKind::Call);
- bool HasNonNullAttr = false;
llvm::BitVector NonNullArgs;
- if (FuncDecl && FuncDecl->hasAttr<NonNullAttr>()) {
- HasNonNullAttr = true;
+ if (FuncDecl && FuncDecl->hasAttr<NonNullAttr>())
NonNullArgs = collectNonNullArgs(FuncDecl, Args);
- }
+
+ bool ExplicitMemberFn = false;
+ if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(FuncDecl))
+ ExplicitMemberFn = MD->isExplicitObjectMemberFunction();
unsigned ArgIndex = 0;
for (const Expr *Arg : Args) {
- if (OptPrimType T = classify(Arg)) {
+ if (canClassify(Arg)) {
if (!this->visit(Arg))
return false;
} else {
- std::optional<unsigned> LocalIndex = allocateLocal(
- Arg, Arg->getType(), /*ExtendingDecl=*/nullptr, ScopeKind::Call);
+ DeclTy Source = Arg;
+ if (FuncDecl) {
+ // Try to use the parameter declaration instead of the argument
+ // expression as a source.
+ unsigned DeclIndex = ArgIndex - IsOperatorCall + ExplicitMemberFn;
+ if (DeclIndex < FuncDecl->getNumParams())
+ Source = FuncDecl->getParamDecl(ArgIndex - IsOperatorCall +
+ ExplicitMemberFn);
+ }
+
+ UnsignedOrNone LocalIndex =
+ allocateLocal(std::move(Source), Arg->getType(),
+ /*ExtendingDecl=*/nullptr, ScopeKind::Call);
if (!LocalIndex)
return false;
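
The index arithmetic accounts for calling-convention mismatches between arguments and declared parameters: a CXXOperatorCallExpr passes the object expression as argument 0, while an explicit-object member function declares it as parameter 0. Illustratively, with a hypothetical type S:

    struct S { S &operator+=(int); };
    // For `s += 1` seen as a CXXOperatorCallExpr, Args = {s, 1} while the
    // declaration's parameters are {int}: argument 1 maps to parameter 0,
    // i.e. DeclIndex = ArgIndex - IsOperatorCall (+ ExplicitMemberFn).
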
@@ -2094,7 +2135,7 @@ bool Compiler<Emitter>::visitCallArgs(ArrayRef<const Expr *> Args,
return false;
}
- if (HasNonNullAttr && NonNullArgs[ArgIndex]) {
+ if (!NonNullArgs.empty() && NonNullArgs[ArgIndex]) {
PrimType ArgT = classify(Arg).value_or(PT_Ptr);
if (ArgT == PT_Ptr) {
if (!this->emitCheckNonNullArg(ArgT, Arg))
@@ -2225,7 +2266,9 @@ bool Compiler<Emitter>::VisitUnaryExprOrTypeTraitExpr(
assert(VAT);
if (VAT->getElementType()->isArrayType()) {
std::optional<APSInt> Res =
- VAT->getSizeExpr()->getIntegerConstantExpr(ASTCtx);
+ VAT->getSizeExpr()
+ ? VAT->getSizeExpr()->getIntegerConstantExpr(ASTCtx)
+ : std::nullopt;
if (Res) {
if (DiscardResult)
return true;
@@ -2919,7 +2962,7 @@ bool Compiler<Emitter>::VisitMaterializeTemporaryExpr(
return false;
const Expr *Inner = E->getSubExpr()->skipRValueSubobjectAdjustments();
- if (std::optional<unsigned> LocalIndex =
+ if (UnsignedOrNone LocalIndex =
allocateLocal(E, Inner->getType(), E->getExtendingDecl())) {
InitLinkScope<Emitter> ILS(this, InitLink::Temp(*LocalIndex));
if (!this->emitGetPtrLocal(*LocalIndex, E))
@@ -2966,20 +3009,25 @@ bool Compiler<Emitter>::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
if (T && !E->isLValue())
return this->delegate(Init);
- if (std::optional<unsigned> GlobalIndex = P.createGlobal(E)) {
- if (!this->emitGetPtrGlobal(*GlobalIndex, E))
- return false;
+ std::optional<unsigned> GlobalIndex = P.createGlobal(E);
+ if (!GlobalIndex)
+ return false;
- if (T) {
- if (!this->visit(Init))
- return false;
- return this->emitInitGlobal(*T, *GlobalIndex, E);
- }
+ if (!this->emitGetPtrGlobal(*GlobalIndex, E))
+ return false;
+
+ // Since this is a global variable we might've already seen,
+ // don't initialize it again.
+ if (P.isGlobalInitialized(*GlobalIndex))
+ return true;
- return this->visitInitializer(Init) && this->emitFinishInit(E);
+ if (T) {
+ if (!this->visit(Init))
+ return false;
+ return this->emitInitGlobal(*T, *GlobalIndex, E);
}
- return false;
+ return this->visitInitializer(Init) && this->emitFinishInit(E);
}
// Otherwise, use a local variable.
@@ -2991,7 +3039,7 @@ bool Compiler<Emitter>::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
unsigned LocalIndex;
if (T)
LocalIndex = this->allocateLocalPrimitive(Init, *T, /*IsConst=*/false);
- else if (std::optional<unsigned> MaybeIndex = this->allocateLocal(Init))
+ else if (UnsignedOrNone MaybeIndex = this->allocateLocal(Init))
LocalIndex = *MaybeIndex;
else
return false;
@@ -3157,25 +3205,18 @@ bool Compiler<Emitter>::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
template <class Emitter>
bool Compiler<Emitter>::VisitCXXConstructExpr(const CXXConstructExpr *E) {
QualType T = E->getType();
- assert(!classify(T));
+ assert(!canClassify(T));
if (T->isRecordType()) {
const CXXConstructorDecl *Ctor = E->getConstructor();
- // Trivial copy/move constructor. Avoid copy.
- if (Ctor->isDefaulted() && Ctor->isCopyOrMoveConstructor() &&
- Ctor->isTrivial() &&
- E->getArg(0)->isTemporaryObject(Ctx.getASTContext(),
- T->getAsCXXRecordDecl()))
- return this->visitInitializer(E->getArg(0));
-
// If we're discarding a construct expression, we still need
// to allocate a variable and call the constructor and destructor.
if (DiscardResult) {
if (Ctor->isTrivial())
return true;
assert(!Initializing);
- std::optional<unsigned> LocalIndex = allocateLocal(E);
+ UnsignedOrNone LocalIndex = allocateLocal(E);
if (!LocalIndex)
return false;
@@ -3184,6 +3225,13 @@ bool Compiler<Emitter>::VisitCXXConstructExpr(const CXXConstructExpr *E) {
return false;
}
+ // Trivial copy/move constructor. Avoid copy.
+ if (Ctor->isDefaulted() && Ctor->isCopyOrMoveConstructor() &&
+ Ctor->isTrivial() &&
+ E->getArg(0)->isTemporaryObject(Ctx.getASTContext(),
+ T->getAsCXXRecordDecl()))
+ return this->visitInitializer(E->getArg(0));
+
// Zero initialization.
if (E->requiresZeroInitialization()) {
const Record *R = getRecord(E->getType());
@@ -3378,7 +3426,7 @@ bool Compiler<Emitter>::VisitCXXScalarValueInitExpr(
if (const auto *CT = Ty->getAs<ComplexType>()) {
if (!Initializing) {
- std::optional<unsigned> LocalIndex = allocateLocal(E);
+ UnsignedOrNone LocalIndex = allocateLocal(E);
if (!LocalIndex)
return false;
if (!this->emitGetPtrLocal(*LocalIndex, E))
@@ -3401,7 +3449,7 @@ bool Compiler<Emitter>::VisitCXXScalarValueInitExpr(
if (const auto *VT = Ty->getAs<VectorType>()) {
// FIXME: Code duplication with the _Complex case above.
if (!Initializing) {
- std::optional<unsigned> LocalIndex = allocateLocal(E);
+ UnsignedOrNone LocalIndex = allocateLocal(E);
if (!LocalIndex)
return false;
if (!this->emitGetPtrLocal(*LocalIndex, E))
@@ -3851,6 +3899,8 @@ bool Compiler<Emitter>::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
template <class Emitter>
bool Compiler<Emitter>::VisitRequiresExpr(const RequiresExpr *E) {
assert(classifyPrim(E->getType()) == PT_Bool);
+ if (E->isValueDependent())
+ return false;
if (DiscardResult)
return true;
return this->emitConstBool(E->isSatisfied(), E);
@@ -4027,8 +4077,7 @@ bool Compiler<Emitter>::VisitExtVectorElementExpr(
// Now the vector variable for the return value.
if (!Initializing) {
- std::optional<unsigned> ResultIndex;
- ResultIndex = allocateLocal(E);
+ UnsignedOrNone ResultIndex = allocateLocal(E);
if (!ResultIndex)
return false;
if (!this->emitGetPtrLocal(*ResultIndex, E))
@@ -4089,8 +4138,7 @@ bool Compiler<Emitter>::VisitCXXStdInitializerListExpr(
PrimType SecondFieldT = classifyPrim(R->getField(1u)->Decl->getType());
if (isIntegralType(SecondFieldT)) {
- if (!this->emitConst(static_cast<APSInt>(ArrayType->getSize()),
- SecondFieldT, E))
+ if (!this->emitConst(ArrayType->getSize(), SecondFieldT, E))
return false;
return this->emitInitField(SecondFieldT, R->getField(1u)->Offset, E);
}
@@ -4100,7 +4148,7 @@ bool Compiler<Emitter>::VisitCXXStdInitializerListExpr(
return false;
if (!this->emitExpandPtr(E))
return false;
- if (!this->emitConst(static_cast<APSInt>(ArrayType->getSize()), PT_Uint64, E))
+ if (!this->emitConst(ArrayType->getSize(), PT_Uint64, E))
return false;
if (!this->emitArrayElemPtrPop(PT_Uint64, E))
return false;
@@ -4132,13 +4180,13 @@ bool Compiler<Emitter>::VisitStmtExpr(const StmtExpr *E) {
template <class Emitter> bool Compiler<Emitter>::discard(const Expr *E) {
OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/true,
- /*NewInitializing=*/false);
+ /*NewInitializing=*/false, /*ToLValue=*/false);
return this->Visit(E);
}
template <class Emitter> bool Compiler<Emitter>::delegate(const Expr *E) {
// We're basically doing:
- // OptionScope<Emitter> Scope(this, DicardResult, Initializing);
+ // OptionScope<Emitter> Scope(this, DiscardResult, Initializing, ToLValue);
// but that's unnecessary of course.
return this->Visit(E);
}
@@ -4152,8 +4200,8 @@ template <class Emitter> bool Compiler<Emitter>::visit(const Expr *E) {
// Create local variable to hold the return value.
if (!E->isGLValue() && !E->getType()->isAnyComplexType() &&
- !classify(E->getType())) {
- std::optional<unsigned> LocalIndex = allocateLocal(E);
+ !canClassify(E->getType())) {
+ UnsignedOrNone LocalIndex = allocateLocal(E);
if (!LocalIndex)
return false;
@@ -4166,16 +4214,22 @@ template <class Emitter> bool Compiler<Emitter>::visit(const Expr *E) {
// Otherwise,we have a primitive return value, produce the value directly
// and push it on the stack.
OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false,
- /*NewInitializing=*/false);
+ /*NewInitializing=*/false, /*ToLValue=*/ToLValue);
return this->Visit(E);
}
template <class Emitter>
bool Compiler<Emitter>::visitInitializer(const Expr *E) {
- assert(!classify(E->getType()));
+ assert(!canClassify(E->getType()));
OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false,
- /*NewInitializing=*/true);
+ /*NewInitializing=*/true, /*ToLValue=*/false);
+ return this->Visit(E);
+}
+
+template <class Emitter> bool Compiler<Emitter>::visitAsLValue(const Expr *E) {
+ OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false,
+ /*NewInitializing=*/false, /*ToLValue=*/true);
return this->Visit(E);
}
@@ -4379,7 +4433,7 @@ bool Compiler<Emitter>::visitZeroArrayInitializer(QualType T, const Expr *E) {
template <class Emitter>
bool Compiler<Emitter>::visitAssignment(const Expr *LHS, const Expr *RHS,
const Expr *E) {
- if (!classify(E->getType()))
+ if (!canClassify(E->getType()))
return false;
if (!this->visit(RHS))
@@ -4472,12 +4526,18 @@ bool Compiler<Emitter>::emitConst(T Value, const Expr *E) {
template <class Emitter>
bool Compiler<Emitter>::emitConst(const APSInt &Value, PrimType Ty,
const Expr *E) {
+ return this->emitConst(static_cast<const APInt &>(Value), Ty, E);
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::emitConst(const APInt &Value, PrimType Ty,
+ const Expr *E) {
if (Ty == PT_IntAPS)
return this->emitConstIntAPS(Value, E);
if (Ty == PT_IntAP)
return this->emitConstIntAP(Value, E);
- if (Value.isSigned())
+ if (isSignedType(Ty))
return this->emitConst(Value.getSExtValue(), Ty, E);
return this->emitConst(Value.getZExtValue(), Ty, E);
}
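
Keying the extension on isSignedType(Ty) rather than Value.isSigned() matters because an APSInt's signedness describes the source expression, while the emitted constant must be widened according to the destination slot. A self-contained illustration of the difference:

    #include <cassert>
    #include <cstdint>

    int main() {
      // The same 8-bit pattern widens differently depending on the
      // destination's signedness, which is what isSignedType selects.
      uint8_t Bits = 0xFF;
      int64_t AsSigned = static_cast<int8_t>(Bits); // sign-extend: -1
      uint64_t AsUnsigned = Bits;                   // zero-extend: 255
      assert(AsSigned == -1 && AsUnsigned == 255);
      return 0;
    }
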
@@ -4491,14 +4551,6 @@ template <class Emitter>
unsigned Compiler<Emitter>::allocateLocalPrimitive(
DeclTy &&Src, PrimType Ty, bool IsConst, const ValueDecl *ExtendingDecl,
ScopeKind SC, bool IsConstexprUnknown) {
- // Make sure we don't accidentally register the same decl twice.
- if (const auto *VD =
- dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
- assert(!P.getGlobal(VD));
- assert(!Locals.contains(VD));
- (void)VD;
- }
-
// FIXME: There are cases where Src.is<Expr*>() is wrong, e.g.
// (int){12} in C. Consider using Expr::isTemporaryObject() instead
// or isa<MaterializeTemporaryExpr>().
@@ -4516,23 +4568,15 @@ unsigned Compiler<Emitter>::allocateLocalPrimitive(
}
template <class Emitter>
-std::optional<unsigned>
-Compiler<Emitter>::allocateLocal(DeclTy &&Src, QualType Ty,
- const ValueDecl *ExtendingDecl, ScopeKind SC,
- bool IsConstexprUnknown) {
- // Make sure we don't accidentally register the same decl twice.
- if ([[maybe_unused]] const auto *VD =
- dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
- assert(!P.getGlobal(VD));
- assert(!Locals.contains(VD));
- }
-
+UnsignedOrNone Compiler<Emitter>::allocateLocal(DeclTy &&Src, QualType Ty,
+ const ValueDecl *ExtendingDecl,
+ ScopeKind SC,
+ bool IsConstexprUnknown) {
const ValueDecl *Key = nullptr;
const Expr *Init = nullptr;
bool IsTemporary = false;
if (auto *VD = dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
Key = VD;
- Ty = VD->getType();
if (const auto *VarD = dyn_cast<VarDecl>(VD))
Init = VarD->getInit();
@@ -4561,7 +4605,7 @@ Compiler<Emitter>::allocateLocal(DeclTy &&Src, QualType Ty,
}
template <class Emitter>
-std::optional<unsigned> Compiler<Emitter>::allocateTemporary(const Expr *E) {
+UnsignedOrNone Compiler<Emitter>::allocateTemporary(const Expr *E) {
QualType Ty = E->getType();
assert(!Ty->isRecordType());
@@ -4586,13 +4630,13 @@ std::optional<unsigned> Compiler<Emitter>::allocateTemporary(const Expr *E) {
template <class Emitter>
const RecordType *Compiler<Emitter>::getRecordTy(QualType Ty) {
if (const PointerType *PT = dyn_cast<PointerType>(Ty))
- return PT->getPointeeType()->getAs<RecordType>();
- return Ty->getAs<RecordType>();
+ return PT->getPointeeType()->getAsCanonical<RecordType>();
+ return Ty->getAsCanonical<RecordType>();
}
template <class Emitter> Record *Compiler<Emitter>::getRecord(QualType Ty) {
if (const auto *RecordTy = getRecordTy(Ty))
- return getRecord(RecordTy->getDecl());
+ return getRecord(RecordTy->getOriginalDecl()->getDefinitionOrSelf());
return nullptr;
}
@@ -4640,7 +4684,7 @@ bool Compiler<Emitter>::visitExpr(const Expr *E, bool DestroyToplevelScope) {
// Expressions with a composite return type.
// For us, that means everything we don't
// have a PrimType for.
- if (std::optional<unsigned> LocalOffset = this->allocateLocal(E)) {
+ if (UnsignedOrNone LocalOffset = this->allocateLocal(E)) {
InitLinkScope<Emitter> ILS(this, InitLink::Temp(*LocalOffset));
if (!this->emitGetPtrLocal(*LocalOffset, E))
return false;
@@ -4837,7 +4881,7 @@ VarCreationState Compiler<Emitter>::visitVarDecl(const VarDecl *VD,
return this->emitSetLocal(*VarT, Offset, VD);
}
} else {
- if (std::optional<unsigned> Offset = this->allocateLocal(
+ if (UnsignedOrNone Offset = this->allocateLocal(
VD, VD->getType(), nullptr, ScopeKind::Block, IsConstexprUnknown)) {
if (!Init)
return true;
@@ -4952,7 +4996,6 @@ bool Compiler<Emitter>::visitAPValueInitializer(const APValue &Val,
template <class Emitter>
bool Compiler<Emitter>::VisitBuiltinCallExpr(const CallExpr *E,
unsigned BuiltinID) {
-
if (BuiltinID == Builtin::BI__builtin_constant_p) {
// Void argument is always invalid and harder to handle later.
if (E->getArg(0)->getType()->isVoidType()) {
@@ -4990,18 +5033,38 @@ bool Compiler<Emitter>::VisitBuiltinCallExpr(const CallExpr *E,
// Non-primitive return type. Prepare storage.
if (!Initializing && !ReturnT && !ReturnType->isVoidType()) {
- std::optional<unsigned> LocalIndex = allocateLocal(E);
+ UnsignedOrNone LocalIndex = allocateLocal(E);
if (!LocalIndex)
return false;
if (!this->emitGetPtrLocal(*LocalIndex, E))
return false;
}
- if (!Context::isUnevaluatedBuiltin(BuiltinID)) {
- // Put arguments on the stack.
- for (const auto *Arg : E->arguments()) {
- if (!this->visit(Arg))
+ // Prepare function arguments including special cases.
+ switch (BuiltinID) {
+ case Builtin::BI__builtin_object_size:
+ case Builtin::BI__builtin_dynamic_object_size: {
+ assert(E->getNumArgs() == 2);
+ const Expr *Arg0 = E->getArg(0);
+ if (Arg0->isGLValue()) {
+ if (!this->visit(Arg0))
return false;
+
+ } else {
+ if (!this->visitAsLValue(Arg0))
+ return false;
+ }
+ if (!this->visit(E->getArg(1)))
+ return false;
+
+ } break;
+ default:
+ if (!Context::isUnevaluatedBuiltin(BuiltinID)) {
+ // Put arguments on the stack.
+ for (const auto *Arg : E->arguments()) {
+ if (!this->visit(Arg))
+ return false;
+ }
}
}
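
__builtin_object_size needs the designator of its first argument rather than a loaded pointer value, which is why a non-glvalue Arg0 goes through visitAsLValue, and why VisitCastExpr now lets CK_LValueToRValue pass through for pointers when ToLValue is set. For example:

    char buf[16];
    // Keeping the designator visible lets the interpreter recover the
    // enclosing object's size; with full information this yields 12.
    unsigned long n = __builtin_object_size(buf + 4, 0);
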
@@ -5066,7 +5129,7 @@ bool Compiler<Emitter>::VisitCallExpr(const CallExpr *E) {
// If we need to discard the return value but the function returns its
// value via an RVO pointer, we need to create one such pointer just
// for this call.
- if (std::optional<unsigned> LocalIndex = allocateLocal(E)) {
+ if (UnsignedOrNone LocalIndex = allocateLocal(E)) {
if (!this->emitGetPtrLocal(*LocalIndex, E))
return false;
}
@@ -5074,7 +5137,7 @@ bool Compiler<Emitter>::VisitCallExpr(const CallExpr *E) {
// We need the result. Prepare a pointer to return or
// dup the current one.
if (!Initializing) {
- if (std::optional<unsigned> LocalIndex = allocateLocal(E)) {
+ if (UnsignedOrNone LocalIndex = allocateLocal(E)) {
if (!this->emitGetPtrLocal(*LocalIndex, E))
return false;
}
@@ -5110,7 +5173,7 @@ bool Compiler<Emitter>::VisitCallExpr(const CallExpr *E) {
}
bool Devirtualized = false;
- std::optional<unsigned> CalleeOffset;
+ UnsignedOrNone CalleeOffset = std::nullopt;
// Add the (optional, implicit) This pointer.
if (const auto *MC = dyn_cast<CXXMemberCallExpr>(E)) {
if (!FuncDecl && classifyPrim(E->getCallee()) == PT_MemberPtr) {
@@ -5154,7 +5217,8 @@ bool Compiler<Emitter>::VisitCallExpr(const CallExpr *E) {
if (!this->emitCheckPseudoDtor(E))
return false;
const Expr *Base = PD->getBase();
- if (!Base->isGLValue())
+ // E.g. `using T = int; 0.~T();`.
+ if (OptPrimType BaseT = classify(Base); !BaseT || BaseT != PT_Ptr)
return this->discard(Base);
if (!this->visit(Base))
return false;
@@ -5169,7 +5233,8 @@ bool Compiler<Emitter>::VisitCallExpr(const CallExpr *E) {
return false;
}
- if (!this->visitCallArgs(Args, FuncDecl, IsAssignmentOperatorCall))
+ if (!this->visitCallArgs(Args, FuncDecl, IsAssignmentOperatorCall,
+ isa<CXXOperatorCallExpr>(E)))
return false;
// Undo the argument reversal we did earlier.
@@ -5185,6 +5250,12 @@ bool Compiler<Emitter>::VisitCallExpr(const CallExpr *E) {
const Function *Func = getFunction(FuncDecl);
if (!Func)
return false;
+
+ // In error cases, the function may be called with fewer arguments than
+ // parameters.
+ if (E->getNumArgs() < Func->getNumWrittenParams())
+ return false;
+
assert(HasRVO == Func->hasRVO());
bool HasQualifier = false;
@@ -6516,7 +6587,7 @@ bool Compiler<Emitter>::VisitComplexUnaryOperator(const UnaryOperator *E) {
OptPrimType ResT = classify(E);
auto prepareResult = [=]() -> bool {
if (!ResT && !Initializing) {
- std::optional<unsigned> LocalIndex = allocateLocal(SubExpr);
+ UnsignedOrNone LocalIndex = allocateLocal(SubExpr);
if (!LocalIndex)
return false;
return this->emitGetPtrLocal(*LocalIndex, E);
@@ -6634,7 +6705,7 @@ bool Compiler<Emitter>::VisitVectorUnaryOperator(const UnaryOperator *E) {
return this->delegate(SubExpr);
if (!Initializing) {
- std::optional<unsigned> LocalIndex = allocateLocal(SubExpr);
+ UnsignedOrNone LocalIndex = allocateLocal(SubExpr);
if (!LocalIndex)
return false;
if (!this->emitGetPtrLocal(*LocalIndex, E))
@@ -6752,6 +6823,22 @@ bool Compiler<Emitter>::visitDeclRef(const ValueDecl *D, const Expr *E) {
// value.
bool IsReference = D->getType()->isReferenceType();
+ // Function parameters.
+ // Note that it's important to check them first since we might have a local
+ // variable created for a ParmVarDecl as well.
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(D)) {
+ if (Ctx.getLangOpts().CPlusPlus && !Ctx.getLangOpts().CPlusPlus11 &&
+ !D->getType()->isIntegralOrEnumerationType()) {
+ return this->emitInvalidDeclRef(cast<DeclRefExpr>(E),
+ /*InitializerFailed=*/false, E);
+ }
+ if (auto It = this->Params.find(PVD); It != this->Params.end()) {
+ if (IsReference || !It->second.IsPtr)
+ return this->emitGetParam(classifyPrim(E), It->second.Offset, E);
+
+ return this->emitGetPtrParam(It->second.Offset, E);
+ }
+ }
// Local variables.
if (auto It = Locals.find(D); It != Locals.end()) {
const unsigned Offset = It->second.Offset;
@@ -6769,20 +6856,6 @@ bool Compiler<Emitter>::visitDeclRef(const ValueDecl *D, const Expr *E) {
return this->emitGetPtrGlobal(*GlobalIndex, E);
}
- // Function parameters.
- if (const auto *PVD = dyn_cast<ParmVarDecl>(D)) {
- if (Ctx.getLangOpts().CPlusPlus && !Ctx.getLangOpts().CPlusPlus11 &&
- !D->getType()->isIntegralOrEnumerationType()) {
- return this->emitInvalidDeclRef(cast<DeclRefExpr>(E),
- /*InitializerFailed=*/false, E);
- }
- if (auto It = this->Params.find(PVD); It != this->Params.end()) {
- if (IsReference || !It->second.IsPtr)
- return this->emitGetParam(classifyPrim(E), It->second.Offset, E);
-
- return this->emitGetPtrParam(It->second.Offset, E);
- }
- }
// In case we need to re-visit a declaration.
auto revisit = [&](const VarDecl *VD) -> bool {
@@ -7142,10 +7215,6 @@ bool Compiler<Emitter>::emitDestruction(const Descriptor *Desc,
assert(!Desc->isPrimitive());
assert(!Desc->isPrimitiveArray());
- // Can happen if the decl is invalid.
- if (Desc->isDummy())
- return true;
-
// Arrays.
if (Desc->isArray()) {
const Descriptor *ElemDesc = Desc->ElemDesc;
@@ -7240,7 +7309,7 @@ bool Compiler<Emitter>::emitBuiltinBitCast(const CastExpr *E) {
// Prepare storage for the result in case we discard.
if (DiscardResult && !Initializing && !ToT) {
- std::optional<unsigned> LocalIndex = allocateLocal(E);
+ UnsignedOrNone LocalIndex = allocateLocal(E);
if (!LocalIndex)
return false;
if (!this->emitGetPtrLocal(*LocalIndex, E))
diff --git a/clang/lib/AST/ByteCode/Compiler.h b/clang/lib/AST/ByteCode/Compiler.h
index ee8327d..475faee 100644
--- a/clang/lib/AST/ByteCode/Compiler.h
+++ b/clang/lib/AST/ByteCode/Compiler.h
@@ -256,6 +256,8 @@ protected:
OptPrimType classify(const Expr *E) const { return Ctx.classify(E); }
OptPrimType classify(QualType Ty) const { return Ctx.classify(Ty); }
+ bool canClassify(const Expr *E) const { return Ctx.canClassify(E); }
+ bool canClassify(QualType T) const { return Ctx.canClassify(T); }
/// Classifies a known primitive type.
PrimType classifyPrim(QualType Ty) const {
@@ -280,6 +282,7 @@ protected:
/// been created. visitInitializer() then relies on a pointer to this
/// variable being on top of the stack.
bool visitInitializer(const Expr *E);
+ bool visitAsLValue(const Expr *E);
/// Evaluates an expression for side effects and discards the result.
bool discard(const Expr *E);
/// Just pass evaluation on to \p E. This leaves all the parsing flags
@@ -304,7 +307,7 @@ protected:
bool visitArrayElemInit(unsigned ElemIndex, const Expr *Init,
OptPrimType InitT);
bool visitCallArgs(ArrayRef<const Expr *> Args, const FunctionDecl *FuncDecl,
- bool Activate);
+ bool Activate, bool IsOperatorCall);
/// Creates a local primitive value.
unsigned allocateLocalPrimitive(DeclTy &&Decl, PrimType Ty, bool IsConst,
@@ -313,11 +316,11 @@ protected:
bool IsConstexprUnknown = false);
/// Allocates a space storing a local given its type.
- std::optional<unsigned>
- allocateLocal(DeclTy &&Decl, QualType Ty = QualType(),
- const ValueDecl *ExtendingDecl = nullptr,
- ScopeKind = ScopeKind::Block, bool IsConstexprUnknown = false);
- std::optional<unsigned> allocateTemporary(const Expr *E);
+ UnsignedOrNone allocateLocal(DeclTy &&Decl, QualType Ty = QualType(),
+ const ValueDecl *ExtendingDecl = nullptr,
+ ScopeKind = ScopeKind::Block,
+ bool IsConstexprUnknown = false);
+ UnsignedOrNone allocateTemporary(const Expr *E);
private:
friend class VariableScope<Emitter>;
@@ -344,9 +347,10 @@ private:
/// Emits an APSInt constant.
bool emitConst(const llvm::APSInt &Value, PrimType Ty, const Expr *E);
+ bool emitConst(const llvm::APInt &Value, PrimType Ty, const Expr *E);
bool emitConst(const llvm::APSInt &Value, const Expr *E);
bool emitConst(const llvm::APInt &Value, const Expr *E) {
- return emitConst(static_cast<llvm::APSInt>(Value), E);
+ return emitConst(Value, classifyPrim(E), E);
}
/// Emits an integer constant.
@@ -424,6 +428,7 @@ protected:
bool DiscardResult = false;
bool InStmtExpr = false;
+ bool ToLValue = false;
/// Flag indicating if we're initializing an already created
/// variable. This is set in visitInitializer().
@@ -563,7 +568,7 @@ public:
void addLocal(const Scope::Local &Local) override {
if (!Idx) {
- Idx = this->Ctx->Descriptors.size();
+ Idx = static_cast<unsigned>(this->Ctx->Descriptors.size());
this->Ctx->Descriptors.emplace_back();
this->Ctx->emitInitScope(*Idx, {});
}
@@ -611,7 +616,7 @@ public:
}
/// Index of the scope in the chain.
- std::optional<unsigned> Idx;
+ UnsignedOrNone Idx = std::nullopt;
};
/// Scope for storage declared in a compound statement.
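The std::optional<unsigned> → UnsignedOrNone migration above assumes the two types are drop-in compatible at these call sites. A minimal sketch of the interface subset relied on, with std::optional standing in for the real type (the stand-in alias is an assumption for illustration, not UnsignedOrNone's actual definition):

#include <cassert>
#include <optional>

// Stand-in exposing only what the call sites need: construction from
// std::nullopt, boolean test, assignment, and dereference.
using UnsignedOrNoneSketch = std::optional<unsigned>;

int main() {
  UnsignedOrNoneSketch Idx = std::nullopt; // mirrors `UnsignedOrNone Idx = std::nullopt;`
  assert(!Idx);
  Idx = 42u;
  assert(*Idx == 42u);
}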
diff --git a/clang/lib/AST/ByteCode/Context.cpp b/clang/lib/AST/ByteCode/Context.cpp
index f7f528c..8598996 100644
--- a/clang/lib/AST/ByteCode/Context.cpp
+++ b/clang/lib/AST/ByteCode/Context.cpp
@@ -91,7 +91,7 @@ bool Context::evaluateAsRValue(State &Parent, const Expr *E, APValue &Result) {
#endif
}
- Result = Res.toAPValue();
+ Result = Res.stealAPValue();
return true;
}
@@ -121,7 +121,7 @@ bool Context::evaluate(State &Parent, const Expr *E, APValue &Result,
#endif
}
- Result = Res.toAPValue();
+ Result = Res.stealAPValue();
return true;
}
@@ -153,7 +153,7 @@ bool Context::evaluateAsInitializer(State &Parent, const VarDecl *VD,
#endif
}
- Result = Res.toAPValue();
+ Result = Res.stealAPValue();
return true;
}
@@ -364,8 +364,7 @@ OptPrimType Context::classify(QualType T) const {
return integralTypeToPrimTypeU(BT->getNumBits());
}
- if (const auto *ET = T->getAs<EnumType>()) {
- const auto *D = ET->getDecl();
+ if (const auto *D = T->getAsEnumDecl()) {
if (!D->isComplete())
return std::nullopt;
return classify(D->getIntegerType());
@@ -398,17 +397,11 @@ const llvm::fltSemantics &Context::getFloatSemantics(QualType T) const {
}
bool Context::Run(State &Parent, const Function *Func) {
-
- {
- InterpState State(Parent, *P, Stk, *this, Func);
- if (Interpret(State)) {
- assert(Stk.empty());
- return true;
- }
- // State gets destroyed here, so the Stk.clear() below doesn't accidentally
- // remove values the State's destructor might access.
+ InterpState State(Parent, *P, Stk, *this, Func);
+ if (Interpret(State)) {
+ assert(Stk.empty());
+ return true;
}
-
Stk.clear();
return false;
}
@@ -501,7 +494,7 @@ const Function *Context::getOrCreateFunction(const FunctionDecl *FuncDecl) {
// elsewhere in the code.
QualType Ty = FuncDecl->getReturnType();
bool HasRVO = false;
- if (!Ty->isVoidType() && !classify(Ty)) {
+ if (!Ty->isVoidType() && !canClassify(Ty)) {
HasRVO = true;
ParamTypes.push_back(PT_Ptr);
ParamOffsets.push_back(ParamOffset);
diff --git a/clang/lib/AST/ByteCode/Context.h b/clang/lib/AST/ByteCode/Context.h
index 1c084ac..fa98498 100644
--- a/clang/lib/AST/ByteCode/Context.h
+++ b/clang/lib/AST/ByteCode/Context.h
@@ -30,7 +30,7 @@ namespace interp {
class Function;
class Program;
class State;
-enum PrimType : unsigned;
+enum PrimType : uint8_t;
struct ParamOffset {
unsigned Offset;
@@ -93,6 +93,25 @@ public:
return classify(E->getType());
}
+ bool canClassify(QualType T) {
+ if (const auto *BT = dyn_cast<BuiltinType>(T)) {
+ if (BT->isInteger() || BT->isFloatingPoint())
+ return true;
+ if (BT->getKind() == BuiltinType::Bool)
+ return true;
+ }
+
+ if (T->isArrayType() || T->isRecordType() || T->isAnyComplexType() ||
+ T->isVectorType())
+ return false;
+ return classify(T) != std::nullopt;
+ }
+ bool canClassify(const Expr *E) {
+ if (E->isGLValue())
+ return true;
+ return canClassify(E->getType());
+ }
+
const CXXMethodDecl *
getOverridingFunction(const CXXRecordDecl *DynamicDecl,
const CXXRecordDecl *StaticDecl,
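The new canClassify() entry points answer "does this type map to a primitive?" without materializing the PrimType. A sketch of the contract plus a hypothetical caller (needsRVO is illustrative, not part of this patch; assumes the interp headers are included):

// Contract: for every type T,
//   Ctx.canClassify(T) == (Ctx.classify(T) != std::nullopt)
// with fast paths for builtin integer/float/bool types and early-outs for
// arrays, records, complex numbers and vectors.
bool needsRVO(clang::interp::Context &Ctx, clang::QualType RetTy) {
  // Non-void returns that can't be classified to a primitive are returned
  // through a pointer parameter instead (cf. getOrCreateFunction below).
  return !RetTy->isVoidType() && !Ctx.canClassify(RetTy);
}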
diff --git a/clang/lib/AST/ByteCode/Descriptor.cpp b/clang/lib/AST/ByteCode/Descriptor.cpp
index 629c1ff..647de56 100644
--- a/clang/lib/AST/ByteCode/Descriptor.cpp
+++ b/clang/lib/AST/ByteCode/Descriptor.cpp
@@ -50,14 +50,6 @@ static void dtorTy(Block *, std::byte *Ptr, const Descriptor *) {
}
template <typename T>
-static void moveTy(Block *, std::byte *Src, std::byte *Dst,
- const Descriptor *) {
- auto *SrcPtr = reinterpret_cast<T *>(Src);
- auto *DstPtr = reinterpret_cast<T *>(Dst);
- new (DstPtr) T(std::move(*SrcPtr));
-}
-
-template <typename T>
static void ctorArrayTy(Block *, std::byte *Ptr, bool, bool, bool, bool, bool,
const Descriptor *D) {
new (Ptr) InitMapPtr(std::nullopt);
@@ -85,28 +77,6 @@ static void dtorArrayTy(Block *, std::byte *Ptr, const Descriptor *D) {
}
}
-template <typename T>
-static void moveArrayTy(Block *, std::byte *Src, std::byte *Dst,
- const Descriptor *D) {
- InitMapPtr &SrcIMP = *reinterpret_cast<InitMapPtr *>(Src);
- if (SrcIMP) {
- // We only ever invoke the moveFunc when moving block contents to a
- // DeadBlock. DeadBlocks don't need InitMaps, so we destroy them here.
- SrcIMP = std::nullopt;
- }
- Src += sizeof(InitMapPtr);
- Dst += sizeof(InitMapPtr);
- if constexpr (!needsCtor<T>()) {
- std::memcpy(Dst, Src, D->getNumElems() * D->getElemSize());
- } else {
- for (unsigned I = 0, NE = D->getNumElems(); I < NE; ++I) {
- auto *SrcPtr = &reinterpret_cast<T *>(Src)[I];
- auto *DstPtr = &reinterpret_cast<T *>(Dst)[I];
- new (DstPtr) T(std::move(*SrcPtr));
- }
- }
-}
-
static void ctorArrayDesc(Block *B, std::byte *Ptr, bool IsConst,
bool IsMutable, bool IsVolatile, bool IsActive,
bool InUnion, const Descriptor *D) {
@@ -144,12 +114,14 @@ static void dtorArrayDesc(Block *B, std::byte *Ptr, const Descriptor *D) {
D->ElemDesc->getAllocSize() + sizeof(InlineDescriptor);
unsigned ElemOffset = 0;
- for (unsigned I = 0; I < NumElems; ++I, ElemOffset += ElemSize) {
+ auto Dtor = D->ElemDesc->DtorFn;
+ assert(Dtor &&
+ "a composite array without an elem dtor shouldn't have a dtor itself");
+ for (unsigned I = 0; I != NumElems; ++I, ElemOffset += ElemSize) {
auto *ElemPtr = Ptr + ElemOffset;
auto *Desc = reinterpret_cast<InlineDescriptor *>(ElemPtr);
auto *ElemLoc = reinterpret_cast<std::byte *>(Desc + 1);
- if (auto Fn = D->ElemDesc->DtorFn)
- Fn(B, ElemLoc, D->ElemDesc);
+ Dtor(B, ElemLoc, D->ElemDesc);
}
}
@@ -246,34 +218,59 @@ static void dtorRecord(Block *B, std::byte *Ptr, const Descriptor *D) {
destroyBase(B, Ptr, F.Desc, F.Offset);
}
-static BlockCtorFn getCtorPrim(PrimType Type) {
- // Floating types are special. They are primitives, but need their
- // constructor called.
- if (Type == PT_Float)
+/// Whether a record needs its descriptor dtor function called.
+static bool needsRecordDtor(const Record *R) {
+ for (const auto &B : R->bases()) {
+ if (B.Desc->DtorFn)
+ return true;
+ }
+
+ for (const auto &F : R->fields()) {
+ if (F.Desc->DtorFn)
+ return true;
+ }
+
+ for (const auto &V : R->virtual_bases()) {
+ if (V.Desc->DtorFn)
+ return true;
+ }
+ return false;
+}
+
+static BlockCtorFn getCtorPrim(PrimType T) {
+ switch (T) {
+ case PT_Float:
return ctorTy<PrimConv<PT_Float>::T>;
- if (Type == PT_IntAP)
+ case PT_IntAP:
return ctorTy<PrimConv<PT_IntAP>::T>;
- if (Type == PT_IntAPS)
+ case PT_IntAPS:
return ctorTy<PrimConv<PT_IntAPS>::T>;
- if (Type == PT_MemberPtr)
+ case PT_Ptr:
+ return ctorTy<PrimConv<PT_Ptr>::T>;
+ case PT_MemberPtr:
return ctorTy<PrimConv<PT_MemberPtr>::T>;
-
- COMPOSITE_TYPE_SWITCH(Type, return ctorTy<T>, return nullptr);
+ default:
+ return nullptr;
+ }
+ llvm_unreachable("Unhandled PrimType");
}
-static BlockDtorFn getDtorPrim(PrimType Type) {
- // Floating types are special. They are primitives, but need their
- // destructor called, since they might allocate memory.
- if (Type == PT_Float)
+static BlockDtorFn getDtorPrim(PrimType T) {
+ switch (T) {
+ case PT_Float:
return dtorTy<PrimConv<PT_Float>::T>;
- if (Type == PT_IntAP)
+ case PT_IntAP:
return dtorTy<PrimConv<PT_IntAP>::T>;
- if (Type == PT_IntAPS)
+ case PT_IntAPS:
return dtorTy<PrimConv<PT_IntAPS>::T>;
- if (Type == PT_MemberPtr)
+ case PT_Ptr:
+ return dtorTy<PrimConv<PT_Ptr>::T>;
+ case PT_MemberPtr:
return dtorTy<PrimConv<PT_MemberPtr>::T>;
-
- COMPOSITE_TYPE_SWITCH(Type, return dtorTy<T>, return nullptr);
+ default:
+ return nullptr;
+ }
+ llvm_unreachable("Unhandled PrimType");
}
static BlockCtorFn getCtorArrayPrim(PrimType Type) {
@@ -336,7 +333,7 @@ Descriptor::Descriptor(const DeclTy &D, const Type *SourceTy,
AllocSize(std::max<size_t>(alignof(void *), Size) + MDSize),
ElemDesc(Elem), IsConst(IsConst), IsMutable(IsMutable),
IsTemporary(IsTemporary), IsArray(true), CtorFn(ctorArrayDesc),
- DtorFn(dtorArrayDesc) {
+ DtorFn(Elem->DtorFn ? dtorArrayDesc : nullptr) {
assert(Source && "Missing source");
}
@@ -347,7 +344,7 @@ Descriptor::Descriptor(const DeclTy &D, const Descriptor *Elem, MetadataSize MD,
Size(UnknownSizeMark), MDSize(MD.value_or(0)),
AllocSize(MDSize + alignof(void *)), ElemDesc(Elem), IsConst(true),
IsMutable(false), IsTemporary(IsTemporary), IsArray(true),
- CtorFn(ctorArrayDesc), DtorFn(dtorArrayDesc) {
+ CtorFn(ctorArrayDesc), DtorFn(Elem->DtorFn ? dtorArrayDesc : nullptr) {
assert(Source && "Missing source");
}
@@ -359,7 +356,7 @@ Descriptor::Descriptor(const DeclTy &D, const Record *R, MetadataSize MD,
Size(ElemSize), MDSize(MD.value_or(0)), AllocSize(Size + MDSize),
ElemRecord(R), IsConst(IsConst), IsMutable(IsMutable),
IsTemporary(IsTemporary), IsVolatile(IsVolatile), CtorFn(ctorRecord),
- DtorFn(dtorRecord) {
+ DtorFn(needsRecordDtor(R) ? dtorRecord : nullptr) {
assert(Source && "Missing source");
}
@@ -367,7 +364,7 @@ Descriptor::Descriptor(const DeclTy &D, const Record *R, MetadataSize MD,
Descriptor::Descriptor(const DeclTy &D, MetadataSize MD)
: Source(D), ElemSize(1), Size(1), MDSize(MD.value_or(0)),
AllocSize(MDSize), ElemRecord(nullptr), IsConst(true), IsMutable(false),
- IsTemporary(false), IsDummy(true) {
+ IsTemporary(false) {
assert(Source && "Missing source");
}
@@ -377,12 +374,14 @@ QualType Descriptor::getType() const {
if (const auto *D = asValueDecl())
return D->getType();
if (const auto *T = dyn_cast_if_present<TypeDecl>(asDecl()))
- return QualType(T->getTypeForDecl(), 0);
+ return T->getASTContext().getTypeDeclType(T);
// The Source sometimes has a different type than the one
// we really save. Try to consult the Record first.
- if (isRecord())
- return QualType(ElemRecord->getDecl()->getTypeForDecl(), 0);
+ if (isRecord()) {
+ const RecordDecl *RD = ElemRecord->getDecl();
+ return RD->getASTContext().getCanonicalTagType(RD);
+ }
if (const auto *E = asExpr())
return E->getType();
llvm_unreachable("Invalid descriptor type");
@@ -453,7 +452,7 @@ SourceInfo Descriptor::getLoc() const {
}
bool Descriptor::hasTrivialDtor() const {
- if (isPrimitive() || isPrimitiveArray() || isDummy())
+ if (isPrimitive() || isPrimitiveArray())
return true;
if (isRecord()) {
@@ -462,17 +461,16 @@ bool Descriptor::hasTrivialDtor() const {
return !Dtor || Dtor->isTrivial();
}
+ if (!ElemDesc)
+ return true;
// Composite arrays.
- assert(ElemDesc);
return ElemDesc->hasTrivialDtor();
}
bool Descriptor::isUnion() const { return isRecord() && ElemRecord->isUnion(); }
InitMap::InitMap(unsigned N)
- : UninitFields(N), Data(std::make_unique<T[]>(numFields(N))) {
- std::fill_n(data(), numFields(N), 0);
-}
+ : UninitFields(N), Data(std::make_unique<T[]>(numFields(N))) {}
bool InitMap::initializeElement(unsigned I) {
unsigned Bucket = I / PER_FIELD;
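The dropped std::fill_n in the InitMap constructor leans on a standard-library guarantee; a standalone sketch:

#include <cassert>
#include <memory>

int main() {
  // std::make_unique<T[]>(N) value-initializes, i.e. performs `new T[N]()`,
  // so every element already starts out zeroed; an explicit fill is redundant.
  auto Data = std::make_unique<unsigned[]>(8);
  for (unsigned I = 0; I != 8; ++I)
    assert(Data[I] == 0);
}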
diff --git a/clang/lib/AST/ByteCode/Descriptor.h b/clang/lib/AST/ByteCode/Descriptor.h
index cd34e11..90dc2b4 100644
--- a/clang/lib/AST/ByteCode/Descriptor.h
+++ b/clang/lib/AST/ByteCode/Descriptor.h
@@ -24,7 +24,7 @@ class Record;
class SourceInfo;
struct InitMap;
struct Descriptor;
-enum PrimType : unsigned;
+enum PrimType : uint8_t;
using DeclTy = llvm::PointerUnion<const Decl *, const Expr *>;
using InitMapPtr = std::optional<std::pair<bool, std::shared_ptr<InitMap>>>;
@@ -166,8 +166,6 @@ public:
const bool IsVolatile = false;
/// Flag indicating if the block is an array.
const bool IsArray = false;
- /// Flag indicating if this is a dummy descriptor.
- bool IsDummy = false;
bool IsConstexprUnknown = false;
/// Storage management methods.
@@ -203,9 +201,6 @@ public:
/// Allocates a dummy descriptor.
Descriptor(const DeclTy &D, MetadataSize MD = std::nullopt);
- /// Make this descriptor a dummy descriptor.
- void makeDummy() { IsDummy = true; }
-
QualType getType() const;
QualType getElemQualType() const;
QualType getDataType(const ASTContext &Ctx) const;
@@ -273,8 +268,6 @@ public:
bool isRecord() const { return !IsArray && ElemRecord; }
/// Checks if the descriptor is of a union.
bool isUnion() const;
- /// Checks if this is a dummy descriptor.
- bool isDummy() const { return IsDummy; }
/// Whether variables of this descriptor need their destructor called or not.
bool hasTrivialDtor() const;
diff --git a/clang/lib/AST/ByteCode/Disasm.cpp b/clang/lib/AST/ByteCode/Disasm.cpp
index 5049a65..ac904d3 100644
--- a/clang/lib/AST/ByteCode/Disasm.cpp
+++ b/clang/lib/AST/ByteCode/Disasm.cpp
@@ -338,7 +338,7 @@ LLVM_DUMP_METHOD void Program::dump(llvm::raw_ostream &OS) const {
}
OS << "\n";
- if (GP.isInitialized() && Desc->isPrimitive() && !Desc->isDummy()) {
+ if (GP.isInitialized() && Desc->isPrimitive() && !G->block()->isDummy()) {
OS << " ";
{
ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_CYAN, false});
@@ -394,8 +394,6 @@ LLVM_DUMP_METHOD void Descriptor::dump(llvm::raw_ostream &OS) const {
else if (isUnknownSizeArray())
OS << " unknown-size-array";
- if (isDummy())
- OS << " dummy";
if (IsConstexprUnknown)
OS << " constexpr-unknown";
}
@@ -541,50 +539,27 @@ LLVM_DUMP_METHOD void Block::dump(llvm::raw_ostream &OS) const {
else
OS << "-\n";
OS << " Pointers: " << NPointers << "\n";
- OS << " Dead: " << IsDead << "\n";
+ OS << " Dead: " << isDead() << "\n";
OS << " Static: " << IsStatic << "\n";
- OS << " Extern: " << IsExtern << "\n";
+ OS << " Extern: " << isExtern() << "\n";
OS << " Initialized: " << IsInitialized << "\n";
- OS << " Weak: " << IsWeak << "\n";
- OS << " Dynamic: " << IsDynamic << "\n";
+ OS << " Weak: " << isWeak() << "\n";
+ OS << " Dummy: " << isDummy() << '\n';
+ OS << " Dynamic: " << isDynamic() << "\n";
}
LLVM_DUMP_METHOD void EvaluationResult::dump() const {
- assert(Ctx);
auto &OS = llvm::errs();
- const ASTContext &ASTCtx = Ctx->getASTContext();
- switch (Kind) {
- case Empty:
+ if (empty()) {
OS << "Empty\n";
- break;
- case RValue:
- OS << "RValue: ";
- std::get<APValue>(Value).dump(OS, ASTCtx);
- break;
- case LValue: {
- assert(Source);
- QualType SourceType;
- if (const auto *D = dyn_cast<const Decl *>(Source)) {
- if (const auto *VD = dyn_cast<ValueDecl>(D))
- SourceType = VD->getType();
- } else if (const auto *E = dyn_cast<const Expr *>(Source)) {
- SourceType = E->getType();
- }
-
- OS << "LValue: ";
- if (const auto *P = std::get_if<Pointer>(&Value))
- P->toAPValue(ASTCtx).printPretty(OS, ASTCtx, SourceType);
- else if (const auto *FP = std::get_if<FunctionPointer>(&Value)) // Nope
- FP->toAPValue(ASTCtx).printPretty(OS, ASTCtx, SourceType);
- OS << "\n";
- break;
- }
- case Invalid:
+ } else if (isInvalid()) {
OS << "Invalid\n";
- break;
- case Valid:
- OS << "Valid\n";
- break;
+ } else {
+ OS << "Value: ";
+#ifndef NDEBUG
+ assert(Ctx);
+ Value.dump(OS, Ctx->getASTContext());
+#endif
}
}
diff --git a/clang/lib/AST/ByteCode/DynamicAllocator.cpp b/clang/lib/AST/ByteCode/DynamicAllocator.cpp
index 9b8b664..4fedac6 100644
--- a/clang/lib/AST/ByteCode/DynamicAllocator.cpp
+++ b/clang/lib/AST/ByteCode/DynamicAllocator.cpp
@@ -13,25 +13,6 @@
using namespace clang;
using namespace clang::interp;
-// FIXME: There is a peculiar problem with the way we track pointers
-// to blocks and the way we allocate dynamic memory.
-//
-// When we have code like this:
-// while (true) {
-// char *buffer = new char[1024];
-// delete[] buffer;
-// }
-//
-// We have a local variable 'buffer' pointing to the heap allocated memory.
-// When deallocating the memory via delete[], that local variable still
-// points to the memory, which means we will create a DeadBlock for it and move
-// it over to that block, essentially duplicating the allocation. Moving
-// the data is also slow.
-//
-// However, when we actually try to access the allocation after it has been
-// freed, we need the block to still exist (alive or dead) so we can tell
-// that it's a dynamic allocation.
-
DynamicAllocator::~DynamicAllocator() { cleanup(); }
void DynamicAllocator::cleanup() {
@@ -42,12 +23,15 @@ void DynamicAllocator::cleanup() {
for (auto &Iter : AllocationSites) {
auto &AllocSite = Iter.second;
for (auto &Alloc : AllocSite.Allocations) {
- Block *B = reinterpret_cast<Block *>(Alloc.Memory.get());
+ Block *B = Alloc.block();
+ assert(!B->isDead());
+ assert(B->isInitialized());
B->invokeDtor();
+
if (B->hasPointers()) {
while (B->Pointers) {
Pointer *Next = B->Pointers->asBlockPointer().Next;
- B->Pointers->PointeeStorage.BS.Pointee = nullptr;
+ B->Pointers->BS.Pointee = nullptr;
B->Pointers = Next;
}
B->Pointers = nullptr;
@@ -89,6 +73,12 @@ Block *DynamicAllocator::allocate(const Descriptor *D, unsigned EvalID,
assert(D);
assert(D->asExpr());
+ // Garbage collection. Remove all dead allocations that don't have pointers to
+ // them anymore.
+ llvm::erase_if(DeadAllocations, [](Allocation &Alloc) -> bool {
+ return !Alloc.block()->hasPointers();
+ });
+
auto Memory =
std::make_unique<std::byte[]>(sizeof(Block) + D->getAllocSize());
auto *B = new (Memory.get()) Block(EvalID, D, /*isStatic=*/false);
@@ -111,13 +101,17 @@ Block *DynamicAllocator::allocate(const Descriptor *D, unsigned EvalID,
ID->LifeState =
AllocForm == Form::Operator ? Lifetime::Ended : Lifetime::Started;
- B->IsDynamic = true;
-
- if (auto It = AllocationSites.find(D->asExpr()); It != AllocationSites.end())
+ if (auto It = AllocationSites.find(D->asExpr());
+ It != AllocationSites.end()) {
It->second.Allocations.emplace_back(std::move(Memory));
- else
+ B->setDynAllocId(It->second.NumAllocs);
+ ++It->second.NumAllocs;
+ } else {
AllocationSites.insert(
{D->asExpr(), AllocationSite(std::move(Memory), AllocForm)});
+ B->setDynAllocId(0);
+ }
+ assert(B->isDynamic());
return B;
}
@@ -128,23 +122,39 @@ bool DynamicAllocator::deallocate(const Expr *Source,
return false;
auto &Site = It->second;
- assert(Site.size() > 0);
+ assert(!Site.empty());
// Find the Block to delete.
- auto AllocIt = llvm::find_if(Site.Allocations, [&](const Allocation &A) {
- const Block *B = reinterpret_cast<const Block *>(A.Memory.get());
- return BlockToDelete == B;
+ auto *AllocIt = llvm::find_if(Site.Allocations, [&](const Allocation &A) {
+ return BlockToDelete == A.block();
});
assert(AllocIt != Site.Allocations.end());
- Block *B = reinterpret_cast<Block *>(AllocIt->Memory.get());
+ Block *B = AllocIt->block();
+ assert(B->isInitialized());
+ assert(!B->isDead());
B->invokeDtor();
- S.deallocate(B);
- Site.Allocations.erase(AllocIt);
+ // Almost all our dynamic allocations have a pointer pointing to them
+ // when we deallocate them, since otherwise we can't call delete() at all.
+ // This means that we would usually need to create DeadBlocks for all of them.
+ // To work around that, we instead mark them as dead without moving the data
+ // over to a DeadBlock and simply keep the block in a separate DeadAllocations
+ // list.
+ if (B->hasPointers()) {
+ B->AccessFlags |= Block::DeadFlag;
+ DeadAllocations.push_back(std::move(*AllocIt));
+ Site.Allocations.erase(AllocIt);
+
+ if (Site.size() == 0)
+ AllocationSites.erase(It);
+ return true;
+ }
- if (Site.size() == 0)
+ // Get rid of the allocation altogether.
+ Site.Allocations.erase(AllocIt);
+ if (Site.empty())
AllocationSites.erase(It);
return true;
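The case this rewrite optimizes is the allocate/free loop from the removed FIXME; a minimal constexpr reproduction (C++20, function name is illustrative):

// Each `delete` runs while a live local still points at the memory. Rather
// than copying the block's contents into a DeadBlock on every iteration,
// the block is now flagged dead in place and reclaimed lazily once no
// pointer refers to it (see the erase_if sweep in allocate()).
constexpr bool churn() {
  for (int I = 0; I != 1024; ++I) {
    int *P = new int(I);
    delete P; // P still points at the now-dead block here.
  }
  return true;
}
static_assert(churn());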
diff --git a/clang/lib/AST/ByteCode/DynamicAllocator.h b/clang/lib/AST/ByteCode/DynamicAllocator.h
index cff09bf..ab1058b 100644
--- a/clang/lib/AST/ByteCode/DynamicAllocator.h
+++ b/clang/lib/AST/ByteCode/DynamicAllocator.h
@@ -43,18 +43,22 @@ private:
std::unique_ptr<std::byte[]> Memory;
Allocation(std::unique_ptr<std::byte[]> Memory)
: Memory(std::move(Memory)) {}
+ Block *block() const { return reinterpret_cast<Block *>(Memory.get()); }
};
struct AllocationSite {
llvm::SmallVector<Allocation> Allocations;
+ unsigned NumAllocs = 0;
Form AllocForm;
AllocationSite(std::unique_ptr<std::byte[]> Memory, Form AllocForm)
: AllocForm(AllocForm) {
Allocations.push_back({std::move(Memory)});
+ ++NumAllocs;
}
size_t size() const { return Allocations.size(); }
+ bool empty() const { return Allocations.empty(); }
};
public:
@@ -65,8 +69,6 @@ public:
void cleanup();
- unsigned getNumAllocations() const { return AllocationSites.size(); }
-
/// Allocate ONE element of the given descriptor.
Block *allocate(const Descriptor *D, unsigned EvalID, Form AllocForm);
/// Allocate \p NumElements primitive elements of the given type.
@@ -96,8 +98,13 @@ public:
return llvm::make_range(AllocationSites.begin(), AllocationSites.end());
}
+ bool hasAllocations() const { return !AllocationSites.empty(); }
+
private:
llvm::DenseMap<const Expr *, AllocationSite> AllocationSites;
+  // Allocations that have already been deallocated but still had pointers
+  // to them at the time of deallocation.
+ llvm::SmallVector<Allocation> DeadAllocations;
using PoolAllocTy = llvm::BumpPtrAllocator;
PoolAllocTy DescAllocator;
diff --git a/clang/lib/AST/ByteCode/EvalEmitter.cpp b/clang/lib/AST/ByteCode/EvalEmitter.cpp
index 976b7c0..2860a09 100644
--- a/clang/lib/AST/ByteCode/EvalEmitter.cpp
+++ b/clang/lib/AST/ByteCode/EvalEmitter.cpp
@@ -98,10 +98,7 @@ bool EvalEmitter::interpretCall(const FunctionDecl *FD, const Expr *E) {
this->Params.insert({PD, {0, false}});
}
- if (!this->visit(E))
- return false;
- PrimType T = Ctx.classify(E).value_or(PT_Ptr);
- return this->emitPop(T, E);
+ return this->visitExpr(E, /*DestroyToplevelScope=*/false);
}
void EvalEmitter::emitLabel(LabelTy Label) { CurrentLabel = Label; }
@@ -187,7 +184,7 @@ template <PrimType OpType> bool EvalEmitter::emitRet(const SourceInfo &Info) {
return true;
using T = typename PrimConv<OpType>::T;
- EvalResult.setValue(S.Stk.pop<T>().toAPValue(Ctx.getASTContext()));
+ EvalResult.takeValue(S.Stk.pop<T>().toAPValue(Ctx.getASTContext()));
return true;
}
@@ -198,7 +195,7 @@ template <> bool EvalEmitter::emitRet<PT_Ptr>(const SourceInfo &Info) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (Ptr.isFunctionPointer()) {
- EvalResult.setValue(Ptr.toAPValue(Ctx.getASTContext()));
+ EvalResult.takeValue(Ptr.toAPValue(Ctx.getASTContext()));
return true;
}
@@ -216,10 +213,8 @@ template <> bool EvalEmitter::emitRet<PT_Ptr>(const SourceInfo &Info) {
if (!Ptr.isZero() && !Ptr.isDereferencable())
return false;
- if (S.getLangOpts().CPlusPlus11 && Ptr.isBlockPointer() &&
- !CheckFinalLoad(S, OpPC, Ptr)) {
+ if (!Ptr.isZero() && !CheckFinalLoad(S, OpPC, Ptr))
return false;
- }
// Never allow reading from a non-const pointer, unless the memory
// has been created in this evaluation.
@@ -229,7 +224,7 @@ template <> bool EvalEmitter::emitRet<PT_Ptr>(const SourceInfo &Info) {
if (std::optional<APValue> V =
Ptr.toRValue(Ctx, EvalResult.getSourceType())) {
- EvalResult.setValue(*V);
+ EvalResult.takeValue(std::move(*V));
} else {
return false;
}
@@ -238,14 +233,14 @@ template <> bool EvalEmitter::emitRet<PT_Ptr>(const SourceInfo &Info) {
// the result, even if the pointer is dead.
// This will later be diagnosed by CheckLValueConstantExpression.
if (Ptr.isBlockPointer() && !Ptr.block()->isStatic()) {
- EvalResult.setValue(Ptr.toAPValue(Ctx.getASTContext()));
+ EvalResult.takeValue(Ptr.toAPValue(Ctx.getASTContext()));
return true;
}
if (!Ptr.isLive() && !Ptr.isTemporary())
return false;
- EvalResult.setValue(Ptr.toAPValue(Ctx.getASTContext()));
+ EvalResult.takeValue(Ptr.toAPValue(Ctx.getASTContext()));
}
return true;
@@ -266,7 +261,7 @@ bool EvalEmitter::emitRetValue(const SourceInfo &Info) {
if (std::optional<APValue> APV =
Ptr.toRValue(S.getASTContext(), EvalResult.getSourceType())) {
- EvalResult.setValue(*APV);
+ EvalResult.takeValue(std::move(*APV));
return true;
}
@@ -292,7 +287,7 @@ bool EvalEmitter::emitGetLocal(uint32_t I, const SourceInfo &Info) {
Block *B = getLocal(I);
- if (!CheckLocalLoad(S, OpPC, Pointer(B)))
+ if (!CheckLocalLoad(S, OpPC, B))
return false;
S.Stk.push<T>(*reinterpret_cast<T *>(B->data()));
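The setValue → takeValue renames above track a copy-to-move change; a reduced sketch of the difference (the Result struct is illustrative):

#include <utility>
#include <vector>

struct Result {
  std::vector<int> Value;
  void setValue(const std::vector<int> &V) { Value = V; }        // copies
  void takeValue(std::vector<int> &&V) { Value = std::move(V); } // steals
};

void fill(Result &R) {
  std::vector<int> Big(1024, 7);
  R.takeValue(std::move(Big)); // no element-wise copy of Big's storage
}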
diff --git a/clang/lib/AST/ByteCode/EvaluationResult.cpp b/clang/lib/AST/ByteCode/EvaluationResult.cpp
index b11531f..ba81878 100644
--- a/clang/lib/AST/ByteCode/EvaluationResult.cpp
+++ b/clang/lib/AST/ByteCode/EvaluationResult.cpp
@@ -8,6 +8,7 @@
#include "EvaluationResult.h"
#include "InterpState.h"
+#include "Pointer.h"
#include "Record.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
@@ -16,41 +17,6 @@
namespace clang {
namespace interp {
-APValue EvaluationResult::toAPValue() const {
- assert(!empty());
- switch (Kind) {
- case LValue:
- // Either a pointer or a function pointer.
- if (const auto *P = std::get_if<Pointer>(&Value))
- return P->toAPValue(Ctx->getASTContext());
- else if (const auto *FP = std::get_if<FunctionPointer>(&Value))
- return FP->toAPValue(Ctx->getASTContext());
- else
- llvm_unreachable("Unhandled LValue type");
- break;
- case RValue:
- return std::get<APValue>(Value);
- case Valid:
- return APValue();
- default:
- llvm_unreachable("Unhandled result kind?");
- }
-}
-
-std::optional<APValue> EvaluationResult::toRValue() const {
- if (Kind == RValue)
- return toAPValue();
-
- assert(Kind == LValue);
-
- // We have a pointer and want an RValue.
- if (const auto *P = std::get_if<Pointer>(&Value))
- return P->toRValue(*Ctx, getSourceType());
- else if (const auto *FP = std::get_if<FunctionPointer>(&Value)) // Nope
- return FP->toAPValue(Ctx->getASTContext());
- llvm_unreachable("Unhandled lvalue kind");
-}
-
static void DiagnoseUninitializedSubobject(InterpState &S, SourceLocation Loc,
const FieldDecl *SubObjDecl) {
assert(SubObjDecl && "Subobject declaration does not exist");
@@ -66,8 +32,12 @@ static bool CheckFieldsInitialized(InterpState &S, SourceLocation Loc,
static bool CheckArrayInitialized(InterpState &S, SourceLocation Loc,
const Pointer &BasePtr,
const ConstantArrayType *CAT) {
- bool Result = true;
size_t NumElems = CAT->getZExtSize();
+
+ if (NumElems == 0)
+ return true;
+
+ bool Result = true;
QualType ElemType = CAT->getElementType();
if (ElemType->isRecordType()) {
@@ -82,8 +52,18 @@ static bool CheckArrayInitialized(InterpState &S, SourceLocation Loc,
Result &= CheckArrayInitialized(S, Loc, ElemPtr, ElemCAT);
}
} else {
+ // Primitive arrays.
+ if (S.getContext().canClassify(ElemType)) {
+ if (BasePtr.allElementsInitialized()) {
+ return true;
+ } else {
+ DiagnoseUninitializedSubobject(S, Loc, BasePtr.getField());
+ return false;
+ }
+ }
+
for (size_t I = 0; I != NumElems; ++I) {
- if (!BasePtr.atIndex(I).isInitialized()) {
+ if (!BasePtr.isElementInitialized(I)) {
DiagnoseUninitializedSubobject(S, Loc, BasePtr.getField());
Result = false;
}
@@ -178,8 +158,8 @@ bool EvaluationResult::checkFullyInitialized(InterpState &S,
static void collectBlocks(const Pointer &Ptr,
llvm::SetVector<const Block *> &Blocks) {
auto isUsefulPtr = [](const Pointer &P) -> bool {
- return P.isLive() && !P.isZero() && !P.isDummy() && P.isDereferencable() &&
- !P.isUnknownSizeArray() && !P.isOnePastEnd();
+ return P.isLive() && P.isBlockPointer() && !P.isZero() && !P.isDummy() &&
+ P.isDereferencable() && !P.isUnknownSizeArray() && !P.isOnePastEnd();
};
if (!isUsefulPtr(Ptr))
diff --git a/clang/lib/AST/ByteCode/EvaluationResult.h b/clang/lib/AST/ByteCode/EvaluationResult.h
index 3b6c65ef..c296cc9 100644
--- a/clang/lib/AST/ByteCode/EvaluationResult.h
+++ b/clang/lib/AST/ByteCode/EvaluationResult.h
@@ -9,23 +9,22 @@
#ifndef LLVM_CLANG_AST_INTERP_EVALUATION_RESULT_H
#define LLVM_CLANG_AST_INTERP_EVALUATION_RESULT_H
-#include "FunctionPointer.h"
-#include "Pointer.h"
#include "clang/AST/APValue.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
-#include <optional>
-#include <variant>
namespace clang {
namespace interp {
class EvalEmitter;
class Context;
+class Pointer;
+class SourceInfo;
+class InterpState;
/// Defines the result of an evaluation.
///
-/// The result might be in different forms--one of the pointer types,
-/// an APValue, or nothing.
+/// The Kind defines whether the evaluation was invalid, valid (but empty,
+/// e.g. for void expressions), or whether we have a valid evaluation result.
///
/// We use this class to inspect and diagnose the result, as well as
/// convert it to the requested form.
@@ -33,8 +32,6 @@ class EvaluationResult final {
public:
enum ResultKind {
Empty, // Initial state.
- LValue, // Result is an lvalue/pointer.
- RValue, // Result is an rvalue.
Invalid, // Result is invalid.
Valid, // Result is valid and empty.
};
@@ -42,29 +39,18 @@ public:
using DeclTy = llvm::PointerUnion<const Decl *, const Expr *>;
private:
+#ifndef NDEBUG
const Context *Ctx = nullptr;
- std::variant<std::monostate, Pointer, FunctionPointer, APValue> Value;
+#endif
+ APValue Value;
ResultKind Kind = Empty;
- DeclTy Source = nullptr; // Currently only needed for dump().
-
- EvaluationResult(ResultKind Kind) : Kind(Kind) {
- // Leave everything empty. Can be used as an
- // error marker or for void return values.
- assert(Kind == Valid || Kind == Invalid);
- }
+ DeclTy Source = nullptr;
void setSource(DeclTy D) { Source = D; }
- void setValue(const APValue &V) {
- // V could still be an LValue.
+ void takeValue(APValue &&V) {
assert(empty());
Value = std::move(V);
- Kind = RValue;
- }
- void setFunctionPointer(const FunctionPointer &P) {
- assert(empty());
- Value = P;
- Kind = LValue;
}
void setInvalid() {
// We are NOT asserting empty() here, since setting it to invalid
@@ -77,22 +63,23 @@ private:
}
public:
+#ifndef NDEBUG
EvaluationResult(const Context *Ctx) : Ctx(Ctx) {}
+#else
+ EvaluationResult(const Context *Ctx) {}
+#endif
bool empty() const { return Kind == Empty; }
bool isInvalid() const { return Kind == Invalid; }
- bool isLValue() const { return Kind == LValue; }
- bool isRValue() const { return Kind == RValue; }
- bool isPointer() const { return std::holds_alternative<Pointer>(Value); }
- /// Returns an APValue for the evaluation result. The returned
- /// APValue might be an LValue or RValue.
- APValue toAPValue() const;
+ /// Returns an APValue for the evaluation result.
+ APValue toAPValue() const {
+ assert(!empty());
+ assert(!isInvalid());
+ return Value;
+ }
- /// If the result is an LValue, convert that to an RValue
- /// and return it. This may fail, e.g. if the result is an
- /// LValue and we can't read from it.
- std::optional<APValue> toRValue() const;
+ APValue stealAPValue() { return std::move(Value); }
/// Check that all subobjects of the given pointer have been initialized.
bool checkFullyInitialized(InterpState &S, const Pointer &Ptr) const;
@@ -105,7 +92,7 @@ public:
if (const auto *D =
dyn_cast_if_present<ValueDecl>(Source.dyn_cast<const Decl *>()))
return D->getType();
- else if (const auto *E = Source.dyn_cast<const Expr *>())
+ if (const auto *E = Source.dyn_cast<const Expr *>())
return E->getType();
return QualType();
}
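A reduced sketch of the debug-only member pattern introduced here, where the Context pointer exists solely for dump() (names are illustrative):

class ResultSketch {
#ifndef NDEBUG
  const void *Ctx = nullptr; // retained only for debug dumping
#endif
public:
#ifndef NDEBUG
  ResultSketch(const void *Ctx) : Ctx(Ctx) {}
#else
  ResultSketch(const void *) {} // release builds carry no Ctx at all
#endif
};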
diff --git a/clang/lib/AST/ByteCode/Function.h b/clang/lib/AST/ByteCode/Function.h
index 92363b6..af429b7 100644
--- a/clang/lib/AST/ByteCode/Function.h
+++ b/clang/lib/AST/ByteCode/Function.h
@@ -28,7 +28,7 @@ namespace interp {
class Program;
class ByteCodeEmitter;
class Pointer;
-enum PrimType : uint32_t;
+enum PrimType : uint8_t;
/// Describes a scope block.
///
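PrimType's forward declarations are narrowed to uint8_t in several headers at once; a sketch of why they must all agree (enum name is illustrative):

#include <cstdint>

// An enum may be forward-declared only with a fixed underlying type, and
// every redeclaration must repeat exactly that type; a mismatch such as
// `enum E : unsigned;` after `enum E : uint8_t;` is ill-formed.
enum E : uint8_t;
void consume(E Val); // usable before the enumerators are visible
enum E : uint8_t { E_A, E_B };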
diff --git a/clang/lib/AST/ByteCode/Integral.h b/clang/lib/AST/ByteCode/Integral.h
index af5cd2d..1318024 100644
--- a/clang/lib/AST/ByteCode/Integral.h
+++ b/clang/lib/AST/ByteCode/Integral.h
@@ -318,6 +318,11 @@ private:
template <typename T> static bool CheckMulUB(T A, T B, T &R) {
if constexpr (std::is_signed_v<T>) {
return llvm::MulOverflow<T>(A, B, R);
+ } else if constexpr (sizeof(T) < sizeof(int)) {
+ // Silly integer promotion rules will convert both A and B to int,
+      // even if T is unsigned. Prevent that by manually casting to unsigned first.
+ R = static_cast<T>(static_cast<unsigned>(A) * static_cast<unsigned>(B));
+ return false;
} else {
R = A * B;
return false;
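The promotion hazard the new branch sidesteps, as a standalone sketch:

#include <cstdint>

uint16_t mul(uint16_t A, uint16_t B) {
  // Both operands promote to (signed) int before multiplying, so e.g.
  // 0xFFFF * 0xFFFF overflows int: undefined behavior despite the
  // unsigned operand types.
  // return A * B; // UB for large A and B
  return static_cast<uint16_t>(static_cast<unsigned>(A) *
                               static_cast<unsigned>(B)); // well-defined
}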
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index eb4e480..06b2bdc 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -211,25 +211,26 @@ static void diagnoseNonConstVariable(InterpState &S, CodePtr OpPC,
S.Note(VD->getLocation(), diag::note_declared_at);
}
-static bool CheckTemporary(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+static bool CheckTemporary(InterpState &S, CodePtr OpPC, const Block *B,
AccessKinds AK) {
- if (auto ID = Ptr.getDeclID()) {
- if (!Ptr.isStaticTemporary())
+ if (B->getDeclID()) {
+ if (!(B->isStatic() && B->isTemporary()))
return true;
const auto *MTE = dyn_cast_if_present<MaterializeTemporaryExpr>(
- Ptr.getDeclDesc()->asExpr());
+ B->getDescriptor()->asExpr());
if (!MTE)
return true;
// FIXME(perf): Since we do this check on every Load from a static
// temporary, it might make sense to cache the value of the
// isUsableInConstantExpressions call.
- if (!MTE->isUsableInConstantExpressions(S.getASTContext()) &&
- Ptr.block()->getEvalID() != S.Ctx.getEvalID()) {
+ if (B->getEvalID() != S.Ctx.getEvalID() &&
+ !MTE->isUsableInConstantExpressions(S.getASTContext())) {
const SourceInfo &E = S.Current->getSource(OpPC);
S.FFDiag(E, diag::note_constexpr_access_static_temporary, 1) << AK;
- S.Note(Ptr.getDeclLoc(), diag::note_constexpr_temporary_here);
+ S.Note(B->getDescriptor()->getLocation(),
+ diag::note_constexpr_temporary_here);
return false;
}
}
@@ -517,7 +518,7 @@ bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK) {
- if (!Ptr.isOnePastEnd())
+ if (!Ptr.isOnePastEnd() && !Ptr.isZeroSizeArray())
return true;
if (S.getLangOpts().CPlusPlus) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
@@ -529,7 +530,7 @@ bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK) {
- if (!Ptr.isElementPastEnd())
+ if (!Ptr.isElementPastEnd() && !Ptr.isZeroSizeArray())
return true;
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.FFDiag(Loc, diag::note_constexpr_past_end_subobject)
@@ -658,17 +659,19 @@ static bool CheckVolatile(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return false;
}
-bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK) {
+bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK) {
assert(Ptr.isLive());
+ assert(!Ptr.isInitialized());
+ return DiagnoseUninitialized(S, OpPC, Ptr.isExtern(), Ptr.getDeclDesc(), AK);
+}
- if (Ptr.isInitialized())
- return true;
-
- if (Ptr.isExtern() && S.checkingPotentialConstantExpression())
+bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, bool Extern,
+ const Descriptor *Desc, AccessKinds AK) {
+ if (Extern && S.checkingPotentialConstantExpression())
return false;
- if (const auto *VD = Ptr.getDeclDesc()->asVarDecl();
+ if (const auto *VD = Desc->asVarDecl();
VD && (VD->isConstexpr() || VD->hasGlobalStorage())) {
if (VD == S.EvaluatingDecl &&
@@ -703,9 +706,9 @@ bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return false;
}
-static bool CheckLifetime(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+static bool CheckLifetime(InterpState &S, CodePtr OpPC, Lifetime LT,
AccessKinds AK) {
- if (Ptr.getLifetime() == Lifetime::Started)
+ if (LT == Lifetime::Started)
return true;
if (!S.checkingPotentialConstantExpression()) {
@@ -715,11 +718,11 @@ static bool CheckLifetime(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return false;
}
-static bool CheckWeak(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- if (!Ptr.isWeak())
+static bool CheckWeak(InterpState &S, CodePtr OpPC, const Block *B) {
+ if (!B->isWeak())
return true;
- const auto *VD = Ptr.getDeclDesc()->asVarDecl();
+ const auto *VD = B->getDescriptor()->asVarDecl();
assert(VD);
S.FFDiag(S.Current->getLocation(OpPC), diag::note_constexpr_var_init_weak)
<< VD;
@@ -732,57 +735,100 @@ static bool CheckWeak(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
// ones removed that are impossible on primitive global values.
// For example, since those can't be members of structs, they also can't
// be mutable.
-bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- if (!CheckExtern(S, OpPC, Ptr))
- return false;
- if (!CheckConstant(S, OpPC, Ptr))
- return false;
- if (!CheckDummy(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckTemporary(S, OpPC, Ptr, AK_Read))
+bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Block *B) {
+ const auto &Desc =
+ *reinterpret_cast<const GlobalInlineDescriptor *>(B->rawData());
+ if (!B->isAccessible()) {
+ if (!CheckExtern(S, OpPC, Pointer(const_cast<Block *>(B))))
+ return false;
+ if (!CheckDummy(S, OpPC, B, AK_Read))
+ return false;
+ return CheckWeak(S, OpPC, B);
+ }
+
+ if (!CheckConstant(S, OpPC, B->getDescriptor()))
return false;
- if (!CheckWeak(S, OpPC, Ptr))
+ if (Desc.InitState != GlobalInitState::Initialized)
+ return DiagnoseUninitialized(S, OpPC, B->isExtern(), B->getDescriptor(),
+ AK_Read);
+ if (!CheckTemporary(S, OpPC, B, AK_Read))
return false;
- if (!CheckVolatile(S, OpPC, Ptr, AK_Read))
+ if (B->getDescriptor()->IsVolatile) {
+ if (!S.getLangOpts().CPlusPlus)
+ return Invalid(S, OpPC);
+
+ const ValueDecl *D = B->getDescriptor()->asValueDecl();
+ S.FFDiag(S.Current->getLocation(OpPC),
+ diag::note_constexpr_access_volatile_obj, 1)
+ << AK_Read << 1 << D;
+ S.Note(D->getLocation(), diag::note_constexpr_volatile_here) << 1;
return false;
+ }
return true;
}
// Similarly, for local loads.
-bool CheckLocalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- if (!CheckLifetime(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckVolatile(S, OpPC, Ptr, AK_Read))
+bool CheckLocalLoad(InterpState &S, CodePtr OpPC, const Block *B) {
+ assert(!B->isExtern());
+ const auto &Desc = *reinterpret_cast<const InlineDescriptor *>(B->rawData());
+ if (!CheckLifetime(S, OpPC, Desc.LifeState, AK_Read))
+ return false;
+ if (!Desc.IsInitialized)
+ return DiagnoseUninitialized(S, OpPC, /*Extern=*/false, B->getDescriptor(),
+ AK_Read);
+ if (B->getDescriptor()->IsVolatile) {
+ if (!S.getLangOpts().CPlusPlus)
+ return Invalid(S, OpPC);
+
+ const ValueDecl *D = B->getDescriptor()->asValueDecl();
+ S.FFDiag(S.Current->getLocation(OpPC),
+ diag::note_constexpr_access_volatile_obj, 1)
+ << AK_Read << 1 << D;
+ S.Note(D->getLocation(), diag::note_constexpr_volatile_here) << 1;
return false;
+ }
return true;
}
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK) {
- if (!CheckLive(S, OpPC, Ptr, AK))
- return false;
- if (!CheckExtern(S, OpPC, Ptr))
+ if (!Ptr.isBlockPointer()) {
+ if (Ptr.isZero()) {
+ const auto &Src = S.Current->getSource(OpPC);
+
+ if (Ptr.isField())
+ S.FFDiag(Src, diag::note_constexpr_null_subobject) << CSK_Field;
+ else
+ S.FFDiag(Src, diag::note_constexpr_access_null) << AK;
+ }
return false;
+ }
+
+ // Block pointers are the only ones we can actually read from.
+ if (!Ptr.block()->isAccessible()) {
+ if (!CheckLive(S, OpPC, Ptr, AK))
+ return false;
+ if (!CheckExtern(S, OpPC, Ptr))
+ return false;
+ if (!CheckDummy(S, OpPC, Ptr.block(), AK))
+ return false;
+ if (!CheckWeak(S, OpPC, Ptr.block()))
+ return false;
+ }
+
if (!CheckConstant(S, OpPC, Ptr))
return false;
- if (!CheckDummy(S, OpPC, Ptr, AK))
- return false;
if (!CheckRange(S, OpPC, Ptr, AK))
return false;
if (!CheckActive(S, OpPC, Ptr, AK))
return false;
- if (!CheckLifetime(S, OpPC, Ptr, AK))
- return false;
- if (!CheckInitialized(S, OpPC, Ptr, AK))
- return false;
- if (!CheckTemporary(S, OpPC, Ptr, AK))
+ if (!CheckLifetime(S, OpPC, Ptr.getLifetime(), AK))
return false;
- if (!CheckWeak(S, OpPC, Ptr))
+ if (!Ptr.isInitialized())
+ return DiagnoseUninitialized(S, OpPC, Ptr, AK);
+ if (!CheckTemporary(S, OpPC, Ptr.block(), AK))
return false;
+
if (!CheckMutable(S, OpPC, Ptr))
return false;
if (!CheckVolatile(S, OpPC, Ptr, AK))
@@ -793,26 +839,30 @@ bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
/// This is not used by any of the opcodes directly. It's used by
/// EvalEmitter to do the final lvalue-to-rvalue conversion.
bool CheckFinalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- if (!CheckLive(S, OpPC, Ptr, AK_Read))
+ assert(!Ptr.isZero());
+ if (!Ptr.isBlockPointer())
return false;
+
+ if (!Ptr.block()->isAccessible()) {
+ if (!CheckLive(S, OpPC, Ptr, AK_Read))
+ return false;
+ if (!CheckExtern(S, OpPC, Ptr))
+ return false;
+ if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
+ return false;
+ return CheckWeak(S, OpPC, Ptr.block());
+ }
+
if (!CheckConstant(S, OpPC, Ptr))
return false;
- if (!CheckDummy(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckExtern(S, OpPC, Ptr))
- return false;
- if (!CheckRange(S, OpPC, Ptr, AK_Read))
- return false;
if (!CheckActive(S, OpPC, Ptr, AK_Read))
return false;
- if (!CheckLifetime(S, OpPC, Ptr, AK_Read))
+ if (!CheckLifetime(S, OpPC, Ptr.getLifetime(), AK_Read))
return false;
- if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckTemporary(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckWeak(S, OpPC, Ptr))
+ if (!Ptr.isInitialized())
+ return DiagnoseUninitialized(S, OpPC, Ptr, AK_Read);
+ if (!CheckTemporary(S, OpPC, Ptr.block(), AK_Read))
return false;
if (!CheckMutable(S, OpPC, Ptr))
return false;
@@ -820,13 +870,17 @@ bool CheckFinalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
}
bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- if (!CheckLive(S, OpPC, Ptr, AK_Assign))
- return false;
- if (!CheckDummy(S, OpPC, Ptr, AK_Assign))
- return false;
- if (!CheckLifetime(S, OpPC, Ptr, AK_Assign))
+ if (!Ptr.isBlockPointer())
return false;
- if (!CheckExtern(S, OpPC, Ptr))
+
+ if (!Ptr.block()->isAccessible()) {
+ if (!CheckLive(S, OpPC, Ptr, AK_Assign))
+ return false;
+ if (!CheckExtern(S, OpPC, Ptr))
+ return false;
+ return CheckDummy(S, OpPC, Ptr.block(), AK_Assign);
+ }
+ if (!CheckLifetime(S, OpPC, Ptr.getLifetime(), AK_Assign))
return false;
if (!CheckRange(S, OpPC, Ptr, AK_Assign))
return false;
@@ -1098,13 +1152,11 @@ bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) {
return diagnoseUnknownDecl(S, OpPC, D);
}
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK) {
- if (!Ptr.isDummy())
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK) {
+ if (!B->isDummy())
return true;
- const Descriptor *Desc = Ptr.getDeclDesc();
- const ValueDecl *D = Desc->asValueDecl();
+ const ValueDecl *D = B->getDescriptor()->asValueDecl();
if (!D)
return false;
@@ -1155,17 +1207,15 @@ static bool runRecordDestructor(InterpState &S, CodePtr OpPC,
}
// Destructor of this record.
- if (const CXXDestructorDecl *Dtor = R->getDestructor();
- Dtor && !Dtor->isTrivial()) {
- const Function *DtorFunc = S.getContext().getOrCreateFunction(Dtor);
- if (!DtorFunc)
- return false;
+ const CXXDestructorDecl *Dtor = R->getDestructor();
+ assert(Dtor);
+ assert(!Dtor->isTrivial());
+ const Function *DtorFunc = S.getContext().getOrCreateFunction(Dtor);
+ if (!DtorFunc)
+ return false;
- S.Stk.push<Pointer>(BasePtr);
- if (!Call(S, OpPC, DtorFunc, 0))
- return false;
- }
- return true;
+ S.Stk.push<Pointer>(BasePtr);
+ return Call(S, OpPC, DtorFunc, 0);
}
static bool RunDestructors(InterpState &S, CodePtr OpPC, const Block *B) {
@@ -1177,6 +1227,9 @@ static bool RunDestructors(InterpState &S, CodePtr OpPC, const Block *B) {
assert(Desc->isRecord() || Desc->isCompositeArray());
+ if (Desc->hasTrivialDtor())
+ return true;
+
if (Desc->isCompositeArray()) {
unsigned N = Desc->getNumElems();
if (N == 0)
@@ -1251,7 +1304,7 @@ bool Free(InterpState &S, CodePtr OpPC, bool DeleteIsArrayForm,
return false;
}
- if (!Ptr.isRoot() || Ptr.isOnePastEnd() ||
+ if (!Ptr.isRoot() || (Ptr.isOnePastEnd() && !Ptr.isZeroSizeArray()) ||
(Ptr.isArrayElement() && Ptr.getIndex() != 0)) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.FFDiag(Loc, diag::note_constexpr_delete_subobject)
@@ -1426,7 +1479,7 @@ static bool checkConstructor(InterpState &S, CodePtr OpPC, const Function *Func,
bool CheckDestructor(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (!CheckLive(S, OpPC, Ptr, AK_Destroy))
return false;
- if (!CheckTemporary(S, OpPC, Ptr, AK_Destroy))
+ if (!CheckTemporary(S, OpPC, Ptr.block(), AK_Destroy))
return false;
if (!CheckRange(S, OpPC, Ptr, AK_Destroy))
return false;
@@ -1620,8 +1673,17 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
const auto *StaticDecl = cast<CXXRecordDecl>(Func->getParentDecl());
const auto *InitialFunction = cast<CXXMethodDecl>(Callee);
- const CXXMethodDecl *Overrider = S.getContext().getOverridingFunction(
- DynamicDecl, StaticDecl, InitialFunction);
+ const CXXMethodDecl *Overrider;
+
+ if (StaticDecl != DynamicDecl) {
+ if (!DynamicDecl->isDerivedFrom(StaticDecl))
+ return false;
+ Overrider = S.getContext().getOverridingFunction(DynamicDecl, StaticDecl,
+ InitialFunction);
+
+ } else {
+ Overrider = InitialFunction;
+ }
if (Overrider != InitialFunction) {
// DR1872: An instantiated virtual constexpr function can't be called in a
@@ -1749,7 +1811,7 @@ static void startLifetimeRecurse(const Pointer &Ptr) {
bool StartLifetime(InterpState &S, CodePtr OpPC) {
const auto &Ptr = S.Stk.peek<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr, AK_Destroy))
+ if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK_Destroy))
return false;
startLifetimeRecurse(Ptr.narrow());
return true;
@@ -1780,8 +1842,13 @@ static void endLifetimeRecurse(const Pointer &Ptr) {
/// Ends the lifetime of the peek'd pointer.
bool EndLifetime(InterpState &S, CodePtr OpPC) {
const auto &Ptr = S.Stk.peek<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr, AK_Destroy))
+ if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK_Destroy))
return false;
+
+ // FIXME: We need per-element lifetime information for primitive arrays.
+ if (Ptr.isArrayElement())
+ return true;
+
endLifetimeRecurse(Ptr.narrow());
return true;
}
@@ -1789,8 +1856,13 @@ bool EndLifetime(InterpState &S, CodePtr OpPC) {
/// Ends the lifetime of the pop'd pointer.
bool EndLifetimePop(InterpState &S, CodePtr OpPC) {
const auto &Ptr = S.Stk.pop<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr, AK_Destroy))
+ if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK_Destroy))
return false;
+
+ // FIXME: We need per-element lifetime information for primitive arrays.
+ if (Ptr.isArrayElement())
+ return true;
+
endLifetimeRecurse(Ptr.narrow());
return true;
}
@@ -1802,26 +1874,32 @@ bool CheckNewTypeMismatch(InterpState &S, CodePtr OpPC, const Expr *E,
if (Ptr.inUnion() && Ptr.getBase().getRecord()->isUnion())
Ptr.activate();
+ if (!Ptr.isBlockPointer())
+ return false;
+
  // Similar to CheckStore(), but with an additional CheckTemporary() call, and
  // the AccessKinds are different.
- if (!CheckTemporary(S, OpPC, Ptr, AK_Construct))
- return false;
- if (!CheckLive(S, OpPC, Ptr, AK_Construct))
- return false;
- if (!CheckDummy(S, OpPC, Ptr, AK_Construct))
+
+ if (!Ptr.block()->isAccessible()) {
+ if (!CheckExtern(S, OpPC, Ptr))
+ return false;
+ if (!CheckLive(S, OpPC, Ptr, AK_Construct))
+ return false;
+ return CheckDummy(S, OpPC, Ptr.block(), AK_Construct);
+ }
+ if (!CheckTemporary(S, OpPC, Ptr.block(), AK_Construct))
return false;
// CheckLifetime for this and all base pointers.
for (Pointer P = Ptr;;) {
- if (!CheckLifetime(S, OpPC, P, AK_Construct))
+ if (!CheckLifetime(S, OpPC, P.getLifetime(), AK_Construct))
return false;
if (P.isRoot())
break;
P = P.getBase();
}
- if (!CheckExtern(S, OpPC, Ptr))
- return false;
+
if (!CheckRange(S, OpPC, Ptr, AK_Construct))
return false;
if (!CheckGlobal(S, OpPC, Ptr))
@@ -2011,7 +2089,7 @@ bool GetTypeidPtr(InterpState &S, CodePtr OpPC, const Type *TypeInfoType) {
return false;
// Pick the most-derived type.
- const Type *T = P.getDeclPtr().getType().getTypePtr();
+ CanQualType T = P.getDeclPtr().getType()->getCanonicalTypeUnqualified();
// ... unless we're currently constructing this object.
// FIXME: We have a similar check to this in more places.
if (S.Current->getFunction()) {
@@ -2019,14 +2097,14 @@ bool GetTypeidPtr(InterpState &S, CodePtr OpPC, const Type *TypeInfoType) {
if (const Function *Func = Frame->getFunction();
Func && (Func->isConstructor() || Func->isDestructor()) &&
P.block() == Frame->getThis().block()) {
- T = Func->getParentDecl()->getTypeForDecl();
+ T = S.getContext().getASTContext().getCanonicalTagType(
+ Func->getParentDecl());
break;
}
}
}
- S.Stk.push<Pointer>(T->getCanonicalTypeUnqualified().getTypePtr(),
- TypeInfoType);
+ S.Stk.push<Pointer>(T->getTypePtr(), TypeInfoType);
return true;
}
@@ -2040,8 +2118,8 @@ bool DiagTypeid(InterpState &S, CodePtr OpPC) {
bool arePotentiallyOverlappingStringLiterals(const Pointer &LHS,
const Pointer &RHS) {
- unsigned LHSOffset = LHS.getIndex();
- unsigned RHSOffset = RHS.getIndex();
+ unsigned LHSOffset = LHS.isOnePastEnd() ? LHS.getNumElems() : LHS.getIndex();
+ unsigned RHSOffset = RHS.isOnePastEnd() ? RHS.getNumElems() : RHS.getIndex();
unsigned LHSLength = (LHS.getNumElems() - 1) * LHS.elemSize();
unsigned RHSLength = (RHS.getNumElems() - 1) * RHS.elemSize();
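What the isOnePastEnd() guard accounts for, sketched with a literal:

// For "ab" (three elements, counting the terminator) the pointer Str + 3 is
// one past the end; its index is not a usable offset, so the overlap check
// must substitute getNumElems() for it.
constexpr const char *Str = "ab";
constexpr const char *End = Str + 3; // one-past-the-end, offset == 3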
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index 8a28106..2da2202 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -51,8 +51,7 @@ bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK);
/// Checks if a pointer is a dummy pointer.
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK);
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK);
/// Checks if a pointer is null.
bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
@@ -89,11 +88,14 @@ bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK = AK_Read);
bool CheckFinalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK);
+bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK);
+bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, bool Extern,
+ const Descriptor *Desc, AccessKinds AK);
+
/// Checks a direct load of a primitive value from a global or local variable.
-bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-bool CheckLocalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Block *B);
+bool CheckLocalLoad(InterpState &S, CodePtr OpPC, const Block *B);
/// Checks if a value can be stored in a block.
bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
@@ -1351,10 +1353,10 @@ inline bool ConstFloat(InterpState &S, CodePtr OpPC, const Floating &F) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetLocal(InterpState &S, CodePtr OpPC, uint32_t I) {
- const Pointer &Ptr = S.Current->getLocalPointer(I);
- if (!CheckLocalLoad(S, OpPC, Ptr))
+ const Block *B = S.Current->getLocalBlock(I);
+ if (!CheckLocalLoad(S, OpPC, B))
return false;
- S.Stk.push<T>(Ptr.deref<T>());
+ S.Stk.push<T>(B->deref<T>());
return true;
}
@@ -1465,22 +1467,26 @@ bool SetThisField(InterpState &S, CodePtr OpPC, uint32_t I) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetGlobal(InterpState &S, CodePtr OpPC, uint32_t I) {
- const Pointer &Ptr = S.P.getPtrGlobal(I);
+ const Block *B = S.P.getGlobal(I);
- if (!CheckGlobalLoad(S, OpPC, Ptr))
+ if (!CheckGlobalLoad(S, OpPC, B))
return false;
- S.Stk.push<T>(Ptr.deref<T>());
+ S.Stk.push<T>(B->deref<T>());
return true;
}
/// Same as GetGlobal, but without the checks.
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetGlobalUnchecked(InterpState &S, CodePtr OpPC, uint32_t I) {
- const Pointer &Ptr = S.P.getPtrGlobal(I);
- if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
- return false;
- S.Stk.push<T>(Ptr.deref<T>());
+ const Block *B = S.P.getGlobal(I);
+ const auto &Desc =
+ *reinterpret_cast<const GlobalInlineDescriptor *>(B->rawData());
+ if (Desc.InitState != GlobalInitState::Initialized)
+ return DiagnoseUninitialized(S, OpPC, B->isExtern(), B->getDescriptor(),
+ AK_Read);
+
+ S.Stk.push<T>(B->deref<T>());
return true;
}
@@ -1634,6 +1640,9 @@ bool InitField(InterpState &S, CodePtr OpPC, uint32_t I) {
const Pointer &Ptr = S.Stk.peek<Pointer>();
if (!CheckRange(S, OpPC, Ptr, CSK_Field))
return false;
+ if (!CheckArray(S, OpPC, Ptr))
+ return false;
+
const Pointer &Field = Ptr.atField(I);
Field.deref<T>() = Value;
Field.initialize();
@@ -1646,6 +1655,9 @@ bool InitFieldActivate(InterpState &S, CodePtr OpPC, uint32_t I) {
const Pointer &Ptr = S.Stk.peek<Pointer>();
if (!CheckRange(S, OpPC, Ptr, CSK_Field))
return false;
+ if (!CheckArray(S, OpPC, Ptr))
+ return false;
+
const Pointer &Field = Ptr.atField(I);
Field.deref<T>() = Value;
Field.activate();
@@ -1657,7 +1669,13 @@ template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitBitField(InterpState &S, CodePtr OpPC, const Record::Field *F) {
assert(F->isBitField());
const T &Value = S.Stk.pop<T>();
- const Pointer &Field = S.Stk.peek<Pointer>().atField(F->Offset);
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+ if (!CheckRange(S, OpPC, Ptr, CSK_Field))
+ return false;
+ if (!CheckArray(S, OpPC, Ptr))
+ return false;
+
+ const Pointer &Field = Ptr.atField(F->Offset);
if constexpr (needsAlloc<T>()) {
T Result = S.allocAP<T>(Value.bitWidth());
@@ -1683,7 +1701,13 @@ bool InitBitFieldActivate(InterpState &S, CodePtr OpPC,
const Record::Field *F) {
assert(F->isBitField());
const T &Value = S.Stk.pop<T>();
- const Pointer &Field = S.Stk.peek<Pointer>().atField(F->Offset);
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+ if (!CheckRange(S, OpPC, Ptr, CSK_Field))
+ return false;
+ if (!CheckArray(S, OpPC, Ptr))
+ return false;
+
+ const Pointer &Field = Ptr.atField(F->Offset);
if constexpr (needsAlloc<T>()) {
T Result = S.allocAP<T>(Value.bitWidth());
@@ -1764,10 +1788,7 @@ inline bool GetPtrDerivedPop(InterpState &S, CodePtr OpPC, uint32_t Off,
const Record *TargetRecord = Ptr.atFieldSub(Off).getRecord();
assert(TargetRecord);
- if (TargetRecord->getDecl()
- ->getTypeForDecl()
- ->getAsCXXRecordDecl()
- ->getCanonicalDecl() !=
+ if (TargetRecord->getDecl()->getCanonicalDecl() !=
TargetType->getAsCXXRecordDecl()->getCanonicalDecl()) {
QualType MostDerivedType = Ptr.getDeclDesc()->getType();
S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_invalid_downcast)
@@ -1785,6 +1806,8 @@ inline bool GetPtrBase(InterpState &S, CodePtr OpPC, uint32_t Off) {
return false;
if (!Ptr.isBlockPointer()) {
+ if (!Ptr.isIntegralPointer())
+ return false;
S.Stk.push<Pointer>(Ptr.asIntPointer().baseCast(S.getASTContext(), Off));
return true;
}
@@ -1806,6 +1829,8 @@ inline bool GetPtrBasePop(InterpState &S, CodePtr OpPC, uint32_t Off,
return false;
if (!Ptr.isBlockPointer()) {
+ if (!Ptr.isIntegralPointer())
+ return false;
S.Stk.push<Pointer>(Ptr.asIntPointer().baseCast(S.getASTContext(), Off));
return true;
}
@@ -2351,8 +2376,8 @@ static inline bool IncDecPtrHelper(InterpState &S, CodePtr OpPC,
static inline bool IncPtr(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
- return false;
+ if (!Ptr.isInitialized())
+ return DiagnoseUninitialized(S, OpPC, Ptr, AK_Increment);
return IncDecPtrHelper<ArithOp::Add>(S, OpPC, Ptr);
}
@@ -2360,8 +2385,8 @@ static inline bool IncPtr(InterpState &S, CodePtr OpPC) {
static inline bool DecPtr(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
- return false;
+ if (!Ptr.isInitialized())
+ return DiagnoseUninitialized(S, OpPC, Ptr, AK_Decrement);
return IncDecPtrHelper<ArithOp::Sub>(S, OpPC, Ptr);
}
@@ -2434,9 +2459,17 @@ inline bool Destroy(InterpState &S, CodePtr OpPC, uint32_t I) {
const Pointer &Ptr = S.Current->getLocalPointer(Local.Offset);
if (Ptr.getLifetime() == Lifetime::Ended) {
- auto *D = cast<NamedDecl>(Ptr.getFieldDesc()->asDecl());
- S.FFDiag(D->getLocation(), diag::note_constexpr_destroy_out_of_lifetime)
- << D->getNameAsString();
+    // Try to use the declaration for better diagnostics.
+ if (const Decl *D = Ptr.getDeclDesc()->asDecl()) {
+ auto *ND = cast<NamedDecl>(D);
+ S.FFDiag(ND->getLocation(),
+ diag::note_constexpr_destroy_out_of_lifetime)
+ << ND->getNameAsString();
+ } else {
+ S.FFDiag(Ptr.getDeclDesc()->getLocation(),
+ diag::note_constexpr_destroy_out_of_lifetime)
+ << Ptr.toDiagnosticString(S.getASTContext());
+ }
return false;
}
}
@@ -3155,8 +3188,10 @@ inline bool ArrayDecay(InterpState &S, CodePtr OpPC) {
return true;
}
- if (!CheckRange(S, OpPC, Ptr, CSK_ArrayToPointer))
- return false;
+ if (!Ptr.isZeroSizeArray()) {
+ if (!CheckRange(S, OpPC, Ptr, CSK_ArrayToPointer))
+ return false;
+ }
if (Ptr.isRoot() || !Ptr.isUnknownSizeArray()) {
S.Stk.push<Pointer>(Ptr.atIndex(0));
@@ -3195,6 +3230,9 @@ inline bool GetMemberPtr(InterpState &S, CodePtr OpPC, const ValueDecl *D) {
inline bool GetMemberPtrBase(InterpState &S, CodePtr OpPC) {
const auto &MP = S.Stk.pop<MemberPointer>();
+ if (!MP.isBaseCastPossible())
+ return false;
+
S.Stk.push<Pointer>(MP.getBase());
return true;
}
@@ -3452,7 +3490,15 @@ inline bool AllocN(InterpState &S, CodePtr OpPC, PrimType T, const Expr *Source,
S.Stk.push<Pointer>(0, nullptr);
return true;
}
- assert(NumElements.isPositive());
+ if (NumElements.isNegative()) {
+ if (!IsNoThrow) {
+ S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_new_negative)
+ << NumElements.toDiagnosticString(S.getASTContext());
+ return false;
+ }
+ S.Stk.push<Pointer>(0, nullptr);
+ return true;
+ }
if (!CheckArraySize(S, OpPC, static_cast<uint64_t>(NumElements)))
return false;
diff --git a/clang/lib/AST/ByteCode/InterpBlock.cpp b/clang/lib/AST/ByteCode/InterpBlock.cpp
index 963b54e..ac6f01f 100644
--- a/clang/lib/AST/ByteCode/InterpBlock.cpp
+++ b/clang/lib/AST/ByteCode/InterpBlock.cpp
@@ -18,18 +18,14 @@ using namespace clang::interp;
void Block::addPointer(Pointer *P) {
assert(P);
- if (IsStatic) {
- assert(!Pointers);
- return;
- }
#ifndef NDEBUG
assert(!hasPointer(P));
#endif
if (Pointers)
- Pointers->PointeeStorage.BS.Prev = P;
- P->PointeeStorage.BS.Next = Pointers;
- P->PointeeStorage.BS.Prev = nullptr;
+ Pointers->BS.Prev = P;
+ P->BS.Next = Pointers;
+ P->BS.Prev = nullptr;
Pointers = P;
#ifndef NDEBUG
assert(hasPointer(P));
@@ -39,32 +35,28 @@ void Block::addPointer(Pointer *P) {
void Block::removePointer(Pointer *P) {
assert(P->isBlockPointer());
assert(P);
- if (IsStatic) {
- assert(!Pointers);
- return;
- }
#ifndef NDEBUG
assert(hasPointer(P));
#endif
- BlockPointer &BP = P->PointeeStorage.BS;
+ BlockPointer &BP = P->BS;
if (Pointers == P)
Pointers = BP.Next;
if (BP.Prev)
- BP.Prev->PointeeStorage.BS.Next = BP.Next;
+ BP.Prev->BS.Next = BP.Next;
if (BP.Next)
- BP.Next->PointeeStorage.BS.Prev = BP.Prev;
- P->PointeeStorage.BS.Pointee = nullptr;
+ BP.Next->BS.Prev = BP.Prev;
+ P->BS.Pointee = nullptr;
#ifndef NDEBUG
assert(!hasPointer(P));
#endif
}
void Block::cleanup() {
- if (Pointers == nullptr && IsDead)
+ if (Pointers == nullptr && !isDynamic() && isDead())
(reinterpret_cast<DeadBlock *>(this + 1) - 1)->free();
}
@@ -74,21 +66,17 @@ void Block::replacePointer(Pointer *Old, Pointer *New) {
assert(New);
assert(New->isBlockPointer());
assert(Old != New);
- if (IsStatic) {
- assert(!Pointers);
- return;
- }
#ifndef NDEBUG
assert(hasPointer(Old));
#endif
- BlockPointer &OldBP = Old->PointeeStorage.BS;
- BlockPointer &NewBP = New->PointeeStorage.BS;
+ BlockPointer &OldBP = Old->BS;
+ BlockPointer &NewBP = New->BS;
if (OldBP.Prev)
- OldBP.Prev->PointeeStorage.BS.Next = New;
+ OldBP.Prev->BS.Next = New;
if (OldBP.Next)
- OldBP.Next->PointeeStorage.BS.Prev = New;
+ OldBP.Next->BS.Prev = New;
NewBP.Prev = OldBP.Prev;
NewBP.Next = OldBP.Next;
if (Pointers == Old)
@@ -113,8 +101,8 @@ bool Block::hasPointer(const Pointer *P) const {
#endif
DeadBlock::DeadBlock(DeadBlock *&Root, Block *Blk)
- : Root(Root), B(~0u, Blk->Desc, Blk->IsStatic, Blk->IsExtern, Blk->IsWeak,
- /*isDead=*/true) {
+ : Root(Root), B(~0u, Blk->Desc, Blk->isExtern(), Blk->IsStatic,
+ Blk->isWeak(), Blk->isDummy(), /*IsDead=*/true) {
// Add the block to the chain of dead blocks.
if (Root)
Root->Prev = this;
@@ -123,18 +111,17 @@ DeadBlock::DeadBlock(DeadBlock *&Root, Block *Blk)
Prev = nullptr;
Root = this;
- B.IsDynamic = Blk->IsDynamic;
+ B.DynAllocId = Blk->DynAllocId;
// Transfer pointers.
B.Pointers = Blk->Pointers;
for (Pointer *P = Blk->Pointers; P; P = P->asBlockPointer().Next)
- P->PointeeStorage.BS.Pointee = &B;
+ P->BS.Pointee = &B;
Blk->Pointers = nullptr;
}
void DeadBlock::free() {
- if (B.IsInitialized)
- B.invokeDtor();
+ assert(!B.isInitialized());
if (Prev)
Prev->Next = Next;
diff --git a/clang/lib/AST/ByteCode/InterpBlock.h b/clang/lib/AST/ByteCode/InterpBlock.h
index 5162223..ea9f44c 100644
--- a/clang/lib/AST/ByteCode/InterpBlock.h
+++ b/clang/lib/AST/ByteCode/InterpBlock.h
@@ -22,7 +22,7 @@ class Block;
class DeadBlock;
class InterpState;
class Pointer;
-enum PrimType : unsigned;
+enum PrimType : uint8_t;
/// A memory block, either on the stack or in the heap.
///
@@ -42,21 +42,31 @@ enum PrimType : unsigned;
/// the data size and the metadata size.
///
class Block final {
+private:
+ static constexpr uint8_t ExternFlag = 1 << 0;
+ static constexpr uint8_t DeadFlag = 1 << 1;
+ static constexpr uint8_t WeakFlag = 1 << 2;
+ static constexpr uint8_t DummyFlag = 1 << 3;
+
public:
/// Creates a new block.
- Block(unsigned EvalID, const std::optional<unsigned> &DeclID,
- const Descriptor *Desc, bool IsStatic = false, bool IsExtern = false,
- bool IsWeak = false)
- : EvalID(EvalID), DeclID(DeclID), IsStatic(IsStatic), IsExtern(IsExtern),
- IsDynamic(false), IsWeak(IsWeak), Desc(Desc) {
+ Block(unsigned EvalID, UnsignedOrNone DeclID, const Descriptor *Desc,
+ bool IsStatic = false, bool IsExtern = false, bool IsWeak = false,
+ bool IsDummy = false)
+ : Desc(Desc), DeclID(DeclID), EvalID(EvalID), IsStatic(IsStatic) {
assert(Desc);
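+    // Multiplying each flag by its bool sets the bit only when the
+    // corresponding property holds (the booleans are guaranteed 0 or 1).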
+ AccessFlags |= (ExternFlag * IsExtern);
+ AccessFlags |= (WeakFlag * IsWeak);
+ AccessFlags |= (DummyFlag * IsDummy);
}
Block(unsigned EvalID, const Descriptor *Desc, bool IsStatic = false,
- bool IsExtern = false, bool IsWeak = false)
- : EvalID(EvalID), DeclID((unsigned)-1), IsStatic(IsStatic),
- IsExtern(IsExtern), IsDynamic(false), IsWeak(IsWeak), Desc(Desc) {
+ bool IsExtern = false, bool IsWeak = false, bool IsDummy = false)
+ : Desc(Desc), EvalID(EvalID), IsStatic(IsStatic) {
assert(Desc);
+ AccessFlags |= (ExternFlag * IsExtern);
+ AccessFlags |= (WeakFlag * IsWeak);
+ AccessFlags |= (DummyFlag * IsDummy);
}
/// Returns the block's descriptor.
@@ -64,17 +74,19 @@ public:
/// Checks if the block has any live pointers.
bool hasPointers() const { return Pointers; }
/// Checks if the block is extern.
- bool isExtern() const { return IsExtern; }
+ bool isExtern() const { return AccessFlags & ExternFlag; }
/// Checks if the block has static storage duration.
bool isStatic() const { return IsStatic; }
/// Checks if the block is temporary.
bool isTemporary() const { return Desc->IsTemporary; }
- bool isWeak() const { return IsWeak; }
- bool isDynamic() const { return IsDynamic; }
+ bool isWeak() const { return AccessFlags & WeakFlag; }
+ bool isDynamic() const { return (DynAllocId != std::nullopt); }
+ bool isDummy() const { return AccessFlags & DummyFlag; }
+ bool isDead() const { return AccessFlags & DeadFlag; }
/// Returns the size of the block.
unsigned getSize() const { return Desc->getAllocSize(); }
/// Returns the declaration ID.
- std::optional<unsigned> getDeclID() const { return DeclID; }
+ UnsignedOrNone getDeclID() const { return DeclID; }
/// Returns whether the data of this block has been initialized via
/// invoking the Ctor func.
bool isInitialized() const { return IsInitialized; }
@@ -103,6 +115,10 @@ public:
return reinterpret_cast<const std::byte *>(this) + sizeof(Block);
}
+ template <typename T> T deref() const {
+ return *reinterpret_cast<const T *>(data());
+ }
+
/// Invokes the constructor.
void invokeCtor() {
assert(!IsInitialized);
@@ -126,19 +142,28 @@ public:
void dump() const { dump(llvm::errs()); }
void dump(llvm::raw_ostream &OS) const;
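+  /// A block is accessible only if none of the extern/dead/weak/dummy
+  /// flags are set.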
+ bool isAccessible() const { return AccessFlags == 0; }
+
private:
friend class Pointer;
friend class DeadBlock;
friend class InterpState;
friend class DynamicAllocator;
+ friend class Program;
Block(unsigned EvalID, const Descriptor *Desc, bool IsExtern, bool IsStatic,
- bool IsWeak, bool IsDead)
- : EvalID(EvalID), IsStatic(IsStatic), IsExtern(IsExtern), IsDead(true),
- IsDynamic(false), IsWeak(IsWeak), Desc(Desc) {
+ bool IsWeak, bool IsDummy, bool IsDead)
+ : Desc(Desc), EvalID(EvalID), IsStatic(IsStatic) {
assert(Desc);
+ AccessFlags |= (ExternFlag * IsExtern);
+ AccessFlags |= (DeadFlag * IsDead);
+ AccessFlags |= (WeakFlag * IsWeak);
+ AccessFlags |= (DummyFlag * IsDummy);
}
+ /// To be called by DynamicAllocator.
+ void setDynAllocId(unsigned ID) { DynAllocId = ID; }
+
/// Deletes a dead block at the end of its lifetime.
void cleanup();
@@ -150,27 +175,22 @@ private:
bool hasPointer(const Pointer *P) const;
#endif
- const unsigned EvalID = ~0u;
+  /// Pointer to the block's descriptor.
+  const Descriptor *Desc;
/// Start of the chain of pointers.
Pointer *Pointers = nullptr;
/// Unique identifier of the declaration.
- std::optional<unsigned> DeclID;
+ UnsignedOrNone DeclID = std::nullopt;
+ const unsigned EvalID = ~0u;
/// Flag indicating if the block has static storage duration.
bool IsStatic = false;
- /// Flag indicating if the block is an extern.
- bool IsExtern = false;
- /// Flag indicating if the pointer is dead. This is only ever
- /// set once, when converting the Block to a DeadBlock.
- bool IsDead = false;
/// Flag indicating if the block contents have been initialized
/// via invokeCtor.
bool IsInitialized = false;
- /// Flag indicating if this block has been allocated via dynamic
- /// memory allocation (e.g. malloc).
- bool IsDynamic = false;
- bool IsWeak = false;
- /// Pointer to the stack slot descriptor.
- const Descriptor *Desc;
+ /// Allocation ID for this dynamic allocation, if it is one.
+ UnsignedOrNone DynAllocId = std::nullopt;
+ /// AccessFlags containing IsExtern, IsDead, IsWeak, and IsDummy bits.
+ uint8_t AccessFlags = 0;
};
/// Descriptor for a dead block.
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index f908d02..e05b1a8 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -141,6 +141,22 @@ static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC,
S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
}
+static llvm::APSInt convertBoolVectorToInt(const Pointer &Val) {
+ assert(Val.getFieldDesc()->isPrimitiveArray() &&
+ Val.getFieldDesc()->getElemQualType()->isBooleanType() &&
+ "Not a boolean vector");
+ unsigned NumElems = Val.getNumElems();
+
+  // Each element is one bit, so create an integer with NumElems bits.
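+  // E.g. the bool vector {true, false, true, true} becomes the 4-bit value
+  // 0b1101 (bit I is set iff element I is true).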
+ llvm::APSInt Result(NumElems, 0);
+ for (unsigned I = 0; I != NumElems; ++I) {
+ if (Val.elem<bool>(I))
+ Result.setBit(I);
+ }
+
+ return Result;
+}
+
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call) {
@@ -205,6 +221,8 @@ static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
if (A.isDummy() || B.isDummy())
return false;
+ if (!A.isBlockPointer() || !B.isBlockPointer())
+ return false;
bool IsWide = ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp ||
ID == Builtin::BI__builtin_wcscmp ||
@@ -212,7 +230,10 @@ static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
assert(A.getFieldDesc()->isPrimitiveArray());
assert(B.getFieldDesc()->isPrimitiveArray());
- assert(getElemType(A).getTypePtr() == getElemType(B).getTypePtr());
+ // Different element types shouldn't happen, but with casts they can.
+ if (!S.getASTContext().hasSameUnqualifiedType(getElemType(A), getElemType(B)))
+ return false;
+
PrimType ElemT = *S.getContext().classify(getElemType(A));
auto returnResult = [&](int V) -> bool {
@@ -276,7 +297,7 @@ static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
if (!CheckLive(S, OpPC, StrPtr, AK_Read))
return false;
- if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
+ if (!CheckDummy(S, OpPC, StrPtr.block(), AK_Read))
return false;
assert(StrPtr.getFieldDesc()->isPrimitiveArray());
@@ -459,12 +480,13 @@ static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame, bool CheckSign,
const CallExpr *Call) {
const Floating &Arg = S.Stk.pop<Floating>();
- bool IsInf = Arg.isInf();
+ APFloat F = Arg.getAPFloat();
+ bool IsInf = F.isInfinity();
if (CheckSign)
- pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType());
+ pushInteger(S, IsInf ? (F.isNegative() ? -1 : 1) : 0, Call->getType());
else
- pushInteger(S, Arg.isInf(), Call->getType());
+ pushInteger(S, IsInf, Call->getType());
return true;
}
@@ -597,6 +619,17 @@ static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
return true;
}
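+/// Returns the absolute value of \p In, allocating a new Floating only if
+/// the sign actually has to change.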
+static inline Floating abs(InterpState &S, const Floating &In) {
+ if (!In.isNegative())
+ return In;
+
+ Floating Output = S.allocFloat(In.getSemantics());
+ APFloat New = In.getAPFloat();
+ New.changeSign();
+ Output.copy(New);
+ return Output;
+}
+
// The C standard says "fabs raises no floating-point exceptions,
// even if x is a signaling NaN. The returned value is independent of
// the current rounding direction mode." Therefore constant folding can
@@ -605,16 +638,7 @@ static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame) {
const Floating &Val = S.Stk.pop<Floating>();
- APFloat F = Val.getAPFloat();
- if (!F.isNegative()) {
- S.Stk.push<Floating>(Val);
- return true;
- }
-
- Floating Result = S.allocFloat(Val.getSemantics());
- F.changeSign();
- Result.copy(F);
- S.Stk.push<Floating>(Result);
+ S.Stk.push<Floating>(abs(S, Val));
return true;
}
@@ -635,8 +659,14 @@ static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call) {
- PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
- APSInt Val = popToAPSInt(S.Stk, ArgT);
+ APSInt Val;
+ if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
+ const Pointer &Arg = S.Stk.pop<Pointer>();
+ Val = convertBoolVectorToInt(Arg);
+ } else {
+ PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+ Val = popToAPSInt(S.Stk, ArgT);
+ }
pushInteger(S, Val.popcount(), Call->getType());
return true;
}
@@ -932,8 +962,14 @@ static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
Fallback = popToAPSInt(S.Stk, FallbackT);
}
- PrimType ValT = *S.getContext().classify(Call->getArg(0));
- const APSInt &Val = popToAPSInt(S.Stk, ValT);
+ APSInt Val;
+ if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
+ const Pointer &Arg = S.Stk.pop<Pointer>();
+ Val = convertBoolVectorToInt(Arg);
+ } else {
+ PrimType ValT = *S.getContext().classify(Call->getArg(0));
+ Val = popToAPSInt(S.Stk, ValT);
+ }
// When the argument is 0, the result of GCC builtins is undefined, whereas
// for Microsoft intrinsics, the result is the bit-width of the argument.
@@ -963,8 +999,14 @@ static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
Fallback = popToAPSInt(S.Stk, FallbackT);
}
- PrimType ValT = *S.getContext().classify(Call->getArg(0));
- const APSInt &Val = popToAPSInt(S.Stk, ValT);
+ APSInt Val;
+ if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
+ const Pointer &Arg = S.Stk.pop<Pointer>();
+ Val = convertBoolVectorToInt(Arg);
+ } else {
+ PrimType ValT = *S.getContext().classify(Call->getArg(0));
+ Val = popToAPSInt(S.Stk, ValT);
+ }
if (Val == 0) {
if (Fallback) {
@@ -1544,8 +1586,7 @@ static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
// Composite arrays
if (IsArray) {
const Descriptor *Desc =
- S.P.createDescriptor(NewCall, ElemType.getTypePtr(),
- IsArray ? std::nullopt : Descriptor::InlineDescMD);
+ S.P.createDescriptor(NewCall, ElemType.getTypePtr(), std::nullopt);
Block *B =
Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
DynamicAllocator::Form::Operator);
@@ -1558,9 +1599,8 @@ static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
QualType AllocType = S.getASTContext().getConstantArrayType(
ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);
- const Descriptor *Desc =
- S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
- IsArray ? std::nullopt : Descriptor::InlineDescMD);
+ const Descriptor *Desc = S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
+ Descriptor::InlineDescMD);
Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
DynamicAllocator::Form::Operator);
assert(B);
@@ -1687,6 +1727,57 @@ static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const CallExpr *Call,
+ unsigned BuiltinID) {
+ assert(Call->getNumArgs() == 1);
+ QualType Ty = Call->getArg(0)->getType();
+ if (Ty->isIntegerType()) {
+ PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+ APSInt Val = popToAPSInt(S.Stk, ArgT);
+
+ pushInteger(S, Val.abs(), Call->getType());
+ return true;
+ }
+
+ if (Ty->isFloatingType()) {
+ Floating Val = S.Stk.pop<Floating>();
+ Floating Result = abs(S, Val);
+ S.Stk.push<Floating>(Result);
+ return true;
+ }
+
+ // Otherwise, the argument must be a vector.
+ assert(Call->getArg(0)->getType()->isVectorType());
+ const Pointer &Arg = S.Stk.pop<Pointer>();
+ assert(Arg.getFieldDesc()->isPrimitiveArray());
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ assert(Dst.getFieldDesc()->isPrimitiveArray());
+ assert(Arg.getFieldDesc()->getNumElems() ==
+ Dst.getFieldDesc()->getNumElems());
+
+ QualType ElemType = Arg.getFieldDesc()->getElemQualType();
+ PrimType ElemT = *S.getContext().classify(ElemType);
+ unsigned NumElems = Arg.getNumElems();
+  // The vector elements are either all integers or all floating-point values.
+ for (unsigned I = 0; I != NumElems; ++I) {
+ if (ElemType->isIntegerType()) {
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ Dst.elem<T>(I) = T::from(static_cast<T>(
+ APSInt(Arg.elem<T>(I).toAPSInt().abs(),
+ ElemType->isUnsignedIntegerOrEnumerationType())));
+ });
+ } else {
+ Floating Val = Arg.elem<Floating>(I);
+ Dst.elem<Floating>(I) = abs(S, Val);
+ }
+ }
+ Dst.initializeAllElements();
+
+ return true;
+}
+
/// Can be called with an integer or vector as the first and only parameter.
static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
@@ -1733,6 +1824,94 @@ static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
return true;
}
+/// Can be called with an integer or a vector as the first parameter; an
+/// optional second argument supplies the result to use for zero inputs.
+static bool interp__builtin_elementwise_countzeroes(InterpState &S,
+ CodePtr OpPC,
+ const InterpFrame *Frame,
+ const CallExpr *Call,
+ unsigned BuiltinID) {
+ const bool HasZeroArg = Call->getNumArgs() == 2;
+ const bool IsCTTZ = BuiltinID == Builtin::BI__builtin_elementwise_cttz;
+ assert(Call->getNumArgs() == 1 || HasZeroArg);
+ if (Call->getArg(0)->getType()->isIntegerType()) {
+ PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+ APSInt Val = popToAPSInt(S.Stk, ArgT);
+ std::optional<APSInt> ZeroVal;
+ if (HasZeroArg) {
+ ZeroVal = Val;
+ Val = popToAPSInt(S.Stk, ArgT);
+ }
+
+ if (Val.isZero()) {
+ if (ZeroVal) {
+ pushInteger(S, *ZeroVal, Call->getType());
+ return true;
+ }
+      // If we haven't been provided the second argument, the result is
+      // undefined.
+ S.FFDiag(S.Current->getSource(OpPC),
+ diag::note_constexpr_countzeroes_zero)
+ << /*IsTrailing=*/IsCTTZ;
+ return false;
+ }
+
+ if (BuiltinID == Builtin::BI__builtin_elementwise_ctlz) {
+ pushInteger(S, Val.countLeadingZeros(), Call->getType());
+ } else {
+ pushInteger(S, Val.countTrailingZeros(), Call->getType());
+ }
+ return true;
+ }
+ // Otherwise, the argument must be a vector.
+ const ASTContext &ASTCtx = S.getASTContext();
+ Pointer ZeroArg;
+ if (HasZeroArg) {
+ assert(Call->getArg(1)->getType()->isVectorType() &&
+ ASTCtx.hasSameUnqualifiedType(Call->getArg(0)->getType(),
+ Call->getArg(1)->getType()));
+ (void)ASTCtx;
+ ZeroArg = S.Stk.pop<Pointer>();
+ assert(ZeroArg.getFieldDesc()->isPrimitiveArray());
+ }
+ assert(Call->getArg(0)->getType()->isVectorType());
+ const Pointer &Arg = S.Stk.pop<Pointer>();
+ assert(Arg.getFieldDesc()->isPrimitiveArray());
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ assert(Dst.getFieldDesc()->isPrimitiveArray());
+ assert(Arg.getFieldDesc()->getNumElems() ==
+ Dst.getFieldDesc()->getNumElems());
+
+ QualType ElemType = Arg.getFieldDesc()->getElemQualType();
+ PrimType ElemT = *S.getContext().classify(ElemType);
+ unsigned NumElems = Arg.getNumElems();
+
+ // FIXME: Reading from uninitialized vector elements?
+ for (unsigned I = 0; I != NumElems; ++I) {
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ APInt EltVal = Arg.atIndex(I).deref<T>().toAPSInt();
+ if (EltVal.isZero()) {
+ if (HasZeroArg) {
+ Dst.atIndex(I).deref<T>() = ZeroArg.atIndex(I).deref<T>();
+ } else {
+          // If we haven't been provided the second argument, the result is
+          // undefined.
+ S.FFDiag(S.Current->getSource(OpPC),
+ diag::note_constexpr_countzeroes_zero)
+ << /*IsTrailing=*/IsCTTZ;
+ return false;
+ }
+ } else if (IsCTTZ) {
+ Dst.atIndex(I).deref<T>() = T::from(EltVal.countTrailingZeros());
+ } else {
+ Dst.atIndex(I).deref<T>() = T::from(EltVal.countLeadingZeros());
+ }
+ Dst.atIndex(I).initialize();
+ });
+ }
+
+ return true;
+}
+
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call, unsigned ID) {
@@ -1784,7 +1963,27 @@ static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
if (DestPtr.isDummy() || SrcPtr.isDummy())
return false;
+ if (DestPtr.getType()->isIncompleteType()) {
+ S.FFDiag(S.Current->getSource(OpPC),
+ diag::note_constexpr_memcpy_incomplete_type)
+ << Move << DestPtr.getType();
+ return false;
+ }
+ if (SrcPtr.getType()->isIncompleteType()) {
+ S.FFDiag(S.Current->getSource(OpPC),
+ diag::note_constexpr_memcpy_incomplete_type)
+ << Move << SrcPtr.getType();
+ return false;
+ }
+
QualType DestElemType = getElemType(DestPtr);
+ if (DestElemType->isIncompleteType()) {
+ S.FFDiag(S.Current->getSource(OpPC),
+ diag::note_constexpr_memcpy_incomplete_type)
+ << Move << DestElemType;
+ return false;
+ }
+
size_t RemainingDestElems;
if (DestPtr.getFieldDesc()->isArray()) {
RemainingDestElems = DestPtr.isUnknownSizeArray()
@@ -1826,16 +2025,6 @@ static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
return false;
}
- if (DestElemType->isIncompleteType() ||
- DestPtr.getType()->isIncompleteType()) {
- QualType DiagType =
- DestElemType->isIncompleteType() ? DestElemType : DestPtr.getType();
- S.FFDiag(S.Current->getSource(OpPC),
- diag::note_constexpr_memcpy_incomplete_type)
- << Move << DiagType;
- return false;
- }
-
if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_nontrivial)
<< Move << DestElemType;
@@ -2024,8 +2213,13 @@ static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC,
return true;
}
- if (Ptr.isDummy())
+ if (Ptr.isDummy()) {
+ if (Ptr.getType()->isIncompleteType())
+ S.FFDiag(S.Current->getSource(OpPC),
+ diag::note_constexpr_ltor_incomplete_type)
+ << Ptr.getType();
return false;
+ }
// Null is only okay if the given size is 0.
if (Ptr.isZero()) {
@@ -2103,29 +2297,32 @@ static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC,
return true;
}
-static unsigned computeFullDescSize(const ASTContext &ASTCtx,
- const Descriptor *Desc) {
-
+static std::optional<unsigned> computeFullDescSize(const ASTContext &ASTCtx,
+ const Descriptor *Desc) {
if (Desc->isPrimitive())
return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
-
if (Desc->isArray())
return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
Desc->getNumElems();
+ if (Desc->isRecord()) {
+ // Can't use Descriptor::getType() as that may return a pointer type. Look
+ // at the decl directly.
+ return ASTCtx
+ .getTypeSizeInChars(
+ ASTCtx.getCanonicalTagType(Desc->ElemRecord->getDecl()))
+ .getQuantity();
+ }
- if (Desc->isRecord())
- return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
-
- llvm_unreachable("Unhandled descriptor type");
- return 0;
+ return std::nullopt;
}
+/// Compute the byte offset of \p Ptr in the full declaration.
static unsigned computePointerOffset(const ASTContext &ASTCtx,
const Pointer &Ptr) {
unsigned Result = 0;
Pointer P = Ptr;
- while (P.isArrayElement() || P.isField()) {
+ while (P.isField() || P.isArrayElement()) {
P = P.expand();
const Descriptor *D = P.getFieldDesc();
@@ -2138,7 +2335,6 @@ static unsigned computePointerOffset(const ASTContext &ASTCtx,
Result += ElemSize * P.getIndex();
P = P.expand().getArray();
} else if (P.isBaseClass()) {
-
const auto *RD = cast<CXXRecordDecl>(D->asDecl());
bool IsVirtual = Ptr.isVirtualBaseClass();
P = P.getBase();
@@ -2167,30 +2363,136 @@ static unsigned computePointerOffset(const ASTContext &ASTCtx,
return Result;
}
+/// Does Ptr point to the last subobject?
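+/// E.g. in `struct S { int A; int B[2]; };`, a pointer to the field B (or
+/// into it) points to the last subobject, while a pointer to A does not.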
+static bool pointsToLastObject(const Pointer &Ptr) {
+ Pointer P = Ptr;
+ while (!P.isRoot()) {
+ if (P.isArrayElement()) {
+ P = P.expand().getArray();
+ continue;
+ }
+ if (P.isBaseClass()) {
+ if (P.getRecord()->getNumFields() > 0)
+ return false;
+ P = P.getBase();
+ continue;
+ }
+
+ Pointer Base = P.getBase();
+ if (const Record *R = Base.getRecord()) {
+ assert(P.getField());
+ if (P.getField()->getFieldIndex() != R->getNumFields() - 1)
+ return false;
+ }
+ P = Base;
+ }
+
+ return true;
+}
+
+/// Does Ptr point to the last object AND to a flexible array member?
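+/// E.g. for `struct S { int N; char Data[]; };`, a pointer to Data may
+/// legitimately address storage past the declared array size, depending on
+/// -fstrict-flex-arrays.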
+static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const Pointer &Ptr) {
+ auto isFlexibleArrayMember = [&](const Descriptor *FieldDesc) {
+ using FAMKind = LangOptions::StrictFlexArraysLevelKind;
+ FAMKind StrictFlexArraysLevel =
+ Ctx.getLangOpts().getStrictFlexArraysLevel();
+
+ if (StrictFlexArraysLevel == FAMKind::Default)
+ return true;
+
+ unsigned NumElems = FieldDesc->getNumElems();
+ if (NumElems == 0 && StrictFlexArraysLevel != FAMKind::IncompleteOnly)
+ return true;
+
+ if (NumElems == 1 && StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
+ return true;
+ return false;
+ };
+
+ const Descriptor *FieldDesc = Ptr.getFieldDesc();
+ if (!FieldDesc->isArray())
+ return false;
+
+ return Ptr.isDummy() && pointsToLastObject(Ptr) &&
+ isFlexibleArrayMember(FieldDesc);
+}
+
static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call) {
+ const ASTContext &ASTCtx = S.getASTContext();
PrimType KindT = *S.getContext().classify(Call->getArg(1));
- [[maybe_unused]] unsigned Kind = popToAPSInt(S.Stk, KindT).getZExtValue();
-
+ // From the GCC docs:
+ // Kind is an integer constant from 0 to 3. If the least significant bit is
+  // clear, objects are whole variables. If it is set, the closest surrounding
+  // subobject is considered the object a pointer points to. The second bit
+  // determines whether the maximum or the minimum of the remaining bytes is
+  // computed.
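+  // E.g. for `char Buf[8][4]` and `&Buf[1][0]`, Kind 0 yields 28 (bytes left
+  // in the whole object) while Kind 1 yields 4 (bytes left in Buf[1]).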
+ unsigned Kind = popToAPSInt(S.Stk, KindT).getZExtValue();
assert(Kind <= 3 && "unexpected kind");
-
+ bool UseFieldDesc = (Kind & 1u);
+ bool ReportMinimum = (Kind & 2u);
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (Ptr.isZero())
+ if (Call->getArg(0)->HasSideEffects(ASTCtx)) {
+ // "If there are any side effects in them, it returns (size_t) -1
+ // for type 0 or 1 and (size_t) 0 for type 2 or 3."
+ pushInteger(S, Kind <= 1 ? -1 : 0, Call->getType());
+ return true;
+ }
+
+ if (Ptr.isZero() || !Ptr.isBlockPointer())
return false;
- const Descriptor *DeclDesc = Ptr.getDeclDesc();
- if (!DeclDesc)
+ // We can't load through pointers.
+ if (Ptr.isDummy() && Ptr.getType()->isPointerType())
return false;
- const ASTContext &ASTCtx = S.getASTContext();
+ bool DetermineForCompleteObject = Ptr.getFieldDesc() == Ptr.getDeclDesc();
+ const Descriptor *DeclDesc = Ptr.getDeclDesc();
+ assert(DeclDesc);
+
+ if (!UseFieldDesc || DetermineForCompleteObject) {
+ // Lower bound, so we can't fall back to this.
+ if (ReportMinimum && !DetermineForCompleteObject)
+ return false;
- unsigned ByteOffset = computePointerOffset(ASTCtx, Ptr);
- unsigned FullSize = computeFullDescSize(ASTCtx, DeclDesc);
+ // Can't read beyond the pointer decl desc.
+ if (!UseFieldDesc && !ReportMinimum && DeclDesc->getType()->isPointerType())
+ return false;
+ } else {
+ if (isUserWritingOffTheEnd(ASTCtx, Ptr.expand())) {
+ // If we cannot determine the size of the initial allocation, then we
+      // can't give an accurate upper-bound. However, we are still able to give
+ // conservative lower-bounds for Type=3.
+ if (Kind == 1)
+ return false;
+ }
+ }
- pushInteger(S, FullSize - ByteOffset, Call->getType());
+ const Descriptor *Desc = UseFieldDesc ? Ptr.getFieldDesc() : DeclDesc;
+ assert(Desc);
+ std::optional<unsigned> FullSize = computeFullDescSize(ASTCtx, Desc);
+ if (!FullSize)
+ return false;
+
+ unsigned ByteOffset;
+ if (UseFieldDesc) {
+ if (Ptr.isBaseClass())
+ ByteOffset = computePointerOffset(ASTCtx, Ptr.getBase()) -
+ computePointerOffset(ASTCtx, Ptr);
+ else
+ ByteOffset =
+ computePointerOffset(ASTCtx, Ptr) -
+ computePointerOffset(ASTCtx, Ptr.expand().atIndex(0).narrow());
+ } else
+ ByteOffset = computePointerOffset(ASTCtx, Ptr);
+
+ assert(ByteOffset <= *FullSize);
+ unsigned Result = *FullSize - ByteOffset;
+
+ pushInteger(S, Result, Call->getType());
return true;
}
@@ -2232,17 +2534,13 @@ static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC,
return false;
if (!CheckMutable(S, OpPC, Ptr))
return false;
- if (!CheckDummy(S, OpPC, Ptr, AK_Read))
+ if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
return false;
}
// Check if we're currently running an initializer.
- for (InterpFrame *Frame = S.Current; Frame; Frame = Frame->Caller) {
- if (const Function *F = Frame->getFunction();
- F && F->isConstructor() && Frame->getThis().block() == Ptr.block()) {
- return Error(2);
- }
- }
+ if (llvm::is_contained(S.InitializingBlocks, Ptr.block()))
+ return Error(2);
if (S.EvaluatingDecl && Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)
return Error(2);
@@ -2250,10 +2548,9 @@ static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC,
return true;
}
-static bool interp__builtin_elementwise_sat(InterpState &S, CodePtr OpPC,
- const CallExpr *Call,
- unsigned BuiltinID) {
- Call->dumpColor();
+static bool interp__builtin_elementwise_int_binop(
+ InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinID,
+ llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
assert(Call->getNumArgs() == 2);
// Single integer case.
@@ -2263,11 +2560,84 @@ static bool interp__builtin_elementwise_sat(InterpState &S, CodePtr OpPC,
S.Stk, *S.getContext().classify(Call->getArg(1)->getType()));
APSInt LHS = popToAPSInt(
S.Stk, *S.getContext().classify(Call->getArg(0)->getType()));
+ APInt Result = Fn(LHS, RHS);
+ pushInteger(S, APSInt(std::move(Result), !LHS.isSigned()), Call->getType());
+ return true;
+ }
+
+ const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
+ assert(VT->getElementType()->isIntegralOrEnumerationType());
+ PrimType ElemT = *S.getContext().classify(VT->getElementType());
+ unsigned NumElems = VT->getNumElements();
+ bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
+
+ // Vector + Scalar case.
+ if (!Call->getArg(1)->getType()->isVectorType()) {
+ assert(Call->getArg(1)->getType()->isIntegralOrEnumerationType());
+
+ APSInt RHS = popToAPSInt(
+ S.Stk, *S.getContext().classify(Call->getArg(1)->getType()));
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+
+ for (unsigned I = 0; I != NumElems; ++I) {
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ Dst.elem<T>(I) = static_cast<T>(
+ APSInt(Fn(LHS.elem<T>(I).toAPSInt(), RHS), DestUnsigned));
+ });
+ }
+ Dst.initializeAllElements();
+ return true;
+ }
+
+ // Vector case.
+ assert(Call->getArg(0)->getType()->isVectorType() &&
+ Call->getArg(1)->getType()->isVectorType());
+ assert(VT->getElementType() ==
+ Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
+ assert(VT->getNumElements() ==
+ Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
+ assert(VT->getElementType()->isIntegralOrEnumerationType());
+
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ for (unsigned I = 0; I != NumElems; ++I) {
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ APSInt Elem1 = LHS.elem<T>(I).toAPSInt();
+ APSInt Elem2 = RHS.elem<T>(I).toAPSInt();
+ Dst.elem<T>(I) = static_cast<T>(APSInt(Fn(Elem1, Elem2), DestUnsigned));
+ });
+ }
+ Dst.initializeAllElements();
+
+ return true;
+}
+
+static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned BuiltinID) {
+ assert(Call->getNumArgs() == 2);
+
+ QualType Arg0Type = Call->getArg(0)->getType();
+
+ // TODO: Support floating-point types.
+ if (!(Arg0Type->isIntegerType() ||
+ (Arg0Type->isVectorType() &&
+ Arg0Type->castAs<VectorType>()->getElementType()->isIntegerType())))
+ return false;
+
+ if (!Arg0Type->isVectorType()) {
+ assert(!Call->getArg(1)->getType()->isVectorType());
+ APSInt RHS = popToAPSInt(
+ S.Stk, *S.getContext().classify(Call->getArg(1)->getType()));
+ APSInt LHS = popToAPSInt(
+ S.Stk, *S.getContext().classify(Call->getArg(0)->getType()));
APInt Result;
- if (BuiltinID == Builtin::BI__builtin_elementwise_add_sat) {
- Result = LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
- } else if (BuiltinID == Builtin::BI__builtin_elementwise_sub_sat) {
- Result = LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
+ if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
+ Result = std::max(LHS, RHS);
+ } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
+ Result = std::min(LHS, RHS);
} else {
llvm_unreachable("Wrong builtin ID");
}
@@ -2300,13 +2670,11 @@ static bool interp__builtin_elementwise_sat(InterpState &S, CodePtr OpPC,
});
APSInt Result;
- if (BuiltinID == Builtin::BI__builtin_elementwise_add_sat) {
- Result = APSInt(Elem1.isSigned() ? Elem1.sadd_sat(Elem2)
- : Elem1.uadd_sat(Elem2),
+ if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
+ Result = APSInt(std::max(Elem1, Elem2),
Call->getType()->isUnsignedIntegerOrEnumerationType());
- } else if (BuiltinID == Builtin::BI__builtin_elementwise_sub_sat) {
- Result = APSInt(Elem1.isSigned() ? Elem1.ssub_sat(Elem2)
- : Elem1.usub_sat(Elem2),
+ } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
+ Result = APSInt(std::min(Elem1, Elem2),
Call->getType()->isUnsignedIntegerOrEnumerationType());
} else {
llvm_unreachable("Wrong builtin ID");
@@ -2320,6 +2688,147 @@ static bool interp__builtin_elementwise_sat(InterpState &S, CodePtr OpPC,
return true;
}
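+/// Multiplies the even-indexed elements of the two source vectors, widening
+/// each product to twice the element width (x86 pmuludq/pmuldq semantics).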
+static bool interp__builtin_ia32_pmul(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned BuiltinID) {
+ assert(Call->getArg(0)->getType()->isVectorType() &&
+ Call->getArg(1)->getType()->isVectorType());
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+
+ const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
+ PrimType ElemT = *S.getContext().classify(VT->getElementType());
+ unsigned SourceLen = VT->getNumElements();
+
+ PrimType DstElemT = *S.getContext().classify(
+ Call->getType()->castAs<VectorType>()->getElementType());
+ unsigned DstElem = 0;
+ for (unsigned I = 0; I != SourceLen; I += 2) {
+ APSInt Elem1;
+ APSInt Elem2;
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ Elem1 = LHS.elem<T>(I).toAPSInt();
+ Elem2 = RHS.elem<T>(I).toAPSInt();
+ });
+
+ APSInt Result;
+ switch (BuiltinID) {
+ case clang::X86::BI__builtin_ia32_pmuludq128:
+ case clang::X86::BI__builtin_ia32_pmuludq256:
+ case clang::X86::BI__builtin_ia32_pmuludq512:
+ Result = APSInt(llvm::APIntOps::muluExtended(Elem1, Elem2),
+ /*IsUnsigned=*/true);
+ break;
+ case clang::X86::BI__builtin_ia32_pmuldq128:
+ case clang::X86::BI__builtin_ia32_pmuldq256:
+ case clang::X86::BI__builtin_ia32_pmuldq512:
+ Result = APSInt(llvm::APIntOps::mulsExtended(Elem1, Elem2),
+ /*IsUnsigned=*/false);
+ break;
+ }
+ INT_TYPE_SWITCH_NO_BOOL(DstElemT,
+ { Dst.elem<T>(DstElem) = static_cast<T>(Result); });
+ ++DstElem;
+ }
+
+ Dst.initializeAllElements();
+ return true;
+}
+
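+/// Fused multiply-add: computes X * Y + Z with a single rounding step,
+/// applied element-wise when the arguments are vectors.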
+static bool interp__builtin_elementwise_fma(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call) {
+ assert(Call->getNumArgs() == 3);
+
+ FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
+ llvm::RoundingMode RM = getRoundingMode(FPO);
+ const QualType Arg1Type = Call->getArg(0)->getType();
+ const QualType Arg2Type = Call->getArg(1)->getType();
+ const QualType Arg3Type = Call->getArg(2)->getType();
+
+ // Non-vector floating point types.
+ if (!Arg1Type->isVectorType()) {
+ assert(!Arg2Type->isVectorType());
+ assert(!Arg3Type->isVectorType());
+ (void)Arg2Type;
+ (void)Arg3Type;
+
+ const Floating &Z = S.Stk.pop<Floating>();
+ const Floating &Y = S.Stk.pop<Floating>();
+ const Floating &X = S.Stk.pop<Floating>();
+ APFloat F = X.getAPFloat();
+ F.fusedMultiplyAdd(Y.getAPFloat(), Z.getAPFloat(), RM);
+ Floating Result = S.allocFloat(X.getSemantics());
+ Result.copy(F);
+ S.Stk.push<Floating>(Result);
+ return true;
+ }
+
+ // Vector type.
+ assert(Arg1Type->isVectorType() && Arg2Type->isVectorType() &&
+ Arg3Type->isVectorType());
+
+ const VectorType *VecT = Arg1Type->castAs<VectorType>();
+ const QualType ElemT = VecT->getElementType();
+ unsigned NumElems = VecT->getNumElements();
+
+ assert(ElemT == Arg2Type->castAs<VectorType>()->getElementType() &&
+ ElemT == Arg3Type->castAs<VectorType>()->getElementType());
+ assert(NumElems == Arg2Type->castAs<VectorType>()->getNumElements() &&
+ NumElems == Arg3Type->castAs<VectorType>()->getNumElements());
+ assert(ElemT->isRealFloatingType());
+ (void)ElemT;
+
+ const Pointer &VZ = S.Stk.pop<Pointer>();
+ const Pointer &VY = S.Stk.pop<Pointer>();
+ const Pointer &VX = S.Stk.pop<Pointer>();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ for (unsigned I = 0; I != NumElems; ++I) {
+ using T = PrimConv<PT_Float>::T;
+ APFloat X = VX.elem<T>(I).getAPFloat();
+ APFloat Y = VY.elem<T>(I).getAPFloat();
+ APFloat Z = VZ.elem<T>(I).getAPFloat();
+ (void)X.fusedMultiplyAdd(Y, Z, RM);
+ Dst.elem<Floating>(I) = Floating(X);
+ }
+ Dst.initializeAllElements();
+ return true;
+}
+
+/// AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
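+/// E.g. with Mask = 0b0101, elements 0 and 2 are taken from LHS and all
+/// other elements from RHS.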
+static bool interp__builtin_select(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call) {
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+ PrimType MaskT = *S.getContext().classify(Call->getArg(0));
+ APSInt Mask = popToAPSInt(S.Stk, MaskT);
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+
+ assert(LHS.getNumElems() == RHS.getNumElems());
+ assert(LHS.getNumElems() == Dst.getNumElems());
+ unsigned NumElems = LHS.getNumElems();
+ PrimType ElemT = LHS.getFieldDesc()->getPrimType();
+ PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
+
+ for (unsigned I = 0; I != NumElems; ++I) {
+ if (ElemT == PT_Float) {
+ assert(DstElemT == PT_Float);
+ Dst.elem<Floating>(I) =
+ Mask[I] ? LHS.elem<Floating>(I) : RHS.elem<Floating>(I);
+ } else {
+ APSInt Elem;
+ INT_TYPE_SWITCH(ElemT, {
+ Elem = Mask[I] ? LHS.elem<T>(I).toAPSInt() : RHS.elem<T>(I).toAPSInt();
+ });
+ INT_TYPE_SWITCH_NO_BOOL(DstElemT,
+ { Dst.elem<T>(I) = static_cast<T>(Elem); });
+ }
+ }
+ Dst.initializeAllElements();
+
+ return true;
+}
+
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
uint32_t BuiltinID) {
if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
@@ -2596,6 +3105,11 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI__builtin_ctzg:
return interp__builtin_ctz(S, OpPC, Frame, Call, BuiltinID);
+ case Builtin::BI__builtin_elementwise_ctlz:
+ case Builtin::BI__builtin_elementwise_cttz:
+ return interp__builtin_elementwise_countzeroes(S, OpPC, Frame, Call,
+ BuiltinID);
+
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64:
@@ -2687,6 +3201,9 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return interp__builtin_elementwise_popcount(S, OpPC, Frame, Call,
BuiltinID);
+ case Builtin::BI__builtin_elementwise_abs:
+ return interp__builtin_elementwise_abs(S, OpPC, Frame, Call, BuiltinID);
+
case Builtin::BI__builtin_memcpy:
case Builtin::BImemcpy:
case Builtin::BI__builtin_wmemcpy:
@@ -2724,8 +3241,154 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return interp__builtin_is_within_lifetime(S, OpPC, Call);
case Builtin::BI__builtin_elementwise_add_sat:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, BuiltinID, [](const APSInt &LHS, const APSInt &RHS) {
+ return LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
+ });
+
case Builtin::BI__builtin_elementwise_sub_sat:
- return interp__builtin_elementwise_sat(S, OpPC, Call, BuiltinID);
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, BuiltinID, [](const APSInt &LHS, const APSInt &RHS) {
+ return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
+ });
+
+ case clang::X86::BI__builtin_ia32_pmulhuw128:
+ case clang::X86::BI__builtin_ia32_pmulhuw256:
+ case clang::X86::BI__builtin_ia32_pmulhuw512:
+ return interp__builtin_elementwise_int_binop(S, OpPC, Call, BuiltinID,
+ llvm::APIntOps::mulhu);
+
+ case clang::X86::BI__builtin_ia32_pmulhw128:
+ case clang::X86::BI__builtin_ia32_pmulhw256:
+ case clang::X86::BI__builtin_ia32_pmulhw512:
+ return interp__builtin_elementwise_int_binop(S, OpPC, Call, BuiltinID,
+ llvm::APIntOps::mulhs);
+
+ case clang::X86::BI__builtin_ia32_psllv2di:
+ case clang::X86::BI__builtin_ia32_psllv4di:
+ case clang::X86::BI__builtin_ia32_psllv4si:
+ case clang::X86::BI__builtin_ia32_psllv8si:
+ case clang::X86::BI__builtin_ia32_psllwi128:
+ case clang::X86::BI__builtin_ia32_psllwi256:
+ case clang::X86::BI__builtin_ia32_psllwi512:
+ case clang::X86::BI__builtin_ia32_pslldi128:
+ case clang::X86::BI__builtin_ia32_pslldi256:
+ case clang::X86::BI__builtin_ia32_pslldi512:
+ case clang::X86::BI__builtin_ia32_psllqi128:
+ case clang::X86::BI__builtin_ia32_psllqi256:
+ case clang::X86::BI__builtin_ia32_psllqi512:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, BuiltinID, [](const APSInt &LHS, const APSInt &RHS) {
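+        // Unlike C++ <<, these intrinsics define shift counts >= the bit
+        // width to yield 0 rather than being undefined.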
+ if (RHS.uge(LHS.getBitWidth())) {
+ return APInt::getZero(LHS.getBitWidth());
+ }
+ return LHS.shl(RHS.getZExtValue());
+ });
+
+ case clang::X86::BI__builtin_ia32_psrav4si:
+ case clang::X86::BI__builtin_ia32_psrav8si:
+ case clang::X86::BI__builtin_ia32_psrawi128:
+ case clang::X86::BI__builtin_ia32_psrawi256:
+ case clang::X86::BI__builtin_ia32_psrawi512:
+ case clang::X86::BI__builtin_ia32_psradi128:
+ case clang::X86::BI__builtin_ia32_psradi256:
+ case clang::X86::BI__builtin_ia32_psradi512:
+ case clang::X86::BI__builtin_ia32_psraqi128:
+ case clang::X86::BI__builtin_ia32_psraqi256:
+ case clang::X86::BI__builtin_ia32_psraqi512:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, BuiltinID, [](const APSInt &LHS, const APSInt &RHS) {
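+        // Shift counts >= the bit width saturate: the result replicates the
+        // sign bit (-1 for negative values, 0 otherwise).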
+ if (RHS.uge(LHS.getBitWidth())) {
+ return LHS.ashr(LHS.getBitWidth() - 1);
+ }
+ return LHS.ashr(RHS.getZExtValue());
+ });
+
+ case clang::X86::BI__builtin_ia32_psrlv2di:
+ case clang::X86::BI__builtin_ia32_psrlv4di:
+ case clang::X86::BI__builtin_ia32_psrlv4si:
+ case clang::X86::BI__builtin_ia32_psrlv8si:
+ case clang::X86::BI__builtin_ia32_psrlwi128:
+ case clang::X86::BI__builtin_ia32_psrlwi256:
+ case clang::X86::BI__builtin_ia32_psrlwi512:
+ case clang::X86::BI__builtin_ia32_psrldi128:
+ case clang::X86::BI__builtin_ia32_psrldi256:
+ case clang::X86::BI__builtin_ia32_psrldi512:
+ case clang::X86::BI__builtin_ia32_psrlqi128:
+ case clang::X86::BI__builtin_ia32_psrlqi256:
+ case clang::X86::BI__builtin_ia32_psrlqi512:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, BuiltinID, [](const APSInt &LHS, const APSInt &RHS) {
+ if (RHS.uge(LHS.getBitWidth())) {
+ return APInt::getZero(LHS.getBitWidth());
+ }
+ return LHS.lshr(RHS.getZExtValue());
+ });
+
+ case clang::X86::BI__builtin_ia32_vprotbi:
+ case clang::X86::BI__builtin_ia32_vprotdi:
+ case clang::X86::BI__builtin_ia32_vprotqi:
+ case clang::X86::BI__builtin_ia32_vprotwi:
+ case clang::X86::BI__builtin_ia32_prold128:
+ case clang::X86::BI__builtin_ia32_prold256:
+ case clang::X86::BI__builtin_ia32_prold512:
+ case clang::X86::BI__builtin_ia32_prolq128:
+ case clang::X86::BI__builtin_ia32_prolq256:
+ case clang::X86::BI__builtin_ia32_prolq512:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, BuiltinID,
+ [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotl(RHS); });
+
+ case clang::X86::BI__builtin_ia32_prord128:
+ case clang::X86::BI__builtin_ia32_prord256:
+ case clang::X86::BI__builtin_ia32_prord512:
+ case clang::X86::BI__builtin_ia32_prorq128:
+ case clang::X86::BI__builtin_ia32_prorq256:
+ case clang::X86::BI__builtin_ia32_prorq512:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, BuiltinID,
+ [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotr(RHS); });
+
+ case Builtin::BI__builtin_elementwise_max:
+ case Builtin::BI__builtin_elementwise_min:
+ return interp__builtin_elementwise_maxmin(S, OpPC, Call, BuiltinID);
+
+ case clang::X86::BI__builtin_ia32_pmuldq128:
+ case clang::X86::BI__builtin_ia32_pmuldq256:
+ case clang::X86::BI__builtin_ia32_pmuldq512:
+ case clang::X86::BI__builtin_ia32_pmuludq128:
+ case clang::X86::BI__builtin_ia32_pmuludq256:
+ case clang::X86::BI__builtin_ia32_pmuludq512:
+ return interp__builtin_ia32_pmul(S, OpPC, Call, BuiltinID);
+
+ case Builtin::BI__builtin_elementwise_fma:
+ return interp__builtin_elementwise_fma(S, OpPC, Call);
+
+ case X86::BI__builtin_ia32_selectb_128:
+ case X86::BI__builtin_ia32_selectb_256:
+ case X86::BI__builtin_ia32_selectb_512:
+ case X86::BI__builtin_ia32_selectw_128:
+ case X86::BI__builtin_ia32_selectw_256:
+ case X86::BI__builtin_ia32_selectw_512:
+ case X86::BI__builtin_ia32_selectd_128:
+ case X86::BI__builtin_ia32_selectd_256:
+ case X86::BI__builtin_ia32_selectd_512:
+ case X86::BI__builtin_ia32_selectq_128:
+ case X86::BI__builtin_ia32_selectq_256:
+ case X86::BI__builtin_ia32_selectq_512:
+ case X86::BI__builtin_ia32_selectph_128:
+ case X86::BI__builtin_ia32_selectph_256:
+ case X86::BI__builtin_ia32_selectph_512:
+ case X86::BI__builtin_ia32_selectpbf_128:
+ case X86::BI__builtin_ia32_selectpbf_256:
+ case X86::BI__builtin_ia32_selectpbf_512:
+ case X86::BI__builtin_ia32_selectps_128:
+ case X86::BI__builtin_ia32_selectps_256:
+ case X86::BI__builtin_ia32_selectps_512:
+ case X86::BI__builtin_ia32_selectpd_128:
+ case X86::BI__builtin_ia32_selectpd_256:
+ case X86::BI__builtin_ia32_selectpd_512:
+ return interp__builtin_select(S, OpPC, Call);
default:
S.FFDiag(S.Current->getLocation(OpPC),
@@ -2751,11 +3414,8 @@ bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
switch (Node.getKind()) {
case OffsetOfNode::Field: {
const FieldDecl *MemberDecl = Node.getField();
- const RecordType *RT = CurrentType->getAs<RecordType>();
- if (!RT)
- return false;
- const RecordDecl *RD = RT->getDecl();
- if (RD->isInvalidDecl())
+ const auto *RD = CurrentType->getAsRecordDecl();
+ if (!RD || RD->isInvalidDecl())
return false;
const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
unsigned FieldIndex = MemberDecl->getFieldIndex();
@@ -2784,22 +3444,19 @@ bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
return false;
// Find the layout of the class whose base we are looking into.
- const RecordType *RT = CurrentType->getAs<RecordType>();
- if (!RT)
- return false;
- const RecordDecl *RD = RT->getDecl();
- if (RD->isInvalidDecl())
+ const auto *RD = CurrentType->getAsCXXRecordDecl();
+ if (!RD || RD->isInvalidDecl())
return false;
const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
// Find the base class itself.
CurrentType = BaseSpec->getType();
- const RecordType *BaseRT = CurrentType->getAs<RecordType>();
- if (!BaseRT)
+ const auto *BaseRD = CurrentType->getAsCXXRecordDecl();
+ if (!BaseRD)
return false;
// Add the offset to the base.
- Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
+ Result += RL.getBaseClassOffset(BaseRD);
break;
}
case OffsetOfNode::Identifier:
diff --git a/clang/lib/AST/ByteCode/InterpFrame.cpp b/clang/lib/AST/ByteCode/InterpFrame.cpp
index 9342192..b9dc2ae 100644
--- a/clang/lib/AST/ByteCode/InterpFrame.cpp
+++ b/clang/lib/AST/ByteCode/InterpFrame.cpp
@@ -169,7 +169,7 @@ void InterpFrame::describe(llvm::raw_ostream &OS) const {
} else if (const auto *M = dyn_cast<CXXMethodDecl>(F)) {
print(OS, This, S.getASTContext(),
S.getASTContext().getLValueReferenceType(
- S.getASTContext().getRecordType(M->getParent())));
+ S.getASTContext().getCanonicalTagType(M->getParent())));
OS << ".";
}
}
@@ -195,12 +195,6 @@ void InterpFrame::describe(llvm::raw_ostream &OS) const {
OS << ")";
}
-Frame *InterpFrame::getCaller() const {
- if (Caller->Caller)
- return Caller;
- return S.getSplitFrame();
-}
-
SourceRange InterpFrame::getCallRange() const {
if (!Caller->Func) {
if (SourceRange NullRange = S.getRange(nullptr, {}); NullRange.isValid())
@@ -231,6 +225,10 @@ Pointer InterpFrame::getLocalPointer(unsigned Offset) const {
return Pointer(localBlock(Offset));
}
+Block *InterpFrame::getLocalBlock(unsigned Offset) const {
+ return localBlock(Offset);
+}
+
Pointer InterpFrame::getParamPointer(unsigned Off) {
// Return the block if it was created previously.
if (auto Pt = Params.find(Off); Pt != Params.end())
diff --git a/clang/lib/AST/ByteCode/InterpFrame.h b/clang/lib/AST/ByteCode/InterpFrame.h
index cfebe93..cf4d27d 100644
--- a/clang/lib/AST/ByteCode/InterpFrame.h
+++ b/clang/lib/AST/ByteCode/InterpFrame.h
@@ -59,7 +59,7 @@ public:
void describe(llvm::raw_ostream &OS) const override;
/// Returns the parent frame object.
- Frame *getCaller() const override;
+ Frame *getCaller() const override { return Caller; }
/// Returns the location of the call to the frame.
SourceRange getCallRange() const override;
@@ -86,6 +86,7 @@ public:
/// Returns a pointer to a local variables.
Pointer getLocalPointer(unsigned Offset) const;
+ Block *getLocalBlock(unsigned Offset) const;
/// Returns the value of an argument.
template <typename T> const T &getParam(unsigned Offset) const {
diff --git a/clang/lib/AST/ByteCode/InterpStack.cpp b/clang/lib/AST/ByteCode/InterpStack.cpp
index 6b748d6..7920378 100644
--- a/clang/lib/AST/ByteCode/InterpStack.cpp
+++ b/clang/lib/AST/ByteCode/InterpStack.cpp
@@ -26,33 +26,33 @@ InterpStack::~InterpStack() {
std::free(Chunk);
Chunk = nullptr;
StackSize = 0;
-#ifndef NDEBUG
ItemTypes.clear();
-#endif
}
// We keep the last chunk around to reuse.
void InterpStack::clear() {
- if (!Chunk)
- return;
-
- if (Chunk->Next)
- std::free(Chunk->Next);
-
- assert(Chunk);
- StackSize = 0;
-#ifndef NDEBUG
- ItemTypes.clear();
-#endif
+ for (PrimType Item : llvm::reverse(ItemTypes)) {
+ TYPE_SWITCH(Item, { this->discard<T>(); });
+ }
+ assert(ItemTypes.empty());
+ assert(empty());
}
void InterpStack::clearTo(size_t NewSize) {
- assert(NewSize <= size());
- size_t ToShrink = size() - NewSize;
- if (ToShrink == 0)
+ if (NewSize == 0)
+ return clear();
+ if (NewSize == size())
return;
- shrink(ToShrink);
+ assert(NewSize <= size());
+ for (PrimType Item : llvm::reverse(ItemTypes)) {
+ TYPE_SWITCH(Item, { this->discard<T>(); });
+
+ if (size() == NewSize)
+ break;
+ }
+
+ // Note: discard() above already removed the types from ItemTypes.
assert(size() == NewSize);
}
@@ -105,25 +105,9 @@ void InterpStack::shrink(size_t Size) {
Chunk->End -= Size;
StackSize -= Size;
-
-#ifndef NDEBUG
- size_t TypesSize = 0;
- for (PrimType T : ItemTypes)
- TYPE_SWITCH(T, { TypesSize += aligned_size<T>(); });
-
- size_t StackSize = size();
- while (TypesSize > StackSize) {
- TYPE_SWITCH(ItemTypes.back(), {
- TypesSize -= aligned_size<T>();
- ItemTypes.pop_back();
- });
- }
- assert(TypesSize == StackSize);
-#endif
}
void InterpStack::dump() const {
-#ifndef NDEBUG
llvm::errs() << "Items: " << ItemTypes.size() << ". Size: " << size() << '\n';
if (ItemTypes.empty())
return;
@@ -133,11 +117,11 @@ void InterpStack::dump() const {
// The type of the item on the top of the stack is inserted to the back
// of the vector, so the iteration has to happen backwards.
- for (auto TyIt = ItemTypes.rbegin(); TyIt != ItemTypes.rend(); ++TyIt) {
- Offset += align(primSize(*TyIt));
+ for (PrimType Item : llvm::reverse(ItemTypes)) {
+ Offset += align(primSize(Item));
llvm::errs() << Index << '/' << Offset << ": ";
- TYPE_SWITCH(*TyIt, {
+ TYPE_SWITCH(Item, {
const T &V = peek<T>(Offset);
llvm::errs() << V;
});
@@ -145,5 +129,4 @@ void InterpStack::dump() const {
++Index;
}
-#endif
}
diff --git a/clang/lib/AST/ByteCode/InterpStack.h b/clang/lib/AST/ByteCode/InterpStack.h
index 580494e..b0f9f6e 100644
--- a/clang/lib/AST/ByteCode/InterpStack.h
+++ b/clang/lib/AST/ByteCode/InterpStack.h
@@ -17,7 +17,6 @@
#include "IntegralAP.h"
#include "MemberPointer.h"
#include "PrimType.h"
-#include <vector>
namespace clang {
namespace interp {
@@ -33,18 +32,14 @@ public:
/// Constructs a value in place on the top of the stack.
template <typename T, typename... Tys> void push(Tys &&...Args) {
new (grow(aligned_size<T>())) T(std::forward<Tys>(Args)...);
-#ifndef NDEBUG
ItemTypes.push_back(toPrimType<T>());
-#endif
}
/// Returns the value from the top of the stack and removes it.
template <typename T> T pop() {
-#ifndef NDEBUG
assert(!ItemTypes.empty());
assert(ItemTypes.back() == toPrimType<T>());
ItemTypes.pop_back();
-#endif
T *Ptr = &peekInternal<T>();
T Value = std::move(*Ptr);
shrink(aligned_size<T>());
@@ -53,22 +48,20 @@ public:
/// Discards the top value from the stack.
template <typename T> void discard() {
-#ifndef NDEBUG
assert(!ItemTypes.empty());
assert(ItemTypes.back() == toPrimType<T>());
ItemTypes.pop_back();
-#endif
T *Ptr = &peekInternal<T>();
- Ptr->~T();
+ if constexpr (!std::is_trivially_destructible_v<T>) {
+ Ptr->~T();
+ }
shrink(aligned_size<T>());
}
/// Returns a reference to the value on the top of the stack.
template <typename T> T &peek() const {
-#ifndef NDEBUG
assert(!ItemTypes.empty());
assert(ItemTypes.back() == toPrimType<T>());
-#endif
return peekInternal<T>();
}
@@ -83,7 +76,7 @@ public:
/// Returns the size of the stack in bytes.
size_t size() const { return StackSize; }
- /// Clears the stack without calling any destructors.
+  /// Clears the stack, running the destructors of any remaining values.
void clear();
void clearTo(size_t NewSize);
@@ -146,9 +139,11 @@ private:
/// Total size of the stack.
size_t StackSize = 0;
-#ifndef NDEBUG
- /// vector recording the type of data we pushed into the stack.
- std::vector<PrimType> ItemTypes;
+  /// SmallVector recording the types of the values pushed onto the stack.
+  /// We don't usually need this during normal interpretation, but when
+  /// aborting we need the type information to run the destructors for
+  /// whatever is left on the stack.
+ llvm::SmallVector<PrimType> ItemTypes;
template <typename T> static constexpr PrimType toPrimType() {
if constexpr (std::is_same_v<T, Pointer>)
@@ -192,7 +187,6 @@ private:
llvm_unreachable("unknown type push()'ed into InterpStack");
}
-#endif
};
} // namespace interp
diff --git a/clang/lib/AST/ByteCode/InterpState.cpp b/clang/lib/AST/ByteCode/InterpState.cpp
index a06b125..a2a1e58 100644
--- a/clang/lib/AST/ByteCode/InterpState.cpp
+++ b/clang/lib/AST/ByteCode/InterpState.cpp
@@ -45,6 +45,12 @@ InterpState::~InterpState() {
while (DeadBlocks) {
DeadBlock *Next = DeadBlocks->Next;
+
+ // There might be a pointer in a global structure pointing to the dead
+ // block.
+ for (Pointer *P = DeadBlocks->B.Pointers; P; P = P->asBlockPointer().Next)
+ DeadBlocks->B.removePointer(P);
+
std::free(DeadBlocks);
DeadBlocks = Next;
}
@@ -53,20 +59,10 @@ InterpState::~InterpState() {
void InterpState::cleanup() {
// As a last resort, make sure all pointers still pointing to a dead block
// don't point to it anymore.
- for (DeadBlock *DB = DeadBlocks; DB; DB = DB->Next) {
- for (Pointer *P = DB->B.Pointers; P; P = P->asBlockPointer().Next) {
- P->PointeeStorage.BS.Pointee = nullptr;
- }
- }
-
Alloc.cleanup();
}
-Frame *InterpState::getCurrentFrame() {
- if (Current && Current->Caller)
- return Current;
- return Parent.getCurrentFrame();
-}
+Frame *InterpState::getCurrentFrame() { return Current; }
bool InterpState::reportOverflow(const Expr *E, const llvm::APSInt &Value) {
QualType Type = E->getType();
@@ -76,8 +72,9 @@ bool InterpState::reportOverflow(const Expr *E, const llvm::APSInt &Value) {
void InterpState::deallocate(Block *B) {
assert(B);
- const Descriptor *Desc = B->getDescriptor();
- assert(Desc);
+ assert(!B->isDynamic());
+ assert(!B->isStatic());
+ assert(!B->isDead());
// The block might have a pointer saved in a field in its data
// that points to the block itself. We call the dtor first,
@@ -87,6 +84,7 @@ void InterpState::deallocate(Block *B) {
if (B->IsInitialized)
B->invokeDtor();
+ assert(!B->isInitialized());
if (B->hasPointers()) {
size_t Size = B->getSize();
// Allocate a new block, transferring over pointers.
@@ -95,24 +93,20 @@ void InterpState::deallocate(Block *B) {
auto *D = new (Memory) DeadBlock(DeadBlocks, B);
// Since the block doesn't hold any actual data anymore, we can just
// memcpy() everything over.
- std::memcpy(D->rawData(), B->rawData(), Desc->getAllocSize());
- D->B.IsInitialized = B->IsInitialized;
-
- // We moved the contents over to the DeadBlock.
- B->IsInitialized = false;
+ std::memcpy(D->rawData(), B->rawData(), Size);
+ D->B.IsInitialized = false;
}
}
bool InterpState::maybeDiagnoseDanglingAllocations() {
- bool NoAllocationsLeft = (Alloc.getNumAllocations() == 0);
+ bool NoAllocationsLeft = !Alloc.hasAllocations();
if (!checkingPotentialConstantExpression()) {
- for (const auto &It : Alloc.allocation_sites()) {
- assert(It.second.size() > 0);
+ for (const auto &[Source, Site] : Alloc.allocation_sites()) {
+ assert(!Site.empty());
- const Expr *Source = It.first;
CCEDiag(Source->getExprLoc(), diag::note_constexpr_memory_leak)
- << (It.second.size() - 1) << Source->getSourceRange();
+ << (Site.size() - 1) << Source->getSourceRange();
}
}
// Keep evaluating before C++20, since the CXXNewExpr wasn't valid there
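The deallocate() path above moves a block's bytes into a DeadBlock whenever pointers still reference it, so stale accesses can be diagnosed instead of reading freed memory. A rough sketch of the tombstone idea, with illustrative Storage/Handle types standing in for the real Block/Pointer machinery:

  #include <cstddef>
  #include <utility>
  #include <vector>

  struct Handle;

  struct Storage {
    bool Dead = false;
    std::vector<Handle *> Users; // the real code keeps an intrusive list
    std::vector<std::byte> Bytes;
  };

  struct Handle { Storage *S = nullptr; };

  // If users remain, move the bytes into a tombstone and retarget every
  // user; a later access through a Handle can then check Dead instead of
  // touching freed memory.
  Storage *killWithTombstone(Storage *S) {
    if (S->Users.empty()) {
      delete S;
      return nullptr;
    }
    auto *Tomb = new Storage{/*Dead=*/true, std::move(S->Users),
                             std::move(S->Bytes)};
    for (Handle *H : Tomb->Users)
      H->S = Tomb;
    delete S;
    return Tomb;
  }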
diff --git a/clang/lib/AST/ByteCode/InterpState.h b/clang/lib/AST/ByteCode/InterpState.h
index 861e4c3..f123a1f 100644
--- a/clang/lib/AST/ByteCode/InterpState.h
+++ b/clang/lib/AST/ByteCode/InterpState.h
@@ -57,14 +57,11 @@ public:
bool diagnosing() const { return getEvalStatus().Diag != nullptr; }
// Stack frame accessors.
- Frame *getSplitFrame() { return Parent.getCurrentFrame(); }
Frame *getCurrentFrame() override;
unsigned getCallStackDepth() override {
return Current ? (Current->getDepth() + 1) : 1;
}
- const Frame *getBottomFrame() const override {
- return Parent.getBottomFrame();
- }
+ const Frame *getBottomFrame() const override { return &BottomFrame; }
// Access objects from the walker context.
Expr::EvalStatus &getEvalStatus() const override {
diff --git a/clang/lib/AST/ByteCode/MemberPointer.h b/clang/lib/AST/ByteCode/MemberPointer.h
index b17ce25..8dd75ca 100644
--- a/clang/lib/AST/ByteCode/MemberPointer.h
+++ b/clang/lib/AST/ByteCode/MemberPointer.h
@@ -51,6 +51,12 @@ public:
FunctionPointer toFunctionPointer(const Context &Ctx) const;
+ bool isBaseCastPossible() const {
+ if (PtrOffset < 0)
+ return true;
+ return static_cast<uint64_t>(PtrOffset) <= Base.getByteOffset();
+ }
+
Pointer getBase() const {
if (PtrOffset < 0)
return Base.atField(-PtrOffset);
diff --git a/clang/lib/AST/ByteCode/Pointer.cpp b/clang/lib/AST/ByteCode/Pointer.cpp
index dec2088..973bc7c 100644
--- a/clang/lib/AST/ByteCode/Pointer.cpp
+++ b/clang/lib/AST/ByteCode/Pointer.cpp
@@ -30,39 +30,62 @@ Pointer::Pointer(Block *Pointee)
Pointer::Pointer(Block *Pointee, uint64_t BaseAndOffset)
: Pointer(Pointee, BaseAndOffset, BaseAndOffset) {}
-Pointer::Pointer(const Pointer &P)
- : Offset(P.Offset), StorageKind(P.StorageKind),
- PointeeStorage(P.PointeeStorage) {
-
- if (isBlockPointer() && PointeeStorage.BS.Pointee)
- PointeeStorage.BS.Pointee->addPointer(this);
-}
-
Pointer::Pointer(Block *Pointee, unsigned Base, uint64_t Offset)
: Offset(Offset), StorageKind(Storage::Block) {
assert((Base == RootPtrMark || Base % alignof(void *) == 0) && "wrong base");
- PointeeStorage.BS = {Pointee, Base, nullptr, nullptr};
+ BS = {Pointee, Base, nullptr, nullptr};
if (Pointee)
Pointee->addPointer(this);
}
-Pointer::Pointer(Pointer &&P)
- : Offset(P.Offset), StorageKind(P.StorageKind),
- PointeeStorage(P.PointeeStorage) {
+Pointer::Pointer(const Pointer &P)
+ : Offset(P.Offset), StorageKind(P.StorageKind) {
+ switch (StorageKind) {
+ case Storage::Int:
+ Int = P.Int;
+ break;
+ case Storage::Block:
+ BS = P.BS;
+ if (BS.Pointee)
+ BS.Pointee->addPointer(this);
+ break;
+ case Storage::Fn:
+ Fn = P.Fn;
+ break;
+ case Storage::Typeid:
+ Typeid = P.Typeid;
+ break;
+ }
+}
- if (StorageKind == Storage::Block && PointeeStorage.BS.Pointee)
- PointeeStorage.BS.Pointee->replacePointer(&P, this);
+Pointer::Pointer(Pointer &&P) : Offset(P.Offset), StorageKind(P.StorageKind) {
+ switch (StorageKind) {
+ case Storage::Int:
+ Int = P.Int;
+ break;
+ case Storage::Block:
+ BS = P.BS;
+ if (BS.Pointee)
+ BS.Pointee->replacePointer(&P, this);
+ break;
+ case Storage::Fn:
+ Fn = P.Fn;
+ break;
+ case Storage::Typeid:
+ Typeid = P.Typeid;
+ break;
+ }
}
Pointer::~Pointer() {
if (!isBlockPointer())
return;
- if (Block *Pointee = PointeeStorage.BS.Pointee) {
+ if (Block *Pointee = BS.Pointee) {
Pointee->removePointer(this);
- PointeeStorage.BS.Pointee = nullptr;
+ BS.Pointee = nullptr;
Pointee->cleanup();
}
}
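The rewritten copy and move constructors must now switch on StorageKind: wholesale-copying the union was tolerable only while every member behaved like plain bytes, and the Block case additionally has to (re-)register the new pointer with its pointee. A reduced illustration with made-up Value/Kind names:

  struct Value {
    enum class Kind { Int, Ptr } K;
    union {
      long I;
      void *P;
    };

    Value(long V) : K(Kind::Int), I(V) {}
    Value(void *V) : K(Kind::Ptr), P(V) {}

    // With trivially copyable members a bitwise copy would work, but once a
    // member needs side effects on copy (the real Block case calls
    // addPointer()/replacePointer()), the constructor must dispatch on the
    // tag and copy exactly the live member.
    Value(const Value &O) : K(O.K) {
      switch (K) {
      case Kind::Int: I = O.I; break;
      case Kind::Ptr: P = O.P; break;
      }
    }
  };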
@@ -73,13 +96,13 @@ Pointer &Pointer::operator=(const Pointer &P) {
if (isBlockPointer()) {
if (P.isBlockPointer() && this->block() == P.block()) {
Offset = P.Offset;
- PointeeStorage.BS.Base = P.PointeeStorage.BS.Base;
+ BS.Base = P.BS.Base;
return *this;
}
- if (Block *Pointee = PointeeStorage.BS.Pointee) {
+ if (Block *Pointee = BS.Pointee) {
Pointee->removePointer(this);
- PointeeStorage.BS.Pointee = nullptr;
+ BS.Pointee = nullptr;
Pointee->cleanup();
}
}
@@ -88,16 +111,16 @@ Pointer &Pointer::operator=(const Pointer &P) {
Offset = P.Offset;
if (P.isBlockPointer()) {
- PointeeStorage.BS = P.PointeeStorage.BS;
+ BS = P.BS;
- if (PointeeStorage.BS.Pointee)
- PointeeStorage.BS.Pointee->addPointer(this);
+ if (BS.Pointee)
+ BS.Pointee->addPointer(this);
} else if (P.isIntegralPointer()) {
- PointeeStorage.Int = P.PointeeStorage.Int;
+ Int = P.Int;
} else if (P.isFunctionPointer()) {
- PointeeStorage.Fn = P.PointeeStorage.Fn;
+ Fn = P.Fn;
} else if (P.isTypeidPointer()) {
- PointeeStorage.Typeid = P.PointeeStorage.Typeid;
+ Typeid = P.Typeid;
} else {
assert(false && "Unhandled storage kind");
}
@@ -110,13 +133,13 @@ Pointer &Pointer::operator=(Pointer &&P) {
if (isBlockPointer()) {
if (P.isBlockPointer() && this->block() == P.block()) {
Offset = P.Offset;
- PointeeStorage.BS.Base = P.PointeeStorage.BS.Base;
+ BS.Base = P.BS.Base;
return *this;
}
- if (Block *Pointee = PointeeStorage.BS.Pointee) {
+ if (Block *Pointee = BS.Pointee) {
Pointee->removePointer(this);
- PointeeStorage.BS.Pointee = nullptr;
+ BS.Pointee = nullptr;
Pointee->cleanup();
}
}
@@ -125,16 +148,16 @@ Pointer &Pointer::operator=(Pointer &&P) {
Offset = P.Offset;
if (P.isBlockPointer()) {
- PointeeStorage.BS = P.PointeeStorage.BS;
+ BS = P.BS;
- if (PointeeStorage.BS.Pointee)
- PointeeStorage.BS.Pointee->addPointer(this);
+ if (BS.Pointee)
+ BS.Pointee->addPointer(this);
} else if (P.isIntegralPointer()) {
- PointeeStorage.Int = P.PointeeStorage.Int;
+ Int = P.Int;
} else if (P.isFunctionPointer()) {
- PointeeStorage.Fn = P.PointeeStorage.Fn;
+ Fn = P.Fn;
} else if (P.isTypeidPointer()) {
- PointeeStorage.Typeid = P.PointeeStorage.Typeid;
+ Typeid = P.Typeid;
} else {
assert(false && "Unhandled storage kind");
}
@@ -163,12 +186,11 @@ APValue Pointer::toAPValue(const ASTContext &ASTCtx) const {
}
if (isTypeidPointer()) {
- TypeInfoLValue TypeInfo(PointeeStorage.Typeid.TypePtr);
- return APValue(
- APValue::LValueBase::getTypeInfo(
- TypeInfo, QualType(PointeeStorage.Typeid.TypeInfoType, 0)),
- CharUnits::Zero(), {},
- /*OnePastTheEnd=*/false, /*IsNull=*/false);
+ TypeInfoLValue TypeInfo(Typeid.TypePtr);
+ return APValue(APValue::LValueBase::getTypeInfo(
+ TypeInfo, QualType(Typeid.TypeInfoType, 0)),
+ CharUnits::Zero(), {},
+ /*OnePastTheEnd=*/false, /*IsNull=*/false);
}
// Build the lvalue base from the block.
@@ -179,10 +201,7 @@ APValue Pointer::toAPValue(const ASTContext &ASTCtx) const {
else if (const auto *E = Desc->asExpr()) {
if (block()->isDynamic()) {
QualType AllocatedType = getDeclPtr().getFieldDesc()->getDataType(ASTCtx);
- // FIXME: Suboptimal counting of dynamic allocations. Move this to Context
- // or InterpState?
- static int ReportedDynamicAllocs = 0;
- DynamicAllocLValue DA(ReportedDynamicAllocs++);
+ DynamicAllocLValue DA(*block()->DynAllocId);
Base = APValue::LValueBase::getDynamicAlloc(DA, AllocatedType);
} else {
Base = E;
@@ -212,7 +231,7 @@ APValue Pointer::toAPValue(const ASTContext &ASTCtx) const {
UsePath = false;
// Build the path into the object.
- bool OnePastEnd = isOnePastEnd();
+ bool OnePastEnd = isOnePastEnd() && !isZeroSizeArray();
Pointer Ptr = *this;
while (Ptr.isField() || Ptr.isArrayElement()) {
@@ -259,10 +278,10 @@ APValue Pointer::toAPValue(const ASTContext &ASTCtx) const {
Ptr = Ptr.getArray();
} else {
const Descriptor *Desc = Ptr.getFieldDesc();
- bool IsVirtual = false;
// Create a path entry for the field.
if (const auto *BaseOrMember = Desc->asDecl()) {
+ bool IsVirtual = false;
if (const auto *FD = dyn_cast<FieldDecl>(BaseOrMember)) {
Ptr = Ptr.getBase();
Offset += getFieldOffset(FD);
@@ -303,13 +322,13 @@ APValue Pointer::toAPValue(const ASTContext &ASTCtx) const {
void Pointer::print(llvm::raw_ostream &OS) const {
switch (StorageKind) {
case Storage::Block: {
- const Block *B = PointeeStorage.BS.Pointee;
+ const Block *B = BS.Pointee;
OS << "(Block) " << B << " {";
if (isRoot())
- OS << "rootptr(" << PointeeStorage.BS.Base << "), ";
+ OS << "rootptr(" << BS.Base << "), ";
else
- OS << PointeeStorage.BS.Base << ", ";
+ OS << BS.Base << ", ";
if (isElementPastEnd())
OS << "pastend, ";
@@ -324,8 +343,7 @@ void Pointer::print(llvm::raw_ostream &OS) const {
} break;
case Storage::Int:
OS << "(Int) {";
- OS << PointeeStorage.Int.Value << " + " << Offset << ", "
- << PointeeStorage.Int.Desc;
+ OS << Int.Value << " + " << Offset << ", " << Int.Desc;
OS << "}";
break;
case Storage::Fn:
@@ -378,6 +396,8 @@ size_t Pointer::computeOffsetForComparison() const {
}
if (const Record *R = P.getBase().getRecord(); R && R->isUnion()) {
+ if (P.isOnePastEnd())
+ ++Result;
// Direct child of a union - all have offset 0.
P = P.getBase();
continue;
@@ -413,45 +433,60 @@ bool Pointer::isInitialized() const {
if (!isBlockPointer())
return true;
- if (isRoot() && PointeeStorage.BS.Base == sizeof(GlobalInlineDescriptor)) {
+ if (isRoot() && BS.Base == sizeof(GlobalInlineDescriptor)) {
const GlobalInlineDescriptor &GD =
*reinterpret_cast<const GlobalInlineDescriptor *>(block()->rawData());
return GD.InitState == GlobalInitState::Initialized;
}
- assert(PointeeStorage.BS.Pointee &&
- "Cannot check if null pointer was initialized");
+ assert(BS.Pointee && "Cannot check if null pointer was initialized");
const Descriptor *Desc = getFieldDesc();
assert(Desc);
- if (Desc->isPrimitiveArray()) {
- if (isStatic() && PointeeStorage.BS.Base == 0)
- return true;
+ if (Desc->isPrimitiveArray())
+ return isElementInitialized(getIndex());
- InitMapPtr &IM = getInitMap();
+ if (asBlockPointer().Base == 0)
+ return true;
+ // Field has its bit in an inline descriptor.
+ return getInlineDesc()->IsInitialized;
+}
+bool Pointer::isElementInitialized(unsigned Index) const {
+ if (!isBlockPointer())
+ return true;
+
+ const Descriptor *Desc = getFieldDesc();
+ assert(Desc);
+
+ if (isStatic() && BS.Base == 0)
+ return true;
+
+ if (isRoot() && BS.Base == sizeof(GlobalInlineDescriptor)) {
+ const GlobalInlineDescriptor &GD =
+ *reinterpret_cast<const GlobalInlineDescriptor *>(block()->rawData());
+ return GD.InitState == GlobalInitState::Initialized;
+ }
+
+ if (Desc->isPrimitiveArray()) {
+ InitMapPtr &IM = getInitMap();
if (!IM)
return false;
if (IM->first)
return true;
- return IM->second->isElementInitialized(getIndex());
+ return IM->second->isElementInitialized(Index);
}
-
- if (asBlockPointer().Base == 0)
- return true;
-
- // Field has its bit in an inline descriptor.
- return getInlineDesc()->IsInitialized;
+ return isInitialized();
}
void Pointer::initialize() const {
if (!isBlockPointer())
return;
- assert(PointeeStorage.BS.Pointee && "Cannot initialize null pointer");
+ assert(BS.Pointee && "Cannot initialize null pointer");
- if (isRoot() && PointeeStorage.BS.Base == sizeof(GlobalInlineDescriptor)) {
+ if (isRoot() && BS.Base == sizeof(GlobalInlineDescriptor)) {
GlobalInlineDescriptor &GD = *reinterpret_cast<GlobalInlineDescriptor *>(
asBlockPointer().Pointee->rawData());
GD.InitState = GlobalInitState::Initialized;
@@ -462,7 +497,7 @@ void Pointer::initialize() const {
assert(Desc);
if (Desc->isPrimitiveArray()) {
// Primitive global arrays don't have an initmap.
- if (isStatic() && PointeeStorage.BS.Base == 0)
+ if (isStatic() && BS.Base == 0)
return;
// Nothing to do for these.
@@ -488,8 +523,7 @@ void Pointer::initialize() const {
}
// Field has its bit in an inline descriptor.
- assert(PointeeStorage.BS.Base != 0 &&
- "Only composite fields can be initialised");
+ assert(BS.Base != 0 && "Only composite fields can be initialised");
getInlineDesc()->IsInitialized = true;
}
@@ -506,12 +540,28 @@ void Pointer::initializeAllElements() const {
}
}
+bool Pointer::allElementsInitialized() const {
+ assert(getFieldDesc()->isPrimitiveArray());
+ assert(isArrayRoot());
+
+ if (isStatic() && BS.Base == 0)
+ return true;
+
+ if (isRoot() && BS.Base == sizeof(GlobalInlineDescriptor)) {
+ const GlobalInlineDescriptor &GD =
+ *reinterpret_cast<const GlobalInlineDescriptor *>(block()->rawData());
+ return GD.InitState == GlobalInitState::Initialized;
+ }
+
+ InitMapPtr &IM = getInitMap();
+ return IM && IM->first;
+}
+
void Pointer::activate() const {
// Field has its bit in an inline descriptor.
- assert(PointeeStorage.BS.Base != 0 &&
- "Only composite fields can be activated");
+ assert(BS.Base != 0 && "Only composite fields can be activated");
- if (isRoot() && PointeeStorage.BS.Base == sizeof(GlobalInlineDescriptor))
+ if (isRoot() && BS.Base == sizeof(GlobalInlineDescriptor))
return;
if (!getInlineDesc()->InUnion)
return;
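isElementInitialized() and allElementsInitialized() above both consult the per-array init map. A compact sketch of that structure — ElemInitMap is an assumed name, not the real InitMap layout — showing one bit per primitive element plus the fully-initialized flag that mirrors the IM->first fast path:

  #include <cstdint>
  #include <vector>

  struct ElemInitMap {
    bool All = false;           // plays the role of IM->first above
    std::vector<uint64_t> Bits; // one bit per array element

    explicit ElemInitMap(unsigned NumElems) : Bits((NumElems + 63) / 64, 0) {}

    void markInitialized(unsigned I) {
      Bits[I / 64] |= uint64_t(1) << (I % 64);
    }

    bool isElementInitialized(unsigned I) const {
      if (All) // every element known initialized: no bit test needed
        return true;
      return (Bits[I / 64] >> (I % 64)) & 1;
    }
  };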
@@ -593,8 +643,7 @@ bool Pointer::pointToSameBlock(const Pointer &A, const Pointer &B) {
}
bool Pointer::hasSameArray(const Pointer &A, const Pointer &B) {
- return hasSameBase(A, B) &&
- A.PointeeStorage.BS.Base == B.PointeeStorage.BS.Base &&
+ return hasSameBase(A, B) && A.BS.Base == B.BS.Base &&
A.getFieldDesc()->IsArray;
}
@@ -684,12 +733,12 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx,
return true;
}
- if (const auto *RT = Ty->getAs<RecordType>()) {
+ if (const auto *RT = Ty->getAsCanonical<RecordType>()) {
const auto *Record = Ptr.getRecord();
assert(Record && "Missing record descriptor");
bool Ok = true;
- if (RT->getDecl()->isUnion()) {
+ if (RT->getOriginalDecl()->isUnion()) {
const FieldDecl *ActiveField = nullptr;
APValue Value;
for (const auto &F : Record->fields()) {
@@ -728,14 +777,15 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx,
for (unsigned I = 0; I < NB; ++I) {
const Record::Base *BD = Record->getBase(I);
- QualType BaseTy = Ctx.getASTContext().getRecordType(BD->Decl);
+ QualType BaseTy = Ctx.getASTContext().getCanonicalTagType(BD->Decl);
const Pointer &BP = Ptr.atField(BD->Offset);
Ok &= Composite(BaseTy, BP, R.getStructBase(I));
}
for (unsigned I = 0; I < NV; ++I) {
const Record::Base *VD = Record->getVirtualBase(I);
- QualType VirtBaseTy = Ctx.getASTContext().getRecordType(VD->Decl);
+ QualType VirtBaseTy =
+ Ctx.getASTContext().getCanonicalTagType(VD->Decl);
const Pointer &VP = Ptr.atField(VD->Offset);
Ok &= Composite(VirtBaseTy, VP, R.getStructBase(NB + I));
}
@@ -754,13 +804,13 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx,
R = APValue(APValue::UninitArray{}, NumElems, NumElems);
bool Ok = true;
- for (unsigned I = 0; I < NumElems; ++I) {
+ OptPrimType ElemT = Ctx.classify(ElemTy);
+ for (unsigned I = 0; I != NumElems; ++I) {
APValue &Slot = R.getArrayInitializedElt(I);
- const Pointer &EP = Ptr.atIndex(I);
- if (OptPrimType T = Ctx.classify(ElemTy)) {
- TYPE_SWITCH(*T, Slot = EP.deref<T>().toAPValue(ASTCtx));
+ if (ElemT) {
+ TYPE_SWITCH(*ElemT, Slot = Ptr.elem<T>(I).toAPValue(ASTCtx));
} else {
- Ok &= Composite(ElemTy, EP.narrow(), Slot);
+ Ok &= Composite(ElemTy, Ptr.atIndex(I).narrow(), Slot);
}
}
return Ok;
@@ -768,8 +818,11 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx,
// Complex types.
if (const auto *CT = Ty->getAs<ComplexType>()) {
- QualType ElemTy = CT->getElementType();
+ // Can happen via C casts.
+ if (!Ptr.getFieldDesc()->isPrimitiveArray())
+ return false;
+ QualType ElemTy = CT->getElementType();
if (ElemTy->isIntegerType()) {
OptPrimType ElemT = Ctx.classify(ElemTy);
assert(ElemT);
diff --git a/clang/lib/AST/ByteCode/Pointer.h b/clang/lib/AST/ByteCode/Pointer.h
index 5bafc5b..49d701c 100644
--- a/clang/lib/AST/ByteCode/Pointer.h
+++ b/clang/lib/AST/ByteCode/Pointer.h
@@ -28,8 +28,6 @@ class Block;
class DeadBlock;
class Pointer;
class Context;
-template <unsigned A, bool B> class Integral;
-enum PrimType : unsigned;
class Pointer;
inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Pointer &P);
@@ -95,31 +93,21 @@ private:
static constexpr unsigned RootPtrMark = ~0u;
public:
- Pointer() {
- StorageKind = Storage::Int;
- PointeeStorage.Int.Value = 0;
- PointeeStorage.Int.Desc = nullptr;
- }
- Pointer(IntPointer &&IntPtr) : StorageKind(Storage::Int) {
- PointeeStorage.Int = std::move(IntPtr);
- }
+ Pointer() : StorageKind(Storage::Int), Int{nullptr, 0} {}
+ Pointer(IntPointer &&IntPtr)
+ : StorageKind(Storage::Int), Int(std::move(IntPtr)) {}
Pointer(Block *B);
Pointer(Block *B, uint64_t BaseAndOffset);
Pointer(const Pointer &P);
Pointer(Pointer &&P);
Pointer(uint64_t Address, const Descriptor *Desc, uint64_t Offset = 0)
- : Offset(Offset), StorageKind(Storage::Int) {
- PointeeStorage.Int.Value = Address;
- PointeeStorage.Int.Desc = Desc;
- }
+ : Offset(Offset), StorageKind(Storage::Int), Int{Desc, Address} {}
Pointer(const Function *F, uint64_t Offset = 0)
- : Offset(Offset), StorageKind(Storage::Fn) {
- PointeeStorage.Fn = FunctionPointer(F);
- }
+ : Offset(Offset), StorageKind(Storage::Fn), Fn(F) {}
Pointer(const Type *TypePtr, const Type *TypeInfoType, uint64_t Offset = 0)
: Offset(Offset), StorageKind(Storage::Typeid) {
- PointeeStorage.Typeid.TypePtr = TypePtr;
- PointeeStorage.Typeid.TypeInfoType = TypeInfoType;
+ Typeid.TypePtr = TypePtr;
+ Typeid.TypeInfoType = TypeInfoType;
}
Pointer(Block *Pointee, unsigned Base, uint64_t Offset);
~Pointer();
@@ -132,17 +120,14 @@ public:
if (P.StorageKind != StorageKind)
return false;
if (isIntegralPointer())
- return P.asIntPointer().Value == asIntPointer().Value &&
- P.asIntPointer().Desc == asIntPointer().Desc && P.Offset == Offset;
+ return P.Int.Value == Int.Value && P.Int.Desc == Int.Desc &&
+ P.Offset == Offset;
if (isFunctionPointer())
- return P.asFunctionPointer().getFunction() ==
- asFunctionPointer().getFunction() &&
- P.Offset == Offset;
+ return P.Fn.getFunction() == Fn.getFunction() && P.Offset == Offset;
assert(isBlockPointer());
- return P.asBlockPointer().Pointee == asBlockPointer().Pointee &&
- P.asBlockPointer().Base == asBlockPointer().Base &&
+ return P.BS.Pointee == BS.Pointee && P.BS.Base == BS.Base &&
P.Offset == Offset;
}
@@ -156,10 +141,10 @@ public:
uint64_t getIntegerRepresentation() const {
if (isIntegralPointer())
- return asIntPointer().Value + (Offset * elemSize());
+ return Int.Value + (Offset * elemSize());
if (isFunctionPointer())
- return asFunctionPointer().getIntegerRepresentation() + Offset;
- return reinterpret_cast<uint64_t>(asBlockPointer().Pointee) + Offset;
+ return Fn.getIntegerRepresentation() + Offset;
+ return reinterpret_cast<uint64_t>(BS.Pointee) + Offset;
}
/// Converts the pointer to an APValue that is an rvalue.
@@ -169,27 +154,25 @@ public:
/// Offsets a pointer inside an array.
[[nodiscard]] Pointer atIndex(uint64_t Idx) const {
if (isIntegralPointer())
- return Pointer(asIntPointer().Value, asIntPointer().Desc, Idx);
+ return Pointer(Int.Value, Int.Desc, Idx);
if (isFunctionPointer())
- return Pointer(asFunctionPointer().getFunction(), Idx);
+ return Pointer(Fn.getFunction(), Idx);
- if (asBlockPointer().Base == RootPtrMark)
- return Pointer(asBlockPointer().Pointee, RootPtrMark,
- getDeclDesc()->getSize());
+ if (BS.Base == RootPtrMark)
+ return Pointer(BS.Pointee, RootPtrMark, getDeclDesc()->getSize());
uint64_t Off = Idx * elemSize();
if (getFieldDesc()->ElemDesc)
Off += sizeof(InlineDescriptor);
else
Off += sizeof(InitMapPtr);
- return Pointer(asBlockPointer().Pointee, asBlockPointer().Base,
- asBlockPointer().Base + Off);
+ return Pointer(BS.Pointee, BS.Base, BS.Base + Off);
}
/// Creates a pointer to a field.
[[nodiscard]] Pointer atField(unsigned Off) const {
assert(isBlockPointer());
unsigned Field = Offset + Off;
- return Pointer(asBlockPointer().Pointee, Field, Field);
+ return Pointer(BS.Pointee, Field, Field);
}
/// Subtract the given offset from the current Base and Offset
@@ -197,7 +180,7 @@ public:
[[nodiscard]] Pointer atFieldSub(unsigned Off) const {
assert(Offset >= Off);
unsigned O = Offset - Off;
- return Pointer(asBlockPointer().Pointee, O, O);
+ return Pointer(BS.Pointee, O, O);
}
/// Restricts the scope of an array element pointer.
@@ -209,15 +192,15 @@ public:
if (isZero() || isUnknownSizeArray())
return *this;
- unsigned Base = asBlockPointer().Base;
+ unsigned Base = BS.Base;
// Pointer to an array of base types - enter block.
if (Base == RootPtrMark)
- return Pointer(asBlockPointer().Pointee, sizeof(InlineDescriptor),
+ return Pointer(BS.Pointee, sizeof(InlineDescriptor),
Offset == 0 ? Offset : PastEndMark);
// Pointer is one past end - magic offset marks that.
if (isOnePastEnd())
- return Pointer(asBlockPointer().Pointee, Base, PastEndMark);
+ return Pointer(BS.Pointee, Base, PastEndMark);
if (Offset != Base) {
// If we're pointing to a primitive array element, there's nothing to do.
@@ -225,7 +208,7 @@ public:
return *this;
// Pointer is to a composite array element - enter it.
if (Offset != Base)
- return Pointer(asBlockPointer().Pointee, Offset, Offset);
+ return Pointer(BS.Pointee, Offset, Offset);
}
// Otherwise, we're pointing to a non-array element or
@@ -236,7 +219,7 @@ public:
/// Expands a pointer to the containing array, undoing narrowing.
[[nodiscard]] Pointer expand() const {
assert(isBlockPointer());
- Block *Pointee = asBlockPointer().Pointee;
+ Block *Pointee = BS.Pointee;
if (isElementPastEnd()) {
// Revert to an outer one-past-end pointer.
@@ -245,19 +228,18 @@ public:
Adjust = sizeof(InitMapPtr);
else
Adjust = sizeof(InlineDescriptor);
- return Pointer(Pointee, asBlockPointer().Base,
- asBlockPointer().Base + getSize() + Adjust);
+ return Pointer(Pointee, BS.Base, BS.Base + getSize() + Adjust);
}
// Do not step out of array elements.
- if (asBlockPointer().Base != Offset)
+ if (BS.Base != Offset)
return *this;
if (isRoot())
- return Pointer(Pointee, asBlockPointer().Base, asBlockPointer().Base);
+ return Pointer(Pointee, BS.Base, BS.Base);
// Step into the containing array, if inside one.
- unsigned Next = asBlockPointer().Base - getInlineDesc()->Offset;
+ unsigned Next = BS.Base - getInlineDesc()->Offset;
const Descriptor *Desc =
(Next == Pointee->getDescriptor()->getMetadataSize())
? getDeclDesc()
@@ -270,19 +252,19 @@ public:
/// Checks if the pointer is null.
bool isZero() const {
if (isBlockPointer())
- return asBlockPointer().Pointee == nullptr;
+ return BS.Pointee == nullptr;
if (isFunctionPointer())
- return asFunctionPointer().isZero();
+ return Fn.isZero();
if (isTypeidPointer())
return false;
assert(isIntegralPointer());
- return asIntPointer().Value == 0 && Offset == 0;
+ return Int.Value == 0 && Offset == 0;
}
/// Checks if the pointer is live.
bool isLive() const {
if (!isBlockPointer())
return true;
- return asBlockPointer().Pointee && !asBlockPointer().Pointee->IsDead;
+ return BS.Pointee && !BS.Pointee->isDead();
}
/// Checks if the item is a field in an object.
bool isField() const {
@@ -295,13 +277,13 @@ public:
/// Accessor for information about the declaration site.
const Descriptor *getDeclDesc() const {
if (isIntegralPointer())
- return asIntPointer().Desc;
+ return Int.Desc;
if (isFunctionPointer() || isTypeidPointer())
return nullptr;
assert(isBlockPointer());
- assert(asBlockPointer().Pointee);
- return asBlockPointer().Pointee->Desc;
+ assert(BS.Pointee);
+ return BS.Pointee->Desc;
}
SourceLocation getDeclLoc() const { return getDeclDesc()->getLocation(); }
@@ -310,37 +292,36 @@ public:
if (isBlockPointer())
return getDeclDesc()->getSource();
if (isFunctionPointer()) {
- const Function *F = asFunctionPointer().getFunction();
+ const Function *F = Fn.getFunction();
return F ? F->getDecl() : DeclTy();
}
assert(isIntegralPointer());
- return asIntPointer().Desc ? asIntPointer().Desc->getSource() : DeclTy();
+ return Int.Desc ? Int.Desc->getSource() : DeclTy();
}
/// Returns a pointer to the object of which this pointer is a field.
[[nodiscard]] Pointer getBase() const {
- if (asBlockPointer().Base == RootPtrMark) {
+ if (BS.Base == RootPtrMark) {
assert(Offset == PastEndMark && "cannot get base of a block");
- return Pointer(asBlockPointer().Pointee, asBlockPointer().Base, 0);
+ return Pointer(BS.Pointee, BS.Base, 0);
}
- unsigned NewBase = asBlockPointer().Base - getInlineDesc()->Offset;
- return Pointer(asBlockPointer().Pointee, NewBase, NewBase);
+ unsigned NewBase = BS.Base - getInlineDesc()->Offset;
+ return Pointer(BS.Pointee, NewBase, NewBase);
}
/// Returns the parent array.
[[nodiscard]] Pointer getArray() const {
- if (asBlockPointer().Base == RootPtrMark) {
+ if (BS.Base == RootPtrMark) {
assert(Offset != 0 && Offset != PastEndMark && "not an array element");
- return Pointer(asBlockPointer().Pointee, asBlockPointer().Base, 0);
+ return Pointer(BS.Pointee, BS.Base, 0);
}
- assert(Offset != asBlockPointer().Base && "not an array element");
- return Pointer(asBlockPointer().Pointee, asBlockPointer().Base,
- asBlockPointer().Base);
+ assert(Offset != BS.Base && "not an array element");
+ return Pointer(BS.Pointee, BS.Base, BS.Base);
}
/// Accessors for information about the innermost field.
const Descriptor *getFieldDesc() const {
if (isIntegralPointer())
- return asIntPointer().Desc;
+ return Int.Desc;
if (isRoot())
return getDeclDesc();
@@ -350,9 +331,11 @@ public:
/// Returns the type of the innermost field.
QualType getType() const {
if (isTypeidPointer())
- return QualType(PointeeStorage.Typeid.TypeInfoType, 0);
+ return QualType(Typeid.TypeInfoType, 0);
+ if (isFunctionPointer())
+ return Fn.getFunction()->getDecl()->getType();
- if (inPrimitiveArray() && Offset != asBlockPointer().Base) {
+ if (inPrimitiveArray() && Offset != BS.Base) {
// Unfortunately, complex and vector types are not array types in clang,
// but they are for us.
if (const auto *AT = getFieldDesc()->getType()->getAsArrayTypeUnsafe())
@@ -365,19 +348,17 @@ public:
return getFieldDesc()->getType();
}
- [[nodiscard]] Pointer getDeclPtr() const {
- return Pointer(asBlockPointer().Pointee);
- }
+ [[nodiscard]] Pointer getDeclPtr() const { return Pointer(BS.Pointee); }
/// Returns the element size of the innermost field.
size_t elemSize() const {
if (isIntegralPointer()) {
- if (!asIntPointer().Desc)
+ if (!Int.Desc)
return 1;
- return asIntPointer().Desc->getElemSize();
+ return Int.Desc->getElemSize();
}
- if (asBlockPointer().Base == RootPtrMark)
+ if (BS.Base == RootPtrMark)
return getDeclDesc()->getSize();
return getFieldDesc()->getElemSize();
}
@@ -391,24 +372,22 @@ public:
unsigned getOffset() const {
assert(Offset != PastEndMark && "invalid offset");
assert(isBlockPointer());
- if (asBlockPointer().Base == RootPtrMark)
+ if (BS.Base == RootPtrMark)
return Offset;
unsigned Adjust = 0;
- if (Offset != asBlockPointer().Base) {
+ if (Offset != BS.Base) {
if (getFieldDesc()->ElemDesc)
Adjust = sizeof(InlineDescriptor);
else
Adjust = sizeof(InitMapPtr);
}
- return Offset - asBlockPointer().Base - Adjust;
+ return Offset - BS.Base - Adjust;
}
/// Whether this pointer refers to an array, but not
/// to the first element.
- bool isArrayRoot() const {
- return inArray() && Offset == asBlockPointer().Base;
- }
+ bool isArrayRoot() const { return inArray() && Offset == BS.Base; }
/// Checks if the innermost field is an array.
bool inArray() const {
@@ -417,7 +396,7 @@ public:
return false;
}
bool inUnion() const {
- if (isBlockPointer() && asBlockPointer().Base >= sizeof(InlineDescriptor))
+ if (isBlockPointer() && BS.Base >= sizeof(InlineDescriptor))
return getInlineDesc()->InUnion;
return false;
};
@@ -439,7 +418,7 @@ public:
if (!isBlockPointer())
return false;
- const BlockPointer &BP = asBlockPointer();
+ const BlockPointer &BP = BS;
if (inArray() && BP.Base != Offset)
return true;
@@ -454,33 +433,32 @@ public:
bool isRoot() const {
if (isZero() || !isBlockPointer())
return true;
- return (asBlockPointer().Base ==
- asBlockPointer().Pointee->getDescriptor()->getMetadataSize() ||
- asBlockPointer().Base == 0);
+ return (BS.Base == BS.Pointee->getDescriptor()->getMetadataSize() ||
+ BS.Base == 0);
}
/// If this pointer has an InlineDescriptor we can use to initialize.
bool canBeInitialized() const {
if (!isBlockPointer())
return false;
- return asBlockPointer().Pointee && asBlockPointer().Base > 0;
+ return BS.Pointee && BS.Base > 0;
}
[[nodiscard]] const BlockPointer &asBlockPointer() const {
assert(isBlockPointer());
- return PointeeStorage.BS;
+ return BS;
}
[[nodiscard]] const IntPointer &asIntPointer() const {
assert(isIntegralPointer());
- return PointeeStorage.Int;
+ return Int;
}
[[nodiscard]] const FunctionPointer &asFunctionPointer() const {
assert(isFunctionPointer());
- return PointeeStorage.Fn;
+ return Fn;
}
[[nodiscard]] const TypeidPointer &asTypeidPointer() const {
assert(isTypeidPointer());
- return PointeeStorage.Typeid;
+ return Typeid;
}
bool isBlockPointer() const { return StorageKind == Storage::Block; }
@@ -505,29 +483,29 @@ public:
/// Checks if the storage is extern.
bool isExtern() const {
if (isBlockPointer())
- return asBlockPointer().Pointee && asBlockPointer().Pointee->isExtern();
+ return BS.Pointee && BS.Pointee->isExtern();
return false;
}
/// Checks if the storage is static.
bool isStatic() const {
if (!isBlockPointer())
return true;
- assert(asBlockPointer().Pointee);
- return asBlockPointer().Pointee->isStatic();
+ assert(BS.Pointee);
+ return BS.Pointee->isStatic();
}
/// Checks if the storage is temporary.
bool isTemporary() const {
if (isBlockPointer()) {
- assert(asBlockPointer().Pointee);
- return asBlockPointer().Pointee->isTemporary();
+ assert(BS.Pointee);
+ return BS.Pointee->isTemporary();
}
return false;
}
/// Checks if the storage has been dynamically allocated.
bool isDynamic() const {
if (isBlockPointer()) {
- assert(asBlockPointer().Pointee);
- return asBlockPointer().Pointee->isDynamic();
+ assert(BS.Pointee);
+ return BS.Pointee->isDynamic();
}
return false;
}
@@ -543,15 +521,13 @@ public:
bool isWeak() const {
if (isFunctionPointer())
- return asFunctionPointer().isWeak();
+ return Fn.isWeak();
if (!isBlockPointer())
return false;
assert(isBlockPointer());
- return asBlockPointer().Pointee->isWeak();
+ return BS.Pointee->isWeak();
}
- /// Checks if an object was initialized.
- bool isInitialized() const;
/// Checks if the object is active.
bool isActive() const {
if (!isBlockPointer())
@@ -568,10 +544,9 @@ public:
if (!isBlockPointer())
return false;
- if (!asBlockPointer().Pointee)
- return false;
-
- return getDeclDesc()->isDummy();
+ if (const Block *Pointee = BS.Pointee)
+ return Pointee->isDummy();
+ return false;
}
/// Checks if an object or a subfield is mutable.
@@ -594,10 +569,10 @@ public:
}
/// Returns the declaration ID.
- std::optional<unsigned> getDeclID() const {
+ UnsignedOrNone getDeclID() const {
if (isBlockPointer()) {
- assert(asBlockPointer().Pointee);
- return asBlockPointer().Pointee->getDeclID();
+ assert(BS.Pointee);
+ return BS.Pointee->getDeclID();
}
return std::nullopt;
}
@@ -605,9 +580,9 @@ public:
/// Returns the byte offset from the start.
uint64_t getByteOffset() const {
if (isIntegralPointer())
- return asIntPointer().Value + Offset;
+ return Int.Value + Offset;
if (isTypeidPointer())
- return reinterpret_cast<uintptr_t>(asTypeidPointer().TypePtr) + Offset;
+ return reinterpret_cast<uintptr_t>(Typeid.TypePtr) + Offset;
if (isOnePastEnd())
return PastEndMark;
return Offset;
@@ -620,13 +595,13 @@ public:
return getSize() / elemSize();
}
- const Block *block() const { return asBlockPointer().Pointee; }
+ const Block *block() const { return BS.Pointee; }
/// If backed by actual data (i.e. a block pointer), return
/// the address of that data.
const std::byte *getRawAddress() const {
assert(isBlockPointer());
- return asBlockPointer().Pointee->rawData() + Offset;
+ return BS.Pointee->rawData() + Offset;
}
/// Returns the index into an array.
@@ -638,8 +613,7 @@ public:
return 0;
// narrow()ed element in a composite array.
- if (asBlockPointer().Base > sizeof(InlineDescriptor) &&
- asBlockPointer().Base == Offset)
+ if (BS.Base > sizeof(InlineDescriptor) && BS.Base == Offset)
return 0;
if (auto ElemSize = elemSize())
@@ -652,13 +626,13 @@ public:
if (!isBlockPointer())
return false;
- if (!asBlockPointer().Pointee)
+ if (!BS.Pointee)
return false;
if (isUnknownSizeArray())
return false;
- return isPastEnd() || (getSize() == getOffset() && !isZeroSizeArray());
+ return isPastEnd() || (getSize() == getOffset());
}
/// Checks if the pointer points past the end of the object.
@@ -666,7 +640,7 @@ public:
if (isIntegralPointer())
return false;
- return !isZero() && Offset > PointeeStorage.BS.Pointee->getSize();
+ return !isZero() && Offset > BS.Pointee->getSize();
}
/// Checks if the pointer is an out-of-bounds element pointer.
@@ -685,16 +659,15 @@ public:
template <typename T> T &deref() const {
assert(isLive() && "Invalid pointer");
assert(isBlockPointer());
- assert(asBlockPointer().Pointee);
+ assert(BS.Pointee);
assert(isDereferencable());
- assert(Offset + sizeof(T) <=
- asBlockPointer().Pointee->getDescriptor()->getAllocSize());
+ assert(Offset + sizeof(T) <= BS.Pointee->getDescriptor()->getAllocSize());
if (isArrayRoot())
- return *reinterpret_cast<T *>(asBlockPointer().Pointee->rawData() +
- asBlockPointer().Base + sizeof(InitMapPtr));
+ return *reinterpret_cast<T *>(BS.Pointee->rawData() + BS.Base +
+ sizeof(InitMapPtr));
- return *reinterpret_cast<T *>(asBlockPointer().Pointee->rawData() + Offset);
+ return *reinterpret_cast<T *>(BS.Pointee->rawData() + Offset);
}
/// Dereferences the element at index \p I.
@@ -702,18 +675,17 @@ public:
template <typename T> T &elem(unsigned I) const {
assert(isLive() && "Invalid pointer");
assert(isBlockPointer());
- assert(asBlockPointer().Pointee);
+ assert(BS.Pointee);
assert(isDereferencable());
assert(getFieldDesc()->isPrimitiveArray());
+ assert(I < getFieldDesc()->getNumElems());
unsigned ElemByteOffset = I * getFieldDesc()->getElemSize();
- if (isArrayRoot())
- return *reinterpret_cast<T *>(asBlockPointer().Pointee->rawData() +
- asBlockPointer().Base + sizeof(InitMapPtr) +
- ElemByteOffset);
+ unsigned ReadOffset = BS.Base + sizeof(InitMapPtr) + ElemByteOffset;
+ assert(ReadOffset + sizeof(T) <=
+ BS.Pointee->getDescriptor()->getAllocSize());
- return *reinterpret_cast<T *>(asBlockPointer().Pointee->rawData() + Offset +
- ElemByteOffset);
+ return *reinterpret_cast<T *>(BS.Pointee->rawData() + ReadOffset);
}
/// Whether this block can be read from at all. This is only true for
@@ -733,6 +705,11 @@ public:
/// used in situations where we *know* we have initialized *all* elements
/// of a primitive array.
void initializeAllElements() const;
+ /// Checks if an object was initialized.
+ bool isInitialized() const;
+ /// Like isInitialized(), but for primitive arrays.
+ bool isElementInitialized(unsigned Index) const;
+ bool allElementsInitialized() const;
/// Activates a field.
void activate() const;
/// Deactivates an entire structure.
@@ -741,7 +718,7 @@ public:
Lifetime getLifetime() const {
if (!isBlockPointer())
return Lifetime::Started;
- if (asBlockPointer().Base < sizeof(InlineDescriptor))
+ if (BS.Base < sizeof(InlineDescriptor))
return Lifetime::Started;
return getInlineDesc()->LifeState;
}
@@ -749,7 +726,7 @@ public:
void endLifetime() const {
if (!isBlockPointer())
return;
- if (asBlockPointer().Base < sizeof(InlineDescriptor))
+ if (BS.Base < sizeof(InlineDescriptor))
return;
getInlineDesc()->LifeState = Lifetime::Ended;
}
@@ -757,7 +734,7 @@ public:
void startLifetime() const {
if (!isBlockPointer())
return;
- if (asBlockPointer().Base < sizeof(InlineDescriptor))
+ if (BS.Base < sizeof(InlineDescriptor))
return;
getInlineDesc()->LifeState = Lifetime::Started;
}
@@ -805,14 +782,15 @@ private:
friend class InterpState;
friend struct InitMap;
friend class DynamicAllocator;
+ friend class Program;
/// Returns the embedded descriptor preceding a field.
InlineDescriptor *getInlineDesc() const {
assert(isBlockPointer());
- assert(asBlockPointer().Base != sizeof(GlobalInlineDescriptor));
- assert(asBlockPointer().Base <= asBlockPointer().Pointee->getSize());
- assert(asBlockPointer().Base >= sizeof(InlineDescriptor));
- return getDescriptor(asBlockPointer().Base);
+ assert(BS.Base != sizeof(GlobalInlineDescriptor));
+ assert(BS.Base <= BS.Pointee->getSize());
+ assert(BS.Base >= sizeof(InlineDescriptor));
+ return getDescriptor(BS.Base);
}
/// Returns a descriptor at a given offset.
@@ -820,8 +798,8 @@ private:
assert(Offset != 0 && "Not a nested pointer");
assert(isBlockPointer());
assert(!isZero());
- return reinterpret_cast<InlineDescriptor *>(
- asBlockPointer().Pointee->rawData() + Offset) -
+ return reinterpret_cast<InlineDescriptor *>(BS.Pointee->rawData() +
+ Offset) -
1;
}
@@ -829,8 +807,7 @@ private:
InitMapPtr &getInitMap() const {
assert(isBlockPointer());
assert(!isZero());
- return *reinterpret_cast<InitMapPtr *>(asBlockPointer().Pointee->rawData() +
- asBlockPointer().Base);
+ return *reinterpret_cast<InitMapPtr *>(BS.Pointee->rawData() + BS.Base);
}
/// Offset into the storage.
@@ -842,7 +819,7 @@ private:
BlockPointer BS;
FunctionPointer Fn;
TypeidPointer Typeid;
- } PointeeStorage;
+ };
};
inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Pointer &P) {
diff --git a/clang/lib/AST/ByteCode/PrimType.h b/clang/lib/AST/ByteCode/PrimType.h
index 724da93..54fd39a 100644
--- a/clang/lib/AST/ByteCode/PrimType.h
+++ b/clang/lib/AST/ByteCode/PrimType.h
@@ -31,7 +31,7 @@ template <bool Signed> class IntegralAP;
template <unsigned Bits, bool Signed> class Integral;
/// Enumeration of the primitive types of the VM.
-enum PrimType : unsigned {
+enum PrimType : uint8_t {
PT_Sint8 = 0,
PT_Uint8 = 1,
PT_Sint16 = 2,
@@ -51,14 +51,15 @@ enum PrimType : unsigned {
// Like std::optional<PrimType>, but only sizeof(PrimType).
class OptPrimType final {
- unsigned V = ~0u;
+ static constexpr uint8_t None = 0xFF;
+ uint8_t V = None;
public:
OptPrimType() = default;
OptPrimType(std::nullopt_t) {}
OptPrimType(PrimType T) : V(static_cast<unsigned>(T)) {}
- explicit constexpr operator bool() const { return V != ~0u; }
+ explicit constexpr operator bool() const { return V != None; }
PrimType operator*() const {
assert(operator bool());
return static_cast<PrimType>(V);
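Shrinking OptPrimType to a single byte works by reserving one PrimType encoding (0xFF) as the empty state; a std::optional would carry a separate engaged flag and typically occupy two bytes. A standalone sketch of the same pattern:

  #include <cassert>
  #include <cstdint>

  class SmallOpt {
    static constexpr uint8_t None = 0xFF; // a value the payload never uses
    uint8_t V = None;

  public:
    SmallOpt() = default;
    SmallOpt(uint8_t T) : V(T) { assert(T != None && "sentinel is reserved"); }

    explicit operator bool() const { return V != None; }
    uint8_t operator*() const {
      assert(*this);
      return V;
    }
  };

  static_assert(sizeof(SmallOpt) == 1, "no separate engaged flag needed");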
@@ -85,6 +86,19 @@ inline constexpr bool isPtrType(PrimType T) {
return T == PT_Ptr || T == PT_MemberPtr;
}
+inline constexpr bool isSignedType(PrimType T) {
+ switch (T) {
+ case PT_Sint8:
+ case PT_Sint16:
+ case PT_Sint32:
+ case PT_Sint64:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
enum class CastKind : uint8_t {
Reinterpret,
Volatile,
@@ -258,14 +272,4 @@ static inline bool aligned(const void *P) {
} \
} while (0)
-#define COMPOSITE_TYPE_SWITCH(Expr, B, D) \
- do { \
- switch (Expr) { \
- TYPE_SWITCH_CASE(PT_Ptr, B) \
- default: { \
- D; \
- break; \
- } \
- } \
- } while (0)
#endif
diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp
index 4daa4ab..0be017e 100644
--- a/clang/lib/AST/ByteCode/Program.cpp
+++ b/clang/lib/AST/ByteCode/Program.cpp
@@ -101,7 +101,7 @@ unsigned Program::createGlobalString(const StringLiteral *S, const Expr *Base) {
}
}
}
- Ptr.initialize();
+ Ptr.initializeAllElements();
return GlobalIndex;
}
@@ -164,8 +164,8 @@ unsigned Program::getOrCreateDummy(const DeclTy &D) {
const auto *VD = cast<ValueDecl>(cast<const Decl *>(D));
IsWeak = VD->isWeak();
QT = VD->getType();
- if (const auto *RT = QT->getAs<ReferenceType>())
- QT = RT->getPointeeType();
+ if (QT->isPointerOrReferenceType())
+ QT = QT->getPointeeType();
}
assert(!QT.isNull());
@@ -180,17 +180,15 @@ unsigned Program::getOrCreateDummy(const DeclTy &D) {
Desc = allocateDescriptor(D);
assert(Desc);
- Desc->makeDummy();
-
- assert(Desc->isDummy());
// Allocate a block for storage.
unsigned I = Globals.size();
auto *G = new (Allocator, Desc->getAllocSize())
Global(Ctx.getEvalID(), getCurrentDecl(), Desc, /*IsStatic=*/true,
- /*IsExtern=*/false, IsWeak);
+ /*IsExtern=*/false, IsWeak, /*IsDummy=*/true);
G->block()->invokeCtor();
+ assert(G->block()->isDummy());
Globals.push_back(G);
DummyVariables[D.getOpaqueValue()] = I;
@@ -215,19 +213,31 @@ std::optional<unsigned> Program::createGlobal(const ValueDecl *VD,
// Register all previous declarations as well. For extern blocks, just replace
// the index with the new variable.
- if (auto Idx =
- createGlobal(VD, VD->getType(), IsStatic, IsExtern, IsWeak, Init)) {
- for (const Decl *P = VD; P; P = P->getPreviousDecl()) {
- unsigned &PIdx = GlobalIndices[P];
- if (P != VD) {
- if (Globals[PIdx]->block()->isExtern())
- Globals[PIdx] = Globals[*Idx];
+ std::optional<unsigned> Idx =
+ createGlobal(VD, VD->getType(), IsStatic, IsExtern, IsWeak, Init);
+ if (!Idx)
+ return std::nullopt;
+
+ Global *NewGlobal = Globals[*Idx];
+ for (const Decl *Redecl : VD->redecls()) {
+ unsigned &PIdx = GlobalIndices[Redecl];
+ if (Redecl != VD) {
+ if (Block *RedeclBlock = Globals[PIdx]->block();
+ RedeclBlock->isExtern()) {
+ Globals[PIdx] = NewGlobal;
+ // All pointers pointing to the previous extern decl now point to the
+ // new decl.
+ for (Pointer *Ptr = RedeclBlock->Pointers; Ptr; Ptr = Ptr->BS.Next) {
+ RedeclBlock->removePointer(Ptr);
+ Ptr->BS.Pointee = NewGlobal->block();
+ NewGlobal->block()->addPointer(Ptr);
+ }
}
- PIdx = *Idx;
}
- return *Idx;
+ PIdx = *Idx;
}
- return std::nullopt;
+
+ return *Idx;
}
std::optional<unsigned> Program::createGlobal(const Expr *E) {
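The redeclaration loop above fixes a subtle issue: pointers created against an extern declaration's block must follow along when that block is superseded by the defining declaration's block. Schematically, with Blk/Ptr as illustrative stand-ins for Block/Pointer:

  #include <vector>

  struct Blk;
  struct Ptr { Blk *Pointee = nullptr; };
  struct Blk { std::vector<Ptr *> Pointers; };

  // Every pointer registered with the old (extern) block is re-registered
  // with the new (defining) block, exactly once.
  void retarget(Blk &Old, Blk &New) {
    for (Ptr *P : Old.Pointers) {
      P->Pointee = &New;
      New.Pointers.push_back(P);
    }
    Old.Pointers.clear();
  }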
@@ -266,7 +276,7 @@ std::optional<unsigned> Program::createGlobal(const DeclTy &D, QualType Ty,
Ctx.getEvalID(), getCurrentDecl(), Desc, IsStatic, IsExtern, IsWeak);
G->block()->invokeCtor();
- // Initialize InlineDescriptor fields.
+ // Initialize GlobalInlineDescriptor fields.
auto *GD = new (G->block()->rawData()) GlobalInlineDescriptor();
if (!Init)
GD->InitState = GlobalInitState::NoInitializer;
@@ -322,10 +332,9 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) {
continue;
// In error cases, the base might not be a RecordType.
- const auto *RT = Spec.getType()->getAs<RecordType>();
- if (!RT)
+ const auto *BD = Spec.getType()->getAsCXXRecordDecl();
+ if (!BD)
return nullptr;
- const RecordDecl *BD = RT->getDecl();
const Record *BR = getOrCreateRecord(BD);
const Descriptor *Desc = GetBaseDesc(BD, BR);
@@ -338,11 +347,7 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) {
}
for (const CXXBaseSpecifier &Spec : CD->vbases()) {
- const auto *RT = Spec.getType()->getAs<RecordType>();
- if (!RT)
- return nullptr;
-
- const RecordDecl *BD = RT->getDecl();
+ const auto *BD = Spec.getType()->castAsCXXRecordDecl();
const Record *BR = getOrCreateRecord(BD);
const Descriptor *Desc = GetBaseDesc(BD, BR);
@@ -398,8 +403,8 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
const Expr *Init) {
// Classes and structures.
- if (const auto *RT = Ty->getAs<RecordType>()) {
- if (const auto *Record = getOrCreateRecord(RT->getDecl()))
+ if (const auto *RD = Ty->getAsRecordDecl()) {
+ if (const auto *Record = getOrCreateRecord(RD))
return allocateDescriptor(D, Record, MDSize, IsConst, IsTemporary,
IsMutable, IsVolatile);
return allocateDescriptor(D, MDSize);
diff --git a/clang/lib/AST/ByteCode/Program.h b/clang/lib/AST/ByteCode/Program.h
index 207ceef..90b48ee5 100644
--- a/clang/lib/AST/ByteCode/Program.h
+++ b/clang/lib/AST/ByteCode/Program.h
@@ -73,6 +73,10 @@ public:
return Globals[Idx]->block();
}
+ bool isGlobalInitialized(unsigned Index) const {
+ return getPtrGlobal(Index).isInitialized();
+ }
+
/// Finds a global's index.
std::optional<unsigned> getGlobal(const ValueDecl *VD);
std::optional<unsigned> getGlobal(const Expr *E);
@@ -152,7 +156,7 @@ public:
};
/// Returns the current declaration ID.
- std::optional<unsigned> getCurrentDecl() const {
+ UnsignedOrNone getCurrentDecl() const {
if (CurrentDeclaration == NoDeclaration)
return std::nullopt;
return CurrentDeclaration;
@@ -172,9 +176,6 @@ private:
/// List of anonymous functions.
std::vector<std::unique_ptr<Function>> AnonFuncs;
- /// Function relocation locations.
- llvm::DenseMap<const FunctionDecl *, std::vector<unsigned>> Relocs;
-
/// Native pointers referenced by bytecode.
std::vector<const void *> NativePointers;
/// Cached native pointer indices.
diff --git a/clang/lib/AST/ByteCode/Record.cpp b/clang/lib/AST/ByteCode/Record.cpp
index 1d4ac71..c20ec18 100644
--- a/clang/lib/AST/ByteCode/Record.cpp
+++ b/clang/lib/AST/ByteCode/Record.cpp
@@ -50,10 +50,8 @@ const Record::Base *Record::getBase(const RecordDecl *FD) const {
}
const Record::Base *Record::getBase(QualType T) const {
- if (auto *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ if (auto *RD = T->getAsCXXRecordDecl())
return BaseMap.lookup(RD);
- }
return nullptr;
}
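The Record.cpp change is one instance of a pattern applied throughout this patch: instead of unwrapping a RecordType node and then asking it for its declaration, query the type directly. A sketch, assuming the clang AST headers:

  #include "clang/AST/Type.h"

  // Returns the record declaration behind Ty, or nullptr for non-record
  // types, collapsing the old two-step form into a single query.
  const clang::RecordDecl *recordOf(clang::QualType Ty) {
    // Old form:
    //   if (const auto *RT = Ty->getAs<clang::RecordType>())
    //     return RT->getDecl();
    //   return nullptr;
    return Ty->getAsRecordDecl();
  }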
diff --git a/clang/lib/AST/CXXInheritance.cpp b/clang/lib/AST/CXXInheritance.cpp
index f037616..7a3e7ea 100644
--- a/clang/lib/AST/CXXInheritance.cpp
+++ b/clang/lib/AST/CXXInheritance.cpp
@@ -128,17 +128,11 @@ bool CXXRecordDecl::forallBases(ForallBasesCallback BaseMatches) const {
const CXXRecordDecl *Record = this;
while (true) {
for (const auto &I : Record->bases()) {
- const RecordType *Ty = I.getType()->getAs<RecordType>();
- if (!Ty)
+ const auto *Base = I.getType()->getAsCXXRecordDecl();
+ if (!Base || !(Base->isBeingDefined() || Base->isCompleteDefinition()))
return false;
-
- CXXRecordDecl *Base =
- cast_if_present<CXXRecordDecl>(Ty->getDecl()->getDefinition());
- if (!Base ||
- (Base->isDependentContext() &&
- !Base->isCurrentInstantiation(Record))) {
+ if (Base->isDependentContext() && !Base->isCurrentInstantiation(Record))
return false;
- }
Queue.push_back(Base);
if (!BaseMatches(Base))
@@ -196,7 +190,7 @@ bool CXXBasePaths::lookupInBases(ASTContext &Context,
if (isDetectingVirtual() && DetectedVirtual == nullptr) {
// If this is the first virtual we find, remember it. If it turns out
// there is no base path here, we'll reset it later.
- DetectedVirtual = BaseType->getAs<RecordType>();
+ DetectedVirtual = BaseType->getAsCanonical<RecordType>();
SetVirtual = true;
}
} else {
@@ -255,8 +249,7 @@ bool CXXBasePaths::lookupInBases(ASTContext &Context,
const TemplateSpecializationType *TST =
BaseSpec.getType()->getAs<TemplateSpecializationType>();
if (!TST) {
- if (auto *RT = BaseSpec.getType()->getAs<RecordType>())
- BaseRecord = cast<CXXRecordDecl>(RT->getDecl());
+ BaseRecord = BaseSpec.getType()->getAsCXXRecordDecl();
} else {
TemplateName TN = TST->getTemplateName();
if (auto *TD =
@@ -270,7 +263,7 @@ bool CXXBasePaths::lookupInBases(ASTContext &Context,
BaseRecord = nullptr;
}
} else {
- BaseRecord = cast<CXXRecordDecl>(BaseSpec.getType()->getAsRecordDecl());
+ BaseRecord = BaseSpec.getType()->castAsCXXRecordDecl();
}
if (BaseRecord &&
lookupInBases(Context, BaseRecord, BaseMatches, LookupInDependent)) {
@@ -334,9 +327,7 @@ bool CXXRecordDecl::lookupInBases(BaseMatchesCallback BaseMatches,
if (!PE.Base->isVirtual())
continue;
- CXXRecordDecl *VBase = nullptr;
- if (const RecordType *Record = PE.Base->getType()->getAs<RecordType>())
- VBase = cast<CXXRecordDecl>(Record->getDecl());
+ auto *VBase = PE.Base->getType()->getAsCXXRecordDecl();
if (!VBase)
break;
@@ -345,10 +336,8 @@ bool CXXRecordDecl::lookupInBases(BaseMatchesCallback BaseMatches,
// base is a subobject of any other path; if so, then the
// declarations in this path are hidden by that path.
for (const CXXBasePath &HidingP : Paths) {
- CXXRecordDecl *HidingClass = nullptr;
- if (const RecordType *Record =
- HidingP.back().Base->getType()->getAs<RecordType>())
- HidingClass = cast<CXXRecordDecl>(Record->getDecl());
+ auto *HidingClass =
+ HidingP.back().Base->getType()->getAsCXXRecordDecl();
if (!HidingClass)
break;
@@ -404,7 +393,7 @@ bool CXXRecordDecl::hasMemberName(DeclarationName Name) const {
CXXBasePaths Paths(false, false, false);
return lookupInBases(
[Name](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
- return findOrdinaryMember(Specifier->getType()->getAsCXXRecordDecl(),
+ return findOrdinaryMember(Specifier->getType()->castAsCXXRecordDecl(),
Path, Name);
},
Paths);
@@ -467,8 +456,7 @@ void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
= ++SubobjectCount[cast<CXXRecordDecl>(RD->getCanonicalDecl())];
for (const auto &Base : RD->bases()) {
- if (const RecordType *RT = Base.getType()->getAs<RecordType>()) {
- const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (const auto *BaseDecl = Base.getType()->getAsCXXRecordDecl()) {
if (!BaseDecl->isPolymorphic())
continue;
diff --git a/clang/lib/AST/Comment.cpp b/clang/lib/AST/Comment.cpp
index cd73d27..37e21c3 100644
--- a/clang/lib/AST/Comment.cpp
+++ b/clang/lib/AST/Comment.cpp
@@ -147,8 +147,6 @@ static TypeLoc lookThroughTypedefOrTypeAliasLocs(TypeLoc &SrcTL) {
return BlockPointerTL.getPointeeLoc().getUnqualifiedLoc();
if (MemberPointerTypeLoc MemberPointerTL = TL.getAs<MemberPointerTypeLoc>())
return MemberPointerTL.getPointeeLoc().getUnqualifiedLoc();
- if (ElaboratedTypeLoc ETL = TL.getAs<ElaboratedTypeLoc>())
- return ETL.getNamedTypeLoc();
return TL;
}
diff --git a/clang/lib/AST/CommentLexer.cpp b/clang/lib/AST/CommentLexer.cpp
index e19c232..a0903d0 100644
--- a/clang/lib/AST/CommentLexer.cpp
+++ b/clang/lib/AST/CommentLexer.cpp
@@ -214,7 +214,7 @@ bool isCommandNameStartCharacter(char C) {
}
bool isCommandNameCharacter(char C) {
- return isAlphanumeric(C);
+ return isAsciiIdentifierContinue(C, false);
}
const char *skipCommandName(const char *BufferPtr, const char *BufferEnd) {
diff --git a/clang/lib/AST/CommentParser.cpp b/clang/lib/AST/CommentParser.cpp
index e61846d..2e5821a 100644
--- a/clang/lib/AST/CommentParser.cpp
+++ b/clang/lib/AST/CommentParser.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/CommentParser.h"
+#include "clang/AST/Comment.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/CommentSema.h"
#include "clang/Basic/CharInfo.h"
@@ -569,6 +570,8 @@ BlockCommandComment *Parser::parseBlockCommand() {
InlineCommandComment *Parser::parseInlineCommand() {
assert(Tok.is(tok::backslash_command) || Tok.is(tok::at_command));
+ CommandMarkerKind CMK =
+ Tok.is(tok::backslash_command) ? CMK_Backslash : CMK_At;
const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
const Token CommandTok = Tok;
@@ -580,7 +583,7 @@ InlineCommandComment *Parser::parseInlineCommand() {
InlineCommandComment *IC = S.actOnInlineCommand(
CommandTok.getLocation(), CommandTok.getEndLocation(),
- CommandTok.getCommandID(), Args);
+ CommandTok.getCommandID(), CMK, Args);
if (Args.size() < Info->NumArgs) {
Diag(CommandTok.getEndLocation().getLocWithOffset(1),
diff --git a/clang/lib/AST/CommentSema.cpp b/clang/lib/AST/CommentSema.cpp
index 88520d7..649fba9 100644
--- a/clang/lib/AST/CommentSema.cpp
+++ b/clang/lib/AST/CommentSema.cpp
@@ -363,12 +363,13 @@ void Sema::actOnTParamCommandFinish(TParamCommandComment *Command,
InlineCommandComment *
Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
SourceLocation CommandLocEnd, unsigned CommandID,
+ CommandMarkerKind CommandMarker,
ArrayRef<Comment::Argument> Args) {
StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
- return new (Allocator)
- InlineCommandComment(CommandLocBegin, CommandLocEnd, CommandID,
- getInlineCommandRenderKind(CommandName), Args);
+ return new (Allocator) InlineCommandComment(
+ CommandLocBegin, CommandLocEnd, CommandID,
+ getInlineCommandRenderKind(CommandName), CommandMarker, Args);
}
InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin,
@@ -905,17 +906,9 @@ bool Sema::isClassOrStructOrTagTypedefDecl() {
if (isClassOrStructDeclImpl(ThisDeclInfo->CurrentDecl))
return true;
- if (auto *ThisTypedefDecl = dyn_cast<TypedefDecl>(ThisDeclInfo->CurrentDecl)) {
- auto UnderlyingType = ThisTypedefDecl->getUnderlyingType();
- if (auto ThisElaboratedType = dyn_cast<ElaboratedType>(UnderlyingType)) {
- auto DesugaredType = ThisElaboratedType->desugar();
- if (auto *DesugaredTypePtr = DesugaredType.getTypePtrOrNull()) {
- if (auto *ThisRecordType = dyn_cast<RecordType>(DesugaredTypePtr)) {
- return isClassOrStructDeclImpl(ThisRecordType->getAsRecordDecl());
- }
- }
- }
- }
+ if (auto *ThisTypedefDecl = dyn_cast<TypedefDecl>(ThisDeclInfo->CurrentDecl))
+ if (auto *D = ThisTypedefDecl->getUnderlyingType()->getAsRecordDecl())
+ return isClassOrStructDeclImpl(D);
return false;
}
diff --git a/clang/lib/AST/ComparisonCategories.cpp b/clang/lib/AST/ComparisonCategories.cpp
index 2824410..0c7a7f4 100644
--- a/clang/lib/AST/ComparisonCategories.cpp
+++ b/clang/lib/AST/ComparisonCategories.cpp
@@ -166,7 +166,7 @@ const ComparisonCategoryInfo &ComparisonCategories::getInfoForType(QualType Ty)
QualType ComparisonCategoryInfo::getType() const {
assert(Record);
- return QualType(Record->getTypeForDecl(), 0);
+ return Record->getASTContext().getCanonicalTagType(Record);
}
StringRef ComparisonCategories::getCategoryString(ComparisonCategoryType Kind) {
diff --git a/clang/lib/AST/ComputeDependence.cpp b/clang/lib/AST/ComputeDependence.cpp
index 87334d9..e0cf0de 100644
--- a/clang/lib/AST/ComputeDependence.cpp
+++ b/clang/lib/AST/ComputeDependence.cpp
@@ -500,9 +500,8 @@ ExprDependence clang::computeDependence(OMPIteratorExpr *E) {
ExprDependence clang::computeDependence(DeclRefExpr *E, const ASTContext &Ctx) {
auto Deps = ExprDependence::None;
- if (auto *NNS = E->getQualifier())
- Deps |= toExprDependence(NNS->getDependence() &
- ~NestedNameSpecifierDependence::Dependent);
+ Deps |= toExprDependence(E->getQualifier().getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
if (auto *FirstArg = E->getTemplateArgs()) {
unsigned NumArgs = E->getNumTemplateArgs();
@@ -673,9 +672,8 @@ ExprDependence clang::computeDependence(MemberExpr *E) {
auto D = E->getBase()->getDependence();
D |= getDependenceInExpr(E->getMemberNameInfo());
- if (auto *NNS = E->getQualifier())
- D |= toExprDependence(NNS->getDependence() &
- ~NestedNameSpecifierDependence::Dependent);
+ D |= toExprDependence(E->getQualifier().getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
for (const auto &A : E->template_arguments())
D |= toExprDependence(A.getArgument().getDependence());
@@ -783,9 +781,8 @@ ExprDependence clang::computeDependence(CXXPseudoDestructorExpr *E) {
if (auto *ST = E->getScopeTypeInfo())
D |= turnTypeToValueDependence(
toExprDependenceAsWritten(ST->getType()->getDependence()));
- if (auto *Q = E->getQualifier())
- D |= toExprDependence(Q->getDependence() &
- ~NestedNameSpecifierDependence::Dependent);
+ D |= toExprDependence(E->getQualifier().getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
return D;
}
@@ -801,9 +798,8 @@ clang::computeDependence(OverloadExpr *E, bool KnownDependent,
if (KnownContainsUnexpandedParameterPack)
Deps |= ExprDependence::UnexpandedPack;
Deps |= getDependenceInExpr(E->getNameInfo());
- if (auto *Q = E->getQualifier())
- Deps |= toExprDependence(Q->getDependence() &
- ~NestedNameSpecifierDependence::Dependent);
+ Deps |= toExprDependence(E->getQualifier().getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
for (auto *D : E->decls()) {
if (D->getDeclContext()->isDependentContext() ||
isa<UnresolvedUsingValueDecl>(D) || isa<TemplateTemplateParmDecl>(D))
@@ -820,8 +816,7 @@ clang::computeDependence(OverloadExpr *E, bool KnownDependent,
ExprDependence clang::computeDependence(DependentScopeDeclRefExpr *E) {
auto D = ExprDependence::TypeValue;
D |= getDependenceInExpr(E->getNameInfo());
- if (auto *Q = E->getQualifier())
- D |= toExprDependence(Q->getDependence());
+ D |= toExprDependence(E->getQualifier().getDependence());
for (const auto &A : E->template_arguments())
D |= toExprDependence(A.getArgument().getDependence());
return D;
@@ -872,8 +867,7 @@ ExprDependence clang::computeDependence(CXXDependentScopeMemberExpr *E) {
auto D = ExprDependence::TypeValueInstantiation;
if (!E->isImplicitAccess())
D |= E->getBase()->getDependence();
- if (auto *Q = E->getQualifier())
- D |= toExprDependence(Q->getDependence());
+ D |= toExprDependence(E->getQualifier().getDependence());
D |= getDependenceInExpr(E->getMemberNameInfo());
for (const auto &A : E->template_arguments())
D |= toExprDependence(A.getArgument().getDependence());
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index 5471f31..d8dffb7 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -1604,17 +1604,20 @@ LinkageInfo LinkageComputer::getLVForDecl(const NamedDecl *D,
// We have just computed the linkage for this decl. By induction we know
// that all other computed linkages match, check that the one we just
// computed also does.
- NamedDecl *Old = nullptr;
- for (auto *I : D->redecls()) {
- auto *T = cast<NamedDecl>(I);
- if (T == D)
+ // We can't assume the redecl chain is well formed at this point,
+ // so keep track of already visited declarations.
+ for (llvm::SmallPtrSet<const Decl *, 4> AlreadyVisited{D}; /**/; /**/) {
+ D = cast<NamedDecl>(const_cast<NamedDecl *>(D)->getNextRedeclarationImpl());
+ if (!AlreadyVisited.insert(D).second)
+ break;
+ if (D->isInvalidDecl())
continue;
- if (!T->isInvalidDecl() && T->hasCachedLinkage()) {
- Old = T;
+ if (auto OldLinkage = D->getCachedLinkage();
+ OldLinkage != Linkage::Invalid) {
+ assert(LV.getLinkage() == OldLinkage);
break;
}
}
- assert(!Old || Old->getCachedLinkage() == D->getCachedLinkage());
#endif
return LV;
@@ -1693,9 +1696,9 @@ void NamedDecl::printQualifiedName(raw_ostream &OS,
return;
}
printNestedNameSpecifier(OS, P);
- if (getDeclName())
- OS << *this;
- else {
+ if (getDeclName()) {
+ printName(OS, P);
+ } else {
// Give the printName override a chance to pick a different name before we
// fall back to "(anonymous)".
SmallString<64> NameBuffer;
@@ -1883,18 +1886,13 @@ bool NamedDecl::declarationReplaces(const NamedDecl *OldD,
// Using declarations can be replaced if they import the same name from the
// same context.
- if (const auto *UD = dyn_cast<UsingDecl>(this)) {
- ASTContext &Context = getASTContext();
- return Context.getCanonicalNestedNameSpecifier(UD->getQualifier()) ==
- Context.getCanonicalNestedNameSpecifier(
- cast<UsingDecl>(OldD)->getQualifier());
- }
- if (const auto *UUVD = dyn_cast<UnresolvedUsingValueDecl>(this)) {
- ASTContext &Context = getASTContext();
- return Context.getCanonicalNestedNameSpecifier(UUVD->getQualifier()) ==
- Context.getCanonicalNestedNameSpecifier(
- cast<UnresolvedUsingValueDecl>(OldD)->getQualifier());
- }
+ if (const auto *UD = dyn_cast<UsingDecl>(this))
+ return UD->getQualifier().getCanonical() ==
+ cast<UsingDecl>(OldD)->getQualifier().getCanonical();
+ if (const auto *UUVD = dyn_cast<UnresolvedUsingValueDecl>(this))
+ return UUVD->getQualifier().getCanonical() ==
+ cast<UnresolvedUsingValueDecl>(OldD)->getQualifier().getCanonical();
if (isRedeclarable(getKind())) {
if (getCanonicalDecl() != OldD->getCanonicalDecl())
@@ -2863,8 +2861,8 @@ VarDecl::needsDestruction(const ASTContext &Ctx) const {
bool VarDecl::hasFlexibleArrayInit(const ASTContext &Ctx) const {
assert(hasInit() && "Expect initializer to check for flexible array init");
- auto *Ty = getType()->getAs<RecordType>();
- if (!Ty || !Ty->getDecl()->hasFlexibleArrayMember())
+ auto *D = getType()->getAsRecordDecl();
+ if (!D || !D->hasFlexibleArrayMember())
return false;
auto *List = dyn_cast<InitListExpr>(getInit()->IgnoreParens());
if (!List)
@@ -2878,8 +2876,8 @@ bool VarDecl::hasFlexibleArrayInit(const ASTContext &Ctx) const {
CharUnits VarDecl::getFlexibleArrayInitChars(const ASTContext &Ctx) const {
assert(hasInit() && "Expect initializer to check for flexible array init");
- auto *Ty = getType()->getAs<RecordType>();
- if (!Ty || !Ty->getDecl()->hasFlexibleArrayMember())
+ auto *RD = getType()->getAsRecordDecl();
+ if (!RD || !RD->hasFlexibleArrayMember())
return CharUnits::Zero();
auto *List = dyn_cast<InitListExpr>(getInit()->IgnoreParens());
if (!List || List->getNumInits() == 0)
@@ -2889,7 +2887,7 @@ CharUnits VarDecl::getFlexibleArrayInitChars(const ASTContext &Ctx) const {
if (!InitTy)
return CharUnits::Zero();
CharUnits FlexibleArraySize = Ctx.getTypeSizeInChars(InitTy);
- const ASTRecordLayout &RL = Ctx.getASTRecordLayout(Ty->getDecl());
+ const ASTRecordLayout &RL = Ctx.getASTRecordLayout(RD);
CharUnits FlexibleArrayOffset =
Ctx.toCharUnitsFromBits(RL.getFieldOffset(RL.getFieldCount() - 1));
if (FlexibleArrayOffset + FlexibleArraySize < RL.getSize())
@@ -2990,8 +2988,11 @@ bool ParmVarDecl::isDestroyedInCallee() const {
// FIXME: isParamDestroyedInCallee() should probably imply
// isDestructedType()
- const auto *RT = getType()->getAs<RecordType>();
- if (RT && RT->getDecl()->isParamDestroyedInCallee() &&
+ const auto *RT = getType()->getAsCanonical<RecordType>();
+ if (RT &&
+ RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isParamDestroyedInCallee() &&
getType().isDestructedType())
return true;
@@ -3502,7 +3503,7 @@ bool FunctionDecl::isUsableAsGlobalAllocationFunctionInConstantEvaluation(
while (const auto *TD = T->getAs<TypedefType>())
T = TD->getDecl()->getUnderlyingType();
const IdentifierInfo *II =
- T->castAs<EnumType>()->getDecl()->getIdentifier();
+ T->castAsCanonical<EnumType>()->getOriginalDecl()->getIdentifier();
if (II && II->isStr("__hot_cold_t"))
Consume();
}
@@ -3599,6 +3600,10 @@ bool FunctionDecl::isNoReturn() const {
return false;
}
+bool FunctionDecl::isAnalyzerNoReturn() const {
+ return hasAttr<AnalyzerNoReturnAttr>();
+}
+
bool FunctionDecl::isMemberLikeConstrainedFriend() const {
// C++20 [temp.friend]p9:
// A non-template friend declaration with a requires-clause [or]
@@ -4652,8 +4657,8 @@ bool FieldDecl::isAnonymousStructOrUnion() const {
if (!isImplicit() || getDeclName())
return false;
- if (const auto *Record = getType()->getAs<RecordType>())
- return Record->getDecl()->isAnonymousStructOrUnion();
+ if (const auto *Record = getType()->getAsCanonical<RecordType>())
+ return Record->getOriginalDecl()->isAnonymousStructOrUnion();
return false;
}
@@ -4710,10 +4715,10 @@ bool FieldDecl::isZeroSize(const ASTContext &Ctx) const {
return false;
// -- is not of class type, or
- const auto *RT = getType()->getAs<RecordType>();
+ const auto *RT = getType()->getAsCanonical<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl()->getDefinition();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinition();
if (!RD) {
assert(isInvalidDecl() && "valid field has incomplete type");
return false;
@@ -4733,7 +4738,7 @@ bool FieldDecl::isZeroSize(const ASTContext &Ctx) const {
// MS ABI: has nonzero size if it is a class type with class type fields,
// whether or not they have nonzero size
return !llvm::any_of(CXXRD->fields(), [](const FieldDecl *Field) {
- return Field->getType()->getAs<RecordType>();
+ return Field->getType()->isRecordType();
});
}
@@ -4836,10 +4841,6 @@ TagDecl *TagDecl::getCanonicalDecl() { return getFirstDecl(); }
void TagDecl::setTypedefNameForAnonDecl(TypedefNameDecl *TDD) {
TypedefNameDeclOrQualifier = TDD;
- if (const Type *T = getTypeForDecl()) {
- (void)T;
- assert(T->isLinkageValid());
- }
assert(isLinkageValid());
}
@@ -4867,25 +4868,16 @@ void TagDecl::completeDefinition() {
}
TagDecl *TagDecl::getDefinition() const {
- if (isCompleteDefinition())
+ if (isCompleteDefinition() || isBeingDefined())
return const_cast<TagDecl *>(this);
- // If it's possible for us to have an out-of-date definition, check now.
- if (mayHaveOutOfDateDef()) {
- if (IdentifierInfo *II = getIdentifier()) {
- if (II->isOutOfDate()) {
- updateOutOfDate(*II);
- }
- }
- }
-
if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(this))
return CXXRD->getDefinition();
- for (auto *R : redecls())
- if (R->isCompleteDefinition())
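+ // Walk the remaining redeclarations, starting after this declaration,
+ // and return the first one that is a complete definition or is
+ // currently being defined.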
+ for (TagDecl *R :
+ redecl_range(redecl_iterator(getNextRedeclaration()), redecl_iterator()))
+ if (R->isCompleteDefinition() || R->isBeingDefined())
return R;
-
return nullptr;
}
@@ -4919,7 +4911,7 @@ void TagDecl::printName(raw_ostream &OS, const PrintingPolicy &Policy) const {
// is already printed as part of the type.
PrintingPolicy Copy(Policy);
Copy.SuppressScope = true;
- getASTContext().getTagDeclType(this).print(OS, Copy);
+ QualType(getASTContext().getCanonicalTagType(this)).print(OS, Copy);
return;
}
// Otherwise, do the normal printing.
@@ -4963,19 +4955,13 @@ EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC,
IdentifierInfo *Id,
EnumDecl *PrevDecl, bool IsScoped,
bool IsScopedUsingClassTag, bool IsFixed) {
- auto *Enum = new (C, DC) EnumDecl(C, DC, StartLoc, IdLoc, Id, PrevDecl,
- IsScoped, IsScopedUsingClassTag, IsFixed);
- Enum->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
- C.getTypeDeclType(Enum, PrevDecl);
- return Enum;
+ return new (C, DC) EnumDecl(C, DC, StartLoc, IdLoc, Id, PrevDecl, IsScoped,
+ IsScopedUsingClassTag, IsFixed);
}
EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
- EnumDecl *Enum =
- new (C, ID) EnumDecl(C, nullptr, SourceLocation(), SourceLocation(),
- nullptr, nullptr, false, false, false);
- Enum->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
- return Enum;
+ return new (C, ID) EnumDecl(C, nullptr, SourceLocation(), SourceLocation(),
+ nullptr, nullptr, false, false, false);
}
SourceRange EnumDecl::getIntegerTypeRange() const {
@@ -5035,7 +5021,7 @@ EnumDecl *EnumDecl::getTemplateInstantiationPattern() const {
EnumDecl *ED = getInstantiatedFromMemberEnum();
while (auto *NewED = ED->getInstantiatedFromMemberEnum())
ED = NewED;
- return getDefinitionOrSelf(ED);
+ return ::getDefinitionOrSelf(ED);
}
}
@@ -5125,21 +5111,15 @@ RecordDecl::RecordDecl(Kind DK, TagKind TK, const ASTContext &C,
RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, RecordDecl* PrevDecl) {
- RecordDecl *R = new (C, DC) RecordDecl(Record, TK, C, DC,
- StartLoc, IdLoc, Id, PrevDecl);
- R->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
-
- C.getTypeDeclType(R, PrevDecl);
- return R;
+ return new (C, DC)
+ RecordDecl(Record, TK, C, DC, StartLoc, IdLoc, Id, PrevDecl);
}
RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C,
GlobalDeclID ID) {
- RecordDecl *R = new (C, ID)
+ return new (C, ID)
RecordDecl(Record, TagTypeKind::Struct, C, nullptr, SourceLocation(),
SourceLocation(), nullptr, nullptr);
- R->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
- return R;
}
bool RecordDecl::isLambda() const {
@@ -5162,8 +5142,8 @@ bool RecordDecl::isOrContainsUnion() const {
if (const RecordDecl *Def = getDefinition()) {
for (const FieldDecl *FD : Def->fields()) {
- const RecordType *RT = FD->getType()->getAs<RecordType>();
- if (RT && RT->getDecl()->isOrContainsUnion())
+ const RecordType *RT = FD->getType()->getAsCanonical<RecordType>();
+ if (RT && RT->getOriginalDecl()->isOrContainsUnion())
return true;
}
}
@@ -5294,9 +5274,8 @@ const FieldDecl *RecordDecl::findFirstNamedDataMember() const {
if (I->getIdentifier())
return I;
- if (const auto *RT = I->getType()->getAs<RecordType>())
- if (const FieldDecl *NamedDataMember =
- RT->getDecl()->findFirstNamedDataMember())
+ if (const auto *RD = I->getType()->getAsRecordDecl())
+ if (const FieldDecl *NamedDataMember = RD->findFirstNamedDataMember())
return NamedDataMember;
}
@@ -5658,14 +5637,14 @@ void TypedefNameDecl::anchor() {}
TagDecl *TypedefNameDecl::getAnonDeclWithTypedefName(bool AnyRedecl) const {
if (auto *TT = getTypeSourceInfo()->getType()->getAs<TagType>()) {
- auto *OwningTypedef = TT->getDecl()->getTypedefNameForAnonDecl();
+ auto *OwningTypedef = TT->getOriginalDecl()->getTypedefNameForAnonDecl();
auto *ThisTypedef = this;
if (AnyRedecl && OwningTypedef) {
OwningTypedef = OwningTypedef->getCanonicalDecl();
ThisTypedef = ThisTypedef->getCanonicalDecl();
}
if (OwningTypedef == ThisTypedef)
- return TT->getDecl();
+ return TT->getOriginalDecl()->getDefinitionOrSelf();
}
return nullptr;
@@ -5674,7 +5653,7 @@ TagDecl *TypedefNameDecl::getAnonDeclWithTypedefName(bool AnyRedecl) const {
bool TypedefNameDecl::isTransparentTagSlow() const {
auto determineIsTransparent = [&]() {
if (auto *TT = getUnderlyingType()->getAs<TagType>()) {
- if (auto *TD = TT->getDecl()) {
+ if (auto *TD = TT->getOriginalDecl()) {
if (TD->getName() != getName())
return false;
SourceLocation TTLoc = getLocation();
diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp
index 13c46fd..680a4d7 100644
--- a/clang/lib/AST/DeclBase.cpp
+++ b/clang/lib/AST/DeclBase.cpp
@@ -58,10 +58,6 @@ using namespace clang;
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
-void Decl::updateOutOfDate(IdentifierInfo &II) const {
- getASTContext().getExternalSource()->updateOutOfDateIdentifier(II);
-}
-
#define DECL(DERIVED, BASE) \
static_assert(alignof(Decl) >= alignof(DERIVED##Decl), \
"Alignment sufficient after objects prepended to " #DERIVED);
@@ -489,8 +485,7 @@ bool Decl::isFlexibleArrayMemberLike(
// Look through typedefs.
if (TypedefTypeLoc TTL = TL.getAsAdjusted<TypedefTypeLoc>()) {
- const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
- TInfo = TDL->getTypeSourceInfo();
+ TInfo = TTL.getDecl()->getTypeSourceInfo();
continue;
}
@@ -1512,30 +1507,19 @@ DeclContext *DeclContext::getPrimaryContext() {
case Decl::ObjCCategoryImpl:
return this;
- default:
- if (getDeclKind() >= Decl::firstTag && getDeclKind() <= Decl::lastTag) {
- // If this is a tag type that has a definition or is currently
- // being defined, that definition is our primary context.
- auto *Tag = cast<TagDecl>(this);
-
- if (TagDecl *Def = Tag->getDefinition())
- return Def;
-
- if (const auto *TagTy = dyn_cast<TagType>(Tag->getTypeForDecl())) {
- // Note, TagType::getDecl returns the (partial) definition one exists.
- TagDecl *PossiblePartialDef = TagTy->getDecl();
- if (PossiblePartialDef->isBeingDefined())
- return PossiblePartialDef;
- } else {
- assert(isa<InjectedClassNameType>(Tag->getTypeForDecl()));
- }
-
- return Tag;
- }
+ // If this is a tag type that has a definition or is currently
+ // being defined, that definition is our primary context.
+ case Decl::ClassTemplatePartialSpecialization:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::CXXRecord:
+ return cast<CXXRecordDecl>(this)->getDefinitionOrSelf();
+ case Decl::Record:
+ case Decl::Enum:
+ return cast<TagDecl>(this)->getDefinitionOrSelf();
+ default:
assert(getDeclKind() >= Decl::firstFunction &&
- getDeclKind() <= Decl::lastFunction &&
- "Unknown DeclContext kind");
+ getDeclKind() <= Decl::lastFunction && "Unknown DeclContext kind");
return this;
}
}
diff --git a/clang/lib/AST/DeclCXX.cpp b/clang/lib/AST/DeclCXX.cpp
index 037a28c4..aa1f5a1 100644
--- a/clang/lib/AST/DeclCXX.cpp
+++ b/clang/lib/AST/DeclCXX.cpp
@@ -132,16 +132,9 @@ CXXRecordDecl::CXXRecordDecl(Kind K, TagKind TK, const ASTContext &C,
CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
- CXXRecordDecl *PrevDecl,
- bool DelayTypeCreation) {
- auto *R = new (C, DC) CXXRecordDecl(CXXRecord, TK, C, DC, StartLoc, IdLoc, Id,
- PrevDecl);
- R->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
-
- // FIXME: DelayTypeCreation seems like such a hack
- if (!DelayTypeCreation)
- C.getTypeDeclType(R, PrevDecl);
- return R;
+ CXXRecordDecl *PrevDecl) {
+ return new (C, DC)
+ CXXRecordDecl(CXXRecord, TK, C, DC, StartLoc, IdLoc, Id, PrevDecl);
}
CXXRecordDecl *
@@ -154,10 +147,7 @@ CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
R->setBeingDefined(true);
R->DefinitionData = new (C) struct LambdaDefinitionData(
R, Info, DependencyKind, IsGeneric, CaptureDefault);
- R->setMayHaveOutOfDateDef(false);
R->setImplicit(true);
-
- C.getTypeDeclType(R, /*PrevDecl=*/nullptr);
return R;
}
@@ -166,7 +156,6 @@ CXXRecordDecl *CXXRecordDecl::CreateDeserialized(const ASTContext &C,
auto *R = new (C, ID)
CXXRecordDecl(CXXRecord, TagTypeKind::Struct, C, nullptr,
SourceLocation(), SourceLocation(), nullptr, nullptr);
- R->setMayHaveOutOfDateDef(false);
return R;
}
@@ -178,7 +167,7 @@ static bool hasRepeatedBaseClass(const CXXRecordDecl *StartRD) {
SmallVector<const CXXRecordDecl*, 8> WorkList = {StartRD};
while (!WorkList.empty()) {
const CXXRecordDecl *RD = WorkList.pop_back_val();
- if (RD->getTypeForDecl()->isDependentType())
+ if (RD->isDependentType())
continue;
for (const CXXBaseSpecifier &BaseSpec : RD->bases()) {
if (const CXXRecordDecl *B = BaseSpec.getType()->getAsCXXRecordDecl()) {
@@ -227,8 +216,7 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// Skip dependent types; we can't do any checking on them now.
if (BaseType->isDependentType())
continue;
- auto *BaseClassDecl =
- cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getDecl());
+ auto *BaseClassDecl = BaseType->castAsCXXRecordDecl();
// C++2a [class]p7:
// A standard-layout class is a class that:
@@ -1217,9 +1205,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
// those because they are always unnamed.
bool IsZeroSize = Field->isZeroSize(Context);
- if (const auto *RecordTy = T->getAs<RecordType>()) {
- auto *FieldRec = cast<CXXRecordDecl>(RecordTy->getDecl());
- if (FieldRec->getDefinition()) {
+ if (auto *FieldRec = T->getAsCXXRecordDecl()) {
+ if (FieldRec->isBeingDefined() || FieldRec->isCompleteDefinition()) {
addedClassSubobject(FieldRec);
// We may need to perform overload resolution to determine whether a
@@ -1448,6 +1435,13 @@ void CXXRecordDecl::addedMember(Decl *D) {
data().StructuralIfLiteral = false;
}
+ // If this type contains any address-discriminated values, we should
+ // have already indicated that the only special member functions that
+ // can possibly be trivial are the default constructor and destructor.
+ if (T.hasAddressDiscriminatedPointerAuth())
+ data().HasTrivialSpecialMembers &=
+ SMF_DefaultConstructor | SMF_Destructor;
+
// C++14 [meta.unary.prop]p4:
// T is a class type [...] with [...] no non-static data members other
// than subobjects of zero size
@@ -1918,14 +1912,14 @@ static void CollectVisibleConversions(
// Collect information recursively from any base classes.
for (const auto &I : Record->bases()) {
- const auto *RT = I.getType()->getAs<RecordType>();
- if (!RT) continue;
+ const auto *Base = I.getType()->getAsCXXRecordDecl();
+ if (!Base)
+ continue;
AccessSpecifier BaseAccess
= CXXRecordDecl::MergeAccess(Access, I.getAccessSpecifier());
bool BaseInVirtual = InVirtual || I.isVirtual();
- auto *Base = cast<CXXRecordDecl>(RT->getDecl());
CollectVisibleConversions(Context, Base, BaseInVirtual, BaseAccess,
*HiddenTypes, Output, VOutput, HiddenVBaseCs);
}
@@ -1960,12 +1954,13 @@ static void CollectVisibleConversions(ASTContext &Context,
// Recursively collect conversions from base classes.
for (const auto &I : Record->bases()) {
- const auto *RT = I.getType()->getAs<RecordType>();
- if (!RT) continue;
+ const auto *Base = I.getType()->getAsCXXRecordDecl();
+ if (!Base)
+ continue;
- CollectVisibleConversions(Context, cast<CXXRecordDecl>(RT->getDecl()),
- I.isVirtual(), I.getAccessSpecifier(),
- HiddenTypes, Output, VBaseCs, HiddenVBaseCs);
+ CollectVisibleConversions(Context, Base, I.isVirtual(),
+ I.getAccessSpecifier(), HiddenTypes, Output,
+ VBaseCs, HiddenVBaseCs);
}
// Add any unhidden conversions provided by virtual bases.
@@ -2125,11 +2120,10 @@ const CXXRecordDecl *CXXRecordDecl::getTemplateInstantiationPattern() const {
CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
ASTContext &Context = getASTContext();
- QualType ClassType = Context.getTypeDeclType(this);
+ CanQualType ClassType = Context.getCanonicalTagType(this);
- DeclarationName Name
- = Context.DeclarationNames.getCXXDestructorName(
- Context.getCanonicalType(ClassType));
+ DeclarationName Name =
+ Context.DeclarationNames.getCXXDestructorName(ClassType);
DeclContext::lookup_result R = lookup(Name);
@@ -2159,6 +2153,29 @@ bool CXXRecordDecl::isInjectedClassName() const {
return false;
}
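+// True for classes described by a class template and for class template
+// partial specializations; false for full specializations and for classes
+// not described by any template.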
+bool CXXRecordDecl::hasInjectedClassType() const {
+ switch (getDeclKind()) {
+ case Decl::ClassTemplatePartialSpecialization:
+ return true;
+ case Decl::ClassTemplateSpecialization:
+ return false;
+ case Decl::CXXRecord:
+ return getDescribedClassTemplate() != nullptr;
+ default:
+ llvm_unreachable("unexpected decl kind");
+ }
+}
+
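+// Returns the canonical injected specialization type for class template
+// patterns and partial specializations, or a null CanQualType for full
+// specializations and classes not described by a template.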
+CanQualType CXXRecordDecl::getCanonicalTemplateSpecializationType(
+ const ASTContext &Ctx) const {
+ if (auto *RD = dyn_cast<ClassTemplatePartialSpecializationDecl>(this))
+ return RD->getCanonicalInjectedSpecializationType(Ctx);
+ if (const ClassTemplateDecl *TD = getDescribedClassTemplate();
+ TD && !isa<ClassTemplateSpecializationDecl>(this))
+ return TD->getCanonicalInjectedSpecializationType(Ctx);
+ return CanQualType();
+}
+
static bool isDeclContextInNamespace(const DeclContext *DC) {
while (!DC->isTranslationUnit()) {
if (DC->isNamespace())
@@ -2272,7 +2289,7 @@ void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
Context.getDiagnostics().Report(
AT->getLocation(),
diag::warn_cxx20_compat_requires_explicit_init_non_aggregate)
- << AT << FD << Context.getRecordType(this);
+ << AT << FD << Context.getCanonicalTagType(this);
}
}
@@ -2284,7 +2301,7 @@ void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
if (const auto *AT = FD->getAttr<ExplicitInitAttr>())
Context.getDiagnostics().Report(AT->getLocation(),
diag::warn_attribute_needs_aggregate)
- << AT << Context.getRecordType(this);
+ << AT << Context.getCanonicalTagType(this);
}
setHasUninitializedExplicitInitFields(false);
}
@@ -2296,8 +2313,8 @@ bool CXXRecordDecl::mayBeAbstract() const {
return false;
for (const auto &B : bases()) {
- const auto *BaseDecl =
- cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
+ const auto *BaseDecl = cast<CXXRecordDecl>(
+ B.getType()->castAsCanonical<RecordType>()->getOriginalDecl());
if (BaseDecl->isAbstract())
return true;
}
@@ -2457,10 +2474,9 @@ CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
};
for (const auto &I : RD->bases()) {
- const RecordType *RT = I.getType()->getAs<RecordType>();
- if (!RT)
+ const auto *Base = I.getType()->getAsCXXRecordDecl();
+ if (!Base)
continue;
- const auto *Base = cast<CXXRecordDecl>(RT->getDecl());
if (CXXMethodDecl *D = this->getCorrespondingMethodInClass(Base))
AddFinalOverrider(D);
}
@@ -2712,8 +2728,7 @@ bool CXXMethodDecl::isCopyAssignmentOperator() const {
ParamType = Ref->getPointeeType();
ASTContext &Context = getASTContext();
- QualType ClassType
- = Context.getCanonicalType(Context.getTypeDeclType(getParent()));
+ CanQualType ClassType = Context.getCanonicalTagType(getParent());
return Context.hasSameUnqualifiedType(ClassType, ParamType);
}
@@ -2733,8 +2748,7 @@ bool CXXMethodDecl::isMoveAssignmentOperator() const {
ParamType = ParamType->getPointeeType();
ASTContext &Context = getASTContext();
- QualType ClassType
- = Context.getCanonicalType(Context.getTypeDeclType(getParent()));
+ CanQualType ClassType = Context.getCanonicalTagType(getParent());
return Context.hasSameUnqualifiedType(ClassType, ParamType);
}
@@ -2769,7 +2783,7 @@ CXXMethodDecl::overridden_methods() const {
static QualType getThisObjectType(ASTContext &C, const FunctionProtoType *FPT,
const CXXRecordDecl *Decl) {
- QualType ClassTy = C.getTypeDeclType(Decl);
+ CanQualType ClassTy = C.getCanonicalTagType(Decl);
return C.getQualifiedType(ClassTy, FPT->getMethodQuals());
}
@@ -3027,11 +3041,9 @@ bool CXXConstructorDecl::isCopyOrMoveConstructor(unsigned &TypeQuals) const {
// Is it a reference to our class type?
ASTContext &Context = getASTContext();
- CanQualType PointeeType
- = Context.getCanonicalType(ParamRefType->getPointeeType());
- CanQualType ClassTy
- = Context.getCanonicalType(Context.getTagDeclType(getParent()));
- if (PointeeType.getUnqualifiedType() != ClassTy)
+ QualType PointeeType = ParamRefType->getPointeeType();
+ CanQualType ClassTy = Context.getCanonicalTagType(getParent());
+ if (!Context.hasSameUnqualifiedType(PointeeType, ClassTy))
return false;
// FIXME: other qualifiers?
@@ -3066,15 +3078,11 @@ bool CXXConstructorDecl::isSpecializationCopyingObject() const {
const ParmVarDecl *Param = getParamDecl(0);
ASTContext &Context = getASTContext();
- CanQualType ParamType = Context.getCanonicalType(Param->getType());
+ CanQualType ParamType = Param->getType()->getCanonicalTypeUnqualified();
// Is it the same as our class type?
- CanQualType ClassTy
- = Context.getCanonicalType(Context.getTagDeclType(getParent()));
- if (ParamType.getUnqualifiedType() != ClassTy)
- return false;
-
- return true;
+ CanQualType ClassTy = Context.getCanonicalTagType(getParent());
+ return ParamType == ClassTy;
}
void CXXDestructorDecl::anchor() {}
@@ -3371,7 +3379,7 @@ ConstructorUsingShadowDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
}
CXXRecordDecl *ConstructorUsingShadowDecl::getNominatedBaseClass() const {
- return getIntroducer()->getQualifier()->getAsRecordDecl();
+ return getIntroducer()->getQualifier().getAsRecordDecl();
}
void BaseUsingDecl::anchor() {}
@@ -3429,13 +3437,12 @@ SourceRange UsingDecl::getSourceRange() const {
void UsingEnumDecl::anchor() {}
UsingEnumDecl *UsingEnumDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation UL,
- SourceLocation EL,
+ SourceLocation UL, SourceLocation EL,
SourceLocation NL,
TypeSourceInfo *EnumType) {
- assert(isa<EnumDecl>(EnumType->getType()->getAsTagDecl()));
return new (C, DC)
- UsingEnumDecl(DC, EnumType->getType()->getAsTagDecl()->getDeclName(), UL, EL, NL, EnumType);
+ UsingEnumDecl(DC, EnumType->getType()->castAsEnumDecl()->getDeclName(),
+ UL, EL, NL, EnumType);
}
UsingEnumDecl *UsingEnumDecl::CreateDeserialized(ASTContext &C,
diff --git a/clang/lib/AST/DeclPrinter.cpp b/clang/lib/AST/DeclPrinter.cpp
index f4265dd0..196057f 100644
--- a/clang/lib/AST/DeclPrinter.cpp
+++ b/clang/lib/AST/DeclPrinter.cpp
@@ -202,8 +202,7 @@ void Decl::printGroup(Decl** Begin, unsigned NumDecls,
}
Decl** End = Begin + NumDecls;
- TagDecl* TD = dyn_cast<TagDecl>(*Begin);
- if (TD)
+ if (isa<TagDecl>(*Begin))
++Begin;
PrintingPolicy SubPolicy(Policy);
@@ -211,13 +210,9 @@ void Decl::printGroup(Decl** Begin, unsigned NumDecls,
bool isFirst = true;
for ( ; Begin != End; ++Begin) {
if (isFirst) {
- if(TD)
- SubPolicy.IncludeTagDefinition = true;
- SubPolicy.SuppressSpecifiers = false;
isFirst = false;
} else {
- if (!isFirst) Out << ", ";
- SubPolicy.IncludeTagDefinition = false;
+ Out << ", ";
SubPolicy.SuppressSpecifiers = true;
}
@@ -487,10 +482,12 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
QualType CurDeclType = getDeclType(*D);
if (!Decls.empty() && !CurDeclType.isNull()) {
QualType BaseType = GetBaseType(CurDeclType);
- if (!BaseType.isNull() && isa<ElaboratedType>(BaseType) &&
- cast<ElaboratedType>(BaseType)->getOwnedTagDecl() == Decls[0]) {
- Decls.push_back(*D);
- continue;
+ if (const auto *TT = dyn_cast_or_null<TagType>(BaseType);
+ TT && TT->isTagOwned()) {
+ if (TT->getOriginalDecl() == Decls[0]) {
+ Decls.push_back(*D);
+ continue;
+ }
}
}
@@ -662,16 +659,6 @@ static void printExplicitSpecifier(ExplicitSpecifier ES, llvm::raw_ostream &Out,
Out << Proto;
}
-static void MaybePrintTagKeywordIfSupressingScopes(PrintingPolicy &Policy,
- QualType T,
- llvm::raw_ostream &Out) {
- StringRef prefix = T->isClassType() ? "class "
- : T->isStructureType() ? "struct "
- : T->isUnionType() ? "union "
- : "";
- Out << prefix;
-}
-
void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (!D->getDescribedFunctionTemplate() &&
!D->isFunctionTemplateSpecialization()) {
@@ -721,11 +708,8 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Proto += D->getQualifiedNameAsString();
} else {
llvm::raw_string_ostream OS(Proto);
- if (!Policy.SuppressScope) {
- if (const NestedNameSpecifier *NS = D->getQualifier()) {
- NS->print(OS, Policy);
- }
- }
+ if (!Policy.SuppressScope)
+ D->getQualifier().print(OS, Policy);
D->getNameInfo().printName(OS, Policy);
}
@@ -833,10 +817,6 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Out << Proto << " -> ";
Proto.clear();
}
- if (!Policy.SuppressTagKeyword && Policy.SuppressScope &&
- !Policy.SuppressUnwrittenScope)
- MaybePrintTagKeywordIfSupressingScopes(Policy, AFT->getReturnType(),
- Out);
AFT->getReturnType().print(Out, Policy, Proto);
Proto.clear();
}
@@ -995,10 +975,6 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
}
}
- if (!Policy.SuppressTagKeyword && Policy.SuppressScope &&
- !Policy.SuppressUnwrittenScope)
- MaybePrintTagKeywordIfSupressingScopes(Policy, T, Out);
-
printDeclType(T, (isa<ParmVarDecl>(D) && Policy.CleanUglifiedParameters &&
D->getIdentifier())
? D->getIdentifier()->deuglifiedName()
@@ -1028,7 +1004,6 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
}
PrintingPolicy SubPolicy(Policy);
SubPolicy.SuppressSpecifiers = false;
- SubPolicy.IncludeTagDefinition = false;
Init->printPretty(Out, nullptr, SubPolicy, Indentation, "\n", &Context);
if ((D->getInitStyle() == VarDecl::CallInit) && !isa<ParenListExpr>(Init))
Out << ")";
@@ -1086,15 +1061,13 @@ void DeclPrinter::VisitNamespaceDecl(NamespaceDecl *D) {
void DeclPrinter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
Out << "using namespace ";
- if (D->getQualifier())
- D->getQualifier()->print(Out, Policy);
+ D->getQualifier().print(Out, Policy);
Out << *D->getNominatedNamespaceAsWritten();
}
void DeclPrinter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
Out << "namespace " << *D << " = ";
- if (D->getQualifier())
- D->getQualifier()->print(Out, Policy);
+ D->getQualifier().print(Out, Policy);
Out << *D->getAliasedNamespace();
}
@@ -1115,8 +1088,7 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
Out << ' ';
if (D->getIdentifier()) {
- if (auto *NNS = D->getQualifier())
- NNS->print(Out, Policy);
+ D->getQualifier().print(Out, Policy);
Out << *D;
if (auto *S = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
@@ -1746,7 +1718,7 @@ void DeclPrinter::VisitUsingDecl(UsingDecl *D) {
Out << "using ";
if (D->hasTypename())
Out << "typename ";
- D->getQualifier()->print(Out, Policy);
+ D->getQualifier().print(Out, Policy);
// Use the correct record name when the using declaration is used for
// inheriting constructors.
@@ -1768,14 +1740,14 @@ void DeclPrinter::VisitUsingEnumDecl(UsingEnumDecl *D) {
void
DeclPrinter::VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D) {
Out << "using typename ";
- D->getQualifier()->print(Out, Policy);
+ D->getQualifier().print(Out, Policy);
Out << D->getDeclName();
}
void DeclPrinter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
if (!D->isAccessDeclaration())
Out << "using ";
- D->getQualifier()->print(Out, Policy);
+ D->getQualifier().print(Out, Policy);
Out << D->getDeclName();
}
diff --git a/clang/lib/AST/DeclTemplate.cpp b/clang/lib/AST/DeclTemplate.cpp
index bc4a299..3162857 100644
--- a/clang/lib/AST/DeclTemplate.cpp
+++ b/clang/lib/AST/DeclTemplate.cpp
@@ -307,8 +307,9 @@ bool TemplateDecl::hasAssociatedConstraints() const {
bool TemplateDecl::isTypeAlias() const {
switch (getKind()) {
case TemplateDecl::TypeAliasTemplate:
- case TemplateDecl::BuiltinTemplate:
return true;
+ case TemplateDecl::BuiltinTemplate:
+ return !cast<BuiltinTemplateDecl>(this)->isPackProducingBuiltinTemplate();
default:
return false;
};
@@ -632,7 +633,8 @@ ClassTemplateDecl::findPartialSpecialization(QualType T) {
ASTContext &Context = getASTContext();
for (ClassTemplatePartialSpecializationDecl &P :
getPartialSpecializations()) {
- if (Context.hasSameType(P.getInjectedSpecializationType(), T))
+ if (Context.hasSameType(P.getCanonicalInjectedSpecializationType(Context),
+ T))
return P.getMostRecentDecl();
}
@@ -651,28 +653,20 @@ ClassTemplateDecl::findPartialSpecInstantiatedFromMember(
return nullptr;
}
-QualType
-ClassTemplateDecl::getInjectedClassNameSpecialization() {
+CanQualType ClassTemplateDecl::getCanonicalInjectedSpecializationType(
+ const ASTContext &Ctx) const {
Common *CommonPtr = getCommonPtr();
- if (!CommonPtr->InjectedClassNameType.isNull())
- return CommonPtr->InjectedClassNameType;
-
- // C++0x [temp.dep.type]p2:
- // The template argument list of a primary template is a template argument
- // list in which the nth template argument has the value of the nth template
- // parameter of the class template. If the nth template parameter is a
- // template parameter pack (14.5.3), the nth template argument is a pack
- // expansion (14.5.3) whose pattern is the name of the template parameter
- // pack.
- ASTContext &Context = getASTContext();
- TemplateName Name = Context.getQualifiedTemplateName(
- /*NNS=*/nullptr, /*TemplateKeyword=*/false, TemplateName(this));
- auto TemplateArgs = getTemplateParameters()->getInjectedTemplateArgs(Context);
- CommonPtr->InjectedClassNameType =
- Context.getTemplateSpecializationType(Name,
- /*SpecifiedArgs=*/TemplateArgs,
- /*CanonicalArgs=*/{});
- return CommonPtr->InjectedClassNameType;
+
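+ // Compute lazily on first use and cache the result in the Common
+ // structure shared by all redeclarations of this template.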
+ if (CommonPtr->CanonInjectedTST.isNull()) {
+ SmallVector<TemplateArgument> CanonicalArgs(
+ getTemplateParameters()->getInjectedTemplateArgs(Ctx));
+ Ctx.canonicalizeTemplateArguments(CanonicalArgs);
+ CommonPtr->CanonInjectedTST =
+ CanQualType::CreateUnsafe(Ctx.getCanonicalTemplateSpecializationType(
+ TemplateName(const_cast<ClassTemplateDecl *>(getCanonicalDecl())),
+ CanonicalArgs));
+ }
+ return CommonPtr->CanonInjectedTST;
}
//===----------------------------------------------------------------------===//
@@ -736,15 +730,15 @@ void TemplateTypeParmDecl::setDefaultArgument(
}
unsigned TemplateTypeParmDecl::getDepth() const {
- return getTypeForDecl()->castAs<TemplateTypeParmType>()->getDepth();
+ return cast<TemplateTypeParmType>(getTypeForDecl())->getDepth();
}
unsigned TemplateTypeParmDecl::getIndex() const {
- return getTypeForDecl()->castAs<TemplateTypeParmType>()->getIndex();
+ return cast<TemplateTypeParmType>(getTypeForDecl())->getIndex();
}
bool TemplateTypeParmDecl::isParameterPack() const {
- return getTypeForDecl()->castAs<TemplateTypeParmType>()->isParameterPack();
+ return cast<TemplateTypeParmType>(getTypeForDecl())->isParameterPack();
}
void TemplateTypeParmDecl::setTypeConstraint(
@@ -998,7 +992,6 @@ ClassTemplateSpecializationDecl *ClassTemplateSpecializationDecl::Create(
auto *Result = new (Context, DC) ClassTemplateSpecializationDecl(
Context, ClassTemplateSpecialization, TK, DC, StartLoc, IdLoc,
SpecializedTemplate, Args, StrictPackMatch, PrevDecl);
- Result->setMayHaveOutOfDateDef(false);
// If the template decl is incomplete, copy the external lexical storage from
// the base template. This allows instantiations of incomplete types to
@@ -1008,17 +1001,14 @@ ClassTemplateSpecializationDecl *ClassTemplateSpecializationDecl::Create(
Result->setHasExternalLexicalStorage(
SpecializedTemplate->getTemplatedDecl()->hasExternalLexicalStorage());
- Context.getTypeDeclType(Result, PrevDecl);
return Result;
}
ClassTemplateSpecializationDecl *
ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
GlobalDeclID ID) {
- auto *Result =
- new (C, ID) ClassTemplateSpecializationDecl(C, ClassTemplateSpecialization);
- Result->setMayHaveOutOfDateDef(false);
- return Result;
+ return new (C, ID)
+ ClassTemplateSpecializationDecl(C, ClassTemplateSpecialization);
}
void ClassTemplateSpecializationDecl::getNameForDiagnostic(
@@ -1180,13 +1170,15 @@ ClassTemplatePartialSpecializationDecl::ClassTemplatePartialSpecializationDecl(
ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate, ArrayRef<TemplateArgument> Args,
+ CanQualType CanonInjectedTST,
ClassTemplatePartialSpecializationDecl *PrevDecl)
: ClassTemplateSpecializationDecl(
Context, ClassTemplatePartialSpecialization, TK, DC, StartLoc, IdLoc,
// Tracking StrictPackMatch for Partial
// Specializations is not needed.
SpecializedTemplate, Args, /*StrictPackMatch=*/false, PrevDecl),
- TemplateParams(Params), InstantiatedFromMember(nullptr, false) {
+ TemplateParams(Params), InstantiatedFromMember(nullptr, false),
+ CanonInjectedTST(CanonInjectedTST) {
if (AdoptTemplateParameterList(Params, this))
setInvalidDecl();
}
@@ -1196,24 +1188,31 @@ ClassTemplatePartialSpecializationDecl::Create(
ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate, ArrayRef<TemplateArgument> Args,
- QualType CanonInjectedType,
+ CanQualType CanonInjectedTST,
ClassTemplatePartialSpecializationDecl *PrevDecl) {
auto *Result = new (Context, DC) ClassTemplatePartialSpecializationDecl(
Context, TK, DC, StartLoc, IdLoc, Params, SpecializedTemplate, Args,
- PrevDecl);
+ CanonInjectedTST, PrevDecl);
Result->setSpecializationKind(TSK_ExplicitSpecialization);
- Result->setMayHaveOutOfDateDef(false);
-
- Context.getInjectedClassNameType(Result, CanonInjectedType);
return Result;
}
ClassTemplatePartialSpecializationDecl *
ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
GlobalDeclID ID) {
- auto *Result = new (C, ID) ClassTemplatePartialSpecializationDecl(C);
- Result->setMayHaveOutOfDateDef(false);
- return Result;
+ return new (C, ID) ClassTemplatePartialSpecializationDecl(C);
+}
+
+CanQualType
+ClassTemplatePartialSpecializationDecl::getCanonicalInjectedSpecializationType(
+ const ASTContext &Ctx) const {
+ if (CanonInjectedTST.isNull()) {
+ CanonInjectedTST =
+ CanQualType::CreateUnsafe(Ctx.getCanonicalTemplateSpecializationType(
+ TemplateName(getSpecializedTemplate()->getCanonicalDecl()),
+ getTemplateArgs().asArray()));
+ }
+ return CanonInjectedTST;
}
SourceRange ClassTemplatePartialSpecializationDecl::getSourceRange() const {
@@ -1600,6 +1599,16 @@ BuiltinTemplateDecl::BuiltinTemplateDecl(const ASTContext &C, DeclContext *DC,
createBuiltinTemplateParameterList(C, DC, BTK)),
BTK(BTK) {}
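+// Currently __builtin_dedup_pack is the only builtin template that produces
+// a parameter pack; all other builtin templates behave as type aliases.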
+bool BuiltinTemplateDecl::isPackProducingBuiltinTemplate() const {
+ return getBuiltinTemplateKind() == clang::BTK__builtin_dedup_pack;
+}
+
+bool clang::isPackProducingBuiltinTemplateName(TemplateName N) {
+ auto *T = dyn_cast_or_null<BuiltinTemplateDecl>(
+ N.getAsTemplateDecl(/*IgnoreDeduced=*/true));
+ return T && T->isPackProducingBuiltinTemplate();
+}
+
TemplateParamObjectDecl *TemplateParamObjectDecl::Create(const ASTContext &C,
QualType T,
const APValue &V) {
diff --git a/clang/lib/AST/DeclarationName.cpp b/clang/lib/AST/DeclarationName.cpp
index ae5fcf6..55f5a99 100644
--- a/clang/lib/AST/DeclarationName.cpp
+++ b/clang/lib/AST/DeclarationName.cpp
@@ -113,14 +113,15 @@ static void printCXXConstructorDestructorName(QualType ClassType,
PrintingPolicy Policy) {
// We know we're printing C++ here. Ensure we print types properly.
Policy.adjustForCPlusPlus();
+ Policy.SuppressScope = true;
- if (const RecordType *ClassRec = ClassType->getAs<RecordType>()) {
- ClassRec->getDecl()->printName(OS, Policy);
+ if (const RecordType *ClassRec = ClassType->getAsCanonical<RecordType>()) {
+ ClassRec->getOriginalDecl()->printName(OS, Policy);
return;
}
if (Policy.SuppressTemplateArgsInCXXConstructors) {
- if (auto *InjTy = ClassType->getAs<InjectedClassNameType>()) {
- InjTy->getDecl()->printName(OS, Policy);
+ if (auto *InjTy = ClassType->getAsCanonical<InjectedClassNameType>()) {
+ InjTy->getOriginalDecl()->printName(OS, Policy);
return;
}
}
@@ -184,7 +185,7 @@ void DeclarationName::print(raw_ostream &OS,
OS << "operator ";
QualType Type = getCXXNameType();
if (const RecordType *Rec = Type->getAs<RecordType>()) {
- OS << *Rec->getDecl();
+ OS << *Rec->getOriginalDecl();
return;
}
// We know we're printing C++ here, ensure we print 'bool' properly.
diff --git a/clang/lib/AST/DynamicRecursiveASTVisitor.cpp b/clang/lib/AST/DynamicRecursiveASTVisitor.cpp
index b478e7a..8821cd3 100644
--- a/clang/lib/AST/DynamicRecursiveASTVisitor.cpp
+++ b/clang/lib/AST/DynamicRecursiveASTVisitor.cpp
@@ -115,8 +115,12 @@ template <bool Const> struct Impl : RecursiveASTVisitor<Impl<Const>> {
bool TraverseAST(ASTContext &AST) { return Visitor.TraverseAST(AST); }
bool TraverseAttr(Attr *At) { return Visitor.TraverseAttr(At); }
bool TraverseDecl(Decl *D) { return Visitor.TraverseDecl(D); }
- bool TraverseType(QualType T) { return Visitor.TraverseType(T); }
- bool TraverseTypeLoc(TypeLoc TL) { return Visitor.TraverseTypeLoc(TL); }
+ bool TraverseType(QualType T, bool TraverseQualifier = true) {
+ return Visitor.TraverseType(T, TraverseQualifier);
+ }
+ bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true) {
+ return Visitor.TraverseTypeLoc(TL, TraverseQualifier);
+ }
bool TraverseStmt(Stmt *S) { return Visitor.TraverseStmt(S); }
bool TraverseConstructorInitializer(CXXCtorInitializer *Init) {
@@ -172,7 +176,7 @@ template <bool Const> struct Impl : RecursiveASTVisitor<Impl<Const>> {
return Visitor.TraverseLambdaCapture(LE, C, Init);
}
- bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier NNS) {
return Visitor.TraverseNestedNameSpecifier(NNS);
}
@@ -241,8 +245,8 @@ template <bool Const> struct Impl : RecursiveASTVisitor<Impl<Const>> {
// Types.
#define ABSTRACT_TYPE(CLASS, BASE)
#define TYPE(CLASS, BASE) \
- bool Traverse##CLASS##Type(CLASS##Type *T) { \
- return Visitor.Traverse##CLASS##Type(T); \
+ bool Traverse##CLASS##Type(CLASS##Type *T, bool TraverseQualifier) { \
+ return Visitor.Traverse##CLASS##Type(T, TraverseQualifier); \
}
#include "clang/AST/TypeNodes.inc"
@@ -255,8 +259,8 @@ template <bool Const> struct Impl : RecursiveASTVisitor<Impl<Const>> {
// TypeLocs.
#define ABSTRACT_TYPELOC(CLASS, BASE)
#define TYPELOC(CLASS, BASE) \
- bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL) { \
- return Visitor.Traverse##CLASS##TypeLoc(TL); \
+ bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL, bool TraverseQualifier) { \
+ return Visitor.Traverse##CLASS##TypeLoc(TL, TraverseQualifier); \
}
#include "clang/AST/TypeLocNodes.def"
@@ -297,7 +301,6 @@ FORWARD_TO_BASE(TraverseAttr, Attr, *)
FORWARD_TO_BASE(TraverseConstructorInitializer, CXXCtorInitializer, *)
FORWARD_TO_BASE(TraverseDecl, Decl, *)
FORWARD_TO_BASE(TraverseStmt, Stmt, *)
-FORWARD_TO_BASE(TraverseNestedNameSpecifier, NestedNameSpecifier, *)
FORWARD_TO_BASE(TraverseTemplateInstantiations, ClassTemplateDecl, *)
FORWARD_TO_BASE(TraverseTemplateInstantiations, VarTemplateDecl, *)
FORWARD_TO_BASE(TraverseTemplateInstantiations, FunctionTemplateDecl, *)
@@ -314,8 +317,22 @@ FORWARD_TO_BASE_EXACT(TraverseTemplateArgument, const TemplateArgument &)
FORWARD_TO_BASE_EXACT(TraverseTemplateArguments, ArrayRef<TemplateArgument>)
FORWARD_TO_BASE_EXACT(TraverseTemplateArgumentLoc, const TemplateArgumentLoc &)
FORWARD_TO_BASE_EXACT(TraverseTemplateName, TemplateName)
-FORWARD_TO_BASE_EXACT(TraverseType, QualType)
-FORWARD_TO_BASE_EXACT(TraverseTypeLoc, TypeLoc)
+FORWARD_TO_BASE_EXACT(TraverseNestedNameSpecifier, NestedNameSpecifier)
+
+template <bool Const>
+bool DynamicRecursiveASTVisitorBase<Const>::TraverseType(
+ QualType T, bool TraverseQualifier) {
+ return Impl<Const>(*this).RecursiveASTVisitor<Impl<Const>>::TraverseType(
+ T, TraverseQualifier);
+}
+
+template <bool Const>
+bool DynamicRecursiveASTVisitorBase<Const>::TraverseTypeLoc(
+ TypeLoc TL, bool TraverseQualifier) {
+ return Impl<Const>(*this).RecursiveASTVisitor<Impl<Const>>::TraverseTypeLoc(
+ TL, TraverseQualifier);
+}
+
FORWARD_TO_BASE_EXACT(TraverseTypeConstraint, const TypeConstraint *)
FORWARD_TO_BASE_EXACT(TraverseObjCProtocolLoc, ObjCProtocolLoc)
FORWARD_TO_BASE_EXACT(TraverseNestedNameSpecifierLoc, NestedNameSpecifierLoc)
@@ -354,13 +371,25 @@ bool DynamicRecursiveASTVisitorBase<Const>::dataTraverseNode(
// Declare Traverse*() and friends for all concrete Type classes.
#define ABSTRACT_TYPE(CLASS, BASE)
#define TYPE(CLASS, BASE) \
- FORWARD_TO_BASE(Traverse##CLASS##Type, CLASS##Type, *) \
+ template <bool Const> \
+ bool DynamicRecursiveASTVisitorBase<Const>::Traverse##CLASS##Type( \
+ MaybeConst<CLASS##Type> *T, bool TraverseQualifier) { \
+ return Impl<Const>(*this) \
+ .RecursiveASTVisitor<Impl<Const>>::Traverse##CLASS##Type( \
+ const_cast<CLASS##Type *>(T), TraverseQualifier); \
+ } \
FORWARD_TO_BASE(WalkUpFrom##CLASS##Type, CLASS##Type, *)
#include "clang/AST/TypeNodes.inc"
#define ABSTRACT_TYPELOC(CLASS, BASE)
#define TYPELOC(CLASS, BASE) \
- FORWARD_TO_BASE_EXACT(Traverse##CLASS##TypeLoc, CLASS##TypeLoc)
+ template <bool Const> \
+ bool DynamicRecursiveASTVisitorBase<Const>::Traverse##CLASS##TypeLoc( \
+ CLASS##TypeLoc TL, bool TraverseQualifier) { \
+ return Impl<Const>(*this) \
+ .RecursiveASTVisitor<Impl<Const>>::Traverse##CLASS##TypeLoc( \
+ TL, TraverseQualifier); \
+ }
#include "clang/AST/TypeLocNodes.def"
#define TYPELOC(CLASS, BASE) \
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index cd9672d..cdff160 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -74,9 +74,7 @@ const CXXRecordDecl *Expr::getBestDynamicClassType() const {
if (DerivedType->isDependentType())
return nullptr;
- const RecordType *Ty = DerivedType->castAs<RecordType>();
- Decl *D = Ty->getDecl();
- return cast<CXXRecordDecl>(D);
+ return DerivedType->castAsCXXRecordDecl();
}
const Expr *Expr::skipRValueSubobjectAdjustments(
@@ -91,8 +89,7 @@ const Expr *Expr::skipRValueSubobjectAdjustments(
CE->getCastKind() == CK_UncheckedDerivedToBase) &&
E->getType()->isRecordType()) {
E = CE->getSubExpr();
- const auto *Derived =
- cast<CXXRecordDecl>(E->getType()->castAs<RecordType>()->getDecl());
+ const auto *Derived = E->getType()->castAsCXXRecordDecl();
Adjustments.push_back(SubobjectAdjustment(CE, Derived));
continue;
}
@@ -268,7 +265,7 @@ QualType Expr::getEnumCoercedType(const ASTContext &Ctx) const {
if (const auto *ECD = getEnumConstantDecl()) {
const auto *ED = cast<EnumDecl>(ECD->getDeclContext());
if (ED->isCompleteDefinition())
- return Ctx.getTypeDeclType(ED);
+ return Ctx.getCanonicalTagType(ED);
}
return getType();
}
@@ -2031,8 +2028,7 @@ CXXBaseSpecifier **CastExpr::path_buffer() {
const FieldDecl *CastExpr::getTargetFieldForToUnionCast(QualType unionType,
QualType opType) {
- auto RD = unionType->castAs<RecordType>()->getDecl();
- return getTargetFieldForToUnionCast(RD, opType);
+ return getTargetFieldForToUnionCast(unionType->castAsRecordDecl(), opType);
}
const FieldDecl *CastExpr::getTargetFieldForToUnionCast(const RecordDecl *RD,
@@ -2399,6 +2395,7 @@ EmbedExpr::EmbedExpr(const ASTContext &Ctx, SourceLocation Loc,
setDependence(ExprDependence::None);
FakeChildNode = IntegerLiteral::Create(
Ctx, llvm::APInt::getZero(Ctx.getTypeSize(getType())), getType(), Loc);
+ assert(getType()->isSignedIntegerType() && "IntTy should be signed");
}
InitListExpr::InitListExpr(const ASTContext &C, SourceLocation lbraceloc,
@@ -2774,23 +2771,22 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
case UserDefinedLiteralClass: {
// If this is a direct call, get the callee.
const CallExpr *CE = cast<CallExpr>(this);
- if (const Decl *FD = CE->getCalleeDecl()) {
- // If the callee has attribute pure, const, or warn_unused_result, warn
- // about it. void foo() { strlen("bar"); } should warn.
- //
- // Note: If new cases are added here, DiagnoseUnusedExprResult should be
- // updated to match for QoI.
- if (CE->hasUnusedResultAttr(Ctx) ||
- FD->hasAttr<PureAttr>() || FD->hasAttr<ConstAttr>()) {
- WarnE = this;
- Loc = CE->getCallee()->getBeginLoc();
- R1 = CE->getCallee()->getSourceRange();
-
- if (unsigned NumArgs = CE->getNumArgs())
- R2 = SourceRange(CE->getArg(0)->getBeginLoc(),
- CE->getArg(NumArgs - 1)->getEndLoc());
- return true;
- }
+ // If the callee has attribute pure, const, or warn_unused_result, warn
+ // about it. void foo() { strlen("bar"); } should warn.
+ // Note: If new cases are added here, DiagnoseUnusedExprResult should be
+ // updated to match for QoI.
+ const Decl *FD = CE->getCalleeDecl();
+ bool PureOrConst =
+ FD && (FD->hasAttr<PureAttr>() || FD->hasAttr<ConstAttr>());
+ if (CE->hasUnusedResultAttr(Ctx) || PureOrConst) {
+ WarnE = this;
+ Loc = getBeginLoc();
+ R1 = getSourceRange();
+
+ if (unsigned NumArgs = CE->getNumArgs())
+ R2 = SourceRange(CE->getArg(0)->getBeginLoc(),
+ CE->getArg(NumArgs - 1)->getEndLoc());
+ return true;
}
return false;
}
@@ -2803,32 +2799,20 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
case CXXTemporaryObjectExprClass:
case CXXConstructExprClass: {
- if (const CXXRecordDecl *Type = getType()->getAsCXXRecordDecl()) {
- const auto *WarnURAttr = Type->getAttr<WarnUnusedResultAttr>();
- if (Type->hasAttr<WarnUnusedAttr>() ||
- (WarnURAttr && WarnURAttr->IsCXX11NoDiscard())) {
- WarnE = this;
- Loc = getBeginLoc();
- R1 = getSourceRange();
- return true;
- }
- }
-
const auto *CE = cast<CXXConstructExpr>(this);
- if (const CXXConstructorDecl *Ctor = CE->getConstructor()) {
- const auto *WarnURAttr = Ctor->getAttr<WarnUnusedResultAttr>();
- if (WarnURAttr && WarnURAttr->IsCXX11NoDiscard()) {
- WarnE = this;
- Loc = getBeginLoc();
- R1 = getSourceRange();
-
- if (unsigned NumArgs = CE->getNumArgs())
- R2 = SourceRange(CE->getArg(0)->getBeginLoc(),
- CE->getArg(NumArgs - 1)->getEndLoc());
- return true;
- }
- }
+ const CXXRecordDecl *Type = getType()->getAsCXXRecordDecl();
+ if ((Type && Type->hasAttr<WarnUnusedAttr>()) ||
+ CE->hasUnusedResultAttr(Ctx)) {
+ WarnE = this;
+ Loc = getBeginLoc();
+ R1 = getSourceRange();
+
+ if (unsigned NumArgs = CE->getNumArgs())
+ R2 = SourceRange(CE->getArg(0)->getBeginLoc(),
+ CE->getArg(NumArgs - 1)->getEndLoc());
+ return true;
+ }
return false;
}
@@ -3221,7 +3205,7 @@ static const Expr *skipTemporaryBindingsNoOpCastsAndParens(const Expr *E) {
/// isTemporaryObject - Determines if this expression produces a
/// temporary of the given class type.
bool Expr::isTemporaryObject(ASTContext &C, const CXXRecordDecl *TempTy) const {
- if (!C.hasSameUnqualifiedType(getType(), C.getTypeDeclType(TempTy)))
+ if (!C.hasSameUnqualifiedType(getType(), C.getCanonicalTagType(TempTy)))
return false;
const Expr *E = skipTemporaryBindingsNoOpCastsAndParens(this);
@@ -3407,7 +3391,7 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
if (ILE->getType()->isRecordType()) {
unsigned ElementNo = 0;
- RecordDecl *RD = ILE->getType()->castAs<RecordType>()->getDecl();
+ auto *RD = ILE->getType()->castAsRecordDecl();
// In C++17, bases were added to the list of members used by aggregate
// initialization.
@@ -3541,6 +3525,56 @@ bool CallExpr::isBuiltinAssumeFalse(const ASTContext &Ctx) const {
Arg->EvaluateAsBooleanCondition(ArgVal, Ctx) && !ArgVal;
}
+const AllocSizeAttr *CallExpr::getCalleeAllocSizeAttr() const {
+ if (const FunctionDecl *DirectCallee = getDirectCallee())
+ return DirectCallee->getAttr<AllocSizeAttr>();
+ if (const Decl *IndirectCallee = getCalleeDecl())
+ return IndirectCallee->getAttr<AllocSizeAttr>();
+ return nullptr;
+}
+
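+// Evaluates the number of bytes returned by a call to an alloc_size
+// function: the element-size argument, multiplied by the element-count
+// argument when the attribute names one. Returns std::nullopt if an
+// argument cannot be evaluated as a non-negative size_t-sized integer or
+// if the multiplication overflows.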
+std::optional<llvm::APInt>
+CallExpr::evaluateBytesReturnedByAllocSizeCall(const ASTContext &Ctx) const {
+ const AllocSizeAttr *AllocSize = getCalleeAllocSizeAttr();
+
+ assert(AllocSize && AllocSize->getElemSizeParam().isValid());
+ unsigned SizeArgNo = AllocSize->getElemSizeParam().getASTIndex();
+ unsigned BitsInSizeT = Ctx.getTypeSize(Ctx.getSizeType());
+ if (getNumArgs() <= SizeArgNo)
+ return std::nullopt;
+
+ auto EvaluateAsSizeT = [&](const Expr *E, llvm::APSInt &Into) {
+ Expr::EvalResult ExprResult;
+ if (E->isValueDependent() ||
+ !E->EvaluateAsInt(ExprResult, Ctx, Expr::SE_AllowSideEffects))
+ return false;
+ Into = ExprResult.Val.getInt();
+ if (Into.isNegative() || !Into.isIntN(BitsInSizeT))
+ return false;
+ Into = Into.zext(BitsInSizeT);
+ return true;
+ };
+
+ llvm::APSInt SizeOfElem;
+ if (!EvaluateAsSizeT(getArg(SizeArgNo), SizeOfElem))
+ return std::nullopt;
+
+ if (!AllocSize->getNumElemsParam().isValid())
+ return SizeOfElem;
+
+ llvm::APSInt NumberOfElems;
+ unsigned NumArgNo = AllocSize->getNumElemsParam().getASTIndex();
+ if (!EvaluateAsSizeT(getArg(NumArgNo), NumberOfElems))
+ return std::nullopt;
+
+ bool Overflow;
+ llvm::APInt BytesAvailable = SizeOfElem.umul_ov(NumberOfElems, Overflow);
+ if (Overflow)
+ return std::nullopt;
+
+ return BytesAvailable;
+}
+
bool CallExpr::isCallToStdMove() const {
return getBuiltinCallee() == Builtin::BImove;
}
@@ -4050,8 +4084,10 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
return NPCK_CXX11_nullptr;
if (const RecordType *UT = getType()->getAsUnionType())
- if (!Ctx.getLangOpts().CPlusPlus11 &&
- UT && UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ if (!Ctx.getLangOpts().CPlusPlus11 && UT &&
+ UT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<TransparentUnionAttr>())
if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(this)){
const Expr *InitExpr = CLE->getInitializer();
if (const InitListExpr *ILE = dyn_cast<InitListExpr>(InitExpr))
diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp
index a099e97..97ae4a0 100644
--- a/clang/lib/AST/ExprCXX.cpp
+++ b/clang/lib/AST/ExprCXX.cpp
@@ -1319,7 +1319,7 @@ LambdaExpr *LambdaExpr::Create(const ASTContext &Context, CXXRecordDecl *Class,
bool ContainsUnexpandedParameterPack) {
// Determine the type of the expression (i.e., the type of the
// function object we're creating).
- QualType T = Context.getTypeDeclType(Class);
+ CanQualType T = Context.getCanonicalTagType(Class);
unsigned Size = totalSizeToAlloc<Stmt *>(CaptureInits.size() + 1);
void *Mem = Context.Allocate(Size);
@@ -1687,10 +1687,9 @@ CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() {
// It can't be dependent: after all, we were actually able to do the
// lookup.
CXXRecordDecl *Record = nullptr;
- auto *NNS = getQualifier();
- if (NNS && NNS->getKind() != NestedNameSpecifier::Super) {
- const Type *T = getQualifier()->getAsType();
- assert(T && "qualifier in member expression does not name type");
+ if (NestedNameSpecifier Qualifier = getQualifier();
+ Qualifier.getKind() == NestedNameSpecifier::Kind::Type) {
+ const Type *T = getQualifier().getAsType();
Record = T->getAsCXXRecordDecl();
assert(Record && "qualifier in member expression does not name record");
}
diff --git a/clang/lib/AST/ExprConcepts.cpp b/clang/lib/AST/ExprConcepts.cpp
index ac0e566..36f910d 100644
--- a/clang/lib/AST/ExprConcepts.cpp
+++ b/clang/lib/AST/ExprConcepts.cpp
@@ -41,10 +41,10 @@ ConceptSpecializationExpr::ConceptSpecializationExpr(
assert(!Loc->getNestedNameSpecifierLoc() ||
(!Loc->getNestedNameSpecifierLoc()
.getNestedNameSpecifier()
- ->isInstantiationDependent() &&
+ .isInstantiationDependent() &&
!Loc->getNestedNameSpecifierLoc()
.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()));
+ .containsUnexpandedParameterPack()));
assert((!isValueDependent() || isInstantiationDependent()) &&
"should not be value-dependent");
}
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 3679327..b4f1e76 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -114,15 +114,6 @@ namespace {
return Ctx.getLValueReferenceType(E->getType());
}
- /// Given a CallExpr, try to get the alloc_size attribute. May return null.
- static const AllocSizeAttr *getAllocSizeAttr(const CallExpr *CE) {
- if (const FunctionDecl *DirectCallee = CE->getDirectCallee())
- return DirectCallee->getAttr<AllocSizeAttr>();
- if (const Decl *IndirectCallee = CE->getCalleeDecl())
- return IndirectCallee->getAttr<AllocSizeAttr>();
- return nullptr;
- }
-
/// Attempts to unwrap a CallExpr (with an alloc_size attribute) from an Expr.
/// This will look through a single cast.
///
@@ -142,7 +133,7 @@ namespace {
E = Cast->getSubExpr()->IgnoreParens();
if (const auto *CE = dyn_cast<CallExpr>(E))
- return getAllocSizeAttr(CE) ? CE : nullptr;
+ return CE->getCalleeAllocSizeAttr() ? CE : nullptr;
return nullptr;
}
@@ -401,7 +392,7 @@ namespace {
assert(!Invalid && "invalid designator has no subobject type");
return MostDerivedPathLength == Entries.size()
? MostDerivedType
- : Ctx.getRecordType(getAsBaseClass(Entries.back()));
+ : Ctx.getCanonicalTagType(getAsBaseClass(Entries.back()));
}
/// Update this designator to refer to the first element within this array.
@@ -2623,7 +2614,7 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
Value.getUnionValue(), Kind, Value.getUnionField(), CheckedTemps);
}
if (Value.isStruct()) {
- RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
+ auto *RD = Type->castAsRecordDecl();
if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
unsigned BaseIndex = 0;
for (const CXXBaseSpecifier &BS : CD->bases()) {
@@ -4109,7 +4100,8 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
}
// Next subobject is a class, struct or union field.
- RecordDecl *RD = ObjType->castAs<RecordType>()->getDecl();
+ RecordDecl *RD =
+ ObjType->castAsCanonical<RecordType>()->getOriginalDecl();
if (RD->isUnion()) {
const FieldDecl *UnionField = O->getUnionField();
if (!UnionField ||
@@ -4144,7 +4136,7 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
const CXXRecordDecl *Base = getAsBaseClass(Sub.Entries[I]);
O = &O->getStructBase(getBaseIndex(Derived, Base));
- ObjType = getSubobjectType(ObjType, Info.Ctx.getRecordType(Base));
+ ObjType = getSubobjectType(ObjType, Info.Ctx.getCanonicalTagType(Base));
}
}
}
@@ -6363,7 +6355,7 @@ static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E,
const CXXRecordDecl *C = E->getTypeAsWritten()->getPointeeCXXRecordDecl();
assert(C && "dynamic_cast target is not void pointer nor class");
- CanQualType CQT = Info.Ctx.getCanonicalType(Info.Ctx.getRecordType(C));
+ CanQualType CQT = Info.Ctx.getCanonicalTagType(C);
auto RuntimeCheckFailed = [&] (CXXBasePaths *Paths) {
// C++ [expr.dynamic.cast]p9:
@@ -6389,7 +6381,7 @@ static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E,
}
Info.FFDiag(E, diag::note_constexpr_dynamic_cast_to_reference_failed)
<< DiagKind << Ptr.Designator.getType(Info.Ctx)
- << Info.Ctx.getRecordType(DynType->Type)
+ << Info.Ctx.getCanonicalTagType(DynType->Type)
<< E->getType().getUnqualifiedType();
return false;
};
@@ -6886,8 +6878,8 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
// FIXME: This immediately starts the lifetime of all members of
// an anonymous struct. It would be preferable to strictly start
// member lifetime in initialization order.
- Success &=
- handleDefaultInitValue(Info.Ctx.getRecordType(CD), *Value);
+ Success &= handleDefaultInitValue(Info.Ctx.getCanonicalTagType(CD),
+ *Value);
}
// Store Subobject as its parent before updating it for the last element
// in the chain.
@@ -7794,7 +7786,8 @@ class BufferToAPValueConverter {
}
std::optional<APValue> visit(const EnumType *Ty, CharUnits Offset) {
- QualType RepresentationType = Ty->getDecl()->getIntegerType();
+ QualType RepresentationType =
+ Ty->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
assert(!RepresentationType.isNull() &&
"enum forward decl should be caught by Sema");
const auto *AsBuiltin =
@@ -7982,8 +7975,9 @@ static bool checkBitCastConstexprEligibilityType(SourceLocation Loc,
// so its layout is unspecified. For now, we'll simply treat these cases
// as unsupported (this should only be possible with OpenCL bool vectors
// whose element count isn't a multiple of the byte size).
- Info->FFDiag(Loc, diag::note_constexpr_bit_cast_invalid_vector)
- << QualType(VTy, 0) << EltSize << NElts << Ctx.getCharWidth();
+ if (Info)
+ Info->FFDiag(Loc, diag::note_constexpr_bit_cast_invalid_vector)
+ << QualType(VTy, 0) << EltSize << NElts << Ctx.getCharWidth();
return false;
}
@@ -7992,8 +7986,9 @@ static bool checkBitCastConstexprEligibilityType(SourceLocation Loc,
// The layout for x86_fp80 vectors seems to be handled very inconsistently
// by both clang and LLVM, so for now we won't allow bit_casts involving
// it in a constexpr context.
- Info->FFDiag(Loc, diag::note_constexpr_bit_cast_unsupported_type)
- << EltTy;
+ if (Info)
+ Info->FFDiag(Loc, diag::note_constexpr_bit_cast_unsupported_type)
+ << EltTy;
return false;
}
}
@@ -8528,7 +8523,7 @@ public:
if (auto *DD = dyn_cast<CXXDestructorDecl>(FD)) {
assert(This && "no 'this' pointer for destructor call");
return HandleDestruction(Info, E, *This,
- Info.Ctx.getRecordType(DD->getParent())) &&
+ Info.Ctx.getCanonicalTagType(DD->getParent())) &&
CallScope.destroy();
}
@@ -8589,8 +8584,9 @@ public:
const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
if (!FD) return Error(E);
assert(!FD->getType()->isReferenceType() && "prvalue reference?");
- assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() ==
- FD->getParent()->getCanonicalDecl() && "record / field mismatch");
+ assert(BaseTy->castAsCanonical<RecordType>()->getOriginalDecl() ==
+ FD->getParent()->getCanonicalDecl() &&
+ "record / field mismatch");
// Note: there is no lvalue base here. But this case should only ever
// happen in C or in C++98, where we cannot be evaluating a constexpr
@@ -8817,8 +8813,9 @@ public:
const ValueDecl *MD = E->getMemberDecl();
if (const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl())) {
- assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() ==
- FD->getParent()->getCanonicalDecl() && "record / field mismatch");
+ assert(BaseTy->castAsCanonical<RecordType>()->getOriginalDecl() ==
+ FD->getParent()->getCanonicalDecl() &&
+ "record / field mismatch");
(void)BaseTy;
if (!HandleLValueMember(this->Info, E, Result, FD))
return false;
@@ -9257,8 +9254,8 @@ bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
if (!DynType)
return false;
- TypeInfo =
- TypeInfoLValue(Info.Ctx.getRecordType(DynType->Type).getTypePtr());
+ TypeInfo = TypeInfoLValue(
+ Info.Ctx.getCanonicalTagType(DynType->Type).getTypePtr());
}
return Success(APValue::LValueBase::getTypeInfo(TypeInfo, E->getType()));
@@ -9460,57 +9457,6 @@ bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
// Pointer Evaluation
//===----------------------------------------------------------------------===//
-/// Attempts to compute the number of bytes available at the pointer
-/// returned by a function with the alloc_size attribute. Returns true if we
-/// were successful. Places an unsigned number into `Result`.
-///
-/// This expects the given CallExpr to be a call to a function with an
-/// alloc_size attribute.
-static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
- const CallExpr *Call,
- llvm::APInt &Result) {
- const AllocSizeAttr *AllocSize = getAllocSizeAttr(Call);
-
- assert(AllocSize && AllocSize->getElemSizeParam().isValid());
- unsigned SizeArgNo = AllocSize->getElemSizeParam().getASTIndex();
- unsigned BitsInSizeT = Ctx.getTypeSize(Ctx.getSizeType());
- if (Call->getNumArgs() <= SizeArgNo)
- return false;
-
- auto EvaluateAsSizeT = [&](const Expr *E, APSInt &Into) {
- Expr::EvalResult ExprResult;
- if (!E->EvaluateAsInt(ExprResult, Ctx, Expr::SE_AllowSideEffects))
- return false;
- Into = ExprResult.Val.getInt();
- if (Into.isNegative() || !Into.isIntN(BitsInSizeT))
- return false;
- Into = Into.zext(BitsInSizeT);
- return true;
- };
-
- APSInt SizeOfElem;
- if (!EvaluateAsSizeT(Call->getArg(SizeArgNo), SizeOfElem))
- return false;
-
- if (!AllocSize->getNumElemsParam().isValid()) {
- Result = std::move(SizeOfElem);
- return true;
- }
-
- APSInt NumberOfElems;
- unsigned NumArgNo = AllocSize->getNumElemsParam().getASTIndex();
- if (!EvaluateAsSizeT(Call->getArg(NumArgNo), NumberOfElems))
- return false;
-
- bool Overflow;
- llvm::APInt BytesAvailable = SizeOfElem.umul_ov(NumberOfElems, Overflow);
- if (Overflow)
- return false;
-
- Result = std::move(BytesAvailable);
- return true;
-}
-
/// Convenience function. LVal's base must be a call to an alloc_size
/// function.
static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
@@ -9520,7 +9466,13 @@ static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
"Can't get the size of a non alloc_size function");
const auto *Base = LVal.getLValueBase().get<const Expr *>();
const CallExpr *CE = tryUnwrapAllocSizeCall(Base);
- return getBytesReturnedByAllocSizeCall(Ctx, CE, Result);
+ std::optional<llvm::APInt> Size =
+ CE->evaluateBytesReturnedByAllocSizeCall(Ctx);
+ if (!Size)
+ return false;
+
+ Result = std::move(*Size);
+ return true;
}
/// Attempts to evaluate the given LValueBase as the result of a call to
@@ -10011,7 +9963,7 @@ bool PointerExprEvaluator::visitNonBuiltinCallExpr(const CallExpr *E) {
if (ExprEvaluatorBaseTy::VisitCallExpr(E))
return true;
- if (!(InvalidBaseOK && getAllocSizeAttr(E)))
+ if (!(InvalidBaseOK && E->getCalleeAllocSizeAttr()))
return false;
Result.setInvalid(E);
@@ -10818,7 +10770,7 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
}
bool RecordExprEvaluator::ZeroInitialization(const Expr *E, QualType T) {
- const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
+ const auto *RD = T->castAsRecordDecl();
if (RD->isInvalidDecl()) return false;
if (RD->isUnion()) {
// C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
@@ -10887,8 +10839,7 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
bool RecordExprEvaluator::VisitCXXParenListOrInitListExpr(
const Expr *ExprToVisit, ArrayRef<Expr *> Args) {
- const RecordDecl *RD =
- ExprToVisit->getType()->castAs<RecordType>()->getDecl();
+ const auto *RD = ExprToVisit->getType()->castAsRecordDecl();
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
@@ -11036,10 +10987,6 @@ bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
bool ZeroInit = E->requiresZeroInitialization();
if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
- // If we've already performed zero-initialization, we're already done.
- if (Result.hasValue())
- return true;
-
if (ZeroInit)
return ZeroInitialization(E, T);
@@ -11116,7 +11063,7 @@ bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
Result = APValue(APValue::UninitStruct(), 0, 2);
Array.moveInto(Result.getStructField(0));
- RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
+ auto *Record = E->getType()->castAsRecordDecl();
RecordDecl::field_iterator Field = Record->field_begin();
assert(Field != Record->field_end() &&
Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
@@ -11302,6 +11249,24 @@ static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) {
return VectorExprEvaluator(Info, Result).Visit(E);
}
+static llvm::APInt ConvertBoolVectorToInt(const APValue &Val) {
+ assert(Val.isVector() && "expected vector APValue");
+ unsigned NumElts = Val.getVectorLength();
+
+ // Each element is one bit, so create an integer with NumElts bits.
+ llvm::APInt Result(NumElts, 0);
+
+ for (unsigned I = 0; I < NumElts; ++I) {
+ const APValue &Elt = Val.getVectorElt(I);
+ assert(Elt.isInt() && "expected integer element in bool vector");
+
+ if (Elt.getInt().getBoolValue())
+ Result.setBit(I);
+ }
+
+ return Result;
+}
+
bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) {
const VectorType *VTy = E->getType()->castAs<VectorType>();
unsigned NElts = VTy->getNumElements();
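
Note: ConvertBoolVectorToInt packs one bit per element, element i into bit i. For example (illustrative):

    // {true, false, true, true}  ==>  APInt(4, 0b1101)
    // so Val.popcount() == 3 and Val.countr_zero() == 0, which is what
    // the clz/ctz/popcount cases further below rely on.
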
@@ -11627,30 +11592,424 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
+ case Builtin::BI__builtin_elementwise_abs: {
+ APValue Source;
+ if (!EvaluateAsRValue(Info, E->getArg(0), Source))
+ return false;
+
+ QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+ unsigned SourceLen = Source.getVectorLength();
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(SourceLen);
+
+ for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
+ APValue CurrentEle = Source.getVectorElt(EltNum);
+ APValue Val = DestEltTy->isFloatingType()
+ ? APValue(llvm::abs(CurrentEle.getFloat()))
+ : APValue(APSInt(
+ CurrentEle.getInt().abs(),
+ DestEltTy->isUnsignedIntegerOrEnumerationType()));
+ ResultElements.push_back(Val);
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+
case Builtin::BI__builtin_elementwise_add_sat:
- case Builtin::BI__builtin_elementwise_sub_sat: {
+ case Builtin::BI__builtin_elementwise_sub_sat:
+ case clang::X86::BI__builtin_ia32_pmulhuw128:
+ case clang::X86::BI__builtin_ia32_pmulhuw256:
+ case clang::X86::BI__builtin_ia32_pmulhuw512:
+ case clang::X86::BI__builtin_ia32_pmulhw128:
+ case clang::X86::BI__builtin_ia32_pmulhw256:
+ case clang::X86::BI__builtin_ia32_pmulhw512:
+ case clang::X86::BI__builtin_ia32_psllv2di:
+ case clang::X86::BI__builtin_ia32_psllv4di:
+ case clang::X86::BI__builtin_ia32_psllv4si:
+ case clang::X86::BI__builtin_ia32_psllv8si:
+ case clang::X86::BI__builtin_ia32_psrav4si:
+ case clang::X86::BI__builtin_ia32_psrav8si:
+ case clang::X86::BI__builtin_ia32_psrlv2di:
+ case clang::X86::BI__builtin_ia32_psrlv4di:
+ case clang::X86::BI__builtin_ia32_psrlv4si:
+ case clang::X86::BI__builtin_ia32_psrlv8si:
+
+ case clang::X86::BI__builtin_ia32_psllwi128:
+ case clang::X86::BI__builtin_ia32_pslldi128:
+ case clang::X86::BI__builtin_ia32_psllqi128:
+ case clang::X86::BI__builtin_ia32_psllwi256:
+ case clang::X86::BI__builtin_ia32_pslldi256:
+ case clang::X86::BI__builtin_ia32_psllqi256:
+ case clang::X86::BI__builtin_ia32_psllwi512:
+ case clang::X86::BI__builtin_ia32_pslldi512:
+ case clang::X86::BI__builtin_ia32_psllqi512:
+
+ case clang::X86::BI__builtin_ia32_psrlwi128:
+ case clang::X86::BI__builtin_ia32_psrldi128:
+ case clang::X86::BI__builtin_ia32_psrlqi128:
+ case clang::X86::BI__builtin_ia32_psrlwi256:
+ case clang::X86::BI__builtin_ia32_psrldi256:
+ case clang::X86::BI__builtin_ia32_psrlqi256:
+ case clang::X86::BI__builtin_ia32_psrlwi512:
+ case clang::X86::BI__builtin_ia32_psrldi512:
+ case clang::X86::BI__builtin_ia32_psrlqi512:
+
+ case clang::X86::BI__builtin_ia32_psrawi128:
+ case clang::X86::BI__builtin_ia32_psradi128:
+ case clang::X86::BI__builtin_ia32_psraqi128:
+ case clang::X86::BI__builtin_ia32_psrawi256:
+ case clang::X86::BI__builtin_ia32_psradi256:
+ case clang::X86::BI__builtin_ia32_psraqi256:
+ case clang::X86::BI__builtin_ia32_psrawi512:
+ case clang::X86::BI__builtin_ia32_psradi512:
+ case clang::X86::BI__builtin_ia32_psraqi512: {
+
APValue SourceLHS, SourceRHS;
if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
!EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
return false;
QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+ bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
unsigned SourceLen = SourceLHS.getVectorLength();
SmallVector<APValue, 4> ResultElements;
ResultElements.reserve(SourceLen);
for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
APSInt LHS = SourceLHS.getVectorElt(EltNum).getInt();
+
+ if (SourceRHS.isInt()) {
+ const unsigned LaneBitWidth = LHS.getBitWidth();
+ const unsigned ShiftAmount = SourceRHS.getInt().getZExtValue();
+
+ switch (E->getBuiltinCallee()) {
+ case clang::X86::BI__builtin_ia32_psllwi128:
+ case clang::X86::BI__builtin_ia32_psllwi256:
+ case clang::X86::BI__builtin_ia32_psllwi512:
+ case clang::X86::BI__builtin_ia32_pslldi128:
+ case clang::X86::BI__builtin_ia32_pslldi256:
+ case clang::X86::BI__builtin_ia32_pslldi512:
+ case clang::X86::BI__builtin_ia32_psllqi128:
+ case clang::X86::BI__builtin_ia32_psllqi256:
+ case clang::X86::BI__builtin_ia32_psllqi512:
+ if (ShiftAmount >= LaneBitWidth) {
+ ResultElements.push_back(
+ APValue(APSInt(APInt::getZero(LaneBitWidth), DestUnsigned)));
+ } else {
+ ResultElements.push_back(
+ APValue(APSInt(LHS.shl(ShiftAmount), DestUnsigned)));
+ }
+ break;
+ case clang::X86::BI__builtin_ia32_psrlwi128:
+ case clang::X86::BI__builtin_ia32_psrlwi256:
+ case clang::X86::BI__builtin_ia32_psrlwi512:
+ case clang::X86::BI__builtin_ia32_psrldi128:
+ case clang::X86::BI__builtin_ia32_psrldi256:
+ case clang::X86::BI__builtin_ia32_psrldi512:
+ case clang::X86::BI__builtin_ia32_psrlqi128:
+ case clang::X86::BI__builtin_ia32_psrlqi256:
+ case clang::X86::BI__builtin_ia32_psrlqi512:
+ if (ShiftAmount >= LaneBitWidth) {
+ ResultElements.push_back(
+ APValue(APSInt(APInt::getZero(LaneBitWidth), DestUnsigned)));
+ } else {
+ ResultElements.push_back(
+ APValue(APSInt(LHS.lshr(ShiftAmount), DestUnsigned)));
+ }
+ break;
+ case clang::X86::BI__builtin_ia32_psrawi128:
+ case clang::X86::BI__builtin_ia32_psrawi256:
+ case clang::X86::BI__builtin_ia32_psrawi512:
+ case clang::X86::BI__builtin_ia32_psradi128:
+ case clang::X86::BI__builtin_ia32_psradi256:
+ case clang::X86::BI__builtin_ia32_psradi512:
+ case clang::X86::BI__builtin_ia32_psraqi128:
+ case clang::X86::BI__builtin_ia32_psraqi256:
+ case clang::X86::BI__builtin_ia32_psraqi512:
+ ResultElements.push_back(
+ APValue(APSInt(LHS.ashr(std::min(ShiftAmount, LaneBitWidth - 1)),
+ DestUnsigned)));
+ break;
+ default:
+ llvm_unreachable("Unexpected builtin callee");
+ }
+ continue;
+ }
APSInt RHS = SourceRHS.getVectorElt(EltNum).getInt();
switch (E->getBuiltinCallee()) {
case Builtin::BI__builtin_elementwise_add_sat:
ResultElements.push_back(APValue(
APSInt(LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS),
- DestEltTy->isUnsignedIntegerOrEnumerationType())));
+ DestUnsigned)));
break;
case Builtin::BI__builtin_elementwise_sub_sat:
ResultElements.push_back(APValue(
APSInt(LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS),
+ DestUnsigned)));
+ break;
+ case clang::X86::BI__builtin_ia32_pmulhuw128:
+ case clang::X86::BI__builtin_ia32_pmulhuw256:
+ case clang::X86::BI__builtin_ia32_pmulhuw512:
+ ResultElements.push_back(APValue(APSInt(llvm::APIntOps::mulhu(LHS, RHS),
+ /*isUnsigned=*/true)));
+ break;
+ case clang::X86::BI__builtin_ia32_pmulhw128:
+ case clang::X86::BI__builtin_ia32_pmulhw256:
+ case clang::X86::BI__builtin_ia32_pmulhw512:
+ ResultElements.push_back(APValue(APSInt(llvm::APIntOps::mulhs(LHS, RHS),
+ /*isUnsigned=*/false)));
+ break;
+ case clang::X86::BI__builtin_ia32_psllv2di:
+ case clang::X86::BI__builtin_ia32_psllv4di:
+ case clang::X86::BI__builtin_ia32_psllv4si:
+ case clang::X86::BI__builtin_ia32_psllv8si:
+ if (RHS.uge(RHS.getBitWidth())) {
+ ResultElements.push_back(
+ APValue(APSInt(APInt::getZero(RHS.getBitWidth()), DestUnsigned)));
+ break;
+ }
+ ResultElements.push_back(
+ APValue(APSInt(LHS.shl(RHS.getZExtValue()), DestUnsigned)));
+ break;
+ case clang::X86::BI__builtin_ia32_psrav4si:
+ case clang::X86::BI__builtin_ia32_psrav8si:
+ if (RHS.uge(RHS.getBitWidth())) {
+ ResultElements.push_back(
+ APValue(APSInt(LHS.ashr(RHS.getBitWidth() - 1), DestUnsigned)));
+ break;
+ }
+ ResultElements.push_back(
+ APValue(APSInt(LHS.ashr(RHS.getZExtValue()), DestUnsigned)));
+ break;
+ case clang::X86::BI__builtin_ia32_psrlv2di:
+ case clang::X86::BI__builtin_ia32_psrlv4di:
+ case clang::X86::BI__builtin_ia32_psrlv4si:
+ case clang::X86::BI__builtin_ia32_psrlv8si:
+ if (RHS.uge(RHS.getBitWidth())) {
+ ResultElements.push_back(
+ APValue(APSInt(APInt::getZero(RHS.getBitWidth()), DestUnsigned)));
+ break;
+ }
+ ResultElements.push_back(
+ APValue(APSInt(LHS.lshr(RHS.getZExtValue()), DestUnsigned)));
+ break;
+ }
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+ case clang::X86::BI__builtin_ia32_pmuldq128:
+ case clang::X86::BI__builtin_ia32_pmuldq256:
+ case clang::X86::BI__builtin_ia32_pmuldq512:
+ case clang::X86::BI__builtin_ia32_pmuludq128:
+ case clang::X86::BI__builtin_ia32_pmuludq256:
+ case clang::X86::BI__builtin_ia32_pmuludq512: {
+ APValue SourceLHS, SourceRHS;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
+ return false;
+
+ unsigned SourceLen = SourceLHS.getVectorLength();
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(SourceLen / 2);
+
+ for (unsigned EltNum = 0; EltNum < SourceLen; EltNum += 2) {
+ APSInt LHS = SourceLHS.getVectorElt(EltNum).getInt();
+ APSInt RHS = SourceRHS.getVectorElt(EltNum).getInt();
+
+ switch (E->getBuiltinCallee()) {
+ case clang::X86::BI__builtin_ia32_pmuludq128:
+ case clang::X86::BI__builtin_ia32_pmuludq256:
+ case clang::X86::BI__builtin_ia32_pmuludq512:
+ ResultElements.push_back(
+ APValue(APSInt(llvm::APIntOps::muluExtended(LHS, RHS), true)));
+ break;
+ case clang::X86::BI__builtin_ia32_pmuldq128:
+ case clang::X86::BI__builtin_ia32_pmuldq256:
+ case clang::X86::BI__builtin_ia32_pmuldq512:
+ ResultElements.push_back(
+ APValue(APSInt(llvm::APIntOps::mulsExtended(LHS, RHS), false)));
+ break;
+ }
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+ case clang::X86::BI__builtin_ia32_vprotbi:
+ case clang::X86::BI__builtin_ia32_vprotdi:
+ case clang::X86::BI__builtin_ia32_vprotqi:
+ case clang::X86::BI__builtin_ia32_vprotwi:
+ case clang::X86::BI__builtin_ia32_prold128:
+ case clang::X86::BI__builtin_ia32_prold256:
+ case clang::X86::BI__builtin_ia32_prold512:
+ case clang::X86::BI__builtin_ia32_prolq128:
+ case clang::X86::BI__builtin_ia32_prolq256:
+ case clang::X86::BI__builtin_ia32_prolq512: {
+ APValue SourceLHS, SourceRHS;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
+ return false;
+
+ QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+ bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
+ unsigned SourceLen = SourceLHS.getVectorLength();
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(SourceLen);
+
+ APSInt RHS = SourceRHS.getInt();
+
+ for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
+ const APSInt &LHS = SourceLHS.getVectorElt(EltNum).getInt();
+ ResultElements.push_back(APValue(APSInt(LHS.rotl(RHS), DestUnsigned)));
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+ case clang::X86::BI__builtin_ia32_prord128:
+ case clang::X86::BI__builtin_ia32_prord256:
+ case clang::X86::BI__builtin_ia32_prord512:
+ case clang::X86::BI__builtin_ia32_prorq128:
+ case clang::X86::BI__builtin_ia32_prorq256:
+ case clang::X86::BI__builtin_ia32_prorq512: {
+ APValue SourceLHS, SourceRHS;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
+ return false;
+
+ QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+ bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
+ unsigned SourceLen = SourceLHS.getVectorLength();
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(SourceLen);
+
+ APSInt RHS = SourceRHS.getInt();
+
+ for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
+ const APSInt &LHS = SourceLHS.getVectorElt(EltNum).getInt();
+ ResultElements.push_back(APValue(APSInt(LHS.rotr(RHS), DestUnsigned)));
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+ case Builtin::BI__builtin_elementwise_max:
+ case Builtin::BI__builtin_elementwise_min: {
+ APValue SourceLHS, SourceRHS;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
+ return false;
+
+ QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+
+ if (!DestEltTy->isIntegerType())
+ return false;
+
+ unsigned SourceLen = SourceLHS.getVectorLength();
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(SourceLen);
+
+ for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
+ APSInt LHS = SourceLHS.getVectorElt(EltNum).getInt();
+ APSInt RHS = SourceRHS.getVectorElt(EltNum).getInt();
+ switch (E->getBuiltinCallee()) {
+ case Builtin::BI__builtin_elementwise_max:
+ ResultElements.push_back(
+ APValue(APSInt(std::max(LHS, RHS),
+ DestEltTy->isUnsignedIntegerOrEnumerationType())));
+ break;
+ case Builtin::BI__builtin_elementwise_min:
+ ResultElements.push_back(
+ APValue(APSInt(std::min(LHS, RHS),
+ DestEltTy->isUnsignedIntegerOrEnumerationType())));
+ break;
+ }
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+ case X86::BI__builtin_ia32_selectb_128:
+ case X86::BI__builtin_ia32_selectb_256:
+ case X86::BI__builtin_ia32_selectb_512:
+ case X86::BI__builtin_ia32_selectw_128:
+ case X86::BI__builtin_ia32_selectw_256:
+ case X86::BI__builtin_ia32_selectw_512:
+ case X86::BI__builtin_ia32_selectd_128:
+ case X86::BI__builtin_ia32_selectd_256:
+ case X86::BI__builtin_ia32_selectd_512:
+ case X86::BI__builtin_ia32_selectq_128:
+ case X86::BI__builtin_ia32_selectq_256:
+ case X86::BI__builtin_ia32_selectq_512:
+ case X86::BI__builtin_ia32_selectph_128:
+ case X86::BI__builtin_ia32_selectph_256:
+ case X86::BI__builtin_ia32_selectph_512:
+ case X86::BI__builtin_ia32_selectpbf_128:
+ case X86::BI__builtin_ia32_selectpbf_256:
+ case X86::BI__builtin_ia32_selectpbf_512:
+ case X86::BI__builtin_ia32_selectps_128:
+ case X86::BI__builtin_ia32_selectps_256:
+ case X86::BI__builtin_ia32_selectps_512:
+ case X86::BI__builtin_ia32_selectpd_128:
+ case X86::BI__builtin_ia32_selectpd_256:
+ case X86::BI__builtin_ia32_selectpd_512: {
+ // AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
+ APValue SourceMask, SourceLHS, SourceRHS;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceMask) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceLHS) ||
+ !EvaluateAsRValue(Info, E->getArg(2), SourceRHS))
+ return false;
+
+ APSInt Mask = SourceMask.getInt();
+ unsigned SourceLen = SourceLHS.getVectorLength();
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(SourceLen);
+
+ for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
+ const APValue &LHS = SourceLHS.getVectorElt(EltNum);
+ const APValue &RHS = SourceRHS.getVectorElt(EltNum);
+ ResultElements.push_back(Mask[EltNum] ? LHS : RHS);
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+ case Builtin::BI__builtin_elementwise_ctlz:
+ case Builtin::BI__builtin_elementwise_cttz: {
+ APValue SourceLHS;
+ std::optional<APValue> Fallback;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS))
+ return false;
+ if (E->getNumArgs() > 1) {
+ APValue FallbackTmp;
+ if (!EvaluateAsRValue(Info, E->getArg(1), FallbackTmp))
+ return false;
+ Fallback = FallbackTmp;
+ }
+
+ QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+ unsigned SourceLen = SourceLHS.getVectorLength();
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(SourceLen);
+
+ for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
+ APSInt LHS = SourceLHS.getVectorElt(EltNum).getInt();
+ if (!LHS) {
+ // Without a fallback, a zero element is undefined.
+ if (!Fallback) {
+ Info.FFDiag(E, diag::note_constexpr_countzeroes_zero)
+ << /*IsTrailing=*/(E->getBuiltinCallee() ==
+ Builtin::BI__builtin_elementwise_cttz);
+ return false;
+ }
+ ResultElements.push_back(Fallback->getVectorElt(EltNum));
+ continue;
+ }
+ switch (E->getBuiltinCallee()) {
+ case Builtin::BI__builtin_elementwise_ctlz:
+ ResultElements.push_back(APValue(
+ APSInt(APInt(Info.Ctx.getIntWidth(DestEltTy), LHS.countl_zero()),
+ DestEltTy->isUnsignedIntegerOrEnumerationType())));
+ break;
+ case Builtin::BI__builtin_elementwise_cttz:
+ ResultElements.push_back(APValue(
+ APSInt(APInt(Info.Ctx.getIntWidth(DestEltTy), LHS.countr_zero()),
DestEltTy->isUnsignedIntegerOrEnumerationType())));
break;
}
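
Note: the immediate-count cases above encode the x86 semantics rather than C's: logical shifts by a count >= the lane width produce zero instead of being undefined, and arithmetic right shifts clamp the count to width-1, replicating the sign bit. In 16-bit-lane terms (illustrative values):

    // 0x8000 shl  16 -> 0x0000  (psllwi: count >= width zeroes the lane)
    // 0x8000 lshr 16 -> 0x0000  (psrlwi: same)
    // 0x8000 ashr 16 -> 0xFFFF  (psrawi: count clamped to 15, sign fill)
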
@@ -11658,6 +12017,28 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
+
+ case Builtin::BI__builtin_elementwise_fma: {
+ APValue SourceX, SourceY, SourceZ;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceX) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceY) ||
+ !EvaluateAsRValue(Info, E->getArg(2), SourceZ))
+ return false;
+
+ unsigned SourceLen = SourceX.getVectorLength();
+ SmallVector<APValue> ResultElements;
+ ResultElements.reserve(SourceLen);
+ llvm::RoundingMode RM = getActiveRoundingMode(getEvalInfo(), E);
+ for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
+ const APFloat &X = SourceX.getVectorElt(EltNum).getFloat();
+ const APFloat &Y = SourceY.getVectorElt(EltNum).getFloat();
+ const APFloat &Z = SourceZ.getVectorElt(EltNum).getFloat();
+ APFloat Result(X);
+ (void)Result.fusedMultiplyAdd(Y, Z, RM);
+ ResultElements.push_back(APValue(Result));
+ }
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
}
}
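
Note: together with the scalar FloatExprEvaluator case later in this diff, this presumably makes __builtin_elementwise_fma usable in constant expressions. A hypothetical check, assuming the fold behaves as the code reads:

    // fusedMultiplyAdd performs a single correctly rounded x*y+z step.
    constexpr float R = __builtin_elementwise_fma(2.0f, 3.0f, 1.0f);
    static_assert(R == 7.0f);
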
@@ -12889,7 +13270,7 @@ static bool convertUnsignedAPIntToCharUnits(const llvm::APInt &Int,
static void addFlexibleArrayMemberInitSize(EvalInfo &Info, const QualType &T,
const LValue &LV, CharUnits &Size) {
if (!T.isNull() && T->isStructureType() &&
- T->getAsStructureType()->getDecl()->hasFlexibleArrayMember())
+ T->castAsRecordDecl()->hasFlexibleArrayMember())
if (const auto *V = LV.getLValueBase().dyn_cast<const ValueDecl *>())
if (const auto *VD = dyn_cast<VarDecl>(V))
if (VD->hasInit())
@@ -13210,15 +13591,24 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BI__builtin_clzll:
case Builtin::BI__builtin_clzs:
case Builtin::BI__builtin_clzg:
+ case Builtin::BI__builtin_elementwise_ctlz:
case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
case Builtin::BI__lzcnt:
case Builtin::BI__lzcnt64: {
APSInt Val;
- if (!EvaluateInteger(E->getArg(0), Val, Info))
+ if (E->getArg(0)->getType()->isExtVectorBoolType()) {
+ APValue Vec;
+ if (!EvaluateVector(E->getArg(0), Vec, Info))
+ return false;
+ Val = ConvertBoolVectorToInt(Vec);
+ } else if (!EvaluateInteger(E->getArg(0), Val, Info)) {
return false;
+ }
std::optional<APSInt> Fallback;
- if (BuiltinOp == Builtin::BI__builtin_clzg && E->getNumArgs() > 1) {
+ if ((BuiltinOp == Builtin::BI__builtin_clzg ||
+ BuiltinOp == Builtin::BI__builtin_elementwise_ctlz) &&
+ E->getNumArgs() > 1) {
APSInt FallbackTemp;
if (!EvaluateInteger(E->getArg(1), FallbackTemp, Info))
return false;
@@ -13236,6 +13626,11 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
BuiltinOp != Builtin::BI__lzcnt &&
BuiltinOp != Builtin::BI__lzcnt64;
+ if (BuiltinOp == Builtin::BI__builtin_elementwise_ctlz) {
+ Info.FFDiag(E, diag::note_constexpr_countzeroes_zero)
+ << /*IsTrailing=*/false;
+ }
+
if (ZeroIsUndefined)
return Error(E);
}
@@ -13290,13 +13685,22 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BI__builtin_ctzl:
case Builtin::BI__builtin_ctzll:
case Builtin::BI__builtin_ctzs:
- case Builtin::BI__builtin_ctzg: {
+ case Builtin::BI__builtin_ctzg:
+ case Builtin::BI__builtin_elementwise_cttz: {
APSInt Val;
- if (!EvaluateInteger(E->getArg(0), Val, Info))
+ if (E->getArg(0)->getType()->isExtVectorBoolType()) {
+ APValue Vec;
+ if (!EvaluateVector(E->getArg(0), Vec, Info))
+ return false;
+ Val = ConvertBoolVectorToInt(Vec);
+ } else if (!EvaluateInteger(E->getArg(0), Val, Info)) {
return false;
+ }
std::optional<APSInt> Fallback;
- if (BuiltinOp == Builtin::BI__builtin_ctzg && E->getNumArgs() > 1) {
+ if ((BuiltinOp == Builtin::BI__builtin_ctzg ||
+ BuiltinOp == Builtin::BI__builtin_elementwise_cttz) &&
+ E->getNumArgs() > 1) {
APSInt FallbackTemp;
if (!EvaluateInteger(E->getArg(1), FallbackTemp, Info))
return false;
@@ -13307,6 +13711,10 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (Fallback)
return Success(*Fallback, E);
+ if (BuiltinOp == Builtin::BI__builtin_elementwise_cttz) {
+ Info.FFDiag(E, diag::note_constexpr_countzeroes_zero)
+ << /*IsTrailing=*/true;
+ }
return Error(E);
}
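
Note: the optional second argument mirrors __builtin_clzg/__builtin_ctzg — a zero input takes the fallback when one is supplied and is otherwise diagnosed as non-constant. Hypothetical scalar checks, assuming the two-argument form folds as the cases read:

    static_assert(__builtin_elementwise_cttz(8, 99) == 3);   // 8 == 0b1000
    static_assert(__builtin_elementwise_cttz(0, 99) == 99);  // fallback
    // __builtin_elementwise_cttz(0) alone would hit
    // note_constexpr_countzeroes_zero and fail to fold.
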
@@ -13319,6 +13727,14 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(Operand, E);
}
+ case Builtin::BI__builtin_elementwise_abs: {
+ APSInt Val;
+ if (!EvaluateInteger(E->getArg(0), Val, Info))
+ return false;
+
+ return Success(Val.abs(), E);
+ }
+
case Builtin::BI__builtin_expect:
case Builtin::BI__builtin_expect_with_probability:
return Visit(E->getArg(0));
@@ -13494,8 +13910,14 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BI__popcnt:
case Builtin::BI__popcnt64: {
APSInt Val;
- if (!EvaluateInteger(E->getArg(0), Val, Info))
+ if (E->getArg(0)->getType()->isExtVectorBoolType()) {
+ APValue Vec;
+ if (!EvaluateVector(E->getArg(0), Vec, Info))
+ return false;
+ Val = ConvertBoolVectorToInt(Vec);
+ } else if (!EvaluateInteger(E->getArg(0), Val, Info)) {
return false;
+ }
return Success(Val.popcount(), E);
}
@@ -13552,7 +13974,24 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
APInt Result = LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
return Success(APSInt(Result, !LHS.isSigned()), E);
}
+ case Builtin::BI__builtin_elementwise_max: {
+ APSInt LHS, RHS;
+ if (!EvaluateInteger(E->getArg(0), LHS, Info) ||
+ !EvaluateInteger(E->getArg(1), RHS, Info))
+ return false;
+
+ APInt Result = std::max(LHS, RHS);
+ return Success(APSInt(Result, !LHS.isSigned()), E);
+ }
+ case Builtin::BI__builtin_elementwise_min: {
+ APSInt LHS, RHS;
+ if (!EvaluateInteger(E->getArg(0), LHS, Info) ||
+ !EvaluateInteger(E->getArg(1), RHS, Info))
+ return false;
+ APInt Result = std::min(LHS, RHS);
+ return Success(APSInt(Result, !LHS.isSigned()), E);
+ }
case Builtin::BIstrlen:
case Builtin::BIwcslen:
// A call to strlen is not a constant expression.
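
Note: hypothetical folds enabled by the two cases above, assuming scalar integer operands:

    static_assert(__builtin_elementwise_max(3, 5) == 5);
    static_assert(__builtin_elementwise_min(-2, 7) == -2);
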
@@ -15086,6 +15525,13 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
const auto *VAT = Info.Ctx.getAsVariableArrayType(Ty);
assert(VAT);
if (VAT->getElementType()->isArrayType()) {
+ // The variable array size expression could be missing (e.g. int a[*][10]).
+ // In that case, it can't be a constant expression.
+ if (!VAT->getSizeExpr()) {
+ Info.FFDiag(E->getBeginLoc());
+ return false;
+ }
+
std::optional<APSInt> Res =
VAT->getSizeExpr()->getIntegerConstantExpr(Info.Ctx);
if (Res) {
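
Note: `[*]` is the C (not C++) spelling the comment refers to — a VLA of unspecified size, valid only in function prototype scope, so the outer bound genuinely carries no size expression. Illustrative shape:

    void f(int a[*][10]);  // C: no size expression for the outer bound;
                           // the evaluator must bail out rather than
                           // dereference a null getSizeExpr().
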
@@ -15133,10 +15579,9 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
case OffsetOfNode::Field: {
FieldDecl *MemberDecl = ON.getField();
- const RecordType *RT = CurrentType->getAs<RecordType>();
- if (!RT)
+ const auto *RD = CurrentType->getAsRecordDecl();
+ if (!RD)
return Error(OOE);
- RecordDecl *RD = RT->getDecl();
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
unsigned i = MemberDecl->getFieldIndex();
@@ -15155,21 +15600,20 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
return Error(OOE);
// Find the layout of the class whose base we are looking into.
- const RecordType *RT = CurrentType->getAs<RecordType>();
- if (!RT)
+ const auto *RD = CurrentType->getAsCXXRecordDecl();
+ if (!RD)
return Error(OOE);
- RecordDecl *RD = RT->getDecl();
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
// Find the base class itself.
CurrentType = BaseSpec->getType();
- const RecordType *BaseRT = CurrentType->getAs<RecordType>();
- if (!BaseRT)
+ const auto *BaseRD = CurrentType->getAsCXXRecordDecl();
+ if (!BaseRD)
return Error(OOE);
// Add the offset to the base.
- Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
+ Result += RL.getBaseClassOffset(BaseRD);
break;
}
}
@@ -15347,8 +15791,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
}
if (Info.Ctx.getLangOpts().CPlusPlus && DestType->isEnumeralType()) {
- const EnumType *ET = dyn_cast<EnumType>(DestType.getCanonicalType());
- const EnumDecl *ED = ET->getDecl();
+ const auto *ED = DestType->getAsEnumDecl();
// Check that the value is within the range of the enumeration values.
//
// This corresponds to [expr.static.cast]p10, which says:
@@ -15792,6 +16235,7 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Error(E);
return true;
+ case Builtin::BI__builtin_elementwise_abs:
case Builtin::BI__builtin_fabs:
case Builtin::BI__builtin_fabsf:
case Builtin::BI__builtin_fabsl:
@@ -15878,6 +16322,21 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
Result = minimumnum(Result, RHS);
return true;
}
+
+ case Builtin::BI__builtin_elementwise_fma: {
+ if (!E->getArg(0)->isPRValue() || !E->getArg(1)->isPRValue() ||
+ !E->getArg(2)->isPRValue()) {
+ return false;
+ }
+ APFloat SourceY(0.), SourceZ(0.);
+ if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+ !EvaluateFloat(E->getArg(1), SourceY, Info) ||
+ !EvaluateFloat(E->getArg(2), SourceZ, Info))
+ return false;
+ llvm::RoundingMode RM = getActiveRoundingMode(getEvalInfo(), E);
+ (void)Result.fusedMultiplyAdd(SourceY, SourceZ, RM);
+ return true;
+ }
}
}
@@ -17606,7 +18065,10 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
// it is an ICE or not.
const auto *VAT = Ctx.getAsVariableArrayType(ArgTy);
if (VAT->getElementType()->isArrayType())
- return CheckICE(VAT->getSizeExpr(), Ctx);
+ // The variable array size expression could be missing (e.g. int a[*][10]).
+ // In that case, it can't be a constant expression.
+ return VAT->getSizeExpr() ? CheckICE(VAT->getSizeExpr(), Ctx)
+ : ICEDiag(IK_NotICE, E->getBeginLoc());
// Otherwise, this is a regular VLA, which is definitely not an ICE.
return ICEDiag(IK_NotICE, E->getBeginLoc());
@@ -18010,7 +18472,8 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
// Fabricate an arbitrary expression on the stack and pretend that it
// is a temporary being used as the 'this' pointer.
LValue This;
- ImplicitValueInitExpr VIE(RD ? Info.Ctx.getRecordType(RD) : Info.Ctx.IntTy);
+ ImplicitValueInitExpr VIE(RD ? Info.Ctx.getCanonicalTagType(RD)
+ : Info.Ctx.IntTy);
This.set({&VIE, Info.CurrentCall->Index});
ArrayRef<const Expr*> Args;
diff --git a/clang/lib/AST/FormatString.cpp b/clang/lib/AST/FormatString.cpp
index 112b756d..d4cb89b 100644
--- a/clang/lib/AST/FormatString.cpp
+++ b/clang/lib/AST/FormatString.cpp
@@ -413,14 +413,14 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
return Match;
case AnyCharTy: {
- if (const auto *ETy = argTy->getAs<EnumType>()) {
+ if (const auto *ED = argTy->getAsEnumDecl()) {
// If the enum is incomplete we know nothing about the underlying type.
// Assume that it's 'int'. Do not use the underlying type for a scoped
// enumeration.
- if (!ETy->getDecl()->isComplete())
+ if (!ED->isComplete())
return NoMatch;
- if (ETy->isUnscopedEnumerationType())
- argTy = ETy->getDecl()->getIntegerType();
+ if (!ED->isScoped())
+ argTy = ED->getIntegerType();
}
if (const auto *BT = argTy->getAs<BuiltinType>()) {
@@ -462,14 +462,14 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
return matchesSizeTPtrdiffT(C, argTy, T);
}
- if (const EnumType *ETy = argTy->getAs<EnumType>()) {
+ if (const auto *ED = argTy->getAsEnumDecl()) {
// If the enum is incomplete we know nothing about the underlying type.
// Assume that it's 'int'. Do not use the underlying type for a scoped
// enumeration as that needs an exact match.
- if (!ETy->getDecl()->isComplete())
+ if (!ED->isComplete())
argTy = C.IntTy;
- else if (ETy->isUnscopedEnumerationType())
- argTy = ETy->getDecl()->getIntegerType();
+ else if (!ED->isScoped())
+ argTy = ED->getIntegerType();
}
if (argTy->isSaturatedFixedPointType())
@@ -653,7 +653,7 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
// to Objective-C objects. Since the compiler doesn't know which
// structs can be toll-free bridged, we just accept them all.
QualType pointee = PT->getPointeeType();
- if (pointee->getAsStructureType() || pointee->isVoidType())
+ if (pointee->isStructureType() || pointee->isVoidType())
return Match;
}
return NoMatch;
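
Note: the enum handling above is what lets -Wformat fold an unscoped enum argument to its underlying integer type while demanding an exact match for scoped enums. Illustrative (hypothetical snippet):

    #include <cstdio>
    enum Color { Red, Green };   // unscoped: matched via underlying type
    enum class Mode { A, B };    // scoped: needs an exact match
    void f() {
      std::printf("%d", Red);        // OK once Color is folded to int
      // std::printf("%d", Mode::A); // would not match: not promoted
    }
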
diff --git a/clang/lib/AST/InheritViz.cpp b/clang/lib/AST/InheritViz.cpp
index 1dafed8..3c4a5a8 100644
--- a/clang/lib/AST/InheritViz.cpp
+++ b/clang/lib/AST/InheritViz.cpp
@@ -89,8 +89,8 @@ void InheritanceHierarchyWriter::WriteNode(QualType Type, bool FromVirtual) {
Out << " \"];\n";
// Display the base classes.
- const auto *Decl =
- static_cast<const CXXRecordDecl *>(Type->castAs<RecordType>()->getDecl());
+ const auto *Decl = cast<CXXRecordDecl>(
+ Type->castAsCanonical<RecordType>()->getOriginalDecl());
for (const auto &Base : Decl->bases()) {
QualType CanonBaseType = Context.getCanonicalType(Base.getType());
@@ -133,7 +133,7 @@ InheritanceHierarchyWriter::WriteNodeReference(QualType Type,
/// viewInheritance - Display the inheritance hierarchy of this C++
/// class using GraphViz.
void CXXRecordDecl::viewInheritance(ASTContext& Context) const {
- QualType Self = Context.getTypeDeclType(this);
+ QualType Self = Context.getCanonicalTagType(this);
int FD;
SmallString<128> Filename;
diff --git a/clang/lib/AST/ItaniumCXXABI.cpp b/clang/lib/AST/ItaniumCXXABI.cpp
index 6ceedd6..adef158 100644
--- a/clang/lib/AST/ItaniumCXXABI.cpp
+++ b/clang/lib/AST/ItaniumCXXABI.cpp
@@ -42,10 +42,9 @@ namespace {
///
/// Returns the name of anonymous union VarDecl or nullptr if it is not found.
static const IdentifierInfo *findAnonymousUnionVarDeclName(const VarDecl& VD) {
- const RecordType *RT = VD.getType()->getAs<RecordType>();
- assert(RT && "type of VarDecl is expected to be RecordType.");
- assert(RT->getDecl()->isUnion() && "RecordType is expected to be a union.");
- if (const FieldDecl *FD = RT->getDecl()->findFirstNamedDataMember()) {
+ const auto *RD = VD.getType()->castAsRecordDecl();
+ assert(RD->isUnion() && "RecordType is expected to be a union.");
+ if (const FieldDecl *FD = RD->findFirstNamedDataMember()) {
return FD->getIdentifier();
}
diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
index 5233648..ffadfce 100644
--- a/clang/lib/AST/ItaniumMangle.cpp
+++ b/clang/lib/AST/ItaniumMangle.cpp
@@ -463,9 +463,7 @@ public:
void mangleVendorType(StringRef Name);
private:
-
bool mangleSubstitution(const NamedDecl *ND);
- bool mangleSubstitution(NestedNameSpecifier *NNS);
bool mangleSubstitution(QualType T);
bool mangleSubstitution(TemplateName Template);
bool mangleSubstitution(uintptr_t Ptr);
@@ -479,21 +477,15 @@ private:
addSubstitution(reinterpret_cast<uintptr_t>(ND));
}
- void addSubstitution(NestedNameSpecifier *NNS) {
- NNS = Context.getASTContext().getCanonicalNestedNameSpecifier(NNS);
-
- addSubstitution(reinterpret_cast<uintptr_t>(NNS));
- }
void addSubstitution(QualType T);
void addSubstitution(TemplateName Template);
void addSubstitution(uintptr_t Ptr);
// Destructive copy substitutions from other mangler.
void extendSubstitutions(CXXNameMangler* Other);
- void mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
+ void mangleUnresolvedPrefix(NestedNameSpecifier Qualifier,
bool recursive = false);
- void mangleUnresolvedName(NestedNameSpecifier *qualifier,
- DeclarationName name,
+ void mangleUnresolvedName(NestedNameSpecifier Qualifier, DeclarationName name,
const TemplateArgumentLoc *TemplateArgs,
unsigned NumTemplateArgs,
unsigned KnownArity = UnknownArity);
@@ -542,7 +534,7 @@ private:
void mangleNestedNameWithClosurePrefix(GlobalDecl GD,
const NamedDecl *PrefixND,
const AbiTagList *AdditionalAbiTags);
- void manglePrefix(NestedNameSpecifier *qualifier);
+ void manglePrefix(NestedNameSpecifier Qualifier);
void manglePrefix(const DeclContext *DC, bool NoFunction=false);
void manglePrefix(QualType type);
void mangleTemplatePrefix(GlobalDecl GD, bool NoFunction=false);
@@ -588,12 +580,10 @@ private:
void mangleMemberExprBase(const Expr *base, bool isArrow);
void mangleMemberExpr(const Expr *base, bool isArrow,
- NestedNameSpecifier *qualifier,
- NamedDecl *firstQualifierLookup,
- DeclarationName name,
+ NestedNameSpecifier Qualifier,
+ NamedDecl *firstQualifierLookup, DeclarationName name,
const TemplateArgumentLoc *TemplateArgs,
- unsigned NumTemplateArgs,
- unsigned knownArity);
+ unsigned NumTemplateArgs, unsigned knownArity);
void mangleCastExpression(const Expr *E, StringRef CastEncoding);
void mangleInitListElements(const InitListExpr *InitList);
void mangleRequirement(SourceLocation RequiresExprLoc,
@@ -1334,6 +1324,21 @@ void CXXNameMangler::manglePrefix(QualType type) {
mangleTemplateArgs(Template, DTST->template_arguments());
addSubstitution(QualType(DTST, 0));
}
+ } else if (const auto *DNT = type->getAs<DependentNameType>()) {
+ // Clang 14 and before did not consider this substitutable.
+ bool Clang14Compat = isCompatibleWith(LangOptions::ClangABI::Ver14);
+ if (!Clang14Compat && mangleSubstitution(QualType(DNT, 0)))
+ return;
+
+ // Member expressions can have these without prefixes, but that
+ // should end up in mangleUnresolvedPrefix instead.
+ assert(DNT->getQualifier());
+ manglePrefix(DNT->getQualifier());
+
+ mangleSourceName(DNT->getIdentifier());
+
+ if (!Clang14Compat)
+ addSubstitution(QualType(DNT, 0));
} else {
// We use the QualType mangle type variant here because it handles
// substitutions.
@@ -1345,7 +1350,7 @@ void CXXNameMangler::manglePrefix(QualType type) {
///
/// \param recursive - true if this is being called recursively,
/// i.e. if there is more prefix "to the right".
-void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
+void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier Qualifier,
bool recursive) {
// x, ::x
@@ -1362,8 +1367,11 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
// <unresolved-name> ::= [gs] sr <unresolved-qualifier-level>+ E
// <base-unresolved-name>
- switch (qualifier->getKind()) {
- case NestedNameSpecifier::Global:
+ switch (Qualifier.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
+
+ case NestedNameSpecifier::Kind::Global:
Out << "gs";
// We want an 'sr' unless this is the entire NNS.
@@ -1373,27 +1381,29 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
// We never want an 'E' here.
return;
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
llvm_unreachable("Can't mangle __super specifier");
- case NestedNameSpecifier::Namespace:
- if (qualifier->getPrefix())
- mangleUnresolvedPrefix(qualifier->getPrefix(),
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = Qualifier.getAsNamespaceAndPrefix();
+ if (Prefix)
+ mangleUnresolvedPrefix(Prefix,
/*recursive*/ true);
else
Out << "sr";
- mangleSourceNameWithAbiTags(qualifier->getAsNamespace());
+ mangleSourceNameWithAbiTags(Namespace);
break;
+ }
- case NestedNameSpecifier::TypeSpec: {
- const Type *type = qualifier->getAsType();
+ case NestedNameSpecifier::Kind::Type: {
+ const Type *type = Qualifier.getAsType();
// We only want to use an unresolved-type encoding if this is one of:
// - a decltype
// - a template type parameter
// - a template template parameter with arguments
// In all of these cases, we should have no prefix.
- if (NestedNameSpecifier *Prefix = qualifier->getPrefix()) {
+ if (NestedNameSpecifier Prefix = type->getPrefix()) {
mangleUnresolvedPrefix(Prefix,
/*recursive=*/true);
} else {
@@ -1406,18 +1416,6 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
break;
}
-
- case NestedNameSpecifier::Identifier:
- // Member expressions can have these without prefixes.
- if (qualifier->getPrefix())
- mangleUnresolvedPrefix(qualifier->getPrefix(),
- /*recursive*/ true);
- else
- Out << "sr";
-
- mangleSourceName(qualifier->getAsIdentifier());
- // An Identifier has no type information, so we can't emit abi tags for it.
- break;
}
// If this was the innermost part of the NNS, and we fell out to
@@ -1429,10 +1427,11 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
/// Mangle an unresolved-name, which is generally used for names which
/// weren't resolved to specific entities.
void CXXNameMangler::mangleUnresolvedName(
- NestedNameSpecifier *qualifier, DeclarationName name,
+ NestedNameSpecifier Qualifier, DeclarationName name,
const TemplateArgumentLoc *TemplateArgs, unsigned NumTemplateArgs,
unsigned knownArity) {
- if (qualifier) mangleUnresolvedPrefix(qualifier);
+ if (Qualifier)
+ mangleUnresolvedPrefix(Qualifier);
switch (name.getNameKind()) {
// <base-unresolved-name> ::= <simple-id>
case DeclarationName::Identifier:
@@ -1581,7 +1580,7 @@ void CXXNameMangler::mangleUnqualifiedName(
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
// We must have an anonymous union or struct declaration.
- const RecordDecl *RD = VD->getType()->castAs<RecordType>()->getDecl();
+ const auto *RD = VD->getType()->castAsRecordDecl();
// Itanium C++ ABI 5.1.2:
//
@@ -2167,49 +2166,22 @@ void CXXNameMangler::mangleLambdaSig(const CXXRecordDecl *Lambda) {
Lambda->getLambdaStaticInvoker());
}
-void CXXNameMangler::manglePrefix(NestedNameSpecifier *qualifier) {
- switch (qualifier->getKind()) {
- case NestedNameSpecifier::Global:
+void CXXNameMangler::manglePrefix(NestedNameSpecifier Qualifier) {
+ switch (Qualifier.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
// nothing
return;
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
llvm_unreachable("Can't mangle __super specifier");
- case NestedNameSpecifier::Namespace:
- mangleName(qualifier->getAsNamespace()->getNamespace());
- return;
-
- case NestedNameSpecifier::TypeSpec:
- if (NestedNameSpecifier *Prefix = qualifier->getPrefix()) {
- const auto *DTST =
- cast<DependentTemplateSpecializationType>(qualifier->getAsType());
- QualType NewT = getASTContext().getDependentTemplateSpecializationType(
- DTST->getKeyword(),
- {Prefix, DTST->getDependentTemplateName().getName(),
- /*HasTemplateKeyword=*/true},
- DTST->template_arguments(), /*IsCanonical=*/true);
- manglePrefix(NewT);
- return;
- }
- manglePrefix(QualType(qualifier->getAsType(), 0));
+ case NestedNameSpecifier::Kind::Namespace:
+ mangleName(Qualifier.getAsNamespaceAndPrefix().Namespace->getNamespace());
return;
- case NestedNameSpecifier::Identifier:
- // Clang 14 and before did not consider this substitutable.
- bool Clang14Compat = isCompatibleWith(LangOptions::ClangABI::Ver14);
- if (!Clang14Compat && mangleSubstitution(qualifier))
- return;
-
- // Member expressions can have these without prefixes, but that
- // should end up in mangleUnresolvedPrefix instead.
- assert(qualifier->getPrefix());
- manglePrefix(qualifier->getPrefix());
-
- mangleSourceName(qualifier->getAsIdentifier());
-
- if (!Clang14Compat)
- addSubstitution(qualifier);
+ case NestedNameSpecifier::Kind::Type:
+ manglePrefix(QualType(Qualifier.getAsType(), 0));
return;
}
@@ -2269,8 +2241,7 @@ void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
if (!Clang11Compat && mangleSubstitution(Template))
return;
- if (NestedNameSpecifier *Qualifier = Dependent->getQualifier())
- manglePrefix(Qualifier);
+ manglePrefix(Dependent->getQualifier());
if (Clang11Compat && mangleSubstitution(Template))
return;
@@ -2470,6 +2441,13 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::CountAttributed:
llvm_unreachable("type is illegal as a nested name specifier");
+ case Type::SubstBuiltinTemplatePack:
+ // FIXME: not clear how to mangle this!
+ // template <class T...> class A {
+ // template <class U...> void foo(__builtin_dedup_pack<T...>(*)(U) x...);
+ // };
+ Out << "_SUBSTBUILTINPACK_";
+ break;
case Type::SubstTemplateTypeParmPack:
// FIXME: not clear how to mangle this!
// template <class T...> class A {
@@ -2525,7 +2503,8 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::Enum:
case Type::Record:
- mangleSourceNameWithAbiTags(cast<TagType>(Ty)->getDecl());
+ mangleSourceNameWithAbiTags(
+ cast<TagType>(Ty)->getOriginalDecl()->getDefinitionOrSelf());
break;
case Type::TemplateSpecialization: {
@@ -2586,8 +2565,9 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
}
case Type::InjectedClassName:
- mangleSourceNameWithAbiTags(
- cast<InjectedClassNameType>(Ty)->getDecl());
+ mangleSourceNameWithAbiTags(cast<InjectedClassNameType>(Ty)
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf());
break;
case Type::DependentName:
@@ -2608,9 +2588,6 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::Using:
return mangleUnresolvedTypeOrSimpleId(cast<UsingType>(Ty)->desugar(),
Prefix);
- case Type::Elaborated:
- return mangleUnresolvedTypeOrSimpleId(
- cast<ElaboratedType>(Ty)->getNamedType(), Prefix);
}
return false;
@@ -3838,7 +3815,7 @@ void CXXNameMangler::mangleType(const RecordType *T) {
mangleType(static_cast<const TagType*>(T));
}
void CXXNameMangler::mangleType(const TagType *T) {
- mangleName(T->getDecl());
+ mangleName(T->getOriginalDecl()->getDefinitionOrSelf());
}
// <type> ::= <array-type>
@@ -3875,16 +3852,10 @@ void CXXNameMangler::mangleType(const IncompleteArrayType *T) {
// <pointer-to-member-type> ::= M <class type> <member type>
void CXXNameMangler::mangleType(const MemberPointerType *T) {
Out << 'M';
- if (auto *RD = T->getMostRecentCXXRecordDecl()) {
+ if (auto *RD = T->getMostRecentCXXRecordDecl())
mangleCXXRecordDecl(RD);
- } else {
- NestedNameSpecifier *NNS = T->getQualifier();
- if (auto *II = NNS->getAsIdentifier())
- mangleType(getASTContext().getDependentNameType(
- ElaboratedTypeKeyword::None, NNS->getPrefix(), II));
- else
- manglePrefix(NNS);
- }
+ else
+ mangleType(QualType(T->getQualifier().getAsType(), 0));
QualType PointeeType = T->getPointeeType();
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
mangleType(FPT);
@@ -3924,6 +3895,14 @@ void CXXNameMangler::mangleType(const SubstTemplateTypeParmPackType *T) {
Out << "_SUBSTPACK_";
}
+void CXXNameMangler::mangleType(const SubstBuiltinTemplatePackType *T) {
+ // FIXME: not clear how to mangle this!
+ // template <class T...> class A {
+ // template <class U...> void foo(__builtin_dedup_pack<T...>(*)(U) x...);
+ // };
+ Out << "_SUBSTBUILTINPACK_";
+}
+
// <type> ::= P <type> # pointer-to
void CXXNameMangler::mangleType(const PointerType *T) {
Out << 'P';
@@ -4471,7 +4450,8 @@ void CXXNameMangler::mangleType(const InjectedClassNameType *T) {
// Mangle injected class name types as if the user had written the
// specialization out fully. It may not actually be possible to see
// this mangling, though.
- mangleType(T->getInjectedSpecializationType());
+ mangleType(T->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ getASTContext()));
}
void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
@@ -4746,8 +4726,8 @@ void CXXNameMangler::mangleIntegerLiteral(QualType T,
void CXXNameMangler::mangleMemberExprBase(const Expr *Base, bool IsArrow) {
// Ignore member expressions involving anonymous unions.
- while (const auto *RT = Base->getType()->getAs<RecordType>()) {
- if (!RT->getDecl()->isAnonymousStructOrUnion())
+ while (const auto *RT = Base->getType()->getAsCanonical<RecordType>()) {
+ if (!RT->getOriginalDecl()->isAnonymousStructOrUnion())
break;
const auto *ME = dyn_cast<MemberExpr>(Base);
if (!ME)
@@ -4768,9 +4748,8 @@ void CXXNameMangler::mangleMemberExprBase(const Expr *Base, bool IsArrow) {
}
/// Mangles a member expression.
-void CXXNameMangler::mangleMemberExpr(const Expr *base,
- bool isArrow,
- NestedNameSpecifier *qualifier,
+void CXXNameMangler::mangleMemberExpr(const Expr *base, bool isArrow,
+ NestedNameSpecifier Qualifier,
NamedDecl *firstQualifierLookup,
DeclarationName member,
const TemplateArgumentLoc *TemplateArgs,
@@ -4780,7 +4759,7 @@ void CXXNameMangler::mangleMemberExpr(const Expr *base,
// ::= pt <expression> <unresolved-name>
if (base)
mangleMemberExprBase(base, isArrow);
- mangleUnresolvedName(qualifier, member, TemplateArgs, NumTemplateArgs, arity);
+ mangleUnresolvedName(Qualifier, member, TemplateArgs, NumTemplateArgs, arity);
}
/// Look at the callee of the given call expression and determine if
@@ -5230,7 +5209,7 @@ recurse:
const auto *PDE = cast<CXXPseudoDestructorExpr>(E);
if (const Expr *Base = PDE->getBase())
mangleMemberExprBase(Base, PDE->isArrow());
- NestedNameSpecifier *Qualifier = PDE->getQualifier();
+ NestedNameSpecifier Qualifier = PDE->getQualifier();
if (TypeSourceInfo *ScopeInfo = PDE->getScopeTypeInfo()) {
if (Qualifier) {
mangleUnresolvedPrefix(Qualifier,
@@ -5855,7 +5834,8 @@ recurse:
// externally-visible declaration, so there's no standard mangling for
// this, but mangling as a literal of the closure type seems reasonable.
Out << "L";
- mangleType(Context.getASTContext().getRecordType(cast<LambdaExpr>(E)->getLambdaClass()));
+ mangleType(Context.getASTContext().getCanonicalTagType(
+ cast<LambdaExpr>(E)->getLambdaClass()));
Out << "E";
break;
}
@@ -6528,7 +6508,7 @@ static QualType getLValueType(ASTContext &Ctx, const APValue &LV) {
dyn_cast<FieldDecl>(E.getAsBaseOrMember().getPointer()))
T = FD->getType();
else
- T = Ctx.getRecordType(
+ T = Ctx.getCanonicalTagType(
cast<CXXRecordDecl>(E.getAsBaseOrMember().getPointer()));
}
return T;
@@ -6895,7 +6875,7 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
}
TypeSoFar = FD->getType();
} else {
- TypeSoFar = Ctx.getRecordType(cast<CXXRecordDecl>(D));
+ TypeSoFar = Ctx.getCanonicalTagType(cast<CXXRecordDecl>(D));
}
}
}
@@ -7005,14 +6985,6 @@ bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
return mangleSubstitution(reinterpret_cast<uintptr_t>(ND));
}
-bool CXXNameMangler::mangleSubstitution(NestedNameSpecifier *NNS) {
- assert(NNS->getKind() == NestedNameSpecifier::Identifier &&
- "mangleSubstitution(NestedNameSpecifier *) is only used for "
- "identifier nested name specifiers.");
- NNS = Context.getASTContext().getCanonicalNestedNameSpecifier(NNS);
- return mangleSubstitution(reinterpret_cast<uintptr_t>(NNS));
-}
-
/// Determine whether the given type has any qualifiers that are relevant for
/// substitutions.
static bool hasMangledSubstitutionQualifiers(QualType T) {
@@ -7022,8 +6994,8 @@ static bool hasMangledSubstitutionQualifiers(QualType T) {
bool CXXNameMangler::mangleSubstitution(QualType T) {
if (!hasMangledSubstitutionQualifiers(T)) {
- if (const RecordType *RT = T->getAs<RecordType>())
- return mangleSubstitution(RT->getDecl());
+ if (const auto *RD = T->getAsCXXRecordDecl())
+ return mangleSubstitution(RD);
}
uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
@@ -7059,12 +7031,12 @@ bool CXXNameMangler::isSpecializedAs(QualType S, llvm::StringRef Name,
if (S.isNull())
return false;
- const RecordType *RT = S->getAs<RecordType>();
+ const RecordType *RT = S->getAsCanonical<RecordType>();
if (!RT)
return false;
const ClassTemplateSpecializationDecl *SD =
- dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getOriginalDecl());
if (!SD || !SD->getIdentifier()->isStr(Name))
return false;
@@ -7193,8 +7165,8 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
void CXXNameMangler::addSubstitution(QualType T) {
if (!hasMangledSubstitutionQualifiers(T)) {
- if (const RecordType *RT = T->getAs<RecordType>()) {
- addSubstitution(RT->getDecl());
+ if (const auto *RD = T->getAsCXXRecordDecl()) {
+ addSubstitution(RD);
return;
}
}
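
The substitution hunks above all retire the same lookup idiom: instead of desugaring through getAs<RecordType>() and calling the removed getDecl(), the mangler now either asks the type directly for its CXXRecordDecl or walks the canonical RecordType and takes getOriginalDecl(). For reference, both post-patch idioms in isolation (a sketch against the patched headers; the helper name is ours):

    #include "clang/AST/DeclCXX.h"
    #include "clang/AST/Type.h"
    #include "llvm/Support/Casting.h"

    using namespace clang;

    // Illustrative helper: find the CXXRecordDecl a type refers to.
    static const CXXRecordDecl *lookupRecord(QualType T) {
      // Preferred when only the declaration matters: skips sugar entirely.
      if (const auto *RD = T->getAsCXXRecordDecl())
        return RD;
      // When the RecordType node itself is needed, go through the canonical
      // type and use getOriginalDecl() in place of the removed getDecl().
      if (const auto *RT = T->getAsCanonical<RecordType>())
        return llvm::dyn_cast<CXXRecordDecl>(RT->getOriginalDecl());
      return nullptr;
    }
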
diff --git a/clang/lib/AST/JSONNodeDumper.cpp b/clang/lib/AST/JSONNodeDumper.cpp
index 64ddb1e..ca8e2af 100644
--- a/clang/lib/AST/JSONNodeDumper.cpp
+++ b/clang/lib/AST/JSONNodeDumper.cpp
@@ -395,8 +395,8 @@ llvm::json::Array JSONNodeDumper::createCastPath(const CastExpr *C) {
for (auto I = C->path_begin(), E = C->path_end(); I != E; ++I) {
const CXXBaseSpecifier *Base = *I;
- const auto *RD =
- cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
+ const auto *RD = cast<CXXRecordDecl>(
+ Base->getType()->castAsCanonical<RecordType>()->getOriginalDecl());
llvm::json::Object Val{{"name", RD->getName()}};
if (Base->isVirtual())
@@ -606,9 +606,8 @@ void JSONNodeDumper::VisitTypedefType(const TypedefType *TT) {
}
void JSONNodeDumper::VisitUsingType(const UsingType *TT) {
- JOS.attribute("decl", createBareDeclRef(TT->getFoundDecl()));
- if (!TT->typeMatchesDecl())
- JOS.attribute("type", createQualType(TT->desugar()));
+ JOS.attribute("decl", createBareDeclRef(TT->getDecl()));
+ JOS.attribute("type", createQualType(TT->desugar()));
}
void JSONNodeDumper::VisitFunctionType(const FunctionType *T) {
@@ -759,7 +758,15 @@ void JSONNodeDumper::VisitUnaryTransformType(const UnaryTransformType *UTT) {
}
void JSONNodeDumper::VisitTagType(const TagType *TT) {
- JOS.attribute("decl", createBareDeclRef(TT->getDecl()));
+ if (NestedNameSpecifier Qualifier = TT->getQualifier()) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ Qualifier.print(OS, PrintPolicy, /*ResolveTemplateArguments=*/true);
+ JOS.attribute("qualifier", Str);
+ }
+ JOS.attribute("decl", createBareDeclRef(TT->getOriginalDecl()));
+ if (TT->isTagOwned())
+ JOS.attribute("isTagOwned", true);
}
void JSONNodeDumper::VisitTemplateTypeParmType(
@@ -809,7 +816,7 @@ void JSONNodeDumper::VisitTemplateSpecializationType(
void JSONNodeDumper::VisitInjectedClassNameType(
const InjectedClassNameType *ICNT) {
- JOS.attribute("decl", createBareDeclRef(ICNT->getDecl()));
+ JOS.attribute("decl", createBareDeclRef(ICNT->getOriginalDecl()));
}
void JSONNodeDumper::VisitObjCInterfaceType(const ObjCInterfaceType *OIT) {
@@ -821,17 +828,6 @@ void JSONNodeDumper::VisitPackExpansionType(const PackExpansionType *PET) {
JOS.attribute("numExpansions", *N);
}
-void JSONNodeDumper::VisitElaboratedType(const ElaboratedType *ET) {
- if (const NestedNameSpecifier *NNS = ET->getQualifier()) {
- std::string Str;
- llvm::raw_string_ostream OS(Str);
- NNS->print(OS, PrintPolicy, /*ResolveTemplateArgs*/ true);
- JOS.attribute("qualifier", Str);
- }
- if (const TagDecl *TD = ET->getOwnedTagDecl())
- JOS.attribute("ownedTagDecl", createBareDeclRef(TD));
-}
-
void JSONNodeDumper::VisitMacroQualifiedType(const MacroQualifiedType *MQT) {
JOS.attribute("macroName", MQT->getMacroIdentifier()->getName());
}
@@ -902,9 +898,9 @@ void JSONNodeDumper::VisitNamespaceAliasDecl(const NamespaceAliasDecl *NAD) {
void JSONNodeDumper::VisitUsingDecl(const UsingDecl *UD) {
std::string Name;
- if (const NestedNameSpecifier *NNS = UD->getQualifier()) {
+ if (NestedNameSpecifier Qualifier = UD->getQualifier()) {
llvm::raw_string_ostream SOS(Name);
- NNS->print(SOS, UD->getASTContext().getPrintingPolicy());
+ Qualifier.print(SOS, UD->getASTContext().getPrintingPolicy());
}
Name += UD->getNameAsString();
JOS.attribute("name", Name);
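
With ElaboratedType folded away, VisitTagType reads the qualifier straight off the node, and the value-semantic NestedNameSpecifier is tested like an optional before printing. The same pattern outside the dumper (sketch; the policy is the caller's):

    #include "clang/AST/NestedNameSpecifier.h"
    #include "clang/AST/PrettyPrinter.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    using namespace clang;

    // Render a qualifier to a string; Kind::Null converts to false, so no
    // null-pointer check is needed anymore.
    static std::string qualifierToString(NestedNameSpecifier Qualifier,
                                         const PrintingPolicy &Policy) {
      std::string Str;
      if (Qualifier) {
        llvm::raw_string_ostream OS(Str);
        Qualifier.print(OS, Policy, /*ResolveTemplateArguments=*/true);
      }
      return Str;
    }
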
diff --git a/clang/lib/AST/MicrosoftMangle.cpp b/clang/lib/AST/MicrosoftMangle.cpp
index e6ea0ad..2ac38a2 100644
--- a/clang/lib/AST/MicrosoftMangle.cpp
+++ b/clang/lib/AST/MicrosoftMangle.cpp
@@ -1805,17 +1805,16 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
case TemplateArgument::Declaration: {
const NamedDecl *ND = TA.getAsDecl();
if (isa<FieldDecl>(ND) || isa<IndirectFieldDecl>(ND)) {
- mangleMemberDataPointer(cast<CXXRecordDecl>(ND->getDeclContext())
- ->getMostRecentNonInjectedDecl(),
- cast<ValueDecl>(ND),
- cast<NonTypeTemplateParmDecl>(Parm),
- TA.getParamTypeForDecl());
+ mangleMemberDataPointer(
+ cast<CXXRecordDecl>(ND->getDeclContext())->getMostRecentDecl(),
+ cast<ValueDecl>(ND), cast<NonTypeTemplateParmDecl>(Parm),
+ TA.getParamTypeForDecl());
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
if (MD && MD->isInstance()) {
- mangleMemberFunctionPointer(
- MD->getParent()->getMostRecentNonInjectedDecl(), MD,
- cast<NonTypeTemplateParmDecl>(Parm), TA.getParamTypeForDecl());
+ mangleMemberFunctionPointer(MD->getParent()->getMostRecentDecl(), MD,
+ cast<NonTypeTemplateParmDecl>(Parm),
+ TA.getParamTypeForDecl());
} else {
mangleFunctionPointer(FD, cast<NonTypeTemplateParmDecl>(Parm),
TA.getParamTypeForDecl());
@@ -2021,7 +2020,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
if (RD->isAnonymousStructOrUnion())
continue;
} else {
- ET = getASTContext().getRecordType(cast<CXXRecordDecl>(D));
+ ET = getASTContext().getCanonicalTagType(cast<CXXRecordDecl>(D));
// Bug in MSVC: fully qualified name of base class should be used for
// mangling to prevent collisions e.g. on base classes with same names
// in different namespaces.
@@ -3247,13 +3246,17 @@ void MicrosoftCXXNameMangler::mangleTagTypeKind(TagTypeKind TTK) {
}
void MicrosoftCXXNameMangler::mangleType(const EnumType *T, Qualifiers,
SourceRange) {
- mangleType(cast<TagType>(T)->getDecl());
+ mangleType(cast<TagType>(T)->getOriginalDecl());
}
void MicrosoftCXXNameMangler::mangleType(const RecordType *T, Qualifiers,
SourceRange) {
- mangleType(cast<TagType>(T)->getDecl());
+ mangleType(cast<TagType>(T)->getOriginalDecl());
}
void MicrosoftCXXNameMangler::mangleType(const TagDecl *TD) {
+ // MSVC chooses the tag kind of the definition if it exists, otherwise it
+ // always picks the first declaration.
+ const auto *Def = TD->getDefinition();
+ TD = Def ? Def : TD->getFirstDecl();
mangleTagTypeKind(TD->getTagKind());
mangleName(TD);
}
@@ -3384,6 +3387,11 @@ void MicrosoftCXXNameMangler::mangleType(const SubstTemplateTypeParmPackType *T,
Error(Range.getBegin(), "substituted parameter pack") << Range;
}
+void MicrosoftCXXNameMangler::mangleType(const SubstBuiltinTemplatePackType *T,
+ Qualifiers, SourceRange Range) {
+ Error(Range.getBegin(), "substituted builtin template pack") << Range;
+}
+
// <type> ::= <pointer-type>
// <pointer-type> ::= E? <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
// # the E is required for 64-bit non-static pointers
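
Per the new comment in mangleType(const TagDecl *) above, MSVC mangles with the definition's class-key when a definition exists and otherwise falls back to the first declaration. A small input exercising exactly that distinction (our example, not from the patch's tests):

    // With only the first declaration visible, S is mangled with the
    // 'struct' tag kind taken from that declaration...
    struct S;
    S *global_ptr;

    // ...but once this definition is seen, its 'class' key is what MSVC
    // (and now Clang) uses for mangling, despite the earlier 'struct'.
    class S { int x; };
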
diff --git a/clang/lib/AST/NestedNameSpecifier.cpp b/clang/lib/AST/NestedNameSpecifier.cpp
index 56f74b9..c6af91f 100644
--- a/clang/lib/AST/NestedNameSpecifier.cpp
+++ b/clang/lib/AST/NestedNameSpecifier.cpp
@@ -15,7 +15,6 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
-#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TemplateName.h"
@@ -35,250 +34,67 @@
using namespace clang;
-NestedNameSpecifier *
-NestedNameSpecifier::FindOrInsert(const ASTContext &Context,
- const NestedNameSpecifier &Mockup) {
+const NamespaceAndPrefixStorage *
+NestedNameSpecifier::MakeNamespaceAndPrefixStorage(
+ const ASTContext &Ctx, const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix) {
llvm::FoldingSetNodeID ID;
- Mockup.Profile(ID);
+ NamespaceAndPrefixStorage::Profile(ID, Namespace, Prefix);
void *InsertPos = nullptr;
- NestedNameSpecifier *NNS
- = Context.NestedNameSpecifiers.FindNodeOrInsertPos(ID, InsertPos);
- if (!NNS) {
- NNS =
- new (Context, alignof(NestedNameSpecifier)) NestedNameSpecifier(Mockup);
- Context.NestedNameSpecifiers.InsertNode(NNS, InsertPos);
+ NamespaceAndPrefixStorage *S =
+ Ctx.NamespaceAndPrefixStorages.FindNodeOrInsertPos(ID, InsertPos);
+ if (!S) {
+ S = new (Ctx, alignof(NamespaceAndPrefixStorage))
+ NamespaceAndPrefixStorage(Namespace, Prefix);
+ Ctx.NamespaceAndPrefixStorages.InsertNode(S, InsertPos);
}
-
- return NNS;
-}
-
-NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context,
- NestedNameSpecifier *Prefix,
- const IdentifierInfo *II) {
- assert(II && "Identifier cannot be NULL");
- assert((!Prefix || Prefix->isDependent()) && "Prefix must be dependent");
-
- NestedNameSpecifier Mockup;
- Mockup.Prefix.setPointer(Prefix);
- Mockup.Prefix.setInt(StoredIdentifier);
- Mockup.Specifier = const_cast<IdentifierInfo *>(II);
- return FindOrInsert(Context, Mockup);
-}
-
-NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context,
- NestedNameSpecifier *Prefix,
- const NamespaceBaseDecl *NS) {
- assert(NS && "Namespace cannot be NULL");
- assert((!Prefix ||
- (Prefix->getAsType() == nullptr &&
- Prefix->getAsIdentifier() == nullptr)) &&
- "Broken nested name specifier");
- NestedNameSpecifier Mockup;
- Mockup.Prefix.setPointer(Prefix);
- Mockup.Prefix.setInt(StoredDecl);
- Mockup.Specifier = const_cast<NamespaceBaseDecl *>(NS);
- return FindOrInsert(Context, Mockup);
-}
-
-NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context,
- NestedNameSpecifier *Prefix,
- const Type *T) {
- assert(T && "Type cannot be NULL");
- NestedNameSpecifier Mockup;
- Mockup.Prefix.setPointer(Prefix);
- Mockup.Prefix.setInt(StoredTypeSpec);
- Mockup.Specifier = const_cast<Type*>(T);
- return FindOrInsert(Context, Mockup);
-}
-
-NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context,
- const IdentifierInfo *II) {
- assert(II && "Identifier cannot be NULL");
- NestedNameSpecifier Mockup;
- Mockup.Prefix.setPointer(nullptr);
- Mockup.Prefix.setInt(StoredIdentifier);
- Mockup.Specifier = const_cast<IdentifierInfo *>(II);
- return FindOrInsert(Context, Mockup);
-}
-
-NestedNameSpecifier *
-NestedNameSpecifier::GlobalSpecifier(const ASTContext &Context) {
- if (!Context.GlobalNestedNameSpecifier)
- Context.GlobalNestedNameSpecifier =
- new (Context, alignof(NestedNameSpecifier)) NestedNameSpecifier();
- return Context.GlobalNestedNameSpecifier;
-}
-
-NestedNameSpecifier *
-NestedNameSpecifier::SuperSpecifier(const ASTContext &Context,
- CXXRecordDecl *RD) {
- NestedNameSpecifier Mockup;
- Mockup.Prefix.setPointer(nullptr);
- Mockup.Prefix.setInt(StoredDecl);
- Mockup.Specifier = RD;
- return FindOrInsert(Context, Mockup);
+ return S;
}
-NestedNameSpecifier::SpecifierKind NestedNameSpecifier::getKind() const {
- if (!Specifier)
- return Global;
-
- switch (Prefix.getInt()) {
- case StoredIdentifier:
- return Identifier;
-
- case StoredDecl: {
- NamedDecl *ND = static_cast<NamedDecl *>(Specifier);
- return isa<CXXRecordDecl>(ND) ? Super : Namespace;
- }
-
- case StoredTypeSpec:
- return TypeSpec;
- }
-
- llvm_unreachable("Invalid NNS Kind!");
-}
-
-/// Retrieve the namespace or namespace alias stored in this nested name
-/// specifier.
-NamespaceBaseDecl *NestedNameSpecifier::getAsNamespace() const {
- if (Prefix.getInt() == StoredDecl)
- return dyn_cast<NamespaceBaseDecl>(static_cast<NamedDecl *>(Specifier));
-
- return nullptr;
-}
-
-/// Retrieve the record declaration stored in this nested name specifier.
-CXXRecordDecl *NestedNameSpecifier::getAsRecordDecl() const {
- switch (Prefix.getInt()) {
- case StoredIdentifier:
- return nullptr;
-
- case StoredDecl:
- return dyn_cast<CXXRecordDecl>(static_cast<NamedDecl *>(Specifier));
-
- case StoredTypeSpec:
- return getAsType()->getAsCXXRecordDecl();
+bool NestedNameSpecifier::isFullyQualified() const {
+ switch (getKind()) {
+ case NestedNameSpecifier::Kind::Global:
+ return true;
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ return false;
+ case NestedNameSpecifier::Kind::Namespace:
+ return getAsNamespaceAndPrefix().Prefix.isFullyQualified();
+ case NestedNameSpecifier::Kind::Type:
+ return getAsType()->getPrefix().isFullyQualified();
}
-
llvm_unreachable("Invalid NNS Kind!");
}
NestedNameSpecifierDependence NestedNameSpecifier::getDependence() const {
switch (getKind()) {
- case Identifier: {
- // Identifier specifiers always represent dependent types
- auto F = NestedNameSpecifierDependence::Dependent |
- NestedNameSpecifierDependence::Instantiation;
- // Prefix can contain unexpanded template parameters.
- if (getPrefix())
- return F | getPrefix()->getDependence();
- return F;
- }
-
- case Namespace:
- case Global:
- return NestedNameSpecifierDependence::None;
-
- case Super: {
- CXXRecordDecl *RD = static_cast<CXXRecordDecl *>(Specifier);
- for (const auto &Base : RD->bases())
- if (Base.getType()->isDependentType())
- // FIXME: must also be instantiation-dependent.
- return NestedNameSpecifierDependence::Dependent;
+ case Kind::Null:
+ case Kind::Global:
+ case Kind::Namespace:
return NestedNameSpecifierDependence::None;
+ case Kind::MicrosoftSuper: {
+ CXXRecordDecl *RD = getAsMicrosoftSuper();
+ return RD->isDependentContext()
+ ? NestedNameSpecifierDependence::DependentInstantiation |
+ NestedNameSpecifierDependence::Dependent
+ : NestedNameSpecifierDependence::None;
}
-
- case TypeSpec: {
- NestedNameSpecifierDependence Dep =
- toNestedNameSpecifierDependendence(getAsType()->getDependence());
- if (NestedNameSpecifier *Prefix = getPrefix())
- Dep |=
- Prefix->getDependence() & ~NestedNameSpecifierDependence::Dependent;
- return Dep;
- }
+ case Kind::Type:
+ return toNestedNameSpecifierDependence(getAsType()->getDependence());
}
llvm_unreachable("Invalid NNS Kind!");
}
-bool NestedNameSpecifier::isDependent() const {
- return getDependence() & NestedNameSpecifierDependence::Dependent;
-}
-
-bool NestedNameSpecifier::isInstantiationDependent() const {
- return getDependence() & NestedNameSpecifierDependence::Instantiation;
-}
-
-bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
- return getDependence() & NestedNameSpecifierDependence::UnexpandedPack;
-}
-
-bool NestedNameSpecifier::containsErrors() const {
- return getDependence() & NestedNameSpecifierDependence::Error;
-}
-
-const Type *
-NestedNameSpecifier::translateToType(const ASTContext &Context) const {
- NestedNameSpecifier *Prefix = getPrefix();
- switch (getKind()) {
- case SpecifierKind::Identifier:
- return Context
- .getDependentNameType(ElaboratedTypeKeyword::None, Prefix,
- getAsIdentifier())
- .getTypePtr();
- case SpecifierKind::TypeSpec: {
- const Type *T = getAsType();
- switch (T->getTypeClass()) {
- case Type::DependentTemplateSpecialization: {
- const auto *DT = cast<DependentTemplateSpecializationType>(T);
- const DependentTemplateStorage &DTN = DT->getDependentTemplateName();
- return Context
- .getDependentTemplateSpecializationType(
- ElaboratedTypeKeyword::None,
- {Prefix, DTN.getName(), DTN.hasTemplateKeyword()},
- DT->template_arguments())
- .getTypePtr();
- }
- case Type::Record:
- case Type::TemplateSpecialization:
- case Type::Using:
- case Type::Enum:
- case Type::Typedef:
- case Type::UnresolvedUsing:
- return Context
- .getElaboratedType(ElaboratedTypeKeyword::None, Prefix,
- QualType(T, 0))
- .getTypePtr();
- default:
- assert(Prefix == nullptr && "unexpected type with elaboration");
- return T;
- }
- }
- case SpecifierKind::Global:
- case SpecifierKind::Namespace:
- case SpecifierKind::Super:
- // These are not representable as types.
- return nullptr;
- }
- llvm_unreachable("Unhandled SpecifierKind enum");
-}
-
/// Print this nested name specifier to the given output
/// stream.
void NestedNameSpecifier::print(raw_ostream &OS, const PrintingPolicy &Policy,
bool ResolveTemplateArguments,
bool PrintFinalScopeResOp) const {
- if (getPrefix())
- getPrefix()->print(OS, Policy);
-
switch (getKind()) {
- case Identifier:
- OS << getAsIdentifier()->getName();
- break;
-
- case Namespace: {
- NamespaceBaseDecl *Namespace = getAsNamespace();
+ case Kind::Namespace: {
+ auto [Namespace, Prefix] = getAsNamespaceAndPrefix();
+ Prefix.print(OS, Policy);
if (const auto *NS = dyn_cast<NamespaceDecl>(Namespace)) {
assert(!NS->isAnonymousNamespace());
OS << NS->getName();
@@ -287,134 +103,49 @@ void NestedNameSpecifier::print(raw_ostream &OS, const PrintingPolicy &Policy,
}
break;
}
-
- case Global:
+ case Kind::Global:
OS << "::";
return;
-
- case Super:
+ case Kind::MicrosoftSuper:
OS << "__super";
break;
-
- case TypeSpec: {
+ case Kind::Type: {
PrintingPolicy InnerPolicy(Policy);
- InnerPolicy.SuppressScope = true;
InnerPolicy.SuppressTagKeyword = true;
QualType(getAsType(), 0).print(OS, InnerPolicy);
break;
}
+ case Kind::Null:
+ return;
}
-
if (PrintFinalScopeResOp)
OS << "::";
}
-LLVM_DUMP_METHOD void NestedNameSpecifier::dump(const LangOptions &LO) const {
- dump(llvm::errs(), LO);
+LLVM_DUMP_METHOD void NestedNameSpecifier::dump(llvm::raw_ostream *OS,
+ const LangOptions *LO) const {
+ print(OS ? *OS : llvm::errs(), LO ? *LO : LangOptions());
}
-LLVM_DUMP_METHOD void NestedNameSpecifier::dump() const { dump(llvm::errs()); }
-
+LLVM_DUMP_METHOD void NestedNameSpecifier::dump(const LangOptions &LO) const {
+ dump(/*OS=*/nullptr, &LO);
+}
LLVM_DUMP_METHOD void NestedNameSpecifier::dump(llvm::raw_ostream &OS) const {
- LangOptions LO;
- dump(OS, LO);
+ dump(&OS);
}
-
LLVM_DUMP_METHOD void NestedNameSpecifier::dump(llvm::raw_ostream &OS,
const LangOptions &LO) const {
- print(OS, PrintingPolicy(LO));
-}
-
-unsigned
-NestedNameSpecifierLoc::getLocalDataLength(NestedNameSpecifier *Qualifier) {
- assert(Qualifier && "Expected a non-NULL qualifier");
-
- // Location of the trailing '::'.
- unsigned Length = sizeof(SourceLocation::UIntTy);
-
- switch (Qualifier->getKind()) {
- case NestedNameSpecifier::Global:
- // Nothing more to add.
- break;
-
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::Super:
- // The location of the identifier or namespace name.
- Length += sizeof(SourceLocation::UIntTy);
- break;
-
- case NestedNameSpecifier::TypeSpec:
- // The "void*" that points at the TypeLoc data.
- // Note: the 'template' keyword is part of the TypeLoc.
- Length += sizeof(void *);
- break;
- }
-
- return Length;
-}
-
-unsigned
-NestedNameSpecifierLoc::getDataLength(NestedNameSpecifier *Qualifier) {
- unsigned Length = 0;
- for (; Qualifier; Qualifier = Qualifier->getPrefix())
- Length += getLocalDataLength(Qualifier);
- return Length;
-}
-
-/// Load a (possibly unaligned) source location from a given address
-/// and offset.
-static SourceLocation LoadSourceLocation(void *Data, unsigned Offset) {
- SourceLocation::UIntTy Raw;
- memcpy(&Raw, static_cast<char *>(Data) + Offset, sizeof(Raw));
- return SourceLocation::getFromRawEncoding(Raw);
+ dump(&OS, &LO);
}
-/// Load a (possibly unaligned) pointer from a given address and
-/// offset.
-static void *LoadPointer(void *Data, unsigned Offset) {
- void *Result;
- memcpy(&Result, static_cast<char *>(Data) + Offset, sizeof(void*));
- return Result;
-}
-
-SourceRange NestedNameSpecifierLoc::getLocalSourceRange() const {
+SourceLocation NestedNameSpecifierLoc::getBeginLoc() const {
if (!Qualifier)
- return SourceRange();
-
- unsigned Offset = getDataLength(Qualifier->getPrefix());
- switch (Qualifier->getKind()) {
- case NestedNameSpecifier::Global:
- return LoadSourceLocation(Data, Offset);
-
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::Super:
- return SourceRange(
- LoadSourceLocation(Data, Offset),
- LoadSourceLocation(Data, Offset + sizeof(SourceLocation::UIntTy)));
-
- case NestedNameSpecifier::TypeSpec: {
- // The "void*" that points at the TypeLoc data.
- // Note: the 'template' keyword is part of the TypeLoc.
- void *TypeData = LoadPointer(Data, Offset);
- TypeLoc TL(Qualifier->getAsType(), TypeData);
- return SourceRange(TL.getBeginLoc(),
- LoadSourceLocation(Data, Offset + sizeof(void*)));
- }
- }
+ return SourceLocation();
- llvm_unreachable("Invalid NNS Kind!");
-}
-
-TypeLoc NestedNameSpecifierLoc::getTypeLoc() const {
- if (Qualifier->getKind() != NestedNameSpecifier::TypeSpec)
- return TypeLoc();
-
- // The "void*" that points at the TypeLoc data.
- unsigned Offset = getDataLength(Qualifier->getPrefix());
- void *TypeData = LoadPointer(Data, Offset);
- return TypeLoc(Qualifier->getAsType(), TypeData);
+ NestedNameSpecifierLoc First = *this;
+ while (NestedNameSpecifierLoc Prefix = First.getAsNamespaceAndPrefix().Prefix)
+ First = Prefix;
+ return First.getLocalSourceRange().getBegin();
}
static void Append(char *Start, char *End, char *&Buffer, unsigned &BufferSize,
@@ -516,10 +247,10 @@ operator=(const NestedNameSpecifierLocBuilder &Other) {
return *this;
}
-void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context, TypeLoc TL,
- SourceLocation ColonColonLoc) {
- Representation =
- NestedNameSpecifier::Create(Context, Representation, TL.getTypePtr());
+void NestedNameSpecifierLocBuilder::Make(ASTContext &Context, TypeLoc TL,
+ SourceLocation ColonColonLoc) {
+ assert(!Representation);
+ Representation = NestedNameSpecifier(TL.getTypePtr());
// Push source-location info into the buffer.
SavePointer(TL.getOpaqueData(), Buffer, BufferSize, BufferCapacity);
@@ -527,23 +258,10 @@ void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context, TypeLoc TL,
}
void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
- IdentifierInfo *Identifier,
- SourceLocation IdentifierLoc,
- SourceLocation ColonColonLoc) {
- Representation = NestedNameSpecifier::Create(Context, Representation,
- Identifier);
-
- // Push source-location info into the buffer.
- SaveSourceLocation(IdentifierLoc, Buffer, BufferSize, BufferCapacity);
- SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
-}
-
-void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
- NamespaceBaseDecl *Namespace,
+ const NamespaceBaseDecl *Namespace,
SourceLocation NamespaceLoc,
SourceLocation ColonColonLoc) {
- Representation = NestedNameSpecifier::Create(Context, Representation,
- Namespace);
+ Representation = NestedNameSpecifier(Context, Namespace, Representation);
// Push source-location info into the buffer.
SaveSourceLocation(NamespaceLoc, Buffer, BufferSize, BufferCapacity);
@@ -553,60 +271,48 @@ void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
void NestedNameSpecifierLocBuilder::MakeGlobal(ASTContext &Context,
SourceLocation ColonColonLoc) {
assert(!Representation && "Already have a nested-name-specifier!?");
- Representation = NestedNameSpecifier::GlobalSpecifier(Context);
+ Representation = NestedNameSpecifier::getGlobal();
// Push source-location info into the buffer.
SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
}
-void NestedNameSpecifierLocBuilder::MakeSuper(ASTContext &Context,
- CXXRecordDecl *RD,
- SourceLocation SuperLoc,
- SourceLocation ColonColonLoc) {
- Representation = NestedNameSpecifier::SuperSpecifier(Context, RD);
+void NestedNameSpecifierLocBuilder::MakeMicrosoftSuper(
+ ASTContext &Context, CXXRecordDecl *RD, SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier(RD);
// Push source-location info into the buffer.
SaveSourceLocation(SuperLoc, Buffer, BufferSize, BufferCapacity);
SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
}
-void NestedNameSpecifierLocBuilder::MakeTrivial(ASTContext &Context,
- NestedNameSpecifier *Qualifier,
+void NestedNameSpecifierLocBuilder::PushTrivial(ASTContext &Context,
+ NestedNameSpecifier Qualifier,
SourceRange R) {
- Representation = Qualifier;
-
// Construct bogus (but well-formed) source information for the
// nested-name-specifier.
- BufferSize = 0;
- SmallVector<NestedNameSpecifier *, 4> Stack;
- for (NestedNameSpecifier *NNS = Qualifier; NNS; NNS = NNS->getPrefix())
- Stack.push_back(NNS);
- while (!Stack.empty()) {
- NestedNameSpecifier *NNS = Stack.pop_back_val();
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Namespace:
- SaveSourceLocation(R.getBegin(), Buffer, BufferSize, BufferCapacity);
- break;
-
- case NestedNameSpecifier::TypeSpec: {
- TypeSourceInfo *TSInfo
- = Context.getTrivialTypeSourceInfo(QualType(NNS->getAsType(), 0),
- R.getBegin());
- SavePointer(TSInfo->getTypeLoc().getOpaqueData(), Buffer, BufferSize,
- BufferCapacity);
- break;
- }
-
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
- break;
- }
-
- // Save the location of the '::'.
- SaveSourceLocation(Stack.empty()? R.getEnd() : R.getBegin(),
- Buffer, BufferSize, BufferCapacity);
+ switch (Qualifier.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ return;
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [_1, Prefix] = Qualifier.getAsNamespaceAndPrefix();
+ PushTrivial(Context, Prefix, R.getBegin());
+ SaveSourceLocation(R.getBegin(), Buffer, BufferSize, BufferCapacity);
+ break;
+ }
+ case NestedNameSpecifier::Kind::Type: {
+ TypeSourceInfo *TSInfo = Context.getTrivialTypeSourceInfo(
+ QualType(Qualifier.getAsType(), 0), R.getBegin());
+ SavePointer(TSInfo->getTypeLoc().getOpaqueData(), Buffer, BufferSize,
+ BufferCapacity);
+ break;
+ }
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ break;
}
+ SaveSourceLocation(R.getEnd(), Buffer, BufferSize, BufferCapacity);
}
void NestedNameSpecifierLocBuilder::Adopt(NestedNameSpecifierLoc Other) {
@@ -614,7 +320,7 @@ void NestedNameSpecifierLocBuilder::Adopt(NestedNameSpecifierLoc Other) {
free(Buffer);
if (!Other) {
- Representation = nullptr;
+ Representation = std::nullopt;
BufferSize = 0;
return;
}
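
The rewritten NestedNameSpecifier is a tagged value: the namespace form chains through uniqued NamespaceAndPrefixStorage, and every other form terminates the chain. Constructing and walking one under the new API (sketch; Ctx and NS are assumed to be in hand):

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/NestedNameSpecifier.h"

    using namespace clang;

    // Build '::NS::' as a value; uniquing happens inside the ASTContext.
    static NestedNameSpecifier buildQualifier(const ASTContext &Ctx,
                                              const NamespaceBaseDecl *NS) {
      return NestedNameSpecifier(Ctx, NS, NestedNameSpecifier::getGlobal());
    }

    // Count the namespace components by walking outward through prefixes;
    // the chain ends at a global, type, __super, or null specifier.
    static unsigned namespaceDepth(NestedNameSpecifier NNS) {
      unsigned N = 0;
      while (NNS.getKind() == NestedNameSpecifier::Kind::Namespace) {
        ++N;
        NNS = NNS.getAsNamespaceAndPrefix().Prefix;
      }
      return N;
    }
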
diff --git a/clang/lib/AST/ODRHash.cpp b/clang/lib/AST/ODRHash.cpp
index bd87d44..fb95f58 100644
--- a/clang/lib/AST/ODRHash.cpp
+++ b/clang/lib/AST/ODRHash.cpp
@@ -111,34 +111,28 @@ void ODRHash::AddDeclarationNameInfoImpl(DeclarationNameInfo NameInfo) {
}
}
-void ODRHash::AddNestedNameSpecifier(const NestedNameSpecifier *NNS) {
- assert(NNS && "Expecting non-null pointer.");
- const auto *Prefix = NNS->getPrefix();
- AddBoolean(Prefix);
- if (Prefix) {
- AddNestedNameSpecifier(Prefix);
- }
- auto Kind = NNS->getKind();
- ID.AddInteger(Kind);
+void ODRHash::AddNestedNameSpecifier(NestedNameSpecifier NNS) {
+ auto Kind = NNS.getKind();
+ ID.AddInteger(llvm::to_underlying(Kind));
switch (Kind) {
- case NestedNameSpecifier::Identifier:
- AddIdentifierInfo(NNS->getAsIdentifier());
- break;
- case NestedNameSpecifier::Namespace:
- AddDecl(NNS->getAsNamespace());
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = NNS.getAsNamespaceAndPrefix();
+ AddDecl(Namespace);
+ AddNestedNameSpecifier(Prefix);
break;
- case NestedNameSpecifier::TypeSpec:
- AddType(NNS->getAsType());
+ }
+ case NestedNameSpecifier::Kind::Type:
+ AddType(NNS.getAsType());
break;
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
break;
}
}
void ODRHash::AddDependentTemplateName(const DependentTemplateStorage &Name) {
- if (NestedNameSpecifier *NNS = Name.getQualifier())
- AddNestedNameSpecifier(NNS);
+ AddNestedNameSpecifier(Name.getQualifier());
if (IdentifierOrOverloadedOperator IO = Name.getName();
const IdentifierInfo *II = IO.getIdentifier())
AddIdentifierInfo(II);
@@ -156,8 +150,7 @@ void ODRHash::AddTemplateName(TemplateName Name) {
break;
case TemplateName::QualifiedTemplate: {
QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName();
- if (NestedNameSpecifier *NNS = QTN->getQualifier())
- AddNestedNameSpecifier(NNS);
+ AddNestedNameSpecifier(QTN->getQualifier());
AddBoolean(QTN->hasTemplateKeyword());
AddTemplateName(QTN->getUnderlyingTemplate());
break;
@@ -889,11 +882,8 @@ public:
}
}
- void AddNestedNameSpecifier(const NestedNameSpecifier *NNS) {
- Hash.AddBoolean(NNS);
- if (NNS) {
- Hash.AddNestedNameSpecifier(NNS);
- }
+ void AddNestedNameSpecifier(NestedNameSpecifier NNS) {
+ Hash.AddNestedNameSpecifier(NNS);
}
void AddIdentifierInfo(const IdentifierInfo *II) {
@@ -907,52 +897,33 @@ public:
ID.AddInteger(Quals.getAsOpaqueValue());
}
- // Return the RecordType if the typedef only strips away a keyword.
- // Otherwise, return the original type.
- static const Type *RemoveTypedef(const Type *T) {
+ // Handle typedefs which only strip away a keyword.
+ bool handleTypedef(const Type *T) {
const auto *TypedefT = dyn_cast<TypedefType>(T);
- if (!TypedefT) {
- return T;
- }
-
- const TypedefNameDecl *D = TypedefT->getDecl();
- QualType UnderlyingType = D->getUnderlyingType();
-
- if (UnderlyingType.hasLocalQualifiers()) {
- return T;
- }
-
- const auto *ElaboratedT = dyn_cast<ElaboratedType>(UnderlyingType);
- if (!ElaboratedT) {
- return T;
- }
+ if (!TypedefT)
+ return false;
- if (ElaboratedT->getQualifier() != nullptr) {
- return T;
- }
+ QualType UnderlyingType = TypedefT->desugar();
- QualType NamedType = ElaboratedT->getNamedType();
- if (NamedType.hasLocalQualifiers()) {
- return T;
- }
+ if (UnderlyingType.hasLocalQualifiers())
+ return false;
- const auto *RecordT = dyn_cast<RecordType>(NamedType);
- if (!RecordT) {
- return T;
- }
+ const auto *TagT = dyn_cast<TagType>(UnderlyingType);
+ if (!TagT || TagT->getQualifier())
+ return false;
- const IdentifierInfo *TypedefII = TypedefT->getDecl()->getIdentifier();
- const IdentifierInfo *RecordII = RecordT->getDecl()->getIdentifier();
- if (!TypedefII || !RecordII ||
- TypedefII->getName() != RecordII->getName()) {
- return T;
- }
+ if (TypedefT->getDecl()->getIdentifier() !=
+ TagT->getOriginalDecl()->getIdentifier())
+ return false;
- return RecordT;
+ ID.AddInteger(TagT->getTypeClass());
+ VisitTagType(TagT, /*ElaboratedOverride=*/TypedefT);
+ return true;
}
void Visit(const Type *T) {
- T = RemoveTypedef(T);
+ if (handleTypedef(T))
+ return;
ID.AddInteger(T->getTypeClass());
Inherited::Visit(T);
}
@@ -1088,7 +1059,7 @@ public:
}
void VisitInjectedClassNameType(const InjectedClassNameType *T) {
- AddDecl(T->getDecl());
+ AddDecl(T->getOriginalDecl()->getDefinitionOrSelf());
VisitType(T);
}
@@ -1186,14 +1157,17 @@ public:
VisitType(T);
}
- void VisitTagType(const TagType *T) {
- AddDecl(T->getDecl());
+ void VisitTagType(const TagType *T,
+ const TypedefType *ElaboratedOverride = nullptr) {
+ ID.AddInteger(llvm::to_underlying(
+ ElaboratedOverride ? ElaboratedTypeKeyword::None : T->getKeyword()));
+ AddNestedNameSpecifier(ElaboratedOverride
+ ? ElaboratedOverride->getQualifier()
+ : T->getQualifier());
+ AddDecl(T->getOriginalDecl()->getDefinitionOrSelf());
VisitType(T);
}
- void VisitRecordType(const RecordType *T) { VisitTagType(T); }
- void VisitEnumType(const EnumType *T) { VisitTagType(T); }
-
void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
ID.AddInteger(T->template_arguments().size());
for (const auto &TA : T->template_arguments()) {
@@ -1211,6 +1185,8 @@ public:
}
void VisitTypedefType(const TypedefType *T) {
+ ID.AddInteger(llvm::to_underlying(T->getKeyword()));
+ AddNestedNameSpecifier(T->getQualifier());
AddDecl(T->getDecl());
VisitType(T);
}
@@ -1247,12 +1223,6 @@ public:
VisitTypeWithKeyword(T);
}
- void VisitElaboratedType(const ElaboratedType *T) {
- AddNestedNameSpecifier(T->getQualifier());
- AddQualType(T->getNamedType());
- VisitTypeWithKeyword(T);
- }
-
void VisitUnaryTransformType(const UnaryTransformType *T) {
AddQualType(T->getUnderlyingType());
AddQualType(T->getBaseType());
@@ -1330,7 +1300,7 @@ void ODRHash::AddStructuralValue(const APValue &Value) {
TypeSoFar = FD->getType();
} else {
TypeSoFar =
- D->getASTContext().getRecordType(cast<CXXRecordDecl>(D));
+ D->getASTContext().getCanonicalTagType(cast<CXXRecordDecl>(D));
}
}
}
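
handleTypedef exists so that the classic C idiom of naming a tag and an alias identically hashes as one type: the alias must add no qualifier, carry no local qualifiers, and reuse the tag's own identifier. The shape it recognizes (our illustration):

    struct Node { int v; };
    typedef struct Node Node;  // hashes like 'struct Node', not as new sugar
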
diff --git a/clang/lib/AST/OpenACCClause.cpp b/clang/lib/AST/OpenACCClause.cpp
index fe20004..9a9ede4 100644
--- a/clang/lib/AST/OpenACCClause.cpp
+++ b/clang/lib/AST/OpenACCClause.cpp
@@ -506,11 +506,13 @@ OpenACCDeviceTypeClause *OpenACCDeviceTypeClause::Create(
OpenACCReductionClause *OpenACCReductionClause::Create(
const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc,
OpenACCReductionOperator Operator, ArrayRef<Expr *> VarList,
+ ArrayRef<OpenACCReductionRecipe> Recipes,
SourceLocation EndLoc) {
void *Mem = C.Allocate(
- OpenACCReductionClause::totalSizeToAlloc<Expr *>(VarList.size()));
- return new (Mem)
- OpenACCReductionClause(BeginLoc, LParenLoc, Operator, VarList, EndLoc);
+ OpenACCReductionClause::totalSizeToAlloc<Expr *, OpenACCReductionRecipe>(
+ VarList.size(), Recipes.size()));
+ return new (Mem) OpenACCReductionClause(BeginLoc, LParenLoc, Operator,
+ VarList, Recipes, EndLoc);
}
OpenACCAutoClause *OpenACCAutoClause::Create(const ASTContext &C,
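
The reduction clause now tail-allocates two arrays, which follows the usual llvm::TrailingObjects recipe: size every trailing array in totalSizeToAlloc, then placement-new into the single buffer. A generic sketch of that recipe (types and names are illustrative, not the OpenACC ones):

    #include "llvm/Support/TrailingObjects.h"
    #include <cstddef>
    #include <new>

    struct Rec { int Id; };

    class Clause final : private llvm::TrailingObjects<Clause, int, Rec> {
      friend TrailingObjects;
      unsigned NumInts, NumRecs;
      // Required for every trailing type except the last one.
      size_t numTrailingObjects(OverloadToken<int>) const { return NumInts; }

    public:
      Clause(unsigned NI, unsigned NR) : NumInts(NI), NumRecs(NR) {}
      static Clause *create(unsigned NI, unsigned NR) {
        // One allocation sized for the object plus both trailing arrays.
        void *Mem = ::operator new(totalSizeToAlloc<int, Rec>(NI, NR));
        return new (Mem) Clause(NI, NR);
      }
      int *ints() { return getTrailingObjects<int>(); }
      Rec *recs() { return getTrailingObjects<Rec>(); }
    };

OpenACCReductionClause::Create above follows the same shape, with Expr * and OpenACCReductionRecipe as the two trailing arrays.
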
diff --git a/clang/lib/AST/OpenMPClause.cpp b/clang/lib/AST/OpenMPClause.cpp
index de8b599..0930ca2 100644
--- a/clang/lib/AST/OpenMPClause.cpp
+++ b/clang/lib/AST/OpenMPClause.cpp
@@ -104,6 +104,8 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
return static_cast<const OMPFilterClause *>(C);
case OMPC_ompx_dyn_cgroup_mem:
return static_cast<const OMPXDynCGroupMemClause *>(C);
+ case OMPC_message:
+ return static_cast<const OMPMessageClause *>(C);
case OMPC_default:
case OMPC_proc_bind:
case OMPC_safelen:
@@ -158,7 +160,6 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_self_maps:
case OMPC_at:
case OMPC_severity:
- case OMPC_message:
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
@@ -1963,8 +1964,10 @@ void OMPClausePrinter::VisitOMPSeverityClause(OMPSeverityClause *Node) {
}
void OMPClausePrinter::VisitOMPMessageClause(OMPMessageClause *Node) {
- OS << "message(\""
- << cast<StringLiteral>(Node->getMessageString())->getString() << "\")";
+ OS << "message(";
+ if (Expr *E = Node->getMessageString())
+ E->printPretty(OS, nullptr, Policy);
+ OS << ")";
}
void OMPClausePrinter::VisitOMPScheduleClause(OMPScheduleClause *Node) {
@@ -2350,17 +2353,16 @@ void OMPClausePrinter::VisitOMPReductionClause(OMPReductionClause *Node) {
if (Node->getModifierLoc().isValid())
OS << getOpenMPSimpleClauseTypeName(OMPC_reduction, Node->getModifier())
<< ", ";
- NestedNameSpecifier *QualifierLoc =
+ NestedNameSpecifier Qualifier =
Node->getQualifierLoc().getNestedNameSpecifier();
OverloadedOperatorKind OOK =
Node->getNameInfo().getName().getCXXOverloadedOperator();
- if (QualifierLoc == nullptr && OOK != OO_None) {
+ if (!Qualifier && OOK != OO_None) {
// Print reduction identifier in C format
OS << getOperatorSpelling(OOK);
} else {
// Use C++ format
- if (QualifierLoc != nullptr)
- QualifierLoc->print(OS, Policy);
+ Qualifier.print(OS, Policy);
OS << Node->getNameInfo();
}
OS << ":";
@@ -2373,17 +2375,16 @@ void OMPClausePrinter::VisitOMPTaskReductionClause(
OMPTaskReductionClause *Node) {
if (!Node->varlist_empty()) {
OS << "task_reduction(";
- NestedNameSpecifier *QualifierLoc =
+ NestedNameSpecifier Qualifier =
Node->getQualifierLoc().getNestedNameSpecifier();
OverloadedOperatorKind OOK =
Node->getNameInfo().getName().getCXXOverloadedOperator();
- if (QualifierLoc == nullptr && OOK != OO_None) {
+ if (!Qualifier && OOK != OO_None) {
// Print reduction identifier in C format
OS << getOperatorSpelling(OOK);
} else {
// Use C++ format
- if (QualifierLoc != nullptr)
- QualifierLoc->print(OS, Policy);
+ Qualifier.print(OS, Policy);
OS << Node->getNameInfo();
}
OS << ":";
@@ -2395,17 +2396,16 @@ void OMPClausePrinter::VisitOMPTaskReductionClause(
void OMPClausePrinter::VisitOMPInReductionClause(OMPInReductionClause *Node) {
if (!Node->varlist_empty()) {
OS << "in_reduction(";
- NestedNameSpecifier *QualifierLoc =
+ NestedNameSpecifier Qualifier =
Node->getQualifierLoc().getNestedNameSpecifier();
OverloadedOperatorKind OOK =
Node->getNameInfo().getName().getCXXOverloadedOperator();
- if (QualifierLoc == nullptr && OOK != OO_None) {
+ if (!Qualifier && OOK != OO_None) {
// Print reduction identifier in C format
OS << getOperatorSpelling(OOK);
} else {
// Use C++ format
- if (QualifierLoc != nullptr)
- QualifierLoc->print(OS, Policy);
+ Qualifier.print(OS, Policy);
OS << Node->getNameInfo();
}
OS << ":";
@@ -2508,10 +2508,9 @@ template <typename T>
static void PrintMapper(raw_ostream &OS, T *Node,
const PrintingPolicy &Policy) {
OS << '(';
- NestedNameSpecifier *MapperNNS =
+ NestedNameSpecifier MapperNNS =
Node->getMapperQualifierLoc().getNestedNameSpecifier();
- if (MapperNNS)
- MapperNNS->print(OS, Policy);
+ MapperNNS.print(OS, Policy);
OS << Node->getMapperIdInfo() << ')';
}
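
The reduction and mapper printers can drop their null checks because the rewritten NestedNameSpecifier::print returns immediately for Kind::Null. The pattern in isolation (sketch):

    #include "clang/AST/DeclarationName.h"
    #include "clang/AST/NestedNameSpecifier.h"
    #include "clang/AST/PrettyPrinter.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang;

    // Prints 'a::b::name' when Q is non-null and just 'name' otherwise; no
    // null check is needed on the caller's side anymore.
    static void printQualifiedName(llvm::raw_ostream &OS,
                                   NestedNameSpecifier Q,
                                   const DeclarationNameInfo &Name,
                                   const PrintingPolicy &Policy) {
      Q.print(OS, Policy);  // no-op for a null specifier
      OS << Name;
    }
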
diff --git a/clang/lib/AST/ParentMapContext.cpp b/clang/lib/AST/ParentMapContext.cpp
index 68dfe4d..acc011c 100644
--- a/clang/lib/AST/ParentMapContext.cpp
+++ b/clang/lib/AST/ParentMapContext.cpp
@@ -438,10 +438,12 @@ private:
DeclNode, DeclNode, [&] { return VisitorBase::TraverseDecl(DeclNode); },
&Map.PointerParents);
}
- bool TraverseTypeLoc(TypeLoc TypeLocNode) {
+ bool TraverseTypeLoc(TypeLoc TypeLocNode, bool TraverseQualifier = true) {
return TraverseNode(
TypeLocNode, DynTypedNode::create(TypeLocNode),
- [&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
+ [&] {
+ return VisitorBase::TraverseTypeLoc(TypeLocNode, TraverseQualifier);
+ },
&Map.OtherParents);
}
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
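
Visitors that shadow TraverseTypeLoc, as this one does, should grow the same TraverseQualifier parameter and forward it, so the CRTP dispatch still carries the flag the base visitor passes down. A minimal override shape (sketch):

    #include "clang/AST/RecursiveASTVisitor.h"

    using namespace clang;

    class MyVisitor : public RecursiveASTVisitor<MyVisitor> {
    public:
      // Match the new two-parameter signature and forward the flag so the
      // base visitor still decides whether to walk the qualifier TypeLoc.
      bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true) {
        // ... custom bookkeeping here ...
        return RecursiveASTVisitor<MyVisitor>::TraverseTypeLoc(
            TL, TraverseQualifier);
      }
    };
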
diff --git a/clang/lib/AST/PrintfFormatString.cpp b/clang/lib/AST/PrintfFormatString.cpp
index bcd44f0..8555504 100644
--- a/clang/lib/AST/PrintfFormatString.cpp
+++ b/clang/lib/AST/PrintfFormatString.cpp
@@ -793,8 +793,8 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
}
// If it's an enum, get its underlying type.
- if (const EnumType *ETy = QT->getAs<EnumType>())
- QT = ETy->getDecl()->getIntegerType();
+ if (const auto *ED = QT->getAsEnumDecl())
+ QT = ED->getIntegerType();
const BuiltinType *BT = QT->getAs<BuiltinType>();
if (!BT) {
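
getAsEnumDecl() collapses the old two-step getAs<EnumType>() + getDecl() into one call that yields null for non-enum types. The equivalent in isolation (sketch against the patched headers):

    #include "clang/AST/Decl.h"
    #include "clang/AST/Type.h"

    using namespace clang;

    // Map an enum type to its underlying integer type; leave others alone.
    static QualType underlyingIfEnum(QualType QT) {
      if (const EnumDecl *ED = QT->getAsEnumDecl())
        return ED->getIntegerType();
      return QT;
    }
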
diff --git a/clang/lib/AST/QualTypeNames.cpp b/clang/lib/AST/QualTypeNames.cpp
index 9731b3a..ee7fec3 100644
--- a/clang/lib/AST/QualTypeNames.cpp
+++ b/clang/lib/AST/QualTypeNames.cpp
@@ -24,10 +24,9 @@ namespace TypeName {
/// is requested.
/// \param[in] WithGlobalNsPrefix - Indicate whether the global namespace
/// specifier "::" should be prepended or not.
-static NestedNameSpecifier *createNestedNameSpecifier(
- const ASTContext &Ctx,
- const NamespaceDecl *Namesp,
- bool WithGlobalNsPrefix);
+static NestedNameSpecifier
+createNestedNameSpecifier(const ASTContext &Ctx, const NamespaceDecl *Namesp,
+ bool WithGlobalNsPrefix);
/// Create a NestedNameSpecifier for TagDecl and its enclosing
/// scopes.
@@ -39,22 +38,24 @@ static NestedNameSpecifier *createNestedNameSpecifier(
/// qualified names.
/// \param[in] WithGlobalNsPrefix - Indicate whether the global namespace
/// specifier "::" should be prepended or not.
-static NestedNameSpecifier *createNestedNameSpecifier(
- const ASTContext &Ctx, const TypeDecl *TD,
- bool FullyQualify, bool WithGlobalNsPrefix);
+static NestedNameSpecifier createNestedNameSpecifier(const ASTContext &Ctx,
+ const TypeDecl *TD,
+ bool FullyQualify,
+ bool WithGlobalNsPrefix);
-static NestedNameSpecifier *createNestedNameSpecifierForScopeOf(
- const ASTContext &Ctx, const Decl *decl,
- bool FullyQualified, bool WithGlobalNsPrefix);
+static NestedNameSpecifier
+createNestedNameSpecifierForScopeOf(const ASTContext &Ctx, const Decl *decl,
+ bool FullyQualified,
+ bool WithGlobalNsPrefix);
-static NestedNameSpecifier *getFullyQualifiedNestedNameSpecifier(
- const ASTContext &Ctx, NestedNameSpecifier *scope, bool WithGlobalNsPrefix);
+static NestedNameSpecifier getFullyQualifiedNestedNameSpecifier(
+ const ASTContext &Ctx, NestedNameSpecifier NNS, bool WithGlobalNsPrefix);
static bool getFullyQualifiedTemplateName(const ASTContext &Ctx,
TemplateName &TName,
bool WithGlobalNsPrefix) {
bool Changed = false;
- NestedNameSpecifier *NNS = nullptr;
+ NestedNameSpecifier NNS = std::nullopt;
TemplateDecl *ArgTDecl = TName.getAsTemplateDecl();
// ArgTDecl won't be NULL because we asserted that this isn't a
@@ -65,13 +66,13 @@ static bool getFullyQualifiedTemplateName(const ASTContext &Ctx,
if (QTName &&
!QTName->hasTemplateKeyword() &&
(NNS = QTName->getQualifier())) {
- NestedNameSpecifier *QNNS = getFullyQualifiedNestedNameSpecifier(
- Ctx, NNS, WithGlobalNsPrefix);
+ NestedNameSpecifier QNNS =
+ getFullyQualifiedNestedNameSpecifier(Ctx, NNS, WithGlobalNsPrefix);
if (QNNS != NNS) {
Changed = true;
NNS = QNNS;
} else {
- NNS = nullptr;
+ NNS = std::nullopt;
}
} else {
NNS = createNestedNameSpecifierForScopeOf(
@@ -116,76 +117,81 @@ static bool getFullyQualifiedTemplateArgument(const ASTContext &Ctx,
}
static const Type *getFullyQualifiedTemplateType(const ASTContext &Ctx,
- const Type *TypePtr,
+ const TagType *TSTRecord,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
bool WithGlobalNsPrefix) {
- // DependentTemplateTypes exist within template declarations and
- // definitions. Therefore we shouldn't encounter them at the end of
- // a translation unit. If we do, the caller has made an error.
- assert(!isa<DependentTemplateSpecializationType>(TypePtr));
- // In case of template specializations, iterate over the arguments
- // and fully qualify them as well.
- if (const auto *TST = dyn_cast<const TemplateSpecializationType>(TypePtr)) {
- bool MightHaveChanged = false;
- SmallVector<TemplateArgument, 4> FQArgs;
- // Cheap to copy and potentially modified by
- // getFullyQualifedTemplateArgument.
- for (TemplateArgument Arg : TST->template_arguments()) {
- MightHaveChanged |= getFullyQualifiedTemplateArgument(
- Ctx, Arg, WithGlobalNsPrefix);
- FQArgs.push_back(Arg);
- }
+  // We are asked to fully qualify and we have a Record Type,
+  // which can point to a template instantiation with no sugar in any of
+  // its template arguments; however, we still need to fully qualify them.
+
+ const auto *TD = TSTRecord->getOriginalDecl();
+ const auto *TSTDecl = dyn_cast<ClassTemplateSpecializationDecl>(TD);
+ if (!TSTDecl)
+ return Ctx.getTagType(Keyword, Qualifier, TD, /*OwnsTag=*/false)
+ .getTypePtr();
+
+ const TemplateArgumentList &TemplateArgs = TSTDecl->getTemplateArgs();
+
+ bool MightHaveChanged = false;
+ SmallVector<TemplateArgument, 4> FQArgs;
+ for (unsigned int I = 0, E = TemplateArgs.size(); I != E; ++I) {
+    // Cheap to copy and potentially modified by
+    // getFullyQualifiedTemplateArgument.
+ TemplateArgument Arg(TemplateArgs[I]);
+ MightHaveChanged |=
+ getFullyQualifiedTemplateArgument(Ctx, Arg, WithGlobalNsPrefix);
+ FQArgs.push_back(Arg);
+ }
- // If a fully qualified arg is different from the unqualified arg,
- // allocate new type in the AST.
- if (MightHaveChanged) {
- QualType QT = Ctx.getTemplateSpecializationType(
- TST->getTemplateName(), FQArgs,
- /*CanonicalArgs=*/{}, TST->desugar());
- // getTemplateSpecializationType returns a fully qualified
- // version of the specialization itself, so no need to qualify
- // it.
- return QT.getTypePtr();
- }
- } else if (const auto *TSTRecord = dyn_cast<const RecordType>(TypePtr)) {
- // We are asked to fully qualify and we have a Record Type,
- // which can point to a template instantiation with no sugar in any of
- // its template argument, however we still need to fully qualify them.
-
- if (const auto *TSTDecl =
- dyn_cast<ClassTemplateSpecializationDecl>(TSTRecord->getDecl())) {
- const TemplateArgumentList &TemplateArgs = TSTDecl->getTemplateArgs();
-
- bool MightHaveChanged = false;
- SmallVector<TemplateArgument, 4> FQArgs;
- for (unsigned int I = 0, E = TemplateArgs.size(); I != E; ++I) {
- // cheap to copy and potentially modified by
- // getFullyQualifedTemplateArgument
- TemplateArgument Arg(TemplateArgs[I]);
- MightHaveChanged |= getFullyQualifiedTemplateArgument(
- Ctx, Arg, WithGlobalNsPrefix);
- FQArgs.push_back(Arg);
- }
+ if (!MightHaveChanged)
+ return Ctx.getTagType(Keyword, Qualifier, TD, /*OwnsTag=*/false)
+ .getTypePtr();
+ // If a fully qualified arg is different from the unqualified arg,
+ // allocate new type in the AST.
+ TemplateName TN = Ctx.getQualifiedTemplateName(
+ Qualifier, /*TemplateKeyword=*/false,
+ TemplateName(TSTDecl->getSpecializedTemplate()));
+ QualType QT = Ctx.getTemplateSpecializationType(
+ Keyword, TN, FQArgs,
+ /*CanonicalArgs=*/{}, TSTRecord->getCanonicalTypeInternal());
+ // getTemplateSpecializationType returns a fully qualified
+ // version of the specialization itself, so no need to qualify
+ // it.
+ return QT.getTypePtr();
+}
- // If a fully qualified arg is different from the unqualified arg,
- // allocate new type in the AST.
- if (MightHaveChanged) {
- TemplateName TN(TSTDecl->getSpecializedTemplate());
- QualType QT = Ctx.getTemplateSpecializationType(
- TN, FQArgs,
- /*CanonicalArgs=*/{}, TSTRecord->getCanonicalTypeInternal());
- // getTemplateSpecializationType returns a fully qualified
- // version of the specialization itself, so no need to qualify
- // it.
- return QT.getTypePtr();
- }
- }
+static const Type *
+getFullyQualifiedTemplateType(const ASTContext &Ctx,
+ const TemplateSpecializationType *TST,
+ bool WithGlobalNsPrefix) {
+ TemplateName TName = TST->getTemplateName();
+ bool MightHaveChanged =
+ getFullyQualifiedTemplateName(Ctx, TName, WithGlobalNsPrefix);
+ SmallVector<TemplateArgument, 4> FQArgs;
+  // Cheap to copy and potentially modified by
+  // getFullyQualifiedTemplateArgument.
+ for (TemplateArgument Arg : TST->template_arguments()) {
+ MightHaveChanged |=
+ getFullyQualifiedTemplateArgument(Ctx, Arg, WithGlobalNsPrefix);
+ FQArgs.push_back(Arg);
}
- return TypePtr;
+
+ if (!MightHaveChanged)
+ return TST;
+
+ QualType NewQT =
+ Ctx.getTemplateSpecializationType(TST->getKeyword(), TName, FQArgs,
+ /*CanonicalArgs=*/{}, TST->desugar());
+ // getTemplateSpecializationType returns a fully qualified
+ // version of the specialization itself, so no need to qualify
+ // it.
+ return NewQT.getTypePtr();
}
-static NestedNameSpecifier *createOuterNNS(const ASTContext &Ctx, const Decl *D,
- bool FullyQualify,
- bool WithGlobalNsPrefix) {
+static NestedNameSpecifier createOuterNNS(const ASTContext &Ctx, const Decl *D,
+ bool FullyQualify,
+ bool WithGlobalNsPrefix) {
const DeclContext *DC = D->getDeclContext();
if (const auto *NS = dyn_cast<NamespaceDecl>(DC)) {
while (NS && NS->isInline()) {
@@ -195,71 +201,63 @@ static NestedNameSpecifier *createOuterNNS(const ASTContext &Ctx, const Decl *D,
if (NS && NS->getDeclName()) {
return createNestedNameSpecifier(Ctx, NS, WithGlobalNsPrefix);
}
- return nullptr; // no starting '::', no anonymous
- } else if (const auto *TD = dyn_cast<TagDecl>(DC)) {
- return createNestedNameSpecifier(Ctx, TD, FullyQualify, WithGlobalNsPrefix);
- } else if (const auto *TDD = dyn_cast<TypedefNameDecl>(DC)) {
- return createNestedNameSpecifier(
- Ctx, TDD, FullyQualify, WithGlobalNsPrefix);
- } else if (WithGlobalNsPrefix && DC->isTranslationUnit()) {
- return NestedNameSpecifier::GlobalSpecifier(Ctx);
+ return std::nullopt; // no starting '::', no anonymous
}
- return nullptr; // no starting '::' if |WithGlobalNsPrefix| is false
+ if (const auto *TD = dyn_cast<TagDecl>(DC))
+ return createNestedNameSpecifier(Ctx, TD, FullyQualify, WithGlobalNsPrefix);
+ if (const auto *TDD = dyn_cast<TypedefNameDecl>(DC))
+ return createNestedNameSpecifier(Ctx, TDD, FullyQualify,
+ WithGlobalNsPrefix);
+ if (WithGlobalNsPrefix && DC->isTranslationUnit())
+ return NestedNameSpecifier::getGlobal();
+ return std::nullopt; // no starting '::' if |WithGlobalNsPrefix| is false
}
/// Return a fully qualified version of this name specifier.
-static NestedNameSpecifier *getFullyQualifiedNestedNameSpecifier(
- const ASTContext &Ctx, NestedNameSpecifier *Scope,
- bool WithGlobalNsPrefix) {
- switch (Scope->getKind()) {
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
- // Already fully qualified
- return Scope;
- case NestedNameSpecifier::Namespace:
- return TypeName::createNestedNameSpecifier(
- Ctx, Scope->getAsNamespace()->getNamespace(), WithGlobalNsPrefix);
- case NestedNameSpecifier::Identifier:
- // A function or some other construct that makes it un-namable
- // at the end of the TU. Skip the current component of the name,
- // but use the name of it's prefix.
- return getFullyQualifiedNestedNameSpecifier(
- Ctx, Scope->getPrefix(), WithGlobalNsPrefix);
- case NestedNameSpecifier::TypeSpec: {
- const Type *Type = Scope->getAsType();
- // Find decl context.
- const TagDecl *TD = nullptr;
- if (const TagType *TagDeclType = Type->getAs<TagType>()) {
- TD = TagDeclType->getDecl();
- } else {
- TD = Type->getAsCXXRecordDecl();
- }
- if (TD) {
- return TypeName::createNestedNameSpecifier(Ctx, TD,
- true /*FullyQualified*/,
- WithGlobalNsPrefix);
- } else if (const auto *TDD = dyn_cast<TypedefType>(Type)) {
- return TypeName::createNestedNameSpecifier(Ctx, TDD->getDecl(),
- true /*FullyQualified*/,
- WithGlobalNsPrefix);
- }
+static NestedNameSpecifier getFullyQualifiedNestedNameSpecifier(
+ const ASTContext &Ctx, NestedNameSpecifier Scope, bool WithGlobalNsPrefix) {
+ switch (Scope.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("can't fully qualify the empty nested name specifier");
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ // Already fully qualified
+ return Scope;
+ case NestedNameSpecifier::Kind::Namespace:
+ return TypeName::createNestedNameSpecifier(
+ Ctx, Scope.getAsNamespaceAndPrefix().Namespace->getNamespace(),
+ WithGlobalNsPrefix);
+ case NestedNameSpecifier::Kind::Type: {
+ const Type *Type = Scope.getAsType();
+ // Find decl context.
+ const TypeDecl *TD;
+ if (const TagType *TagDeclType = Type->getAs<TagType>())
+ TD = TagDeclType->getOriginalDecl();
+ else if (const auto *D = dyn_cast<TypedefType>(Type))
+ TD = D->getDecl();
+ else
return Scope;
- }
+ return TypeName::createNestedNameSpecifier(Ctx, TD, /*FullyQualify=*/true,
+ WithGlobalNsPrefix);
+ }
}
llvm_unreachable("bad NNS kind");
}
/// Create a nested name specifier for the declaring context of
/// the type.
-static NestedNameSpecifier *createNestedNameSpecifierForScopeOf(
- const ASTContext &Ctx, const Decl *Decl,
- bool FullyQualified, bool WithGlobalNsPrefix) {
+static NestedNameSpecifier
+createNestedNameSpecifierForScopeOf(const ASTContext &Ctx, const Decl *Decl,
+ bool FullyQualified,
+ bool WithGlobalNsPrefix) {
assert(Decl);
const DeclContext *DC = Decl->getDeclContext()->getRedeclContext();
const auto *Outer = dyn_cast<NamedDecl>(DC);
const auto *OuterNS = dyn_cast<NamespaceDecl>(DC);
- if (Outer && !(OuterNS && OuterNS->isAnonymousNamespace())) {
+ if (OuterNS && OuterNS->isAnonymousNamespace())
+ OuterNS = dyn_cast<NamespaceDecl>(OuterNS->getParent());
+ if (Outer) {
if (const auto *CxxDecl = dyn_cast<CXXRecordDecl>(DC)) {
if (ClassTemplateDecl *ClassTempl =
CxxDecl->getDescribedClassTemplate()) {
@@ -288,76 +286,80 @@ static NestedNameSpecifier *createNestedNameSpecifierForScopeOf(
Ctx, TD, FullyQualified, WithGlobalNsPrefix);
} else if (isa<TranslationUnitDecl>(Outer)) {
// Context is the TU. Nothing needs to be done.
- return nullptr;
+ return std::nullopt;
} else {
// Decl's context was neither the TU, a namespace, nor a
// TagDecl, which means it is a type local to a scope, and not
// accessible at the end of the TU.
- return nullptr;
+ return std::nullopt;
}
} else if (WithGlobalNsPrefix && DC->isTranslationUnit()) {
- return NestedNameSpecifier::GlobalSpecifier(Ctx);
+ return NestedNameSpecifier::getGlobal();
}
- return nullptr;
+ return std::nullopt;
}
/// Create a nested name specifier for the declaring context of
/// the type.
-static NestedNameSpecifier *createNestedNameSpecifierForScopeOf(
- const ASTContext &Ctx, const Type *TypePtr,
- bool FullyQualified, bool WithGlobalNsPrefix) {
- if (!TypePtr) return nullptr;
+static NestedNameSpecifier
+createNestedNameSpecifierForScopeOf(const ASTContext &Ctx, const Type *TypePtr,
+ bool FullyQualified,
+ bool WithGlobalNsPrefix) {
+ if (!TypePtr)
+ return std::nullopt;
Decl *Decl = nullptr;
// There are probably other cases ...
if (const auto *TDT = dyn_cast<TypedefType>(TypePtr)) {
Decl = TDT->getDecl();
} else if (const auto *TagDeclType = dyn_cast<TagType>(TypePtr)) {
- Decl = TagDeclType->getDecl();
+ Decl = TagDeclType->getOriginalDecl();
} else if (const auto *TST = dyn_cast<TemplateSpecializationType>(TypePtr)) {
Decl = TST->getTemplateName().getAsTemplateDecl();
} else {
Decl = TypePtr->getAsCXXRecordDecl();
}
- if (!Decl) return nullptr;
+ if (!Decl)
+ return std::nullopt;
return createNestedNameSpecifierForScopeOf(
Ctx, Decl, FullyQualified, WithGlobalNsPrefix);
}
-NestedNameSpecifier *createNestedNameSpecifier(const ASTContext &Ctx,
- const NamespaceDecl *Namespace,
- bool WithGlobalNsPrefix) {
+static NestedNameSpecifier
+createNestedNameSpecifier(const ASTContext &Ctx, const NamespaceDecl *Namespace,
+ bool WithGlobalNsPrefix) {
while (Namespace && Namespace->isInline()) {
// Ignore inline namespace;
Namespace = dyn_cast<NamespaceDecl>(Namespace->getDeclContext());
}
- if (!Namespace) return nullptr;
+ if (!Namespace)
+ return std::nullopt;
- bool FullyQualified = true; // doesn't matter, DeclContexts are namespaces
- return NestedNameSpecifier::Create(
- Ctx,
- createOuterNNS(Ctx, Namespace, FullyQualified, WithGlobalNsPrefix),
- Namespace);
+ bool FullyQualify = true; // doesn't matter, DeclContexts are namespaces
+ return NestedNameSpecifier(
+ Ctx, Namespace,
+ createOuterNNS(Ctx, Namespace, FullyQualify, WithGlobalNsPrefix));
}
-NestedNameSpecifier *createNestedNameSpecifier(const ASTContext &Ctx,
- const TypeDecl *TD,
- bool FullyQualify,
- bool WithGlobalNsPrefix) {
- const Type *TypePtr = TD->getTypeForDecl();
- if (isa<const TemplateSpecializationType>(TypePtr) ||
- isa<const RecordType>(TypePtr)) {
+NestedNameSpecifier createNestedNameSpecifier(const ASTContext &Ctx,
+ const TypeDecl *TD,
+ bool FullyQualify,
+ bool WithGlobalNsPrefix) {
+ const Type *TypePtr = Ctx.getTypeDeclType(TD).getTypePtr();
+ if (auto *RD = dyn_cast<TagType>(TypePtr)) {
// We are asked to fully qualify and we have a Record Type (which
// may point to a template specialization) or Template
// Specialization Type. We need to fully qualify their arguments.
-
- TypePtr = getFullyQualifiedTemplateType(Ctx, TypePtr, WithGlobalNsPrefix);
+ TypePtr = getFullyQualifiedTemplateType(
+ Ctx, RD, ElaboratedTypeKeyword::None,
+ createOuterNNS(Ctx, TD, FullyQualify, WithGlobalNsPrefix),
+ WithGlobalNsPrefix);
+ } else if (auto *TST = dyn_cast<TemplateSpecializationType>(TypePtr)) {
+ TypePtr = getFullyQualifiedTemplateType(Ctx, TST, WithGlobalNsPrefix);
}
-
- return NestedNameSpecifier::Create(
- Ctx, createOuterNNS(Ctx, TD, FullyQualify, WithGlobalNsPrefix), TypePtr);
+ return NestedNameSpecifier(TypePtr);
}
/// Return the fully qualified type, including fully-qualified
@@ -381,7 +383,7 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
Qualifiers Quals = QT.getQualifiers();
// Fully qualify the pointee and class types.
QT = getFullyQualifiedType(QT->getPointeeType(), Ctx, WithGlobalNsPrefix);
- NestedNameSpecifier *Qualifier = getFullyQualifiedNestedNameSpecifier(
+ NestedNameSpecifier Qualifier = getFullyQualifiedNestedNameSpecifier(
Ctx, MPT->getQualifier(), WithGlobalNsPrefix);
QT = Ctx.getMemberPointerType(QT, Qualifier,
MPT->getMostRecentCXXRecordDecl());
@@ -434,45 +436,48 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
QT = Ctx.getQualifiedType(QT, Quals);
}
- NestedNameSpecifier *Prefix = nullptr;
+ if (const auto *TST =
+ dyn_cast<const TemplateSpecializationType>(QT.getTypePtr())) {
+
+ const Type *T = getFullyQualifiedTemplateType(Ctx, TST, WithGlobalNsPrefix);
+ if (T == TST)
+ return QT;
+ return Ctx.getQualifiedType(T, QT.getQualifiers());
+ }
+
// Local qualifiers are attached to the QualType outside of the
// elaborated type. Retrieve them before descending into the
// elaborated type.
Qualifiers PrefixQualifiers = QT.getLocalQualifiers();
QT = QualType(QT.getTypePtr(), 0);
- ElaboratedTypeKeyword Keyword = ElaboratedTypeKeyword::None;
- if (const auto *ETypeInput = dyn_cast<ElaboratedType>(QT.getTypePtr())) {
- QT = ETypeInput->getNamedType();
- assert(!QT.hasLocalQualifiers());
- Keyword = ETypeInput->getKeyword();
- }
// We don't consider the alias introduced by `using a::X` as a new type.
// The qualified name is still a::X.
if (const auto *UT = QT->getAs<UsingType>()) {
- QT = Ctx.getQualifiedType(UT->getUnderlyingType(), PrefixQualifiers);
+ QT = Ctx.getQualifiedType(UT->desugar(), PrefixQualifiers);
return getFullyQualifiedType(QT, Ctx, WithGlobalNsPrefix);
}
// Create a nested name specifier if needed.
- Prefix = createNestedNameSpecifierForScopeOf(Ctx, QT.getTypePtr(),
- true /*FullyQualified*/,
- WithGlobalNsPrefix);
+ NestedNameSpecifier Prefix = createNestedNameSpecifierForScopeOf(
+ Ctx, QT.getTypePtr(), true /*FullyQualified*/, WithGlobalNsPrefix);
// In case of template specializations iterate over the arguments and
// fully qualify them as well.
- if (isa<const TemplateSpecializationType>(QT.getTypePtr()) ||
- isa<const RecordType>(QT.getTypePtr())) {
+ if (const auto *TT = dyn_cast<TagType>(QT.getTypePtr())) {
// We are asked to fully qualify and we have a Record Type (which
// may point to a template specialization) or Template
// Specialization Type. We need to fully qualify their arguments.
const Type *TypePtr = getFullyQualifiedTemplateType(
- Ctx, QT.getTypePtr(), WithGlobalNsPrefix);
+ Ctx, TT, TT->getKeyword(), Prefix, WithGlobalNsPrefix);
QT = QualType(TypePtr, 0);
- }
- if (Prefix || Keyword != ElaboratedTypeKeyword::None) {
- QT = Ctx.getElaboratedType(Keyword, Prefix, QT);
+ } else if (const auto *TT = dyn_cast<TypedefType>(QT.getTypePtr())) {
+ QT = Ctx.getTypedefType(
+ TT->getKeyword(), Prefix, TT->getDecl(),
+ getFullyQualifiedType(TT->desugar(), Ctx, WithGlobalNsPrefix));
+ } else {
+ assert(!Prefix && "Unhandled type node");
}
QT = Ctx.getQualifiedType(QT, PrefixQualifiers);
return QT;
@@ -486,5 +491,12 @@ std::string getFullyQualifiedName(QualType QT,
return FQQT.getAsString(Policy);
}
+NestedNameSpecifier getFullyQualifiedDeclaredContext(const ASTContext &Ctx,
+ const Decl *Decl,
+ bool WithGlobalNsPrefix) {
+ return createNestedNameSpecifierForScopeOf(Ctx, Decl, /*FullyQualified=*/true,
+ WithGlobalNsPrefix);
+}
+
} // end namespace TypeName
} // end namespace clang
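Editor's note: a minimal sketch (not part of the patch) of the value-type construction API the hunks above migrate to. `buildNNS` is a hypothetical helper; clang AST headers and `using namespace clang;` are assumed.

```cpp
#include <optional>

// Builds a specifier from whichever component is present, mirroring the two
// constructors used above: (Ctx, NamespaceDecl*, Prefix) and (const Type *).
NestedNameSpecifier buildNNS(const ASTContext &Ctx, const NamespaceDecl *NS,
                             NestedNameSpecifier Prefix, const Type *T) {
  if (NS)
    return NestedNameSpecifier(Ctx, NS, Prefix); // e.g. 'A::B::'
  if (T)
    return NestedNameSpecifier(T);               // e.g. 'SomeType::'
  return std::nullopt;                           // the empty specifier
}
```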
diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp
index 760b2fc..4b312c5 100644
--- a/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -204,15 +204,13 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
// Check the fields.
for (const FieldDecl *FD : Class->fields()) {
- const RecordType *RT =
- Context.getBaseElementType(FD->getType())->getAs<RecordType>();
-
- // We only care about record types.
- if (!RT)
+ // We only care about records.
+ const auto *MemberDecl =
+ Context.getBaseElementType(FD->getType())->getAsCXXRecordDecl();
+ if (!MemberDecl)
continue;
CharUnits EmptySize;
- const CXXRecordDecl *MemberDecl = RT->getAsCXXRecordDecl();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(MemberDecl);
if (MemberDecl->isEmpty()) {
// If the class decl is empty, get its size.
@@ -433,11 +431,10 @@ EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
// If we have an array type we need to look at every element.
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
QualType ElemTy = Context.getBaseElementType(AT);
- const RecordType *RT = ElemTy->getAs<RecordType>();
- if (!RT)
+ const auto *RD = ElemTy->getAsCXXRecordDecl();
+ if (!RD)
return true;
- const CXXRecordDecl *RD = RT->getAsCXXRecordDecl();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
uint64_t NumElements = Context.getConstantArrayElementCount(AT);
@@ -533,11 +530,10 @@ void EmptySubobjectMap::UpdateEmptyFieldSubobjects(
// If we have an array type we need to update every element.
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
QualType ElemTy = Context.getBaseElementType(AT);
- const RecordType *RT = ElemTy->getAs<RecordType>();
- if (!RT)
+ const auto *RD = ElemTy->getAsCXXRecordDecl();
+ if (!RD)
return;
- const CXXRecordDecl *RD = RT->getAsCXXRecordDecl();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
uint64_t NumElements = Context.getConstantArrayElementCount(AT);
@@ -2011,9 +2007,8 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
CTy->getElementType()->castAs<BuiltinType>());
} else if (const BuiltinType *BTy = BaseTy->getAs<BuiltinType>()) {
performBuiltinTypeAlignmentUpgrade(BTy);
- } else if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- assert(RD && "Expected non-null RecordDecl.");
+ } else if (const RecordType *RT = BaseTy->getAsCanonical<RecordType>()) {
+ const RecordDecl *RD = RT->getOriginalDecl();
const ASTRecordLayout &FieldRecord = Context.getASTRecordLayout(RD);
PreferredAlign = FieldRecord.getPreferredAlignment();
}
@@ -2128,7 +2123,8 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
          // TODO: Takes no account of the alignment of the outer struct
if (FieldOffset % OriginalFieldAlign != 0)
Diag(D->getLocation(), diag::warn_unaligned_access)
- << Context.getTypeDeclType(RD) << D->getName() << D->getType();
+ << Context.getCanonicalTagType(RD) << D->getName()
+ << D->getType();
}
}
@@ -2193,8 +2189,7 @@ void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
InBits = false;
}
Diag(RD->getLocation(), diag::warn_padded_struct_size)
- << Context.getTypeDeclType(RD)
- << PadSize
+ << Context.getCanonicalTagType(RD) << PadSize
<< (InBits ? 1 : 0); // (byte|bit)
}
@@ -2212,7 +2207,7 @@ void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
Context.getLangOpts().getClangABICompat() <=
LangOptions::ClangABI::Ver15))
Diag(D->getLocation(), diag::warn_unnecessary_packed)
- << Context.getTypeDeclType(RD);
+ << Context.getCanonicalTagType(RD);
}
}
@@ -2306,7 +2301,7 @@ static void CheckFieldPadding(const ASTContext &Context, bool IsUnion,
Context.getDiagnostics().Report(D->getLocation(),
Diagnostic)
<< getPaddingDiagFromTagKind(D->getParent()->getTagKind())
- << Context.getTypeDeclType(D->getParent()) << PadSize
+ << Context.getCanonicalTagType(D->getParent()) << PadSize
<< (InBits ? 1 : 0) // (byte|bit)
<< D->getIdentifier();
} else {
@@ -2315,7 +2310,7 @@ static void CheckFieldPadding(const ASTContext &Context, bool IsUnion,
Context.getDiagnostics().Report(D->getLocation(),
Diagnostic)
<< getPaddingDiagFromTagKind(D->getParent()->getTagKind())
- << Context.getTypeDeclType(D->getParent()) << PadSize
+ << Context.getCanonicalTagType(D->getParent()) << PadSize
<< (InBits ? 1 : 0); // (byte|bit)
}
}
@@ -2712,9 +2707,10 @@ MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
// alignment when it is applied to bitfields.
Info.Alignment = std::max(Info.Alignment, FieldRequiredAlignment);
else {
- if (auto RT =
- FD->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
- auto const &Layout = Context.getASTRecordLayout(RT->getDecl());
+ if (const auto *RT = FD->getType()
+ ->getBaseElementTypeUnsafe()
+ ->getAsCanonical<RecordType>()) {
+ auto const &Layout = Context.getASTRecordLayout(RT->getOriginalDecl());
EndsWithZeroSizedObject = Layout.endsWithZeroSizedObject();
FieldRequiredAlignment = std::max(FieldRequiredAlignment,
Layout.getRequiredAlignment());
@@ -3273,7 +3269,7 @@ void MicrosoftRecordLayoutBuilder::finalizeLayout(const RecordDecl *RD) {
Context.getDiagnostics().Report(RD->getLocation(),
diag::warn_padded_struct_size)
- << Context.getTypeDeclType(RD) << PadSize
+ << Context.getCanonicalTagType(RD) << PadSize
<< (InBits ? 1 : 0); // (byte|bit)
}
}
@@ -3631,7 +3627,7 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD,
auto CXXRD = dyn_cast<CXXRecordDecl>(RD);
PrintOffset(OS, Offset, IndentLevel);
- OS << C.getTypeDeclType(const_cast<RecordDecl *>(RD));
+ OS << C.getCanonicalTagType(const_cast<RecordDecl *>(RD));
if (Description)
OS << ' ' << Description;
if (CXXRD && CXXRD->isEmpty())
@@ -3696,8 +3692,8 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD,
Offset + C.toCharUnitsFromBits(LocalFieldOffsetInBits);
// Recursively dump fields of record type.
- if (auto RT = Field->getType()->getAs<RecordType>()) {
- DumpRecordLayout(OS, RT->getDecl(), C, FieldOffset, IndentLevel,
+ if (const auto *RD = Field->getType()->getAsRecordDecl()) {
+ DumpRecordLayout(OS, RD, C, FieldOffset, IndentLevel,
Field->getName().data(),
/*PrintSizeInfo=*/false,
/*IncludeVirtualBases=*/true);
@@ -3781,7 +3777,7 @@ void ASTContext::DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS,
// in libFrontend.
const ASTRecordLayout &Info = getASTRecordLayout(RD);
- OS << "Type: " << getTypeDeclType(RD) << "\n";
+ OS << "Type: " << getCanonicalTagType(RD) << "\n";
OS << "\nLayout: ";
OS << "<ASTRecordLayout\n";
OS << " Size:" << toBits(Info.getSize()) << "\n";
diff --git a/clang/lib/AST/ScanfFormatString.cpp b/clang/lib/AST/ScanfFormatString.cpp
index 1227edd..41cf71a 100644
--- a/clang/lib/AST/ScanfFormatString.cpp
+++ b/clang/lib/AST/ScanfFormatString.cpp
@@ -430,11 +430,11 @@ bool ScanfSpecifier::fixType(QualType QT, QualType RawQT,
QualType PT = QT->getPointeeType();
// If it's an enum, get its underlying type.
- if (const EnumType *ETy = PT->getAs<EnumType>()) {
+ if (const auto *ED = PT->getAsEnumDecl()) {
// Don't try to fix incomplete enums.
- if (!ETy->getDecl()->isComplete())
+ if (!ED->isComplete())
return false;
- PT = ETy->getDecl()->getIntegerType();
+ PT = ED->getIntegerType();
}
const BuiltinType *BT = PT->getAs<BuiltinType>();
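Editor's note: a sketch of the new one-step enum lookup used in the hunk above, wrapped in a hypothetical helper; clang AST headers are assumed.

```cpp
// Old two-step form: PT->getAs<EnumType>(), then ETy->getDecl().
// New shortcut: QualType/Type expose getAsEnumDecl() directly.
bool fixEnumPointee(QualType &PT) {
  if (const EnumDecl *ED = PT->getAsEnumDecl()) {
    if (!ED->isComplete())
      return false;            // don't touch incomplete enums
    PT = ED->getIntegerType(); // use the underlying integer type
  }
  return true;
}
```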
diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp
index 6ba5ec8..afccba8 100644
--- a/clang/lib/AST/StmtPrinter.cpp
+++ b/clang/lib/AST/StmtPrinter.cpp
@@ -454,10 +454,7 @@ void StmtPrinter::VisitMSDependentExistsStmt(MSDependentExistsStmt *Node) {
else
OS << "__if_not_exists (";
- if (NestedNameSpecifier *Qualifier
- = Node->getQualifierLoc().getNestedNameSpecifier())
- Qualifier->print(OS, Policy);
-
+ Node->getQualifierLoc().getNestedNameSpecifier().print(OS, Policy);
OS << Node->getNameInfo() << ") ";
PrintRawCompoundStmt(Node->getSubStmt());
@@ -1309,8 +1306,7 @@ void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
TPOD->printAsExpr(OS, Policy);
return;
}
- if (NestedNameSpecifier *Qualifier = Node->getQualifier())
- Qualifier->print(OS, Policy);
+ Node->getQualifier().print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
@@ -1359,8 +1355,7 @@ void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
void StmtPrinter::VisitDependentScopeDeclRefExpr(
DependentScopeDeclRefExpr *Node) {
- if (NestedNameSpecifier *Qualifier = Node->getQualifier())
- Qualifier->print(OS, Policy);
+ Node->getQualifier().print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getNameInfo();
@@ -1369,8 +1364,7 @@ void StmtPrinter::VisitDependentScopeDeclRefExpr(
}
void StmtPrinter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) {
- if (Node->getQualifier())
- Node->getQualifier()->print(OS, Policy);
+ Node->getQualifier().print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getNameInfo();
@@ -1778,8 +1772,7 @@ void StmtPrinter::VisitMemberExpr(MemberExpr *Node) {
if (FD->isAnonymousStructOrUnion())
return;
- if (NestedNameSpecifier *Qualifier = Node->getQualifier())
- Qualifier->print(OS, Policy);
+ Node->getQualifier().print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getMemberNameInfo();
@@ -2177,9 +2170,7 @@ void StmtPrinter::VisitMSPropertyRefExpr(MSPropertyRefExpr *Node) {
OS << "->";
else
OS << ".";
- if (NestedNameSpecifier *Qualifier =
- Node->getQualifierLoc().getNestedNameSpecifier())
- Qualifier->print(OS, Policy);
+ Node->getQualifierLoc().getNestedNameSpecifier().print(OS, Policy);
OS << Node->getPropertyDecl()->getDeclName();
}
@@ -2509,8 +2500,7 @@ void StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
OS << "->";
else
OS << '.';
- if (E->getQualifier())
- E->getQualifier()->print(OS, Policy);
+ E->getQualifier().print(OS, Policy);
OS << "~";
if (const IdentifierInfo *II = E->getDestroyedTypeIdentifier())
@@ -2572,8 +2562,7 @@ void StmtPrinter::VisitCXXDependentScopeMemberExpr(
PrintExpr(Node->getBase());
OS << (Node->isArrow() ? "->" : ".");
}
- if (NestedNameSpecifier *Qualifier = Node->getQualifier())
- Qualifier->print(OS, Policy);
+ Node->getQualifier().print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getMemberNameInfo();
@@ -2586,8 +2575,7 @@ void StmtPrinter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *Node) {
PrintExpr(Node->getBase());
OS << (Node->isArrow() ? "->" : ".");
}
- if (NestedNameSpecifier *Qualifier = Node->getQualifier())
- Qualifier->print(OS, Policy);
+ Node->getQualifier().print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getMemberNameInfo();
@@ -2678,8 +2666,7 @@ void StmtPrinter::VisitCXXParenListInitExpr(CXXParenListInitExpr *Node) {
void StmtPrinter::VisitConceptSpecializationExpr(ConceptSpecializationExpr *E) {
NestedNameSpecifierLoc NNS = E->getNestedNameSpecifierLoc();
- if (NNS)
- NNS.getNestedNameSpecifier()->print(OS, Policy);
+ NNS.getNestedNameSpecifier().print(OS, Policy);
if (E->getTemplateKWLoc().isValid())
OS << "template ";
OS << E->getFoundDecl()->getName();
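Editor's note: the one simplification this file repeats, shown as a standalone sketch. It assumes, per the callers updated above, that printing an empty specifier is a no-op.

```cpp
// was: if (NestedNameSpecifier *Q = Node->getQualifier())
//        Q->print(OS, Policy);
void printQualifier(raw_ostream &OS, const PrintingPolicy &Policy,
                    NestedNameSpecifier Qualifier) {
  Qualifier.print(OS, Policy); // no-op when Qualifier is empty
}
```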
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index 0297f9c..2035fa7 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -65,7 +65,7 @@ namespace {
/// Visit a nested-name-specifier that occurs within an expression
/// or statement.
- virtual void VisitNestedNameSpecifier(NestedNameSpecifier *NNS) = 0;
+ virtual void VisitNestedNameSpecifier(NestedNameSpecifier NNS) = 0;
/// Visit a template name that occurs within an expression or
/// statement.
@@ -167,10 +167,10 @@ namespace {
ID.AddPointer(II);
}
- void VisitNestedNameSpecifier(NestedNameSpecifier *NNS) override {
+ void VisitNestedNameSpecifier(NestedNameSpecifier NNS) override {
if (Canonical)
- NNS = Context.getCanonicalNestedNameSpecifier(NNS);
- ID.AddPointer(NNS);
+ NNS = NNS.getCanonical();
+ NNS.Profile(ID);
}
void VisitTemplateName(TemplateName Name) override {
@@ -226,11 +226,10 @@ namespace {
void VisitTemplateName(TemplateName Name) override {
Hash.AddTemplateName(Name);
}
- void VisitNestedNameSpecifier(NestedNameSpecifier *NNS) override {
- ID.AddBoolean(NNS);
- if (NNS) {
+ void VisitNestedNameSpecifier(NestedNameSpecifier NNS) override {
+ ID.AddBoolean(bool(NNS));
+ if (NNS)
Hash.AddNestedNameSpecifier(NNS);
- }
}
};
}
@@ -441,37 +440,37 @@ public:
#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(const Class *C);
#include "llvm/Frontend/OpenMP/OMP.inc"
- void VistOMPClauseWithPreInit(const OMPClauseWithPreInit *C);
- void VistOMPClauseWithPostUpdate(const OMPClauseWithPostUpdate *C);
+ void VisitOMPClauseWithPreInit(const OMPClauseWithPreInit *C);
+ void VisitOMPClauseWithPostUpdate(const OMPClauseWithPostUpdate *C);
};
-void OMPClauseProfiler::VistOMPClauseWithPreInit(
+void OMPClauseProfiler::VisitOMPClauseWithPreInit(
const OMPClauseWithPreInit *C) {
if (auto *S = C->getPreInitStmt())
Profiler->VisitStmt(S);
}
-void OMPClauseProfiler::VistOMPClauseWithPostUpdate(
+void OMPClauseProfiler::VisitOMPClauseWithPostUpdate(
const OMPClauseWithPostUpdate *C) {
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
if (auto *E = C->getPostUpdateExpr())
Profiler->VisitStmt(E);
}
void OMPClauseProfiler::VisitOMPIfClause(const OMPIfClause *C) {
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
if (C->getCondition())
Profiler->VisitStmt(C->getCondition());
}
void OMPClauseProfiler::VisitOMPFinalClause(const OMPFinalClause *C) {
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
if (C->getCondition())
Profiler->VisitStmt(C->getCondition());
}
void OMPClauseProfiler::VisitOMPNumThreadsClause(const OMPNumThreadsClause *C) {
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
if (C->getNumThreads())
Profiler->VisitStmt(C->getNumThreads());
}
@@ -527,13 +526,13 @@ void OMPClauseProfiler::VisitOMPDetachClause(const OMPDetachClause *C) {
}
void OMPClauseProfiler::VisitOMPNovariantsClause(const OMPNovariantsClause *C) {
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
if (C->getCondition())
Profiler->VisitStmt(C->getCondition());
}
void OMPClauseProfiler::VisitOMPNocontextClause(const OMPNocontextClause *C) {
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
if (C->getCondition())
Profiler->VisitStmt(C->getCondition());
}
@@ -569,7 +568,7 @@ void OMPClauseProfiler::VisitOMPMessageClause(const OMPMessageClause *C) {
}
void OMPClauseProfiler::VisitOMPScheduleClause(const OMPScheduleClause *C) {
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
if (auto *S = C->getChunkSize())
Profiler->VisitStmt(S);
}
@@ -647,7 +646,7 @@ void OMPClauseProfiler::VisitOMPDestroyClause(const OMPDestroyClause *C) {
}
void OMPClauseProfiler::VisitOMPFilterClause(const OMPFilterClause *C) {
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
if (C->getThreadID())
Profiler->VisitStmt(C->getThreadID());
}
@@ -670,7 +669,7 @@ void OMPClauseProfiler::VisitOMPPrivateClause(const OMPPrivateClause *C) {
void
OMPClauseProfiler::VisitOMPFirstprivateClause(const OMPFirstprivateClause *C) {
VisitOMPClauseList(C);
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
for (auto *E : C->private_copies()) {
if (E)
Profiler->VisitStmt(E);
@@ -683,7 +682,7 @@ OMPClauseProfiler::VisitOMPFirstprivateClause(const OMPFirstprivateClause *C) {
void
OMPClauseProfiler::VisitOMPLastprivateClause(const OMPLastprivateClause *C) {
VisitOMPClauseList(C);
- VistOMPClauseWithPostUpdate(C);
+ VisitOMPClauseWithPostUpdate(C);
for (auto *E : C->source_exprs()) {
if (E)
Profiler->VisitStmt(E);
@@ -706,7 +705,7 @@ void OMPClauseProfiler::VisitOMPReductionClause(
C->getQualifierLoc().getNestedNameSpecifier());
Profiler->VisitName(C->getNameInfo().getName());
VisitOMPClauseList(C);
- VistOMPClauseWithPostUpdate(C);
+ VisitOMPClauseWithPostUpdate(C);
for (auto *E : C->privates()) {
if (E)
Profiler->VisitStmt(E);
@@ -744,7 +743,7 @@ void OMPClauseProfiler::VisitOMPTaskReductionClause(
C->getQualifierLoc().getNestedNameSpecifier());
Profiler->VisitName(C->getNameInfo().getName());
VisitOMPClauseList(C);
- VistOMPClauseWithPostUpdate(C);
+ VisitOMPClauseWithPostUpdate(C);
for (auto *E : C->privates()) {
if (E)
Profiler->VisitStmt(E);
@@ -768,7 +767,7 @@ void OMPClauseProfiler::VisitOMPInReductionClause(
C->getQualifierLoc().getNestedNameSpecifier());
Profiler->VisitName(C->getNameInfo().getName());
VisitOMPClauseList(C);
- VistOMPClauseWithPostUpdate(C);
+ VisitOMPClauseWithPostUpdate(C);
for (auto *E : C->privates()) {
if (E)
Profiler->VisitStmt(E);
@@ -792,7 +791,7 @@ void OMPClauseProfiler::VisitOMPInReductionClause(
}
void OMPClauseProfiler::VisitOMPLinearClause(const OMPLinearClause *C) {
VisitOMPClauseList(C);
- VistOMPClauseWithPostUpdate(C);
+ VisitOMPClauseWithPostUpdate(C);
for (auto *E : C->privates()) {
if (E)
Profiler->VisitStmt(E);
@@ -874,25 +873,25 @@ void OMPClauseProfiler::VisitOMPAllocateClause(const OMPAllocateClause *C) {
}
void OMPClauseProfiler::VisitOMPNumTeamsClause(const OMPNumTeamsClause *C) {
VisitOMPClauseList(C);
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
}
void OMPClauseProfiler::VisitOMPThreadLimitClause(
const OMPThreadLimitClause *C) {
VisitOMPClauseList(C);
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
}
void OMPClauseProfiler::VisitOMPPriorityClause(const OMPPriorityClause *C) {
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
if (C->getPriority())
Profiler->VisitStmt(C->getPriority());
}
void OMPClauseProfiler::VisitOMPGrainsizeClause(const OMPGrainsizeClause *C) {
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
if (C->getGrainsize())
Profiler->VisitStmt(C->getGrainsize());
}
void OMPClauseProfiler::VisitOMPNumTasksClause(const OMPNumTasksClause *C) {
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
if (C->getNumTasks())
Profiler->VisitStmt(C->getNumTasks());
}
@@ -953,7 +952,7 @@ void OMPClauseProfiler::VisitOMPOrderClause(const OMPOrderClause *C) {}
void OMPClauseProfiler::VisitOMPBindClause(const OMPBindClause *C) {}
void OMPClauseProfiler::VisitOMPXDynCGroupMemClause(
const OMPXDynCGroupMemClause *C) {
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
if (Expr *Size = C->getSize())
Profiler->VisitStmt(Size);
}
@@ -1230,7 +1229,7 @@ void StmtProfiler::VisitOMPDistributeDirective(
void OMPClauseProfiler::VisitOMPDistScheduleClause(
const OMPDistScheduleClause *C) {
- VistOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPreInit(C);
if (auto *S = C->getChunkSize())
Profiler->VisitStmt(S);
}
@@ -2749,6 +2748,14 @@ void OpenACCClauseProfiler::VisitGangClause(const OpenACCGangClause &Clause) {
void OpenACCClauseProfiler::VisitReductionClause(
const OpenACCReductionClause &Clause) {
VisitClauseWithVarList(Clause);
+
+ for (auto &Recipe : Clause.getRecipes()) {
+ Profiler.VisitDecl(Recipe.RecipeDecl);
+  // TODO: OpenACC: Remember to update this once we figure out what we are
+  // adding for the operation recipe; in the meantime, a static assert makes
+  // sure we don't add anything.
+ static_assert(sizeof(OpenACCReductionRecipe) == sizeof(int *));
+ }
}
void OpenACCClauseProfiler::VisitBindClause(const OpenACCBindClause &Clause) {
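Editor's note: a sketch of the profiling change above, extracted into a hypothetical free function; `llvm/ADT/FoldingSet.h` and clang AST headers are assumed.

```cpp
// Canonical profiling now hashes the specifier's structure, not its pointer.
void profileNNS(llvm::FoldingSetNodeID &ID, NestedNameSpecifier NNS,
                bool Canonical) {
  if (Canonical)
    NNS = NNS.getCanonical(); // was Context.getCanonicalNestedNameSpecifier(NNS)
  NNS.Profile(ID);            // was ID.AddPointer(NNS)
}
```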
diff --git a/clang/lib/AST/TemplateBase.cpp b/clang/lib/AST/TemplateBase.cpp
index 7a0f740..76f96fb 100644
--- a/clang/lib/AST/TemplateBase.cpp
+++ b/clang/lib/AST/TemplateBase.cpp
@@ -56,8 +56,8 @@ static void printIntegral(const TemplateArgument &TemplArg, raw_ostream &Out,
const llvm::APSInt &Val = TemplArg.getAsIntegral();
if (Policy.UseEnumerators) {
- if (const EnumType *ET = T->getAs<EnumType>()) {
- for (const EnumConstantDecl *ECD : ET->getDecl()->enumerators()) {
+ if (const auto *ED = T->getAsEnumDecl()) {
+ for (const EnumConstantDecl *ECD : ED->enumerators()) {
      // In Sema::CheckTemplateArgument, enum template argument values are
// extended to the size of the integer underlying the enum type. This
// may create a size difference between the enum value and template
@@ -596,6 +596,29 @@ void TemplateArgument::print(const PrintingPolicy &Policy, raw_ostream &Out,
// TemplateArgumentLoc Implementation
//===----------------------------------------------------------------------===//
+TemplateArgumentLoc::TemplateArgumentLoc(ASTContext &Ctx,
+ const TemplateArgument &Argument,
+ SourceLocation TemplateKWLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateNameLoc,
+ SourceLocation EllipsisLoc)
+ : Argument(Argument),
+ LocInfo(Ctx, TemplateKWLoc, QualifierLoc, TemplateNameLoc, EllipsisLoc) {
+ assert(Argument.getKind() == TemplateArgument::Template ||
+ Argument.getKind() == TemplateArgument::TemplateExpansion);
+ assert(QualifierLoc.getNestedNameSpecifier() ==
+ Argument.getAsTemplateOrTemplatePattern().getQualifier());
+}
+
+NestedNameSpecifierLoc TemplateArgumentLoc::getTemplateQualifierLoc() const {
+ if (Argument.getKind() != TemplateArgument::Template &&
+ Argument.getKind() != TemplateArgument::TemplateExpansion)
+ return NestedNameSpecifierLoc();
+ return NestedNameSpecifierLoc(
+ Argument.getAsTemplateOrTemplatePattern().getQualifier(),
+ LocInfo.getTemplate()->QualifierLocData);
+}
+
SourceRange TemplateArgumentLoc::getSourceRange() const {
switch (Argument.getKind()) {
case TemplateArgument::Expression:
@@ -702,10 +725,11 @@ const StreamingDiagnostic &clang::operator<<(const StreamingDiagnostic &DB,
}
clang::TemplateArgumentLocInfo::TemplateArgumentLocInfo(
- ASTContext &Ctx, NestedNameSpecifierLoc QualifierLoc,
- SourceLocation TemplateNameLoc, SourceLocation EllipsisLoc) {
+ ASTContext &Ctx, SourceLocation TemplateKWLoc,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateNameLoc,
+ SourceLocation EllipsisLoc) {
TemplateTemplateArgLocInfo *Template = new (Ctx) TemplateTemplateArgLocInfo;
- Template->Qualifier = QualifierLoc.getNestedNameSpecifier();
+ Template->TemplateKwLoc = TemplateKWLoc;
Template->QualifierLocData = QualifierLoc.getOpaqueData();
Template->TemplateNameLoc = TemplateNameLoc;
Template->EllipsisLoc = EllipsisLoc;
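Editor's note: a minimal usage sketch of the accessor added above. Per its implementation, the qualifier location is reassembled on demand from the argument's qualifier plus the stored opaque location data; `ArgLoc` is an assumed `TemplateArgumentLoc`.

```cpp
// Returns an empty NestedNameSpecifierLoc for non-template argument kinds.
NestedNameSpecifierLoc getTemplateQualifier(const TemplateArgumentLoc &ArgLoc) {
  return ArgLoc.getTemplateQualifierLoc();
}
```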
diff --git a/clang/lib/AST/TemplateName.cpp b/clang/lib/AST/TemplateName.cpp
index 5b7abc4..f2cb15d 100644
--- a/clang/lib/AST/TemplateName.cpp
+++ b/clang/lib/AST/TemplateName.cpp
@@ -293,6 +293,21 @@ DependentTemplateName *TemplateName::getAsDependentTemplateName() const {
return Storage.dyn_cast<DependentTemplateName *>();
}
+std::tuple<NestedNameSpecifier, bool>
+TemplateName::getQualifierAndTemplateKeyword() const {
+ for (std::optional<TemplateName> Cur = *this; Cur;
+ Cur = Cur->desugar(/*IgnoreDeduced=*/true)) {
+ if (DependentTemplateName *N = Cur->getAsDependentTemplateName())
+ return {N->getQualifier(), N->hasTemplateKeyword()};
+ if (QualifiedTemplateName *N = Cur->getAsQualifiedTemplateName())
+ return {N->getQualifier(), N->hasTemplateKeyword()};
+ if (Cur->getAsSubstTemplateTemplateParm() ||
+ Cur->getAsSubstTemplateTemplateParmPack())
+ break;
+ }
+ return {std::nullopt, false};
+}
+
UsingShadowDecl *TemplateName::getAsUsingShadowDecl() const {
if (Decl *D = Storage.dyn_cast<Decl *>())
if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(D))
@@ -303,24 +318,21 @@ UsingShadowDecl *TemplateName::getAsUsingShadowDecl() const {
}
DependentTemplateStorage::DependentTemplateStorage(
- NestedNameSpecifier *Qualifier, IdentifierOrOverloadedOperator Name,
+ NestedNameSpecifier Qualifier, IdentifierOrOverloadedOperator Name,
bool HasTemplateKeyword)
: Qualifier(Qualifier, HasTemplateKeyword), Name(Name) {
- assert((!Qualifier || Qualifier->isDependent()) &&
+ assert((!Qualifier || Qualifier.isDependent()) &&
"Qualifier must be dependent");
}
TemplateNameDependence DependentTemplateStorage::getDependence() const {
- auto D = TemplateNameDependence::DependentInstantiation;
- if (NestedNameSpecifier *Qualifier = getQualifier())
- D |= toTemplateNameDependence(Qualifier->getDependence());
- return D;
+ return toTemplateNameDependence(getQualifier().getDependence()) |
+ TemplateNameDependence::DependentInstantiation;
}
void DependentTemplateStorage::print(raw_ostream &OS,
const PrintingPolicy &Policy) const {
- if (NestedNameSpecifier *NNS = getQualifier())
- NNS->print(OS, Policy);
+ getQualifier().print(OS, Policy);
if (hasTemplateKeyword())
OS << "template ";
@@ -363,16 +375,13 @@ TemplateNameDependence TemplateName::getDependence() const {
case NameKind::QualifiedTemplate: {
QualifiedTemplateName *S = getAsQualifiedTemplateName();
TemplateNameDependence D = S->getUnderlyingTemplate().getDependence();
- if (NestedNameSpecifier *NNS = S->getQualifier())
- D |= toTemplateNameDependence(NNS->getDependence());
+ D |= toTemplateNameDependence(S->getQualifier().getDependence());
return D;
}
case NameKind::DependentTemplate: {
DependentTemplateName *S = getAsDependentTemplateName();
- auto D = TemplateNameDependence::DependentInstantiation;
- if (NestedNameSpecifier *Qualifier = S->getQualifier())
- D |= toTemplateNameDependence(Qualifier->getDependence());
- return D;
+ return toTemplateNameDependence(S->getQualifier().getDependence()) |
+ TemplateNameDependence::DependentInstantiation;
}
case NameKind::SubstTemplateTemplateParm: {
auto *S = getAsSubstTemplateTemplateParm();
@@ -434,18 +443,26 @@ void TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
Template = cast<TemplateDecl>(Template->getCanonicalDecl());
if (handleAnonymousTTP(Template, OS))
return;
- if (Qual == Qualified::None)
- OS << *Template;
- else
- Template->printQualifiedName(OS, Policy);
+ if (Qual == Qualified::None || isa<TemplateTemplateParmDecl>(Template) ||
+ Policy.SuppressScope) {
+ if (IdentifierInfo *II = Template->getIdentifier();
+ Policy.CleanUglifiedParameters && II &&
+ isa<TemplateTemplateParmDecl>(Template))
+ OS << II->deuglifiedName();
+ else
+ OS << *Template;
+ } else {
+ PrintingPolicy NestedNamePolicy = Policy;
+ NestedNamePolicy.SuppressUnwrittenScope = true;
+ Template->printQualifiedName(OS, NestedNamePolicy);
+ }
} else if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
if (Policy.PrintAsCanonical) {
QTN->getUnderlyingTemplate().print(OS, Policy, Qual);
return;
}
- if (NestedNameSpecifier *NNS = QTN->getQualifier();
- Qual != Qualified::None && NNS)
- NNS->print(OS, Policy);
+ if (Qual != Qualified::None)
+ QTN->getQualifier().print(OS, Policy);
if (QTN->hasTemplateKeyword())
OS << "template ";
@@ -458,12 +475,7 @@ void TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
if (handleAnonymousTTP(UTD, OS))
return;
- if (IdentifierInfo *II = UTD->getIdentifier();
- Policy.CleanUglifiedParameters && II &&
- isa<TemplateTemplateParmDecl>(UTD))
- OS << II->deuglifiedName();
- else
- OS << *UTD;
+ OS << *UTD;
} else if (DependentTemplateName *DTN = getAsDependentTemplateName()) {
DTN->print(OS, Policy);
} else if (SubstTemplateTemplateParmStorage *subst =
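Editor's note: a sketch of the helper added above via structured bindings; it desugars until it finds a qualified or dependent template name and reports the written qualifier. `OS` and `Policy` are assumed.

```cpp
void printScopeAndKeyword(raw_ostream &OS, const PrintingPolicy &Policy,
                          TemplateName Name) {
  auto [Qualifier, HasTemplateKW] = Name.getQualifierAndTemplateKeyword();
  Qualifier.print(OS, Policy); // no-op when no qualifier was written
  if (HasTemplateKW)
    OS << "template ";
}
```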
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index 6b524cf..9dca5cf 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -1037,35 +1037,34 @@ void clang::TextNodeDumper::dumpTemplateSpecializationKind(
}
}
-void clang::TextNodeDumper::dumpNestedNameSpecifier(const NestedNameSpecifier *NNS) {
+void clang::TextNodeDumper::dumpNestedNameSpecifier(NestedNameSpecifier NNS) {
if (!NNS)
return;
AddChild([=] {
OS << "NestedNameSpecifier";
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- OS << " Identifier";
- OS << " '" << NNS->getAsIdentifier()->getName() << "'";
- break;
- case NestedNameSpecifier::Namespace:
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = NNS.getAsNamespaceAndPrefix();
OS << " "; // "Namespace" is printed as the decl kind.
- dumpBareDeclRef(NNS->getAsNamespace());
+ dumpBareDeclRef(Namespace);
+ dumpNestedNameSpecifier(Prefix);
break;
- case NestedNameSpecifier::TypeSpec:
+ }
+ case NestedNameSpecifier::Kind::Type:
OS << " TypeSpec";
- dumpType(QualType(NNS->getAsType(), 0));
+ dumpType(QualType(NNS.getAsType(), 0));
break;
- case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Kind::Global:
OS << " Global";
break;
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
OS << " Super";
break;
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
-
- dumpNestedNameSpecifier(NNS->getPrefix());
});
}
@@ -1401,8 +1400,8 @@ static void dumpBasePath(raw_ostream &OS, const CastExpr *Node) {
if (!First)
OS << " -> ";
- const auto *RD =
- cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
+ const auto *RD = cast<CXXRecordDecl>(
+ Base->getType()->castAsCanonical<RecordType>()->getOriginalDecl());
if (Base->isVirtual())
OS << "virtual ";
@@ -2112,19 +2111,32 @@ void TextNodeDumper::VisitFunctionProtoType(const FunctionProtoType *T) {
}
void TextNodeDumper::VisitUnresolvedUsingType(const UnresolvedUsingType *T) {
+ if (ElaboratedTypeKeyword K = T->getKeyword();
+ K != ElaboratedTypeKeyword::None)
+ OS << ' ' << TypeWithKeyword::getKeywordName(K);
+ dumpNestedNameSpecifier(T->getQualifier());
dumpDeclRef(T->getDecl());
}
void TextNodeDumper::VisitUsingType(const UsingType *T) {
- dumpDeclRef(T->getFoundDecl());
- if (!T->typeMatchesDecl())
- OS << " divergent";
+ if (ElaboratedTypeKeyword K = T->getKeyword();
+ K != ElaboratedTypeKeyword::None)
+ OS << ' ' << TypeWithKeyword::getKeywordName(K);
+ dumpNestedNameSpecifier(T->getQualifier());
+ dumpDeclRef(T->getDecl());
+ dumpType(T->desugar());
}
void TextNodeDumper::VisitTypedefType(const TypedefType *T) {
+ if (ElaboratedTypeKeyword K = T->getKeyword();
+ K != ElaboratedTypeKeyword::None)
+ OS << ' ' << TypeWithKeyword::getKeywordName(K);
+ dumpNestedNameSpecifier(T->getQualifier());
dumpDeclRef(T->getDecl());
- if (!T->typeMatchesDecl())
+ if (!T->typeMatchesDecl()) {
OS << " divergent";
+ dumpType(T->desugar());
+ }
}
void TextNodeDumper::VisitUnaryTransformType(const UnaryTransformType *T) {
@@ -2138,7 +2150,17 @@ void TextNodeDumper::VisitUnaryTransformType(const UnaryTransformType *T) {
}
void TextNodeDumper::VisitTagType(const TagType *T) {
- dumpDeclRef(T->getDecl());
+ if (T->isCanonicalUnqualified())
+ OS << " canonical";
+ if (T->isTagOwned())
+ OS << " owns_tag";
+ if (T->isInjected())
+ OS << " injected";
+ if (ElaboratedTypeKeyword K = T->getKeyword();
+ K != ElaboratedTypeKeyword::None)
+ OS << ' ' << TypeWithKeyword::getKeywordName(K);
+ dumpNestedNameSpecifier(T->getQualifier());
+ dumpDeclRef(T->getOriginalDecl());
}
void TextNodeDumper::VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
@@ -2182,12 +2204,15 @@ void TextNodeDumper::VisitTemplateSpecializationType(
const TemplateSpecializationType *T) {
if (T->isTypeAlias())
OS << " alias";
+ if (ElaboratedTypeKeyword K = T->getKeyword();
+ K != ElaboratedTypeKeyword::None)
+ OS << ' ' << TypeWithKeyword::getKeywordName(K);
dumpTemplateName(T->getTemplateName(), "name");
}
void TextNodeDumper::VisitInjectedClassNameType(
const InjectedClassNameType *T) {
- dumpDeclRef(T->getDecl());
+ dumpDeclRef(T->getOriginalDecl());
}
void TextNodeDumper::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
@@ -2778,8 +2803,7 @@ void TextNodeDumper::VisitTemplateTemplateParmDecl(
void TextNodeDumper::VisitUsingDecl(const UsingDecl *D) {
OS << ' ';
- if (D->getQualifier())
- D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ D->getQualifier().print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getDeclName();
dumpNestedNameSpecifier(D->getQualifier());
}
@@ -2792,16 +2816,14 @@ void TextNodeDumper::VisitUsingEnumDecl(const UsingEnumDecl *D) {
void TextNodeDumper::VisitUnresolvedUsingTypenameDecl(
const UnresolvedUsingTypenameDecl *D) {
OS << ' ';
- if (D->getQualifier())
- D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ D->getQualifier().print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getDeclName();
}
void TextNodeDumper::VisitUnresolvedUsingValueDecl(
const UnresolvedUsingValueDecl *D) {
OS << ' ';
- if (D->getQualifier())
- D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ D->getQualifier().print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getDeclName();
dumpType(D->getType());
}
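Editor's note: a sketch of the reworked kind enumeration the dumper above switches over; `Identifier` is gone, `Super` is renamed `MicrosoftSuper`, and `Null` is an explicit state. `kindName` is a hypothetical helper; `llvm/Support/ErrorHandling.h` is assumed.

```cpp
const char *kindName(NestedNameSpecifier NNS) {
  switch (NNS.getKind()) {
  case NestedNameSpecifier::Kind::Null:           return "<empty>";
  case NestedNameSpecifier::Kind::Global:         return "global '::'";
  case NestedNameSpecifier::Kind::Namespace:      return "namespace";
  case NestedNameSpecifier::Kind::Type:           return "type";
  case NestedNameSpecifier::Kind::MicrosoftSuper: return "__super";
  }
  llvm_unreachable("unknown NestedNameSpecifier kind");
}
```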
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index 141edc8..3432810 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -109,12 +109,12 @@ bool Qualifiers::isTargetAddressSpaceSupersetOf(LangAS A, LangAS B,
const IdentifierInfo *QualType::getBaseTypeIdentifier() const {
const Type *ty = getTypePtr();
NamedDecl *ND = nullptr;
+ if (const auto *DNT = ty->getAs<DependentNameType>())
+ return DNT->getIdentifier();
if (ty->isPointerOrReferenceType())
return ty->getPointeeType().getBaseTypeIdentifier();
- else if (ty->isRecordType())
- ND = ty->castAs<RecordType>()->getDecl();
- else if (ty->isEnumeralType())
- ND = ty->castAs<EnumType>()->getDecl();
+ if (const auto *TT = ty->getAs<TagType>())
+ ND = TT->getOriginalDecl();
else if (ty->getTypeClass() == Type::Typedef)
ND = ty->castAs<TypedefType>()->getDecl();
else if (ty->isArrayType())
@@ -670,61 +670,59 @@ const Type *Type::getUnqualifiedDesugaredType() const {
}
bool Type::isClassType() const {
- if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->isClass();
+ if (const auto *RT = getAsCanonical<RecordType>())
+ return RT->getOriginalDecl()->isClass();
return false;
}
bool Type::isStructureType() const {
- if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->isStruct();
+ if (const auto *RT = getAsCanonical<RecordType>())
+ return RT->getOriginalDecl()->isStruct();
return false;
}
bool Type::isStructureTypeWithFlexibleArrayMember() const {
- const auto *RT = getAs<RecordType>();
+ const auto *RT = getAsCanonical<RecordType>();
if (!RT)
return false;
- const auto *Decl = RT->getDecl();
+ const auto *Decl = RT->getOriginalDecl();
if (!Decl->isStruct())
return false;
- return Decl->hasFlexibleArrayMember();
+ return Decl->getDefinitionOrSelf()->hasFlexibleArrayMember();
}
bool Type::isObjCBoxableRecordType() const {
- if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->hasAttr<ObjCBoxableAttr>();
+ if (const auto *RD = getAsRecordDecl())
+ return RD->hasAttr<ObjCBoxableAttr>();
return false;
}
bool Type::isInterfaceType() const {
- if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->isInterface();
+ if (const auto *RT = getAsCanonical<RecordType>())
+ return RT->getOriginalDecl()->isInterface();
return false;
}
bool Type::isStructureOrClassType() const {
- if (const auto *RT = getAs<RecordType>()) {
- RecordDecl *RD = RT->getDecl();
- return RD->isStruct() || RD->isClass() || RD->isInterface();
- }
+ if (const auto *RT = getAsCanonical<RecordType>())
+ return RT->getOriginalDecl()->isStructureOrClass();
return false;
}
bool Type::isVoidPointerType() const {
- if (const auto *PT = getAs<PointerType>())
+ if (const auto *PT = getAsCanonical<PointerType>())
return PT->getPointeeType()->isVoidType();
return false;
}
bool Type::isUnionType() const {
- if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->isUnion();
+ if (const auto *RT = getAsCanonical<RecordType>())
+ return RT->getOriginalDecl()->isUnion();
return false;
}
bool Type::isComplexType() const {
- if (const auto *CT = dyn_cast<ComplexType>(CanonicalType))
+ if (const auto *CT = getAsCanonical<ComplexType>())
return CT->getElementType()->isFloatingType();
return false;
}
@@ -735,8 +733,8 @@ bool Type::isComplexIntegerType() const {
}
bool Type::isScopedEnumeralType() const {
- if (const auto *ET = getAs<EnumType>())
- return ET->getDecl()->isScoped();
+ if (const auto *ET = getAsCanonical<EnumType>())
+ return ET->getOriginalDecl()->isScoped();
return false;
}
@@ -770,13 +768,13 @@ QualType Type::getPointeeType() const {
const RecordType *Type::getAsStructureType() const {
// If this is directly a structure type, return it.
if (const auto *RT = dyn_cast<RecordType>(this)) {
- if (RT->getDecl()->isStruct())
+ if (RT->getOriginalDecl()->isStruct())
return RT;
}
// If the canonical form of this type isn't the right kind, reject it.
if (const auto *RT = dyn_cast<RecordType>(CanonicalType)) {
- if (!RT->getDecl()->isStruct())
+ if (!RT->getOriginalDecl()->isStruct())
return nullptr;
// If this is a typedef for a structure type, strip the typedef off without
@@ -789,13 +787,13 @@ const RecordType *Type::getAsStructureType() const {
const RecordType *Type::getAsUnionType() const {
// If this is directly a union type, return it.
if (const auto *RT = dyn_cast<RecordType>(this)) {
- if (RT->getDecl()->isUnion())
+ if (RT->getOriginalDecl()->isUnion())
return RT;
}
// If the canonical form of this type isn't the right kind, reject it.
if (const auto *RT = dyn_cast<RecordType>(CanonicalType)) {
- if (!RT->getDecl()->isUnion())
+ if (!RT->getOriginalDecl()->isUnion())
return nullptr;
// If this is a typedef for a union type, strip the typedef off without
@@ -1272,9 +1270,6 @@ public:
TRIVIAL_TYPE_CLASS(Record)
TRIVIAL_TYPE_CLASS(Enum)
- // FIXME: Non-trivial to implement, but important for C++
- SUGARED_TYPE_CLASS(Elaborated)
-
QualType VisitAttributedType(const AttributedType *T) {
QualType modifiedType = recurse(T->getModifiedType());
if (modifiedType.isNull())
@@ -1626,7 +1621,7 @@ bool QualType::UseExcessPrecision(const ASTContext &Ctx) {
switch (BT->getKind()) {
case BuiltinType::Kind::Float16: {
const TargetInfo &TI = Ctx.getTargetInfo();
- if (TI.hasFloat16Type() && !TI.hasLegalHalfType() &&
+ if (TI.hasFloat16Type() && !TI.hasFastHalfType() &&
Ctx.getLangOpts().getFloat16ExcessPrecision() !=
Ctx.getLangOpts().ExcessPrecisionKind::FPP_None)
return true;
@@ -1913,34 +1908,13 @@ const ObjCObjectPointerType *Type::getAsObjCInterfacePointerType() const {
const CXXRecordDecl *Type::getPointeeCXXRecordDecl() const {
QualType PointeeType;
- if (const auto *PT = getAs<PointerType>())
+ if (const auto *PT = getAsCanonical<PointerType>())
PointeeType = PT->getPointeeType();
- else if (const auto *RT = getAs<ReferenceType>())
+ else if (const auto *RT = getAsCanonical<ReferenceType>())
PointeeType = RT->getPointeeType();
else
return nullptr;
-
- if (const auto *RT = PointeeType->getAs<RecordType>())
- return dyn_cast<CXXRecordDecl>(RT->getDecl());
-
- return nullptr;
-}
-
-CXXRecordDecl *Type::getAsCXXRecordDecl() const {
- return dyn_cast_or_null<CXXRecordDecl>(getAsTagDecl());
-}
-
-RecordDecl *Type::getAsRecordDecl() const {
- return dyn_cast_or_null<RecordDecl>(getAsTagDecl());
-}
-
-TagDecl *Type::getAsTagDecl() const {
- if (const auto *TT = getAs<TagType>())
- return TT->getDecl();
- if (const auto *Injected = getAs<InjectedClassNameType>())
- return Injected->getDecl();
-
- return nullptr;
+ return PointeeType->getAsCXXRecordDecl();
}
const TemplateSpecializationType *
@@ -1951,6 +1925,33 @@ Type::getAsNonAliasTemplateSpecializationType() const {
return TST;
}
+NestedNameSpecifier Type::getPrefix() const {
+ switch (getTypeClass()) {
+ case Type::DependentName:
+ return cast<DependentNameType>(this)->getQualifier();
+ case Type::TemplateSpecialization:
+ return cast<TemplateSpecializationType>(this)
+ ->getTemplateName()
+ .getQualifier();
+ case Type::DependentTemplateSpecialization:
+ return cast<DependentTemplateSpecializationType>(this)
+ ->getDependentTemplateName()
+ .getQualifier();
+ case Type::Enum:
+ case Type::Record:
+ case Type::InjectedClassName:
+ return cast<TagType>(this)->getQualifier();
+ case Type::Typedef:
+ return cast<TypedefType>(this)->getQualifier();
+ case Type::UnresolvedUsing:
+ return cast<UnresolvedUsingType>(this)->getQualifier();
+ case Type::Using:
+ return cast<UsingType>(this)->getQualifier();
+ default:
+ return std::nullopt;
+ }
+}
+
bool Type::hasAttr(attr::Kind AK) const {
const Type *Cur = this;
while (const auto *AT = Cur->getAs<AttributedType>()) {
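Editor's note: with ElaboratedType removed, the written scope lives on the type node itself, and the `Type::getPrefix()` added above retrieves it uniformly. A minimal sketch; `hasWrittenScope` is hypothetical.

```cpp
bool hasWrittenScope(const Type *T) {
  NestedNameSpecifier Prefix = T->getPrefix();
  return bool(Prefix); // std::nullopt for types that carry no qualifier
}
```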
@@ -1989,10 +1990,6 @@ public:
return Visit(T->getReplacementType());
}
- Type *VisitElaboratedType(const ElaboratedType *T) {
- return Visit(T->getNamedType());
- }
-
Type *VisitPointerType(const PointerType *T) {
return Visit(T->getPointeeType());
}
@@ -2114,7 +2111,7 @@ bool Type::isIntegralType(const ASTContext &Ctx) const {
// Complete enum types are integral in C.
if (!Ctx.getLangOpts().CPlusPlus)
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
- return ET->getDecl()->isComplete();
+ return IsEnumDeclComplete(ET->getOriginalDecl());
return isBitIntType();
}
@@ -2131,7 +2128,7 @@ bool Type::isIntegralOrUnscopedEnumerationType() const {
bool Type::isUnscopedEnumerationType() const {
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
- return !ET->getDecl()->isScoped();
+ return !ET->getOriginalDecl()->isScoped();
return false;
}
@@ -2213,11 +2210,12 @@ bool Type::isSignedIntegerType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->isSignedInteger();
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ if (const auto *ED = getAsEnumDecl()) {
// Incomplete enum types are not treated as integer types.
// FIXME: In C++, enum types are never integer types.
- if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
- return ET->getDecl()->getIntegerType()->isSignedIntegerType();
+ if (!ED->isComplete() || ED->isScoped())
+ return false;
+ return ED->getIntegerType()->isSignedIntegerType();
}
if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
@@ -2232,9 +2230,11 @@ bool Type::isSignedIntegerOrEnumerationType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->isSignedInteger();
- if (const auto *ET = dyn_cast<EnumType>(CanonicalType);
- ET && ET->getDecl()->isComplete())
- return ET->getDecl()->getIntegerType()->isSignedIntegerType();
+ if (const auto *ED = getAsEnumDecl()) {
+ if (!ED->isComplete())
+ return false;
+ return ED->getIntegerType()->isSignedIntegerType();
+ }
if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
return IT->isSigned();
@@ -2258,11 +2258,12 @@ bool Type::isUnsignedIntegerType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->isUnsignedInteger();
- if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
+ if (const auto *ED = getAsEnumDecl()) {
// Incomplete enum types are not treated as integer types.
// FIXME: In C++, enum types are never integer types.
- if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
- return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
+ if (!ED->isComplete() || ED->isScoped())
+ return false;
+ return ED->getIntegerType()->isUnsignedIntegerType();
}
if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
@@ -2277,9 +2278,11 @@ bool Type::isUnsignedIntegerOrEnumerationType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->isUnsignedInteger();
- if (const auto *ET = dyn_cast<EnumType>(CanonicalType);
- ET && ET->getDecl()->isComplete())
- return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
+ if (const auto *ED = getAsEnumDecl()) {
+ if (!ED->isComplete())
+ return false;
+ return ED->getIntegerType()->isUnsignedIntegerType();
+ }
if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
return IT->isUnsigned();
@@ -2328,8 +2331,10 @@ bool Type::isRealType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Ibm128;
- if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
- return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
+ const auto *ED = ET->getOriginalDecl();
+ return !ED->isScoped() && ED->getDefinitionOrSelf()->isComplete();
+ }
return isBitIntType();
}
@@ -2337,24 +2342,24 @@ bool Type::isArithmeticType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Ibm128;
- if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
     // GCC allows forward declaration of enum types (forbidden by C99 6.7.2.3p2).
// If a body isn't seen by the time we get here, return false.
//
// C++0x: Enumerations are not arithmetic types. For now, just return
// false for scoped enumerations since that will disable any
// unwanted implicit conversions.
- return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete();
+ const auto *ED = ET->getOriginalDecl();
+ return !ED->isScoped() && ED->getDefinitionOrSelf()->isComplete();
+ }
return isa<ComplexType>(CanonicalType) || isBitIntType();
}
bool Type::hasBooleanRepresentation() const {
if (const auto *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isBooleanType();
- if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
- return ET->getDecl()->isComplete() &&
- ET->getDecl()->getIntegerType()->isBooleanType();
- }
+ if (const auto *ED = getAsEnumDecl())
+ return ED->isComplete() && ED->getIntegerType()->isBooleanType();
if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
return IT->getNumBits() == 1;
return isBooleanType();
@@ -2385,7 +2390,7 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
} else if (isa<MemberPointerType>(T)) {
return STK_MemberPointer;
} else if (isa<EnumType>(T)) {
- assert(cast<EnumType>(T)->getDecl()->isComplete());
+ assert(T->castAsEnumDecl()->isComplete());
return STK_Integral;
} else if (const auto *CT = dyn_cast<ComplexType>(T)) {
if (CT->getElementType()->isRealFloatingType())
@@ -2409,7 +2414,8 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
/// includes union types.
bool Type::isAggregateType() const {
if (const auto *Record = dyn_cast<RecordType>(CanonicalType)) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(Record->getDecl()))
+ if (const auto *ClassDecl =
+ dyn_cast<CXXRecordDecl>(Record->getOriginalDecl()))
return ClassDecl->isAggregate();
return true;
@@ -2443,7 +2449,7 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
// be completed.
return isVoidType();
case Enum: {
- EnumDecl *EnumD = cast<EnumType>(CanonicalType)->getDecl();
+ auto *EnumD = castAsEnumDecl();
if (Def)
*Def = EnumD;
return !EnumD->isComplete();
@@ -2451,13 +2457,13 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
case Record: {
// A tagged type (struct/union/enum/class) is incomplete if the decl is a
// forward declaration, but not a full definition (C99 6.2.5p22).
- RecordDecl *Rec = cast<RecordType>(CanonicalType)->getDecl();
+ auto *Rec = castAsRecordDecl();
if (Def)
*Def = Rec;
return !Rec->isCompleteDefinition();
}
case InjectedClassName: {
- CXXRecordDecl *Rec = cast<InjectedClassNameType>(CanonicalType)->getDecl();
+ auto *Rec = castAsCXXRecordDecl();
if (!Rec->isBeingDefined())
return false;
if (Def)
@@ -2517,7 +2523,7 @@ bool Type::isAlwaysIncompleteType() const {
// Forward declarations of structs, classes, enums, and unions could be later
// completed in a compilation unit by providing a type definition.
- if (getAsTagDecl())
+ if (isa<TagType>(CanonicalType))
return false;
// Other types are incompletable.
@@ -2715,6 +2721,11 @@ bool QualType::isCXX98PODType(const ASTContext &Context) const {
return false;
QualType CanonicalType = getTypePtr()->CanonicalType;
+
+ // Any type that is, or contains, address discriminated data is never POD.
+ if (Context.containsAddressDiscriminatedPointerAuth(CanonicalType))
+ return false;
+
switch (CanonicalType->getTypeClass()) {
// Everything not explicitly mentioned is not POD.
default:
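Editor's note: the guard this and the following hunks insert, shown in isolation: any type that is, or contains, address-discriminated pointer authentication is rejected before the per-type-class checks. `maybePOD` is a hypothetical wrapper.

```cpp
bool maybePOD(const ASTContext &Ctx, QualType QT) {
  if (Ctx.containsAddressDiscriminatedPointerAuth(QT))
    return false; // never POD, trivial, or bitwise-clonable
  return true;    // fall through to the usual checks
}
```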
@@ -2739,8 +2750,8 @@ bool QualType::isCXX98PODType(const ASTContext &Context) const {
return true;
case Type::Record:
- if (const auto *ClassDecl =
- dyn_cast<CXXRecordDecl>(cast<RecordType>(CanonicalType)->getDecl()))
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(
+ cast<RecordType>(CanonicalType)->getOriginalDecl()))
return ClassDecl->isPOD();
// C struct/union is POD.
@@ -2773,6 +2784,11 @@ bool QualType::isTrivialType(const ASTContext &Context) const {
if (CanonicalType->isDependentType())
return false;
+ // Any type that is, or contains, address discriminated data is never a
+ // trivial type.
+ if (Context.containsAddressDiscriminatedPointerAuth(CanonicalType))
+ return false;
+
// C++0x [basic.types]p9:
// Scalar types, trivial class types, arrays of such types, and
// cv-qualified versions of these types are collectively called trivial
@@ -2781,22 +2797,22 @@ bool QualType::isTrivialType(const ASTContext &Context) const {
// As an extension, Clang treats vector types as Scalar types.
if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
return true;
- if (const auto *RT = CanonicalType->getAs<RecordType>()) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- // C++20 [class]p6:
- // A trivial class is a class that is trivially copyable, and
- // has one or more eligible default constructors such that each is
- // trivial.
- // FIXME: We should merge this definition of triviality into
- // CXXRecordDecl::isTrivial. Currently it computes the wrong thing.
- return ClassDecl->hasTrivialDefaultConstructor() &&
- !ClassDecl->hasNonTrivialDefaultConstructor() &&
- ClassDecl->isTriviallyCopyable();
- }
- return true;
+ if (const auto *ClassDecl = CanonicalType->getAsCXXRecordDecl()) {
+ // C++20 [class]p6:
+ // A trivial class is a class that is trivially copyable, and
+ // has one or more eligible default constructors such that each is
+ // trivial.
+ // FIXME: We should merge this definition of triviality into
+ // CXXRecordDecl::isTrivial. Currently it computes the wrong thing.
+ return ClassDecl->hasTrivialDefaultConstructor() &&
+ !ClassDecl->hasNonTrivialDefaultConstructor() &&
+ ClassDecl->isTriviallyCopyable();
}
+ if (isa<RecordType>(CanonicalType))
+ return true;
+
// No other types can match.
return false;
}
@@ -2840,15 +2856,13 @@ static bool isTriviallyCopyableTypeImpl(const QualType &type,
if (CanonicalType->isMFloat8Type())
return true;
- if (const auto *RT = CanonicalType->getAs<RecordType>()) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- if (IsCopyConstructible) {
+ if (const auto *RD = CanonicalType->getAsRecordDecl()) {
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
+ if (IsCopyConstructible)
return ClassDecl->isTriviallyCopyConstructible();
- } else {
- return ClassDecl->isTriviallyCopyable();
- }
+ return ClassDecl->isTriviallyCopyable();
}
- return !RT->getDecl()->isNonTrivialToPrimitiveCopy();
+ return !RD->isNonTrivialToPrimitiveCopy();
}
// No other types can match.
return false;
@@ -2870,6 +2884,12 @@ bool QualType::isBitwiseCloneableType(const ASTContext &Context) const {
if (CanonicalType->isIncompleteType())
return false;
+
+ // Any type that is, or contains, address discriminated data is never
+ // bitwise clonable.
+ if (Context.containsAddressDiscriminatedPointerAuth(CanonicalType))
+ return false;
+
const auto *RD = CanonicalType->getAsRecordDecl(); // struct/union/class
if (!RD)
return true;
@@ -2936,9 +2956,9 @@ bool QualType::isWebAssemblyFuncrefType() const {
QualType::PrimitiveDefaultInitializeKind
QualType::isNonTrivialToPrimitiveDefaultInitialize() const {
- if (const auto *RT =
- getTypePtr()->getBaseElementTypeUnsafe()->getAs<RecordType>())
- if (RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize())
+ if (const auto *RD =
+ getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl())
+ if (RD->isNonTrivialToPrimitiveDefaultInitialize())
return PDIK_Struct;
switch (getQualifiers().getObjCLifetime()) {
@@ -2952,9 +2972,9 @@ QualType::isNonTrivialToPrimitiveDefaultInitialize() const {
}
QualType::PrimitiveCopyKind QualType::isNonTrivialToPrimitiveCopy() const {
- if (const auto *RT =
- getTypePtr()->getBaseElementTypeUnsafe()->getAs<RecordType>())
- if (RT->getDecl()->isNonTrivialToPrimitiveCopy())
+ if (const auto *RD =
+ getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl())
+ if (RD->isNonTrivialToPrimitiveCopy())
return PCK_Struct;
Qualifiers Qs = getQualifiers();
@@ -3011,7 +3031,7 @@ bool Type::isLiteralType(const ASTContext &Ctx) const {
if (BaseTy->isReferenceType())
return true;
// -- a class type that has all of the following properties:
- if (const auto *RT = BaseTy->getAs<RecordType>()) {
+ if (const auto *RD = BaseTy->getAsRecordDecl()) {
// -- a trivial destructor,
// -- every constructor call and full-expression in the
// brace-or-equal-initializers for non-static data members (if any)
@@ -3022,7 +3042,7 @@ bool Type::isLiteralType(const ASTContext &Ctx) const {
// -- all non-static data members and base classes of literal types
//
// We resolve DR1361 by ignoring the second bullet.
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD))
return ClassDecl->isLiteral();
return true;
@@ -3075,10 +3095,10 @@ bool Type::isStandardLayoutType() const {
// As an extension, Clang treats vector types as Scalar types.
if (BaseTy->isScalarType() || BaseTy->isVectorType())
return true;
- if (const auto *RT = BaseTy->getAs<RecordType>()) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- if (!ClassDecl->isStandardLayout())
- return false;
+ if (const auto *RD = BaseTy->getAsRecordDecl()) {
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD);
+ ClassDecl && !ClassDecl->isStandardLayout())
+ return false;
// Default to 'true' for non-C++ class types.
// FIXME: This is a bit dubious, but plain C structs should trivially meet
@@ -3115,11 +3135,15 @@ bool QualType::isCXX11PODType(const ASTContext &Context) const {
if (BaseTy->isIncompleteType())
return false;
+ // Any type that is, or contains, address discriminated data is non-POD.
+ if (Context.containsAddressDiscriminatedPointerAuth(*this))
+ return false;
+
// As an extension, Clang treats vector types as Scalar types.
if (BaseTy->isScalarType() || BaseTy->isVectorType())
return true;
- if (const auto *RT = BaseTy->getAs<RecordType>()) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (const auto *RD = BaseTy->getAsRecordDecl()) {
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
// C++11 [class]p10:
// A POD struct is a non-union class that is both a trivial class [...]
if (!ClassDecl->isTrivial())
@@ -3158,18 +3182,20 @@ bool Type::isNothrowT() const {
}
bool Type::isAlignValT() const {
- if (const auto *ET = getAs<EnumType>()) {
- IdentifierInfo *II = ET->getDecl()->getIdentifier();
- if (II && II->isStr("align_val_t") && ET->getDecl()->isInStdNamespace())
+ if (const auto *ET = getAsCanonical<EnumType>()) {
+ const auto *ED = ET->getOriginalDecl();
+ IdentifierInfo *II = ED->getIdentifier();
+ if (II && II->isStr("align_val_t") && ED->isInStdNamespace())
return true;
}
return false;
}
bool Type::isStdByteType() const {
- if (const auto *ET = getAs<EnumType>()) {
- IdentifierInfo *II = ET->getDecl()->getIdentifier();
- if (II && II->isStr("byte") && ET->getDecl()->isInStdNamespace())
+ if (const auto *ET = getAsCanonical<EnumType>()) {
+ const auto *ED = ET->getOriginalDecl();
+ IdentifierInfo *II = ED->getIdentifier();
+ if (II && II->isStr("byte") && ED->isInStdNamespace())
return true;
}
return false;
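// Usage sketch (illustrative; `QT` stands in for a QualType obtained
// elsewhere): both isAlignValT() and isStdByteType() test the canonical
// type, so sugar still matches.
//
//   using raw_byte = std::byte;
//   raw_byte b;
//   // With QT = the type of `b`, QT->isStdByteType() is true: the canonical
//   // type is the enumeration std::byte declared in namespace std.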
@@ -3188,7 +3214,6 @@ bool Type::isSpecifierType() const {
case TemplateTypeParm:
case SubstTemplateTypeParm:
case TemplateSpecialization:
- case Elaborated:
case DependentName:
case DependentTemplateSpecialization:
case ObjCInterface:
@@ -3199,8 +3224,7 @@ bool Type::isSpecifierType() const {
}
}
-ElaboratedTypeKeyword
-TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) {
+ElaboratedTypeKeyword KeywordHelpers::getKeywordForTypeSpec(unsigned TypeSpec) {
switch (TypeSpec) {
default:
return ElaboratedTypeKeyword::None;
@@ -3219,7 +3243,7 @@ TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) {
}
}
-TagTypeKind TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
+TagTypeKind KeywordHelpers::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
switch (TypeSpec) {
case TST_class:
return TagTypeKind::Class;
@@ -3237,7 +3261,7 @@ TagTypeKind TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
}
ElaboratedTypeKeyword
-TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) {
+KeywordHelpers::getKeywordForTagTypeKind(TagTypeKind Kind) {
switch (Kind) {
case TagTypeKind::Class:
return ElaboratedTypeKeyword::Class;
@@ -3254,7 +3278,7 @@ TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) {
}
TagTypeKind
-TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
+KeywordHelpers::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
switch (Keyword) {
case ElaboratedTypeKeyword::Class:
return TagTypeKind::Class;
@@ -3273,7 +3297,7 @@ TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
llvm_unreachable("Unknown elaborated type keyword.");
}
-bool TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
+bool KeywordHelpers::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
switch (Keyword) {
case ElaboratedTypeKeyword::None:
case ElaboratedTypeKeyword::Typename:
@@ -3288,7 +3312,7 @@ bool TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
llvm_unreachable("Unknown elaborated type keyword.");
}
-StringRef TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
+StringRef KeywordHelpers::getKeywordName(ElaboratedTypeKeyword Keyword) {
switch (Keyword) {
case ElaboratedTypeKeyword::None:
return {};
@@ -3338,13 +3362,21 @@ void DependentTemplateSpecializationType::Profile(
bool Type::isElaboratedTypeSpecifier() const {
ElaboratedTypeKeyword Keyword;
- if (const auto *Elab = dyn_cast<ElaboratedType>(this))
- Keyword = Elab->getKeyword();
+ if (const auto *TST = dyn_cast<TemplateSpecializationType>(this))
+ Keyword = TST->getKeyword();
else if (const auto *DepName = dyn_cast<DependentNameType>(this))
Keyword = DepName->getKeyword();
else if (const auto *DepTST =
dyn_cast<DependentTemplateSpecializationType>(this))
Keyword = DepTST->getKeyword();
+ else if (const auto *T = dyn_cast<TagType>(this))
+ Keyword = T->getKeyword();
+ else if (const auto *T = dyn_cast<TypedefType>(this))
+ Keyword = T->getKeyword();
+ else if (const auto *T = dyn_cast<UnresolvedUsingType>(this))
+ Keyword = T->getKeyword();
+ else if (const auto *T = dyn_cast<UsingType>(this))
+ Keyword = T->getKeyword();
else
return false;
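// Illustrative sketch (assumed, matching the cases above; the keyword now
// lives on the named type node itself):
//
//   struct S {};
//   struct S a;  // written type carries ElaboratedTypeKeyword::Struct -> true
//   S b;         // written type carries ElaboratedTypeKeyword::None   -> false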
@@ -3677,6 +3709,16 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
FunctionTypeBits.HasExtraBitfields = false;
}
+ // Propagate any extra attribute information.
+ if (epi.requiresFunctionProtoTypeExtraAttributeInfo()) {
+ auto &ExtraAttrInfo = *getTrailingObjects<FunctionTypeExtraAttributeInfo>();
+ ExtraAttrInfo.CFISalt = epi.ExtraAttributeInfo.CFISalt;
+
+ // Also set the bit in FunctionTypeExtraBitfields.
+ auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>();
+ ExtraBits.HasExtraAttributeInfo = true;
+ }
+
if (epi.requiresFunctionProtoTypeArmAttributes()) {
auto &ArmTypeAttrs = *getTrailingObjects<FunctionTypeArmAttributes>();
ArmTypeAttrs = FunctionTypeArmAttributes();
@@ -3894,7 +3936,8 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
// This is followed by the ext info:
// int
// Finally we have a trailing return type flag (bool)
- // combined with AArch64 SME Attributes, to save space:
+ // combined with AArch64 SME Attributes and extra attribute info, to save
+ // space:
// int
// combined with any FunctionEffects
//
@@ -3929,6 +3972,7 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
}
epi.ExtInfo.Profile(ID);
+ epi.ExtraAttributeInfo.Profile(ID);
unsigned EffectCount = epi.FunctionEffects.size();
bool HasConds = !epi.FunctionEffects.Conditions.empty();
@@ -4011,34 +4055,53 @@ StringRef CountAttributedType::getAttributeName(bool WithMacroPrefix) const {
#undef ENUMERATE_ATTRS
}
-TypedefType::TypedefType(TypeClass tc, const TypedefNameDecl *D,
- QualType UnderlyingType, bool HasTypeDifferentFromDecl)
- : Type(tc, UnderlyingType.getCanonicalType(),
- toSemanticDependence(UnderlyingType->getDependence())),
+TypedefType::TypedefType(TypeClass TC, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TypedefNameDecl *D, QualType UnderlyingType,
+ bool HasTypeDifferentFromDecl)
+ : TypeWithKeyword(
+ Keyword, TC, UnderlyingType.getCanonicalType(),
+ toSemanticDependence(UnderlyingType->getDependence()) |
+ (Qualifier
+ ? toTypeDependence(Qualifier.getDependence() &
+ ~NestedNameSpecifierDependence::Dependent)
+ : TypeDependence{})),
Decl(const_cast<TypedefNameDecl *>(D)) {
- TypedefBits.hasTypeDifferentFromDecl = HasTypeDifferentFromDecl;
- if (!typeMatchesDecl())
- *getTrailingObjects() = UnderlyingType;
+ if ((TypedefBits.hasQualifier = !!Qualifier))
+ *getTrailingObjects<NestedNameSpecifier>() = Qualifier;
+ if ((TypedefBits.hasTypeDifferentFromDecl = HasTypeDifferentFromDecl))
+ *getTrailingObjects<QualType>() = UnderlyingType;
}
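// Representation sketch (an inference from this change, not verbatim from
// it): a written type such as `typename ns::Alias` is now a single
// TypedefType that records the `typename` keyword and stores the `ns::`
// qualifier as a trailing object, rather than a TypedefType wrapped in a
// separate ElaboratedType node.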
QualType TypedefType::desugar() const {
- return typeMatchesDecl() ? Decl->getUnderlyingType() : *getTrailingObjects();
-}
-
-UsingType::UsingType(const UsingShadowDecl *Found, QualType Underlying,
- QualType Canon)
- : Type(Using, Canon, toSemanticDependence(Canon->getDependence())),
- Found(const_cast<UsingShadowDecl *>(Found)) {
- UsingBits.hasTypeDifferentFromDecl = !Underlying.isNull();
- if (!typeMatchesDecl())
- *getTrailingObjects() = Underlying;
-}
-
-QualType UsingType::getUnderlyingType() const {
- return typeMatchesDecl()
- ? QualType(
- cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl(), 0)
- : *getTrailingObjects();
+ return typeMatchesDecl() ? Decl->getUnderlyingType()
+ : *getTrailingObjects<QualType>();
+}
+
+UnresolvedUsingType::UnresolvedUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D,
+ const Type *CanonicalType)
+ : TypeWithKeyword(
+ Keyword, UnresolvedUsing, QualType(CanonicalType, 0),
+ TypeDependence::DependentInstantiation |
+ (Qualifier
+ ? toTypeDependence(Qualifier.getDependence() &
+ ~NestedNameSpecifierDependence::Dependent)
+ : TypeDependence{})),
+ Decl(const_cast<UnresolvedUsingTypenameDecl *>(D)) {
+ if ((UnresolvedUsingBits.hasQualifier = !!Qualifier))
+ *getTrailingObjects<NestedNameSpecifier>() = Qualifier;
+}
+
+UsingType::UsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const UsingShadowDecl *D,
+ QualType UnderlyingType)
+ : TypeWithKeyword(Keyword, Using, UnderlyingType.getCanonicalType(),
+ toSemanticDependence(UnderlyingType->getDependence())),
+ D(const_cast<UsingShadowDecl *>(D)), UnderlyingType(UnderlyingType) {
+ if ((UsingBits.hasQualifier = !!Qualifier))
+ *getTrailingObjects() = Qualifier;
}
QualType MacroQualifiedType::desugar() const { return getUnderlyingType(); }
@@ -4212,24 +4275,79 @@ UnaryTransformType::UnaryTransformType(QualType BaseType,
: Type(UnaryTransform, CanonicalType, BaseType->getDependence()),
BaseType(BaseType), UnderlyingType(UnderlyingType), UKind(UKind) {}
-TagType::TagType(TypeClass TC, const TagDecl *D, QualType can)
- : Type(TC, can,
- D->isDependentType() ? TypeDependence::DependentInstantiation
- : TypeDependence::None),
- decl(const_cast<TagDecl *>(D)) {}
-
-static TagDecl *getInterestingTagDecl(TagDecl *decl) {
- for (auto *I : decl->redecls()) {
- if (I->isCompleteDefinition() || I->isBeingDefined())
- return I;
+TagType::TagType(TypeClass TC, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TagDecl *Tag,
+                 bool OwnsTag, bool IsInjected, const Type *CanonicalType)
+ : TypeWithKeyword(
+ Keyword, TC, QualType(CanonicalType, 0),
+ (Tag->isDependentType() ? TypeDependence::DependentInstantiation
+ : TypeDependence::None) |
+ (Qualifier
+ ? toTypeDependence(Qualifier.getDependence() &
+ ~NestedNameSpecifierDependence::Dependent)
+ : TypeDependence{})),
+ decl(const_cast<TagDecl *>(Tag)) {
+ if ((TagTypeBits.HasQualifier = !!Qualifier))
+ getTrailingQualifier() = Qualifier;
+ TagTypeBits.OwnsTag = !!OwnsTag;
+  TagTypeBits.IsInjected = IsInjected;
+}
+
+void *TagType::getTrailingPointer() const {
+ switch (getTypeClass()) {
+ case Type::Enum:
+ return const_cast<EnumType *>(cast<EnumType>(this) + 1);
+ case Type::Record:
+ return const_cast<RecordType *>(cast<RecordType>(this) + 1);
+ case Type::InjectedClassName:
+ return const_cast<InjectedClassNameType *>(
+ cast<InjectedClassNameType>(this) + 1);
+ default:
+ llvm_unreachable("unexpected type class");
}
- // If there's no definition (not even in progress), return what we have.
- return decl;
}
-TagDecl *TagType::getDecl() const { return getInterestingTagDecl(decl); }
+NestedNameSpecifier &TagType::getTrailingQualifier() const {
+ assert(TagTypeBits.HasQualifier);
+ return *reinterpret_cast<NestedNameSpecifier *>(llvm::alignAddr(
+ getTrailingPointer(), llvm::Align::Of<NestedNameSpecifier *>()));
+}
-bool TagType::isBeingDefined() const { return getDecl()->isBeingDefined(); }
+NestedNameSpecifier TagType::getQualifier() const {
+ return TagTypeBits.HasQualifier ? getTrailingQualifier() : std::nullopt;
+}
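+
+// Trailing-storage sketch (illustrative): the qualifier, when present, lives
+// directly after the most-derived node, e.g. for a RecordType:
+//
+//   [ RecordType | NestedNameSpecifier ]
+//                 ^ getTrailingPointer() == this + 1, re-aligned by
+//                   getTrailingQualifier() to alignof(NestedNameSpecifier *)
+//
+// which is why getTrailingPointer() switches over the concrete type class.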
+
+ClassTemplateDecl *TagType::getTemplateDecl() const {
+ auto *Decl = dyn_cast<CXXRecordDecl>(decl);
+ if (!Decl)
+ return nullptr;
+ if (auto *RD = dyn_cast<ClassTemplateSpecializationDecl>(Decl))
+ return RD->getSpecializedTemplate();
+ return Decl->getDescribedClassTemplate();
+}
+
+TemplateName TagType::getTemplateName(const ASTContext &Ctx) const {
+ auto *TD = getTemplateDecl();
+ if (!TD)
+ return TemplateName();
+ if (isCanonicalUnqualified())
+ return TemplateName(TD);
+ return Ctx.getQualifiedTemplateName(getQualifier(), /*TemplateKeyword=*/false,
+ TemplateName(TD));
+}
+
+ArrayRef<TemplateArgument>
+TagType::getTemplateArgs(const ASTContext &Ctx) const {
+ auto *Decl = dyn_cast<CXXRecordDecl>(decl);
+ if (!Decl)
+ return {};
+
+ if (auto *RD = dyn_cast<ClassTemplateSpecializationDecl>(Decl))
+ return RD->getTemplateArgs().asArray();
+ if (ClassTemplateDecl *TD = Decl->getDescribedClassTemplate())
+ return TD->getTemplateParameters()->getInjectedTemplateArgs(Ctx);
+ return {};
+}
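+
+// Worked example (a sketch): inside `template <class T> struct S { ... };`,
+// the injected-class-name `S` hits the getDescribedClassTemplate() case and
+// yields the injected arguments `<T>`; for a specialization `S<int>`, the
+// ClassTemplateSpecializationDecl case yields the converted `<int>` instead.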
bool RecordType::hasConstFields() const {
std::vector<const RecordType *> RecordTypeList;
@@ -4237,13 +4355,15 @@ bool RecordType::hasConstFields() const {
unsigned NextToCheckIndex = 0;
while (RecordTypeList.size() > NextToCheckIndex) {
- for (FieldDecl *FD :
- RecordTypeList[NextToCheckIndex]->getDecl()->fields()) {
+ for (FieldDecl *FD : RecordTypeList[NextToCheckIndex]
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->fields()) {
QualType FieldTy = FD->getType();
if (FieldTy.isConstQualified())
return true;
FieldTy = FieldTy.getCanonicalType();
- if (const auto *FieldRecTy = FieldTy->getAs<RecordType>()) {
+ if (const auto *FieldRecTy = FieldTy->getAsCanonical<RecordType>()) {
if (!llvm::is_contained(RecordTypeList, FieldRecTy))
RecordTypeList.push_back(FieldRecTy);
}
@@ -4253,6 +4373,13 @@ bool RecordType::hasConstFields() const {
return false;
}
+InjectedClassNameType::InjectedClassNameType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TagDecl *TD, bool IsInjected,
+ const Type *CanonicalType)
+ : TagType(TypeClass::InjectedClassName, Keyword, Qualifier, TD,
+ /*OwnsTag=*/false, IsInjected, CanonicalType) {}
+
AttributedType::AttributedType(QualType canon, const Attr *attr,
QualType modified, QualType equivalent)
: AttributedType(canon, attr->getKind(), attr, modified, equivalent) {}
@@ -4340,10 +4467,6 @@ bool AttributedType::isCallingConv() const {
llvm_unreachable("invalid attr kind");
}
-CXXRecordDecl *InjectedClassNameType::getDecl() const {
- return cast<CXXRecordDecl>(getInterestingTagDecl(Decl));
-}
-
IdentifierInfo *TemplateTypeParmType::getIdentifier() const {
return isCanonicalUnqualified() ? nullptr : getDecl()->getIdentifier();
}
@@ -4393,17 +4516,45 @@ void SubstTemplateTypeParmType::Profile(llvm::FoldingSetNodeID &ID,
ID.AddBoolean(Final);
}
+SubstPackType::SubstPackType(TypeClass Derived, QualType Canon,
+ const TemplateArgument &ArgPack)
+ : Type(Derived, Canon,
+ TypeDependence::DependentInstantiation |
+ TypeDependence::UnexpandedPack),
+ Arguments(ArgPack.pack_begin()) {
+ assert(llvm::all_of(
+ ArgPack.pack_elements(),
+ [](auto &P) { return P.getKind() == TemplateArgument::Type; }) &&
+ "non-type argument to SubstPackType?");
+ SubstPackTypeBits.NumArgs = ArgPack.pack_size();
+}
+
+TemplateArgument SubstPackType::getArgumentPack() const {
+ return TemplateArgument(llvm::ArrayRef(Arguments, getNumArgs()));
+}
+
+void SubstPackType::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getArgumentPack());
+}
+
+void SubstPackType::Profile(llvm::FoldingSetNodeID &ID,
+ const TemplateArgument &ArgPack) {
+ ID.AddInteger(ArgPack.pack_size());
+ for (const auto &P : ArgPack.pack_elements())
+ ID.AddPointer(P.getAsType().getAsOpaquePtr());
+}
+
SubstTemplateTypeParmPackType::SubstTemplateTypeParmPackType(
QualType Canon, Decl *AssociatedDecl, unsigned Index, bool Final,
const TemplateArgument &ArgPack)
- : Type(SubstTemplateTypeParmPack, Canon,
- TypeDependence::DependentInstantiation |
- TypeDependence::UnexpandedPack),
- Arguments(ArgPack.pack_begin()),
+ : SubstPackType(SubstTemplateTypeParmPack, Canon, ArgPack),
AssociatedDeclAndFinal(AssociatedDecl, Final) {
- SubstTemplateTypeParmPackTypeBits.Index = Index;
- SubstTemplateTypeParmPackTypeBits.NumArgs = ArgPack.pack_size();
assert(AssociatedDecl != nullptr);
+
+ SubstPackTypeBits.SubstTemplTypeParmPackIndex = Index;
+ assert(getNumArgs() == ArgPack.pack_size() &&
+ "Parent bitfields in SubstPackType were overwritten."
+ "Check NumSubstPackTypeBits.");
}
Decl *SubstTemplateTypeParmPackType::getAssociatedDecl() const {
@@ -4423,10 +4574,6 @@ IdentifierInfo *SubstTemplateTypeParmPackType::getIdentifier() const {
return getReplacedParameter()->getIdentifier();
}
-TemplateArgument SubstTemplateTypeParmPackType::getArgumentPack() const {
- return TemplateArgument(llvm::ArrayRef(Arguments, getNumArgs()));
-}
-
void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID) {
Profile(ID, getAssociatedDecl(), getIndex(), getFinal(), getArgumentPack());
}
@@ -4438,11 +4585,13 @@ void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID,
ID.AddPointer(AssociatedDecl);
ID.AddInteger(Index);
ID.AddBoolean(Final);
- ID.AddInteger(ArgPack.pack_size());
- for (const auto &P : ArgPack.pack_elements())
- ID.AddPointer(P.getAsType().getAsOpaquePtr());
+ SubstPackType::Profile(ID, ArgPack);
}
+SubstBuiltinTemplatePackType::SubstBuiltinTemplatePackType(
+ QualType Canon, const TemplateArgument &ArgPack)
+ : SubstPackType(SubstBuiltinTemplatePack, Canon, ArgPack) {}
+
bool TemplateSpecializationType::anyDependentTemplateArguments(
const TemplateArgumentListInfo &Args,
ArrayRef<TemplateArgument> Converted) {
@@ -4466,17 +4615,28 @@ bool TemplateSpecializationType::anyInstantiationDependentTemplateArguments(
return false;
}
+static TypeDependence
+getTemplateSpecializationTypeDependence(QualType Underlying, TemplateName T) {
+ TypeDependence D = Underlying.isNull()
+ ? TypeDependence::DependentInstantiation
+ : toSemanticDependence(Underlying->getDependence());
+ D |= toTypeDependence(T.getDependence()) & TypeDependence::UnexpandedPack;
+ if (isPackProducingBuiltinTemplateName(T)) {
+ if (Underlying.isNull()) // Dependent, will produce a pack on substitution.
+ D |= TypeDependence::UnexpandedPack;
+ else
+ D |= (Underlying->getDependence() & TypeDependence::UnexpandedPack);
+ }
+ return D;
+}
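+
+// Dependence sketch (illustrative): a dependent specialization of a
+// pack-producing builtin template has a null Underlying, so it is marked
+// DependentInstantiation | UnexpandedPack (substitution will produce a
+// pack); once Underlying is known, the pack bit is taken from it instead,
+// on top of any unexpanded-pack dependence of the template name itself.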
+
TemplateSpecializationType::TemplateSpecializationType(
- TemplateName T, bool IsAlias, ArrayRef<TemplateArgument> Args,
- QualType Underlying)
- : Type(TemplateSpecialization,
- Underlying.isNull() ? QualType(this, 0)
- : Underlying.getCanonicalType(),
- (Underlying.isNull()
- ? TypeDependence::DependentInstantiation
- : toSemanticDependence(Underlying->getDependence())) |
- (toTypeDependence(T.getDependence()) &
- TypeDependence::UnexpandedPack)),
+ ElaboratedTypeKeyword Keyword, TemplateName T, bool IsAlias,
+ ArrayRef<TemplateArgument> Args, QualType Underlying)
+ : TypeWithKeyword(Keyword, TemplateSpecialization,
+ Underlying.isNull() ? QualType(this, 0)
+ : Underlying.getCanonicalType(),
+ getTemplateSpecializationTypeDependence(Underlying, T)),
Template(T) {
TemplateSpecializationTypeBits.NumArgs = Args.size();
TemplateSpecializationTypeBits.TypeAlias = IsAlias;
@@ -4522,6 +4682,12 @@ QualType TemplateSpecializationType::getAliasedType() const {
return *reinterpret_cast<const QualType *>(template_arguments().end());
}
+bool clang::TemplateSpecializationType::isSugared() const {
+ return !isDependentType() || isCurrentInstantiation() || isTypeAlias() ||
+ (isPackProducingBuiltinTemplateName(Template) &&
+ isa<SubstBuiltinTemplatePackType>(*getCanonicalTypeInternal()));
+}
+
void TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Ctx) {
Profile(ID, Template, template_arguments(),
@@ -4699,7 +4865,8 @@ static CachedProperties computeCachedProperties(const Type *T) {
case Type::Record:
case Type::Enum: {
- const TagDecl *Tag = cast<TagType>(T)->getDecl();
+ const TagDecl *Tag =
+ cast<TagType>(T)->getOriginalDecl()->getDefinitionOrSelf();
// C++ [basic.link]p8:
// - it is a class or enumeration type that is named (or has a name
@@ -4726,12 +4893,9 @@ static CachedProperties computeCachedProperties(const Type *T) {
case Type::MemberPointer: {
const auto *MPT = cast<MemberPointerType>(T);
CachedProperties Cls = [&] {
- if (auto *RD = MPT->getMostRecentCXXRecordDecl())
- return Cache::get(QualType(RD->getTypeForDecl(), 0));
- if (const Type *T = MPT->getQualifier()->getAsType())
- return Cache::get(T);
- // Treat as a dependent type.
- return CachedProperties(Linkage::External, false);
+ if (MPT->isSugared())
+ MPT = cast<MemberPointerType>(MPT->getCanonicalTypeInternal());
+ return Cache::get(MPT->getQualifier().getAsType());
}();
return merge(Cls, Cache::get(MPT->getPointeeType()));
}
@@ -4811,7 +4975,8 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
case Type::Record:
case Type::Enum:
- return getDeclLinkageAndVisibility(cast<TagType>(T)->getDecl());
+ return getDeclLinkageAndVisibility(
+ cast<TagType>(T)->getOriginalDecl()->getDefinitionOrSelf());
case Type::Complex:
return computeTypeLinkageInfo(cast<ComplexType>(T)->getElementType());
@@ -4827,8 +4992,8 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
LinkageInfo LV;
if (auto *D = MPT->getMostRecentCXXRecordDecl()) {
LV.merge(getDeclLinkageAndVisibility(D));
- } else if (auto *Ty = MPT->getQualifier()->getAsType()) {
- LV.merge(computeTypeLinkageInfo(Ty));
+ } else {
+ LV.merge(computeTypeLinkageInfo(MPT->getQualifier().getAsType()));
}
LV.merge(computeTypeLinkageInfo(MPT->getPointeeType()));
return LV;
@@ -4938,6 +5103,7 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
case Type::UnaryTransform:
case Type::TemplateTypeParm:
case Type::SubstTemplateTypeParmPack:
+ case Type::SubstBuiltinTemplatePack:
case Type::DependentName:
case Type::DependentTemplateSpecialization:
case Type::Auto:
@@ -5014,7 +5180,7 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
llvm_unreachable("unknown builtin type");
case Type::Record: {
- const RecordDecl *RD = cast<RecordType>(type)->getDecl();
+ const RecordDecl *RD = cast<RecordType>(type)->getOriginalDecl();
// For template specializations, look only at primary template attributes.
  // This is consistent regardless of whether the instantiation is known.
if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(RD))
@@ -5201,7 +5367,7 @@ bool Type::isObjCARCBridgableType() const {
/// Determine whether the given type T is a "bridgeable" C type.
bool Type::isCARCBridgableType() const {
- const auto *Pointer = getAs<PointerType>();
+ const auto *Pointer = getAsCanonical<PointerType>();
if (!Pointer)
return false;
@@ -5211,15 +5377,19 @@ bool Type::isCARCBridgableType() const {
/// Check if the specified type is the CUDA device builtin surface type.
bool Type::isCUDADeviceBuiltinSurfaceType() const {
- if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>();
+ if (const auto *RT = getAsCanonical<RecordType>())
+ return RT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>();
return false;
}
/// Check if the specified type is the CUDA device builtin texture type.
bool Type::isCUDADeviceBuiltinTextureType() const {
- if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->hasAttr<CUDADeviceBuiltinTextureTypeAttr>();
+ if (const auto *RT = getAsCanonical<RecordType>())
+ return RT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<CUDADeviceBuiltinTextureTypeAttr>();
return false;
}
@@ -5246,6 +5416,15 @@ bool Type::isHLSLResourceRecord() const {
return HLSLAttributedResourceType::findHandleTypeOnResource(this) != nullptr;
}
+bool Type::isHLSLResourceRecordArray() const {
+ const Type *Ty = getUnqualifiedDesugaredType();
+ if (!Ty->isArrayType())
+ return false;
+ while (isa<ConstantArrayType>(Ty))
+ Ty = Ty->getArrayElementTypeNoTypeQual();
+ return Ty->isHLSLResourceRecord();
+}
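+
+// HLSL illustration (a sketch, not part of this change):
+//
+//   RWBuffer<float> A[4];     // constant array of resources      -> true
+//   RWBuffer<float> B[4][2];  // nested constant arrays unwrap    -> true
+//   float C[4];               // element is not a resource record -> false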
+
bool Type::isHLSLIntangibleType() const {
const Type *Ty = getUnqualifiedDesugaredType();
@@ -5282,8 +5461,7 @@ QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
return DK_objc_weak_lifetime;
}
- if (const auto *RT = type->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ if (const auto *RD = type->getBaseElementTypeUnsafe()->getAsRecordDecl()) {
if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
/// Check if this is a C++ object with a non-trivial destructor.
if (CXXRD->hasDefinition() && !CXXRD->hasTrivialDestructor())
@@ -5301,16 +5479,16 @@ QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
bool MemberPointerType::isSugared() const {
CXXRecordDecl *D1 = getMostRecentCXXRecordDecl(),
- *D2 = getQualifier()->getAsRecordDecl();
+ *D2 = getQualifier().getAsRecordDecl();
assert(!D1 == !D2);
return D1 != D2 && D1->getCanonicalDecl() != D2->getCanonicalDecl();
}
void MemberPointerType::Profile(llvm::FoldingSetNodeID &ID, QualType Pointee,
- const NestedNameSpecifier *Qualifier,
+ const NestedNameSpecifier Qualifier,
const CXXRecordDecl *Cls) {
ID.AddPointer(Pointee.getAsOpaquePtr());
- ID.AddPointer(Qualifier);
+ Qualifier.Profile(ID);
if (Cls)
ID.AddPointer(Cls->getCanonicalDecl());
}
@@ -5318,14 +5496,14 @@ void MemberPointerType::Profile(llvm::FoldingSetNodeID &ID, QualType Pointee,
CXXRecordDecl *MemberPointerType::getCXXRecordDecl() const {
return dyn_cast<MemberPointerType>(getCanonicalTypeInternal())
->getQualifier()
- ->getAsRecordDecl();
+ .getAsRecordDecl();
}
CXXRecordDecl *MemberPointerType::getMostRecentCXXRecordDecl() const {
auto *RD = getCXXRecordDecl();
if (!RD)
return nullptr;
- return RD->getMostRecentNonInjectedDecl();
+ return RD->getMostRecentDecl();
}
void clang::FixedPointValueToString(SmallVectorImpl<char> &Str,
diff --git a/clang/lib/AST/TypeLoc.cpp b/clang/lib/AST/TypeLoc.cpp
index 5c45c59..3e9597f 100644
--- a/clang/lib/AST/TypeLoc.cpp
+++ b/clang/lib/AST/TypeLoc.cpp
@@ -195,15 +195,6 @@ SourceLocation TypeLoc::getBeginLoc() const {
TypeLoc LeftMost = Cur;
while (true) {
switch (Cur.getTypeLocClass()) {
- case Elaborated:
- if (Cur.getLocalSourceRange().getBegin().isValid()) {
- LeftMost = Cur;
- break;
- }
- Cur = Cur.getNextTypeLoc();
- if (Cur.isNull())
- break;
- continue;
case FunctionProto:
if (Cur.castAs<FunctionProtoTypeLoc>().getTypePtr()
->hasTrailingReturn()) {
@@ -275,7 +266,6 @@ SourceLocation TypeLoc::getEndLoc() const {
Last = Cur;
break;
case Qualified:
- case Elaborated:
break;
}
Cur = Cur.getNextTypeLoc();
@@ -313,9 +303,8 @@ bool TypeSpecTypeLoc::isKind(const TypeLoc &TL) {
}
bool TagTypeLoc::isDefinition() const {
- TagDecl *D = getDecl();
- return D->isCompleteDefinition() &&
- (D->getIdentifier() == nullptr || D->getLocation() == getNameLoc());
+ return getTypePtr()->isTagOwned() &&
+ getOriginalDecl()->isCompleteDefinition();
}
// Reimplemented to account for GNU/C++ extension
@@ -482,6 +471,134 @@ TypeLoc TypeLoc::findExplicitQualifierLoc() const {
return {};
}
+NestedNameSpecifierLoc TypeLoc::getPrefix() const {
+ switch (getTypeLocClass()) {
+ case TypeLoc::DependentName:
+ return castAs<DependentNameTypeLoc>().getQualifierLoc();
+ case TypeLoc::TemplateSpecialization:
+ return castAs<TemplateSpecializationTypeLoc>().getQualifierLoc();
+ case TypeLoc::DependentTemplateSpecialization:
+ return castAs<DependentTemplateSpecializationTypeLoc>().getQualifierLoc();
+ case TypeLoc::DeducedTemplateSpecialization:
+ return castAs<DeducedTemplateSpecializationTypeLoc>().getQualifierLoc();
+ case TypeLoc::Enum:
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName:
+ return castAs<TagTypeLoc>().getQualifierLoc();
+ case TypeLoc::Typedef:
+ return castAs<TypedefTypeLoc>().getQualifierLoc();
+ case TypeLoc::UnresolvedUsing:
+ return castAs<UnresolvedUsingTypeLoc>().getQualifierLoc();
+ case TypeLoc::Using:
+ return castAs<UsingTypeLoc>().getQualifierLoc();
+ default:
+ return NestedNameSpecifierLoc();
+ }
+}
+
+SourceLocation TypeLoc::getNonPrefixBeginLoc() const {
+ switch (getTypeLocClass()) {
+ case TypeLoc::TemplateSpecialization: {
+ auto TL = castAs<TemplateSpecializationTypeLoc>();
+ SourceLocation Loc = TL.getTemplateKeywordLoc();
+ if (!Loc.isValid())
+ Loc = TL.getTemplateNameLoc();
+ return Loc;
+ }
+ case TypeLoc::DependentTemplateSpecialization: {
+ auto TL = castAs<DependentTemplateSpecializationTypeLoc>();
+ SourceLocation Loc = TL.getTemplateKeywordLoc();
+ if (!Loc.isValid())
+ Loc = TL.getTemplateNameLoc();
+ return Loc;
+ }
+ case TypeLoc::DeducedTemplateSpecialization: {
+ auto TL = castAs<DeducedTemplateSpecializationTypeLoc>();
+ SourceLocation Loc = TL.getTemplateKeywordLoc();
+ if (!Loc.isValid())
+ Loc = TL.getTemplateNameLoc();
+ return Loc;
+ }
+ case TypeLoc::DependentName:
+ return castAs<DependentNameTypeLoc>().getNameLoc();
+ case TypeLoc::Enum:
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName:
+ return castAs<TagTypeLoc>().getNameLoc();
+ case TypeLoc::Typedef:
+ return castAs<TypedefTypeLoc>().getNameLoc();
+ case TypeLoc::UnresolvedUsing:
+ return castAs<UnresolvedUsingTypeLoc>().getNameLoc();
+ case TypeLoc::Using:
+ return castAs<UsingTypeLoc>().getNameLoc();
+ default:
+ return getBeginLoc();
+ }
+}
+
+SourceLocation TypeLoc::getNonElaboratedBeginLoc() const {
+ // For elaborated types (e.g. `struct a::A`) we want the portion after the
+ // `struct` but including the namespace qualifier, `a::`.
+ switch (getTypeLocClass()) {
+ case TypeLoc::Qualified:
+ return castAs<QualifiedTypeLoc>()
+ .getUnqualifiedLoc()
+ .getNonElaboratedBeginLoc();
+ case TypeLoc::TemplateSpecialization: {
+ auto T = castAs<TemplateSpecializationTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getTemplateNameLoc();
+ }
+ case TypeLoc::DependentTemplateSpecialization: {
+ auto T = castAs<DependentTemplateSpecializationTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getTemplateNameLoc();
+ }
+ case TypeLoc::DeducedTemplateSpecialization: {
+ auto T = castAs<DeducedTemplateSpecializationTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getTemplateNameLoc();
+ }
+ case TypeLoc::DependentName: {
+ auto T = castAs<DependentNameTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getNameLoc();
+ }
+ case TypeLoc::Enum:
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName: {
+ auto T = castAs<TagTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getNameLoc();
+ }
+ case TypeLoc::Typedef: {
+ auto T = castAs<TypedefTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getNameLoc();
+ }
+ case TypeLoc::UnresolvedUsing: {
+ auto T = castAs<UnresolvedUsingTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getNameLoc();
+ }
+ case TypeLoc::Using: {
+ auto T = castAs<UsingTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getNameLoc();
+ }
+ default:
+ return getBeginLoc();
+ }
+}
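+
+// Example (sketch): for the written type `struct a::A`, getBeginLoc() points
+// at `struct`, getNonElaboratedBeginLoc() at `a`, and getNonPrefixBeginLoc()
+// at `A`.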
+
void ObjCTypeParamTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
setNameLoc(Loc);
@@ -555,9 +672,9 @@ static void initializeElaboratedKeyword(TL T, SourceLocation Loc) {
: SourceLocation());
}
-static NestedNameSpecifierLoc
-initializeQualifier(ASTContext &Context, NestedNameSpecifier *Qualifier,
- SourceLocation Loc) {
+static NestedNameSpecifierLoc initializeQualifier(ASTContext &Context,
+ NestedNameSpecifier Qualifier,
+ SourceLocation Loc) {
if (!Qualifier)
return NestedNameSpecifierLoc();
NestedNameSpecifierLocBuilder Builder;
@@ -565,15 +682,6 @@ initializeQualifier(ASTContext &Context, NestedNameSpecifier *Qualifier,
return Builder.getWithLocInContext(Context);
}
-void ElaboratedTypeLoc::initializeLocal(ASTContext &Context,
- SourceLocation Loc) {
- if (isEmpty())
- return;
- initializeElaboratedKeyword(*this, Loc);
- setQualifierLoc(
- initializeQualifier(Context, getTypePtr()->getQualifier(), Loc));
-}
-
void DependentNameTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
initializeElaboratedKeyword(*this, Loc);
@@ -596,6 +704,76 @@ DependentTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
Context, getTypePtr()->template_arguments(), getArgInfos(), Loc);
}
+void TemplateSpecializationTypeLoc::set(SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKeywordLoc,
+ SourceLocation NameLoc,
+ SourceLocation LAngleLoc,
+ SourceLocation RAngleLoc) {
+ TemplateSpecializationLocInfo &Data = *getLocalData();
+
+ Data.ElaboratedKWLoc = ElaboratedKeywordLoc;
+ SourceLocation BeginLoc = ElaboratedKeywordLoc;
+
+ assert(QualifierLoc.getNestedNameSpecifier() ==
+ getTypePtr()->getTemplateName().getQualifier());
+ Data.QualifierData = QualifierLoc ? QualifierLoc.getOpaqueData() : nullptr;
+ if (QualifierLoc && !BeginLoc.isValid())
+ BeginLoc = QualifierLoc.getBeginLoc();
+
+ Data.TemplateKWLoc = TemplateKeywordLoc;
+ if (!BeginLoc.isValid())
+ BeginLoc = TemplateKeywordLoc;
+
+ Data.NameLoc = NameLoc;
+ if (!BeginLoc.isValid())
+ BeginLoc = NameLoc;
+
+ Data.LAngleLoc = LAngleLoc;
+ Data.SR = SourceRange(BeginLoc, RAngleLoc);
+}
+
+void TemplateSpecializationTypeLoc::set(SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKeywordLoc,
+ SourceLocation NameLoc,
+ const TemplateArgumentListInfo &TAL) {
+ set(ElaboratedKeywordLoc, QualifierLoc, TemplateKeywordLoc, NameLoc,
+ TAL.getLAngleLoc(), TAL.getRAngleLoc());
+ MutableArrayRef<TemplateArgumentLocInfo> ArgInfos = getArgLocInfos();
+ assert(TAL.size() == ArgInfos.size());
+ for (unsigned I = 0, N = TAL.size(); I != N; ++I)
+ ArgInfos[I] = TAL[I].getLocInfo();
+}
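+
+// Range sketch (illustrative): for a written `typename ns::template X<int>`,
+// the stored range begins at `typename`; absent an elaborated keyword and
+// qualifier, it falls through to the `template` keyword and finally to the
+// template name itself.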
+
+void TemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+
+ auto [Qualifier, HasTemplateKeyword] =
+ getTypePtr()->getTemplateName().getQualifierAndTemplateKeyword();
+
+ SourceLocation ElaboratedKeywordLoc =
+ getTypePtr()->getKeyword() != ElaboratedTypeKeyword::None
+ ? Loc
+ : SourceLocation();
+
+ NestedNameSpecifierLoc QualifierLoc;
+ if (Qualifier) {
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, Qualifier, Loc);
+ QualifierLoc = Builder.getWithLocInContext(Context);
+ }
+
+ set(ElaboratedKeywordLoc, QualifierLoc,
+ /*TemplateKeywordLoc=*/HasTemplateKeyword ? Loc : SourceLocation(),
+ /*NameLoc=*/Loc, /*LAngleLoc=*/Loc, /*RAngleLoc=*/Loc);
+ initializeArgLocs(Context, getTypePtr()->template_arguments(), getArgInfos(),
+ Loc);
+}
+
void TemplateSpecializationTypeLoc::initializeArgLocs(
ASTContext &Context, ArrayRef<TemplateArgument> Args,
TemplateArgumentLocInfo *ArgInfos, SourceLocation Loc) {
@@ -631,7 +809,7 @@ void TemplateSpecializationTypeLoc::initializeArgLocs(
Builder.MakeTrivial(Context, QTN->getQualifier(), Loc);
ArgInfos[i] = TemplateArgumentLocInfo(
- Context, Builder.getWithLocInContext(Context), Loc,
+ Context, Loc, Builder.getWithLocInContext(Context), Loc,
Args[i].getKind() == TemplateArgument::Template ? SourceLocation()
: Loc);
break;
@@ -680,6 +858,14 @@ void AutoTypeLoc::initializeLocal(ASTContext &Context, SourceLocation Loc) {
}
}
+void DeducedTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ initializeElaboratedKeyword(*this, Loc);
+ setQualifierLoc(initializeQualifier(
+ Context, getTypePtr()->getTemplateName().getQualifier(), Loc));
+ setTemplateNameLoc(Loc);
+}
+
namespace {
class GetContainedAutoTypeLocVisitor :
@@ -693,10 +879,6 @@ namespace {
// Only these types can contain the desired 'auto' type.
- TypeLoc VisitElaboratedTypeLoc(ElaboratedTypeLoc T) {
- return Visit(T.getNamedTypeLoc());
- }
-
TypeLoc VisitQualifiedTypeLoc(QualifiedTypeLoc T) {
return Visit(T.getUnqualifiedLoc());
}
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
index deb453f..54ca42d 100644
--- a/clang/lib/AST/TypePrinter.cpp
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -133,7 +133,7 @@ public:
void printAfter(QualType T, raw_ostream &OS);
void AppendScope(DeclContext *DC, raw_ostream &OS,
DeclarationName NameInScope);
- void printTag(TagDecl *T, raw_ostream &OS);
+ void printTagType(const TagType *T, raw_ostream &OS);
void printFunctionAfter(const FunctionType::ExtInfo &Info, raw_ostream &OS);
#define ABSTRACT_TYPE(CLASS, PARENT)
#define TYPE(CLASS, PARENT) \
@@ -230,9 +230,9 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::UnaryTransform:
case Type::Record:
case Type::Enum:
- case Type::Elaborated:
case Type::TemplateTypeParm:
case Type::SubstTemplateTypeParmPack:
+ case Type::SubstBuiltinTemplatePack:
case Type::DeducedTemplateSpecialization:
case Type::TemplateSpecialization:
case Type::InjectedClassName:
@@ -504,11 +504,7 @@ void TypePrinter::printMemberPointerBefore(const MemberPointerType *T,
// FIXME: this should include vectors, but vectors use attributes I guess.
if (isa<ArrayType>(T->getPointeeType()))
OS << '(';
-
- PrintingPolicy InnerPolicy(Policy);
- InnerPolicy.IncludeTagDefinition = false;
- T->getQualifier()->print(OS, InnerPolicy);
-
+ T->getQualifier().print(OS, Policy);
OS << "*";
}
@@ -1211,29 +1207,50 @@ void TypePrinter::printTypeSpec(NamedDecl *D, raw_ostream &OS) {
void TypePrinter::printUnresolvedUsingBefore(const UnresolvedUsingType *T,
raw_ostream &OS) {
- printTypeSpec(T->getDecl(), OS);
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ElaboratedTypeKeyword::None)
+ OS << ' ';
+ auto *D = T->getDecl();
+ if (Policy.FullyQualifiedName || T->isCanonicalUnqualified()) {
+ AppendScope(D->getDeclContext(), OS, D->getDeclName());
+ } else {
+ T->getQualifier().print(OS, Policy);
+ }
+ OS << D->getIdentifier()->getName();
+ spaceBeforePlaceHolder(OS);
}
void TypePrinter::printUnresolvedUsingAfter(const UnresolvedUsingType *T,
raw_ostream &OS) {}
void TypePrinter::printUsingBefore(const UsingType *T, raw_ostream &OS) {
- // After `namespace b { using a::X }`, is the type X within B a::X or b::X?
- //
- // - b::X is more formally correct given the UsingType model
- // - b::X makes sense if "re-exporting" a symbol in a new namespace
- // - a::X makes sense if "importing" a symbol for convenience
- //
- // The "importing" use seems much more common, so we print a::X.
- // This could be a policy option, but the right choice seems to rest more
- // with the intent of the code than the caller.
- printTypeSpec(T->getFoundDecl()->getUnderlyingDecl(), OS);
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ElaboratedTypeKeyword::None)
+ OS << ' ';
+ auto *D = T->getDecl();
+ if (Policy.FullyQualifiedName) {
+ AppendScope(D->getDeclContext(), OS, D->getDeclName());
+ } else {
+ T->getQualifier().print(OS, Policy);
+ }
+ OS << D->getIdentifier()->getName();
+ spaceBeforePlaceHolder(OS);
}
void TypePrinter::printUsingAfter(const UsingType *T, raw_ostream &OS) {}
void TypePrinter::printTypedefBefore(const TypedefType *T, raw_ostream &OS) {
- printTypeSpec(T->getDecl(), OS);
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ElaboratedTypeKeyword::None)
+ OS << ' ';
+ auto *D = T->getDecl();
+ if (Policy.FullyQualifiedName) {
+ AppendScope(D->getDeclContext(), OS, D->getDeclName());
+ } else {
+ T->getQualifier().print(OS, Policy);
+ }
+ OS << D->getIdentifier()->getName();
+ spaceBeforePlaceHolder(OS);
}
void TypePrinter::printMacroQualifiedBefore(const MacroQualifiedType *T,
@@ -1354,14 +1371,53 @@ void TypePrinter::printAutoAfter(const AutoType *T, raw_ostream &OS) {
void TypePrinter::printDeducedTemplateSpecializationBefore(
const DeducedTemplateSpecializationType *T, raw_ostream &OS) {
- // If the type has been deduced, print the deduced type.
+ if (ElaboratedTypeKeyword Keyword = T->getKeyword();
+      Keyword != ElaboratedTypeKeyword::None)
+ OS << KeywordHelpers::getKeywordName(Keyword) << ' ';
+
+ TemplateName Name = T->getTemplateName();
+
+ // If the type has been deduced, print the template arguments, as if this was
+ // printing the deduced type, but including elaboration and template name
+ // qualification.
+ // FIXME: There should probably be a policy which controls this.
+ // We would probably want to do this on diagnostics, but not on -ast-print.
+ ArrayRef<TemplateArgument> Args;
+ TemplateDecl *DeducedTD = nullptr;
if (!T->getDeducedType().isNull()) {
- printBefore(T->getDeducedType(), OS);
- } else {
+ if (const auto *TST =
+ dyn_cast<TemplateSpecializationType>(T->getDeducedType())) {
+ DeducedTD = TST->getTemplateName().getAsTemplateDecl(
+ /*IgnoreDeduced=*/true);
+ Args = TST->template_arguments();
+ } else {
+ // Should only get here for canonical types.
+ const auto *CD = cast<ClassTemplateSpecializationDecl>(
+ cast<RecordType>(T->getDeducedType())->getOriginalDecl());
+ DeducedTD = CD->getSpecializedTemplate();
+ Args = CD->getTemplateArgs().asArray();
+ }
+
+ // FIXME: Workaround for alias template CTAD not producing guides which
+ // include the alias template specialization type.
+ // Purposefully disregard qualification when building this TemplateName;
+    // any qualification we might have might not make sense in the context
+    // in which this was deduced.
+ if (!declaresSameEntity(DeducedTD, Name.getAsTemplateDecl(
+ /*IgnoreDeduced=*/true)))
+ Name = TemplateName(DeducedTD);
+ }
+
+ {
IncludeStrongLifetimeRAII Strong(Policy);
- T->getTemplateName().print(OS, Policy);
- spaceBeforePlaceHolder(OS);
+ Name.print(OS, Policy);
}
+ if (DeducedTD) {
+ printTemplateArgumentList(OS, Args, Policy,
+ DeducedTD->getTemplateParameters());
+ }
+
+ spaceBeforePlaceHolder(OS);
}
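// Printing sketch (illustrative): for CTAD such as `std::vector v{1, 2};`,
// the deduced node now prints as `std::vector<int>` -- the template name as
// written plus the deduced arguments -- rather than only the underlying
// deduced type.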
void TypePrinter::printDeducedTemplateSpecializationAfter(
@@ -1480,30 +1536,37 @@ void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS,
}
}
-void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
- if (Policy.IncludeTagDefinition) {
- PrintingPolicy SubPolicy = Policy;
- SubPolicy.IncludeTagDefinition = false;
- D->print(OS, SubPolicy, Indentation);
+void TypePrinter::printTagType(const TagType *T, raw_ostream &OS) {
+ TagDecl *D = T->getOriginalDecl();
+
+ if (Policy.IncludeTagDefinition && T->isTagOwned()) {
+ D->print(OS, Policy, Indentation);
spaceBeforePlaceHolder(OS);
return;
}
bool HasKindDecoration = false;
- // We don't print tags unless this is an elaborated type.
- // In C, we just assume every RecordType is an elaborated type.
- if (!Policy.SuppressTagKeyword && !D->getTypedefNameForAnonDecl()) {
- HasKindDecoration = true;
- OS << D->getKindName();
- OS << ' ';
+ if (T->isCanonicalUnqualified()) {
+ if (!Policy.SuppressTagKeyword && !D->getTypedefNameForAnonDecl()) {
+ HasKindDecoration = true;
+ OS << D->getKindName();
+ OS << ' ';
+ }
+ } else {
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ElaboratedTypeKeyword::None)
+ OS << ' ';
}
- // Compute the full nested-name-specifier for this type.
- // In C, this will always be empty except when the type
- // being printed is anonymous within other Record.
- if (!Policy.SuppressScope)
+ if (!Policy.FullyQualifiedName && !T->isCanonicalUnqualified()) {
+ T->getQualifier().print(OS, Policy);
+ } else if (!Policy.SuppressScope) {
+ // Compute the full nested-name-specifier for this type.
+ // In C, this will always be empty except when the type
+    // being printed is anonymous within another Record.
AppendScope(D->getDeclContext(), OS, D->getDeclName());
+ }
if (const IdentifierInfo *II = D->getIdentifier())
OS << II->getName();
@@ -1578,9 +1641,11 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
void TypePrinter::printRecordBefore(const RecordType *T, raw_ostream &OS) {
// Print the preferred name if we have one for this type.
if (Policy.UsePreferredNames) {
- for (const auto *PNA : T->getDecl()->specific_attrs<PreferredNameAttr>()) {
+ for (const auto *PNA : T->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->specific_attrs<PreferredNameAttr>()) {
if (!declaresSameEntity(PNA->getTypedefType()->getAsCXXRecordDecl(),
- T->getDecl()))
+ T->getOriginalDecl()))
continue;
// Find the outermost typedef or alias template.
QualType T = PNA->getTypedefType();
@@ -1594,17 +1659,44 @@ void TypePrinter::printRecordBefore(const RecordType *T, raw_ostream &OS) {
}
}
- printTag(T->getDecl(), OS);
+ printTagType(T, OS);
}
void TypePrinter::printRecordAfter(const RecordType *T, raw_ostream &OS) {}
void TypePrinter::printEnumBefore(const EnumType *T, raw_ostream &OS) {
- printTag(T->getDecl(), OS);
+ printTagType(T, OS);
}
void TypePrinter::printEnumAfter(const EnumType *T, raw_ostream &OS) {}
+void TypePrinter::printInjectedClassNameBefore(const InjectedClassNameType *T,
+ raw_ostream &OS) {
+ const ASTContext &Ctx = T->getOriginalDecl()->getASTContext();
+ IncludeStrongLifetimeRAII Strong(Policy);
+ T->getTemplateName(Ctx).print(OS, Policy);
+ if (Policy.PrintInjectedClassNameWithArguments) {
+ auto *Decl = T->getOriginalDecl();
+ // FIXME: Use T->getTemplateArgs(Ctx) when that supports as-written
+ // arguments.
+ if (auto *RD = dyn_cast<ClassTemplateSpecializationDecl>(Decl)) {
+ printTemplateArgumentList(OS, RD->getTemplateArgsAsWritten()->arguments(),
+ Policy,
+ T->getTemplateDecl()->getTemplateParameters());
+ } else {
+ ClassTemplateDecl *TD = Decl->getDescribedClassTemplate();
+ assert(TD);
+ printTemplateArgumentList(
+ OS, TD->getTemplateParameters()->getInjectedTemplateArgs(Ctx), Policy,
+ T->getTemplateDecl()->getTemplateParameters());
+ }
+ }
+ spaceBeforePlaceHolder(OS);
+}
+
+void TypePrinter::printInjectedClassNameAfter(const InjectedClassNameType *T,
+ raw_ostream &OS) {}
+
void TypePrinter::printTemplateTypeParmBefore(const TemplateTypeParmType *T,
raw_ostream &OS) {
TemplateTypeParmDecl *D = T->getDecl();
@@ -1640,6 +1732,15 @@ void TypePrinter::printSubstTemplateTypeParmAfter(
printAfter(T->getReplacementType(), OS);
}
+void TypePrinter::printSubstBuiltinTemplatePackBefore(
+ const SubstBuiltinTemplatePackType *T, raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ OS << "type-pack";
+}
+
+void TypePrinter::printSubstBuiltinTemplatePackAfter(
+ const SubstBuiltinTemplatePackType *T, raw_ostream &OS) {}
+
void TypePrinter::printSubstTemplateTypeParmPackBefore(
const SubstTemplateTypeParmPackType *T,
raw_ostream &OS) {
@@ -1671,6 +1772,10 @@ void TypePrinter::printTemplateId(const TemplateSpecializationType *T,
raw_ostream &OS, bool FullyQualify) {
IncludeStrongLifetimeRAII Strong(Policy);
+ if (ElaboratedTypeKeyword K = T->getKeyword();
+ K != ElaboratedTypeKeyword::None)
+ OS << TypeWithKeyword::getKeywordName(K) << ' ';
+
TemplateDecl *TD =
T->getTemplateName().getAsTemplateDecl(/*IgnoreDeduced=*/true);
// FIXME: Null TD never exercised in test suite.
@@ -1680,7 +1785,10 @@ void TypePrinter::printTemplateId(const TemplateSpecializationType *T,
OS << TD->getName();
} else {
- T->getTemplateName().print(OS, Policy, TemplateName::Qualified::None);
+ T->getTemplateName().print(OS, Policy,
+ !Policy.SuppressScope
+ ? TemplateName::Qualified::AsWritten
+ : TemplateName::Qualified::None);
}
DefaultTemplateArgsPolicyRAII TemplateArgs(Policy);
@@ -1699,77 +1807,6 @@ void TypePrinter::printTemplateSpecializationAfter(
const TemplateSpecializationType *T,
raw_ostream &OS) {}
-void TypePrinter::printInjectedClassNameBefore(const InjectedClassNameType *T,
- raw_ostream &OS) {
- if (Policy.PrintInjectedClassNameWithArguments)
- return printTemplateSpecializationBefore(T->getInjectedTST(), OS);
-
- IncludeStrongLifetimeRAII Strong(Policy);
- T->getTemplateName().print(OS, Policy);
- spaceBeforePlaceHolder(OS);
-}
-
-void TypePrinter::printInjectedClassNameAfter(const InjectedClassNameType *T,
- raw_ostream &OS) {}
-
-void TypePrinter::printElaboratedBefore(const ElaboratedType *T,
- raw_ostream &OS) {
- if (Policy.IncludeTagDefinition && T->getOwnedTagDecl()) {
- TagDecl *OwnedTagDecl = T->getOwnedTagDecl();
- assert(OwnedTagDecl->getTypeForDecl() == T->getNamedType().getTypePtr() &&
- "OwnedTagDecl expected to be a declaration for the type");
- PrintingPolicy SubPolicy = Policy;
- SubPolicy.IncludeTagDefinition = false;
- OwnedTagDecl->print(OS, SubPolicy, Indentation);
- spaceBeforePlaceHolder(OS);
- return;
- }
-
- if (Policy.SuppressElaboration) {
- printBefore(T->getNamedType(), OS);
- return;
- }
-
- // The tag definition will take care of these.
- if (!Policy.IncludeTagDefinition)
- {
- OS << TypeWithKeyword::getKeywordName(T->getKeyword());
- if (T->getKeyword() != ElaboratedTypeKeyword::None)
- OS << " ";
- NestedNameSpecifier *Qualifier = T->getQualifier();
- if (!Policy.SuppressTagKeyword && Policy.SuppressScope &&
- !Policy.SuppressUnwrittenScope) {
- bool OldTagKeyword = Policy.SuppressTagKeyword;
- bool OldSupressScope = Policy.SuppressScope;
- Policy.SuppressTagKeyword = true;
- Policy.SuppressScope = false;
- printBefore(T->getNamedType(), OS);
- Policy.SuppressTagKeyword = OldTagKeyword;
- Policy.SuppressScope = OldSupressScope;
- return;
- }
- if (Qualifier)
- Qualifier->print(OS, Policy);
- }
-
- ElaboratedTypePolicyRAII PolicyRAII(Policy);
- printBefore(T->getNamedType(), OS);
-}
-
-void TypePrinter::printElaboratedAfter(const ElaboratedType *T,
- raw_ostream &OS) {
- if (Policy.IncludeTagDefinition && T->getOwnedTagDecl())
- return;
-
- if (Policy.SuppressElaboration) {
- printAfter(T->getNamedType(), OS);
- return;
- }
-
- ElaboratedTypePolicyRAII PolicyRAII(Policy);
- printAfter(T->getNamedType(), OS);
-}
-
void TypePrinter::printParenBefore(const ParenType *T, raw_ostream &OS) {
if (!HasEmptyPlaceHolder && !isa<FunctionType>(T->getInnerType())) {
printBefore(T->getInnerType(), OS);
@@ -1791,9 +1828,7 @@ void TypePrinter::printDependentNameBefore(const DependentNameType *T,
OS << TypeWithKeyword::getKeywordName(T->getKeyword());
if (T->getKeyword() != ElaboratedTypeKeyword::None)
OS << " ";
-
- T->getQualifier()->print(OS, Policy);
-
+ T->getQualifier().print(OS, Policy);
OS << T->getIdentifier()->getName();
spaceBeforePlaceHolder(OS);
}
@@ -2129,6 +2164,9 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::ExtVectorType:
OS << "ext_vector_type";
break;
+ case attr::CFISalt:
+ OS << "cfi_salt(\"" << cast<CFISaltAttr>(T->getAttr())->getSalt() << "\")";
+ break;
}
OS << "))";
}
@@ -2344,7 +2382,7 @@ static bool isSubstitutedType(ASTContext &Ctx, QualType T, QualType Pattern,
return true;
// A type parameter matches its argument.
- if (auto *TTPT = Pattern->getAs<TemplateTypeParmType>()) {
+ if (auto *TTPT = Pattern->getAsCanonical<TemplateTypeParmType>()) {
if (TTPT->getDepth() == Depth && TTPT->getIndex() < Args.size() &&
Args[TTPT->getIndex()].getKind() == TemplateArgument::Type) {
QualType SubstArg = Ctx.getQualifiedType(
diff --git a/clang/lib/AST/VTTBuilder.cpp b/clang/lib/AST/VTTBuilder.cpp
index de01184..89b58b5 100644
--- a/clang/lib/AST/VTTBuilder.cpp
+++ b/clang/lib/AST/VTTBuilder.cpp
@@ -63,9 +63,7 @@ void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
if (I.isVirtual())
continue;
- const auto *BaseDecl =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
-
+ const auto *BaseDecl = I.getType()->castAsCXXRecordDecl();
const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
CharUnits BaseOffset = Base.getBaseOffset() +
Layout.getBaseClassOffset(BaseDecl);
@@ -89,8 +87,7 @@ VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
return;
for (const auto &I : RD->bases()) {
- const auto *BaseDecl =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *BaseDecl = I.getType()->castAsCXXRecordDecl();
// Itanium C++ ABI 2.6.2:
// Secondary virtual pointers are present for all bases with either
@@ -153,8 +150,7 @@ VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
VisitedVirtualBasesSetTy &VBases) {
for (const auto &I : RD->bases()) {
- const auto *BaseDecl =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *BaseDecl = I.getType()->castAsCXXRecordDecl();
// Check if this is a virtual base.
if (I.isVirtual()) {
diff --git a/clang/lib/AST/VTableBuilder.cpp b/clang/lib/AST/VTableBuilder.cpp
index 0001745..6cec526 100644
--- a/clang/lib/AST/VTableBuilder.cpp
+++ b/clang/lib/AST/VTableBuilder.cpp
@@ -313,10 +313,12 @@ ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
}
const CXXRecordDecl *DerivedRD =
- cast<CXXRecordDecl>(cast<RecordType>(CanDerivedReturnType)->getDecl());
+ cast<CXXRecordDecl>(
+ cast<RecordType>(CanDerivedReturnType)->getOriginalDecl())
+ ->getDefinitionOrSelf();
- const CXXRecordDecl *BaseRD =
- cast<CXXRecordDecl>(cast<RecordType>(CanBaseReturnType)->getDecl());
+ const CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(
+ cast<RecordType>(CanBaseReturnType)->getOriginalDecl());
return ComputeBaseOffset(Context, BaseRD, DerivedRD);
}
diff --git a/clang/lib/ASTMatchers/ASTMatchFinder.cpp b/clang/lib/ASTMatchers/ASTMatchFinder.cpp
index 6d0ba0b..e8a0004 100644
--- a/clang/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/clang/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -122,15 +122,15 @@ public:
else if (const Stmt *S = DynNode.get<Stmt>())
traverse(*S);
else if (const NestedNameSpecifier *NNS =
- DynNode.get<NestedNameSpecifier>())
+ DynNode.get<NestedNameSpecifier>())
traverse(*NNS);
else if (const NestedNameSpecifierLoc *NNSLoc =
DynNode.get<NestedNameSpecifierLoc>())
traverse(*NNSLoc);
else if (const QualType *Q = DynNode.get<QualType>())
- traverse(*Q);
+ traverse(*Q, /*TraverseQualifier=*/true);
else if (const TypeLoc *T = DynNode.get<TypeLoc>())
- traverse(*T);
+ traverse(*T, /*TraverseQualifier=*/true);
else if (const auto *C = DynNode.get<CXXCtorInitializer>())
traverse(*C);
else if (const TemplateArgumentLoc *TALoc =
@@ -194,7 +194,7 @@ public:
}
// We assume that the QualType and the contained type are on the same
// hierarchy level. Thus, we try to match either of them.
- bool TraverseType(QualType TypeNode) {
+ bool TraverseType(QualType TypeNode, bool TraverseQualifier = true) {
if (TypeNode.isNull())
return true;
ScopedIncrement ScopedDepth(&CurrentDepth);
@@ -202,11 +202,11 @@ public:
if (!match(*TypeNode))
return false;
// The QualType is matched inside traverse.
- return traverse(TypeNode);
+ return traverse(TypeNode, TraverseQualifier);
}
// We assume that the TypeLoc, contained QualType and contained Type all are
// on the same hierarchy level. Thus, we try to match all of them.
- bool TraverseTypeLoc(TypeLoc TypeLocNode) {
+ bool TraverseTypeLoc(TypeLoc TypeLocNode, bool TraverseQualifier = true) {
if (TypeLocNode.isNull())
return true;
ScopedIncrement ScopedDepth(&CurrentDepth);
@@ -217,17 +217,17 @@ public:
if (!match(TypeLocNode.getType()))
return false;
// The TypeLoc is matched inside traverse.
- return traverse(TypeLocNode);
+ return traverse(TypeLocNode, TraverseQualifier);
}
- bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier NNS) {
ScopedIncrement ScopedDepth(&CurrentDepth);
- return (NNS == nullptr) || traverse(*NNS);
+ return !NNS || traverse(NNS);
}
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) {
if (!NNS)
return true;
ScopedIncrement ScopedDepth(&CurrentDepth);
- if (!match(*NNS.getNestedNameSpecifier()))
+ if (!match(NNS.getNestedNameSpecifier()))
return false;
return traverse(NNS);
}
@@ -340,15 +340,14 @@ private:
bool baseTraverse(const Stmt &StmtNode) {
return VisitorBase::TraverseStmt(const_cast<Stmt*>(&StmtNode));
}
- bool baseTraverse(QualType TypeNode) {
- return VisitorBase::TraverseType(TypeNode);
+ bool baseTraverse(QualType TypeNode, bool TraverseQualifier) {
+ return VisitorBase::TraverseType(TypeNode, TraverseQualifier);
}
- bool baseTraverse(TypeLoc TypeLocNode) {
- return VisitorBase::TraverseTypeLoc(TypeLocNode);
+ bool baseTraverse(TypeLoc TypeLocNode, bool TraverseQualifier) {
+ return VisitorBase::TraverseTypeLoc(TypeLocNode, TraverseQualifier);
}
- bool baseTraverse(const NestedNameSpecifier &NNS) {
- return VisitorBase::TraverseNestedNameSpecifier(
- const_cast<NestedNameSpecifier*>(&NNS));
+ bool baseTraverse(NestedNameSpecifier NNS) {
+ return VisitorBase::TraverseNestedNameSpecifier(NNS);
}
bool baseTraverse(NestedNameSpecifierLoc NNS) {
return VisitorBase::TraverseNestedNameSpecifierLoc(NNS);
@@ -396,13 +395,13 @@ private:
// Traverses the subtree rooted at 'Node'; returns true if the
// traversal should continue after this function returns.
- template <typename T>
- bool traverse(const T &Node) {
+ template <typename T, class... Args>
+ bool traverse(const T &Node, Args &&...args) {
static_assert(IsBaseType<T>::value,
"traverse can only be instantiated with base type");
if (!match(Node))
return false;
- return baseTraverse(Node);
+ return baseTraverse(Node, std::forward<Args>(args)...);
}
const DynTypedMatcher *const Matcher;
@@ -501,9 +500,9 @@ public:
bool TraverseDecl(Decl *DeclNode);
bool TraverseStmt(Stmt *StmtNode, DataRecursionQueue *Queue = nullptr);
- bool TraverseType(QualType TypeNode);
- bool TraverseTypeLoc(TypeLoc TypeNode);
- bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS);
+ bool TraverseType(QualType TypeNode, bool TraverseQualifier = true);
+ bool TraverseTypeLoc(TypeLoc TypeNode, bool TraverseQualifier = true);
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier NNS);
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS);
bool TraverseConstructorInitializer(CXXCtorInitializer *CtorInit);
bool TraverseTemplateArgumentLoc(TemplateArgumentLoc TAL);
@@ -577,13 +576,13 @@ public:
const auto *T = Proto.getTypePtr();
for (const auto &E : T->exceptions())
- TraverseType(E);
+ TraverseType(E, /*TraverseQualifier=*/true);
if (Expr *NE = T->getNoexceptExpr())
TraverseStmt(NE, Queue);
if (LE->hasExplicitResultType())
- TraverseTypeLoc(Proto.getReturnLoc());
+ TraverseTypeLoc(Proto.getReturnLoc(), /*TraverseQualifier=*/true);
TraverseStmt(
const_cast<Expr *>(LE->getTrailingRequiresClause().ConstraintExpr));
}
@@ -1289,33 +1288,42 @@ private:
if (Aliases == TypeAliases.end())
return false;
- if (const auto *ElaboratedTypeNode =
- llvm::dyn_cast<ElaboratedType>(TypeNode)) {
- if (ElaboratedTypeNode->isSugared() && Aliases->second.size() > 1) {
- const auto &DesugaredTypeName =
- ElaboratedTypeNode->desugar().getAsString();
+ auto matches = [&](const TypedefNameDecl *Alias) {
+ BoundNodesTreeBuilder Result(*Builder);
+ if (Matcher.matches(*Alias, this, &Result)) {
+ *Builder = std::move(Result);
+ return true;
+ }
+ return false;
+ };
- for (const TypedefNameDecl *Alias : Aliases->second) {
- if (Alias->getName() != DesugaredTypeName) {
- continue;
- }
+ if (const auto *T = TypeNode->getAs<TypedefType>()) {
+ const auto *TD = T->getDecl()->getCanonicalDecl();
- BoundNodesTreeBuilder Result(*Builder);
- if (Matcher.matches(*Alias, this, &Result)) {
- *Builder = std::move(Result);
- return true;
- }
+ // Prioritize exact matches.
+ SmallVector<const TypedefNameDecl *, 8> NonExactMatches;
+ for (const TypedefNameDecl *Alias : Aliases->second) {
+ if (!declaresSameEntity(TD, Alias)) {
+ NonExactMatches.push_back(Alias);
+ continue;
}
+ if (matches(Alias))
+ return true;
}
- }
- for (const TypedefNameDecl *Alias : Aliases->second) {
- BoundNodesTreeBuilder Result(*Builder);
- if (Matcher.matches(*Alias, this, &Result)) {
- *Builder = std::move(Result);
- return true;
+ for (const TypedefNameDecl *Alias : NonExactMatches) {
+ BoundNodesTreeBuilder Result(*Builder);
+ if (Matcher.matches(*Alias, this, &Result)) {
+ *Builder = std::move(Result);
+ return true;
+ }
}
+ return false;
}
+
+ for (const TypedefNameDecl *Alias : Aliases->second)
+ if (matches(Alias))
+ return true;
return false;
}
@@ -1336,6 +1344,41 @@ private:
return false;
}
+ template <typename T> static SourceLocation getNodeLocation(const T &Node) {
+ return Node.getBeginLoc();
+ }
+
+ static SourceLocation getNodeLocation(const CXXCtorInitializer &Node) {
+ return Node.getSourceLocation();
+ }
+
+ static SourceLocation getNodeLocation(const TemplateArgumentLoc &Node) {
+ return Node.getLocation();
+ }
+
+ static SourceLocation getNodeLocation(const Attr &Node) {
+ return Node.getLocation();
+ }
+
+ bool isInSystemHeader(SourceLocation Loc) {
+ const SourceManager &SM = getASTContext().getSourceManager();
+ return SM.isInSystemHeader(Loc);
+ }
+
+ template <typename T> bool shouldSkipNode(T &Node) {
+ if (Options.IgnoreSystemHeaders && isInSystemHeader(getNodeLocation(Node)))
+ return true;
+ return false;
+ }
+
+ template <typename T> bool shouldSkipNode(T *Node) {
+ return (Node == nullptr) || shouldSkipNode(*Node);
+ }
+
+ bool shouldSkipNode(QualType &) { return false; }
+
+ bool shouldSkipNode(NestedNameSpecifier &) { return false; }
+
/// Bucket to record map.
///
/// Used to get the appropriate bucket for each matcher.
@@ -1465,9 +1508,8 @@ bool MatchASTVisitor::objcClassIsDerivedFrom(
}
bool MatchASTVisitor::TraverseDecl(Decl *DeclNode) {
- if (!DeclNode) {
+ if (shouldSkipNode(DeclNode))
return true;
- }
bool ScopedTraversal =
TraversingASTNodeNotSpelledInSource || DeclNode->isImplicit();
@@ -1495,9 +1537,9 @@ bool MatchASTVisitor::TraverseDecl(Decl *DeclNode) {
}
bool MatchASTVisitor::TraverseStmt(Stmt *StmtNode, DataRecursionQueue *Queue) {
- if (!StmtNode) {
+ if (shouldSkipNode(StmtNode))
return true;
- }
+
bool ScopedTraversal = TraversingASTNodeNotSpelledInSource ||
TraversingASTChildrenNotSpelledInSource;
@@ -1506,12 +1548,19 @@ bool MatchASTVisitor::TraverseStmt(Stmt *StmtNode, DataRecursionQueue *Queue) {
return RecursiveASTVisitor<MatchASTVisitor>::TraverseStmt(StmtNode, Queue);
}
-bool MatchASTVisitor::TraverseType(QualType TypeNode) {
+bool MatchASTVisitor::TraverseType(QualType TypeNode, bool TraverseQualifier) {
+ if (shouldSkipNode(TypeNode))
+ return true;
+
match(TypeNode);
- return RecursiveASTVisitor<MatchASTVisitor>::TraverseType(TypeNode);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseType(TypeNode,
+ TraverseQualifier);
}
-bool MatchASTVisitor::TraverseTypeLoc(TypeLoc TypeLocNode) {
+bool MatchASTVisitor::TraverseTypeLoc(TypeLoc TypeLocNode,
+ bool TraverseQualifier) {
+ if (shouldSkipNode(TypeLocNode))
+ return true;
// The RecursiveASTVisitor only visits types if they're not within TypeLocs.
// We still want to find those types via matchers, so we match them here. Note
// that the TypeLocs are structurally a shadow-hierarchy to the expressed
@@ -1519,11 +1568,15 @@ bool MatchASTVisitor::TraverseTypeLoc(TypeLoc TypeLocNode) {
// each TypeLoc.
match(TypeLocNode);
match(TypeLocNode.getType());
- return RecursiveASTVisitor<MatchASTVisitor>::TraverseTypeLoc(TypeLocNode);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseTypeLoc(
+ TypeLocNode, TraverseQualifier);
}
-bool MatchASTVisitor::TraverseNestedNameSpecifier(NestedNameSpecifier *NNS) {
- match(*NNS);
+bool MatchASTVisitor::TraverseNestedNameSpecifier(NestedNameSpecifier NNS) {
+ if (shouldSkipNode(NNS))
+ return true;
+
+ match(NNS);
return RecursiveASTVisitor<MatchASTVisitor>::TraverseNestedNameSpecifier(NNS);
}
@@ -1532,19 +1585,22 @@ bool MatchASTVisitor::TraverseNestedNameSpecifierLoc(
if (!NNS)
return true;
+ if (shouldSkipNode(NNS))
+ return true;
+
match(NNS);
// We only match the nested name specifier here (as opposed to traversing it)
// because the traversal is already done in the parallel "Loc"-hierarchy.
if (NNS.hasQualifier())
- match(*NNS.getNestedNameSpecifier());
+ match(NNS.getNestedNameSpecifier());
return
RecursiveASTVisitor<MatchASTVisitor>::TraverseNestedNameSpecifierLoc(NNS);
}
bool MatchASTVisitor::TraverseConstructorInitializer(
CXXCtorInitializer *CtorInit) {
- if (!CtorInit)
+ if (shouldSkipNode(CtorInit))
return true;
bool ScopedTraversal = TraversingASTNodeNotSpelledInSource ||
@@ -1562,11 +1618,17 @@ bool MatchASTVisitor::TraverseConstructorInitializer(
}
bool MatchASTVisitor::TraverseTemplateArgumentLoc(TemplateArgumentLoc Loc) {
+ if (shouldSkipNode(Loc))
+ return true;
+
match(Loc);
return RecursiveASTVisitor<MatchASTVisitor>::TraverseTemplateArgumentLoc(Loc);
}
bool MatchASTVisitor::TraverseAttr(Attr *AttrNode) {
+ if (shouldSkipNode(AttrNode))
+ return true;
+
match(*AttrNode);
return RecursiveASTVisitor<MatchASTVisitor>::TraverseAttr(AttrNode);
}
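
The effect of the exact-match prioritization above is easiest to see on a concrete input; a minimal illustration with hypothetical aliases (not from the patch's tests):

    typedef int MyInt;      // TD1
    using IntAlias = MyInt; // TD2, same underlying type

    // When a TypedefType written as `MyInt` is checked against the alias
    // bucket, TD1 (declaresSameEntity with the node's decl) is tried first;
    // TD2 is only consulted if no exact match succeeds.
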
diff --git a/clang/lib/ASTMatchers/ASTMatchersInternal.cpp b/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
index 80dc88881..653b381 100644
--- a/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -755,6 +755,8 @@ const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl;
+const internal::VariadicDynCastAllOfMatcher<Decl, UsingShadowDecl>
+ usingShadowDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
const internal::VariadicAllOfMatcher<Decl> decl;
@@ -808,8 +810,6 @@ const internal::VariadicDynCastAllOfMatcher<TypeLoc, ReferenceTypeLoc>
const internal::VariadicDynCastAllOfMatcher<TypeLoc,
TemplateSpecializationTypeLoc>
templateSpecializationTypeLoc;
-const internal::VariadicDynCastAllOfMatcher<TypeLoc, ElaboratedTypeLoc>
- elaboratedTypeLoc;
const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
@@ -1103,7 +1103,6 @@ const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType;
const AstTypeMatcher<UnaryTransformType> unaryTransformType;
const AstTypeMatcher<RecordType> recordType;
const AstTypeMatcher<TagType> tagType;
-const AstTypeMatcher<ElaboratedType> elaboratedType;
const AstTypeMatcher<UsingType> usingType;
const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType;
const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
diff --git a/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index 562df71..48a7b91 100644
--- a/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -235,13 +235,12 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(designatorCountIs);
REGISTER_MATCHER(doStmt);
REGISTER_MATCHER(eachOf);
- REGISTER_MATCHER(elaboratedType);
- REGISTER_MATCHER(elaboratedTypeLoc);
REGISTER_MATCHER(usingType);
REGISTER_MATCHER(enumConstantDecl);
REGISTER_MATCHER(enumDecl);
REGISTER_MATCHER(enumType);
REGISTER_MATCHER(equalsBoundNode);
+ REGISTER_MATCHER(declaresSameEntityAsBoundNode);
REGISTER_MATCHER(equalsIntegralValue);
REGISTER_MATCHER(explicitCastExpr);
REGISTER_MATCHER(exportDecl);
@@ -341,7 +340,6 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasMemberName);
REGISTER_MATCHER(hasMethod);
REGISTER_MATCHER(hasName);
- REGISTER_MATCHER(hasNamedTypeLoc);
REGISTER_MATCHER(hasNullSelector);
REGISTER_MATCHER(hasObjectExpression);
REGISTER_MATCHER(hasOperands);
@@ -503,7 +501,6 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(memberHasSameNameAsBoundNode);
REGISTER_MATCHER(memberPointerType);
REGISTER_MATCHER(namedDecl);
- REGISTER_MATCHER(namesType);
REGISTER_MATCHER(namespaceAliasDecl);
REGISTER_MATCHER(namespaceDecl);
REGISTER_MATCHER(nestedNameSpecifier);
@@ -593,6 +590,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(typeLoc);
REGISTER_MATCHER(typedefDecl);
REGISTER_MATCHER(typedefNameDecl);
+ REGISTER_MATCHER(usingShadowDecl);
REGISTER_MATCHER(typedefType);
REGISTER_MATCHER(unaryExprOrTypeTraitExpr);
REGISTER_MATCHER(unaryOperator);
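
Registering the matcher makes it reachable from dynamic consumers such as clang-query; a sketch of what the new name matches (hypothetical code):

    // Given:  namespace N { void foo(); }  using N::foo;
    // the using-declaration introduces a UsingShadowDecl for `foo`:
    auto M = usingShadowDecl(hasName("foo"));
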
diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp
index d960d51..60a2d11 100644
--- a/clang/lib/Analysis/CFG.cpp
+++ b/clang/lib/Analysis/CFG.cpp
@@ -2833,7 +2833,8 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
if (!FD->isVariadic())
findConstructionContextsForArguments(C);
- if (FD->isNoReturn() || C->isBuiltinAssumeFalse(*Context))
+ if (FD->isNoReturn() || FD->isAnalyzerNoReturn() ||
+ C->isBuiltinAssumeFalse(*Context))
NoReturn = true;
if (FD->hasAttr<NoThrowAttr>())
AddEHEdge = false;
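
A minimal sketch of what the new FD->isAnalyzerNoReturn() check covers, assuming a function carrying the existing analyzer attribute:

    void fatal(const char *Msg) __attribute__((analyzer_noreturn));

    int f(int *P) {
      if (!P)
        fatal("null"); // now ends the CFG block, like [[noreturn]]
      return *P;       // analyses can assume P != nullptr here
    }
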
diff --git a/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/clang/lib/Analysis/ExprMutationAnalyzer.cpp
index 823d754..3fcd348 100644
--- a/clang/lib/Analysis/ExprMutationAnalyzer.cpp
+++ b/clang/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -703,7 +703,8 @@ ExprMutationAnalyzer::Analyzer::findFunctionArgMutation(const Expr *Exp) {
// definition and see whether the param is mutated inside.
if (const auto *RefType = ParmType->getAs<RValueReferenceType>()) {
if (!RefType->getPointeeType().getQualifiers() &&
- RefType->getPointeeType()->getAs<TemplateTypeParmType>()) {
+ isa<TemplateTypeParmType>(
+ RefType->getPointeeType().getCanonicalType())) {
FunctionParmMutationAnalyzer *Analyzer =
FunctionParmMutationAnalyzer::getFunctionParmMutationAnalyzer(
*Func, Context, Memorized);
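
The rewritten condition targets forwarding references; a minimal sketch of the case it must keep recognizing (illustrative code):

    template <typename T>
    void sink(T &&V) { V = T(); } // mutates its parameter

    void caller() {
      int X = 0;
      sink(X); // T&& with T a template parameter is a forwarding reference;
               // the analyzer follows into sink's body, now recognized via
               // the canonical type of the pointee.
    }
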
diff --git a/clang/lib/Analysis/FlowSensitive/CMakeLists.txt b/clang/lib/Analysis/FlowSensitive/CMakeLists.txt
index 0c30df8..97e09c9 100644
--- a/clang/lib/Analysis/FlowSensitive/CMakeLists.txt
+++ b/clang/lib/Analysis/FlowSensitive/CMakeLists.txt
@@ -6,6 +6,7 @@ add_clang_library(clangAnalysisFlowSensitive
DataflowAnalysisContext.cpp
DataflowEnvironment.cpp
Formula.cpp
+ FormulaSerialization.cpp
HTMLLogger.cpp
Logger.cpp
RecordOps.cpp
diff --git a/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp b/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
index 6421ad3..4196d68 100644
--- a/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
+++ b/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
@@ -208,6 +208,24 @@ bool DataflowAnalysisContext::equivalentFormulas(const Formula &Val1,
return isUnsatisfiable(std::move(Constraints));
}
+llvm::DenseSet<Atom> DataflowAnalysisContext::collectDependencies(
+ llvm::DenseSet<Atom> Tokens) const {
+ // Use a worklist algorithm, with `Remaining` holding the worklist and
+ // `Tokens` tracking which atoms have already been added to the worklist.
+ std::vector<Atom> Remaining(Tokens.begin(), Tokens.end());
+ while (!Remaining.empty()) {
+ Atom CurrentToken = Remaining.back();
+ Remaining.pop_back();
+ if (auto DepsIt = FlowConditionDeps.find(CurrentToken);
+ DepsIt != FlowConditionDeps.end())
+ for (Atom A : DepsIt->second)
+ if (Tokens.insert(A).second)
+ Remaining.push_back(A);
+ }
+
+ return Tokens;
+}
+
void DataflowAnalysisContext::addTransitiveFlowConditionConstraints(
Atom Token, llvm::SetVector<const Formula *> &Constraints) {
llvm::DenseSet<Atom> AddedTokens;
@@ -224,6 +242,8 @@ void DataflowAnalysisContext::addTransitiveFlowConditionConstraints(
auto ConstraintsIt = FlowConditionConstraints.find(Token);
if (ConstraintsIt == FlowConditionConstraints.end()) {
+ // The flow condition is unconstrained. Just add the atom directly, which
+ // is equivalent to asserting it is true.
Constraints.insert(&arena().makeAtomRef(Token));
} else {
// Bind flow condition token via `iff` to its set of constraints:
@@ -239,6 +259,65 @@ void DataflowAnalysisContext::addTransitiveFlowConditionConstraints(
}
}
+static void getReferencedAtoms(const Formula &F,
+ llvm::DenseSet<dataflow::Atom> &Refs) {
+ switch (F.kind()) {
+ case Formula::AtomRef:
+ Refs.insert(F.getAtom());
+ break;
+ case Formula::Literal:
+ break;
+ case Formula::Not:
+ getReferencedAtoms(*F.operands()[0], Refs);
+ break;
+ case Formula::And:
+ case Formula::Or:
+ case Formula::Implies:
+ case Formula::Equal:
+ ArrayRef<const Formula *> Operands = F.operands();
+ getReferencedAtoms(*Operands[0], Refs);
+ getReferencedAtoms(*Operands[1], Refs);
+ break;
+ }
+}
+
+SimpleLogicalContext DataflowAnalysisContext::exportLogicalContext(
+ llvm::DenseSet<dataflow::Atom> TargetTokens) const {
+ SimpleLogicalContext LC;
+
+ // Copy `Invariant` even if it is null, to initialize the field.
+ LC.Invariant = Invariant;
+ if (Invariant != nullptr)
+ getReferencedAtoms(*Invariant, TargetTokens);
+
+ llvm::DenseSet<dataflow::Atom> Dependencies =
+ collectDependencies(std::move(TargetTokens));
+
+ for (dataflow::Atom Token : Dependencies) {
+ // Only process the token if it is constrained. Unconstrained tokens don't
+ // have dependencies.
+ const Formula *Constraints = FlowConditionConstraints.lookup(Token);
+ if (Constraints == nullptr)
+ continue;
+ LC.TokenDefs[Token] = Constraints;
+
+ if (auto DepsIt = FlowConditionDeps.find(Token);
+ DepsIt != FlowConditionDeps.end())
+ LC.TokenDeps[Token] = DepsIt->second;
+ }
+
+ return LC;
+}
+
+void DataflowAnalysisContext::initLogicalContext(SimpleLogicalContext LC) {
+ Invariant = LC.Invariant;
+ FlowConditionConstraints = std::move(LC.TokenDefs);
+ // TODO: The dependencies in `LC.TokenDeps` can be reconstructed from
+ // `LC.TokenDefs`. Give the caller the option to reconstruct, rather than
+ // providing them directly, to save caller space (memory/disk).
+ FlowConditionDeps = std::move(LC.TokenDeps);
+}
+
static void printAtomList(const llvm::SmallVector<Atom> &Atoms,
llvm::raw_ostream &OS) {
OS << "(";
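
Together, exportLogicalContext and initLogicalContext let a caller persist the flow-condition graph and restore it elsewhere; a minimal sketch under the API shape above (FC1 and FC2 are illustrative flow-condition atoms):

    SimpleLogicalContext LC =
        SourceCtx.exportLogicalContext(/*TargetTokens=*/{FC1, FC2});
    // ... serialize/deserialize LC as needed ...
    TargetCtx.initLogicalContext(std::move(LC));
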
diff --git a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
index 256ea182..f14cb43 100644
--- a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
+++ b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
@@ -532,9 +532,11 @@ void Environment::initialize() {
} else if (auto *FieldBeingInitialized =
dyn_cast<FieldDecl>(Parent->getLambdaContextDecl())) {
// This is in a field initializer, rather than a method.
+ const RecordDecl *RD = FieldBeingInitialized->getParent();
+ const ASTContext &Ctx = RD->getASTContext();
+ CanQualType T = Ctx.getCanonicalTagType(RD);
setThisPointeeStorageLocation(
- cast<RecordStorageLocation>(createObject(QualType(
- FieldBeingInitialized->getParent()->getTypeForDecl(), 0))));
+ cast<RecordStorageLocation>(createObject(T)));
} else {
assert(false && "Unexpected this-capturing lambda context.");
}
diff --git a/clang/lib/Analysis/FlowSensitive/FormulaSerialization.cpp b/clang/lib/Analysis/FlowSensitive/FormulaSerialization.cpp
new file mode 100644
index 0000000..df15a1d
--- /dev/null
+++ b/clang/lib/Analysis/FlowSensitive/FormulaSerialization.cpp
@@ -0,0 +1,153 @@
+//===- FormulaSerialization.cpp ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/FormulaSerialization.h"
+#include "clang/Analysis/FlowSensitive/Arena.h"
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+
+namespace clang::dataflow {
+
+// Returns the leading indicator of operation formulas. `AtomRef` and `Literal`
+// are handled differently.
+static char compactSigil(Formula::Kind K) {
+ switch (K) {
+ case Formula::AtomRef:
+ case Formula::Literal:
+ // No sigil.
+ return '\0';
+ case Formula::Not:
+ return '!';
+ case Formula::And:
+ return '&';
+ case Formula::Or:
+ return '|';
+ case Formula::Implies:
+ return '>';
+ case Formula::Equal:
+ return '=';
+ }
+ llvm_unreachable("unhandled formula kind");
+}
+
+void serializeFormula(const Formula &F, llvm::raw_ostream &OS) {
+ switch (Formula::numOperands(F.kind())) {
+ case 0:
+ switch (F.kind()) {
+ case Formula::AtomRef:
+ OS << F.getAtom();
+ break;
+ case Formula::Literal:
+ OS << (F.literal() ? 'T' : 'F');
+ break;
+ default:
+ llvm_unreachable("unhandled formula kind");
+ }
+ break;
+ case 1:
+ OS << compactSigil(F.kind());
+ serializeFormula(*F.operands()[0], OS);
+ break;
+ case 2:
+ OS << compactSigil(F.kind());
+ serializeFormula(*F.operands()[0], OS);
+ serializeFormula(*F.operands()[1], OS);
+ break;
+ default:
+ llvm_unreachable("unhandled formula arity");
+ }
+}
+
+static llvm::Expected<const Formula *>
+parsePrefix(llvm::StringRef &Str, Arena &A,
+ llvm::DenseMap<unsigned, Atom> &AtomMap) {
+ if (Str.empty())
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "unexpected end of input");
+
+ char Prefix = Str[0];
+ Str = Str.drop_front();
+
+ switch (Prefix) {
+ case 'T':
+ return &A.makeLiteral(true);
+ case 'F':
+ return &A.makeLiteral(false);
+ case 'V': {
+ unsigned AtomID;
+ if (Str.consumeInteger(10, AtomID))
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "expected atom id");
+ auto [It, Inserted] = AtomMap.try_emplace(AtomID, Atom());
+ if (Inserted)
+ It->second = A.makeAtom();
+ return &A.makeAtomRef(It->second);
+ }
+ case '!': {
+ auto OperandOrErr = parsePrefix(Str, A, AtomMap);
+ if (!OperandOrErr)
+ return OperandOrErr.takeError();
+ return &A.makeNot(**OperandOrErr);
+ }
+ case '&':
+ case '|':
+ case '>':
+ case '=': {
+ auto LeftOrErr = parsePrefix(Str, A, AtomMap);
+ if (!LeftOrErr)
+ return LeftOrErr.takeError();
+
+ auto RightOrErr = parsePrefix(Str, A, AtomMap);
+ if (!RightOrErr)
+ return RightOrErr.takeError();
+
+ const Formula &LHS = **LeftOrErr;
+ const Formula &RHS = **RightOrErr;
+
+ switch (Prefix) {
+ case '&':
+ return &A.makeAnd(LHS, RHS);
+ case '|':
+ return &A.makeOr(LHS, RHS);
+ case '>':
+ return &A.makeImplies(LHS, RHS);
+ case '=':
+ return &A.makeEquals(LHS, RHS);
+ default:
+ llvm_unreachable("unexpected binary op");
+ }
+ }
+ default:
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "unexpected prefix character: %c", Prefix);
+ }
+}
+
+llvm::Expected<const Formula *>
+parseFormula(llvm::StringRef Str, Arena &A,
+ llvm::DenseMap<unsigned, Atom> &AtomMap) {
+ llvm::Expected<const Formula *> F = parsePrefix(Str, A, AtomMap);
+ if (!F)
+ return F.takeError();
+ // parsePrefix consumes from Str, so whatever remains is the unparsed suffix.
+ if (!Str.empty())
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ ("unexpected suffix of length: " +
+ llvm::Twine(Str.size()))
+ .str());
+ return F;
+}
+
+} // namespace clang::dataflow
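
The format is a compact prefix (Polish) notation using the sigils above; a round-trip sketch (A2 is a target Arena and F a formula built elsewhere; illustrative names only):

    // "(V0 & !V1) => (V0 | V2)" serializes, operator-first, to:
    //   ">&V0!V1|V0V2"
    std::string S;
    llvm::raw_string_ostream OS(S);
    serializeFormula(F, OS);

    // Parsing remaps serialized atom ids onto fresh atoms in A2.
    llvm::DenseMap<unsigned, Atom> AtomMap;
    llvm::Expected<const Formula *> G = parseFormula(OS.str(), A2, AtomMap);
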
diff --git a/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/clang/lib/Analysis/FlowSensitive/Transfer.cpp
index 86a816e..23a6de4 100644
--- a/clang/lib/Analysis/FlowSensitive/Transfer.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Transfer.cpp
@@ -20,14 +20,17 @@
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/Type.h"
#include "clang/Analysis/FlowSensitive/ASTOps.h"
#include "clang/Analysis/FlowSensitive/AdornedCFG.h"
#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/NoopAnalysis.h"
#include "clang/Analysis/FlowSensitive/RecordOps.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
#include "clang/Analysis/FlowSensitive/Value.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "llvm/Support/Casting.h"
#include <assert.h>
@@ -287,7 +290,7 @@ public:
}
}
- void VisitImplicitCastExpr(const ImplicitCastExpr *S) {
+ void VisitCastExpr(const CastExpr *S) {
const Expr *SubExpr = S->getSubExpr();
assert(SubExpr != nullptr);
@@ -317,6 +320,60 @@ public:
break;
}
+ case CK_BaseToDerived: {
+ // This is a cast of a (single-layer) pointer or reference to a record type.
+ // We should now model the fields for the derived type.
+
+ // Get the RecordStorageLocation for the record object underneath.
+ RecordStorageLocation *Loc = nullptr;
+ if (S->getType()->isPointerType()) {
+ auto *PV = Env.get<PointerValue>(*SubExpr);
+ assert(PV != nullptr);
+ if (PV == nullptr)
+ break;
+ Loc = cast<RecordStorageLocation>(&PV->getPointeeLoc());
+ } else {
+ assert(S->getType()->isRecordType());
+ if (SubExpr->isGLValue()) {
+ Loc = Env.get<RecordStorageLocation>(*SubExpr);
+ } else {
+ Loc = &Env.getResultObjectLocation(*SubExpr);
+ }
+ }
+ if (!Loc) {
+ // Nowhere to add children or propagate from, so we're done.
+ break;
+ }
+
+ // Get the derived record type underneath the reference or pointer.
+ QualType Derived = S->getType().getNonReferenceType();
+ if (Derived->isPointerType()) {
+ Derived = Derived->getPointeeType();
+ }
+
+ // Add children to the storage location for fields (including synthetic
+ // fields) of the derived type and initialize their values.
+ for (const FieldDecl *Field :
+ Env.getDataflowAnalysisContext().getModeledFields(Derived)) {
+ assert(Field != nullptr);
+ QualType FieldType = Field->getType();
+ if (FieldType->isReferenceType()) {
+ Loc->addChild(*Field, nullptr);
+ } else {
+ Loc->addChild(*Field, &Env.createStorageLocation(FieldType));
+ }
+ }
+
+ // Synthetic fields are a property of the type, not of each field, so add
+ // them once, after the per-field loop rather than inside it.
+ for (const auto &Entry :
+ Env.getDataflowAnalysisContext().getSyntheticFields(Derived)) {
+ Loc->addSyntheticField(Entry.getKey(),
+ Env.createStorageLocation(Entry.getValue()));
+ }
+ Env.initializeFieldsWithValues(*Loc, Derived);
+
+ // Fall through to propagate SubExpr's StorageLocation to the CastExpr.
+ [[fallthrough]];
+ }
case CK_IntegralCast:
// FIXME: This cast creates a new integral value from the
// subexpression. But, because we don't model integers, we don't
@@ -324,10 +381,9 @@ public:
// modeling is added, then update this code to create a fresh location and
// value.
case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase:
case CK_ConstructorConversion:
case CK_UserDefinedConversion:
- // FIXME: Add tests that excercise CK_UncheckedDerivedToBase,
- // CK_ConstructorConversion, and CK_UserDefinedConversion.
case CK_NoOp: {
// FIXME: Consider making `Environment::getStorageLocation` skip noop
// expressions (this and other similar expressions in the file) instead
@@ -684,15 +740,6 @@ public:
propagateValue(*SubExpr, *S, Env);
}
- void VisitCXXStaticCastExpr(const CXXStaticCastExpr *S) {
- if (S->getCastKind() == CK_NoOp) {
- const Expr *SubExpr = S->getSubExpr();
- assert(SubExpr != nullptr);
-
- propagateValueOrStorageLocation(*SubExpr, *S, Env);
- }
- }
-
void VisitConditionalOperator(const ConditionalOperator *S) {
const Environment *TrueEnv = StmtToEnv.getEnvironment(*S->getTrueExpr());
const Environment *FalseEnv = StmtToEnv.getEnvironment(*S->getFalseExpr());
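
A sketch of the pattern the new CK_BaseToDerived handling models (hypothetical types):

    struct Base { int B; };
    struct Derived : Base { int D; };

    void f(Base &Ref) {
      auto &Dr = static_cast<Derived &>(Ref); // CK_BaseToDerived
      // Previously Derived's own fields had no storage location here; the
      // cast now adds and initializes them (and any synthetic fields).
      int X = Dr.D;
    }
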
diff --git a/clang/lib/Analysis/LifetimeSafety.cpp b/clang/lib/Analysis/LifetimeSafety.cpp
index f39998c..c2e6dd74 100644
--- a/clang/lib/Analysis/LifetimeSafety.cpp
+++ b/clang/lib/Analysis/LifetimeSafety.cpp
@@ -45,10 +45,11 @@ struct Loan {
/// is represented as empty LoanSet
LoanID ID;
AccessPath Path;
- SourceLocation IssueLoc;
+ /// The expression that creates the loan, e.g., &x.
+ const Expr *IssueExpr;
- Loan(LoanID id, AccessPath path, SourceLocation loc)
- : ID(id), Path(path), IssueLoc(loc) {}
+ Loan(LoanID id, AccessPath path, const Expr *IssueExpr)
+ : ID(id), Path(path), IssueExpr(IssueExpr) {}
};
/// An Origin is a symbolic identifier that represents the set of possible
@@ -82,8 +83,8 @@ class LoanManager {
public:
LoanManager() = default;
- Loan &addLoan(AccessPath Path, SourceLocation Loc) {
- AllLoans.emplace_back(getNextLoanID(), Path, Loc);
+ Loan &addLoan(AccessPath Path, const Expr *IssueExpr) {
+ AllLoans.emplace_back(getNextLoanID(), Path, IssueExpr);
return AllLoans.back();
}
@@ -174,6 +175,18 @@ public:
return NewID;
}
+ void dump(OriginID OID, llvm::raw_ostream &OS) const {
+ OS << OID << " (";
+ Origin O = getOrigin(OID);
+ if (const ValueDecl *VD = O.getDecl())
+ OS << "Decl: " << VD->getNameAsString();
+ else if (const Expr *E = O.getExpr())
+ OS << "Expr: " << E->getStmtClassName();
+ else
+ OS << "Unknown";
+ OS << ")";
+ }
+
private:
OriginID getNextOriginID() { return NextOriginID++; }
@@ -199,6 +212,8 @@ public:
AssignOrigin,
/// An origin escapes the function by flowing into the return value.
ReturnOfOrigin,
+ /// An origin is used (e.g., dereferencing a pointer).
+ Use,
/// A marker for a specific point in the code, for testing.
TestPoint,
};
@@ -219,7 +234,7 @@ public:
return nullptr;
}
- virtual void dump(llvm::raw_ostream &OS) const {
+ virtual void dump(llvm::raw_ostream &OS, const OriginManager &) const {
OS << "Fact (Kind: " << static_cast<int>(K) << ")\n";
}
};
@@ -234,21 +249,27 @@ public:
IssueFact(LoanID LID, OriginID OID) : Fact(Kind::Issue), LID(LID), OID(OID) {}
LoanID getLoanID() const { return LID; }
OriginID getOriginID() const { return OID; }
- void dump(llvm::raw_ostream &OS) const override {
- OS << "Issue (LoanID: " << getLoanID() << ", OriginID: " << getOriginID()
- << ")\n";
+ void dump(llvm::raw_ostream &OS, const OriginManager &OM) const override {
+ OS << "Issue (LoanID: " << getLoanID() << ", ToOrigin: ";
+ OM.dump(getOriginID(), OS);
+ OS << ")\n";
}
};
class ExpireFact : public Fact {
LoanID LID;
+ SourceLocation ExpiryLoc;
public:
static bool classof(const Fact *F) { return F->getKind() == Kind::Expire; }
- ExpireFact(LoanID LID) : Fact(Kind::Expire), LID(LID) {}
+ ExpireFact(LoanID LID, SourceLocation ExpiryLoc)
+ : Fact(Kind::Expire), LID(LID), ExpiryLoc(ExpiryLoc) {}
+
LoanID getLoanID() const { return LID; }
- void dump(llvm::raw_ostream &OS) const override {
+ SourceLocation getExpiryLoc() const { return ExpiryLoc; }
+
+ void dump(llvm::raw_ostream &OS, const OriginManager &OM) const override {
OS << "Expire (LoanID: " << getLoanID() << ")\n";
}
};
@@ -266,9 +287,12 @@ public:
: Fact(Kind::AssignOrigin), OIDDest(OIDDest), OIDSrc(OIDSrc) {}
OriginID getDestOriginID() const { return OIDDest; }
OriginID getSrcOriginID() const { return OIDSrc; }
- void dump(llvm::raw_ostream &OS) const override {
- OS << "AssignOrigin (DestID: " << getDestOriginID()
- << ", SrcID: " << getSrcOriginID() << ")\n";
+ void dump(llvm::raw_ostream &OS, const OriginManager &OM) const override {
+ OS << "AssignOrigin (Dest: ";
+ OM.dump(getDestOriginID(), OS);
+ OS << ", Src: ";
+ OM.dump(getSrcOriginID(), OS);
+ OS << ")\n";
}
};
@@ -282,8 +306,30 @@ public:
ReturnOfOriginFact(OriginID OID) : Fact(Kind::ReturnOfOrigin), OID(OID) {}
OriginID getReturnedOriginID() const { return OID; }
- void dump(llvm::raw_ostream &OS) const override {
- OS << "ReturnOfOrigin (OriginID: " << getReturnedOriginID() << ")\n";
+ void dump(llvm::raw_ostream &OS, const OriginManager &OM) const override {
+ OS << "ReturnOfOrigin (";
+ OM.dump(getReturnedOriginID(), OS);
+ OS << ")\n";
+ }
+};
+
+class UseFact : public Fact {
+ OriginID UsedOrigin;
+ const Expr *UseExpr;
+
+public:
+ static bool classof(const Fact *F) { return F->getKind() == Kind::Use; }
+
+ UseFact(OriginID UsedOrigin, const Expr *UseExpr)
+ : Fact(Kind::Use), UsedOrigin(UsedOrigin), UseExpr(UseExpr) {}
+
+ OriginID getUsedOrigin() const { return UsedOrigin; }
+ const Expr *getUseExpr() const { return UseExpr; }
+
+ void dump(llvm::raw_ostream &OS, const OriginManager &OM) const override {
+ OS << "Use (";
+ OM.dump(getUsedOrigin(), OS);
+ OS << ")\n";
}
};
@@ -300,7 +346,7 @@ public:
StringRef getAnnotation() const { return Annotation; }
- void dump(llvm::raw_ostream &OS) const override {
+ void dump(llvm::raw_ostream &OS, const OriginManager &) const override {
OS << "TestPoint (Annotation: \"" << getAnnotation() << "\")\n";
}
};
@@ -339,7 +385,7 @@ public:
if (It != BlockToFactsMap.end()) {
for (const Fact *F : It->second) {
llvm::dbgs() << " ";
- F->dump(llvm::dbgs());
+ F->dump(llvm::dbgs(), OriginMgr);
}
}
llvm::dbgs() << " End of Block\n";
@@ -417,13 +463,17 @@ public:
if (VD->hasLocalStorage()) {
OriginID OID = FactMgr.getOriginMgr().getOrCreate(*UO);
AccessPath AddrOfLocalVarPath(VD);
- const Loan &L = FactMgr.getLoanMgr().addLoan(AddrOfLocalVarPath,
- UO->getOperatorLoc());
+ const Loan &L =
+ FactMgr.getLoanMgr().addLoan(AddrOfLocalVarPath, UO);
CurrentBlockFacts.push_back(
FactMgr.createFact<IssueFact>(L.ID, OID));
}
}
}
+ } else if (UO->getOpcode() == UO_Deref) {
+ // This is a pointer use, like '*p'.
+ OriginID OID = FactMgr.getOriginMgr().get(*UO->getSubExpr());
+ CurrentBlockFacts.push_back(FactMgr.createFact<UseFact>(OID, UO));
}
}
@@ -492,7 +542,8 @@ private:
// Check if the loan is for a stack variable and if that variable
// is the one being destructed.
if (LoanPath.D == DestructedVD)
- CurrentBlockFacts.push_back(FactMgr.createFact<ExpireFact>(L.ID));
+ CurrentBlockFacts.push_back(FactMgr.createFact<ExpireFact>(
+ L.ID, DtorOpt.getTriggerStmt()->getEndLoc()));
}
}
@@ -618,6 +669,7 @@ public:
}
}
+protected:
Lattice getState(ProgramPoint P) const { return PerPointStates.lookup(P); }
Lattice getInState(const CFGBlock *B) const { return InStates.lookup(B); }
@@ -665,6 +717,8 @@ private:
return D->transfer(In, *F->getAs<AssignOriginFact>());
case Fact::Kind::ReturnOfOrigin:
return D->transfer(In, *F->getAs<ReturnOfOriginFact>());
+ case Fact::Kind::Use:
+ return D->transfer(In, *F->getAs<UseFact>());
case Fact::Kind::TestPoint:
return D->transfer(In, *F->getAs<TestPointFact>());
}
@@ -676,6 +730,7 @@ public:
Lattice transfer(Lattice In, const ExpireFact &) { return In; }
Lattice transfer(Lattice In, const AssignOriginFact &) { return In; }
Lattice transfer(Lattice In, const ReturnOfOriginFact &) { return In; }
+ Lattice transfer(Lattice In, const UseFact &) { return In; }
Lattice transfer(Lattice In, const TestPointFact &) { return In; }
};
@@ -693,6 +748,20 @@ static llvm::ImmutableSet<T> join(llvm::ImmutableSet<T> A,
return A;
}
+/// Checks if set A is a subset of set B.
+template <typename T>
+static bool isSubsetOf(const llvm::ImmutableSet<T> &A,
+ const llvm::ImmutableSet<T> &B) {
+ // Empty set is a subset of all sets.
+ if (A.isEmpty())
+ return true;
+
+ for (const T &Elem : A)
+ if (!B.contains(Elem))
+ return false;
+ return true;
+}
+
/// Computes the key-wise union of two ImmutableMaps.
// TODO(opt): This key-wise join is a performance bottleneck. A more
// efficient merge could be implemented using a Patricia Trie or HAMT
@@ -700,7 +769,7 @@ static llvm::ImmutableSet<T> join(llvm::ImmutableSet<T> A,
template <typename K, typename V, typename Joiner>
static llvm::ImmutableMap<K, V>
join(llvm::ImmutableMap<K, V> A, llvm::ImmutableMap<K, V> B,
- typename llvm::ImmutableMap<K, V>::Factory &F, Joiner joinValues) {
+ typename llvm::ImmutableMap<K, V>::Factory &F, Joiner JoinValues) {
if (A.getHeight() < B.getHeight())
std::swap(A, B);
@@ -710,7 +779,7 @@ join(llvm::ImmutableMap<K, V> A, llvm::ImmutableMap<K, V> B,
const K &Key = Entry.first;
const V &ValB = Entry.second;
if (const V *ValA = A.lookup(Key))
- A = F.add(A, Key, joinValues(*ValA, ValB));
+ A = F.add(A, Key, JoinValues(*ValA, ValB));
else
A = F.add(A, Key, ValB);
}
@@ -723,17 +792,14 @@ join(llvm::ImmutableMap<K, V> A, llvm::ImmutableMap<K, V> B,
// ========================================================================= //
using OriginLoanMap = llvm::ImmutableMap<OriginID, LoanSet>;
+using ExpiredLoanMap = llvm::ImmutableMap<LoanID, const ExpireFact *>;
/// An object to hold the factories for immutable collections, ensuring
/// that all created states share the same underlying memory management.
struct LifetimeFactory {
OriginLoanMap::Factory OriginMapFactory;
LoanSet::Factory LoanSetFactory;
-
- /// Creates a singleton set containing only the given loan ID.
- LoanSet createLoanSet(LoanID LID) {
- return LoanSetFactory.add(LoanSetFactory.getEmptySet(), LID);
- }
+ ExpiredLoanMap::Factory ExpiredLoanMapFactory;
};
/// Represents the dataflow lattice for loan propagation.
@@ -774,13 +840,15 @@ struct LoanPropagationLattice {
class LoanPropagationAnalysis
: public DataflowAnalysis<LoanPropagationAnalysis, LoanPropagationLattice,
Direction::Forward> {
-
- LifetimeFactory &Factory;
+ OriginLoanMap::Factory &OriginLoanMapFactory;
+ LoanSet::Factory &LoanSetFactory;
public:
LoanPropagationAnalysis(const CFG &C, AnalysisDeclContext &AC, FactManager &F,
- LifetimeFactory &Factory)
- : DataflowAnalysis(C, AC, F), Factory(Factory) {}
+ LifetimeFactory &LFactory)
+ : DataflowAnalysis(C, AC, F),
+ OriginLoanMapFactory(LFactory.OriginMapFactory),
+ LoanSetFactory(LFactory.LoanSetFactory) {}
using Base::transfer;
@@ -792,9 +860,9 @@ public:
// TODO(opt): Keep the state small by removing origins which become dead.
Lattice join(Lattice A, Lattice B) {
OriginLoanMap JoinedOrigins =
- utils::join(A.Origins, B.Origins, Factory.OriginMapFactory,
- [this](LoanSet S1, LoanSet S2) {
- return utils::join(S1, S2, Factory.LoanSetFactory);
+ utils::join(A.Origins, B.Origins, OriginLoanMapFactory,
+ [&](LoanSet S1, LoanSet S2) {
+ return utils::join(S1, S2, LoanSetFactory);
});
return Lattice(JoinedOrigins);
}
@@ -803,8 +871,9 @@ public:
Lattice transfer(Lattice In, const IssueFact &F) {
OriginID OID = F.getOriginID();
LoanID LID = F.getLoanID();
- return LoanPropagationLattice(Factory.OriginMapFactory.add(
- In.Origins, OID, Factory.createLoanSet(LID)));
+ return LoanPropagationLattice(OriginLoanMapFactory.add(
+ In.Origins, OID,
+ LoanSetFactory.add(LoanSetFactory.getEmptySet(), LID)));
}
/// The destination origin's loan set is replaced by the source's.
@@ -814,7 +883,7 @@ public:
OriginID SrcOID = F.getSrcOriginID();
LoanSet SrcLoans = getLoans(In, SrcOID);
return LoanPropagationLattice(
- Factory.OriginMapFactory.add(In.Origins, DestOID, SrcLoans));
+ OriginLoanMapFactory.add(In.Origins, DestOID, SrcLoans));
}
LoanSet getLoans(OriginID OID, ProgramPoint P) {
@@ -825,7 +894,7 @@ private:
LoanSet getLoans(Lattice L, OriginID OID) {
if (auto *Loans = L.Origins.lookup(OID))
return *Loans;
- return Factory.LoanSetFactory.getEmptySet();
+ return LoanSetFactory.getEmptySet();
}
};
@@ -835,10 +904,11 @@ private:
/// The dataflow lattice for tracking the set of expired loans.
struct ExpiredLattice {
- LoanSet Expired;
+ /// Map from an expired `LoanID` to the `ExpireFact` that made it expire.
+ ExpiredLoanMap Expired;
ExpiredLattice() : Expired(nullptr) {};
- explicit ExpiredLattice(LoanSet S) : Expired(S) {}
+ explicit ExpiredLattice(ExpiredLoanMap M) : Expired(M) {}
bool operator==(const ExpiredLattice &Other) const {
return Expired == Other.Expired;
@@ -851,8 +921,8 @@ struct ExpiredLattice {
OS << "ExpiredLattice State:\n";
if (Expired.isEmpty())
OS << " <empty>\n";
- for (const LoanID &LID : Expired)
- OS << " Loan " << LID << " is expired\n";
+ for (const auto &[ID, _] : Expired)
+ OS << " Loan " << ID << " is expired\n";
}
};
@@ -861,26 +931,31 @@ class ExpiredLoansAnalysis
: public DataflowAnalysis<ExpiredLoansAnalysis, ExpiredLattice,
Direction::Forward> {
- LoanSet::Factory &Factory;
+ ExpiredLoanMap::Factory &Factory;
public:
ExpiredLoansAnalysis(const CFG &C, AnalysisDeclContext &AC, FactManager &F,
LifetimeFactory &Factory)
- : DataflowAnalysis(C, AC, F), Factory(Factory.LoanSetFactory) {}
+ : DataflowAnalysis(C, AC, F), Factory(Factory.ExpiredLoanMapFactory) {}
using Base::transfer;
StringRef getAnalysisName() const { return "ExpiredLoans"; }
- Lattice getInitialState() { return Lattice(Factory.getEmptySet()); }
+ Lattice getInitialState() { return Lattice(Factory.getEmptyMap()); }
- /// Merges two lattices by taking the union of the expired loan sets.
- Lattice join(Lattice L1, Lattice L2) const {
- return Lattice(utils::join(L1.Expired, L2.Expired, Factory));
+ /// Merges two lattices by taking the union of the two expired-loan maps.
+ Lattice join(Lattice L1, Lattice L2) {
+ return Lattice(
+ utils::join(L1.Expired, L2.Expired, Factory,
+ // Prefer the later expiry fact so the join is deterministic.
+ [](const ExpireFact *F1, const ExpireFact *F2) {
+ return F1->getExpiryLoc() > F2->getExpiryLoc() ? F1 : F2;
+ }));
}
Lattice transfer(Lattice In, const ExpireFact &F) {
- return Lattice(Factory.add(In.Expired, F.getLoanID()));
+ return Lattice(Factory.add(In.Expired, F.getLoanID(), &F));
}
// Removes the loan from the set of expired loans.
@@ -912,15 +987,116 @@ public:
Lattice transfer(Lattice In, const IssueFact &F) {
return Lattice(Factory.remove(In.Expired, F.getLoanID()));
}
+
+ ExpiredLoanMap getExpiredLoans(ProgramPoint P) { return getState(P).Expired; }
};
// ========================================================================= //
-// TODO:
-// - Modify loan expiry analysis to answer `bool isExpired(Loan L, Point P)`
-// - Modify origin liveness analysis to answer `bool isLive(Origin O, Point P)`
-// - Using the above three to perform the final error reporting.
+// Lifetime Checker and Error Reporter
// ========================================================================= //
+/// Struct to store the complete context for a potential lifetime violation.
+struct PendingWarning {
+ SourceLocation ExpiryLoc; // Where the loan expired.
+ const Expr *UseExpr; // Where the origin holding this loan was used.
+ Confidence ConfidenceLevel;
+};
+
+class LifetimeChecker {
+private:
+ llvm::DenseMap<LoanID, PendingWarning> FinalWarningsMap;
+ LoanPropagationAnalysis &LoanPropagation;
+ ExpiredLoansAnalysis &ExpiredLoans;
+ FactManager &FactMgr;
+ AnalysisDeclContext &ADC;
+ LifetimeSafetyReporter *Reporter;
+
+public:
+ LifetimeChecker(LoanPropagationAnalysis &LPA, ExpiredLoansAnalysis &ELA,
+ FactManager &FM, AnalysisDeclContext &ADC,
+ LifetimeSafetyReporter *Reporter)
+ : LoanPropagation(LPA), ExpiredLoans(ELA), FactMgr(FM), ADC(ADC),
+ Reporter(Reporter) {}
+
+ void run() {
+ llvm::TimeTraceScope TimeProfile("LifetimeChecker");
+ for (const CFGBlock *B : *ADC.getAnalysis<PostOrderCFGView>())
+ for (const Fact *F : FactMgr.getFacts(B))
+ if (const auto *UF = F->getAs<UseFact>())
+ checkUse(UF);
+ issuePendingWarnings();
+ }
+
+ /// Checks for use-after-free errors for a given use of an Origin.
+ ///
+ /// This method is called for each 'UseFact' identified in the control flow
+ /// graph. It determines if the loans held by the used origin have expired
+ /// at the point of use.
+ void checkUse(const UseFact *UF) {
+
+ OriginID O = UF->getUsedOrigin();
+
+ // Get the set of loans that the origin might hold at this program point.
+ LoanSet HeldLoans = LoanPropagation.getLoans(O, UF);
+
+ // Get the set of all loans that have expired at this program point.
+ ExpiredLoanMap AllExpiredLoans = ExpiredLoans.getExpiredLoans(UF);
+
+ // If the pointer holds no loans or no loans have expired, there's nothing
+ // to check.
+ if (HeldLoans.isEmpty() || AllExpiredLoans.isEmpty())
+ return;
+
+ // Identify loans that have expired but are still held by the pointer.
+ // Using them is a use-after-free.
+ llvm::SmallVector<LoanID> DefaultedLoans;
+ // A definite UaF error occurs if all loans the origin might hold have
+ // expired.
+ bool IsDefiniteError = true;
+ for (LoanID L : HeldLoans) {
+ if (AllExpiredLoans.contains(L))
+ DefaultedLoans.push_back(L);
+ else
+ // If at least one loan is not expired, this use is not a definite UaF.
+ IsDefiniteError = false;
+ }
+ // If there are no defaulted loans, the use is safe.
+ if (DefaultedLoans.empty())
+ return;
+
+ // Determine the confidence level of the error (definite or maybe).
+ Confidence CurrentConfidence =
+ IsDefiniteError ? Confidence::Definite : Confidence::Maybe;
+
+ // For each expired loan, create a pending warning.
+ for (LoanID DefaultedLoan : DefaultedLoans) {
+ // If we already have a warning for this loan with a higher or equal
+ // confidence, skip this one.
+ if (FinalWarningsMap.count(DefaultedLoan) &&
+ CurrentConfidence <= FinalWarningsMap[DefaultedLoan].ConfidenceLevel)
+ continue;
+
+ auto *EF = AllExpiredLoans.lookup(DefaultedLoan);
+ assert(EF && "Could not find ExpireFact for an expired loan.");
+
+ FinalWarningsMap[DefaultedLoan] = {/*ExpiryLoc=*/(*EF)->getExpiryLoc(),
+ /*UseExpr=*/UF->getUseExpr(),
+ /*ConfidenceLevel=*/CurrentConfidence};
+ }
+ }
+
+ void issuePendingWarnings() {
+ if (!Reporter)
+ return;
+ for (const auto &[LID, Warning] : FinalWarningsMap) {
+ const Loan &L = FactMgr.getLoanMgr().getLoan(LID);
+ const Expr *IssueExpr = L.IssueExpr;
+ Reporter->reportUseAfterFree(IssueExpr, Warning.UseExpr,
+ Warning.ExpiryLoc, Warning.ConfidenceLevel);
+ }
+ }
+};
+
// ========================================================================= //
// LifetimeSafetyAnalysis Class Implementation
// ========================================================================= //
@@ -928,8 +1104,9 @@ public:
// We need this here for unique_ptr with forward declared class.
LifetimeSafetyAnalysis::~LifetimeSafetyAnalysis() = default;
-LifetimeSafetyAnalysis::LifetimeSafetyAnalysis(AnalysisDeclContext &AC)
- : AC(AC), Factory(std::make_unique<LifetimeFactory>()),
+LifetimeSafetyAnalysis::LifetimeSafetyAnalysis(AnalysisDeclContext &AC,
+ LifetimeSafetyReporter *Reporter)
+ : AC(AC), Reporter(Reporter), Factory(std::make_unique<LifetimeFactory>()),
FactMgr(std::make_unique<FactManager>()) {}
void LifetimeSafetyAnalysis::run() {
@@ -952,6 +1129,8 @@ void LifetimeSafetyAnalysis::run() {
/// blocks; only Decls are visible. Therefore, loans in a block that
/// never reach an Origin associated with a Decl can be safely dropped by
/// the analysis.
+ /// 3. Collapse ExpireFacts belonging to the same source location into a single
+ /// Fact.
LoanPropagation =
std::make_unique<LoanPropagationAnalysis>(Cfg, AC, *FactMgr, *Factory);
LoanPropagation->run();
@@ -959,6 +1138,10 @@ void LifetimeSafetyAnalysis::run() {
ExpiredLoans =
std::make_unique<ExpiredLoansAnalysis>(Cfg, AC, *FactMgr, *Factory);
ExpiredLoans->run();
+
+ LifetimeChecker Checker(*LoanPropagation, *ExpiredLoans, *FactMgr, AC,
+ Reporter);
+ Checker.run();
}
LoanSet LifetimeSafetyAnalysis::getLoansAtPoint(OriginID OID,
@@ -967,9 +1150,13 @@ LoanSet LifetimeSafetyAnalysis::getLoansAtPoint(OriginID OID,
return LoanPropagation->getLoans(OID, PP);
}
-LoanSet LifetimeSafetyAnalysis::getExpiredLoansAtPoint(ProgramPoint PP) const {
+std::vector<LoanID>
+LifetimeSafetyAnalysis::getExpiredLoansAtPoint(ProgramPoint PP) const {
assert(ExpiredLoans && "ExpiredLoansAnalysis has not been run.");
- return ExpiredLoans->getState(PP).Expired;
+ std::vector<LoanID> Result;
+ for (const auto &Entry : ExpiredLoans->getExpiredLoans(PP))
+ Result.push_back(Entry.first);
+ return Result;
}
std::optional<OriginID>
@@ -1009,8 +1196,9 @@ llvm::StringMap<ProgramPoint> LifetimeSafetyAnalysis::getTestPoints() const {
}
} // namespace internal
-void runLifetimeSafetyAnalysis(AnalysisDeclContext &AC) {
- internal::LifetimeSafetyAnalysis Analysis(AC);
+void runLifetimeSafetyAnalysis(AnalysisDeclContext &AC,
+ LifetimeSafetyReporter *Reporter) {
+ internal::LifetimeSafetyAnalysis Analysis(AC, Reporter);
Analysis.run();
}
} // namespace clang::lifetimes
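
End to end, the checker consumes the fact stream roughly as follows; an illustrative dangling-pointer example annotated with the facts generated for it:

    int *P = nullptr;
    {
      int X = 0;
      P = &X; // IssueFact: loan L0 on X; AssignOrigin into P's origin
    }         // ExpireFact: L0 expires when X is destroyed
    int Y = *P; // UseFact: every loan P may hold is expired, so this is
                // reported as a definite use-after-free
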
diff --git a/clang/lib/Analysis/ThreadSafety.cpp b/clang/lib/Analysis/ThreadSafety.cpp
index c9fd9cc..131170d 100644
--- a/clang/lib/Analysis/ThreadSafety.cpp
+++ b/clang/lib/Analysis/ThreadSafety.cpp
@@ -1392,8 +1392,7 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const FactEntry *Entry,
}
// Check before/after constraints
- if (Handler.issueBetaWarnings() &&
- !Entry->asserted() && !Entry->declared()) {
+ if (!Entry->asserted() && !Entry->declared()) {
GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
Entry->loc(), Entry->getKind());
}
@@ -1929,7 +1928,9 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
assert(inserted.second && "Are we visiting the same expression again?");
if (isa<CXXConstructExpr>(Exp))
Self = Placeholder;
- if (TagT->getDecl()->hasAttr<ScopedLockableAttr>())
+ if (TagT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<ScopedLockableAttr>())
Scp = CapabilityExpr(Placeholder, Exp->getType(), /*Neg=*/false);
}
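
The getMostRecentDecl() hop matters when the attribute appears only on a later redeclaration; a minimal sketch:

    struct MutexLocker; // first declaration, no attribute

    struct __attribute__((scoped_lockable)) MutexLocker {
      // ...
    };

    // The TagType may name the first declaration, so the attribute is
    // looked up on the most recent redeclaration instead.
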
diff --git a/clang/lib/Analysis/ThreadSafetyCommon.cpp b/clang/lib/Analysis/ThreadSafetyCommon.cpp
index ddbd0a9..68c27ee 100644
--- a/clang/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/clang/lib/Analysis/ThreadSafetyCommon.cpp
@@ -83,13 +83,11 @@ static std::pair<StringRef, bool> classifyCapability(QualType QT) {
// We need to look at the declaration of the type of the value to determine
// which it is. The type should either be a record or a typedef, or a pointer
// or reference thereof.
- if (const auto *RT = QT->getAs<RecordType>()) {
- if (const auto *RD = RT->getDecl())
- return classifyCapability(*RD);
- } else if (const auto *TT = QT->getAs<TypedefType>()) {
- if (const auto *TD = TT->getDecl())
- return classifyCapability(*TD);
- } else if (QT->isPointerOrReferenceType())
+ if (const auto *RD = QT->getAsRecordDecl())
+ return classifyCapability(*RD);
+ if (const auto *TT = QT->getAs<TypedefType>())
+ return classifyCapability(*TT->getDecl());
+ if (QT->isPointerOrReferenceType())
return classifyCapability(QT->getPointeeType());
return ClassifyCapabilityFallback;
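
The three classification paths after the cleanup, illustrated with assumed declarations:

    struct __attribute__((capability("mutex"))) Mutex {}; // record path
    typedef Mutex MutexT;                                 // typedef path
    void lockIt(Mutex *M);                                // pointee path
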
diff --git a/clang/lib/Analysis/UnsafeBufferUsage.cpp b/clang/lib/Analysis/UnsafeBufferUsage.cpp
index f4ead3d..1d7b872 100644
--- a/clang/lib/Analysis/UnsafeBufferUsage.cpp
+++ b/clang/lib/Analysis/UnsafeBufferUsage.cpp
@@ -182,18 +182,22 @@ public:
return DynamicRecursiveASTVisitor::TraverseUnaryExprOrTypeTraitExpr(Node);
}
- bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc Node) override {
+ bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc Node,
+ bool TraverseQualifier) override {
// Unevaluated context.
if (ignoreUnevaluatedContext)
return true;
- return DynamicRecursiveASTVisitor::TraverseTypeOfExprTypeLoc(Node);
+ return DynamicRecursiveASTVisitor::TraverseTypeOfExprTypeLoc(
+ Node, TraverseQualifier);
}
- bool TraverseDecltypeTypeLoc(DecltypeTypeLoc Node) override {
+ bool TraverseDecltypeTypeLoc(DecltypeTypeLoc Node,
+ bool TraverseQualifier) override {
// Unevaluated context.
if (ignoreUnevaluatedContext)
return true;
- return DynamicRecursiveASTVisitor::TraverseDecltypeTypeLoc(Node);
+ return DynamicRecursiveASTVisitor::TraverseDecltypeTypeLoc(
+ Node, TraverseQualifier);
}
bool TraverseCXXNoexceptExpr(CXXNoexceptExpr *Node) override {
@@ -2252,7 +2256,7 @@ namespace {
// declarations to its uses and make sure we've covered all uses with our
// analysis before we try to fix the declaration.
class DeclUseTracker {
- using UseSetTy = llvm::SmallSet<const DeclRefExpr *, 16>;
+ using UseSetTy = llvm::SmallPtrSet<const DeclRefExpr *, 16>;
using DefMapTy = llvm::DenseMap<const VarDecl *, const DeclStmt *>;
// Allocate on the heap for easier move.
diff --git a/clang/lib/Basic/Diagnostic.cpp b/clang/lib/Basic/Diagnostic.cpp
index e33e843..dc3778b 100644
--- a/clang/lib/Basic/Diagnostic.cpp
+++ b/clang/lib/Basic/Diagnostic.cpp
@@ -664,6 +664,8 @@ void DiagnosticsEngine::Report(const StoredDiagnostic &storedDiag) {
void DiagnosticsEngine::Report(Level DiagLevel, const Diagnostic &Info) {
assert(DiagLevel != Ignored && "Cannot emit ignored diagnostics!");
+ assert(!getDiagnosticIDs()->isTrapDiag(Info.getID()) &&
+ "Trap diagnostics should not be consumed by the DiagnosticsEngine");
Client->HandleDiagnostic(DiagLevel, Info);
if (Client->IncludeInDiagnosticCounts()) {
if (DiagLevel == Warning)
diff --git a/clang/lib/Basic/DiagnosticIDs.cpp b/clang/lib/Basic/DiagnosticIDs.cpp
index 73f24a82..a1d9d0f 100644
--- a/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/clang/lib/Basic/DiagnosticIDs.cpp
@@ -69,6 +69,7 @@ enum DiagnosticClass {
CLASS_WARNING = DiagnosticIDs::CLASS_WARNING,
CLASS_EXTENSION = DiagnosticIDs::CLASS_EXTENSION,
CLASS_ERROR = DiagnosticIDs::CLASS_ERROR,
+ CLASS_TRAP = DiagnosticIDs::CLASS_TRAP,
};
struct StaticDiagInfoRec {
@@ -139,6 +140,7 @@ VALIDATE_DIAG_SIZE(SEMA)
VALIDATE_DIAG_SIZE(ANALYSIS)
VALIDATE_DIAG_SIZE(REFACTORING)
VALIDATE_DIAG_SIZE(INSTALLAPI)
+VALIDATE_DIAG_SIZE(TRAP)
#undef VALIDATE_DIAG_SIZE
#undef STRINGIFY_NAME
@@ -171,6 +173,7 @@ const StaticDiagInfoRec StaticDiagInfo[] = {
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
#include "clang/Basic/DiagnosticRefactoringKinds.inc"
#include "clang/Basic/DiagnosticInstallAPIKinds.inc"
+#include "clang/Basic/DiagnosticTrapKinds.inc"
// clang-format on
#undef DIAG
};
@@ -214,6 +217,7 @@ CATEGORY(SEMA, CROSSTU)
CATEGORY(ANALYSIS, SEMA)
CATEGORY(REFACTORING, ANALYSIS)
CATEGORY(INSTALLAPI, REFACTORING)
+CATEGORY(TRAP, INSTALLAPI)
#undef CATEGORY
// Avoid out of bounds reads.
diff --git a/clang/lib/Basic/SourceManager.cpp b/clang/lib/Basic/SourceManager.cpp
index 343c26e..d8ec837 100644
--- a/clang/lib/Basic/SourceManager.cpp
+++ b/clang/lib/Basic/SourceManager.cpp
@@ -1171,14 +1171,14 @@ unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
if (Buf[FilePos - 1] == '\r' || Buf[FilePos - 1] == '\n')
--FilePos;
}
- return FilePos - LineStart + 1;
+ return (FilePos - LineStart) + 1;
}
}
unsigned LineStart = FilePos;
while (LineStart && Buf[LineStart-1] != '\n' && Buf[LineStart-1] != '\r')
--LineStart;
- return FilePos-LineStart+1;
+ return (FilePos - LineStart) + 1;
}
// isInvalid - Return the result of calling loc.isInvalid(), and
diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp
index 21fc084..2fbf1ee 100644
--- a/clang/lib/Basic/TargetInfo.cpp
+++ b/clang/lib/Basic/TargetInfo.cpp
@@ -62,7 +62,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
TLSSupported = true;
VLASupported = true;
NoAsmVariants = false;
- HasLegalHalfType = false;
+ HasFastHalfType = false;
HalfArgsAndReturns = false;
HasFloat128 = false;
HasIbm128 = false;
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index 2b023e5..9e03a08 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -142,7 +142,7 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
AddrSpaceMap = &ARM64AddrSpaceMap;
// All AArch64 implementations support ARMv8 FP, which makes half a legal type.
- HasLegalHalfType = true;
+ HasFastHalfType = true;
HalfArgsAndReturns = true;
HasFloat16 = true;
HasStrictFP = true;
diff --git a/clang/lib/Basic/Targets/AMDGPU.cpp b/clang/lib/Basic/Targets/AMDGPU.cpp
index 52cbdbc..87de9e6 100644
--- a/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -197,12 +197,11 @@ bool AMDGPUTargetInfo::initFeatureMap(
const std::vector<std::string> &FeatureVec) const {
using namespace llvm::AMDGPU;
- fillAMDGPUFeatureMap(CPU, getTriple(), Features);
+
if (!TargetInfo::initFeatureMap(Features, Diags, CPU, FeatureVec))
return false;
- // TODO: Should move this logic into TargetParser
- auto HasError = insertWaveSizeFeature(CPU, getTriple(), Features);
+ auto HasError = fillAMDGPUFeatureMap(CPU, getTriple(), Features);
switch (HasError.first) {
default:
break;
@@ -251,7 +250,7 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
BFloat16Format = &llvm::APFloat::BFloat();
}
- HasLegalHalfType = true;
+ HasFastHalfType = true;
HasFloat16 = true;
WavefrontSize = (GPUFeatures & llvm::AMDGPU::FEATURE_WAVE32) ? 32 : 64;
diff --git a/clang/lib/Basic/Targets/ARM.cpp b/clang/lib/Basic/Targets/ARM.cpp
index 75fdf38..3de17d2 100644
--- a/clang/lib/Basic/Targets/ARM.cpp
+++ b/clang/lib/Basic/Targets/ARM.cpp
@@ -585,13 +585,13 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
} else if (Feature == "+fp16") {
HW_FP |= HW_FP_HP;
} else if (Feature == "+fullfp16") {
- HasLegalHalfType = true;
+ HasFastHalfType = true;
} else if (Feature == "+dotprod") {
DotProd = true;
} else if (Feature == "+mve") {
MVE |= MVE_INT;
} else if (Feature == "+mve.fp") {
- HasLegalHalfType = true;
+ HasFastHalfType = true;
FPU |= FPARMV8;
MVE |= MVE_INT | MVE_FP;
HW_FP |= HW_FP_SP | HW_FP_HP;
@@ -1014,11 +1014,11 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_FP_FAST", "1");
// Armv8.2-A FP16 vector intrinsic
- if ((FPU & NeonFPU) && HasLegalHalfType)
+ if ((FPU & NeonFPU) && HasFastHalfType)
Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
// Armv8.2-A FP16 scalar intrinsics
- if (HasLegalHalfType)
+ if (HasFastHalfType)
Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
// Armv8.2-A dot product intrinsics
diff --git a/clang/lib/Basic/Targets/AVR.h b/clang/lib/Basic/Targets/AVR.h
index 75c969f..b6667786 100644
--- a/clang/lib/Basic/Targets/AVR.h
+++ b/clang/lib/Basic/Targets/AVR.h
@@ -57,7 +57,7 @@ public:
Int16Type = SignedInt;
Char32Type = UnsignedLong;
SigAtomicType = SignedChar;
- resetDataLayout("e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8");
+ resetDataLayout("e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8:16-a:8");
}
void getTargetDefines(const LangOptions &Opts,
diff --git a/clang/lib/Basic/Targets/DirectX.h b/clang/lib/Basic/Targets/DirectX.h
index 17240cf..bd13c9e 100644
--- a/clang/lib/Basic/Targets/DirectX.h
+++ b/clang/lib/Basic/Targets/DirectX.h
@@ -59,7 +59,7 @@ public:
VLASupported = false;
AddrSpaceMap = &DirectXAddrSpaceMap;
UseAddrSpaceMapMangling = true;
- HasLegalHalfType = true;
+ HasFastHalfType = true;
HasFloat16 = true;
NoAsmVariants = true;
PlatformMinVersion = Triple.getOSVersion();
diff --git a/clang/lib/Basic/Targets/Hexagon.cpp b/clang/lib/Basic/Targets/Hexagon.cpp
index 06dcac0..cea64f9 100644
--- a/clang/lib/Basic/Targets/Hexagon.cpp
+++ b/clang/lib/Basic/Targets/Hexagon.cpp
@@ -149,7 +149,7 @@ bool HexagonTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAudio = true;
}
if (CPU.compare("hexagonv68") >= 0) {
- HasLegalHalfType = true;
+ HasFastHalfType = true;
HasFloat16 = true;
}
return true;
diff --git a/clang/lib/Basic/Targets/LoongArch.cpp b/clang/lib/Basic/Targets/LoongArch.cpp
index f6915df..8e29bb7 100644
--- a/clang/lib/Basic/Targets/LoongArch.cpp
+++ b/clang/lib/Basic/Targets/LoongArch.cpp
@@ -461,6 +461,8 @@ LoongArchTargetInfo::parseTargetAttr(StringRef Features) const {
case AttrFeatureKind::Feature:
Ret.Features.push_back("+" + Value.str());
+ if (Value == "lasx")
+ Ret.Features.push_back("+lsx");
break;
}
}
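
With this change, requesting LASX through the target attribute implies LSX, which it architecturally requires; an illustrative use:

    __attribute__((target("lasx")))
    void vectorized(); // compiled with +lasx and, now, +lsx
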
diff --git a/clang/lib/Basic/Targets/NVPTX.cpp b/clang/lib/Basic/Targets/NVPTX.cpp
index 79995cc..f7abc05 100644
--- a/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/clang/lib/Basic/Targets/NVPTX.cpp
@@ -65,18 +65,19 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
GPU = OffloadArch::UNUSED;
// PTX supports f16 as a fundamental type.
- HasLegalHalfType = true;
+ HasFastHalfType = true;
HasFloat16 = true;
if (TargetPointerWidth == 32)
- resetDataLayout(
- "e-p:32:32-p6:32:32-p7:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
+ resetDataLayout("e-p:32:32-p6:32:32-p7:32:32-i64:64-i128:128-i256:256-v16:"
+ "16-v32:32-n16:32:64");
else if (Opts.NVPTXUseShortPointers)
- resetDataLayout(
- "e-p3:32:32-p4:32:32-p5:32:32-p6:32:32-p7:32:32-i64:64-i128:128-v16:"
- "16-v32:32-n16:32:64");
+ resetDataLayout("e-p3:32:32-p4:32:32-p5:32:32-p6:32:32-p7:32:32-i64:64-"
+ "i128:128-i256:256-v16:"
+ "16-v32:32-n16:32:64");
else
- resetDataLayout("e-p6:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
+ resetDataLayout(
+ "e-p6:32:32-i64:64-i128:128-i256:256-v16:16-v32:32-n16:32:64");
// If possible, get a TargetInfo for our host triple, so we can match its
// types.
diff --git a/clang/lib/Basic/Targets/OSTargets.h b/clang/lib/Basic/Targets/OSTargets.h
index 94b018a..a733f6e 100644
--- a/clang/lib/Basic/Targets/OSTargets.h
+++ b/clang/lib/Basic/Targets/OSTargets.h
@@ -174,6 +174,9 @@ protected:
DefineStd(Builder, "unix", Opts);
if (this->HasFloat128)
Builder.defineMacro("__FLOAT128__");
+
+ if (Opts.C11)
+ Builder.defineMacro("__STDC_NO_THREADS__");
}
public:
diff --git a/clang/lib/Basic/Targets/PPC.cpp b/clang/lib/Basic/Targets/PPC.cpp
index ef18354..a6e1ad10 100644
--- a/clang/lib/Basic/Targets/PPC.cpp
+++ b/clang/lib/Basic/Targets/PPC.cpp
@@ -89,6 +89,8 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
}
static void defineXLCompatMacros(MacroBuilder &Builder) {
+ Builder.defineMacro("__builtin_bcdcopysign", "__builtin_ppc_bcdcopysign");
+ Builder.defineMacro("__builtin_bcdsetsign", "__builtin_ppc_bcdsetsign");
Builder.defineMacro("__builtin_national2packed",
"__builtin_ppc_national2packed");
Builder.defineMacro("__builtin_packed2national",
diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index a6a5ec4..04da4e6 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -427,7 +427,7 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
ABI = ISAInfo->computeDefaultABI().str();
if (ISAInfo->hasExtension("zfh") || ISAInfo->hasExtension("zhinx"))
- HasLegalHalfType = true;
+ HasFastHalfType = true;
FastScalarUnalignedAccess =
llvm::is_contained(Features, "+unaligned-scalar-mem");
diff --git a/clang/lib/Basic/Targets/SPIR.h b/clang/lib/Basic/Targets/SPIR.h
index 9d0ced2..8bb0428 100644
--- a/clang/lib/Basic/Targets/SPIR.h
+++ b/clang/lib/Basic/Targets/SPIR.h
@@ -106,7 +106,7 @@ protected:
LongWidth = LongAlign = 64;
AddrSpaceMap = &SPIRDefIsPrivMap;
UseAddrSpaceMapMangling = true;
- HasLegalHalfType = true;
+ HasFastHalfType = true;
HasFloat16 = true;
// Define available target features
// These must be defined in sorted order!
@@ -219,8 +219,11 @@ public:
setAddressSpaceMap(
/*DefaultIsGeneric=*/Opts.SYCLIsDevice ||
// The address mapping from HIP/CUDA language for device code is only
- // defined for SPIR-V.
- (getTriple().isSPIRV() && Opts.CUDAIsDevice));
+ // defined for SPIR-V, and all Intel SPIR-V code should have the default
+ // AS as generic.
+ (getTriple().isSPIRV() &&
+ (Opts.CUDAIsDevice ||
+ getTriple().getVendor() == llvm::Triple::Intel)));
}
void setSupportedOpenCLOpts() override {
@@ -427,7 +430,7 @@ public:
BFloat16Width = BFloat16Align = 16;
BFloat16Format = &llvm::APFloat::BFloat();
- HasLegalHalfType = true;
+ HasFastHalfType = true;
HasFloat16 = true;
HalfArgsAndReturns = true;
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index 7f7dcf8..dc2185e 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -104,7 +104,7 @@ public:
// -ffloat16-excess-precision=none is given, no conversions will be made
// and instead the backend will promote each half operation to float
// individually.
- HasLegalHalfType = false;
+ HasFastHalfType = false;
HasStrictFP = true;
}
diff --git a/clang/lib/Basic/Targets/X86.cpp b/clang/lib/Basic/Targets/X86.cpp
index 24ecec2..f9424cb 100644
--- a/clang/lib/Basic/Targets/X86.cpp
+++ b/clang/lib/Basic/Targets/X86.cpp
@@ -348,7 +348,7 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAVX512BF16 = true;
} else if (Feature == "+avx512fp16") {
HasAVX512FP16 = true;
- HasLegalHalfType = true;
+ HasFastHalfType = true;
} else if (Feature == "+avx512dq") {
HasAVX512DQ = true;
} else if (Feature == "+avx512bitalg") {
@@ -1029,8 +1029,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__CF__");
if (HasZU)
Builder.defineMacro("__ZU__");
- if (HasEGPR && HasPush2Pop2 && HasPPX && HasNDD && HasCCMP && HasNF &&
- HasCF && HasZU)
+ if (HasEGPR && HasPush2Pop2 && HasPPX && HasNDD && HasCCMP && HasNF && HasZU)
Builder.defineMacro("__APX_F__");
if (HasEGPR && HasInlineAsmUseGPR32)
Builder.defineMacro("__APX_INLINE_ASM_USE_GPR32__");
diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h
index 6c927e9..a851d06 100644
--- a/clang/lib/CIR/CodeGen/Address.h
+++ b/clang/lib/CIR/CodeGen/Address.h
@@ -68,6 +68,12 @@ public:
return pointerAndKnownNonNull.getPointer() != nullptr;
}
+ /// Return address with different pointer, but same element type and
+ /// alignment.
+ Address withPointer(mlir::Value newPtr) const {
+ return Address(newPtr, getElementType(), getAlignment());
+ }
+
/// Return address with different element type, a bitcast pointer, and
/// the same alignment.
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const;
diff --git a/clang/lib/CIR/CodeGen/CIRGenAsm.cpp b/clang/lib/CIR/CodeGen/CIRGenAsm.cpp
new file mode 100644
index 0000000..17dffb3
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenAsm.cpp
@@ -0,0 +1,136 @@
+//===--- CIRGenAsm.cpp - Inline Assembly Support for CIR CodeGen ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to emit inline assembly.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+#include "clang/CIR/MissingFeatures.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+using namespace cir;
+
+static AsmFlavor inferFlavor(const CIRGenModule &cgm, const AsmStmt &s) {
+ AsmFlavor gnuAsmFlavor =
+ cgm.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
+ ? AsmFlavor::x86_att
+ : AsmFlavor::x86_intel;
+
+ return isa<MSAsmStmt>(&s) ? AsmFlavor::x86_intel : gnuAsmFlavor;
+}
+
+static void collectClobbers(const CIRGenFunction &cgf, const AsmStmt &s,
+ std::string &constraints, bool &hasUnwindClobber,
+                            bool &readOnly, bool &readNone) {
+
+ hasUnwindClobber = false;
+ const CIRGenModule &cgm = cgf.getCIRGenModule();
+
+ // Clobbers
+ for (unsigned i = 0, e = s.getNumClobbers(); i != e; i++) {
+ std::string clobber = s.getClobber(i);
+ if (clobber == "memory") {
+ readOnly = readNone = false;
+ } else if (clobber == "unwind") {
+ hasUnwindClobber = true;
+ continue;
+ } else if (clobber != "cc") {
+ clobber = cgf.getTarget().getNormalizedGCCRegisterName(clobber);
+ if (cgm.getCodeGenOpts().StackClashProtector &&
+ cgf.getTarget().isSPRegName(clobber))
+ cgm.getDiags().Report(s.getAsmLoc(),
+ diag::warn_stack_clash_protection_inline_asm);
+ }
+
+ if (isa<MSAsmStmt>(&s)) {
+ if (clobber == "eax" || clobber == "edx") {
+ if (constraints.find("=&A") != std::string::npos)
+ continue;
+ std::string::size_type position1 =
+ constraints.find("={" + clobber + "}");
+ if (position1 != std::string::npos) {
+ constraints.insert(position1 + 1, "&");
+ continue;
+ }
+ std::string::size_type position2 = constraints.find("=A");
+ if (position2 != std::string::npos) {
+ constraints.insert(position2 + 1, "&");
+ continue;
+ }
+ }
+ }
+ if (!constraints.empty())
+ constraints += ',';
+
+ constraints += "~{";
+ constraints += clobber;
+ constraints += '}';
+ }
+
+  // Add machine-specific clobbers.
+ std::string_view machineClobbers = cgf.getTarget().getClobbers();
+ if (!machineClobbers.empty()) {
+ if (!constraints.empty())
+ constraints += ',';
+ constraints += machineClobbers;
+ }
+}
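+
+// Illustrative example (not part of this patch): on x86, an asm statement
+// such as
+//   asm volatile("nop" ::: "memory", "cc", "eax");
+// would make the loop above build a constraint string along the lines of
+//   "~{memory},~{cc},~{eax},~{dirflag},~{fpsr},~{flags}"
+// where the trailing entries come from getClobbers(). A "memory" clobber
+// also clears readOnly/readNone, and an "unwind" clobber only sets
+// hasUnwindClobber without appearing in the string.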
+
+mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &s) {
+ // Assemble the final asm string.
+ std::string asmString = s.generateAsmString(getContext());
+
+ bool isGCCAsmGoto = false;
+
+ std::string constraints;
+ std::vector<mlir::Value> outArgs;
+ std::vector<mlir::Value> inArgs;
+ std::vector<mlir::Value> inOutArgs;
+
+ // An inline asm can be marked readonly if it meets the following conditions:
+  // - it doesn't have any side effects
+ // - it doesn't clobber memory
+ // - it doesn't return a value by-reference
+ // It can be marked readnone if it doesn't have any input memory constraints
+ // in addition to meeting the conditions listed above.
+ bool readOnly = true, readNone = true;
+
+ if (s.getNumInputs() != 0 || s.getNumOutputs() != 0) {
+ assert(!cir::MissingFeatures::asmInputOperands());
+ assert(!cir::MissingFeatures::asmOutputOperands());
+ cgm.errorNYI(s.getAsmLoc(), "asm with operands");
+ }
+
+ bool hasUnwindClobber = false;
+ collectClobbers(*this, s, constraints, hasUnwindClobber, readOnly, readNone);
+
+ std::array<mlir::ValueRange, 3> operands = {outArgs, inArgs, inOutArgs};
+
+ mlir::Type resultType;
+
+ bool hasSideEffect = s.isVolatile() || s.getNumOutputs() == 0;
+
+ cir::InlineAsmOp ia = builder.create<cir::InlineAsmOp>(
+ getLoc(s.getAsmLoc()), resultType, operands, asmString, constraints,
+ hasSideEffect, inferFlavor(cgm, s), mlir::ArrayAttr());
+
+ if (isGCCAsmGoto) {
+ assert(!cir::MissingFeatures::asmGoto());
+ } else if (hasUnwindClobber) {
+ assert(!cir::MissingFeatures::asmUnwindClobber());
+ } else {
+ assert(!cir::MissingFeatures::asmMemoryEffects());
+ }
+
+ llvm::SmallVector<mlir::Attribute> operandAttrs;
+ ia.setOperandAttrsAttr(builder.getArrayAttr(operandAttrs));
+
+ return mlir::success();
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
new file mode 100644
index 0000000..d8981c8
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -0,0 +1,569 @@
+//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the code for emitting atomic operations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+#include "clang/CIR/MissingFeatures.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+using namespace cir;
+
+namespace {
+class AtomicInfo {
+ CIRGenFunction &cgf;
+ QualType atomicTy;
+ QualType valueTy;
+ uint64_t atomicSizeInBits = 0;
+ uint64_t valueSizeInBits = 0;
+ CharUnits atomicAlign;
+ CharUnits valueAlign;
+ TypeEvaluationKind evaluationKind = cir::TEK_Scalar;
+ LValue lvalue;
+ mlir::Location loc;
+
+public:
+ AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
+ : cgf(cgf), loc(loc) {
+ assert(!lvalue.isGlobalReg());
+ ASTContext &ctx = cgf.getContext();
+ if (lvalue.isSimple()) {
+ atomicTy = lvalue.getType();
+ if (auto *ty = atomicTy->getAs<AtomicType>())
+ valueTy = ty->getValueType();
+ else
+ valueTy = atomicTy;
+ evaluationKind = cgf.getEvaluationKind(valueTy);
+
+ TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
+ TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
+ uint64_t valueAlignInBits = valueTypeInfo.Align;
+ uint64_t atomicAlignInBits = atomicTypeInfo.Align;
+ valueSizeInBits = valueTypeInfo.Width;
+ atomicSizeInBits = atomicTypeInfo.Width;
+ assert(valueSizeInBits <= atomicSizeInBits);
+ assert(valueAlignInBits <= atomicAlignInBits);
+
+ atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
+ valueAlign = ctx.toCharUnitsFromBits(valueAlignInBits);
+ if (lvalue.getAlignment().isZero())
+ lvalue.setAlignment(atomicAlign);
+
+ this->lvalue = lvalue;
+ } else {
+ assert(!cir::MissingFeatures::atomicInfo());
+ cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
+ }
+
+ assert(!cir::MissingFeatures::atomicUseLibCall());
+ }
+
+ QualType getValueType() const { return valueTy; }
+ CharUnits getAtomicAlignment() const { return atomicAlign; }
+ TypeEvaluationKind getEvaluationKind() const { return evaluationKind; }
+ mlir::Value getAtomicPointer() const {
+ if (lvalue.isSimple())
+ return lvalue.getPointer();
+ assert(!cir::MissingFeatures::atomicInfoGetAtomicPointer());
+ return nullptr;
+ }
+ Address getAtomicAddress() const {
+ mlir::Type elemTy;
+ if (lvalue.isSimple()) {
+ elemTy = lvalue.getAddress().getElementType();
+ } else {
+ assert(!cir::MissingFeatures::atomicInfoGetAtomicAddress());
+ cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
+ }
+ return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
+ }
+
+ /// Is the atomic size larger than the underlying value type?
+ ///
+ /// Note that the absence of padding does not mean that atomic
+ /// objects are completely interchangeable with non-atomic
+ /// objects: we might have promoted the alignment of a type
+ /// without making it bigger.
+ bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }
+
+ bool emitMemSetZeroIfNecessary() const;
+
+ /// Cast the given pointer to an integer pointer suitable for atomic
+ /// operations on the source.
+ Address castToAtomicIntPointer(Address addr) const;
+
+ /// If addr is compatible with the iN that will be used for an atomic
+ /// operation, bitcast it. Otherwise, create a temporary that is suitable and
+ /// copy the value across.
+ Address convertToAtomicIntPointer(Address addr) const;
+
+ /// Copy an atomic r-value into atomic-layout memory.
+ void emitCopyIntoMemory(RValue rvalue) const;
+
+ /// Project an l-value down to the value field.
+ LValue projectValue() const {
+ assert(lvalue.isSimple());
+ Address addr = getAtomicAddress();
+ if (hasPadding()) {
+ cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
+ }
+
+ assert(!cir::MissingFeatures::opTBAA());
+ return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
+ }
+
+ /// Creates temp alloca for intermediate operations on atomic value.
+ Address createTempAlloca() const;
+
+private:
+ bool requiresMemSetZero(mlir::Type ty) const;
+};
+} // namespace
+
+// This function emits any expression (scalar, complex, or aggregate)
+// into a temporary alloca.
+static Address emitValToTemp(CIRGenFunction &cgf, Expr *e) {
+ Address declPtr = cgf.createMemTemp(
+ e->getType(), cgf.getLoc(e->getSourceRange()), ".atomictmp");
+ cgf.emitAnyExprToMem(e, declPtr, e->getType().getQualifiers(),
+ /*Init*/ true);
+ return declPtr;
+}
+
+/// Does a store of the given IR type modify the full expected width?
+static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
+ uint64_t expectedSize) {
+ return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
+}
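+
+// For instance (illustrative): on x86-64 an _Atomic(long double) occupies
+// 128 bits, but an x86_fp80 store only writes 80 of them, so the type is
+// not "full size" and the slot must be zeroed before initialization.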
+
+/// Does the atomic type require memsetting to zero before initialization?
+///
+/// The IR type is provided as a way of making certain queries faster.
+bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
+ // If the atomic type has size padding, we definitely need a memset.
+ if (hasPadding())
+ return true;
+
+ // Otherwise, do some simple heuristics to try to avoid it:
+ switch (getEvaluationKind()) {
+ // For scalars and complexes, check whether the store size of the
+ // type uses the full size.
+ case cir::TEK_Scalar:
+ return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
+ case cir::TEK_Complex:
+ cgf.cgm.errorNYI(loc, "AtomicInfo::requiresMemSetZero: complex type");
+ return false;
+
+ // Padding in structs has an undefined bit pattern. User beware.
+ case cir::TEK_Aggregate:
+ return false;
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
+Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
+ mlir::Type ty = addr.getElementType();
+ uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
+ if (sourceSizeInBits != atomicSizeInBits) {
+ cgf.cgm.errorNYI(
+ loc,
+ "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
+ }
+
+ return castToAtomicIntPointer(addr);
+}
+
+Address AtomicInfo::createTempAlloca() const {
+ Address tempAlloca = cgf.createMemTemp(
+ (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
+ : atomicTy,
+ getAtomicAlignment(), loc, "atomic-temp");
+
+ // Cast to pointer to value type for bitfields.
+ if (lvalue.isBitField()) {
+ cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
+ }
+
+ return tempAlloca;
+}
+
+Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
+ auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
+ // Don't bother with int casts if the integer size is the same.
+ if (intTy && intTy.getWidth() == atomicSizeInBits)
+ return addr;
+ auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
+ return addr.withElementType(cgf.getBuilder(), ty);
+}
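+
+// Sketch of the effect (illustrative): for `_Atomic(float) f;` the address
+// of f has element type !cir.float; castToAtomicIntPointer rebinds it to a
+// 32-bit unsigned integer element type so the atomic operation is emitted
+// on an integer of the full atomic width.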
+
+bool AtomicInfo::emitMemSetZeroIfNecessary() const {
+ assert(lvalue.isSimple());
+ Address addr = lvalue.getAddress();
+ if (!requiresMemSetZero(addr.getElementType()))
+ return false;
+
+ cgf.cgm.errorNYI(loc,
+ "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
+ return false;
+}
+
+/// Copy an r-value into memory as part of storing to an atomic type.
+/// This needs to create a bit-pattern suitable for atomic operations.
+void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
+ assert(lvalue.isSimple());
+
+ // If we have an r-value, the rvalue should be of the atomic type,
+ // which means that the caller is responsible for having zeroed
+ // any padding. Just do an aggregate copy of that type.
+ if (rvalue.isAggregate()) {
+ cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
+ return;
+ }
+
+ // Okay, otherwise we're copying stuff.
+
+ // Zero out the buffer if necessary.
+ emitMemSetZeroIfNecessary();
+
+ // Drill past the padding if present.
+ LValue tempLValue = projectValue();
+
+ // Okay, store the rvalue in.
+ if (rvalue.isScalar()) {
+ cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
+ } else {
+ cgf.cgm.errorNYI("copying complex into atomic lvalue");
+ }
+}
+
+static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
+ Address ptr, Address val1, uint64_t size,
+ cir::MemOrder order) {
+ std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
+ if (scopeModel) {
+ assert(!cir::MissingFeatures::atomicScope());
+ cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
+ return;
+ }
+
+ assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+ CIRGenBuilderTy &builder = cgf.getBuilder();
+ mlir::Location loc = cgf.getLoc(expr->getSourceRange());
+ auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
+
+ switch (expr->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("already handled!");
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load: {
+ cir::LoadOp load =
+ builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());
+
+ assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+ load->setAttr("mem_order", orderAttr);
+
+ builder.createStore(loc, load->getResult(0), dest);
+ return;
+ }
+
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__atomic_store: {
+ cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
+
+ assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+ builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
+ /*align=*/mlir::IntegerAttr{}, orderAttr);
+ return;
+ }
+
+ case AtomicExpr::AO__opencl_atomic_init:
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
+
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
+
+ case AtomicExpr::AO__opencl_atomic_load:
+ case AtomicExpr::AO__hip_atomic_load:
+ case AtomicExpr::AO__scoped_atomic_load_n:
+ case AtomicExpr::AO__scoped_atomic_load:
+
+ case AtomicExpr::AO__opencl_atomic_store:
+ case AtomicExpr::AO__hip_atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store_n:
+
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__hip_atomic_exchange:
+ case AtomicExpr::AO__opencl_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ case AtomicExpr::AO__scoped_atomic_exchange_n:
+ case AtomicExpr::AO__scoped_atomic_exchange:
+
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__scoped_atomic_add_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__hip_atomic_fetch_add:
+ case AtomicExpr::AO__opencl_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__scoped_atomic_fetch_add:
+
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__scoped_atomic_sub_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__hip_atomic_fetch_sub:
+ case AtomicExpr::AO__opencl_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__scoped_atomic_fetch_sub:
+
+ case AtomicExpr::AO__atomic_min_fetch:
+ case AtomicExpr::AO__scoped_atomic_min_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__hip_atomic_fetch_min:
+ case AtomicExpr::AO__opencl_atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__scoped_atomic_fetch_min:
+
+ case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__scoped_atomic_max_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__hip_atomic_fetch_max:
+ case AtomicExpr::AO__opencl_atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__scoped_atomic_fetch_max:
+
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__scoped_atomic_and_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__hip_atomic_fetch_and:
+ case AtomicExpr::AO__opencl_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__scoped_atomic_fetch_and:
+
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__scoped_atomic_or_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__hip_atomic_fetch_or:
+ case AtomicExpr::AO__opencl_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__scoped_atomic_fetch_or:
+
+ case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__scoped_atomic_xor_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__hip_atomic_fetch_xor:
+ case AtomicExpr::AO__opencl_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__scoped_atomic_fetch_xor:
+
+ case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__scoped_atomic_nand_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__scoped_atomic_fetch_nand:
+
+ case AtomicExpr::AO__atomic_test_and_set:
+
+ case AtomicExpr::AO__atomic_clear:
+ cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
+ break;
+ }
+}
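+
+// At this stage only plain atomic loads and stores are emitted inline. For
+// example (illustrative), `__c11_atomic_load(&x, __ATOMIC_SEQ_CST)` becomes
+// a cir.load carrying a mem_order attribute followed by a store into the
+// destination slot; the exchange, compare-exchange, and fetch-op forms
+// above all still fall through to errorNYI.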
+
+static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
+ if (!cir::isValidCIRAtomicOrderingCABI(order))
+ return false;
+ auto memOrder = static_cast<cir::MemOrder>(order);
+ if (isStore)
+ return memOrder != cir::MemOrder::Consume &&
+ memOrder != cir::MemOrder::Acquire &&
+ memOrder != cir::MemOrder::AcquireRelease;
+ if (isLoad)
+ return memOrder != cir::MemOrder::Release &&
+ memOrder != cir::MemOrder::AcquireRelease;
+ return true;
+}
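+
+// Examples of the C11 rules encoded above (illustrative):
+//   __c11_atomic_store(&a, 1, __ATOMIC_ACQUIRE); // invalid: acquire store
+//   __c11_atomic_load(&a, __ATOMIC_RELEASE);     // invalid: release load
+//   __c11_atomic_load(&a, __ATOMIC_SEQ_CST);     // valid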
+
+RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
+ QualType atomicTy = e->getPtr()->getType()->getPointeeType();
+ QualType memTy = atomicTy;
+ if (const auto *ty = atomicTy->getAs<AtomicType>())
+ memTy = ty->getValueType();
+
+ Address val1 = Address::invalid();
+ Address dest = Address::invalid();
+ Address ptr = emitPointerWithAlignment(e->getPtr());
+
+ assert(!cir::MissingFeatures::openCL());
+ if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
+ LValue lvalue = makeAddrLValue(ptr, atomicTy);
+ emitAtomicInit(e->getVal1(), lvalue);
+ return RValue::get(nullptr);
+ }
+
+ TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
+ uint64_t size = typeInfo.Width.getQuantity();
+
+ Expr::EvalResult orderConst;
+ mlir::Value order;
+ if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
+ order = emitScalarExpr(e->getOrder());
+
+ bool shouldCastToIntPtrTy = true;
+
+ switch (e->getOp()) {
+ default:
+ cgm.errorNYI(e->getSourceRange(), "atomic op NYI");
+ return RValue::get(nullptr);
+
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("already handled above with emitAtomicInit");
+
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__c11_atomic_load:
+ break;
+
+ case AtomicExpr::AO__atomic_load:
+ dest = emitPointerWithAlignment(e->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_store:
+ val1 = emitPointerWithAlignment(e->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__c11_atomic_store:
+ val1 = emitValToTemp(*this, e->getVal1());
+ break;
+ }
+
+ QualType resultTy = e->getType().getUnqualifiedType();
+
+ // The inlined atomics only function on iN types, where N is a power of 2. We
+ // need to make sure (via temporaries if necessary) that all incoming values
+ // are compatible.
+ LValue atomicValue = makeAddrLValue(ptr, atomicTy);
+ AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));
+
+ if (shouldCastToIntPtrTy) {
+ ptr = atomics.castToAtomicIntPointer(ptr);
+ if (val1.isValid())
+ val1 = atomics.convertToAtomicIntPointer(val1);
+ }
+ if (dest.isValid()) {
+ if (shouldCastToIntPtrTy)
+ dest = atomics.castToAtomicIntPointer(dest);
+ } else if (!resultTy->isVoidType()) {
+ dest = atomics.createTempAlloca();
+ if (shouldCastToIntPtrTy)
+ dest = atomics.castToAtomicIntPointer(dest);
+ }
+
+ bool powerOf2Size = (size & (size - 1)) == 0;
+ bool useLibCall = !powerOf2Size || (size > 16);
+
+ // For atomics larger than 16 bytes, emit a libcall from the frontend. This
+ // avoids the overhead of dealing with excessively-large value types in IR.
+  // Non-power-of-2 sizes also lower to a libcall here, as they are not currently
+ // permitted in IR instructions (although that constraint could be relaxed in
+ // the future). For other cases where a libcall is required on a given
+ // platform, we let the backend handle it (this includes handling for all of
+ // the size-optimized libcall variants, which are only valid up to 16 bytes.)
+ //
+ // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
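+  //
+  // For example (illustrative): sizes 4 and 16 are handled inline, size 12
+  // goes to a libcall (not a power of 2), and size 32 goes to a libcall
+  // (larger than 16 bytes).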
+ if (useLibCall) {
+ assert(!cir::MissingFeatures::atomicUseLibCall());
+ cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: emit atomic lib call");
+ return RValue::get(nullptr);
+ }
+
+ bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
+ e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
+ e->getOp() == AtomicExpr::AO__hip_atomic_store ||
+ e->getOp() == AtomicExpr::AO__atomic_store ||
+ e->getOp() == AtomicExpr::AO__atomic_store_n ||
+ e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
+ e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
+ e->getOp() == AtomicExpr::AO__atomic_clear;
+ bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
+ e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
+ e->getOp() == AtomicExpr::AO__hip_atomic_load ||
+ e->getOp() == AtomicExpr::AO__atomic_load ||
+ e->getOp() == AtomicExpr::AO__atomic_load_n ||
+ e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
+ e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
+
+ if (!order) {
+ // We have evaluated the memory order as an integer constant in orderConst.
+ // We should not ever get to a case where the ordering isn't a valid CABI
+ // value, but it's hard to enforce that in general.
+ uint64_t ord = orderConst.Val.getInt().getZExtValue();
+ if (isMemOrderValid(ord, isStore, isLoad))
+ emitAtomicOp(*this, e, dest, ptr, val1, size,
+ static_cast<cir::MemOrder>(ord));
+ } else {
+ assert(!cir::MissingFeatures::atomicExpr());
+ cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: dynamic memory order");
+ return RValue::get(nullptr);
+ }
+
+ if (resultTy->isVoidType())
+ return RValue::get(nullptr);
+
+ return convertTempToRValue(
+ dest.withElementType(builder, convertTypeForMem(resultTy)), resultTy,
+ e->getExprLoc());
+}
+
+void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
+ AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));
+
+ switch (atomics.getEvaluationKind()) {
+ case cir::TEK_Scalar: {
+ mlir::Value value = emitScalarExpr(init);
+ atomics.emitCopyIntoMemory(RValue::get(value));
+ return;
+ }
+
+ case cir::TEK_Complex:
+ cgm.errorNYI(init->getSourceRange(), "emitAtomicInit: complex type");
+ return;
+
+ case cir::TEK_Aggregate:
+ cgm.errorNYI(init->getSourceRange(), "emitAtomicInit: aggregate type");
+ return;
+ }
+
+ llvm_unreachable("bad evaluation kind");
+}
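+
+// For example (illustrative): `__c11_atomic_init(&x, 42)` on an _Atomic(int)
+// takes the TEK_Scalar path above, emitting the scalar 42 and storing it
+// through the projected value l-value; complex and aggregate initializers
+// still report NYI.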
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp
index 4a5a1dd5..755c76c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "CIRGenBuilder.h"
+#include "llvm/ADT/TypeSwitch.h"
using namespace clang::CIRGen;
@@ -66,6 +67,69 @@ clang::CIRGen::CIRGenBuilderTy::getConstFP(mlir::Location loc, mlir::Type t,
return create<cir::ConstantOp>(loc, cir::FPAttr::get(t, fpVal));
}
+void CIRGenBuilderTy::computeGlobalViewIndicesFromFlatOffset(
+ int64_t offset, mlir::Type ty, cir::CIRDataLayout layout,
+ llvm::SmallVectorImpl<int64_t> &indices) {
+ if (!offset)
+ return;
+
+ auto getIndexAndNewOffset =
+ [](int64_t offset, int64_t eltSize) -> std::pair<int64_t, int64_t> {
+ int64_t divRet = offset / eltSize;
+ if (divRet < 0)
+      divRet -= 1; // floor the division so the remainder is non-negative
+ int64_t modRet = offset - (divRet * eltSize);
+ return {divRet, modRet};
+ };
+
+ mlir::Type subType =
+ llvm::TypeSwitch<mlir::Type, mlir::Type>(ty)
+ .Case<cir::ArrayType>([&](auto arrayTy) {
+ int64_t eltSize = layout.getTypeAllocSize(arrayTy.getElementType());
+ const auto [index, newOffset] =
+ getIndexAndNewOffset(offset, eltSize);
+ indices.push_back(index);
+ offset = newOffset;
+ return arrayTy.getElementType();
+ })
+ .Case<cir::RecordType>([&](auto recordTy) {
+ ArrayRef<mlir::Type> elts = recordTy.getMembers();
+ int64_t pos = 0;
+ for (size_t i = 0; i < elts.size(); ++i) {
+ int64_t eltSize =
+ (int64_t)layout.getTypeAllocSize(elts[i]).getFixedValue();
+ unsigned alignMask = layout.getABITypeAlign(elts[i]).value() - 1;
+ if (recordTy.getPacked())
+ alignMask = 0;
+              // A union's fields all share the same offset, so there is no
+              // need to change pos here; we just need to find an eltSize
+              // that is greater than the required offset. The same is true
+              // for the similar union check below.
+ if (!recordTy.isUnion())
+ pos = (pos + alignMask) & ~alignMask;
+ assert(offset >= 0);
+ if (offset < pos + eltSize) {
+ indices.push_back(i);
+ offset -= pos;
+ return elts[i];
+ }
+ // No need to update pos here, see the comment above.
+ if (!recordTy.isUnion())
+ pos += eltSize;
+ }
+ llvm_unreachable("offset was not found within the record");
+ })
+ .Default([](mlir::Type otherTy) {
+ llvm_unreachable("unexpected type");
+ return otherTy; // Even though this is unreachable, we need to
+ // return a type to satisfy the return type of the
+ // lambda.
+ });
+
+ assert(subType);
+ computeGlobalViewIndicesFromFlatOffset(offset, subType, layout, indices);
+}
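+
+// Worked example (illustrative): given
+//   struct S { int a; int b[4]; };
+// with 4-byte ints, a flat offset of 12 first selects member #1 (the array,
+// which starts at byte 4), leaving offset 8 within it, which then selects
+// element #2 of the array, so the resulting indices are {1, 2}.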
+
// This can't be defined in Address.h because that file is included by
// CIRGenBuilder.h
Address Address::withElementType(CIRGenBuilderTy &builder,
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index ff8e121..d5cb6d4 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -12,6 +12,7 @@
#include "Address.h"
#include "CIRGenRecordLayout.h"
#include "CIRGenTypeCache.h"
+#include "clang/CIR/Dialect/IR/CIRDataLayout.h"
#include "clang/CIR/Interfaces/CIRTypeInterfaces.h"
#include "clang/CIR/MissingFeatures.h"
@@ -59,6 +60,23 @@ public:
trailingZerosNum);
}
+ cir::ConstRecordAttr getAnonConstRecord(mlir::ArrayAttr arrayAttr,
+ bool packed = false,
+ bool padded = false,
+ mlir::Type ty = {}) {
+ llvm::SmallVector<mlir::Type, 4> members;
+ for (auto &f : arrayAttr) {
+ auto ta = mlir::cast<mlir::TypedAttr>(f);
+ members.push_back(ta.getType());
+ }
+
+ if (!ty)
+ ty = getAnonRecordTy(members, packed, padded);
+
+ auto sTy = mlir::cast<cir::RecordType>(ty);
+ return cir::ConstRecordAttr::get(sTy, arrayAttr);
+ }
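+
+  // Illustrative use (not from this patch): given an ArrayAttr holding typed
+  // attributes for {1 : !s32i, 2.0 : !cir.float}, getAnonConstRecord derives
+  // the member types from the attributes, builds an anonymous
+  // cir::RecordType {!s32i, !cir.float}, and wraps the ArrayAttr in a
+  // cir::ConstRecordAttr of that type.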
+
std::string getUniqueAnonRecordName() { return getUniqueRecordName("anon"); }
std::string getUniqueRecordName(const std::string &baseName) {
@@ -83,6 +101,10 @@ public:
llvm_unreachable("Unsupported format for long double");
}
+ mlir::Type getPtrToVPtrType() {
+ return getPointerTo(cir::VPtrType::get(getContext()));
+ }
+
/// Get a CIR record kind from a AST declaration tag.
cir::RecordType::RecordKind getRecordKind(const clang::TagTypeKind kind) {
switch (kind) {
@@ -244,6 +266,17 @@ public:
}
bool isInt(mlir::Type i) { return mlir::isa<cir::IntType>(i); }
+ // Fetch the type representing a pointer to unsigned int8 values.
+ cir::PointerType getUInt8PtrTy() { return typeCache.UInt8PtrTy; }
+
+ /// Get a CIR anonymous record type.
+ cir::RecordType getAnonRecordTy(llvm::ArrayRef<mlir::Type> members,
+ bool packed = false, bool padded = false) {
+ assert(!cir::MissingFeatures::astRecordDeclAttr());
+ auto kind = cir::RecordType::RecordKind::Struct;
+ return getType<cir::RecordType>(members, packed, padded, kind);
+ }
+
//
// Constant creation helpers
// -------------------------
@@ -251,11 +284,14 @@ public:
cir::ConstantOp getSInt32(int32_t c, mlir::Location loc) {
return getConstantInt(loc, getSInt32Ty(), c);
}
+ cir::ConstantOp getUInt32(uint32_t c, mlir::Location loc) {
+ return getConstantInt(loc, getUInt32Ty(), c);
+ }
// Creates constant nullptr for pointer type ty.
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) {
assert(!cir::MissingFeatures::targetCodeGenInfoGetNullPointer());
- return create<cir::ConstantOp>(loc, getConstPtrAttr(ty, 0));
+ return cir::ConstantOp::create(*this, loc, getConstPtrAttr(ty, 0));
}
mlir::Value createNeg(mlir::Value value) {
@@ -264,7 +300,7 @@ public:
// Source is a unsigned integer: first cast it to signed.
if (intTy.isUnsigned())
value = createIntCast(value, getSIntNTy(intTy.getWidth()));
- return create<cir::UnaryOp>(value.getLoc(), value.getType(),
+ return cir::UnaryOp::create(*this, value.getLoc(), value.getType(),
cir::UnaryOpKind::Minus, value);
}
@@ -276,8 +312,8 @@ public:
mlir::Value createFloatingCast(mlir::Value v, mlir::Type destType) {
assert(!cir::MissingFeatures::fpConstraints());
- return create<cir::CastOp>(v.getLoc(), destType, cir::CastKind::floating,
- v);
+ return cir::CastOp::create(*this, v.getLoc(), destType,
+ cir::CastKind::floating, v);
}
mlir::Value createFSub(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
@@ -285,7 +321,7 @@ public:
assert(!cir::MissingFeatures::fpConstraints());
assert(!cir::MissingFeatures::fastMathFlags());
- return create<cir::BinOp>(loc, cir::BinOpKind::Sub, lhs, rhs);
+ return cir::BinOp::create(*this, loc, cir::BinOpKind::Sub, lhs, rhs);
}
mlir::Value createFAdd(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
@@ -293,21 +329,21 @@ public:
assert(!cir::MissingFeatures::fpConstraints());
assert(!cir::MissingFeatures::fastMathFlags());
- return create<cir::BinOp>(loc, cir::BinOpKind::Add, lhs, rhs);
+ return cir::BinOp::create(*this, loc, cir::BinOpKind::Add, lhs, rhs);
}
mlir::Value createFMul(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
assert(!cir::MissingFeatures::metaDataNode());
assert(!cir::MissingFeatures::fpConstraints());
assert(!cir::MissingFeatures::fastMathFlags());
- return create<cir::BinOp>(loc, cir::BinOpKind::Mul, lhs, rhs);
+ return cir::BinOp::create(*this, loc, cir::BinOpKind::Mul, lhs, rhs);
}
mlir::Value createFDiv(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
assert(!cir::MissingFeatures::metaDataNode());
assert(!cir::MissingFeatures::fpConstraints());
assert(!cir::MissingFeatures::fastMathFlags());
- return create<cir::BinOp>(loc, cir::BinOpKind::Div, lhs, rhs);
+ return cir::BinOp::create(*this, loc, cir::BinOpKind::Div, lhs, rhs);
}
Address createBaseClassAddr(mlir::Location loc, Address addr,
@@ -317,8 +353,9 @@ public:
return addr;
auto ptrTy = getPointerTo(destType);
- auto baseAddr = create<cir::BaseClassAddrOp>(
- loc, ptrTy, addr.getPointer(), mlir::APInt(64, offset), assumeNotNull);
+ auto baseAddr =
+ cir::BaseClassAddrOp::create(*this, loc, ptrTy, addr.getPointer(),
+ mlir::APInt(64, offset), assumeNotNull);
return Address(baseAddr, destType, addr.getAlignment());
}
@@ -337,15 +374,19 @@ public:
cir::LoadOp createLoad(mlir::Location loc, Address addr,
bool isVolatile = false) {
mlir::IntegerAttr align = getAlignmentAttr(addr.getAlignment());
- return create<cir::LoadOp>(loc, addr.getPointer(), /*isDeref=*/false,
- align);
+ return cir::LoadOp::create(*this, loc, addr.getPointer(), /*isDeref=*/false,
+ /*alignment=*/align,
+ /*mem_order=*/cir::MemOrderAttr{});
}
cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst,
- mlir::IntegerAttr align = {}) {
+ bool isVolatile = false,
+ mlir::IntegerAttr align = {},
+ cir::MemOrderAttr order = {}) {
if (!align)
align = getAlignmentAttr(dst.getAlignment());
- return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), align);
+ return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), isVolatile,
+ align, order);
}
/// Create a cir.complex.real_ptr operation that derives a pointer to the real
@@ -353,8 +394,8 @@ public:
mlir::Value createComplexRealPtr(mlir::Location loc, mlir::Value value) {
auto srcPtrTy = mlir::cast<cir::PointerType>(value.getType());
auto srcComplexTy = mlir::cast<cir::ComplexType>(srcPtrTy.getPointee());
- return create<cir::ComplexRealPtrOp>(
- loc, getPointerTo(srcComplexTy.getElementType()), value);
+ return cir::ComplexRealPtrOp::create(
+ *this, loc, getPointerTo(srcComplexTy.getElementType()), value);
}
Address createComplexRealPtr(mlir::Location loc, Address addr) {
@@ -368,8 +409,8 @@ public:
mlir::Value createComplexImagPtr(mlir::Location loc, mlir::Value value) {
auto srcPtrTy = mlir::cast<cir::PointerType>(value.getType());
auto srcComplexTy = mlir::cast<cir::ComplexType>(srcPtrTy.getPointee());
- return create<cir::ComplexImagPtrOp>(
- loc, getPointerTo(srcComplexTy.getElementType()), value);
+ return cir::ComplexImagPtrOp::create(
+ *this, loc, getPointerTo(srcComplexTy.getElementType()), value);
}
Address createComplexImagPtr(mlir::Location loc, Address addr) {
@@ -390,12 +431,20 @@ public:
mlir::Value maybeBuildArrayDecay(mlir::Location loc, mlir::Value arrayPtr,
mlir::Type eltTy);
+  // Convert a byte offset to a sequence of high-level indices suitable for
+  // a GlobalViewAttr. Ideally we shouldn't deal with low-level offsets at
+  // all, but currently some parts of the Clang AST, which we don't want to
+  // touch just yet, return them.
+ void computeGlobalViewIndicesFromFlatOffset(
+ int64_t offset, mlir::Type ty, cir::CIRDataLayout layout,
+ llvm::SmallVectorImpl<int64_t> &indices);
+
/// Creates a versioned global variable. If the symbol is already taken, an ID
/// will be appended to the symbol. The returned global must always be queried
/// for its name so it can be referenced correctly.
[[nodiscard]] cir::GlobalOp
createVersionedGlobal(mlir::ModuleOp module, mlir::Location loc,
- mlir::StringRef name, mlir::Type type,
+ mlir::StringRef name, mlir::Type type, bool isConstant,
cir::GlobalLinkageKind linkage) {
// Create a unique name if the given name is already taken.
std::string uniqueName;
@@ -404,7 +453,7 @@ public:
else
uniqueName = name.str();
- return createGlobal(module, loc, uniqueName, type, linkage);
+ return createGlobal(module, loc, uniqueName, type, isConstant, linkage);
}
mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType,
@@ -419,9 +468,9 @@ public:
useVolatile ? cir::IntType::get(storageType.getContext(),
info.volatileStorageSize, info.isSigned)
: storageType;
- return create<cir::SetBitfieldOp>(
- loc, resultType, dstAddr.getPointer(), storageType, src, info.name,
- info.size, offset, info.isSigned, isLvalueVolatile,
+ return cir::SetBitfieldOp::create(
+ *this, loc, resultType, dstAddr.getPointer(), storageType, src,
+ info.name, info.size, offset, info.isSigned, isLvalueVolatile,
dstAddr.getAlignment().getAsAlign().value());
}
@@ -437,7 +486,7 @@ public:
useVolatile ? cir::IntType::get(storageType.getContext(),
info.volatileStorageSize, info.isSigned)
: storageType;
- return create<cir::GetBitfieldOp>(loc, resultType, addr.getPointer(),
+ return cir::GetBitfieldOp::create(*this, loc, resultType, addr.getPointer(),
storageType, info.name, info.size, offset,
info.isSigned, isLvalueVolatile,
addr.getAlignment().getAsAlign().value());
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 7767bf4..b6a6299 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -72,6 +72,19 @@ RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
return RValue::get(r);
}
+template <class Operation>
+static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf,
+ const CallExpr &e) {
+ mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
+
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+ assert(!cir::MissingFeatures::fpConstraints());
+
+ auto call =
+ Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
+ return RValue::get(call->getResult(0));
+}
+
RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
const CallExpr *e,
ReturnValueSlot returnValue) {
@@ -112,6 +125,32 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
default:
break;
+ // C stdarg builtins.
+ case Builtin::BI__builtin_stdarg_start:
+ case Builtin::BI__builtin_va_start:
+ case Builtin::BI__va_start: {
+ mlir::Value vaList = builtinID == Builtin::BI__va_start
+ ? emitScalarExpr(e->getArg(0))
+ : emitVAListRef(e->getArg(0)).getPointer();
+ mlir::Value count = emitScalarExpr(e->getArg(1));
+ emitVAStart(vaList, count);
+ return {};
+ }
+
+ case Builtin::BI__builtin_va_end:
+ emitVAEnd(emitVAListRef(e->getArg(0)).getPointer());
+ return {};
+
+ case Builtin::BIfabs:
+ case Builtin::BIfabsf:
+ case Builtin::BIfabsl:
+ case Builtin::BI__builtin_fabs:
+ case Builtin::BI__builtin_fabsf:
+ case Builtin::BI__builtin_fabsf16:
+ case Builtin::BI__builtin_fabsl:
+ case Builtin::BI__builtin_fabsf128:
+ return emitUnaryMaybeConstrainedFPBuiltin<cir::FAbsOp>(*this, *e);
+
case Builtin::BI__assume:
case Builtin::BI__builtin_assume: {
if (e->getArg(0)->HasSideEffects(getContext()))
@@ -129,6 +168,24 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
return RValue::get(nullptr);
}
+ case Builtin::BI__builtin_assume_aligned: {
+ const Expr *ptrExpr = e->getArg(0);
+ mlir::Value ptrValue = emitScalarExpr(ptrExpr);
+ mlir::Value offsetValue =
+ (e->getNumArgs() > 2) ? emitScalarExpr(e->getArg(2)) : nullptr;
+
+ std::optional<llvm::APSInt> alignment =
+ e->getArg(1)->getIntegerConstantExpr(getContext());
+ assert(alignment.has_value() &&
+ "the second argument to __builtin_assume_aligned must be an "
+ "integral constant expression");
+
+ mlir::Value result =
+ emitAlignmentAssumption(ptrValue, ptrExpr, ptrExpr->getExprLoc(),
+ alignment->getSExtValue(), offsetValue);
+ return RValue::get(result);
+ }
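+  // For reference (illustrative): `void *q = __builtin_assume_aligned(p, 64)`
+  // evaluates p, records a 64-byte alignment assumption on it (adjusted by
+  // the optional third offset argument), and yields the pointer unchanged.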
+
case Builtin::BI__builtin_complex: {
mlir::Value real = emitScalarExpr(e->getArg(0));
mlir::Value imag = emitScalarExpr(e->getArg(1));
@@ -271,6 +328,20 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
case Builtin::BI__builtin_rotateright64:
return emitRotate(e, /*isRotateLeft=*/false);
+ case Builtin::BI__builtin_return_address:
+ case Builtin::BI__builtin_frame_address: {
+ mlir::Location loc = getLoc(e->getExprLoc());
+ llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
+ if (builtinID == Builtin::BI__builtin_return_address) {
+ return RValue::get(cir::ReturnAddrOp::create(
+ builder, loc,
+ builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
+ }
+ return RValue::get(cir::FrameAddrOp::create(
+ builder, loc,
+ builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
+ }
+
case Builtin::BI__builtin_trap:
emitTrap(loc, /*createNewBlock=*/true);
return RValue::get(nullptr);
@@ -320,3 +391,25 @@ mlir::Value CIRGenFunction::emitCheckedArgForAssume(const Expr *e) {
"emitCheckedArgForAssume: sanitizers are NYI");
return {};
}
+
+void CIRGenFunction::emitVAStart(mlir::Value vaList, mlir::Value count) {
+ // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this
+ // early, defer to LLVM lowering.
+ cir::VAStartOp::create(builder, vaList.getLoc(), vaList, count);
+}
+
+void CIRGenFunction::emitVAEnd(mlir::Value vaList) {
+ cir::VAEndOp::create(builder, vaList.getLoc(), vaList);
+}
+
+// FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. By
+// default this lowers to llvm.va_arg, which is incomplete and not ABI-compliant
+// on most targets, so cir.va_arg will need some ABI handling in LoweringPrepare.
+mlir::Value CIRGenFunction::emitVAArg(VAArgExpr *ve) {
+ assert(!cir::MissingFeatures::msabi());
+ assert(!cir::MissingFeatures::vlas());
+ mlir::Location loc = cgm.getLoc(ve->getExprLoc());
+ mlir::Type type = convertType(ve->getType());
+ mlir::Value vaList = emitVAListRef(ve->getSubExpr()).getPointer();
+ return cir::VAArgOp::create(builder, loc, type, vaList);
+}
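+
+// Usage sketch (illustrative, not part of this patch): for
+//   int first(int n, ...) {
+//     va_list ap;
+//     va_start(ap, n);
+//     int v = va_arg(ap, int);
+//     va_end(ap);
+//     return v;
+//   }
+// CIRGen emits cir::VAStartOp and cir::VAEndOp on the address of ap and a
+// cir::VAArgOp for the read; LoweringPrepare is expected to expand the
+// latter per the target ABI.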
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index 5929568..7c62030 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -37,6 +37,12 @@ public:
void setCXXABIThisValue(CIRGenFunction &cgf, mlir::Value thisPtr);
+ /// Emit the code to initialize hidden members required to handle virtual
+ /// inheritance, if needed by the ABI.
+ virtual void
+ initializeHiddenVirtualInheritanceMembers(CIRGenFunction &cgf,
+ const CXXRecordDecl *rd) {}
+
/// Emit a single constructor/destructor with the gen type from a C++
/// constructor/destructor Decl.
virtual void emitCXXStructor(clang::GlobalDecl gd) = 0;
@@ -47,9 +53,11 @@ public:
}
/// Emit the ABI-specific prolog for the function
- virtual void emitInstanceFunctionProlog(SourceLocation Loc,
+ virtual void emitInstanceFunctionProlog(SourceLocation loc,
CIRGenFunction &cgf) = 0;
+ virtual void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) = 0;
+
/// Get the type of the implicit "this" parameter used by a method. May return
/// zero if no specific type is applicable, e.g. if the ABI expects the "this"
/// parameter to point to some artificial offset in a complete object due to
@@ -63,6 +71,16 @@ public:
/// parameter.
virtual bool needsVTTParameter(clang::GlobalDecl gd) { return false; }
+ /// Perform ABI-specific "this" argument adjustment required prior to
+ /// a call of a virtual function.
+  /// The "virtualCall" argument is true iff the call itself is virtual.
+ virtual Address adjustThisArgumentForVirtualFunctionCall(CIRGenFunction &cgf,
+ clang::GlobalDecl gd,
+ Address thisPtr,
+ bool virtualCall) {
+ return thisPtr;
+ }
+
/// Build a parameter variable suitable for 'this'.
void buildThisParam(CIRGenFunction &cgf, FunctionArgList &params);
@@ -80,6 +98,15 @@ public:
bool forVirtualBase, bool delegating,
Address thisAddr, QualType thisTy) = 0;
+ /// Checks if ABI requires extra virtual offset for vtable field.
+ virtual bool
+ isVirtualOffsetNeededForVTableField(CIRGenFunction &cgf,
+ CIRGenFunction::VPtr vptr) = 0;
+
+ /// Emits the VTable definitions required for the given record type.
+ virtual void emitVTableDefinitions(CIRGenVTables &cgvt,
+ const CXXRecordDecl *rd) = 0;
+
/// Returns true if the given destructor type should be emitted as a linkonce
/// delegating thunk, regardless of whether the dtor is defined in this TU or
/// not.
@@ -90,6 +117,33 @@ public:
getCXXDestructorLinkage(GVALinkage linkage, const CXXDestructorDecl *dtor,
CXXDtorType dt) const;
+ /// Get the address of the vtable for the given record decl which should be
+ /// used for the vptr at the given offset in RD.
+ virtual cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *rd,
+ CharUnits vptrOffset) = 0;
+
+ /// Build a virtual function pointer in the ABI-specific way.
+ virtual CIRGenCallee getVirtualFunctionPointer(CIRGenFunction &cgf,
+ clang::GlobalDecl gd,
+ Address thisAddr,
+ mlir::Type ty,
+ SourceLocation loc) = 0;
+
+ /// Get the address point of the vtable for the given base subobject.
+ virtual mlir::Value
+ getVTableAddressPoint(BaseSubobject base,
+ const CXXRecordDecl *vtableClass) = 0;
+
+ /// Get the address point of the vtable for the given base subobject while
+ /// building a constructor or a destructor.
+ virtual mlir::Value getVTableAddressPointInStructor(
+ CIRGenFunction &cgf, const CXXRecordDecl *vtableClass, BaseSubobject base,
+ const CXXRecordDecl *nearestVBase) = 0;
+
+ /// Checks if ABI requires to initialize vptrs for given dynamic class.
+ virtual bool
+ doStructorsInitializeVPtrs(const clang::CXXRecordDecl *vtableClass) = 0;
+
/// Returns true if the given constructor or destructor is one of the kinds
/// that the ABI says returns 'this' (only applies when called non-virtually
/// for destructors).
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
index 67d8988..c9e4ed9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
@@ -75,15 +75,14 @@ static MemberCallInfo commonBuildCXXMemberOrOperatorCall(
RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr(
const CallExpr *ce, const CXXMethodDecl *md, ReturnValueSlot returnValue,
- bool hasQualifier, NestedNameSpecifier *qualifier, bool isArrow,
+ bool hasQualifier, NestedNameSpecifier qualifier, bool isArrow,
const Expr *base) {
assert(isa<CXXMemberCallExpr>(ce) || isa<CXXOperatorCallExpr>(ce));
- if (md->isVirtual()) {
- cgm.errorNYI(ce->getSourceRange(),
- "emitCXXMemberOrOperatorMemberCallExpr: virtual call");
- return RValue::get(nullptr);
- }
+ // Compute the object pointer.
+ bool canUseVirtualCall = md->isVirtual() && !hasQualifier;
+ const CXXMethodDecl *devirtualizedMethod = nullptr;
+ assert(!cir::MissingFeatures::devirtualizeMemberFunction());
// Note on trivial assignment
// --------------------------
@@ -127,7 +126,8 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr(
return RValue::get(nullptr);
// Compute the function type we're calling
- const CXXMethodDecl *calleeDecl = md;
+ const CXXMethodDecl *calleeDecl =
+ devirtualizedMethod ? devirtualizedMethod : md;
const CIRGenFunctionInfo *fInfo = nullptr;
if (isa<CXXDestructorDecl>(calleeDecl)) {
cgm.errorNYI(ce->getSourceRange(),
@@ -137,25 +137,46 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr(
fInfo = &cgm.getTypes().arrangeCXXMethodDeclaration(calleeDecl);
- mlir::Type ty = cgm.getTypes().getFunctionType(*fInfo);
+ cir::FuncType ty = cgm.getTypes().getFunctionType(*fInfo);
assert(!cir::MissingFeatures::sanitizers());
assert(!cir::MissingFeatures::emitTypeCheck());
+ // C++ [class.virtual]p12:
+ // Explicit qualification with the scope operator (5.1) suppresses the
+ // virtual call mechanism.
+ //
+ // We also don't emit a virtual call if the base expression has a record type
+ // because then we know what the type is.
+ bool useVirtualCall = canUseVirtualCall && !devirtualizedMethod;
+
if (isa<CXXDestructorDecl>(calleeDecl)) {
cgm.errorNYI(ce->getSourceRange(),
"emitCXXMemberOrOperatorMemberCallExpr: destructor call");
return RValue::get(nullptr);
}
- assert(!cir::MissingFeatures::sanitizers());
- if (getLangOpts().AppleKext) {
- cgm.errorNYI(ce->getSourceRange(),
- "emitCXXMemberOrOperatorMemberCallExpr: AppleKext");
- return RValue::get(nullptr);
+ CIRGenCallee callee;
+ if (useVirtualCall) {
+ callee = CIRGenCallee::forVirtual(ce, md, thisPtr.getAddress(), ty);
+ } else {
+ assert(!cir::MissingFeatures::sanitizers());
+ if (getLangOpts().AppleKext) {
+ cgm.errorNYI(ce->getSourceRange(),
+ "emitCXXMemberOrOperatorMemberCallExpr: AppleKext");
+ return RValue::get(nullptr);
+ }
+
+ callee = CIRGenCallee::forDirect(cgm.getAddrOfFunction(calleeDecl, ty),
+ GlobalDecl(calleeDecl));
+ }
+
+ if (md->isVirtual()) {
+ Address newThisAddr =
+ cgm.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
+ *this, calleeDecl, thisPtr.getAddress(), useVirtualCall);
+ thisPtr.setAddress(newThisAddr);
}
- CIRGenCallee callee =
- CIRGenCallee::forDirect(cgm.getAddrOfFunction(md, ty), GlobalDecl(md));
return emitCXXMemberOrOperatorCall(
calleeDecl, callee, returnValue, thisPtr.getPointer(),
@@ -169,7 +190,7 @@ CIRGenFunction::emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e,
assert(md->isInstance() &&
"Trying to emit a member call expr on a static method!");
return emitCXXMemberOrOperatorMemberCallExpr(
- e, md, returnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
+ e, md, returnValue, /*HasQualifier=*/false, /*Qualifier=*/std::nullopt,
/*IsArrow=*/false, e->getArg(0));
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
index fc208ff..2585988 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
@@ -42,21 +42,41 @@ CIRGenFunctionInfo::create(CanQualType resultType,
return fi;
}
-cir::FuncType CIRGenTypes::getFunctionType(const CIRGenFunctionInfo &fi) {
- mlir::Type resultType = convertType(fi.getReturnType());
+cir::FuncType CIRGenTypes::getFunctionType(GlobalDecl gd) {
+ const CIRGenFunctionInfo &fi = arrangeGlobalDeclaration(gd);
+ return getFunctionType(fi);
+}
+
+cir::FuncType CIRGenTypes::getFunctionType(const CIRGenFunctionInfo &info) {
+ mlir::Type resultType = convertType(info.getReturnType());
SmallVector<mlir::Type, 8> argTypes;
- argTypes.reserve(fi.getNumRequiredArgs());
+ argTypes.reserve(info.getNumRequiredArgs());
- for (const CanQualType &argType : fi.requiredArguments())
+ for (const CanQualType &argType : info.requiredArguments())
argTypes.push_back(convertType(argType));
return cir::FuncType::get(argTypes,
(resultType ? resultType : builder.getVoidTy()),
- fi.isVariadic());
+ info.isVariadic());
+}
+
+cir::FuncType CIRGenTypes::getFunctionTypeForVTable(GlobalDecl gd) {
+ const CXXMethodDecl *md = cast<CXXMethodDecl>(gd.getDecl());
+ const FunctionProtoType *fpt = md->getType()->getAs<FunctionProtoType>();
+
+ if (!isFuncTypeConvertible(fpt))
+ cgm.errorNYI("getFunctionTypeForVTable: non-convertible function type");
+
+ return getFunctionType(gd);
}
CIRGenCallee CIRGenCallee::prepareConcreteCallee(CIRGenFunction &cgf) const {
- assert(!cir::MissingFeatures::opCallVirtual());
+ if (isVirtual()) {
+ const CallExpr *ce = getVirtualCallExpr();
+ return cgf.cgm.getCXXABI().getVirtualFunctionPointer(
+ cgf, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
+ ce ? ce->getBeginLoc() : SourceLocation());
+ }
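+  // (Illustrative) For a call like `p->f()` on a polymorphic type, this is
+  // where the vtable lookup is materialized: the callee recorded by
+  // forVirtual is resolved to a concrete function pointer via the C++ ABI.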
return *this;
}
@@ -203,9 +223,9 @@ CIRGenTypes::arrangeCXXStructorDeclaration(GlobalDecl gd) {
/// when calling a method pointer.
CanQualType CIRGenTypes::deriveThisType(const CXXRecordDecl *rd,
const CXXMethodDecl *md) {
- QualType recTy;
+ CanQualType recTy;
if (rd) {
- recTy = getASTContext().getTagDeclType(rd)->getCanonicalTypeInternal();
+ recTy = getASTContext().getCanonicalTagType(rd);
} else {
// This can happen with the MS ABI. It shouldn't need anything more than
// setting recTy to VoidTy here, but we're flagging it for now because we
@@ -215,9 +235,9 @@ CanQualType CIRGenTypes::deriveThisType(const CXXRecordDecl *rd,
}
if (md)
- recTy = getASTContext().getAddrSpaceQualType(
- recTy, md->getMethodQualifiers().getAddressSpace());
- return getASTContext().getPointerType(CanQualType::CreateUnsafe(recTy));
+ recTy = CanQualType::CreateUnsafe(getASTContext().getAddrSpaceQualType(
+ recTy, md->getMethodQualifiers().getAddressSpace()));
+ return getASTContext().getPointerType(recTy);
}
/// Arrange the CIR function layout for a value of the given function type, on
@@ -267,7 +287,7 @@ void CIRGenFunction::emitDelegateCallArg(CallArgList &args,
// Deactivate the cleanup for the callee-destructed param that was pushed.
assert(!cir::MissingFeatures::thunks());
if (type->isRecordType() &&
- type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
+ type->castAsRecordDecl()->isParamDestroyedInCallee() &&
param->needsDestruction(getContext())) {
cgm.errorNYI(param->getSourceRange(),
"emitDelegateCallArg: callee-destructed param");
@@ -668,7 +688,7 @@ void CIRGenFunction::emitCallArg(CallArgList &args, const clang::Expr *e,
// However, we still have to push an EH-only cleanup in case we unwind before
// we make it to the call.
if (argType->isRecordType() &&
- argType->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
+ argType->castAsRecordDecl()->isParamDestroyedInCallee()) {
assert(!cir::MissingFeatures::msabi());
cgm.errorNYI(e->getSourceRange(), "emitCallArg: msabi is NYI");
}
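
Both hunks in this file replace the two-step RecordType cast with the castAsRecordDecl helper; the spellings are equivalent when the type is known to be a record:

    // Before: go through the RecordType node explicitly.
    const RecordDecl *rd = type->castAs<RecordType>()->getDecl();
    // After: one call that collapses the chain.
    const RecordDecl *rd2 = type->castAsRecordDecl();
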
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h
index 28576a1..81cbb85 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.h
@@ -46,20 +46,33 @@ class CIRGenCallee {
enum class SpecialKind : uintptr_t {
Invalid,
Builtin,
+ PseudoDestructor,
+ Virtual,
- Last = Builtin,
+ Last = Virtual
};
struct BuiltinInfoStorage {
const clang::FunctionDecl *decl;
unsigned id;
};
+ struct PseudoDestructorInfoStorage {
+ const clang::CXXPseudoDestructorExpr *expr;
+ };
+ struct VirtualInfoStorage {
+ const clang::CallExpr *ce;
+ clang::GlobalDecl md;
+ Address addr;
+ cir::FuncType fTy;
+ };
SpecialKind kindOrFunctionPtr;
union {
CIRGenCalleeInfo abstractInfo;
BuiltinInfoStorage builtinInfo;
+ PseudoDestructorInfoStorage pseudoDestructorInfo;
+ VirtualInfoStorage virtualInfo;
};
explicit CIRGenCallee(SpecialKind kind) : kindOrFunctionPtr(kind) {}
@@ -98,6 +111,22 @@ public:
return result;
}
+ static CIRGenCallee
+ forPseudoDestructor(const clang::CXXPseudoDestructorExpr *expr) {
+ CIRGenCallee result(SpecialKind::PseudoDestructor);
+ result.pseudoDestructorInfo.expr = expr;
+ return result;
+ }
+
+ bool isPseudoDestructor() const {
+ return kindOrFunctionPtr == SpecialKind::PseudoDestructor;
+ }
+
+ const CXXPseudoDestructorExpr *getPseudoDestructorExpr() const {
+ assert(isPseudoDestructor());
+ return pseudoDestructorInfo.expr;
+ }
+
bool isOrdinary() const {
return uintptr_t(kindOrFunctionPtr) > uintptr_t(SpecialKind::Last);
}
@@ -107,7 +136,8 @@ public:
CIRGenCallee prepareConcreteCallee(CIRGenFunction &cgf) const;
CIRGenCalleeInfo getAbstractInfo() const {
- assert(!cir::MissingFeatures::opCallVirtual());
+ if (isVirtual())
+ return virtualInfo.md;
assert(isOrdinary());
return abstractInfo;
}
@@ -117,6 +147,39 @@ public:
return reinterpret_cast<mlir::Operation *>(kindOrFunctionPtr);
}
+ bool isVirtual() const { return kindOrFunctionPtr == SpecialKind::Virtual; }
+
+ static CIRGenCallee forVirtual(const clang::CallExpr *ce,
+ clang::GlobalDecl md, Address addr,
+ cir::FuncType fTy) {
+ CIRGenCallee result(SpecialKind::Virtual);
+ result.virtualInfo.ce = ce;
+ result.virtualInfo.md = md;
+ result.virtualInfo.addr = addr;
+ result.virtualInfo.fTy = fTy;
+ return result;
+ }
+
+ const clang::CallExpr *getVirtualCallExpr() const {
+ assert(isVirtual());
+ return virtualInfo.ce;
+ }
+
+ clang::GlobalDecl getVirtualMethodDecl() const {
+ assert(isVirtual());
+ return virtualInfo.md;
+ }
+
+ Address getThisAddress() const {
+ assert(isVirtual());
+ return virtualInfo.addr;
+ }
+
+ cir::FuncType getVirtualFunctionType() const {
+ assert(isVirtual());
+ return virtualInfo.fTy;
+ }
+
void setFunctionPointer(mlir::Operation *functionPtr) {
assert(isOrdinary());
kindOrFunctionPtr = SpecialKind(reinterpret_cast<uintptr_t>(functionPtr));
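
CIRGenCallee remains a tagged union: SpecialKind values at or below Last are tags, anything above is a real function pointer, and the active union member follows the tag. A hedged sketch of the new kinds in use, with 'pde', 'ce', 'gd', 'thisAddr', and 'fnTy' standing for values from a call-emission context:

    CIRGenCallee c1 = CIRGenCallee::forPseudoDestructor(pde);
    assert(c1.isPseudoDestructor() && !c1.isOrdinary());

    CIRGenCallee c2 = CIRGenCallee::forVirtual(ce, gd, thisAddr, fnTy);
    assert(c2.isVirtual() && c2.getVirtualMethodDecl() == gd);
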
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index 72b9d17..9a27932 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -14,6 +14,7 @@
#include "CIRGenFunction.h"
#include "CIRGenValue.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Type.h"
@@ -86,7 +87,7 @@ static void emitMemberInitializer(CIRGenFunction &cgf,
QualType fieldType = field->getType();
mlir::Value thisPtr = cgf.loadCXXThis();
- QualType recordTy = cgf.getContext().getTypeDeclType(classDecl);
+ CanQualType recordTy = cgf.getContext().getCanonicalTagType(classDecl);
// If a base constructor is being emitted, create an LValue that has the
// non-virtual alignment.
@@ -120,11 +121,36 @@ static void emitMemberInitializer(CIRGenFunction &cgf,
static bool isInitializerOfDynamicClass(const CXXCtorInitializer *baseInit) {
const Type *baseType = baseInit->getBaseClass();
- const auto *baseClassDecl =
- cast<CXXRecordDecl>(baseType->castAs<RecordType>()->getDecl());
+ const auto *baseClassDecl = baseType->castAsCXXRecordDecl();
return baseClassDecl->isDynamicClass();
}
+namespace {
+/// A visitor which checks whether an initializer uses 'this' in a
+/// way which requires the vtable to be properly set.
+struct DynamicThisUseChecker
+ : ConstEvaluatedExprVisitor<DynamicThisUseChecker> {
+ using super = ConstEvaluatedExprVisitor<DynamicThisUseChecker>;
+
+ bool usesThis = false;
+
+ DynamicThisUseChecker(const ASTContext &c) : super(c) {}
+
+ // Black-list all explicit and implicit references to 'this'.
+ //
+ // Do we need to worry about external references to 'this' derived
+ // from arbitrary code? If so, then anything which runs arbitrary
+ // external code might potentially access the vtable.
+ void VisitCXXThisExpr(const CXXThisExpr *e) { usesThis = true; }
+};
+} // end anonymous namespace
+
+static bool baseInitializerUsesThis(ASTContext &c, const Expr *init) {
+ DynamicThisUseChecker checker(c);
+ checker.Visit(init);
+ return checker.usesThis;
+}
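
A concrete source pattern the checker catches: any explicit or implicit mention of 'this' in a base initializer forces the vptrs to be installed before that initializer runs (see emitBaseInitializer below).

    struct Base {
      Base(void *owner);
      virtual ~Base();
    };
    struct Derived : Base {
      // The 'this' in the mem-initializer trips VisitCXXThisExpr, so
      // baseInitializerUsesThis returns true and the vtable pointers are
      // initialized first.
      Derived() : Base(this) {}
    };
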
+
/// Gets the address of a direct base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
@@ -160,18 +186,15 @@ void CIRGenFunction::emitBaseInitializer(mlir::Location loc,
Address thisPtr = loadCXXThisAddress();
const Type *baseType = baseInit->getBaseClass();
- const auto *baseClassDecl =
- cast<CXXRecordDecl>(baseType->castAs<RecordType>()->getDecl());
+ const auto *baseClassDecl = baseType->castAsCXXRecordDecl();
bool isBaseVirtual = baseInit->isBaseVirtual();
// If the initializer for the base (other than the constructor
// itself) accesses 'this' in any way, we need to initialize the
// vtables.
- if (classDecl->isDynamicClass()) {
- cgm.errorNYI(loc, "emitBaseInitializer: dynamic class");
- return;
- }
+ if (baseInitializerUsesThis(getContext(), baseInit->getInit()))
+ initializeVTablePointers(loc, classDecl);
// We can pretend to be a complete class because it only matters for
// virtual bases, and we only do virtual bases for complete ctors.
@@ -197,19 +220,9 @@ void CIRGenFunction::emitCtorPrologue(const CXXConstructorDecl *cd,
return;
}
- // If there are no member initializers, we can just return.
- if (cd->getNumCtorInitializers() == 0)
- return;
-
const CXXRecordDecl *classDecl = cd->getParent();
- // This code doesn't use range-based iteration because we may need to emit
- // code between the virtual base initializers and the non-virtual base or
- // between the non-virtual base initializers and the member initializers.
- CXXConstructorDecl::init_const_iterator b = cd->init_begin(),
- e = cd->init_end();
-
- // Virtual base initializers first, if any. They aren't needed if:
+ // Virtual base initializers aren't needed if:
// - This is a base ctor variant
// - There are no vbases
// - The class is abstract, so a complete object of it cannot be constructed
@@ -219,40 +232,66 @@ void CIRGenFunction::emitCtorPrologue(const CXXConstructorDecl *cd,
bool constructVBases = ctorType != Ctor_Base &&
classDecl->getNumVBases() != 0 &&
!classDecl->isAbstract();
- if (constructVBases) {
- cgm.errorNYI(cd->getSourceRange(), "emitCtorPrologue: virtual base");
- return;
- }
-
- const mlir::Value oldThisValue = cxxThisValue;
- if (!constructVBases && (*b)->isBaseInitializer() && (*b)->isBaseVirtual()) {
+ if (constructVBases &&
+ !cgm.getTarget().getCXXABI().hasConstructorVariants()) {
cgm.errorNYI(cd->getSourceRange(),
- "emitCtorPrologue: virtual base initializer");
+ "emitCtorPrologue: virtual base without variants");
return;
}
- // Handle non-virtual base initializers.
- for (; b != e && (*b)->isBaseInitializer(); b++) {
- assert(!(*b)->isBaseVirtual());
+ // Create three separate ranges for the different types of initializers.
+ auto allInits = cd->inits();
+
+ // Find the boundaries between the three groups.
+ auto virtualBaseEnd = std::find_if(
+ allInits.begin(), allInits.end(), [](const CXXCtorInitializer *Init) {
+ return !(Init->isBaseInitializer() && Init->isBaseVirtual());
+ });
+
+ auto nonVirtualBaseEnd = std::find_if(virtualBaseEnd, allInits.end(),
+ [](const CXXCtorInitializer *Init) {
+ return !Init->isBaseInitializer();
+ });
+
+ // Create the three ranges.
+ auto virtualBaseInits = llvm::make_range(allInits.begin(), virtualBaseEnd);
+ auto nonVirtualBaseInits =
+ llvm::make_range(virtualBaseEnd, nonVirtualBaseEnd);
+ auto memberInits = llvm::make_range(nonVirtualBaseEnd, allInits.end());
+ const mlir::Value oldThisValue = cxxThisValue;
+
+ auto emitInitializer = [&](CXXCtorInitializer *baseInit) {
if (cgm.getCodeGenOpts().StrictVTablePointers &&
cgm.getCodeGenOpts().OptimizationLevel > 0 &&
- isInitializerOfDynamicClass(*b)) {
+ isInitializerOfDynamicClass(baseInit)) {
+ // It's OK to continue after emitting the error here. The missing code
+ // just "launders" the 'this' pointer.
cgm.errorNYI(cd->getSourceRange(),
- "emitCtorPrologue: strict vtable pointers");
- return;
+ "emitCtorPrologue: strict vtable pointers for vbase");
}
- emitBaseInitializer(getLoc(cd->getBeginLoc()), classDecl, *b);
+ emitBaseInitializer(getLoc(cd->getBeginLoc()), classDecl, baseInit);
+ };
+
+ // Process virtual base initializers.
+ for (CXXCtorInitializer *virtualBaseInit : virtualBaseInits) {
+ if (!constructVBases)
+ continue;
+ emitInitializer(virtualBaseInit);
}
- cxxThisValue = oldThisValue;
+ assert(!cir::MissingFeatures::msabi());
- if (classDecl->isDynamicClass()) {
- cgm.errorNYI(cd->getSourceRange(),
- "emitCtorPrologue: initialize vtable pointers");
- return;
+ // Then, non-virtual base initializers.
+ for (CXXCtorInitializer *nonVirtualBaseInit : nonVirtualBaseInits) {
+ assert(!nonVirtualBaseInit->isBaseVirtual());
+ emitInitializer(nonVirtualBaseInit);
}
+ cxxThisValue = oldThisValue;
+
+ initializeVTablePointers(getLoc(cd->getBeginLoc()), classDecl);
+
// Finally, initialize class members.
FieldConstructionScope fcs(*this, loadCXXThisAddress());
// Classic codegen uses a special class to attempt to replace member
@@ -260,8 +299,7 @@ void CIRGenFunction::emitCtorPrologue(const CXXConstructorDecl *cd,
// lowering or optimization phases to keep the memory accesses more
// explicit. For now, we don't insert memcpy at all.
assert(!cir::MissingFeatures::ctorMemcpyizer());
- for (; b != e; b++) {
- CXXCtorInitializer *member = (*b);
+ for (CXXCtorInitializer *member : memberInits) {
assert(!member->isBaseInitializer());
assert(member->isAnyMemberInitializer() &&
"Delegating initializer on non-delegating constructor");
@@ -269,6 +307,167 @@ void CIRGenFunction::emitCtorPrologue(const CXXConstructorDecl *cd,
}
}
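
The three-range partition leans on Sema storing CXXCtorInitializers in execution order: virtual bases, then direct non-virtual bases, then members, regardless of how they are spelled. For example:

    struct V { V(); };
    struct A { A(); };
    struct D : A, virtual V {
      int m;
      // cd->inits() is ordered V, A, m even though the source lists them
      // backwards, so the two std::find_if calls split it into three
      // contiguous ranges.
      D() : m(0), A(), V() {}
    };
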
+static Address applyNonVirtualAndVirtualOffset(
+ mlir::Location loc, CIRGenFunction &cgf, Address addr,
+ CharUnits nonVirtualOffset, mlir::Value virtualOffset,
+ const CXXRecordDecl *derivedClass, const CXXRecordDecl *nearestVBase,
+ mlir::Type baseValueTy = {}, bool assumeNotNull = true) {
+ // Assert that we have something to do.
+ assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr);
+
+ // Compute the offset from the static and dynamic components.
+ if (!nonVirtualOffset.isZero()) {
+ if (virtualOffset) {
+ cgf.cgm.errorNYI(
+ loc,
+ "applyNonVirtualAndVirtualOffset: virtual and non-virtual offset");
+ return Address::invalid();
+ } else {
+ assert(baseValueTy && "expected base type");
+ // If no virtualOffset is present this is the final stop.
+ return cgf.getBuilder().createBaseClassAddr(
+ loc, addr, baseValueTy, nonVirtualOffset.getQuantity(),
+ assumeNotNull);
+ }
+ }
+
+ cgf.cgm.errorNYI(loc, "applyNonVirtualAndVirtualOffset: virtual offset");
+ return Address::invalid();
+}
+
+void CIRGenFunction::initializeVTablePointer(mlir::Location loc,
+ const VPtr &vptr) {
+ // Compute the address point.
+ mlir::Value vtableAddressPoint =
+ cgm.getCXXABI().getVTableAddressPointInStructor(
+ *this, vptr.vtableClass, vptr.base, vptr.nearestVBase);
+
+ if (!vtableAddressPoint)
+ return;
+
+ // Compute where to store the address point.
+ mlir::Value virtualOffset{};
+ CharUnits nonVirtualOffset = CharUnits::Zero();
+
+ mlir::Type baseValueTy;
+ if (cgm.getCXXABI().isVirtualOffsetNeededForVTableField(*this, vptr)) {
+ cgm.errorNYI(loc, "initializeVTablePointer: virtual offset for vtable");
+ } else {
+ // We can just use the base offset in the complete class.
+ nonVirtualOffset = vptr.base.getBaseOffset();
+ baseValueTy =
+ convertType(getContext().getCanonicalTagType(vptr.base.getBase()));
+ }
+
+ // Apply the offsets.
+ Address classAddr = loadCXXThisAddress();
+ if (!nonVirtualOffset.isZero() || virtualOffset) {
+ classAddr = applyNonVirtualAndVirtualOffset(
+ loc, *this, classAddr, nonVirtualOffset, virtualOffset,
+ vptr.vtableClass, vptr.nearestVBase, baseValueTy);
+ }
+
+ // Finally, store the address point. Use the same CIR types as the field.
+ //
+ // The vtable field is derived from the `this` pointer, so they should be
+ // in the same address space.
+ assert(!cir::MissingFeatures::addressSpace());
+ auto vtablePtr = cir::VTableGetVPtrOp::create(
+ builder, loc, builder.getPtrToVPtrType(), classAddr.getPointer());
+ Address vtableField = Address(vtablePtr, classAddr.getAlignment());
+ builder.createStore(loc, vtableAddressPoint, vtableField);
+ assert(!cir::MissingFeatures::opTBAA());
+ assert(!cir::MissingFeatures::createInvariantGroup());
+}
+
+void CIRGenFunction::initializeVTablePointers(mlir::Location loc,
+ const CXXRecordDecl *rd) {
+ // Ignore classes without a vtable.
+ if (!rd->isDynamicClass())
+ return;
+
+ // Initialize the vtable pointers for this class and all of its bases.
+ if (cgm.getCXXABI().doStructorsInitializeVPtrs(rd))
+ for (const auto &vptr : getVTablePointers(rd))
+ initializeVTablePointer(loc, vptr);
+
+ if (rd->getNumVBases())
+ cgm.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, rd);
+}
+
+CIRGenFunction::VPtrsVector
+CIRGenFunction::getVTablePointers(const CXXRecordDecl *vtableClass) {
+ CIRGenFunction::VPtrsVector vptrsResult;
+ VisitedVirtualBasesSetTy vbases;
+ getVTablePointers(BaseSubobject(vtableClass, CharUnits::Zero()),
+ /*NearestVBase=*/nullptr,
+ /*OffsetFromNearestVBase=*/CharUnits::Zero(),
+ /*BaseIsNonVirtualPrimaryBase=*/false, vtableClass, vbases,
+ vptrsResult);
+ return vptrsResult;
+}
+
+void CIRGenFunction::getVTablePointers(BaseSubobject base,
+ const CXXRecordDecl *nearestVBase,
+ CharUnits offsetFromNearestVBase,
+ bool baseIsNonVirtualPrimaryBase,
+ const CXXRecordDecl *vtableClass,
+ VisitedVirtualBasesSetTy &vbases,
+ VPtrsVector &vptrs) {
+ // If this base is a non-virtual primary base the address point has already
+ // been set.
+ if (!baseIsNonVirtualPrimaryBase) {
+ // Initialize the vtable pointer for this base.
+ VPtr vptr = {base, nearestVBase, offsetFromNearestVBase, vtableClass};
+ vptrs.push_back(vptr);
+ }
+
+ const CXXRecordDecl *rd = base.getBase();
+
+ for (const auto &nextBase : rd->bases()) {
+ const auto *baseDecl =
+ cast<CXXRecordDecl>(
+ nextBase.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
+
+ // Ignore classes without a vtable.
+ if (!baseDecl->isDynamicClass())
+ continue;
+
+ CharUnits baseOffset;
+ CharUnits baseOffsetFromNearestVBase;
+ bool baseDeclIsNonVirtualPrimaryBase;
+ const CXXRecordDecl *nextBaseDecl;
+
+ if (nextBase.isVirtual()) {
+ // Check if we've visited this virtual base before.
+ if (!vbases.insert(baseDecl).second)
+ continue;
+
+ const ASTRecordLayout &layout =
+ getContext().getASTRecordLayout(vtableClass);
+
+ nextBaseDecl = nearestVBase;
+ baseOffset = layout.getVBaseClassOffset(baseDecl);
+ baseOffsetFromNearestVBase = CharUnits::Zero();
+ baseDeclIsNonVirtualPrimaryBase = false;
+ } else {
+ const ASTRecordLayout &layout = getContext().getASTRecordLayout(rd);
+
+ nextBaseDecl = baseDecl;
+ baseOffset = base.getBaseOffset() + layout.getBaseClassOffset(baseDecl);
+ baseOffsetFromNearestVBase =
+ offsetFromNearestVBase + layout.getBaseClassOffset(baseDecl);
+ baseDeclIsNonVirtualPrimaryBase = layout.getPrimaryBase() == baseDecl;
+ }
+
+ getVTablePointers(BaseSubobject(baseDecl, baseOffset), nextBaseDecl,
+ baseOffsetFromNearestVBase,
+ baseDeclIsNonVirtualPrimaryBase, vtableClass, vbases,
+ vptrs);
+ }
+}
+
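
For a diamond hierarchy, the recursion above produces one VPtr per dynamic subobject, with the vbases set ensuring a shared virtual base is visited only once:

    struct V { virtual ~V(); };
    struct A : virtual V {};
    struct B : virtual V {};
    // Starting from C: A is visited, then V through A (inserted into
    // 'vbases'), then B; B's edge to V is skipped because the second
    // insert into 'vbases' fails.
    struct C : A, B {};
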
Address CIRGenFunction::loadCXXThisAddress() {
assert(curFuncDecl && "loading 'this' without a func declaration?");
assert(isa<CXXMethodDecl>(curFuncDecl));
@@ -377,7 +576,7 @@ void CIRGenFunction::emitCXXAggrConstructorCall(
//
// Note that these are complete objects and so we don't need to
// use the non-virtual size or alignment.
- QualType type = getContext().getTypeDeclType(ctor->getParent());
+ CanQualType type = getContext().getCanonicalTagType(ctor->getParent());
CharUnits eltAlignment = arrayBase.getAlignment().alignmentOfArrayElement(
getContext().getTypeSizeInChars(type));
@@ -483,8 +682,7 @@ void CIRGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &args) {
void CIRGenFunction::destroyCXXObject(CIRGenFunction &cgf, Address addr,
QualType type) {
- const RecordType *rtype = type->castAs<RecordType>();
- const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const auto *record = type->castAsCXXRecordDecl();
const CXXDestructorDecl *dtor = record->getDestructor();
// TODO(cir): Unlike traditional codegen, CIRGen should actually emit trivial
// dtors which shall be removed on later CIR passes. However, only remove this
@@ -571,6 +769,37 @@ Address CIRGenFunction::getAddressOfBaseClass(
return value;
}
+// TODO(cir): this can be shared with LLVM codegen.
+bool CIRGenFunction::shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd) {
+ assert(!cir::MissingFeatures::hiddenVisibility());
+ if (!cgm.getCodeGenOpts().WholeProgramVTables)
+ return false;
+
+ if (cgm.getCodeGenOpts().VirtualFunctionElimination)
+ return true;
+
+ assert(!cir::MissingFeatures::sanitizers());
+
+ return false;
+}
+
+mlir::Value CIRGenFunction::getVTablePtr(mlir::Location loc, Address thisAddr,
+ const CXXRecordDecl *rd) {
+ auto vtablePtr = cir::VTableGetVPtrOp::create(
+ builder, loc, builder.getPtrToVPtrType(), thisAddr.getPointer());
+ Address vtablePtrAddr = Address(vtablePtr, thisAddr.getAlignment());
+
+ auto vtable = builder.createLoad(loc, vtablePtrAddr);
+ assert(!cir::MissingFeatures::opTBAA());
+
+ if (cgm.getCodeGenOpts().OptimizationLevel > 0 &&
+ cgm.getCodeGenOpts().StrictVTablePointers) {
+ assert(!cir::MissingFeatures::createInvariantGroup());
+ }
+
+ return vtable;
+}
+
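
A hedged sketch of where getVTablePtr fits in the virtual-call sequence; the slot indexing itself stays on the ABI side:

    // Load the vptr out of the object, then let the ABI index the slot.
    mlir::Value vtable = cgf.getVTablePtr(loc, thisAddr, classDecl);
    // CIRGenCXXABI::getVirtualFunctionPointer (see prepareConcreteCallee
    // in CIRGenCall.cpp) performs the slot lookup from here.
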
void CIRGenFunction::emitCXXConstructorCall(const clang::CXXConstructorDecl *d,
clang::CXXCtorType type,
bool forVirtualBase,
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index be21ce9..4d4d10b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -16,6 +16,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CIRGenCleanup.h"
#include "CIRGenFunction.h"
#include "clang/CIR/MissingFeatures.h"
@@ -33,37 +34,147 @@ using namespace clang::CIRGen;
void EHScopeStack::Cleanup::anchor() {}
-static mlir::Block *getCurCleanupBlock(CIRGenFunction &cgf) {
- mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
- mlir::Block *cleanup =
- cgf.curLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
- return cleanup;
+/// Push an entry of the given size onto this protected-scope stack.
+char *EHScopeStack::allocate(size_t size) {
+ size = llvm::alignTo(size, ScopeStackAlignment);
+ if (!startOfBuffer) {
+ unsigned capacity = llvm::PowerOf2Ceil(std::max<size_t>(size, 1024));
+ startOfBuffer = std::make_unique<char[]>(capacity);
+ startOfData = endOfBuffer = startOfBuffer.get() + capacity;
+ } else if (static_cast<size_t>(startOfData - startOfBuffer.get()) < size) {
+ unsigned currentCapacity = endOfBuffer - startOfBuffer.get();
+ unsigned usedCapacity =
+ currentCapacity - (startOfData - startOfBuffer.get());
+ unsigned requiredCapacity = usedCapacity + size;
+ // We know from the 'else if' condition that requiredCapacity is greater
+ // than currentCapacity.
+ unsigned newCapacity = llvm::PowerOf2Ceil(requiredCapacity);
+
+ std::unique_ptr<char[]> newStartOfBuffer =
+ std::make_unique<char[]>(newCapacity);
+ char *newEndOfBuffer = newStartOfBuffer.get() + newCapacity;
+ char *newStartOfData = newEndOfBuffer - usedCapacity;
+ memcpy(newStartOfData, startOfData, usedCapacity);
+ startOfBuffer.swap(newStartOfBuffer);
+ endOfBuffer = newEndOfBuffer;
+ startOfData = newStartOfData;
+ }
+
+ assert(startOfBuffer.get() + size <= startOfData);
+ startOfData -= size;
+ return startOfData;
+}
+
+void EHScopeStack::deallocate(size_t size) {
+ startOfData += llvm::alignTo(size, ScopeStackAlignment);
+}
+
+void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
+ char *buffer = allocate(EHCleanupScope::getSizeForCleanupSize(size));
+ bool isEHCleanup = kind & EHCleanup;
+ bool isLifetimeMarker = kind & LifetimeMarker;
+
+ assert(!cir::MissingFeatures::innermostEHScope());
+
+ EHCleanupScope *scope = new (buffer) EHCleanupScope(size);
+
+ if (isLifetimeMarker)
+ cgf->cgm.errorNYI("push lifetime marker cleanup");
+
+ // With Windows -EHa, Invoke llvm.seh.scope.begin() for EHCleanup
+ if (cgf->getLangOpts().EHAsynch && isEHCleanup && !isLifetimeMarker &&
+ cgf->getTarget().getCXXABI().isMicrosoft())
+ cgf->cgm.errorNYI("push seh cleanup");
+
+ return scope->getCleanupBuffer();
+}
+
+void EHScopeStack::popCleanup() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCleanupScope>(*begin()));
+ EHCleanupScope &cleanup = cast<EHCleanupScope>(*begin());
+ deallocate(cleanup.getAllocatedSize());
+
+ // Destroy the cleanup.
+ cleanup.destroy();
+
+ assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+}
+
+static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
+ // Ask the cleanup to emit itself.
+ assert(cgf.haveInsertPoint() && "expected insertion point");
+ assert(!cir::MissingFeatures::ehCleanupFlags());
+ cleanup->emit(cgf);
+ assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
}
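
A worked example of allocate, assuming ScopeStackAlignment is 8; the stack grows downward from endOfBuffer so that offsets measured from the top stay stable across reallocation:

    // Fresh stack, allocate(20):
    //   size        = alignTo(20, 8)              = 24
    //   capacity    = PowerOf2Ceil(max(24, 1024)) = 1024
    //   startOfData = endOfBuffer = start + 1024, then -= 24
    //   -> the new scope occupies [start + 1000, start + 1024)
    // A later allocate() that does not fit copies the used tail into a
    // larger buffer, gluing it to the new endOfBuffer.
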
/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CIRGenFunction::popCleanupBlock() {
- assert(!ehStack.cleanupStack.empty() && "cleanup stack is empty!");
- mlir::OpBuilder::InsertionGuard guard(builder);
- std::unique_ptr<EHScopeStack::Cleanup> cleanup =
- ehStack.cleanupStack.pop_back_val();
+ assert(!ehStack.empty() && "cleanup stack is empty!");
+ assert(isa<EHCleanupScope>(*ehStack.begin()) && "top not a cleanup!");
+ EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.begin());
+
+ // Remember activation information.
+ bool isActive = scope.isActive();
+
+ assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+
+ // Check whether there's a fallthrough into the cleanup.
+ mlir::Block *fallthroughSource = builder.getInsertionBlock();
+ bool hasFallthrough = fallthroughSource != nullptr && isActive;
+
+ bool requiresNormalCleanup = scope.isNormalCleanup() && hasFallthrough;
+
+ // If we don't need the cleanup at all, we're done.
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+ if (!requiresNormalCleanup) {
+ ehStack.popCleanup();
+ return;
+ }
+
+ // Copy the cleanup emission data out. This uses either a stack
+ // array or malloc'd memory, depending on the size, which is
+ // behavior that SmallVector would provide, if we could use it
+ // here. Unfortunately, if you ask for a SmallVector<char>, the
+ // alignment isn't sufficient.
+ auto *cleanupSource = reinterpret_cast<char *>(scope.getCleanupBuffer());
+ alignas(EHScopeStack::ScopeStackAlignment) char
+ cleanupBufferStack[8 * sizeof(void *)];
+ std::unique_ptr<char[]> cleanupBufferHeap;
+ size_t cleanupSize = scope.getCleanupSize();
+ EHScopeStack::Cleanup *cleanup;
+
+ // This is necessary because we are going to deallocate the cleanup
+ // (in popCleanup) before we emit it.
+ if (cleanupSize <= sizeof(cleanupBufferStack)) {
+ memcpy(cleanupBufferStack, cleanupSource, cleanupSize);
+ cleanup = reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferStack);
+ } else {
+ cleanupBufferHeap.reset(new char[cleanupSize]);
+ memcpy(cleanupBufferHeap.get(), cleanupSource, cleanupSize);
+ cleanup =
+ reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferHeap.get());
+ }
assert(!cir::MissingFeatures::ehCleanupFlags());
- mlir::Block *cleanupEntry = getCurCleanupBlock(*this);
- builder.setInsertionPointToEnd(cleanupEntry);
- cleanup->emit(*this);
+
+ ehStack.popCleanup();
+ scope.markEmitted();
+ emitCleanup(*this, cleanup);
}
/// Pops cleanup blocks until the given savepoint is reached.
-void CIRGenFunction::popCleanupBlocks(size_t oldCleanupStackDepth) {
+void CIRGenFunction::popCleanupBlocks(
+ EHScopeStack::stable_iterator oldCleanupStackDepth) {
assert(!cir::MissingFeatures::ehstackBranches());
- assert(ehStack.getStackDepth() >= oldCleanupStackDepth);
-
// Pop cleanup blocks until we reach the base stack depth for the
// current scope.
- while (ehStack.getStackDepth() > oldCleanupStackDepth) {
+ while (ehStack.stable_begin() != oldCleanupStackDepth) {
popCleanupBlock();
}
}
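
Switching popCleanupBlocks to stable_iterator restores classic codegen's savepoint idiom: record the depth on scope entry and pop back to it on exit, immune to the buffer moving. A hedged sketch:

    // On entering a lexical scope:
    EHScopeStack::stable_iterator savedDepth = cgf.ehStack.stable_begin();
    // ... emit the scope; locals with destructors push cleanups ...
    // On leaving, pop exactly the cleanups this scope pushed:
    cgf.popCleanupBlocks(savedDepth);
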
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
new file mode 100644
index 0000000..a4ec8cc
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
@@ -0,0 +1,142 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of CIR for cleanups, initially based
+// on LLVM IR cleanup handling, but ought to change as CIR evolves.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
+#define CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
+
+#include "Address.h"
+#include "EHScopeStack.h"
+#include "mlir/IR/Value.h"
+
+namespace clang::CIRGen {
+
+/// A protected scope for zero-cost EH handling.
+class EHScope {
+ class CommonBitFields {
+ friend class EHScope;
+ unsigned kind : 3;
+ };
+ enum { NumCommonBits = 3 };
+
+protected:
+ class CleanupBitFields {
+ friend class EHCleanupScope;
+ unsigned : NumCommonBits;
+
+ /// Whether this cleanup needs to be run along normal edges.
+ unsigned isNormalCleanup : 1;
+
+ /// Whether this cleanup needs to be run along exception edges.
+ unsigned isEHCleanup : 1;
+
+ /// Whether this cleanup is currently active.
+ unsigned isActive : 1;
+
+ /// Whether this cleanup is a lifetime marker.
+ unsigned isLifetimeMarker : 1;
+
+ /// Whether the normal cleanup should test the activation flag.
+ unsigned testFlagInNormalCleanup : 1;
+
+ /// Whether the EH cleanup should test the activation flag.
+ unsigned testFlagInEHCleanup : 1;
+
+ /// The amount of extra storage needed by the Cleanup.
+ /// Always a multiple of the scope-stack alignment.
+ unsigned cleanupSize : 12;
+ };
+
+ union {
+ CommonBitFields commonBits;
+ CleanupBitFields cleanupBits;
+ };
+
+public:
+ enum Kind { Cleanup, Catch, Terminate, Filter };
+
+ EHScope(Kind kind) { commonBits.kind = kind; }
+
+ Kind getKind() const { return static_cast<Kind>(commonBits.kind); }
+};
+
+/// A cleanup scope which generates the cleanup blocks lazily.
+class alignas(EHScopeStack::ScopeStackAlignment) EHCleanupScope
+ : public EHScope {
+public:
+ /// Gets the size required for a lazy cleanup scope with the given
+ /// cleanup-data requirements.
+ static size_t getSizeForCleanupSize(size_t size) {
+ return sizeof(EHCleanupScope) + size;
+ }
+
+ size_t getAllocatedSize() const {
+ return sizeof(EHCleanupScope) + cleanupBits.cleanupSize;
+ }
+
+ EHCleanupScope(unsigned cleanupSize) : EHScope(EHScope::Cleanup) {
+ // TODO(cir): When exception handling is upstreamed, isNormalCleanup and
+ // isEHCleanup will be arguments to the constructor.
+ cleanupBits.isNormalCleanup = true;
+ cleanupBits.isEHCleanup = false;
+ cleanupBits.isActive = true;
+ cleanupBits.isLifetimeMarker = false;
+ cleanupBits.testFlagInNormalCleanup = false;
+ cleanupBits.testFlagInEHCleanup = false;
+ cleanupBits.cleanupSize = cleanupSize;
+
+ assert(cleanupBits.cleanupSize == cleanupSize && "cleanup size overflow");
+ }
+
+ void destroy() {}
+ // Objects of EHCleanupScope are not destructed. Use destroy().
+ ~EHCleanupScope() = delete;
+
+ bool isNormalCleanup() const { return cleanupBits.isNormalCleanup; }
+
+ bool isActive() const { return cleanupBits.isActive; }
+
+ size_t getCleanupSize() const { return cleanupBits.cleanupSize; }
+ void *getCleanupBuffer() { return this + 1; }
+
+ EHScopeStack::Cleanup *getCleanup() {
+ return reinterpret_cast<EHScopeStack::Cleanup *>(getCleanupBuffer());
+ }
+
+ static bool classof(const EHScope *scope) {
+ return (scope->getKind() == Cleanup);
+ }
+
+ void markEmitted() {}
+};
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+ char *ptr = nullptr;
+
+ friend class EHScopeStack;
+ explicit iterator(char *ptr) : ptr(ptr) {}
+
+public:
+ iterator() = default;
+
+ EHScope *get() const { return reinterpret_cast<EHScope *>(ptr); }
+
+ EHScope &operator*() const { return *get(); }
+};
+
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+ return iterator(startOfData);
+}
+
+} // namespace clang::CIRGen
+#endif // CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
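
The layout contract pushCleanup relies on: the EHCleanupScope header and the Cleanup payload share one allocation, with the payload starting at this + 1. Simplified from the .cpp hunk above:

    char *buffer = allocate(EHCleanupScope::getSizeForCleanupSize(size));
    auto *scope = new (buffer) EHCleanupScope(size);
    // getCleanupBuffer() == scope + 1; the caller placement-news the
    // concrete Cleanup subclass (e.g. DestroyObject) into it.
    return scope->getCleanupBuffer();
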
diff --git a/clang/lib/CIR/CodeGen/CIRGenConstantEmitter.h b/clang/lib/CIR/CodeGen/CIRGenConstantEmitter.h
index d6dac50..d455f6e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenConstantEmitter.h
+++ b/clang/lib/CIR/CodeGen/CIRGenConstantEmitter.h
@@ -80,7 +80,7 @@ public:
// initializer or to propagate to another context; for example,
// side effects, or emitting an initialization that requires a
// reference to its current location.
- mlir::Attribute emitForMemory(mlir::Attribute c, QualType t);
+ mlir::Attribute emitForMemory(mlir::Attribute c, QualType destType);
/// Try to emit the initializer of the given declaration as an abstract
/// constant.
@@ -90,8 +90,9 @@ public:
/// asserting that it succeeded. This is only safe to do when the
/// expression is known to be a constant expression with either a fairly
/// simple type or a known simple form.
+ mlir::Attribute emitAbstract(const Expr *e, QualType destType);
mlir::Attribute emitAbstract(SourceLocation loc, const APValue &value,
- QualType t);
+ QualType destType);
mlir::Attribute tryEmitConstantExpr(const ConstantExpr *ce);
@@ -101,6 +102,7 @@ public:
mlir::Attribute tryEmitPrivateForVarInit(const VarDecl &d);
+ mlir::TypedAttr tryEmitPrivate(const Expr *e, QualType destType);
mlir::Attribute tryEmitPrivate(const APValue &value, QualType destType);
mlir::Attribute tryEmitPrivateForMemory(const APValue &value, QualType t);
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 78d375c..7cc024f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -154,15 +154,19 @@ void CIRGenFunction::emitAutoVarInit(
initializeWhatIsTechnicallyUninitialized(addr);
LValue lv = makeAddrLValue(addr, type, AlignmentSource::Decl);
emitExprAsInit(init, &d, lv);
- // In case lv has uses it means we indeed initialized something
- // out of it while trying to build the expression, mark it as such.
- mlir::Value val = lv.getAddress().getPointer();
- assert(val && "Should have an address");
- auto allocaOp = val.getDefiningOp<cir::AllocaOp>();
- assert(allocaOp && "Address should come straight out of the alloca");
-
- if (!allocaOp.use_empty())
- allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
+
+ if (!emission.wasEmittedAsOffloadClause()) {
+ // In case lv has uses it means we indeed initialized something
+ // out of it while trying to build the expression, mark it as such.
+ mlir::Value val = lv.getAddress().getPointer();
+ assert(val && "Should have an address");
+ auto allocaOp = val.getDefiningOp<cir::AllocaOp>();
+ assert(allocaOp && "Address should come straight out of the alloca");
+
+ if (!allocaOp.use_empty())
+ allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
+ }
+
return;
}
@@ -293,7 +297,7 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &d,
mlir::Attribute init = builder.getZeroInitAttr(convertType(ty));
cir::GlobalOp gv = builder.createVersionedGlobal(
- getModule(), getLoc(d.getLocation()), name, lty, linkage);
+ getModule(), getLoc(d.getLocation()), name, lty, false, linkage);
// TODO(cir): infer visibility from linkage in global op builder.
gv.setVisibility(getMLIRVisibilityFromCIRLinkage(linkage));
gv.setInitialValueAttr(init);
@@ -667,6 +671,12 @@ struct DestroyObject final : EHScopeStack::Cleanup {
void emit(CIRGenFunction &cgf) override {
cgf.emitDestroy(addr, type, destroyer);
}
+
+ // This is a placeholder until EHCleanupScope is implemented.
+ size_t getSize() const override {
+ assert(!cir::MissingFeatures::ehCleanupScope());
+ return sizeof(DestroyObject);
+ }
};
} // namespace
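
Until EHCleanupScope-based sizing is fully wired through, each Cleanup subclass reports its own size, as DestroyObject does above. A hypothetical subclass (FlushBuffer is invented for illustration) follows the same shape:

    struct FlushBuffer final : EHScopeStack::Cleanup {
      Address addr;
      FlushBuffer(Address addr) : addr(addr) {}
      void emit(CIRGenFunction &cgf) override {
        // ... emit whatever teardown this cleanup represents ...
      }
      size_t getSize() const override { return sizeof(FlushBuffer); }
    };
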
diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp
new file mode 100644
index 0000000..7fcb39a
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp
@@ -0,0 +1,41 @@
+//===--- CIRGenException.cpp - Emit CIR Code for C++ exceptions -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ exception related code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenCXXABI.h"
+#include "CIRGenFunction.h"
+
+#include "clang/AST/StmtVisitor.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+
+void CIRGenFunction::emitCXXThrowExpr(const CXXThrowExpr *e) {
+ const llvm::Triple &triple = getTarget().getTriple();
+ if (cgm.getLangOpts().OpenMPIsTargetDevice &&
+ (triple.isNVPTX() || triple.isAMDGCN())) {
+ cgm.errorNYI("emitCXXThrowExpr OpenMP with NVPTX or AMDGCN Triples");
+ return;
+ }
+
+ if (const Expr *subExpr = e->getSubExpr()) {
+ QualType throwType = subExpr->getType();
+ if (throwType->isObjCObjectPointerType()) {
+ cgm.errorNYI("emitCXXThrowExpr ObjCObjectPointerType");
+ return;
+ } else {
+ cgm.errorNYI("emitCXXThrowExpr with subExpr");
+ return;
+ }
+ } else {
+ cgm.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true);
+ }
+}
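
Only the operand-less rethrow reaches code generation for now; both throw-with-operand forms are flagged NYI. The supported source shape:

    void handler() {
      try {
        // ...
      } catch (...) {
        throw; // no sub-expression: lowered via emitRethrow(*this,
               // /*isNoReturn=*/true)
      }
    }
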
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index a0ff08e..4698793 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -73,28 +73,58 @@ Address CIRGenFunction::emitPointerWithAlignment(const Expr *expr,
// Casts:
if (auto const *ce = dyn_cast<CastExpr>(expr)) {
- if (isa<ExplicitCastExpr>(ce)) {
- cgm.errorNYI(expr->getSourceRange(),
- "emitPointerWithAlignment: explicit cast");
- return Address::invalid();
- }
+ if (const auto *ece = dyn_cast<ExplicitCastExpr>(ce))
+ cgm.emitExplicitCastExprType(ece);
switch (ce->getCastKind()) {
// Non-converting casts (but not C's implicit conversion from void*).
case CK_BitCast:
case CK_NoOp:
case CK_AddressSpaceConversion: {
- cgm.errorNYI(expr->getSourceRange(),
- "emitPointerWithAlignment: noop cast");
- return Address::invalid();
- } break;
+ if (const auto *ptrTy =
+ ce->getSubExpr()->getType()->getAs<PointerType>()) {
+ if (ptrTy->getPointeeType()->isVoidType())
+ break;
+
+ LValueBaseInfo innerBaseInfo;
+ assert(!cir::MissingFeatures::opTBAA());
+ Address addr =
+ emitPointerWithAlignment(ce->getSubExpr(), &innerBaseInfo);
+ if (baseInfo)
+ *baseInfo = innerBaseInfo;
+
+ if (isa<ExplicitCastExpr>(ce)) {
+ LValueBaseInfo targetTypeBaseInfo;
+
+ const QualType pointeeType = expr->getType()->getPointeeType();
+ const CharUnits align =
+ cgm.getNaturalTypeAlignment(pointeeType, &targetTypeBaseInfo);
+
+ // If the source l-value is opaque, honor the alignment of the
+ // casted-to type.
+ if (innerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
+ if (baseInfo)
+ baseInfo->mergeForCast(targetTypeBaseInfo);
+ addr = Address(addr.getPointer(), addr.getElementType(), align);
+ }
+ }
+
+ assert(!cir::MissingFeatures::sanitizers());
+
+ const mlir::Type eltTy =
+ convertTypeForMem(expr->getType()->getPointeeType());
+ addr = getBuilder().createElementBitCast(getLoc(expr->getSourceRange()),
+ addr, eltTy);
+ assert(!cir::MissingFeatures::addressSpace());
+
+ return addr;
+ }
+ break;
+ }
// Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
- case CK_ArrayToPointerDecay: {
- cgm.errorNYI(expr->getSourceRange(),
- "emitPointerWithAlignment: array-to-pointer decay");
- return Address::invalid();
- }
+ case CK_ArrayToPointerDecay:
+ return emitArrayToPointerDecay(ce->getSubExpr(), baseInfo);
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
@@ -184,8 +214,11 @@ Address CIRGenFunction::emitPointerWithAlignment(const Expr *expr,
if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
// TODO(cir): maybe we should use cir.unary for pointers here instead.
if (uo->getOpcode() == UO_AddrOf) {
- cgm.errorNYI(expr->getSourceRange(), "emitPointerWithAlignment: unary &");
- return Address::invalid();
+ LValue lv = emitLValue(uo->getSubExpr());
+ if (baseInfo)
+ *baseInfo = lv.getBaseInfo();
+ assert(!cir::MissingFeatures::opTBAA());
+ return lv.getAddress();
}
}
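
Two of the newly supported pointer sources in emitPointerWithAlignment, as plain C++ triggers (a hedged illustration):

    void use(void *);
    void triggers(int *p, int g) {
      // CK_BitCast through an explicit cast: the alignment can be
      // re-derived from the casted-to pointee when the source l-value
      // is opaque.
      use(reinterpret_cast<char *>(p));
      // UO_AddrOf: address and alignment come straight from the LValue.
      use(&g);
    }
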
@@ -369,9 +402,10 @@ Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base,
unsigned index) {
mlir::Location loc = getLoc(field->getLocation());
cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
- cir::GetMemberOp sea = getBuilder().createGetMember(
- loc, fieldPtr, base.getPointer(), field->getName(), index);
auto rec = cast<cir::RecordType>(base.getAddress().getElementType());
+ cir::GetMemberOp sea = getBuilder().createGetMember(
+ loc, fieldPtr, base.getPointer(), field->getName(),
+ rec.isUnion() ? field->getFieldIndex() : index);
CharUnits offset = CharUnits::fromQuantity(
rec.getElementOffset(cgm.getDataLayout().layout, index));
return Address(sea, base.getAlignment().alignmentAtOffset(offset));
@@ -550,6 +584,37 @@ RValue CIRGenFunction::emitLoadOfLValue(LValue lv, SourceLocation loc) {
return RValue::get(nullptr);
}
+static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
+ assert(!cir::MissingFeatures::weakRefReference());
+ return cgm.getAddrOfFunction(gd);
+}
+
+static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
+ GlobalDecl gd) {
+ const FunctionDecl *fd = cast<FunctionDecl>(gd.getDecl());
+ cir::FuncOp funcOp = emitFunctionDeclPointer(cgf.cgm, gd);
+ mlir::Location loc = cgf.getLoc(e->getSourceRange());
+ CharUnits align = cgf.getContext().getDeclAlign(fd);
+
+ assert(!cir::MissingFeatures::sanitizers());
+
+ mlir::Type fnTy = funcOp.getFunctionType();
+ mlir::Type ptrTy = cir::PointerType::get(fnTy);
+ mlir::Value addr = cgf.getBuilder().create<cir::GetGlobalOp>(
+ loc, ptrTy, funcOp.getSymName());
+
+ if (funcOp.getFunctionType() != cgf.convertType(fd->getType())) {
+ fnTy = cgf.convertType(fd->getType());
+ ptrTy = cir::PointerType::get(fnTy);
+
+ addr = cir::CastOp::create(cgf.getBuilder(), addr.getLoc(), ptrTy,
+ cir::CastKind::bitcast, addr);
+ }
+
+ return cgf.makeAddrLValue(Address(addr, fnTy, align), e->getType(),
+ AlignmentSource::Decl);
+}
+
LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
const NamedDecl *nd = e->getDecl();
QualType ty = e->getType();
@@ -588,6 +653,12 @@ LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
? emitLoadOfReferenceLValue(addr, getLoc(e->getSourceRange()),
vd->getType(), AlignmentSource::Decl)
: makeAddrLValue(addr, ty, AlignmentSource::Decl);
+
+ // Statics are defined as globals, so they are not included in the
+ // function's symbol table.
+ assert((vd->isStaticLocal() || symbolTable.count(vd)) &&
+ "non-static locals should be already mapped");
+
return lv;
}
@@ -600,6 +671,16 @@ LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
return emitLValue(bd->getBinding());
}
+ if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
+ LValue lv = emitFunctionDeclLValue(*this, e, fd);
+
+ // Emit debuginfo for the function declaration if the target wants to.
+ if (getContext().getTargetInfo().allowDebugInfoForExternalRef())
+ assert(!cir::MissingFeatures::generateDebugInfo());
+
+ return lv;
+ }
+
cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
return LValue();
}
@@ -1011,9 +1092,7 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
- const auto *derivedClassTy =
- e->getSubExpr()->getType()->castAs<clang::RecordType>();
- auto *derivedClassDecl = cast<CXXRecordDecl>(derivedClassTy->getDecl());
+ auto *derivedClassDecl = e->getSubExpr()->getType()->castAsCXXRecordDecl();
LValue lv = emitLValue(e->getSubExpr());
Address thisAddr = lv.getAddress();
@@ -1037,10 +1116,22 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
llvm_unreachable("Invalid cast kind");
}
+static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf,
+ const MemberExpr *me) {
+ if (auto *vd = dyn_cast<VarDecl>(me->getMemberDecl())) {
+ // Try to emit static variable member expressions as DREs.
+ return DeclRefExpr::Create(
+ cgf.getContext(), NestedNameSpecifierLoc(), SourceLocation(), vd,
+ /*RefersToEnclosingVariableOrCapture=*/false, me->getExprLoc(),
+ me->getType(), me->getValueKind(), nullptr, nullptr, me->isNonOdrUse());
+ }
+ return nullptr;
+}
+
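
The rewrite lets member syntax over a static data member reuse the DeclRefExpr path; the base is still evaluated for side effects, as the hunk below shows:

    struct S { static int counter; };
    int read(S &s) {
      // 's.counter' becomes a synthesized DeclRefExpr to 'counter';
      // emitIgnoredExpr(e->getBase()) still evaluates 's' itself.
      return s.counter;
    }
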
LValue CIRGenFunction::emitMemberExpr(const MemberExpr *e) {
- if (isa<VarDecl>(e->getMemberDecl())) {
- cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: VarDecl");
- return LValue();
+ if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, e)) {
+ emitIgnoredExpr(e->getBase());
+ return emitDeclRefLValue(dre);
}
Expr *baseExpr = e->getBase();
@@ -1162,26 +1253,27 @@ static void pushTemporaryCleanup(CIRGenFunction &cgf,
return;
}
- CXXDestructorDecl *referenceTemporaryDtor = nullptr;
- if (const clang::RecordType *rt = e->getType()
- ->getBaseElementTypeUnsafe()
- ->getAs<clang::RecordType>()) {
- // Get the destructor for the reference temporary.
- auto *classDecl = cast<CXXRecordDecl>(rt->getDecl());
- if (!classDecl->hasTrivialDestructor())
- referenceTemporaryDtor = classDecl->getDestructor();
- }
-
- if (!referenceTemporaryDtor)
+ const QualType::DestructionKind dk = e->getType().isDestructedType();
+ if (dk == QualType::DK_none)
return;
- // Call the destructor for the temporary.
switch (m->getStorageDuration()) {
case SD_Static:
- case SD_Thread:
- cgf.cgm.errorNYI(e->getSourceRange(),
- "pushTemporaryCleanup: static/thread storage duration");
- return;
+ case SD_Thread: {
+ CXXDestructorDecl *referenceTemporaryDtor = nullptr;
+ if (const auto *classDecl =
+ e->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ classDecl && !classDecl->hasTrivialDestructor())
+ // Get the destructor for the reference temporary.
+ referenceTemporaryDtor = classDecl->getDestructor();
+
+ if (!referenceTemporaryDtor)
+ return;
+
+ cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: static/thread "
+ "storage duration with destructors");
+ break;
+ }
case SD_FullExpression:
cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
@@ -1373,11 +1465,6 @@ RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot) {
llvm_unreachable("bad evaluation kind");
}
-static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
- assert(!cir::MissingFeatures::weakRefReference());
- return cgm.getAddrOfFunction(gd);
-}
-
// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
@@ -1540,10 +1627,10 @@ CIRGenCallee CIRGenFunction::emitCallee(const clang::Expr *e) {
cgm.errorNYI(e->getSourceRange(),
"emitCallee: call to member function is NYI");
return {};
+ } else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
+ return CIRGenCallee::forPseudoDestructor(pde);
}
- assert(!cir::MissingFeatures::opCallPseudoDtor());
-
// Otherwise, we have an indirect reference.
mlir::Value calleePtr;
QualType functionType;
@@ -1595,10 +1682,8 @@ RValue CIRGenFunction::emitCallExpr(const clang::CallExpr *e,
return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
returnValue);
- if (isa<CXXPseudoDestructorExpr>(e->getCallee())) {
- cgm.errorNYI(e->getSourceRange(), "call to pseudo destructor");
- }
- assert(!cir::MissingFeatures::opCallPseudoDtor());
+ if (callee.isPseudoDestructor())
+ return emitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
return emitCall(e->getCallee()->getType(), callee, e, returnValue);
}
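
A pseudo-destructor call destroys nothing, so emitCXXPseudoDestructorExpr only needs to evaluate the base expression. The canonical source shape:

    template <typename T>
    void destroy(T *p) {
      p->~T(); // for T = int this is a CXXPseudoDestructorExpr: no code
               // is emitted beyond evaluating 'p'
    }
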
@@ -1615,7 +1700,9 @@ void CIRGenFunction::emitIgnoredExpr(const Expr *e) {
emitLValue(e);
}
-Address CIRGenFunction::emitArrayToPointerDecay(const Expr *e) {
+Address CIRGenFunction::emitArrayToPointerDecay(const Expr *e,
+ LValueBaseInfo *baseInfo) {
+ assert(!cir::MissingFeatures::opTBAA());
assert(e->getType()->isArrayType() &&
"Array to pointer decay must have array source type!");
@@ -1831,7 +1918,7 @@ RValue CIRGenFunction::emitCXXMemberCallExpr(const CXXMemberCallExpr *ce,
}
bool hasQualifier = me->hasQualifier();
- NestedNameSpecifier *qualifier = hasQualifier ? me->getQualifier() : nullptr;
+ NestedNameSpecifier qualifier = me->getQualifier();
bool isArrow = me->isArrow();
const Expr *base = me->getBase();
@@ -1885,12 +1972,8 @@ void CIRGenFunction::emitCXXConstructExpr(const CXXConstructExpr *e,
delegating = true;
break;
case CXXConstructionKind::VirtualBase:
- // This should just set 'forVirtualBase' to true and fall through, but
- // virtual base class support is otherwise missing, so this needs to wait
- // until it can be tested.
- cgm.errorNYI(e->getSourceRange(),
- "emitCXXConstructExpr: virtual base constructor");
- return;
+ forVirtualBase = true;
+ [[fallthrough]];
case CXXConstructionKind::NonVirtualBase:
type = Ctor_Base;
break;
@@ -2052,8 +2135,8 @@ cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
///
/// For named members of enums, this is the only way they are emitted.
CIRGenFunction::ConstantEmission
-CIRGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
- ValueDecl *value = refExpr->getDecl();
+CIRGenFunction::tryEmitAsConstant(const DeclRefExpr *refExpr) {
+ const ValueDecl *value = refExpr->getDecl();
// There is a lot more to do here, but for now only EnumConstantDecl is
// supported.
@@ -2086,6 +2169,13 @@ CIRGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
return ConstantEmission::forValue(cstToEmit);
}
+CIRGenFunction::ConstantEmission
+CIRGenFunction::tryEmitAsConstant(const MemberExpr *me) {
+ if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, me))
+ return tryEmitAsConstant(dre);
+ return ConstantEmission();
+}
+
mlir::Value CIRGenFunction::emitScalarConstant(
const CIRGenFunction::ConstantEmission &constant, Expr *e) {
assert(constant && "not a constant");
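
emitFunctionDeclLValue covers naming a function as a value; the bitcast branch handles declarations whose emitted FuncOp type differs from the type at the use site. A minimal trigger (a hedged illustration):

    void callback(int);
    void install() {
      // '&callback' takes the LValue of a DeclRefExpr to a FunctionDecl,
      // which now routes through emitFunctionDeclLValue and GetGlobalOp.
      void (*fp)(int) = &callback;
      (void)fp;
    }
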
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index 51aab95..113f996 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -69,6 +69,12 @@ public:
void Visit(Expr *e) { StmtVisitor<AggExprEmitter>::Visit(e); }
void VisitCallExpr(const CallExpr *e);
+ void VisitStmtExpr(const StmtExpr *e) {
+ CIRGenFunction::StmtExprEvaluation eval(cgf);
+ Address retAlloca =
+ cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
+ (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca, dest);
+ }
void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }
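
GNU statement expressions of aggregate type now get a real destination slot via VisitStmtExpr; for example (a hedged illustration, requires the GNU extension):

    struct Agg { int a, b; };
    Agg make() {
      return ({ Agg tmp{1, 2}; tmp; }); // StmtExpr yielding an aggregate
    }
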
@@ -78,6 +84,205 @@ public:
void visitCXXParenListOrInitListExpr(Expr *e, ArrayRef<Expr *> args,
FieldDecl *initializedFieldInUnion,
Expr *arrayFiller);
+
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *e) {
+ assert(!cir::MissingFeatures::aggValueSlotDestructedFlag());
+ Visit(e->getSubExpr());
+ }
+
+ // Stubs -- These should be moved up when they are implemented.
+ void VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *e) {
+ // We shouldn't really get here, but we do because of missing handling for
+ // emitting constant aggregate initializers. If we just ignore this, a
+ // fallback handler will do the right thing.
+ assert(!cir::MissingFeatures::constEmitterAggILE());
+ return;
+ }
+ void VisitCastExpr(CastExpr *e) {
+ switch (e->getCastKind()) {
+ case CK_LValueToRValue:
+ assert(!cir::MissingFeatures::aggValueSlotVolatile());
+ [[fallthrough]];
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ case CK_ConstructorConversion:
+ assert(cgf.getContext().hasSameUnqualifiedType(e->getSubExpr()->getType(),
+ e->getType()) &&
+ "Implicit cast types must be compatible");
+ Visit(e->getSubExpr());
+ break;
+ default:
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ std::string("AggExprEmitter: VisitCastExpr: ") +
+ e->getCastKindName());
+ break;
+ }
+ }
+ void VisitStmt(Stmt *s) {
+ cgf.cgm.errorNYI(s->getSourceRange(),
+ std::string("AggExprEmitter::VisitStmt: ") +
+ s->getStmtClassName());
+ }
+ void VisitParenExpr(ParenExpr *pe) {
+ cgf.cgm.errorNYI(pe->getSourceRange(), "AggExprEmitter: VisitParenExpr");
+ }
+ void VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
+ cgf.cgm.errorNYI(ge->getSourceRange(),
+ "AggExprEmitter: VisitGenericSelectionExpr");
+ }
+ void VisitCoawaitExpr(CoawaitExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoawaitExpr");
+ }
+ void VisitCoyieldExpr(CoyieldExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoyieldExpr");
+ }
+ void VisitUnaryCoawait(UnaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryCoawait");
+ }
+ void VisitUnaryExtension(UnaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitUnaryExtension");
+ }
+ void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitSubstNonTypeTemplateParmExpr");
+ }
+ void VisitConstantExpr(ConstantExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitConstantExpr");
+ }
+ void VisitMemberExpr(MemberExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitMemberExpr");
+ }
+ void VisitUnaryDeref(UnaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryDeref");
+ }
+ void VisitStringLiteral(StringLiteral *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitStringLiteral");
+ }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitCompoundLiteralExpr");
+ }
+ void VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitArraySubscriptExpr");
+ }
+ void VisitPredefinedExpr(const PredefinedExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitPredefinedExpr");
+ }
+ void VisitBinaryOperator(const BinaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitBinaryOperator");
+ }
+ void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitPointerToDataMemberBinaryOperator");
+ }
+ void VisitBinAssign(const BinaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinAssign");
+ }
+ void VisitBinComma(const BinaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinComma");
+ }
+ void VisitBinCmp(const BinaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinCmp");
+ }
+ void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitCXXRewrittenBinaryOperator");
+ }
+ void VisitObjCMessageExpr(ObjCMessageExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitObjCMessageExpr");
+ }
+ void VisitObjCIVarRefExpr(ObjCIvarRefExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitObjCIVarRefExpr");
+ }
+
+ void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitDesignatedInitUpdateExpr");
+ }
+ void VisitAbstractConditionalOperator(const AbstractConditionalOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitAbstractConditionalOperator");
+ }
+ void VisitChooseExpr(const ChooseExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitChooseExpr");
+ }
+ void VisitCXXParenListInitExpr(CXXParenListInitExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitCXXParenListInitExpr");
+ }
+ void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *e,
+ llvm::Value *outerBegin = nullptr) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitArrayInitLoopExpr");
+ }
+ void VisitImplicitValueInitExpr(ImplicitValueInitExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitImplicitValueInitExpr");
+ }
+ void VisitNoInitExpr(NoInitExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitNoInitExpr");
+ }
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
+ cgf.cgm.errorNYI(dae->getSourceRange(),
+ "AggExprEmitter: VisitCXXDefaultArgExpr");
+ }
+ void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
+ cgf.cgm.errorNYI(die->getSourceRange(),
+ "AggExprEmitter: VisitCXXDefaultInitExpr");
+ }
+ void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitCXXInheritedCtorInitExpr");
+ }
+ void VisitLambdaExpr(LambdaExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitLambdaExpr");
+ }
+ void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitCXXStdInitializerListExpr");
+ }
+
+ void VisitExprWithCleanups(ExprWithCleanups *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitExprWithCleanups");
+ }
+ void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitCXXScalarValueInitExpr");
+ }
+ void VisitCXXTypeidExpr(CXXTypeidExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXTypeidExpr");
+ }
+ void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitMaterializeTemporaryExpr");
+ }
+ void VisitOpaqueValueExpr(OpaqueValueExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitOpaqueValueExpr");
+ }
+
+ void VisitPseudoObjectExpr(PseudoObjectExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitPseudoObjectExpr");
+ }
+
+ void VisitVAArgExpr(VAArgExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitVAArgExpr");
+ }
+
+ void VisitCXXThrowExpr(const CXXThrowExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXThrowExpr");
+ }
+ void VisitAtomicExpr(AtomicExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitAtomicExpr");
+ }
};
} // namespace
@@ -124,8 +329,8 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
const QualType elementType =
cgf.getContext().getAsArrayType(arrayQTy)->getElementType();
- if (elementType.isDestructedType()) {
- cgf.cgm.errorNYI(loc, "dtorKind NYI");
+ if (elementType.isDestructedType() && cgf.cgm.getLangOpts().Exceptions) {
+ cgf.cgm.errorNYI(loc, "initialized array requires destruction");
return;
}
@@ -135,9 +340,9 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
const cir::PointerType cirElementPtrType =
builder.getPointerTo(cirElementType);
- auto begin = builder.create<cir::CastOp>(loc, cirElementPtrType,
- cir::CastKind::array_to_ptrdecay,
- destPtr.getPointer());
+ auto begin = cir::CastOp::create(builder, loc, cirElementPtrType,
+ cir::CastKind::array_to_ptrdecay,
+ destPtr.getPointer());
const CharUnits elementSize =
cgf.getContext().getTypeSizeInChars(elementType);
@@ -182,8 +387,8 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
// Advance to the start of the rest of the array.
if (numInitElements) {
one = builder.getConstantInt(loc, cgf.PtrDiffTy, 1);
- element = builder.create<cir::PtrStrideOp>(loc, cirElementPtrType,
- element, one);
+ element = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
+ element, one);
}
// Allocate the temporary variable
@@ -193,25 +398,52 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
LValue tmpLV = cgf.makeAddrLValue(tmpAddr, elementPtrType);
cgf.emitStoreThroughLValue(RValue::get(element), tmpLV);
- // TODO(CIR): Replace this part later with cir::DoWhileOp
- for (unsigned i = numInitElements; i != numArrayElements; ++i) {
- cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
-
- // Emit the actual filler expression.
- const LValue elementLV = cgf.makeAddrLValue(
- Address(currentElement, cirElementType, elementAlign), elementType);
-
- if (arrayFiller)
- emitInitializationToLValue(arrayFiller, elementLV);
- else
- emitNullInitializationToLValue(loc, elementLV);
-
- // Advance pointer and store them to temporary variable
- one = builder.getConstantInt(loc, cgf.PtrDiffTy, 1);
- cir::PtrStrideOp nextElement =
- builder.createPtrStride(loc, currentElement, one);
- cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);
- }
+ // Compute the end of the array.
+ cir::ConstantOp numArrayElementsConst = builder.getConstInt(
+ loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), numArrayElements);
+ mlir::Value end = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
+ begin, numArrayElementsConst);
+
+ builder.createDoWhile(
+ loc,
+ /*condBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
+ mlir::Type boolTy = cgf.convertType(cgf.getContext().BoolTy);
+ cir::CmpOp cmp = cir::CmpOp::create(
+ builder, loc, boolTy, cir::CmpOpKind::ne, currentElement, end);
+ builder.createCondition(cmp);
+ },
+ /*bodyBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
+
+ assert(!cir::MissingFeatures::requiresCleanups());
+
+ // Emit the actual filler expression.
+ LValue elementLV = cgf.makeAddrLValue(
+ Address(currentElement, cirElementType, elementAlign),
+ elementType);
+ if (arrayFiller)
+ emitInitializationToLValue(arrayFiller, elementLV);
+ else
+ emitNullInitializationToLValue(loc, elementLV);
+
+ // Tell the EH cleanup that we finished with the last element.
+ if (cgf.cgm.getLangOpts().Exceptions) {
+ cgf.cgm.errorNYI(loc, "update destructed array element for EH");
+ return;
+ }
+
+ // Advance the pointer and store it back to the temporary variable.
+ cir::ConstantOp one = builder.getConstInt(
+ loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), 1);
+ auto nextElement = cir::PtrStrideOp::create(
+ builder, loc, cirElementPtrType, currentElement, one);
+ cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);
+
+ builder.createYield(loc);
+ });
}
}
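The cir.do-while above models the classic pointer-walk filler loop. A minimal C++ sketch of the semantics (illustrative only, not the emitted IR; it assumes at least one element remains to be filled, i.e. numInitElements < numArrayElements, since a do-while always runs its body once):

  // Illustrative sketch of the loop modeled by the cir.do-while above.
  void fillRest(int *begin, unsigned numInitElements,
                unsigned numArrayElements) {
    int *cur = begin + numInitElements;
    int *const end = begin + numArrayElements;
    do {
      *cur = 0; // the arrayFiller expression, or null-initialization
      ++cur;    // advance and store back through the temporary variable
    } while (cur != end);
  }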
@@ -376,7 +608,7 @@ void AggExprEmitter::visitCXXParenListOrInitListExpr(
// the disadvantage is that the generated code is more difficult for
// the optimizer, especially with bitfields.
unsigned numInitElements = args.size();
- RecordDecl *record = e->getType()->castAs<RecordType>()->getDecl();
+ auto *record = e->getType()->castAsRecordDecl();
// We'll need to enter cleanup scopes in case any of the element
// initializers throws an exception.
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
new file mode 100644
index 0000000..a320508
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -0,0 +1,36 @@
+//===--- CIRGenExprCXX.cpp - Emit CIR Code for C++ expressions ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ expressions
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+#include "clang/AST/ExprCXX.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+
+RValue CIRGenFunction::emitCXXPseudoDestructorExpr(
+ const CXXPseudoDestructorExpr *expr) {
+ QualType destroyedType = expr->getDestroyedType();
+ if (destroyedType.hasStrongOrWeakObjCLifetime()) {
+ assert(!cir::MissingFeatures::objCLifetime());
+ cgm.errorNYI(expr->getExprLoc(),
+ "emitCXXPseudoDestructorExpr: Objective-C lifetime is NYI");
+ } else {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ emitIgnoredExpr(expr->getBase());
+ }
+
+ return RValue::get(nullptr);
+}
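A hypothetical snippet illustrating the path above: a pseudo-destructor call on a scalar type evaluates only its base expression and emits no destructor code (the helper name is invented for illustration).

  using IntAlias = int;
  IntAlias *get();                              // assumed helper
  void callPseudoDtor() { get()->~IntAlias(); } // only get() is evaluated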
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index c22cf60..cbdd525 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -62,6 +62,14 @@ public:
mlir::Value VisitImplicitCastExpr(ImplicitCastExpr *e);
mlir::Value VisitInitListExpr(InitListExpr *e);
+ mlir::Value VisitMemberExpr(MemberExpr *me) {
+ if (CIRGenFunction::ConstantEmission constant = cgf.tryEmitAsConstant(me)) {
+ cgf.emitIgnoredExpr(me->getBase());
+ return emitConstant(constant, me);
+ }
+ return emitLoadOfLValue(me);
+ }
+
mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
return emitLoadOfLValue(e);
}
@@ -128,17 +136,11 @@ public:
mlir::Value emitBinAdd(const BinOpInfo &op);
mlir::Value emitBinSub(const BinOpInfo &op);
mlir::Value emitBinMul(const BinOpInfo &op);
+ mlir::Value emitBinDiv(const BinOpInfo &op);
QualType getPromotionType(QualType ty, bool isDivOpCode = false) {
if (auto *complexTy = ty->getAs<ComplexType>()) {
QualType elementTy = complexTy->getElementType();
- if (isDivOpCode && elementTy->isFloatingType() &&
- cgf.getLangOpts().getComplexRange() ==
- LangOptions::ComplexRangeKind::CX_Promoted) {
- cgf.cgm.errorNYI("HigherPrecisionTypeForComplexArithmetic");
- return QualType();
- }
-
if (elementTy.UseExcessPrecision(cgf.getContext()))
return cgf.getContext().getComplexType(cgf.getContext().FloatTy);
}
@@ -154,13 +156,14 @@ public:
e->getType(), e->getOpcode() == BinaryOperatorKind::BO_Div); \
mlir::Value result = emitBin##OP(emitBinOps(e, promotionTy)); \
if (!promotionTy.isNull()) \
- cgf.cgm.errorNYI("Binop emitUnPromotedValue"); \
+ result = cgf.emitUnPromotedValue(result, e->getType()); \
return result; \
}
HANDLEBINOP(Add)
HANDLEBINOP(Sub)
HANDLEBINOP(Mul)
+ HANDLEBINOP(Div)
#undef HANDLEBINOP
// Compound assignments.
@@ -171,6 +174,16 @@ public:
mlir::Value VisitBinSubAssign(const CompoundAssignOperator *e) {
return emitCompoundAssign(e, &ComplexExprEmitter::emitBinSub);
}
+
+ mlir::Value VisitBinMulAssign(const CompoundAssignOperator *e) {
+ return emitCompoundAssign(e, &ComplexExprEmitter::emitBinMul);
+ }
+
+ mlir::Value VisitBinDivAssign(const CompoundAssignOperator *e) {
+ return emitCompoundAssign(e, &ComplexExprEmitter::emitBinDiv);
+ }
+
+ mlir::Value VisitVAArgExpr(VAArgExpr *e);
};
} // namespace
@@ -184,12 +197,6 @@ static const ComplexType *getComplexType(QualType type) {
}
#endif // NDEBUG
-static mlir::Value createComplexFromReal(CIRGenBuilderTy &builder,
- mlir::Location loc, mlir::Value real) {
- mlir::Value imag = builder.getNullValue(real.getType(), loc);
- return builder.createComplexCreate(loc, real, imag);
-}
-
LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *e,
mlir::Value &value) {
assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
@@ -322,10 +329,8 @@ mlir::Value ComplexExprEmitter::emitCast(CastKind ck, Expr *op,
mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *e) {
QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Plus, promotionTy);
- if (!promotionTy.isNull()) {
- cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryPlus emitUnPromotedValue");
- return {};
- }
+ if (!promotionTy.isNull())
+ return cgf.emitUnPromotedValue(result, e->getSubExpr()->getType());
return result;
}
@@ -347,10 +352,8 @@ mlir::Value ComplexExprEmitter::VisitPlusMinus(const UnaryOperator *e,
mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *e) {
QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Minus, promotionTy);
- if (!promotionTy.isNull()) {
- cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryMinus emitUnPromotedValue");
- return {};
- }
+ if (!promotionTy.isNull())
+ return cgf.emitUnPromotedValue(result, e->getSubExpr()->getType());
return result;
}
@@ -596,6 +599,10 @@ mlir::Value ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *e) {
return builder.createNot(op);
}
+mlir::Value ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *e) {
+ return cgf.emitVAArg(e);
+}
+
mlir::Value ComplexExprEmitter::emitPromoted(const Expr *e,
QualType promotionTy) {
e = e->IgnoreParens();
@@ -640,8 +647,12 @@ ComplexExprEmitter::emitPromotedComplexOperand(const Expr *e,
return Visit(const_cast<Expr *>(e));
}
- cgf.cgm.errorNYI("emitPromotedComplexOperand non-complex type");
- return {};
+ if (!promotionTy.isNull()) {
+ QualType complexElementTy =
+ promotionTy->castAs<ComplexType>()->getElementType();
+ return cgf.emitPromotedScalarExpr(e, complexElementTy);
+ }
+ return cgf.emitScalarExpr(e);
}
ComplexExprEmitter::BinOpInfo
@@ -686,13 +697,10 @@ LValue ComplexExprEmitter::emitCompoundAssignLValue(
// The RHS should have been converted to the computation type.
if (e->getRHS()->getType()->isRealFloatingType()) {
if (!promotionTypeRHS.isNull()) {
- opInfo.rhs = createComplexFromReal(
- cgf.getBuilder(), loc,
- cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS));
+ opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
} else {
assert(cgf.getContext().hasSameUnqualifiedType(complexElementTy, rhsTy));
- opInfo.rhs = createComplexFromReal(cgf.getBuilder(), loc,
- cgf.emitScalarExpr(e->getRHS()));
+ opInfo.rhs = cgf.emitScalarExpr(e->getRHS());
}
} else {
if (!promotionTypeRHS.isNull()) {
@@ -712,8 +720,27 @@ LValue ComplexExprEmitter::emitCompoundAssignLValue(
QualType destTy = promotionTypeLHS.isNull() ? opInfo.ty : promotionTypeLHS;
opInfo.lhs = emitComplexToComplexCast(lhsValue, lhsTy, destTy, exprLoc);
} else {
- cgf.cgm.errorNYI("emitCompoundAssignLValue emitLoadOfScalar");
- return {};
+ mlir::Value lhsVal = cgf.emitLoadOfScalar(lhs, exprLoc);
+ // For floating point real operands we can directly pass the scalar form
+ // to the binary operator emission and potentially get more efficient code.
+ if (lhsTy->isRealFloatingType()) {
+ QualType promotedComplexElementTy;
+ if (!promotionTypeLHS.isNull()) {
+ promotedComplexElementTy =
+ cast<ComplexType>(promotionTypeLHS)->getElementType();
+ if (!cgf.getContext().hasSameUnqualifiedType(promotedComplexElementTy,
+ promotionTypeLHS))
+ lhsVal = cgf.emitScalarConversion(lhsVal, lhsTy,
+ promotedComplexElementTy, exprLoc);
+ } else {
+ if (!cgf.getContext().hasSameUnqualifiedType(complexElementTy, lhsTy))
+ lhsVal = cgf.emitScalarConversion(lhsVal, lhsTy, complexElementTy,
+ exprLoc);
+ }
+ opInfo.lhs = lhsVal;
+ } else {
+ opInfo.lhs = emitScalarToComplexCast(lhsVal, lhsTy, opInfo.ty, exprLoc);
+ }
}
// Expand the binary operator.
@@ -755,13 +782,45 @@ mlir::Value ComplexExprEmitter::emitCompoundAssign(
mlir::Value ComplexExprEmitter::emitBinAdd(const BinOpInfo &op) {
assert(!cir::MissingFeatures::fastMathFlags());
assert(!cir::MissingFeatures::cgFPOptionsRAII());
- return builder.create<cir::ComplexAddOp>(op.loc, op.lhs, op.rhs);
+
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
+ mlir::isa<cir::ComplexType>(op.rhs.getType()))
+ return builder.create<cir::ComplexAddOp>(op.loc, op.lhs, op.rhs);
+
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
+ mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
+ mlir::Value newReal = builder.createAdd(op.loc, real, op.rhs);
+ return builder.createComplexCreate(op.loc, newReal, imag);
+ }
+
+ assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
+ mlir::Value real = builder.createComplexReal(op.loc, op.rhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.rhs);
+ mlir::Value newReal = builder.createAdd(op.loc, op.lhs, real);
+ return builder.createComplexCreate(op.loc, newReal, imag);
}
mlir::Value ComplexExprEmitter::emitBinSub(const BinOpInfo &op) {
assert(!cir::MissingFeatures::fastMathFlags());
assert(!cir::MissingFeatures::cgFPOptionsRAII());
- return builder.create<cir::ComplexSubOp>(op.loc, op.lhs, op.rhs);
+
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
+ mlir::isa<cir::ComplexType>(op.rhs.getType()))
+ return builder.create<cir::ComplexSubOp>(op.loc, op.lhs, op.rhs);
+
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
+ mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
+ mlir::Value newReal = builder.createSub(op.loc, real, op.rhs);
+ return builder.createComplexCreate(op.loc, newReal, imag);
+ }
+
+ assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
+ mlir::Value real = builder.createComplexReal(op.loc, op.rhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.rhs);
+ mlir::Value newReal = builder.createSub(op.loc, op.lhs, real);
+ return builder.createComplexCreate(op.loc, newReal, imag);
}
static cir::ComplexRangeKind
@@ -776,7 +835,7 @@ getComplexRangeAttr(LangOptions::ComplexRangeKind range) {
case LangOptions::CX_Basic:
return cir::ComplexRangeKind::Basic;
case LangOptions::CX_None:
- // The default value for ComplexRangeKind is Full is no option is selected
+ // The default value for ComplexRangeKind is Full if no option is selected
return cir::ComplexRangeKind::Full;
}
}
@@ -784,9 +843,64 @@ getComplexRangeAttr(LangOptions::ComplexRangeKind range) {
mlir::Value ComplexExprEmitter::emitBinMul(const BinOpInfo &op) {
assert(!cir::MissingFeatures::fastMathFlags());
assert(!cir::MissingFeatures::cgFPOptionsRAII());
+
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
+ mlir::isa<cir::ComplexType>(op.rhs.getType())) {
+ cir::ComplexRangeKind rangeKind =
+ getComplexRangeAttr(op.fpFeatures.getComplexRange());
+ return builder.create<cir::ComplexMulOp>(op.loc, op.lhs, op.rhs, rangeKind);
+ }
+
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
+ mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
+ mlir::Value newReal = builder.createMul(op.loc, real, op.rhs);
+ mlir::Value newImag = builder.createMul(op.loc, imag, op.rhs);
+ return builder.createComplexCreate(op.loc, newReal, newImag);
+ }
+
+ assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
+ mlir::Value real = builder.createComplexReal(op.loc, op.rhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.rhs);
+ mlir::Value newReal = builder.createMul(op.loc, op.lhs, real);
+ mlir::Value newImag = builder.createMul(op.loc, op.lhs, imag);
+ return builder.createComplexCreate(op.loc, newReal, newImag);
+}
+
+mlir::Value ComplexExprEmitter::emitBinDiv(const BinOpInfo &op) {
+ assert(!cir::MissingFeatures::fastMathFlags());
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+
+ // Handle division between two complex values. In the case of complex integer
+ // types mixed with scalar integers, the scalar integer type will always be
+ // promoted to a complex integer value with a zero imaginary component when
+ // the AST is formed.
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
+ mlir::isa<cir::ComplexType>(op.rhs.getType())) {
+ cir::ComplexRangeKind rangeKind =
+ getComplexRangeAttr(op.fpFeatures.getComplexRange());
+ return cir::ComplexDivOp::create(builder, op.loc, op.lhs, op.rhs,
+ rangeKind);
+ }
+
+ // The C99 standard (G.5.1) defines division of a complex value by a real
+ // value in the following simplified form.
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
+ assert(mlir::cast<cir::ComplexType>(op.lhs.getType()).getElementType() ==
+ op.rhs.getType());
+ mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
+ mlir::Value newReal = builder.createFDiv(op.loc, real, op.rhs);
+ mlir::Value newImag = builder.createFDiv(op.loc, imag, op.rhs);
+ return builder.createComplexCreate(op.loc, newReal, newImag);
+ }
+
+ assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
+ cir::ConstantOp nullValue = builder.getNullValue(op.lhs.getType(), op.loc);
+ mlir::Value lhs = builder.createComplexCreate(op.loc, op.lhs, nullValue);
cir::ComplexRangeKind rangeKind =
getComplexRangeAttr(op.fpFeatures.getComplexRange());
- return builder.create<cir::ComplexMulOp>(op.loc, op.lhs, op.rhs, rangeKind);
+ return cir::ComplexDivOp::create(builder, op.loc, lhs, op.rhs, rangeKind);
}
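The G.5.1 simplification used above needs only two scalar divisions, matching the two createFDiv calls. A minimal sketch, assuming Clang's _Complex extension in C++:

  // (a + b*i) / r == (a/r) + (b/r)*i
  double _Complex divByReal(double _Complex z, double r) {
    return z / r; // lowered as two scalar fdivs on the real and imag parts
  }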
LValue CIRGenFunction::emitComplexAssignmentLValue(const BinaryOperator *e) {
@@ -813,9 +927,9 @@ using CompoundFunc =
static CompoundFunc getComplexOp(BinaryOperatorKind op) {
switch (op) {
case BO_MulAssign:
- llvm_unreachable("getComplexOp: BO_MulAssign");
+ return &ComplexExprEmitter::emitBinMul;
case BO_DivAssign:
- llvm_unreachable("getComplexOp: BO_DivAssign");
+ return &ComplexExprEmitter::emitBinDiv;
case BO_SubAssign:
return &ComplexExprEmitter::emitBinSub;
case BO_AddAssign:
@@ -884,3 +998,20 @@ mlir::Value CIRGenFunction::emitPromotedValue(mlir::Value result,
return builder.createCast(cir::CastKind::float_complex, result,
convertType(promotionType));
}
+
+mlir::Value CIRGenFunction::emitUnPromotedValue(mlir::Value result,
+ QualType unPromotionType) {
+ assert(!mlir::cast<cir::ComplexType>(result.getType()).isIntegerComplex() &&
+ "integral complex will never be promoted");
+ return builder.createCast(cir::CastKind::float_complex, result,
+ convertType(unPromotionType));
+}
+
+LValue CIRGenFunction::emitScalarCompoundAssignWithComplex(
+ const CompoundAssignOperator *e, mlir::Value &result) {
+ CompoundFunc op = getComplexOp(e->getOpcode());
+ RValue value;
+ LValue ret = ComplexExprEmitter(*this).emitCompoundAssignLValue(e, op, value);
+ result = value.getValue();
+ return ret;
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 5b3bf85..262d2548 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -285,7 +285,7 @@ emitArrayConstant(CIRGenModule &cgm, mlir::Type desiredType,
mlir::Type commonElementType, unsigned arrayBound,
SmallVectorImpl<mlir::TypedAttr> &elements,
mlir::TypedAttr filler) {
- const CIRGenBuilderTy &builder = cgm.getBuilder();
+ CIRGenBuilderTy &builder = cgm.getBuilder();
unsigned nonzeroLength = arrayBound;
if (elements.size() < nonzeroLength && builder.isNullValue(filler))
@@ -306,6 +306,33 @@ emitArrayConstant(CIRGenModule &cgm, mlir::Type desiredType,
if (trailingZeroes >= 8) {
assert(elements.size() >= nonzeroLength &&
"missing initializer for non-zero element");
+
+ if (commonElementType && nonzeroLength >= 8) {
+ // If all the elements had the same type up to the trailing zeroes and
+ // there are eight or more nonzero elements, emit a struct of two arrays
+ // (the nonzero data and the zeroinitializer).
+ SmallVector<mlir::Attribute, 4> eles;
+ eles.reserve(nonzeroLength);
+ for (const auto &element : elements)
+ eles.push_back(element);
+ auto initial = cir::ConstArrayAttr::get(
+ cir::ArrayType::get(commonElementType, nonzeroLength),
+ mlir::ArrayAttr::get(builder.getContext(), eles));
+ elements.resize(2);
+ elements[0] = initial;
+ } else {
+ // Otherwise, emit a struct with individual elements for each nonzero
+ // initializer, followed by a zeroinitializer array filler.
+ elements.resize(nonzeroLength + 1);
+ }
+
+ mlir::Type fillerType =
+ commonElementType
+ ? commonElementType
+ : mlir::cast<cir::ArrayType>(desiredType).getElementType();
+ fillerType = cir::ArrayType::get(fillerType, trailingZeroes);
+ elements.back() = cir::ZeroAttr::get(fillerType);
+ commonElementType = nullptr;
} else if (elements.size() != arrayBound) {
elements.resize(arrayBound, filler);
@@ -325,8 +352,13 @@ emitArrayConstant(CIRGenModule &cgm, mlir::Type desiredType,
mlir::ArrayAttr::get(builder.getContext(), eles));
}
- cgm.errorNYI("array with different type elements");
- return {};
+ SmallVector<mlir::Attribute, 4> eles;
+ eles.reserve(elements.size());
+ for (auto const &element : elements)
+ eles.push_back(element);
+
+ auto arrAttr = mlir::ArrayAttr::get(builder.getContext(), eles);
+ return builder.getAnonConstRecord(arrAttr, /*isPacked=*/true);
}
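To illustrate the trailing-zero path above: with a common element type, eight or more nonzero leading elements, and eight or more trailing zeros, the emitted constant is conceptually a packed record of { small data array, zero-filled array } rather than all elements spelled out. A sketch of a source-level trigger:

  // Conceptually emitted as { [8 x i32] data, [1016 x i32] zeroinit }.
  int table[1024] = {1, 2, 3, 4, 5, 6, 7, 8};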
//===----------------------------------------------------------------------===//
@@ -340,7 +372,11 @@ struct ConstantLValue {
llvm::PointerUnion<mlir::Value, mlir::Attribute> value;
bool hasOffsetApplied;
- ConstantLValue(std::nullptr_t) : value(nullptr), hasOffsetApplied(false) {}
+ /*implicit*/ ConstantLValue(std::nullptr_t)
+ : value(nullptr), hasOffsetApplied(false) {}
+ /*implicit*/ ConstantLValue(cir::GlobalViewAttr address)
+ : value(address), hasOffsetApplied(false) {}
+
ConstantLValue() : value(nullptr), hasOffsetApplied(false) {}
};
@@ -380,6 +416,43 @@ private:
ConstantLValue VisitCXXTypeidExpr(const CXXTypeidExpr *e);
ConstantLValue
VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e);
+
+ /// Return the GEP-like value offset.
+ mlir::ArrayAttr getOffset(mlir::Type ty) {
+ int64_t offset = value.getLValueOffset().getQuantity();
+ cir::CIRDataLayout layout(cgm.getModule());
+ SmallVector<int64_t, 3> idxVec;
+ cgm.getBuilder().computeGlobalViewIndicesFromFlatOffset(offset, ty, layout,
+ idxVec);
+
+ llvm::SmallVector<mlir::Attribute, 3> indices;
+ for (int64_t i : idxVec) {
+ mlir::IntegerAttr intAttr = cgm.getBuilder().getI32IntegerAttr(i);
+ indices.push_back(intAttr);
+ }
+
+ if (indices.empty())
+ return {};
+ return cgm.getBuilder().getArrayAttr(indices);
+ }
+
+ /// Apply the value offset to the given constant.
+ ConstantLValue applyOffset(ConstantLValue &c) {
+ // Handle attribute constant LValues.
+ if (auto attr = mlir::dyn_cast<mlir::Attribute>(c.value)) {
+ if (auto gv = mlir::dyn_cast<cir::GlobalViewAttr>(attr)) {
+ auto baseTy = mlir::cast<cir::PointerType>(gv.getType()).getPointee();
+ mlir::Type destTy = cgm.getTypes().convertTypeForMem(destType);
+ assert(!gv.getIndices() && "Global view is already indexed");
+ return cir::GlobalViewAttr::get(destTy, gv.getSymbol(),
+ getOffset(baseTy));
+ }
+ llvm_unreachable("Unsupported attribute type to offset");
+ }
+
+ cgm.errorNYI("ConstantLValue: non-attribute offset");
+ return {};
+ }
};
} // namespace
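A worked example of the flat-offset decomposition that getOffset relies on, assuming a 4-byte int: a flat byte offset of 52 into int m[3][5] splits as 52 / 20 = 2 (row) and (52 % 20) / 4 = 3 (column), so the GlobalViewAttr receives indices [2, 3].

  int m[3][5];
  int *p = &m[2][3]; // byte offset 2*20 + 3*4 == 52 from &m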
@@ -411,10 +484,8 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() {
return {};
// Apply the offset if necessary and not already done.
- if (!result.hasOffsetApplied) {
- cgm.errorNYI("ConstantLValueEmitter: apply offset");
- return {};
- }
+ if (!result.hasOffsetApplied)
+ value = applyOffset(result).value;
// Convert to the appropriate type; this could be an lvalue for
// an integer. FIXME: performAddrSpaceCast
@@ -453,15 +524,35 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
}
if (auto *fd = dyn_cast<FunctionDecl>(d)) {
- cgm.errorNYI(fd->getSourceRange(),
- "ConstantLValueEmitter: function decl");
- return {};
+ cir::FuncOp fop = cgm.getAddrOfFunction(fd);
+ CIRGenBuilderTy &builder = cgm.getBuilder();
+ mlir::MLIRContext *mlirContext = builder.getContext();
+ return cir::GlobalViewAttr::get(
+ builder.getPointerTo(fop.getFunctionType()),
+ mlir::FlatSymbolRefAttr::get(mlirContext, fop.getSymNameAttr()));
}
if (auto *vd = dyn_cast<VarDecl>(d)) {
- cgm.errorNYI(vd->getSourceRange(), "ConstantLValueEmitter: var decl");
- return {};
+ // We can never refer to a variable with local storage.
+ if (!vd->hasLocalStorage()) {
+ if (vd->isFileVarDecl() || vd->hasExternalStorage())
+ return cgm.getAddrOfGlobalVarAttr(vd);
+
+ if (vd->isLocalVarDecl()) {
+ cgm.errorNYI(vd->getSourceRange(),
+ "ConstantLValueEmitter: local var decl");
+ return {};
+ }
+ }
}
+
+ // Classic codegen handles MSGuidDecl, UnnamedGlobalConstantDecl, and
+ // TemplateParamObjectDecl, but it can also fall through from VarDecl,
+ // in which case it silently returns nullptr. For now, let's emit an
+ // error to see what cases we need to handle.
+ cgm.errorNYI(d->getSourceRange(),
+ "ConstantLValueEmitter: unhandled value decl");
+ return {};
}
// Handle typeid(T).
@@ -487,8 +578,7 @@ ConstantLValueEmitter::VisitCompoundLiteralExpr(const CompoundLiteralExpr *e) {
ConstantLValue
ConstantLValueEmitter::VisitStringLiteral(const StringLiteral *e) {
- cgm.errorNYI(e->getSourceRange(), "ConstantLValueEmitter: string literal");
- return {};
+ return cgm.getAddrOfConstantStringFromLiteral(e);
}
ConstantLValue
@@ -590,8 +680,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &d) {
// assignments and whatnots). Since this is for globals shouldn't
// be a problem for the near future.
if (cd->isTrivial() && cd->isDefaultConstructor()) {
- const auto *cxxrd =
- cast<CXXRecordDecl>(ty->getAs<RecordType>()->getDecl());
+ const auto *cxxrd = ty->castAsCXXRecordDecl();
if (cxxrd->getNumBases() != 0) {
// There may not be anything additional to do here, but this will
// force us to pause and test this path when it is supported.
@@ -650,6 +739,16 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForMemory(const APValue &value,
return (c ? emitForMemory(c, destType) : nullptr);
}
+mlir::Attribute ConstantEmitter::emitAbstract(const Expr *e,
+ QualType destType) {
+ AbstractStateRAII state{*this, true};
+ mlir::Attribute c = mlir::cast<mlir::Attribute>(tryEmitPrivate(e, destType));
+ if (!c)
+ cgm.errorNYI(e->getSourceRange(),
+ "emitAbstract failed, emit null constaant");
+ return c;
+}
+
mlir::Attribute ConstantEmitter::emitAbstract(SourceLocation loc,
const APValue &value,
QualType destType) {
@@ -671,6 +770,32 @@ mlir::Attribute ConstantEmitter::emitForMemory(mlir::Attribute c,
return c;
}
+mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const Expr *e,
+ QualType destType) {
+ assert(!destType->isVoidType() && "can't emit a void constant");
+
+ if (mlir::Attribute c =
+ ConstExprEmitter(*this).Visit(const_cast<Expr *>(e), destType))
+ return llvm::dyn_cast<mlir::TypedAttr>(c);
+
+ Expr::EvalResult result;
+
+ bool success = false;
+
+ if (destType->isReferenceType())
+ success = e->EvaluateAsLValue(result, cgm.getASTContext());
+ else
+ success =
+ e->EvaluateAsRValue(result, cgm.getASTContext(), inConstantContext);
+
+ if (success && !result.hasSideEffects()) {
+ mlir::Attribute c = tryEmitPrivate(result.Val, destType);
+ return llvm::dyn_cast<mlir::TypedAttr>(c);
+ }
+
+ return nullptr;
+}
+
mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &value,
QualType destType) {
auto &builder = cgm.getBuilder();
@@ -822,7 +947,7 @@ mlir::Value CIRGenModule::emitNullConstant(QualType t, mlir::Location loc) {
errorNYI("CIRGenModule::emitNullConstant ConstantArrayType");
}
- if (t->getAs<RecordType>())
+ if (t->isRecordType())
errorNYI("CIRGenModule::emitNullConstant RecordType");
assert(t->isMemberDataPointerType() &&
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 3e06513..0e000cc 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -92,6 +92,10 @@ public:
mlir::Value value, CastKind kind,
QualType destTy);
+ mlir::Value emitNullValue(QualType ty, mlir::Location loc) {
+ return cgf.cgm.emitNullConstant(ty, loc);
+ }
+
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
return builder.createFloatingCast(result, cgf.convertType(promotionType));
}
@@ -182,9 +186,31 @@ public:
return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
}
+ mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *e) {
+ if (e->getType()->isVoidType())
+ return {};
+
+ return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
+ }
+
mlir::Value VisitCastExpr(CastExpr *e);
mlir::Value VisitCallExpr(const CallExpr *e);
+ mlir::Value VisitStmtExpr(StmtExpr *e) {
+ CIRGenFunction::StmtExprEvaluation eval(cgf);
+ if (e->getType()->isVoidType()) {
+ (void)cgf.emitCompoundStmt(*e->getSubStmt());
+ return {};
+ }
+
+ Address retAlloca =
+ cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
+ (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca);
+
+ return cgf.emitLoadOfScalar(cgf.makeAddrLValue(retAlloca, e->getType()),
+ e->getExprLoc());
+ }
+
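The new VisitStmtExpr handles the GNU statement-expression extension: the compound statement is emitted into a temporary and the trailing expression's value becomes the result. A small usage example:

  int twicePlusOne(int a) {
    int x = ({ int y = a * 2; y + 1; }); // x == a * 2 + 1
    return x;
  }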
mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
if (e->getBase()->getType()->isVectorType()) {
assert(!cir::MissingFeatures::scalableVectors());
@@ -384,6 +410,17 @@ public:
return Visit(e->getReplacement());
}
+ mlir::Value VisitVAArgExpr(VAArgExpr *ve) {
+ QualType ty = ve->getType();
+
+ if (ty->isVariablyModifiedType()) {
+ cgf.cgm.errorNYI(ve->getSourceRange(),
+ "variably modified types in varargs");
+ }
+
+ return cgf.emitVAArg(ve);
+ }
+
mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
mlir::Value
VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
@@ -631,6 +668,11 @@ public:
return cgf.emitCXXNewExpr(e);
}
+ mlir::Value VisitCXXThrowExpr(const CXXThrowExpr *e) {
+ cgf.emitCXXThrowExpr(e);
+ return {};
+ }
+
/// Emit a conversion from the specified type to the specified destination
/// type, both of which are CIR scalar types.
/// TODO: do we need ScalarConversionOpts here? Should be done in another
@@ -1060,20 +1102,22 @@ public:
return maybePromoteBoolResult(resOp.getResult(), resTy);
}
+
+ mlir::Value VisitAtomicExpr(AtomicExpr *e) {
+ return cgf.emitAtomicExpr(e).getValue();
+ }
};
LValue ScalarExprEmitter::emitCompoundAssignLValue(
const CompoundAssignOperator *e,
mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
mlir::Value &result) {
+ if (e->getComputationResultType()->isAnyComplexType())
+ return cgf.emitScalarCompoundAssignWithComplex(e, result);
+
QualType lhsTy = e->getLHS()->getType();
BinOpInfo opInfo;
- if (e->getComputationResultType()->isAnyComplexType()) {
- cgf.cgm.errorNYI(result.getLoc(), "complex lvalue assign");
- return LValue();
- }
-
// Emit the RHS first. __block variables need to have the rhs evaluated
// first, plus this should improve codegen a little.
@@ -1877,6 +1921,8 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
cgf.getLoc(subExpr->getSourceRange()), cgf.convertType(destTy),
Visit(subExpr));
}
+ case CK_FunctionToPointerDecay:
+ return cgf.emitLValue(subExpr).getPointer();
default:
cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
@@ -1936,11 +1982,9 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
cgf.getLoc(e->getSourceRange()), vectorType, elements);
}
- if (numInitElements == 0) {
- cgf.cgm.errorNYI(e->getSourceRange(),
- "InitListExpr Non VectorType with 0 init elements");
- return {};
- }
+ // C++11 value-initialization for the scalar.
+ if (numInitElements == 0)
+ return emitNullValue(e->getType(), cgf.getLoc(e->getExprLoc()));
return Visit(e->getInit(0));
}
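For reference, the zero-element case above corresponds to C++11 value-initialization:

  int a = {};   // numInitElements == 0: value-initialized to 0
  int b = {42}; // numInitElements == 1: handled by Visit(e->getInit(0))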
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index e93dc0b..deabb94 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -28,8 +28,6 @@ CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
bool suppressNewContext)
: CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
ehStack.setCGF(this);
- currentCleanupStackDepth = 0;
- assert(ehStack.getStackDepth() == 0);
}
CIRGenFunction::~CIRGenFunction() {}
@@ -217,7 +215,7 @@ void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
mlir::Location loc, CharUnits alignment,
bool isParam) {
assert(isa<NamedDecl>(var) && "Needs a named decl");
- assert(!cir::MissingFeatures::cgfSymbolTable());
+ assert(!symbolTable.count(var) && "not supposed to be available just yet");
auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
assert(allocaOp && "expected cir::AllocaOp");
@@ -226,6 +224,8 @@ void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
if (ty->isReferenceType() || ty.isConstQualified())
allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
+
+ symbolTable.insert(var, allocaOp);
}
void CIRGenFunction::LexicalScope::cleanup() {
@@ -356,11 +356,8 @@ static bool mayDropFunctionReturn(const ASTContext &astContext,
QualType returnType) {
// We can't just discard the return value for a record type with a complex
// destructor or a non-trivially copyable type.
- if (const RecordType *recordType =
- returnType.getCanonicalType()->getAs<RecordType>()) {
- if (const auto *classDecl = dyn_cast<CXXRecordDecl>(recordType->getDecl()))
- return classDecl->hasTrivialDestructor();
- }
+ if (const auto *classDecl = returnType->getAsCXXRecordDecl())
+ return classDecl->hasTrivialDestructor();
return returnType.isTriviallyCopyableType(astContext);
}
@@ -409,6 +406,8 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
curFuncDecl = d->getNonClosureContext();
+ prologueCleanupDepth = ehStack.stable_begin();
+
mlir::Block *entryBB = &fn.getBlocks().front();
builder.setInsertionPointToStart(entryBB);
@@ -475,22 +474,22 @@ void CIRGenFunction::finishFunction(SourceLocation endLoc) {
// important to do this before we enter the return block or return
// edges will be *really* confused.
// TODO(cir): Use prologueCleanupDepth here.
- bool hasCleanups = ehStack.getStackDepth() != currentCleanupStackDepth;
+ bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
if (hasCleanups) {
assert(!cir::MissingFeatures::generateDebugInfo());
// FIXME(cir): should we clearInsertionPoint? breaks many testcases
- popCleanupBlocks(currentCleanupStackDepth);
+ popCleanupBlocks(prologueCleanupDepth);
}
}
mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
- auto result = mlir::LogicalResult::success();
+ // We start with function level scope for variables.
+ SymTableScopeTy varScope(symbolTable);
+
if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
- emitCompoundStmtWithoutScope(*block);
- else
- result = emitStmt(body, /*useCurrentScope=*/true);
+ return emitCompoundStmtWithoutScope(*block);
- return result;
+ return emitStmt(body, /*useCurrentScope=*/true);
}
static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
@@ -530,6 +529,8 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
FunctionArgList args;
QualType retTy = buildFunctionArgList(gd, args);
+ // Create a scope in the symbol table to hold variable declarations.
+ SymTableScopeTy varScope(symbolTable);
{
LexicalScope lexScope(*this, fusedLoc, entryBB);
@@ -553,7 +554,6 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
emitImplicitAssignmentOperatorBody(args);
} else if (body) {
if (mlir::failed(emitFunctionBody(body))) {
- fn.erase();
return nullptr;
}
} else {
@@ -825,12 +825,9 @@ std::string CIRGenFunction::getCounterAggTmpAsString() {
void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
QualType ty) {
// Ignore empty classes in C++.
- if (getLangOpts().CPlusPlus) {
- if (const RecordType *rt = ty->getAs<RecordType>()) {
- if (cast<CXXRecordDecl>(rt->getDecl())->isEmpty())
- return;
- }
- }
+ if (getLangOpts().CPlusPlus)
+ if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
+ return;
// Cast the dest ptr to the appropriate i8 pointer type.
if (builder.isInt8Ty(destPtr.getElementType())) {
@@ -930,6 +927,23 @@ CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs);
}
+mlir::Value CIRGenFunction::emitAlignmentAssumption(
+ mlir::Value ptrValue, QualType ty, SourceLocation loc,
+ SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
+ assert(!cir::MissingFeatures::sanitizers());
+ return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
+ alignment, offsetValue);
+}
+
+mlir::Value CIRGenFunction::emitAlignmentAssumption(
+ mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
+ int64_t alignment, mlir::Value offsetValue) {
+ QualType ty = expr->getType();
+ SourceLocation loc = expr->getExprLoc();
+ return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
+ offsetValue);
+}
+
// TODO(cir): Most of this function can be shared between CIRGen
// and traditional LLVM codegen
void CIRGenFunction::emitVariablyModifiedType(QualType type) {
@@ -978,10 +992,6 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) {
case Type::BitInt:
llvm_unreachable("type class is never variably-modified!");
- case Type::Elaborated:
- type = cast<clang::ElaboratedType>(ty)->getNamedType();
- break;
-
case Type::Adjusted:
type = cast<clang::AdjustedType>(ty)->getAdjustedType();
break;
@@ -1057,4 +1067,10 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) {
} while (type->isVariablyModifiedType());
}
+Address CIRGenFunction::emitVAListRef(const Expr *e) {
+ if (getContext().getBuiltinVaListType()->isArrayType())
+ return emitPointerWithAlignment(e);
+ return emitLValue(e).getAddress();
+}
+
} // namespace clang::CIRGen
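The branch in emitVAListRef reflects how the target ABI defines va_list: where it is an array type (for example x86-64's __va_list_tag[1]) the expression decays to a pointer, otherwise the lvalue's address is taken. A standard usage example exercising the new va_start/va_arg/va_end paths:

  #include <cstdarg>
  int sum(int n, ...) {
    va_list ap;
    va_start(ap, n); // cir.va_start on the va_list reference
    int total = 0;
    for (int i = 0; i < n; ++i)
      total += va_arg(ap, int); // handled by emitVAArg
    va_end(ap); // cir.va_end
    return total;
  }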
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 2e60cfc..39bacfb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -23,6 +23,7 @@
#include "Address.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/BaseSubobject.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Stmt.h"
@@ -30,6 +31,7 @@
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "clang/CIR/MissingFeatures.h"
#include "clang/CIR/TypeEvaluationKind.h"
+#include "llvm/ADT/ScopedHashTable.h"
namespace {
class ScalarExprEmitter;
@@ -102,6 +104,14 @@ public:
/// Sanitizers enabled for this function.
clang::SanitizerSet sanOpts;
+ /// The symbol table maps a variable name to a value in the current scope.
+ /// Entering a function creates a new scope, and the function arguments are
+ /// added to the mapping. When the processing of a function is terminated,
+ /// the scope is destroyed and the mappings created in this scope are
+ /// dropped.
+ using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
+ SymTableTy symbolTable;
+
/// Whether or not a Microsoft-style asm block has been processed within
/// this function. These can potentially set the return value.
bool sawAsmBlock = false;
@@ -324,6 +334,9 @@ public:
~SourceLocRAIIObject() { restore(); }
};
+ using SymTableScopeTy =
+ llvm::ScopedHashTableScope<const clang::Decl *, mlir::Value>;
+
/// Hold counters for incrementally naming temporaries
unsigned counterRefTmp = 0;
unsigned counterAggTmp = 0;
@@ -337,6 +350,15 @@ public:
const clang::LangOptions &getLangOpts() const { return cgm.getLangOpts(); }
+ /// True if an insertion point is defined. If not, this indicates that the
+ /// current code being emitted is unreachable.
+ /// FIXME(cir): we need to inspect this and perhaps use a cleaner mechanism
+ /// since we don't yet force null insertion point to designate behavior (like
+ /// LLVM's codegen does) and we probably shouldn't.
+ bool haveInsertPoint() const {
+ return builder.getInsertionBlock() != nullptr;
+ }
+
// Wrapper for function prototype sources. Wraps either a FunctionProtoType or
// an ObjCMethodDecl.
struct PrototypeWrapper {
@@ -430,7 +452,8 @@ public:
}
};
- ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
+ ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr);
+ ConstantEmission tryEmitAsConstant(const MemberExpr *me);
struct AutoVarEmission {
const clang::VarDecl *Variable;
@@ -448,6 +471,10 @@ public:
/// escaping block.
bool IsEscapingByRef = false;
+ /// True if the variable was emitted as an offload recipe, and thus doesn't
+ /// have the same sort of alloca initialization.
+ bool EmittedAsOffload = false;
+
mlir::Value NRVOFlag{};
struct Invalid {};
@@ -460,11 +487,18 @@ public:
bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
+ bool wasEmittedAsOffloadClause() const { return EmittedAsOffload; }
+
/// Returns the raw, allocated address, which is not necessarily
/// the address of the object itself. It is casted to default
/// address space for address space agnostic languages.
Address getAllocatedAddress() const { return Addr; }
+ // Changes the stored address for the emission. This function should only
+ // be used in extreme cases, and isn't required to model normal AST
+ // initialization/variables.
+ void setAllocatedAddress(Address A) { Addr = A; }
+
/// Returns the address of the object within this declaration.
/// Note that this does not chase the forwarding pointer for
/// __block decls.
@@ -489,9 +523,42 @@ public:
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
localDeclMap.insert({vd, addr});
- // TODO: Add symbol table support
+
+ // Add to the symbol table if not there already.
+ if (symbolTable.count(vd))
+ return;
+ symbolTable.insert(vd, addr.getPointer());
}
+ // A class to allow reverting changes to a var-decl's registration to the
+ // localDeclMap. This is used in cases where things are being inserted into
+ // the variable list but don't follow normal lookup/search rules, like in
+ // OpenACC recipe generation.
+ class DeclMapRevertingRAII {
+ CIRGenFunction &cgf;
+ const VarDecl *vd;
+ bool shouldDelete = false;
+ Address oldAddr = Address::invalid();
+
+ public:
+ DeclMapRevertingRAII(CIRGenFunction &cgf, const VarDecl *vd)
+ : cgf(cgf), vd(vd) {
+ auto mapItr = cgf.localDeclMap.find(vd);
+
+ if (mapItr != cgf.localDeclMap.end())
+ oldAddr = mapItr->second;
+ else
+ shouldDelete = true;
+ }
+
+ ~DeclMapRevertingRAII() {
+ if (shouldDelete)
+ cgf.localDeclMap.erase(vd);
+ else
+ cgf.localDeclMap.insert_or_assign(vd, oldAddr);
+ }
+ };
+
bool shouldNullCheckClassCastValue(const CastExpr *ce);
RValue convertTempToRValue(Address addr, clang::QualType type,
@@ -500,6 +567,33 @@ public:
static bool
isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor);
+ struct VPtr {
+ clang::BaseSubobject base;
+ const clang::CXXRecordDecl *nearestVBase;
+ clang::CharUnits offsetFromNearestVBase;
+ const clang::CXXRecordDecl *vtableClass;
+ };
+
+ using VisitedVirtualBasesSetTy =
+ llvm::SmallPtrSet<const clang::CXXRecordDecl *, 4>;
+
+ using VPtrsVector = llvm::SmallVector<VPtr, 4>;
+ VPtrsVector getVTablePointers(const clang::CXXRecordDecl *vtableClass);
+ void getVTablePointers(clang::BaseSubobject base,
+ const clang::CXXRecordDecl *nearestVBase,
+ clang::CharUnits offsetFromNearestVBase,
+ bool baseIsNonVirtualPrimaryBase,
+ const clang::CXXRecordDecl *vtableClass,
+ VisitedVirtualBasesSetTy &vbases, VPtrsVector &vptrs);
+ /// Return the Value of the vtable pointer member pointed to by thisAddr.
+ mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr,
+ const clang::CXXRecordDecl *vtableClass);
+
+ /// Returns whether we should perform a type checked load when loading a
+ /// virtual function for virtual calls to members of RD. This is generally
+ /// true when both vcall CFI and whole-program-vtables are enabled.
+ bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd);
+
/// A scope within which we are constructing the fields of an object which
/// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if
/// we need to evaluate the CXXDefaultInitExpr within the evaluation.
@@ -548,6 +642,10 @@ public:
return LValue::makeAddr(addr, ty, baseInfo);
}
+ void initializeVTablePointers(mlir::Location loc,
+ const clang::CXXRecordDecl *rd);
+ void initializeVTablePointer(mlir::Location loc, const VPtr &vptr);
+
/// Return the address of a local variable.
Address getAddrOfLocalVar(const clang::VarDecl *vd) {
auto it = localDeclMap.find(vd);
@@ -601,9 +699,13 @@ public:
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
+ /// The cleanup depth enclosing all the cleanups associated with the
+ /// parameters.
+ EHScopeStack::stable_iterator prologueCleanupDepth;
+
/// Takes the old cleanup stack size and emits the cleanup blocks
/// that have been added.
- void popCleanupBlocks(size_t oldCleanupStackDepth);
+ void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth);
void popCleanupBlock();
/// Push a cleanup to be run at the end of the current full-expression. Safe
@@ -622,7 +724,7 @@ public:
/// Enters a new scope for capturing cleanups, all of which
/// will be executed once the scope is exited.
class RunCleanupsScope {
- size_t cleanupStackDepth, oldCleanupStackDepth;
+ EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
protected:
bool performCleanup;
@@ -638,7 +740,7 @@ public:
/// Enter a new cleanup scope.
explicit RunCleanupsScope(CIRGenFunction &cgf)
: performCleanup(true), cgf(cgf) {
- cleanupStackDepth = cgf.ehStack.getStackDepth();
+ cleanupStackDepth = cgf.ehStack.stable_begin();
oldCleanupStackDepth = cgf.currentCleanupStackDepth;
cgf.currentCleanupStackDepth = cleanupStackDepth;
}
@@ -663,7 +765,7 @@ public:
};
// Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
- size_t currentCleanupStackDepth;
+ EHScopeStack::stable_iterator currentCleanupStackDepth = ehStack.stable_end();
public:
/// Represents a scope, including function bodies, compound statements, and
@@ -825,6 +927,18 @@ public:
/// ----------------------
/// CIR emit functions
/// ----------------------
+public:
+ mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty,
+ SourceLocation loc,
+ SourceLocation assumptionLoc,
+ int64_t alignment,
+ mlir::Value offsetValue = nullptr);
+
+ mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
+ SourceLocation assumptionLoc,
+ int64_t alignment,
+ mlir::Value offsetValue = nullptr);
+
private:
void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
clang::CharUnits alignment);
@@ -875,7 +989,13 @@ public:
QualType &baseType, Address &addr);
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e);
- Address emitArrayToPointerDecay(const Expr *array);
+ Address emitArrayToPointerDecay(const Expr *e,
+ LValueBaseInfo *baseInfo = nullptr);
+
+ mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s);
+
+ RValue emitAtomicExpr(AtomicExpr *e);
+ void emitAtomicInit(Expr *init, LValue dest);
AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d,
mlir::OpBuilder::InsertPoint ip = {});
@@ -886,6 +1006,11 @@ public:
void emitAutoVarDecl(const clang::VarDecl &d);
void emitAutoVarCleanups(const AutoVarEmission &emission);
+ /// Emit the initializer for an allocated variable. If this call is not
+ /// associated with the call to emitAutoVarAlloca (as the address of the
+ /// emission is not directly an alloca), the allocatedSeparately parameter can
+ /// be used to suppress the assertions. However, this should only be used in
+ /// extreme cases, as it doesn't properly reflect the language/AST.
void emitAutoVarInit(const AutoVarEmission &emission);
void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
clang::QualType::DestructionKind dtorKind);
@@ -1007,7 +1132,7 @@ public:
RValue emitCXXMemberOrOperatorMemberCallExpr(
const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
ReturnValueSlot returnValue, bool hasQualifier,
- clang::NestedNameSpecifier *qualifier, bool isArrow,
+ clang::NestedNameSpecifier qualifier, bool isArrow,
const clang::Expr *base);
mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
@@ -1016,6 +1141,10 @@ public:
const CXXMethodDecl *md,
ReturnValueSlot returnValue);
+ RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr);
+
+ void emitCXXThrowExpr(const CXXThrowExpr *e);
+
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor,
clang::CXXCtorType ctorType, FunctionArgList &args);
@@ -1043,6 +1172,8 @@ public:
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body);
+ mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s);
+
void emitImplicitAssignmentOperatorBody(FunctionArgList &args);
void emitInitializerForField(clang::FieldDecl *field, LValue lhs,
@@ -1084,10 +1215,17 @@ public:
LValue emitComplexAssignmentLValue(const BinaryOperator *e);
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);
+ LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e,
+ mlir::Value &result);
- void emitCompoundStmt(const clang::CompoundStmt &s);
+ mlir::LogicalResult
+ emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue = nullptr,
+ AggValueSlot slot = AggValueSlot::ignored());
- void emitCompoundStmtWithoutScope(const clang::CompoundStmt &s);
+ mlir::LogicalResult
+ emitCompoundStmtWithoutScope(const clang::CompoundStmt &s,
+ Address *lastValue = nullptr,
+ AggValueSlot slot = AggValueSlot::ignored());
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
@@ -1125,6 +1263,9 @@ public:
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);
+ mlir::LogicalResult emitLabel(const clang::LabelDecl &d);
+ mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s);
+
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s);
/// Emit code to compute the specified expression,
@@ -1181,7 +1322,7 @@ public:
/// reasonable to just ignore the returned alignment when it isn't from an
/// explicit source.
Address emitPointerWithAlignment(const clang::Expr *expr,
- LValueBaseInfo *baseInfo);
+ LValueBaseInfo *baseInfo = nullptr);
/// Emits a reference binding to the passed in expression.
RValue emitReferenceBindingToExpr(const Expr *e);
@@ -1243,6 +1384,8 @@ public:
LValue emitUnaryOpLValue(const clang::UnaryOperator *e);
+ mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType);
+
/// Emit a reached-unreachable diagnostic if \p loc is valid and runtime
/// checking is enabled. Otherwise, just emit an unreachable instruction.
/// \p createNewBlock indicates whether to create a new block for the IR
@@ -1310,7 +1453,7 @@ public:
mlir::OpBuilder::InsertionGuard guard(builder);
builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
builder.createStore(
- value.getLoc(), value, addr,
+ value.getLoc(), value, addr, /*isVolatile=*/false,
mlir::IntegerAttr::get(
mlir::IntegerType::get(value.getContext(), 64),
(uint64_t)addr.getAlignment().getAsAlign().value()));
@@ -1321,6 +1464,27 @@ public:
// we know if a temporary should be destroyed conditionally.
ConditionalEvaluation *outermostConditional = nullptr;
+ /// An RAII object to record that we're evaluating a statement
+ /// expression.
+ class StmtExprEvaluation {
+ CIRGenFunction &cgf;
+
+ /// We have to save the outermost conditional: cleanups in a
+ /// statement expression aren't conditional just because the
+ /// StmtExpr is.
+ ConditionalEvaluation *savedOutermostConditional;
+
+ public:
+ StmtExprEvaluation(CIRGenFunction &cgf)
+ : cgf(cgf), savedOutermostConditional(cgf.outermostConditional) {
+ cgf.outermostConditional = nullptr;
+ }
+
+ ~StmtExprEvaluation() {
+ cgf.outermostConditional = savedOutermostConditional;
+ }
+ };
+
template <typename FuncTy>
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
const FuncTy &branchGenFunc);
@@ -1329,6 +1493,35 @@ public:
const clang::Stmt *thenS,
const clang::Stmt *elseS);
+ /// Build a "reference" to a va_list; this is either the address or the value
+ /// of the expression, depending on how va_list is defined.
+ Address emitVAListRef(const Expr *e);
+
+ /// Emits the start of a CIR variable-argument operation (`cir.va_start`)
+ ///
+ /// \param vaList A reference to the \c va_list as emitted by either
+ /// \c emitVAListRef or \c emitMSVAListRef.
+ ///
+ /// \param count The number of arguments in \c vaList
+ void emitVAStart(mlir::Value vaList, mlir::Value count);
+
+ /// Emits the end of a CIR variable-argument operation (`cir.va_end`)
+ ///
+ /// \param vaList A reference to the \c va_list as emitted by either
+ /// \c emitVAListRef or \c emitMSVAListRef.
+ void emitVAEnd(mlir::Value vaList);
+
+ /// Generate code to get an argument from the passed-in pointer
+ /// and update it accordingly.
+ ///
+ /// \param ve The \c VAArgExpr for which to generate code.
+ ///
+ /// \returns SSA value with the argument.
+ mlir::Value emitVAArg(VAArgExpr *ve);
+
/// ----------------------
/// CIR build helpers
/// -----------------
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index e5e4c68..ab7a069 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/VTableBuilder.h"
#include "clang/CIR/MissingFeatures.h"
#include "llvm/Support/ErrorHandling.h"
@@ -31,6 +32,10 @@ using namespace clang::CIRGen;
namespace {
class CIRGenItaniumCXXABI : public CIRGenCXXABI {
+protected:
+ /// All the vtables which have been defined.
+ llvm::DenseMap<const CXXRecordDecl *, cir::GlobalOp> vtables;
+
public:
CIRGenItaniumCXXABI(CIRGenModule &cgm) : CIRGenCXXABI(cgm) {
assert(!cir::MissingFeatures::cxxabiUseARMMethodPtrABI());
@@ -51,6 +56,8 @@ public:
bool delegating, Address thisAddr,
QualType thisTy) override;
+ void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) override;
+
bool useThunkForDtorVariant(const CXXDestructorDecl *dtor,
CXXDtorType dt) const override {
// Itanium does not emit any destructor variant as an inline thunk.
@@ -58,6 +65,30 @@ public:
// emitted with external linkage or as linkonce if they are inline and used.
return false;
}
+
+ bool isVirtualOffsetNeededForVTableField(CIRGenFunction &cgf,
+ CIRGenFunction::VPtr vptr) override;
+
+ cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *rd,
+ CharUnits vptrOffset) override;
+ CIRGenCallee getVirtualFunctionPointer(CIRGenFunction &cgf,
+ clang::GlobalDecl gd, Address thisAddr,
+ mlir::Type ty,
+ SourceLocation loc) override;
+
+ mlir::Value getVTableAddressPoint(BaseSubobject base,
+ const CXXRecordDecl *vtableClass) override;
+
+ mlir::Value getVTableAddressPointInStructor(
+ CIRGenFunction &cgf, const clang::CXXRecordDecl *vtableClass,
+ clang::BaseSubobject base,
+ const clang::CXXRecordDecl *nearestVBase) override;
+ void emitVTableDefinitions(CIRGenVTables &cgvt,
+ const CXXRecordDecl *rd) override;
+
+ bool doStructorsInitializeVPtrs(const CXXRecordDecl *vtableClass) override {
+ return true;
+ }
};
} // namespace
@@ -243,6 +274,67 @@ bool CIRGenItaniumCXXABI::needsVTTParameter(GlobalDecl gd) {
return false;
}
+void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &cgvt,
+ const CXXRecordDecl *rd) {
+ cir::GlobalOp vtable = getAddrOfVTable(rd, CharUnits());
+ if (vtable.hasInitializer())
+ return;
+
+ ItaniumVTableContext &vtContext = cgm.getItaniumVTableContext();
+ const VTableLayout &vtLayout = vtContext.getVTableLayout(rd);
+ cir::GlobalLinkageKind linkage = cgm.getVTableLinkage(rd);
+ mlir::Attribute rtti =
+ cgm.getAddrOfRTTIDescriptor(cgm.getLoc(rd->getBeginLoc()),
+ cgm.getASTContext().getCanonicalTagType(rd));
+
+ // Classic codegen uses ConstantInitBuilder here, which is a very general
+ // and feature-rich class to generate initializers for global values.
+ // For now, this is using a simpler approach to create the initializer in CIR.
+ cgvt.createVTableInitializer(vtable, vtLayout, rtti,
+ cir::isLocalLinkage(linkage));
+
+ // Set the correct linkage.
+ vtable.setLinkage(linkage);
+
+ if (cgm.supportsCOMDAT() && cir::isWeakForLinker(linkage))
+ vtable.setComdat(true);
+
+ // Set the right visibility.
+ cgm.setGVProperties(vtable, rd);
+
+ // If this is the magic class __cxxabiv1::__fundamental_type_info,
+ // we will emit the typeinfo for the fundamental types. This is the
+ // same behaviour as GCC.
+ const DeclContext *DC = rd->getDeclContext();
+ if (rd->getIdentifier() &&
+ rd->getIdentifier()->isStr("__fundamental_type_info") &&
+ isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
+ cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
+ DC->getParent()->isTranslationUnit()) {
+ cgm.errorNYI(rd->getSourceRange(),
+ "emitVTableDefinitions: __fundamental_type_info");
+ }
+
+ auto vtableAsGlobalValue = dyn_cast<cir::CIRGlobalValueInterface>(*vtable);
+ assert(vtableAsGlobalValue && "VTable must support CIRGlobalValueInterface");
+ // Always emit type metadata on non-available_externally definitions, and on
+ // available_externally definitions if we are performing whole program
+ // devirtualization. For WPD we need the type metadata on all vtable
+ // definitions to ensure we associate derived classes with base classes
+ // defined in headers but with a strong definition only in a shared
+ // library.
+ assert(!cir::MissingFeatures::vtableEmitMetadata());
+ if (cgm.getCodeGenOpts().WholeProgramVTables) {
+ cgm.errorNYI(rd->getSourceRange(),
+ "emitVTableDefinitions: WholeProgramVTables");
+ }
+
+ assert(!cir::MissingFeatures::vtableRelativeLayout());
+ if (vtContext.isRelativeLayout()) {
+ cgm.errorNYI(rd->getSourceRange(), "vtableRelativeLayout");
+ }
+}
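The __cxxabiv1::__fundamental_type_info check above fires only for the ABI-internal class that C++ runtime libraries define to force emission of type_info for all fundamental types. A minimal sketch of the declaration that triggers it (as it appears in runtimes such as libc++abi; CIR currently reports this case as NYI):

    namespace __cxxabiv1 {
    class __fundamental_type_info {
    public:
      // Defining this class (with its key function) in a runtime TU is the
      // signal to emit type_info objects for all fundamental types.
      virtual ~__fundamental_type_info();
    };
    } // namespace __cxxabiv1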
+
void CIRGenItaniumCXXABI::emitDestructorCall(
CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type,
bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy) {
@@ -262,6 +354,44 @@ void CIRGenItaniumCXXABI::emitDestructorCall(
vttTy, nullptr);
}
+// The idea here is to create a separate block for the throw, terminated by
+// an `UnreachableOp`: we branch from the current block to the throw block
+// and then create a fresh block for the remaining operations.
+static void insertThrowAndSplit(mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::Value exceptionPtr = {},
+ mlir::FlatSymbolRefAttr typeInfo = {},
+ mlir::FlatSymbolRefAttr dtor = {}) {
+ mlir::Block *currentBlock = builder.getInsertionBlock();
+ mlir::Region *region = currentBlock->getParent();
+
+ if (currentBlock->empty()) {
+ cir::ThrowOp::create(builder, loc, exceptionPtr, typeInfo, dtor);
+ cir::UnreachableOp::create(builder, loc);
+ } else {
+ mlir::Block *throwBlock = builder.createBlock(region);
+
+ cir::ThrowOp::create(builder, loc, exceptionPtr, typeInfo, dtor);
+ cir::UnreachableOp::create(builder, loc);
+
+ builder.setInsertionPointToEnd(currentBlock);
+ cir::BrOp::create(builder, loc, throwBlock);
+ }
+
+ (void)builder.createBlock(region);
+}
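Roughly, for a rethrow appearing mid-block, the split above produces the following block structure (a hand-drawn sketch; block names and op spellings are illustrative, not verified compiler output):

    ^current:                       ^current:
      <ops emitted so far>            <ops emitted so far>
      [rethrow point]          =>     cir.br ^throw
                                    ^throw:
                                      cir.throw
                                      cir.unreachable
                                    ^cont:
                                      <subsequent ops are emitted here>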
+
+void CIRGenItaniumCXXABI::emitRethrow(CIRGenFunction &cgf, bool isNoReturn) {
+ // void __cxa_rethrow();
+ if (isNoReturn) {
+ CIRGenBuilderTy &builder = cgf.getBuilder();
+ assert(cgf.currSrcLoc && "expected source location");
+ mlir::Location loc = *cgf.currSrcLoc;
+ insertThrowAndSplit(builder, loc);
+ } else {
+ cgm.errorNYI("emitRethrow with isNoReturn false");
+ }
+}
+
CIRGenCXXABI *clang::CIRGen::CreateCIRGenItaniumCXXABI(CIRGenModule &cgm) {
switch (cgm.getASTContext().getCXXABIKind()) {
case TargetCXXABI::GenericItanium:
@@ -278,3 +408,136 @@ CIRGenCXXABI *clang::CIRGen::CreateCIRGenItaniumCXXABI(CIRGenModule &cgm) {
llvm_unreachable("bad or NYI ABI kind");
}
}
+
+cir::GlobalOp CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *rd,
+ CharUnits vptrOffset) {
+ assert(vptrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
+ cir::GlobalOp &vtable = vtables[rd];
+ if (vtable)
+ return vtable;
+
+ // Queue up this vtable for possible deferred emission.
+ assert(!cir::MissingFeatures::deferredVtables());
+
+ SmallString<256> name;
+ llvm::raw_svector_ostream out(name);
+ getMangleContext().mangleCXXVTable(rd, out);
+
+ const VTableLayout &vtLayout =
+ cgm.getItaniumVTableContext().getVTableLayout(rd);
+ mlir::Type vtableType = cgm.getVTables().getVTableType(vtLayout);
+
+ // Use pointer alignment for the vtable. Otherwise we would align it based
+ // on the size of the initializer, which doesn't make sense because only
+ // single values are read from it.
+ unsigned ptrAlign = cgm.getItaniumVTableContext().isRelativeLayout()
+ ? 32
+ : cgm.getTarget().getPointerAlign(LangAS::Default);
+
+ vtable = cgm.createOrReplaceCXXRuntimeVariable(
+ cgm.getLoc(rd->getSourceRange()), name, vtableType,
+ cir::GlobalLinkageKind::ExternalLinkage,
+ cgm.getASTContext().toCharUnitsFromBits(ptrAlign));
+ // LLVM codegen handles unnamedAddr
+ assert(!cir::MissingFeatures::opGlobalUnnamedAddr());
+
+ // In MS C++ if you have a class with virtual functions in which you are using
+ // selective member import/export, then all virtual functions must be exported
+ // unless they are inline, otherwise a link error will result. To match this
+ // behavior, for such classes, we dllimport the vtable if it is defined
+ // externally and all the non-inline virtual methods are marked dllimport, and
+ // we dllexport the vtable if it is defined in this TU and all the non-inline
+ // virtual methods are marked dllexport.
+ if (cgm.getTarget().hasPS4DLLImportExport())
+ cgm.errorNYI(rd->getSourceRange(),
+ "getAddrOfVTable: PS4 DLL import/export");
+
+ cgm.setGVProperties(vtable, rd);
+ return vtable;
+}
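As an example of the symbol this creates (assuming the standard Itanium mangling, where vtable symbols carry a _ZTV prefix):

    struct Foo {
      virtual void f(); // makes Foo dynamic, so it has a vtable
    };
    // getAddrOfVTable(Foo, 0) creates (or returns the cached) global named
    // "_ZTV3Foo", typed from the VTableLayout, with external linkage until
    // emitVTableDefinitions installs the initializer and the final linkage.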
+
+CIRGenCallee CIRGenItaniumCXXABI::getVirtualFunctionPointer(
+ CIRGenFunction &cgf, clang::GlobalDecl gd, Address thisAddr, mlir::Type ty,
+ SourceLocation srcLoc) {
+ CIRGenBuilderTy &builder = cgm.getBuilder();
+ mlir::Location loc = cgf.getLoc(srcLoc);
+ cir::PointerType tyPtr = builder.getPointerTo(ty);
+ auto *methodDecl = cast<CXXMethodDecl>(gd.getDecl());
+ mlir::Value vtable = cgf.getVTablePtr(loc, thisAddr, methodDecl->getParent());
+
+ uint64_t vtableIndex = cgm.getItaniumVTableContext().getMethodVTableIndex(gd);
+ mlir::Value vfunc{};
+ if (cgf.shouldEmitVTableTypeCheckedLoad(methodDecl->getParent())) {
+ cgm.errorNYI(loc, "getVirtualFunctionPointer: emitVTableTypeCheckedLoad");
+ } else {
+ assert(!cir::MissingFeatures::emitTypeMetadataCodeForVCall());
+
+ mlir::Value vfuncLoad;
+ if (cgm.getItaniumVTableContext().isRelativeLayout()) {
+ assert(!cir::MissingFeatures::vtableRelativeLayout());
+ cgm.errorNYI(loc, "getVirtualFunctionPointer: isRelativeLayout");
+ } else {
+ auto vtableSlotPtr = cir::VTableGetVirtualFnAddrOp::create(
+ builder, loc, builder.getPointerTo(tyPtr), vtable, vtableIndex);
+ vfuncLoad = builder.createAlignedLoad(
+ loc, vtableSlotPtr, cgf.getPointerAlign().getQuantity());
+ }
+
+ // Add !invariant.load metadata to the virtual function load to indicate
+ // that the function pointer cannot change once the vtable is constructed.
+ // It's safe to add it even without -fstrict-vtable-pointers, but it only
+ // helps devirtualization when there are two loads of the same virtual
+ // function from the same vtable load, which won't happen without
+ // devirtualization enabled by -fstrict-vtable-pointers.
+ if (cgm.getCodeGenOpts().OptimizationLevel > 0 &&
+ cgm.getCodeGenOpts().StrictVTablePointers) {
+ cgm.errorNYI(loc, "getVirtualFunctionPointer: strictVTablePointers");
+ }
+ vfunc = vfuncLoad;
+ }
+
+ CIRGenCallee callee(gd, vfunc.getDefiningOp());
+ return callee;
+}
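The source-level shape being lowered here is the classic indirect call through the vtable; a minimal sketch:

    struct Base {
      virtual int get();
    };
    int call(Base *b) {
      // Lowering loads b's vptr, indexes the vtable slot for get() (the
      // index comes from getMethodVTableIndex), and loads the function
      // pointer from that slot before making the indirect call.
      return b->get();
    }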
+
+mlir::Value
+CIRGenItaniumCXXABI::getVTableAddressPoint(BaseSubobject base,
+ const CXXRecordDecl *vtableClass) {
+ cir::GlobalOp vtable = getAddrOfVTable(vtableClass, CharUnits());
+
+ // Find the appropriate vtable within the vtable group, and the address point
+ // within that vtable.
+ VTableLayout::AddressPointLocation addressPoint =
+ cgm.getItaniumVTableContext()
+ .getVTableLayout(vtableClass)
+ .getAddressPoint(base);
+
+ mlir::OpBuilder &builder = cgm.getBuilder();
+ auto vtablePtrTy = cir::VPtrType::get(builder.getContext());
+
+ return builder.create<cir::VTableAddrPointOp>(
+ cgm.getLoc(vtableClass->getSourceRange()), vtablePtrTy,
+ mlir::FlatSymbolRefAttr::get(vtable.getSymNameAttr()),
+ cir::AddressPointAttr::get(cgm.getBuilder().getContext(),
+ addressPoint.VTableIndex,
+ addressPoint.AddressPointIndex));
+}
+
+mlir::Value CIRGenItaniumCXXABI::getVTableAddressPointInStructor(
+ CIRGenFunction &cgf, const clang::CXXRecordDecl *vtableClass,
+ clang::BaseSubobject base, const clang::CXXRecordDecl *nearestVBase) {
+
+ if ((base.getBase()->getNumVBases() || nearestVBase != nullptr) &&
+ needsVTTParameter(cgf.curGD)) {
+ cgm.errorNYI(cgf.curFuncDecl->getLocation(),
+ "getVTableAddressPointInStructorWithVTT");
+ }
+ return getVTableAddressPoint(base, vtableClass);
+}
+
+bool CIRGenItaniumCXXABI::isVirtualOffsetNeededForVTableField(
+ CIRGenFunction &cgf, CIRGenFunction::VPtr vptr) {
+ if (vptr.nearestVBase == nullptr)
+ return false;
+ return needsVTTParameter(cgf.curGD);
+}
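A virtual offset is needed exactly when the vptr being initialized belongs to a virtual base and the current constructor or destructor variant takes a VTT. A hypothetical hierarchy that exercises this path:

    struct A {
      virtual void f();
    };
    struct B : virtual A {
      B(); // the base-object variant of this constructor takes a VTT
    };
    struct C : B {
      C(); // while C constructs its B subobject, the vptr for the virtual
           // A base cannot be computed statically, hence the virtual offset
    };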
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 425250d..c7f5484 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -64,7 +64,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
langOpts(astContext.getLangOpts()), codeGenOpts(cgo),
theModule{mlir::ModuleOp::create(mlir::UnknownLoc::get(&mlirContext))},
diags(diags), target(astContext.getTargetInfo()),
- abi(createCXXABI(*this)), genTypes(*this) {
+ abi(createCXXABI(*this)), genTypes(*this), vtables(*this) {
// Initialize cached types
VoidTy = cir::VoidType::get(&getMLIRContext());
@@ -75,6 +75,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
SInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/true);
SInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/true);
UInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/false);
+ UInt8PtrTy = cir::PointerType::get(UInt8Ty);
UInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/false);
UInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/false);
UInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/false);
@@ -102,6 +103,11 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
PtrDiffTy =
cir::IntType::get(&getMLIRContext(), sizeTypeSize, /*isSigned=*/true);
+ std::optional<cir::SourceLanguage> sourceLanguage = getCIRSourceLanguage();
+ if (sourceLanguage)
+ theModule->setAttr(
+ cir::CIRDialect::getSourceLanguageAttrName(),
+ cir::SourceLanguageAttr::get(&mlirContext, *sourceLanguage));
theModule->setAttr(cir::CIRDialect::getTripleAttrName(),
builder.getStringAttr(getTriple().str()));
@@ -437,13 +443,27 @@ void CIRGenModule::emitGlobalFunctionDefinition(clang::GlobalDecl gd,
errorNYI(funcDecl->getSourceRange(), "deferredAnnotations");
}
+void CIRGenModule::handleCXXStaticMemberVarInstantiation(VarDecl *vd) {
+ VarDecl::DefinitionKind dk = vd->isThisDeclarationADefinition();
+ if (dk == VarDecl::Definition && vd->hasAttr<DLLImportAttr>())
+ return;
+
+ TemplateSpecializationKind tsk = vd->getTemplateSpecializationKind();
+ // If we have a definition, this might be a deferred decl. If the
+ // instantiation is explicit, make sure we emit it at the end.
+ if (vd->getDefinition() && tsk == TSK_ExplicitInstantiationDefinition)
+ getAddrOfGlobalVar(vd);
+
+ emitTopLevelDecl(vd);
+}
+
mlir::Operation *CIRGenModule::getGlobalValue(StringRef name) {
return mlir::SymbolTable::lookupSymbolIn(theModule, name);
}
cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &cgm,
mlir::Location loc, StringRef name,
- mlir::Type t,
+ mlir::Type t, bool isConstant,
mlir::Operation *insertPoint) {
cir::GlobalOp g;
CIRGenBuilderTy &builder = cgm.getBuilder();
@@ -464,7 +484,7 @@ cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &cgm,
builder.setInsertionPointToStart(cgm.getModule().getBody());
}
- g = builder.create<cir::GlobalOp>(loc, name, t);
+ g = builder.create<cir::GlobalOp>(loc, name, t, isConstant);
if (!insertPoint)
cgm.lastGlobalOp = g;
@@ -495,6 +515,24 @@ void CIRGenModule::setNonAliasAttributes(GlobalDecl gd, mlir::Operation *op) {
assert(!cir::MissingFeatures::setTargetAttributes());
}
+std::optional<cir::SourceLanguage> CIRGenModule::getCIRSourceLanguage() const {
+ using ClangStd = clang::LangStandard;
+ using CIRLang = cir::SourceLanguage;
+ const auto &opts = getLangOpts();
+
+ if (opts.CPlusPlus)
+ return CIRLang::CXX;
+ if (opts.C99 || opts.C11 || opts.C17 || opts.C23 || opts.C2y ||
+ opts.LangStd == ClangStd::lang_c89 ||
+ opts.LangStd == ClangStd::lang_gnu89)
+ return CIRLang::C;
+
+ // TODO(cir): support remaining source languages.
+ assert(!cir::MissingFeatures::sourceLanguageCases());
+ errorNYI("CIR does not yet support the given source language");
+ return std::nullopt;
+}
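The net effect is a module-level attribute recording the input language; for a C++ translation unit the module header looks roughly like the sketch below (the attribute spelling is illustrative):

    module attributes {cir.lang = #cir.lang<cxx>,
                       cir.triple = "x86_64-unknown-linux-gnu"} {
      ...
    }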
+
static void setLinkageForGV(cir::GlobalOp &gv, const NamedDecl *nd) {
// Set linkage and visibility in case we never see a definition.
LinkageInfo lv = nd->getLinkageAndVisibility();
@@ -566,7 +604,7 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef mangledName, mlir::Type ty,
// mlir::SymbolTable::Visibility::Public is the default, no need to explicitly
// mark it as such.
cir::GlobalOp gv =
- CIRGenModule::createGlobalOp(*this, loc, mangledName, ty,
+ CIRGenModule::createGlobalOp(*this, loc, mangledName, ty, false,
/*insertPoint=*/entry.getOperation());
// This is the first use or definition of a mangled name. If there is a
@@ -654,6 +692,16 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty,
g.getSymName());
}
+cir::GlobalViewAttr CIRGenModule::getAddrOfGlobalVarAttr(const VarDecl *d) {
+ assert(d->hasGlobalStorage() && "Not a global variable");
+ mlir::Type ty = getTypes().convertTypeForMem(d->getType());
+
+ cir::GlobalOp globalOp = getOrCreateCIRGlobal(d, ty, NotForDefinition);
+ assert(!cir::MissingFeatures::addressSpace());
+ cir::PointerType ptrTy = builder.getPointerTo(globalOp.getSymType());
+ return builder.getGlobalViewAttr(ptrTy, globalOp);
+}
+
void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd,
bool isTentative) {
if (getLangOpts().OpenCL || getLangOpts().OpenMPIsTargetDevice) {
@@ -800,7 +848,7 @@ void CIRGenModule::emitGlobalDefinition(clang::GlobalDecl gd,
emitGlobalFunctionDefinition(gd, op);
if (method->isVirtual())
- errorNYI(method->getSourceRange(), "virtual member function");
+ getVTables().emitThunks(gd);
return;
}
@@ -946,6 +994,39 @@ void CIRGenModule::applyReplacements() {
}
}
+cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable(
+ mlir::Location loc, StringRef name, mlir::Type ty,
+ cir::GlobalLinkageKind linkage, clang::CharUnits alignment) {
+ auto gv = mlir::dyn_cast_or_null<cir::GlobalOp>(
+ mlir::SymbolTable::lookupSymbolIn(theModule, name));
+
+ if (gv) {
+ // Handling should be added here to check the type and assert that gv is
+ // a declaration if the types don't match, plus handling below to replace
+ // the variable if it was only a declaration.
+ errorNYI(loc, "createOrReplaceCXXRuntimeVariable: already exists");
+ return gv;
+ }
+
+ // Create a new variable.
+ gv = createGlobalOp(*this, loc, name, ty);
+
+ // Set up extra information and add to the module
+ gv.setLinkageAttr(
+ cir::GlobalLinkageKindAttr::get(&getMLIRContext(), linkage));
+ mlir::SymbolTable::setSymbolVisibility(gv,
+ CIRGenModule::getMLIRVisibility(gv));
+
+ if (supportsCOMDAT() && cir::isWeakForLinker(linkage) &&
+ !gv.hasAvailableExternallyLinkage()) {
+ gv.setComdat(true);
+ }
+
+ gv.setAlignmentAttr(getSize(alignment));
+ setDSOLocal(static_cast<mlir::Operation *>(gv));
+ return gv;
+}
+
// TODO(CIR): this could be a common method between LLVM codegen.
static bool isVarDeclStrongDefinition(const ASTContext &astContext,
CIRGenModule &cgm, const VarDecl *vd,
@@ -996,8 +1077,7 @@ static bool isVarDeclStrongDefinition(const ASTContext &astContext,
if (astContext.isAlignmentRequired(varType))
return true;
- if (const auto *rt = varType->getAs<RecordType>()) {
- const RecordDecl *rd = rt->getDecl();
+ if (const auto *rd = varType->getAsRecordDecl()) {
for (const FieldDecl *fd : rd->fields()) {
if (fd->isBitField())
continue;
@@ -1181,8 +1261,8 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr c,
// Create a global variable for this string
// FIXME(cir): check for insertion point in module level.
- cir::GlobalOp gv =
- CIRGenModule::createGlobalOp(cgm, loc, globalName, c.getType());
+ cir::GlobalOp gv = CIRGenModule::createGlobalOp(
+ cgm, loc, globalName, c.getType(), !cgm.getLangOpts().WritableStrings);
// Set up extra information and add to the module
gv.setAlignmentAttr(cgm.getSize(alignment));
@@ -1260,6 +1340,19 @@ cir::GlobalOp CIRGenModule::getGlobalForStringLiteral(const StringLiteral *s,
return gv;
}
+/// Return a pointer to a constant array for the given string literal.
+cir::GlobalViewAttr
+CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *s,
+ StringRef name) {
+ cir::GlobalOp gv = getGlobalForStringLiteral(s, name);
+ auto arrayTy = mlir::dyn_cast<cir::ArrayType>(gv.getSymType());
+ assert(arrayTy && "String literal must be array");
+ assert(!cir::MissingFeatures::addressSpace());
+ cir::PointerType ptrTy = getBuilder().getPointerTo(arrayTy.getElementType());
+
+ return builder.getGlobalViewAttr(ptrTy, gv);
+}
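For a literal such as the one below, the returned attribute is a pointer-typed view of the underlying array global (a sketch; the exact CIR types are illustrative):

    const char *greeting = "hi";
    // getGlobalForStringLiteral produces a constant global holding the
    // three-byte array {'h', 'i', '\0'}; the GlobalViewAttr returned here
    // is a pointer to the array's element type, usable as the initializer
    // of 'greeting'.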
+
void CIRGenModule::emitExplicitCastExprType(const ExplicitCastExpr *e,
CIRGenFunction *cgf) {
if (cgf && e->getType()->isVariablyModifiedType())
@@ -1365,6 +1458,21 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
assert(!cir::MissingFeatures::generateDebugInfo());
assert(!cir::MissingFeatures::cxxRecordStaticMembers());
break;
+
+ case Decl::FileScopeAsm:
+ // File-scope asm is ignored during device-side CUDA compilation.
+ if (langOpts.CUDA && langOpts.CUDAIsDevice)
+ break;
+ // File-scope asm is ignored during device-side OpenMP compilation.
+ if (langOpts.OpenMPIsTargetDevice)
+ break;
+ // File-scope asm is ignored during device-side SYCL compilation.
+ if (langOpts.SYCLIsDevice)
+ break;
+ auto *fileAsm = cast<FileScopeAsmDecl>(decl);
+ std::string line = fileAsm->getAsmString();
+ globalScopeAsm.push_back(builder.getStringAttr(line));
+ break;
}
}
@@ -1926,6 +2034,15 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name,
}
mlir::SymbolTable::Visibility
+CIRGenModule::getMLIRVisibility(cir::GlobalOp op) {
+ // MLIR doesn't accept public symbol declarations (only definitions).
+ if (op.isDeclaration())
+ return mlir::SymbolTable::Visibility::Private;
+ return getMLIRVisibilityFromCIRLinkage(op.getLinkage());
+}
+
+mlir::SymbolTable::Visibility
CIRGenModule::getMLIRVisibilityFromCIRLinkage(cir::GlobalLinkageKind glk) {
switch (glk) {
case cir::GlobalLinkageKind::InternalLinkage:
@@ -1978,6 +2095,9 @@ void CIRGenModule::release() {
emitDeferred();
applyReplacements();
+ theModule->setAttr(cir::CIRDialect::getModuleLevelAsmAttrName(),
+ builder.getArrayAttr(globalScopeAsm));
+
// There's a lot of code that is not implemented yet.
assert(!cir::MissingFeatures::cgmRelease());
}
@@ -2033,6 +2153,18 @@ bool CIRGenModule::verifyModule() const {
return mlir::verify(theModule).succeeded();
}
+mlir::Attribute CIRGenModule::getAddrOfRTTIDescriptor(mlir::Location loc,
+ QualType ty, bool forEh) {
+ // Return a bogus pointer if RTTI is disabled, unless it's for EH.
+ // FIXME: should we even be calling this method if RTTI is disabled
+ // and it's not for EH?
+ if (!shouldEmitRTTI(forEh))
+ return builder.getConstNullPtrAttr(builder.getUInt8PtrTy());
+
+ errorNYI(loc, "getAddrOfRTTIDescriptor");
+ return mlir::Attribute();
+}
+
// TODO(cir): this can be shared with LLVM codegen.
CharUnits CIRGenModule::computeNonVirtualBaseClassOffset(
const CXXRecordDecl *derivedClass,
@@ -2048,8 +2180,7 @@ CharUnits CIRGenModule::computeNonVirtualBaseClassOffset(
// Get the layout.
const ASTRecordLayout &layout = astContext.getASTRecordLayout(rd);
- const auto *baseDecl = cast<CXXRecordDecl>(
- base->getType()->castAs<clang::RecordType>()->getDecl());
+ const auto *baseDecl = base->getType()->castAsCXXRecordDecl();
// Add the offset.
offset += layout.getBaseClassOffset(baseDecl);
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 5d07d38..4f5c7f8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -17,6 +17,7 @@
#include "CIRGenCall.h"
#include "CIRGenTypeCache.h"
#include "CIRGenTypes.h"
+#include "CIRGenVTables.h"
#include "CIRGenValue.h"
#include "clang/AST/CharUnits.h"
@@ -86,16 +87,22 @@ private:
CIRGenTypes genTypes;
+ /// Holds information about C++ vtables.
+ CIRGenVTables vtables;
+
/// Per-function codegen information. Updated every time emitCIR is called
/// for a FunctionDecl.
CIRGenFunction *curCGF = nullptr;
+ llvm::SmallVector<mlir::Attribute> globalScopeAsm;
+
public:
mlir::ModuleOp getModule() const { return theModule; }
CIRGenBuilderTy &getBuilder() { return builder; }
clang::ASTContext &getASTContext() const { return astContext; }
const clang::TargetInfo &getTarget() const { return target; }
const clang::CodeGenOptions &getCodeGenOpts() const { return codeGenOpts; }
+ clang::DiagnosticsEngine &getDiags() const { return diags; }
CIRGenTypes &getTypes() { return genTypes; }
const clang::LangOptions &getLangOpts() const { return langOpts; }
@@ -114,6 +121,9 @@ public:
mlir::Operation *lastGlobalOp = nullptr;
+ /// Tell the consumer that this variable has been instantiated.
+ void handleCXXStaticMemberVarInstantiation(VarDecl *vd);
+
llvm::DenseMap<const Decl *, cir::GlobalOp> staticLocalDeclMap;
mlir::Operation *getGlobalValue(llvm::StringRef ref);
@@ -140,6 +150,7 @@ public:
static cir::GlobalOp createGlobalOp(CIRGenModule &cgm, mlir::Location loc,
llvm::StringRef name, mlir::Type t,
+ bool isConstant = false,
mlir::Operation *insertPoint = nullptr);
llvm::StringMap<unsigned> cgGlobalNames;
@@ -155,6 +166,9 @@ public:
getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty = {},
ForDefinition_t isForDefinition = NotForDefinition);
+ /// Return the cir::GlobalViewAttr for the address of the given global.
+ cir::GlobalViewAttr getAddrOfGlobalVarAttr(const VarDecl *d);
+
CharUnits computeNonVirtualBaseClassOffset(
const CXXRecordDecl *derivedClass,
llvm::iterator_range<CastExpr::path_const_iterator> path);
@@ -168,6 +182,24 @@ public:
void constructAttributeList(CIRGenCalleeInfo calleeInfo,
mlir::NamedAttrList &attrs);
+ /// Will return a global variable of the given type. If a variable with a
+ /// different type already exists then a new variable with the right type
+ /// will be created and all uses of the old variable will be replaced with a
+ /// bitcast to the new variable.
+ cir::GlobalOp createOrReplaceCXXRuntimeVariable(
+ mlir::Location loc, llvm::StringRef name, mlir::Type ty,
+ cir::GlobalLinkageKind linkage, clang::CharUnits alignment);
+
+ void emitVTable(const CXXRecordDecl *rd);
+
+ /// Return the appropriate linkage for the vtable, VTT, and type information
+ /// of the given class.
+ cir::GlobalLinkageKind getVTableLinkage(const CXXRecordDecl *rd);
+
+ /// Get the address of the RTTI descriptor for the given type.
+ mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType ty,
+ bool forEH = false);
+
/// Return a constant array for the given string.
mlir::Attribute getConstantArrayFromStringLiteral(const StringLiteral *e);
@@ -176,6 +208,12 @@ public:
cir::GlobalOp getGlobalForStringLiteral(const StringLiteral *s,
llvm::StringRef name = ".str");
+ /// Return a global symbol reference to a constant array for the given string
+ /// literal.
+ cir::GlobalViewAttr
+ getAddrOfConstantStringFromLiteral(const StringLiteral *s,
+ llvm::StringRef name = ".str");
+
/// Set attributes which are common to any form of a global definition (alias,
/// Objective-C method, function, global variable).
///
@@ -213,6 +251,16 @@ public:
cir::FuncType fnType = nullptr, bool dontDefer = false,
ForDefinition_t isForDefinition = NotForDefinition);
+ mlir::Type getVTableComponentType();
+ CIRGenVTables &getVTables() { return vtables; }
+
+ ItaniumVTableContext &getItaniumVTableContext() {
+ return vtables.getItaniumVTableContext();
+ }
+ const ItaniumVTableContext &getItaniumVTableContext() const {
+ return vtables.getItaniumVTableContext();
+ }
+
/// This contains all the decls which have definitions but which are deferred
/// for emission and therefore should only be output if they are actually
/// used. If a decl is in this, then it is known to have not been referenced
@@ -252,6 +300,13 @@ public:
getAddrOfGlobal(clang::GlobalDecl gd,
ForDefinition_t isForDefinition = NotForDefinition);
+ /// Return whether RTTI information should be emitted for this target.
+ bool shouldEmitRTTI(bool forEH = false) {
+ return (forEH || getLangOpts().RTTI) && !getLangOpts().CUDAIsDevice &&
+ !(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
+ getTriple().isNVPTX());
+ }
+
/// Emit type info if type of an expression is a variably modified
/// type. Also emit proper debug info for cast types.
void emitExplicitCastExprType(const ExplicitCastExpr *e,
@@ -355,8 +410,8 @@ public:
static cir::VisibilityKind getGlobalVisibilityKindFromClangVisibility(
clang::VisibilityAttr::VisibilityType visibility);
cir::VisibilityAttr getGlobalVisibilityAttrFromDecl(const Decl *decl);
- static mlir::SymbolTable::Visibility getMLIRVisibility(cir::GlobalOp op);
cir::GlobalLinkageKind getFunctionLinkage(GlobalDecl gd);
+ static mlir::SymbolTable::Visibility getMLIRVisibility(cir::GlobalOp op);
cir::GlobalLinkageKind getCIRLinkageForDeclarator(const DeclaratorDecl *dd,
GVALinkage linkage,
bool isConstantVariable);
@@ -423,6 +478,9 @@ private:
void replacePointerTypeArgs(cir::FuncOp oldF, cir::FuncOp newF);
void setNonAliasAttributes(GlobalDecl gd, mlir::Operation *op);
+
+ /// Map the source language in use to a CIR source-language attribute.
+ std::optional<cir::SourceLanguage> getCIRSourceLanguage() const;
};
} // namespace CIRGen
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
index bb9054a..3753336 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
@@ -357,15 +357,12 @@ class OpenACCClauseCIREmitter final
}
template <typename RecipeTy>
- RecipeTy getOrCreateRecipe(ASTContext &astCtx, const Expr *varRef,
- const VarDecl *varRecipe, DeclContext *dc,
- QualType baseType, mlir::Value mainOp) {
- mlir::ModuleOp mod =
- builder.getBlock()->getParent()->getParentOfType<mlir::ModuleOp>();
-
+ std::string getRecipeName(SourceRange loc, QualType baseType,
+ OpenACCReductionOperator reductionOp) {
std::string recipeName;
{
llvm::raw_string_ostream stream(recipeName);
+
if constexpr (std::is_same_v<RecipeTy, mlir::acc::PrivateRecipeOp>) {
stream << "privatization_";
} else if constexpr (std::is_same_v<RecipeTy,
@@ -375,11 +372,40 @@ class OpenACCClauseCIREmitter final
} else if constexpr (std::is_same_v<RecipeTy,
mlir::acc::ReductionRecipeOp>) {
stream << "reduction_";
- // We don't have the reduction operation here well enough to know how to
- // spell this correctly (+ == 'add', etc), so when we implement
- // 'reduction' we have to do that here.
- cgf.cgm.errorNYI(varRef->getSourceRange(),
- "OpeNACC reduction recipe creation");
+ // The spellings here are a little odd (bitwise and/or get an 'i' prefix,
+ // logical ops an 'l'), but they are chosen to match both the MLIR dialect
+ // names and the Flang versions of these recipes.
+ switch (reductionOp) {
+ case OpenACCReductionOperator::Addition:
+ stream << "add_";
+ break;
+ case OpenACCReductionOperator::Multiplication:
+ stream << "mul_";
+ break;
+ case OpenACCReductionOperator::Max:
+ stream << "max_";
+ break;
+ case OpenACCReductionOperator::Min:
+ stream << "min_";
+ break;
+ case OpenACCReductionOperator::BitwiseAnd:
+ stream << "iand_";
+ break;
+ case OpenACCReductionOperator::BitwiseOr:
+ stream << "ior_";
+ break;
+ case OpenACCReductionOperator::BitwiseXOr:
+ stream << "xor_";
+ break;
+ case OpenACCReductionOperator::And:
+ stream << "land_";
+ break;
+ case OpenACCReductionOperator::Or:
+ stream << "lor_";
+ break;
+ case OpenACCReductionOperator::Invalid:
+ llvm_unreachable("invalid reduction operator");
+ }
} else {
static_assert(!sizeof(RecipeTy), "Unknown Recipe op kind");
}
@@ -387,72 +413,221 @@ class OpenACCClauseCIREmitter final
MangleContext &mc = cgf.cgm.getCXXABI().getMangleContext();
mc.mangleCanonicalTypeName(baseType, stream);
}
+ return recipeName;
+ }
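Concretely, the recipe symbol is the clause-kind prefix (plus the operator spelling for reductions) followed by the Itanium mangling of the variable's type. Assuming the usual _ZTSi mangling for int, an int variable would yield names like:

    // #pragma acc parallel private(x)      ->  "privatization__ZTSi"
    // #pragma acc parallel reduction(+:x)  ->  "reduction_add__ZTSi"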
- if (auto recipe = mod.lookupSymbol<RecipeTy>(recipeName))
- return recipe;
-
- mlir::Location loc = cgf.cgm.getLoc(varRef->getBeginLoc());
- mlir::Location locEnd = cgf.cgm.getLoc(varRef->getEndLoc());
+ void createFirstprivateRecipeCopy(
+ mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp,
+ CIRGenFunction::AutoVarEmission tempDeclEmission,
+ mlir::acc::FirstprivateRecipeOp recipe, const VarDecl *varRecipe,
+ const VarDecl *temporary) {
+ mlir::Block *block = builder.createBlock(
+ &recipe.getCopyRegion(), recipe.getCopyRegion().end(),
+ {mainOp.getType(), mainOp.getType()}, {loc, loc});
+ builder.setInsertionPointToEnd(&recipe.getCopyRegion().back());
+ CIRGenFunction::LexicalScope ls(cgf, loc, block);
+
+ mlir::BlockArgument fromArg = block->getArgument(0);
+ mlir::BlockArgument toArg = block->getArgument(1);
+
+ mlir::Type elementTy =
+ mlir::cast<cir::PointerType>(mainOp.getType()).getPointee();
+
+ // Set the address of the emission to be the argument, so that we initialize
+ // that instead of the variable in the other block.
+ tempDeclEmission.setAllocatedAddress(
+ Address{toArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)});
+ tempDeclEmission.EmittedAsOffload = true;
+
+ CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, temporary};
+ cgf.setAddrOfLocalVar(
+ temporary,
+ Address{fromArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)});
+
+ cgf.emitAutoVarInit(tempDeclEmission);
+ mlir::acc::YieldOp::create(builder, locEnd);
+ }
- mlir::OpBuilder modBuilder(mod.getBodyRegion());
- auto recipe =
- RecipeTy::create(modBuilder, loc, recipeName, mainOp.getType());
+ // Create the 'init' section of the recipe, including the 'copy' section
+ // for 'firstprivate'. Note that this function is not 'insertion point'
+ // clean: it leaves the insertion point inside the section it creates and
+ // doesn't restore it afterwards.
+ template <typename RecipeTy>
+ void createRecipeInitCopy(mlir::Location loc, mlir::Location locEnd,
+ SourceRange exprRange, mlir::Value mainOp,
+ RecipeTy recipe, const VarDecl *varRecipe,
+ const VarDecl *temporary) {
+ assert(varRecipe && "Required recipe variable not set?");
CIRGenFunction::AutoVarEmission tempDeclEmission{
CIRGenFunction::AutoVarEmission::invalid()};
-
- // Init section.
- {
- llvm::SmallVector<mlir::Type> argsTys{mainOp.getType()};
- llvm::SmallVector<mlir::Location> argsLocs{loc};
- builder.createBlock(&recipe.getInitRegion(), recipe.getInitRegion().end(),
- argsTys, argsLocs);
- builder.setInsertionPointToEnd(&recipe.getInitRegion().back());
-
- if constexpr (!std::is_same_v<RecipeTy, mlir::acc::PrivateRecipeOp>) {
- // We have only implemented 'init' for private, so make this NYI until
- // we have explicitly implemented everything.
- cgf.cgm.errorNYI(varRef->getSourceRange(),
- "OpenACC non-private recipe init");
+ CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, varRecipe};
+
+ // Do the 'init' section of the recipe IR, which does an alloca, then the
+ // initialization (except for firstprivate).
+ mlir::Block *block = builder.createBlock(&recipe.getInitRegion(),
+ recipe.getInitRegion().end(),
+ {mainOp.getType()}, {loc});
+ builder.setInsertionPointToEnd(&recipe.getInitRegion().back());
+ CIRGenFunction::LexicalScope ls(cgf, loc, block);
+
+ tempDeclEmission =
+ cgf.emitAutoVarAlloca(*varRecipe, builder.saveInsertionPoint());
+
+ // 'firstprivate' doesn't do its initialization in the 'init' section;
+ // instead it does it in the 'copy' section, so only do init here.
+ // 'reduction' appears to initialize in 'init' too (rather than in a
+ // 'copy' section), so we'll probably have to handle it here as well, but
+ // we can do that when we get to the reduction implementation.
+ if constexpr (std::is_same_v<RecipeTy, mlir::acc::PrivateRecipeOp>) {
+ // We are OK with no init for builtins, arrays of builtins, or pointers,
+ // else we should NYI so we know to go look for these.
+ if (cgf.getContext().getLangOpts().CPlusPlus &&
+ !varRecipe->getType()
+ ->getPointeeOrArrayElementType()
+ ->isBuiltinType() &&
+ !varRecipe->getType()->isPointerType() && !varRecipe->getInit()) {
+ // If we don't have an initialization recipe, we failed during Sema to
+ // initialize this correctly. Disabling Sema::TentativeAnalysisScopes in
+ // SemaOpenACC::CreateInitRecipe makes it emit an error that tells us so.
+ // However, emitting those errors in production would violate the
+ // standard, so we cannot emit them.
+ cgf.cgm.errorNYI(exprRange, "private default-init recipe");
}
+ cgf.emitAutoVarInit(tempDeclEmission);
+ } else if constexpr (std::is_same_v<RecipeTy,
+ mlir::acc::ReductionRecipeOp>) {
+ // Unlike Private, the recipe here is always required as it has to do
+ // init, not just 'default' init.
+ if (!varRecipe->getInit())
+ cgf.cgm.errorNYI(exprRange, "reduction init recipe");
+ cgf.emitAutoVarInit(tempDeclEmission);
+ }
- if (varRecipe) {
- tempDeclEmission =
- cgf.emitAutoVarAlloca(*varRecipe, builder.saveInsertionPoint());
- cgf.emitAutoVarInit(tempDeclEmission);
+ mlir::acc::YieldOp::create(builder, locEnd);
+
+ if constexpr (std::is_same_v<RecipeTy, mlir::acc::FirstprivateRecipeOp>) {
+ if (!varRecipe->getInit()) {
+ // If we don't have an initialization recipe, we failed during Sema to
+ // initialize this correctly. Disabling Sema::TentativeAnalysisScopes in
+ // SemaOpenACC::CreateInitRecipe makes it emit an error that tells us so.
+ // However, emitting those errors in production would violate the
+ // standard, so we cannot emit them.
+ cgf.cgm.errorNYI(
+ exprRange, "firstprivate copy-init recipe not properly generated");
}
- mlir::acc::YieldOp::create(builder, locEnd);
+ createFirstprivateRecipeCopy(loc, locEnd, mainOp, tempDeclEmission,
+ recipe, varRecipe, temporary);
}
+ }
- // Copy section.
- if constexpr (std::is_same_v<RecipeTy, mlir::acc::FirstprivateRecipeOp> ||
- std::is_same_v<RecipeTy, mlir::acc::ReductionRecipeOp>) {
- // TODO: OpenACC: 'private' doesn't emit this, but for the other two we
- // have to figure out what 'copy' means here.
- cgf.cgm.errorNYI(varRef->getSourceRange(),
- "OpenACC record type privatization copy section");
+ // This function generates the 'combiner' section for a reduction recipe.
+ // Note that this function is not 'insertion point' clean: it leaves the
+ // insertion point inside the 'combiner' section of the recipe and doesn't
+ // restore it afterwards.
+ void createReductionRecipeCombiner(mlir::Location loc, mlir::Location locEnd,
+ mlir::Value mainOp,
+ mlir::acc::ReductionRecipeOp recipe) {
+ mlir::Block *block = builder.createBlock(
+ &recipe.getCombinerRegion(), recipe.getCombinerRegion().end(),
+ {mainOp.getType(), mainOp.getType()}, {loc, loc});
+ builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back());
+ CIRGenFunction::LexicalScope ls(cgf, loc, block);
+
+ mlir::BlockArgument lhsArg = block->getArgument(0);
+
+ mlir::acc::YieldOp::create(builder, locEnd, lhsArg);
+ }
+
+ // This function generates the 'destroy' section for a recipe. Note that
+ // this function is not 'insertion point' clean: it leaves the insertion
+ // point inside the 'destroy' section of the recipe and doesn't restore it
+ // afterwards.
+ void createRecipeDestroySection(mlir::Location loc, mlir::Location locEnd,
+ mlir::Value mainOp, CharUnits alignment,
+ QualType baseType,
+ mlir::Region &destroyRegion) {
+ mlir::Block *block = builder.createBlock(
+ &destroyRegion, destroyRegion.end(), {mainOp.getType()}, {loc});
+ builder.setInsertionPointToEnd(&destroyRegion.back());
+ CIRGenFunction::LexicalScope ls(cgf, loc, block);
+
+ mlir::Type elementTy =
+ mlir::cast<cir::PointerType>(mainOp.getType()).getPointee();
+ Address addr{block->getArgument(0), elementTy, alignment};
+ cgf.emitDestroy(addr, baseType,
+ cgf.getDestroyer(QualType::DK_cxx_destructor));
+
+ mlir::acc::YieldOp::create(builder, locEnd);
+ }
+
+ mlir::acc::ReductionOperator convertReductionOp(OpenACCReductionOperator op) {
+ switch (op) {
+ case OpenACCReductionOperator::Addition:
+ return mlir::acc::ReductionOperator::AccAdd;
+ case OpenACCReductionOperator::Multiplication:
+ return mlir::acc::ReductionOperator::AccMul;
+ case OpenACCReductionOperator::Max:
+ return mlir::acc::ReductionOperator::AccMax;
+ case OpenACCReductionOperator::Min:
+ return mlir::acc::ReductionOperator::AccMin;
+ case OpenACCReductionOperator::BitwiseAnd:
+ return mlir::acc::ReductionOperator::AccIand;
+ case OpenACCReductionOperator::BitwiseOr:
+ return mlir::acc::ReductionOperator::AccIor;
+ case OpenACCReductionOperator::BitwiseXOr:
+ return mlir::acc::ReductionOperator::AccXor;
+ case OpenACCReductionOperator::And:
+ return mlir::acc::ReductionOperator::AccLand;
+ case OpenACCReductionOperator::Or:
+ return mlir::acc::ReductionOperator::AccLor;
+ case OpenACCReductionOperator::Invalid:
+ llvm_unreachable("invalid reduction operator");
}
- // Destroy section (doesn't currently exist).
- if (varRecipe && varRecipe->needsDestruction(cgf.getContext())) {
- llvm::SmallVector<mlir::Type> argsTys{mainOp.getType()};
- llvm::SmallVector<mlir::Location> argsLocs{loc};
- mlir::Block *block = builder.createBlock(&recipe.getDestroyRegion(),
- recipe.getDestroyRegion().end(),
- argsTys, argsLocs);
- builder.setInsertionPointToEnd(&recipe.getDestroyRegion().back());
-
- mlir::Type elementTy =
- mlir::cast<cir::PointerType>(mainOp.getType()).getPointee();
- Address addr{block->getArgument(0), elementTy,
- cgf.getContext().getDeclAlign(varRecipe)};
- cgf.emitDestroy(addr, baseType,
- cgf.getDestroyer(QualType::DK_cxx_destructor));
-
- mlir::acc::YieldOp::create(builder, locEnd);
+ llvm_unreachable("invalid reduction operator");
+ }
+
+ template <typename RecipeTy>
+ RecipeTy getOrCreateRecipe(ASTContext &astCtx, const Expr *varRef,
+ const VarDecl *varRecipe, const VarDecl *temporary,
+ OpenACCReductionOperator reductionOp,
+ DeclContext *dc, QualType baseType,
+ mlir::Value mainOp) {
+ mlir::ModuleOp mod = builder.getBlock()
+ ->getParent()
+ ->template getParentOfType<mlir::ModuleOp>();
+
+ std::string recipeName = getRecipeName<RecipeTy>(varRef->getSourceRange(),
+ baseType, reductionOp);
+ if (auto recipe = mod.lookupSymbol<RecipeTy>(recipeName))
+ return recipe;
+
+ mlir::Location loc = cgf.cgm.getLoc(varRef->getBeginLoc());
+ mlir::Location locEnd = cgf.cgm.getLoc(varRef->getEndLoc());
+
+ mlir::OpBuilder modBuilder(mod.getBodyRegion());
+ RecipeTy recipe;
+
+ if constexpr (std::is_same_v<RecipeTy, mlir::acc::ReductionRecipeOp>) {
+ recipe = RecipeTy::create(modBuilder, loc, recipeName, mainOp.getType(),
+ convertReductionOp(reductionOp));
+ } else {
+ recipe = RecipeTy::create(modBuilder, loc, recipeName, mainOp.getType());
}
+ createRecipeInitCopy(loc, locEnd, varRef->getSourceRange(), mainOp, recipe,
+ varRecipe, temporary);
+
+ if constexpr (std::is_same_v<RecipeTy, mlir::acc::ReductionRecipeOp>) {
+ createReductionRecipeCombiner(loc, locEnd, mainOp, recipe);
+ }
+
+ if (varRecipe && varRecipe->needsDestruction(cgf.getContext()))
+ createRecipeDestroySection(loc, locEnd, mainOp,
+ cgf.getContext().getDeclAlign(varRecipe),
+ baseType, recipe.getDestroyRegion());
return recipe;
}
@@ -1088,7 +1263,9 @@ public:
{
mlir::OpBuilder::InsertionGuard guardCase(builder);
auto recipe = getOrCreateRecipe<mlir::acc::PrivateRecipeOp>(
- cgf.getContext(), varExpr, varRecipe,
+ cgf.getContext(), varExpr, varRecipe, /*temporary=*/nullptr,
+ OpenACCReductionOperator::Invalid,
Decl::castToDeclContext(cgf.curFuncDecl), opInfo.baseType,
privateOp.getResult());
// TODO: OpenACC: The dialect is going to change in the near future to
@@ -1105,6 +1282,77 @@ public:
llvm_unreachable("Unknown construct kind in VisitPrivateClause");
}
}
+
+ void VisitFirstPrivateClause(const OpenACCFirstPrivateClause &clause) {
+ if constexpr (isOneOfTypes<OpTy, mlir::acc::ParallelOp,
+ mlir::acc::SerialOp>) {
+ for (const auto [varExpr, varRecipe] :
+ llvm::zip_equal(clause.getVarList(), clause.getInitRecipes())) {
+ CIRGenFunction::OpenACCDataOperandInfo opInfo =
+ cgf.getOpenACCDataOperandInfo(varExpr);
+ auto firstPrivateOp = mlir::acc::FirstprivateOp::create(
+ builder, opInfo.beginLoc, opInfo.varValue, /*structured=*/true,
+ /*implicit=*/false, opInfo.name, opInfo.bounds);
+
+ firstPrivateOp.setDataClause(mlir::acc::DataClause::acc_firstprivate);
+
+ {
+ mlir::OpBuilder::InsertionGuard guardCase(builder);
+ auto recipe = getOrCreateRecipe<mlir::acc::FirstprivateRecipeOp>(
+ cgf.getContext(), varExpr, varRecipe.RecipeDecl,
+ varRecipe.InitFromTemporary, OpenACCReductionOperator::Invalid,
+ Decl::castToDeclContext(cgf.curFuncDecl), opInfo.baseType,
+ firstPrivateOp.getResult());
+
+ // TODO: OpenACC: The dialect is going to change in the near future to
+ // have these be on a different operation, so when that changes, we
+ // probably need to change these here.
+ operation.addFirstPrivatization(builder.getContext(), firstPrivateOp,
+ recipe);
+ }
+ }
+ } else if constexpr (isCombinedType<OpTy>) {
+ // Unlike 'private', 'firstprivate' applies to the compute op, not the
+ // loop op.
+ applyToComputeOp(clause);
+ } else {
+ llvm_unreachable("Unknown construct kind in VisitFirstPrivateClause");
+ }
+ }
+
+ void VisitReductionClause(const OpenACCReductionClause &clause) {
+ if constexpr (isOneOfTypes<OpTy, mlir::acc::ParallelOp, mlir::acc::SerialOp,
+ mlir::acc::LoopOp>) {
+ for (const auto [varExpr, varRecipe] :
+ llvm::zip_equal(clause.getVarList(), clause.getRecipes())) {
+ CIRGenFunction::OpenACCDataOperandInfo opInfo =
+ cgf.getOpenACCDataOperandInfo(varExpr);
+
+ auto reductionOp = mlir::acc::ReductionOp::create(
+ builder, opInfo.beginLoc, opInfo.varValue, /*structured=*/true,
+ /*implicit=*/false, opInfo.name, opInfo.bounds);
+ reductionOp.setDataClause(mlir::acc::DataClause::acc_reduction);
+
+ {
+ mlir::OpBuilder::InsertionGuard guardCase(builder);
+
+ auto recipe = getOrCreateRecipe<mlir::acc::ReductionRecipeOp>(
+ cgf.getContext(), varExpr, varRecipe.RecipeDecl,
+ /*temporary=*/nullptr, clause.getReductionOp(),
+ Decl::castToDeclContext(cgf.curFuncDecl), opInfo.baseType,
+ reductionOp.getResult());
+
+ operation.addReduction(builder.getContext(), reductionOp, recipe);
+ }
+ }
+ } else if constexpr (isCombinedType<OpTy>) {
+ // Despite this being valid on ParallelOp or SerialOp, combined type
+ // applies to the 'loop'.
+ applyToLoopOp(clause);
+ } else {
+ llvm_unreachable("Unknown construct kind in VisitReductionClause");
+ }
+ }
};
template <typename OpTy>
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h
index b28afe4..914ef16 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h
@@ -141,6 +141,10 @@ private:
// for both virtual and non-virtual bases.
llvm::DenseMap<const clang::CXXRecordDecl *, unsigned> nonVirtualBases;
+ /// Map from virtual bases to their field index in the complete object.
+ llvm::DenseMap<const clang::CXXRecordDecl *, unsigned>
+ completeObjectVirtualBases;
+
/// Map from (bit-field) record field to the corresponding CIR record type
/// field no. This info is populated by record builder.
llvm::DenseMap<const clang::FieldDecl *, CIRGenBitFieldInfo> bitFields;
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
index 1764967..6c7cf75 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
@@ -41,7 +41,7 @@ struct CIRRecordLowering final {
// member type that ensures correct rounding.
struct MemberInfo final {
CharUnits offset;
- enum class InfoKind { VFPtr, Field, Base } kind;
+ enum class InfoKind { VFPtr, Field, Base, VBase } kind;
mlir::Type data;
union {
const FieldDecl *fieldDecl;
@@ -71,17 +71,18 @@ struct CIRRecordLowering final {
void setBitFieldInfo(const FieldDecl *fd, CharUnits startOffset,
mlir::Type storageType);
- void lower();
+ void lower(bool NonVirtualBaseType);
void lowerUnion();
/// Determines if we need a packed llvm struct.
- void determinePacked();
+ void determinePacked(bool nvBaseType);
/// Inserts padding everywhere it's needed.
void insertPadding();
void computeVolatileBitfields();
- void accumulateBases(const CXXRecordDecl *cxxRecordDecl);
+ void accumulateBases();
void accumulateVPtrs();
+ void accumulateVBases();
void accumulateFields();
RecordDecl::field_iterator
accumulateBitFields(RecordDecl::field_iterator field,
@@ -96,6 +97,17 @@ struct CIRRecordLowering final {
/// Helper function to check if the target machine is BigEndian.
bool isBigEndian() const { return astContext.getTargetInfo().isBigEndian(); }
+ // The Itanium base layout rule allows virtual bases to overlap
+ // other bases, which complicates layout in specific ways.
+ //
+ // Note specifically that the ms_struct attribute doesn't change this.
+ bool isOverlappingVBaseABI() {
+ return !astContext.getTargetInfo().getCXXABI().isMicrosoft();
+ }
+ // Recursively searches all of the bases to find out whether a vbase is
+ // the primary vbase of some base class (in which case it does not get its
+ // own storage).
+ bool hasOwnStorage(const CXXRecordDecl *decl, const CXXRecordDecl *query);
+
CharUnits bitsToCharUnits(uint64_t bitOffset) {
return astContext.toCharUnitsFromBits(bitOffset);
}
@@ -184,6 +196,7 @@ struct CIRRecordLowering final {
CIRGenBuilderTy &builder;
const ASTContext &astContext;
const RecordDecl *recordDecl;
+ const CXXRecordDecl *cxxRecordDecl;
const ASTRecordLayout &astRecordLayout;
// Helpful intermediate data-structures
std::vector<MemberInfo> members;
@@ -192,6 +205,7 @@ struct CIRRecordLowering final {
llvm::DenseMap<const FieldDecl *, CIRGenBitFieldInfo> bitFields;
llvm::DenseMap<const FieldDecl *, unsigned> fieldIdxMap;
llvm::DenseMap<const CXXRecordDecl *, unsigned> nonVirtualBases;
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> virtualBases;
cir::CIRDataLayout dataLayout;
LLVM_PREFERRED_TYPE(bool)
@@ -211,13 +225,14 @@ private:
CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes,
const RecordDecl *recordDecl, bool packed)
- : cirGenTypes(cirGenTypes), builder(cirGenTypes.getBuilder()),
- astContext(cirGenTypes.getASTContext()), recordDecl(recordDecl),
- astRecordLayout(
- cirGenTypes.getASTContext().getASTRecordLayout(recordDecl)),
- dataLayout(cirGenTypes.getCGModule().getModule()),
- zeroInitializable(true), zeroInitializableAsBase(true), packed(packed),
- padded(false) {}
+ : cirGenTypes{cirGenTypes}, builder{cirGenTypes.getBuilder()},
+ astContext{cirGenTypes.getASTContext()}, recordDecl{recordDecl},
+ cxxRecordDecl{llvm::dyn_cast<CXXRecordDecl>(recordDecl)},
+ astRecordLayout{
+ cirGenTypes.getASTContext().getASTRecordLayout(recordDecl)},
+ dataLayout{cirGenTypes.getCGModule().getModule()},
+ zeroInitializable{true}, zeroInitializableAsBase{true}, packed{packed},
+ padded{false} {}
void CIRRecordLowering::setBitFieldInfo(const FieldDecl *fd,
CharUnits startOffset,
@@ -246,27 +261,28 @@ void CIRRecordLowering::setBitFieldInfo(const FieldDecl *fd,
info.volatileStorageOffset = CharUnits::Zero();
}
-void CIRRecordLowering::lower() {
+void CIRRecordLowering::lower(bool nonVirtualBaseType) {
if (recordDecl->isUnion()) {
lowerUnion();
computeVolatileBitfields();
return;
}
- assert(!cir::MissingFeatures::recordLayoutVirtualBases());
- CharUnits size = astRecordLayout.getSize();
+ CharUnits size = nonVirtualBaseType ? astRecordLayout.getNonVirtualSize()
+ : astRecordLayout.getSize();
accumulateFields();
- if (const auto *cxxRecordDecl = dyn_cast<CXXRecordDecl>(recordDecl)) {
+ if (cxxRecordDecl) {
accumulateVPtrs();
- accumulateBases(cxxRecordDecl);
+ accumulateBases();
if (members.empty()) {
appendPaddingBytes(size);
computeVolatileBitfields();
return;
}
- assert(!cir::MissingFeatures::recordLayoutVirtualBases());
+ if (!nonVirtualBaseType)
+ accumulateVBases();
}
llvm::stable_sort(members);
@@ -275,7 +291,7 @@ void CIRRecordLowering::lower() {
assert(!cir::MissingFeatures::recordZeroInit());
members.push_back(makeStorageInfo(size, getUIntNType(8)));
- determinePacked();
+ determinePacked(nonVirtualBaseType);
insertPadding();
members.pop_back();
@@ -298,8 +314,9 @@ void CIRRecordLowering::fillOutputFields() {
setBitFieldInfo(member.fieldDecl, member.offset, fieldTypes.back());
} else if (member.kind == MemberInfo::InfoKind::Base) {
nonVirtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
+ } else if (member.kind == MemberInfo::InfoKind::VBase) {
+ virtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
}
- assert(!cir::MissingFeatures::recordLayoutVirtualBases());
}
}
@@ -426,8 +443,9 @@ CIRRecordLowering::accumulateBitFields(RecordDecl::field_iterator field,
limitOffset = bitsToCharUnits(getFieldBitOffset(*probe));
goto FoundLimit;
}
- assert(!cir::MissingFeatures::cxxSupport());
- limitOffset = astRecordLayout.getDataSize();
+ limitOffset = cxxRecordDecl ? astRecordLayout.getNonVirtualSize()
+ : astRecordLayout.getDataSize();
+
FoundLimit:
CharUnits typeSize = getSize(type);
if (beginOffset + typeSize <= limitOffset) {
@@ -524,24 +542,25 @@ void CIRRecordLowering::calculateZeroInit() {
continue;
zeroInitializable = zeroInitializableAsBase = false;
return;
- } else if (member.kind == MemberInfo::InfoKind::Base) {
+ } else if (member.kind == MemberInfo::InfoKind::Base ||
+ member.kind == MemberInfo::InfoKind::VBase) {
if (isZeroInitializable(member.cxxRecordDecl))
continue;
zeroInitializable = false;
if (member.kind == MemberInfo::InfoKind::Base)
zeroInitializableAsBase = false;
}
- assert(!cir::MissingFeatures::recordLayoutVirtualBases());
}
}
-void CIRRecordLowering::determinePacked() {
+void CIRRecordLowering::determinePacked(bool nvBaseType) {
if (packed)
return;
CharUnits alignment = CharUnits::One();
-
- // TODO(cir): handle non-virtual base types
- assert(!cir::MissingFeatures::cxxSupport());
+ CharUnits nvAlignment = CharUnits::One();
+ CharUnits nvSize = !nvBaseType && cxxRecordDecl
+ ? astRecordLayout.getNonVirtualSize()
+ : CharUnits::Zero();
for (const MemberInfo &member : members) {
if (!member.data)
@@ -550,12 +569,19 @@ void CIRRecordLowering::determinePacked() {
// then the entire record must be packed.
if (member.offset % getAlignment(member.data))
packed = true;
+ if (member.offset < nvSize)
+ nvAlignment = std::max(nvAlignment, getAlignment(member.data));
alignment = std::max(alignment, getAlignment(member.data));
}
// If the size of the record (the capstone's offset) is not a multiple of the
// record's alignment, it must be packed.
if (members.back().offset % alignment)
packed = true;
+ // If the size of the non-virtual sub-object is not a multiple of its
+ // alignment, it must be packed. We cannot have a packed non-virtual
+ // sub-object and an unpacked complete object, or vice versa.
+ if (nvSize % nvAlignment)
+ packed = true;
// Update the alignment of the sentinel.
if (!packed)
members.back().data = getUIntNType(astContext.toBits(alignment));
@@ -589,7 +615,7 @@ std::unique_ptr<CIRGenRecordLayout>
CIRGenTypes::computeRecordLayout(const RecordDecl *rd, cir::RecordType *ty) {
CIRRecordLowering lowering(*this, rd, /*packed=*/false);
assert(ty->isIncomplete() && "recomputing record layout?");
- lowering.lower();
+ lowering.lower(/*nonVirtualBaseType=*/false);
// If we're in C++, compute the base subobject type.
cir::RecordType baseTy;
@@ -599,7 +625,7 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *rd, cir::RecordType *ty) {
if (lowering.astRecordLayout.getNonVirtualSize() !=
lowering.astRecordLayout.getSize()) {
CIRRecordLowering baseLowering(*this, rd, /*Packed=*/lowering.packed);
- baseLowering.lower();
+ baseLowering.lower(/*NonVirtualBaseType=*/true);
std::string baseIdentifier = getRecordTypeName(rd, ".base");
baseTy =
builder.getCompleteRecordTy(baseLowering.fieldTypes, baseIdentifier,
@@ -626,8 +652,8 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *rd, cir::RecordType *ty) {
assert(!cir::MissingFeatures::recordZeroInit());
rl->nonVirtualBases.swap(lowering.nonVirtualBases);
+ rl->completeObjectVirtualBases.swap(lowering.virtualBases);
- assert(!cir::MissingFeatures::cxxSupport());
assert(!cir::MissingFeatures::bitfields());
// Add all the field numbers.
@@ -754,6 +780,17 @@ void CIRRecordLowering::lowerUnion() {
packed = true;
}
+bool CIRRecordLowering::hasOwnStorage(const CXXRecordDecl *decl,
+ const CXXRecordDecl *query) {
+ const ASTRecordLayout &declLayout = astContext.getASTRecordLayout(decl);
+ if (declLayout.isPrimaryBaseVirtual() && declLayout.getPrimaryBase() == query)
+ return false;
+ for (const auto &base : decl->bases())
+ if (!hasOwnStorage(base.getType()->getAsCXXRecordDecl(), query))
+ return false;
+ return true;
+}
+
/// The AAPCS defines that, when possible, bit-fields should
/// be accessed using containers of the declared type width:
/// When a volatile bit-field is read, and its container does not overlap with
@@ -873,7 +910,7 @@ void CIRRecordLowering::computeVolatileBitfields() {
}
}
-void CIRRecordLowering::accumulateBases(const CXXRecordDecl *cxxRecordDecl) {
+void CIRRecordLowering::accumulateBases() {
// If we've got a primary virtual base, we need to add it with the bases.
if (astRecordLayout.isPrimaryBaseVirtual()) {
cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
@@ -881,12 +918,9 @@ void CIRRecordLowering::accumulateBases(const CXXRecordDecl *cxxRecordDecl) {
}
// Accumulate the non-virtual bases.
- for ([[maybe_unused]] const auto &base : cxxRecordDecl->bases()) {
- if (base.isVirtual()) {
- cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
- "accumulateBases: virtual base");
+ for (const auto &base : cxxRecordDecl->bases()) {
+ if (base.isVirtual())
continue;
- }
// Bases can be zero-sized even if not technically empty if they
// contain only a trailing array member.
const CXXRecordDecl *baseDecl = base.getType()->getAsCXXRecordDecl();
@@ -899,6 +933,31 @@ void CIRRecordLowering::accumulateBases(const CXXRecordDecl *cxxRecordDecl) {
}
}
+void CIRRecordLowering::accumulateVBases() {
+ for (const auto &base : cxxRecordDecl->vbases()) {
+ const CXXRecordDecl *baseDecl = base.getType()->getAsCXXRecordDecl();
+ if (isEmptyRecordForLayout(astContext, base.getType()))
+ continue;
+ CharUnits offset = astRecordLayout.getVBaseClassOffset(baseDecl);
+ // If the vbase is a primary virtual base of some base, then it doesn't
+ // get its own storage location but instead lives inside of that base.
+ if (isOverlappingVBaseABI() && astContext.isNearlyEmpty(baseDecl) &&
+ !hasOwnStorage(cxxRecordDecl, baseDecl)) {
+ members.push_back(
+ MemberInfo(offset, MemberInfo::InfoKind::VBase, nullptr, baseDecl));
+ continue;
+ }
+ // If we've got a vtordisp, add it as a storage type.
+ if (astRecordLayout.getVBaseOffsetsMap()
+ .find(baseDecl)
+ ->second.hasVtorDisp())
+ members.push_back(makeStorageInfo(offset - CharUnits::fromQuantity(4),
+ getUIntNType(32)));
+ members.push_back(MemberInfo(offset, MemberInfo::InfoKind::VBase,
+ getStorageType(baseDecl), baseDecl));
+ }
+}
+
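The overlap case handled above corresponds to a nearly-empty virtual base that serves as some class's primary base; a hypothetical example:

    struct A {
      virtual void f(); // nearly empty: only a vptr
    };
    struct B : virtual A {
      void f() override;
    };
    // Under the Itanium layout A is B's primary (virtual) base, so the A
    // subobject shares B's vptr storage: hasOwnStorage(B, A) is false and
    // accumulateVBases records A with a null storage type instead of
    // allocating a distinct field for it.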
void CIRRecordLowering::accumulateVPtrs() {
if (astRecordLayout.hasOwnVFPtr())
members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::InfoKind::VFPtr,
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 50642e7..3b0eabe 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -14,6 +14,8 @@
#include "CIRGenFunction.h"
#include "mlir/IR/Builders.h"
+#include "mlir/IR/Location.h"
+#include "mlir/Support/LLVM.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenACC.h"
@@ -23,28 +25,80 @@ using namespace clang;
using namespace clang::CIRGen;
using namespace cir;
-void CIRGenFunction::emitCompoundStmtWithoutScope(const CompoundStmt &s) {
- for (auto *curStmt : s.body()) {
- if (emitStmt(curStmt, /*useCurrentScope=*/false).failed())
- getCIRGenModule().errorNYI(curStmt->getSourceRange(),
- std::string("emitCompoundStmtWithoutScope: ") +
- curStmt->getStmtClassName());
+static mlir::LogicalResult emitStmtWithResult(CIRGenFunction &cgf,
+ const Stmt *exprResult,
+ AggValueSlot slot,
+ Address *lastValue) {
+ // We have to special case labels here. They are statements, but when put
+ // at the end of a statement expression, they yield the value of their
+ // subexpression. Handle this by walking through all labels we encounter,
+ // emitting them before we evaluate the subexpr.
+ // Similar issues arise for attributed statements.
+ while (!isa<Expr>(exprResult)) {
+ if (const auto *ls = dyn_cast<LabelStmt>(exprResult)) {
+ if (cgf.emitLabel(*ls->getDecl()).failed())
+ return mlir::failure();
+ exprResult = ls->getSubStmt();
+ } else if (const auto *as = dyn_cast<AttributedStmt>(exprResult)) {
+ // FIXME: Update this if we ever have attributes that affect the
+ // semantics of an expression.
+ exprResult = as->getSubStmt();
+ } else {
+ llvm_unreachable("Unknown value statement");
+ }
}
+
+ const Expr *e = cast<Expr>(exprResult);
+ QualType exprTy = e->getType();
+ if (cgf.hasAggregateEvaluationKind(exprTy)) {
+ cgf.emitAggExpr(e, slot);
+ } else {
+ // We can't return an RValue here because there might be cleanups at
+ // the end of the StmtExpr. Because of that, we have to emit the result
+ // here into a temporary alloca.
+ cgf.emitAnyExprToMem(e, *lastValue, Qualifiers(),
+ /*IsInit*/ false);
+ }
+
+ return mlir::success();
}
-void CIRGenFunction::emitCompoundStmt(const CompoundStmt &s) {
+mlir::LogicalResult CIRGenFunction::emitCompoundStmtWithoutScope(
+ const CompoundStmt &s, Address *lastValue, AggValueSlot slot) {
+ mlir::LogicalResult result = mlir::success();
+ const Stmt *exprResult = s.getStmtExprResult();
+ assert((!lastValue || (lastValue && exprResult)) &&
+ "If lastValue is not null then the CompoundStmt must have a "
+ "StmtExprResult");
+
+ for (const Stmt *curStmt : s.body()) {
+ const bool saveResult = lastValue && exprResult == curStmt;
+ if (saveResult) {
+ if (emitStmtWithResult(*this, exprResult, slot, lastValue).failed())
+ result = mlir::failure();
+ } else {
+ if (emitStmt(curStmt, /*useCurrentScope=*/false).failed())
+ result = mlir::failure();
+ }
+ }
+ return result;
+}
+
+mlir::LogicalResult CIRGenFunction::emitCompoundStmt(const CompoundStmt &s,
+ Address *lastValue,
+ AggValueSlot slot) {
+ // Add local scope to track new declared variables.
+ SymTableScopeTy varScope(symbolTable);
mlir::Location scopeLoc = getLoc(s.getSourceRange());
mlir::OpBuilder::InsertPoint scopeInsPt;
builder.create<cir::ScopeOp>(
scopeLoc, [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) {
scopeInsPt = b.saveInsertionPoint();
});
- {
- mlir::OpBuilder::InsertionGuard guard(builder);
- builder.restoreInsertionPoint(scopeInsPt);
- LexicalScope lexScope(*this, scopeLoc, builder.getInsertionBlock());
- emitCompoundStmtWithoutScope(s);
- }
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(scopeInsPt);
+ LexicalScope lexScope(*this, scopeLoc, builder.getInsertionBlock());
+ return emitCompoundStmtWithoutScope(s, lastValue, slot);
}
void CIRGenFunction::emitStopPoint(const Stmt *s) {
@@ -130,6 +184,9 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
return emitOpenACCCacheConstruct(cast<OpenACCCacheConstruct>(*s));
case Stmt::OpenACCAtomicConstructClass:
return emitOpenACCAtomicConstruct(cast<OpenACCAtomicConstruct>(*s));
+ case Stmt::GCCAsmStmtClass:
+ case Stmt::MSAsmStmtClass:
+ return emitAsmStmt(cast<AsmStmt>(*s));
case Stmt::OMPScopeDirectiveClass:
case Stmt::OMPErrorDirectiveClass:
case Stmt::LabelStmtClass:
@@ -143,8 +200,6 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
case Stmt::CoreturnStmtClass:
case Stmt::CXXTryStmtClass:
case Stmt::IndirectGotoStmtClass:
- case Stmt::GCCAsmStmtClass:
- case Stmt::MSAsmStmtClass:
case Stmt::OMPParallelDirectiveClass:
case Stmt::OMPTaskwaitDirectiveClass:
case Stmt::OMPTaskyieldDirectiveClass:
@@ -246,16 +301,19 @@ mlir::LogicalResult CIRGenFunction::emitSimpleStmt(const Stmt *s,
return emitDeclStmt(cast<DeclStmt>(*s));
case Stmt::CompoundStmtClass:
if (useCurrentScope)
- emitCompoundStmtWithoutScope(cast<CompoundStmt>(*s));
- else
- emitCompoundStmt(cast<CompoundStmt>(*s));
- break;
+ return emitCompoundStmtWithoutScope(cast<CompoundStmt>(*s));
+ return emitCompoundStmt(cast<CompoundStmt>(*s));
+ case Stmt::GotoStmtClass:
+ return emitGotoStmt(cast<GotoStmt>(*s));
case Stmt::ContinueStmtClass:
return emitContinueStmt(cast<ContinueStmt>(*s));
// NullStmt doesn't need any handling, but we need to say we handled it.
case Stmt::NullStmtClass:
break;
+
+ case Stmt::LabelStmtClass:
+ return emitLabelStmt(cast<LabelStmt>(*s));
case Stmt::CaseStmtClass:
case Stmt::DefaultStmtClass:
    // If we reached here, we must not be handling a switch case at the top level.
@@ -272,6 +330,17 @@ mlir::LogicalResult CIRGenFunction::emitSimpleStmt(const Stmt *s,
return mlir::success();
}
+mlir::LogicalResult CIRGenFunction::emitLabelStmt(const clang::LabelStmt &s) {
+
+ if (emitLabel(*s.getDecl()).failed())
+ return mlir::failure();
+
+ if (getContext().getLangOpts().EHAsynch && s.isSideEntry())
+ getCIRGenModule().errorNYI(s.getSourceRange(), "IsEHa: not implemented.");
+
+ return emitStmt(s.getSubStmt(), /*useCurrentScope*/ true);
+}
+
// Add a terminating yield on a body region if no other terminators are used.
static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r,
mlir::Location loc) {
@@ -412,13 +481,31 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
// This should emit a branch through the cleanup block if one exists.
builder.create<cir::BrOp>(loc, retBlock);
- if (ehStack.getStackDepth() != currentCleanupStackDepth)
+ if (ehStack.stable_begin() != currentCleanupStackDepth)
cgm.errorNYI(s.getSourceRange(), "return with cleanup stack");
builder.createBlock(builder.getBlock()->getParent());
return mlir::success();
}
+mlir::LogicalResult CIRGenFunction::emitGotoStmt(const clang::GotoStmt &s) {
+  // FIXME: LLVM codegen emits a stop point here for debug info's sake when
+  // the insertion point is available, but doesn't do anything special when
+  // there isn't. We haven't implemented debug info support just yet; look
+  // at this again once we have it.
+ assert(!cir::MissingFeatures::generateDebugInfo());
+
+ cir::GotoOp::create(builder, getLoc(s.getSourceRange()),
+ s.getLabel()->getName());
+
+  // A goto marks the end of a block. Create a new block so that codegen
+  // after emitGotoStmt can resume building into it.
+ builder.createBlock(builder.getBlock()->getParent());
+
+ return mlir::success();
+}
+
mlir::LogicalResult
CIRGenFunction::emitContinueStmt(const clang::ContinueStmt &s) {
builder.createContinue(getLoc(s.getContinueLoc()));
@@ -429,6 +516,32 @@ CIRGenFunction::emitContinueStmt(const clang::ContinueStmt &s) {
return mlir::success();
}
+mlir::LogicalResult CIRGenFunction::emitLabel(const clang::LabelDecl &d) {
+  // Create a new block to tag with the label and add a branch from the
+  // current block to it. If the current block is empty, just attach the
+  // label to it directly.
+ mlir::Block *currBlock = builder.getBlock();
+ mlir::Block *labelBlock = currBlock;
+
+ if (!currBlock->empty()) {
+ {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ labelBlock = builder.createBlock(builder.getBlock()->getParent());
+ }
+ builder.create<cir::BrOp>(getLoc(d.getSourceRange()), labelBlock);
+ }
+
+ builder.setInsertionPointToEnd(labelBlock);
+ builder.create<cir::LabelOp>(getLoc(d.getSourceRange()), d.getName());
+ builder.setInsertionPointToEnd(labelBlock);
+
+ // FIXME: emit debug info for labels, incrementProfileCounter
+ assert(!cir::MissingFeatures::ehstackBranches());
+ assert(!cir::MissingFeatures::incrementProfileCounter());
+ assert(!cir::MissingFeatures::generateDebugInfo());
+ return mlir::success();
+}
+
mlir::LogicalResult CIRGenFunction::emitBreakStmt(const clang::BreakStmt &s) {
builder.createBreak(getLoc(s.getBreakLoc()));
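For context on the StmtExprResult handling above: GNU statement expressions yield the value of their final expression, even when that expression is wrapped in labels or attributed statements, which is why emitStmtWithResult walks through LabelStmt and AttributedStmt before evaluating. A small example (mine, not from the patch):

    // The value of the ({ ... }) expression is its final expression,
    // reached here through a label; emitLabel runs before the result
    // expression is emitted into lastValue.
    int twicePlusOne(int x) {
      return ({
        int t = x * 2;
      done:              // LabelStmt wrapping the StmtExprResult
        t + 1;           // value of the whole statement expression
      });
    }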
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
index 1d081d5..eb8dcd6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
+++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
@@ -59,6 +59,7 @@ struct CIRGenTypeCache {
/// void* in address space 0
cir::PointerType VoidPtrTy;
+ cir::PointerType UInt8PtrTy;
/// The size and alignment of a pointer into the generic address space.
union {
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
index 3e07f6d..bb24933 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
@@ -103,7 +103,8 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl,
policy.SuppressTagKeyword = true;
if (recordDecl->getIdentifier())
- astContext.getRecordType(recordDecl).print(outStream, policy);
+ QualType(astContext.getCanonicalTagType(recordDecl))
+ .print(outStream, policy);
else if (auto *typedefNameDecl = recordDecl->getTypedefNameForAnonDecl())
typedefNameDecl->printQualifiedName(outStream, policy);
else
@@ -138,7 +139,9 @@ isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt,
if (!alreadyChecked.insert(rd).second)
return true;
- const Type *key = cgt.getASTContext().getTagDeclType(rd).getTypePtr();
+ assert(rd->isCompleteDefinition() &&
+ "Expect RecordDecl to be CompleteDefinition");
+ const Type *key = cgt.getASTContext().getCanonicalTagType(rd).getTypePtr();
// If this type is already laid out, converting it is a noop.
if (cgt.isRecordLayoutComplete(key))
@@ -152,13 +155,14 @@ isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt,
// out, don't do it. This includes virtual base classes which get laid out
// when a class is translated, even though they aren't embedded by-value into
// the class.
- if (auto *crd = dyn_cast<CXXRecordDecl>(rd)) {
- if (crd->getNumBases() > 0) {
- assert(!cir::MissingFeatures::cxxSupport());
- cgt.getCGModule().errorNYI(rd->getSourceRange(),
- "isSafeToConvert: CXXRecordDecl with bases");
- return false;
- }
+ if (const CXXRecordDecl *crd = dyn_cast<CXXRecordDecl>(rd)) {
+ for (const clang::CXXBaseSpecifier &i : crd->bases())
+ if (!isSafeToConvert(i.getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf(),
+ cgt, alreadyChecked))
+ return false;
}
// If this type would require laying out members that are currently being laid
@@ -181,8 +185,8 @@ isSafeToConvert(QualType qt, CIRGenTypes &cgt,
qt = at->getValueType();
// If this is a record, check it.
- if (const auto *rt = qt->getAs<RecordType>())
- return isSafeToConvert(rt->getDecl(), cgt, alreadyChecked);
+ if (const auto *rd = qt->getAsRecordDecl())
+ return isSafeToConvert(rd, cgt, alreadyChecked);
// If this is an array, check the elements, which are embedded inline.
if (const auto *at = cgt.getASTContext().getAsArrayType(qt))
@@ -210,7 +214,7 @@ static bool isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt) {
mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *rd) {
// TagDecl's are not necessarily unique, instead use the (clang) type
// connected to the decl.
- const Type *key = astContext.getTagDeclType(rd).getTypePtr();
+ const Type *key = astContext.getCanonicalTagType(rd).getTypePtr();
cir::RecordType entry = recordDeclTypes[key];
// If we don't have an entry for this record yet, create one.
@@ -242,7 +246,7 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *rd) {
for (const auto &base : cxxRecordDecl->bases()) {
if (base.isVirtual())
continue;
- convertRecordDeclType(base.getType()->castAs<RecordType>()->getDecl());
+ convertRecordDeclType(base.getType()->castAsRecordDecl());
}
}
@@ -275,7 +279,8 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
// Process record types before the type cache lookup.
if (const auto *recordType = dyn_cast<RecordType>(type))
- return convertRecordDeclType(recordType->getDecl());
+ return convertRecordDeclType(
+ recordType->getOriginalDecl()->getDefinitionOrSelf());
// Has the type already been processed?
TypeCacheTy::iterator tci = typeCache.find(ty);
@@ -457,7 +462,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
}
case Type::Enum: {
- const EnumDecl *ed = cast<EnumType>(ty)->getDecl();
+ const auto *ed = ty->castAsEnumDecl();
if (auto integerType = ed->getIntegerType(); !integerType.isNull())
return convertType(integerType);
// Return a placeholder 'i32' type. This can be changed later when the
@@ -484,6 +489,20 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
break;
}
+ case Type::Atomic: {
+ QualType valueType = cast<AtomicType>(ty)->getValueType();
+ resultType = convertTypeForMem(valueType);
+
+ // Pad out to the inflated size if necessary.
+ uint64_t valueSize = astContext.getTypeSize(valueType);
+ uint64_t atomicSize = astContext.getTypeSize(ty);
+ if (valueSize != atomicSize) {
+ cgm.errorNYI("convertType: atomic type value size != atomic size");
+ }
+
+ break;
+ }
+
default:
cgm.errorNYI(SourceLocation(), "processing of type",
type->getTypeClassName());
@@ -516,7 +535,7 @@ mlir::Type CIRGenTypes::convertTypeForMem(clang::QualType qualType,
/// Return record layout info for the given record decl.
const CIRGenRecordLayout &
CIRGenTypes::getCIRGenRecordLayout(const RecordDecl *rd) {
- const auto *key = astContext.getTagDeclType(rd).getTypePtr();
+ const auto *key = astContext.getCanonicalTagType(rd).getTypePtr();
// If we have already computed the layout, return it.
auto it = cirGenRecordLayouts.find(key);
@@ -547,10 +566,8 @@ bool CIRGenTypes::isZeroInitializable(clang::QualType t) {
return true;
}
- if (const RecordType *rt = t->getAs<RecordType>()) {
- const RecordDecl *rd = rt->getDecl();
+ if (const auto *rd = t->getAsRecordDecl())
return isZeroInitializable(rd);
- }
if (t->getAs<MemberPointerType>()) {
cgm.errorNYI(SourceLocation(), "isZeroInitializable for MemberPointerType",
@@ -623,8 +640,10 @@ void CIRGenTypes::updateCompletedType(const TagDecl *td) {
// declaration of enums, and C doesn't allow an incomplete forward
// declaration with a non-default type.
assert(
- !typeCache.count(ed->getTypeForDecl()) ||
- (convertType(ed->getIntegerType()) == typeCache[ed->getTypeForDecl()]));
+ !typeCache.count(
+ ed->getASTContext().getCanonicalTagType(ed)->getTypePtr()) ||
+ (convertType(ed->getIntegerType()) ==
+ typeCache[ed->getASTContext().getCanonicalTagType(ed)->getTypePtr()]));
// If necessary, provide the full definition of a type only used with a
// declaration so far.
assert(!cir::MissingFeatures::generateDebugInfo());
@@ -639,7 +658,7 @@ void CIRGenTypes::updateCompletedType(const TagDecl *td) {
// Only complete if we converted it already. If we haven't converted it yet,
// we'll just do it lazily.
- if (recordDeclTypes.count(astContext.getTagDeclType(rd).getTypePtr()))
+ if (recordDeclTypes.count(astContext.getCanonicalTagType(rd).getTypePtr()))
convertRecordDeclType(rd);
// If necessary, provide the full definition of a type only used with a
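A hedged illustration of the new Atomic case in convertType: the atomic width can be inflated past the value's own size so the object fits a natively supported atomic width, and that mismatch is what the errorNYI path currently rejects. The sizes below are typical, not guaranteed:

    struct Payload { char bytes[3]; };   // value size: 24 bits
    _Atomic(Payload) p;                  // _Atomic in C++ is a Clang
                                         // extension; the atomic size is
                                         // often inflated to 32 bits here
    // valueSize != atomicSize reaches the
    // "convertType: atomic type value size != atomic size" path above.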
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h
index c2813d7..7af0d95 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypes.h
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h
@@ -130,6 +130,13 @@ public:
/// Get the CIR function type for \arg Info.
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info);
+ cir::FuncType getFunctionType(clang::GlobalDecl gd);
+
+ /// Get the CIR function type for use in a vtable, given a CXXMethodDecl. If
+ /// the method has an incomplete return type, and/or incomplete argument
+ /// types, this will return the opaque type.
+ cir::FuncType getFunctionTypeForVTable(clang::GlobalDecl gd);
+
// The arrangement methods are split into three families:
// - those meant to drive the signature and prologue/epilogue
// of a function declaration or definition,
diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
new file mode 100644
index 0000000..aca12aa
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
@@ -0,0 +1,244 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenVTables.h"
+
+#include "CIRGenCXXABI.h"
+#include "CIRGenModule.h"
+#include "mlir/IR/Types.h"
+#include "clang/AST/VTableBuilder.h"
+#include "llvm/ADT/SmallVector.h"
+
+using namespace llvm;
+using namespace clang;
+using namespace clang::CIRGen;
+
+CIRGenVTables::CIRGenVTables(CIRGenModule &cgm)
+ : cgm(cgm), vtContext(cgm.getASTContext().getVTableContext()) {}
+
+mlir::Type CIRGenModule::getVTableComponentType() {
+ mlir::Type ptrTy = builder.getUInt8PtrTy();
+ assert(!cir::MissingFeatures::vtableRelativeLayout());
+ return ptrTy;
+}
+
+mlir::Type CIRGenVTables::getVTableComponentType() {
+ return cgm.getVTableComponentType();
+}
+
+cir::RecordType CIRGenVTables::getVTableType(const VTableLayout &layout) {
+ SmallVector<mlir::Type, 4> tys;
+ mlir::Type componentType = getVTableComponentType();
+ for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i)
+ tys.push_back(cir::ArrayType::get(componentType, layout.getVTableSize(i)));
+
+ // FIXME(cir): should VTableLayout be encoded like we do for some
+ // AST nodes?
+ return cgm.getBuilder().getAnonRecordTy(tys, /*incomplete=*/false);
+}
+
+/// This is a callback from Sema to tell us that a particular vtable is
+/// required to be emitted in this translation unit.
+///
+/// This is only called for vtables that _must_ be emitted (mainly due to key
+/// functions). For weak vtables, CodeGen tracks when they are needed and
+/// emits them as-needed.
+void CIRGenModule::emitVTable(const CXXRecordDecl *rd) {
+ vtables.generateClassData(rd);
+}
+
+void CIRGenVTables::generateClassData(const CXXRecordDecl *rd) {
+ assert(!cir::MissingFeatures::generateDebugInfo());
+
+ if (rd->getNumVBases())
+ cgm.errorNYI(rd->getSourceRange(), "emitVirtualInheritanceTables");
+
+ cgm.getCXXABI().emitVTableDefinitions(*this, rd);
+}
+
+mlir::Attribute CIRGenVTables::getVTableComponent(
+ const VTableLayout &layout, unsigned componentIndex, mlir::Attribute rtti,
+ unsigned &nextVTableThunkIndex, unsigned vtableAddressPoint,
+ bool vtableHasLocalLinkage) {
+ const VTableComponent &component = layout.vtable_components()[componentIndex];
+
+ CIRGenBuilderTy builder = cgm.getBuilder();
+
+ assert(!cir::MissingFeatures::vtableRelativeLayout());
+
+ switch (component.getKind()) {
+ case VTableComponent::CK_VCallOffset:
+ cgm.errorNYI("getVTableComponent: VCallOffset");
+ return mlir::Attribute();
+ case VTableComponent::CK_VBaseOffset:
+ cgm.errorNYI("getVTableComponent: VBaseOffset");
+ return mlir::Attribute();
+ case VTableComponent::CK_CompleteDtorPointer:
+ cgm.errorNYI("getVTableComponent: CompleteDtorPointer");
+ return mlir::Attribute();
+ case VTableComponent::CK_DeletingDtorPointer:
+ cgm.errorNYI("getVTableComponent: DeletingDtorPointer");
+ return mlir::Attribute();
+ case VTableComponent::CK_UnusedFunctionPointer:
+ cgm.errorNYI("getVTableComponent: UnusedFunctionPointer");
+ return mlir::Attribute();
+
+ case VTableComponent::CK_OffsetToTop:
+ return builder.getConstPtrAttr(builder.getUInt8PtrTy(),
+ component.getOffsetToTop().getQuantity());
+
+ case VTableComponent::CK_RTTI:
+ assert((mlir::isa<cir::GlobalViewAttr>(rtti) ||
+ mlir::isa<cir::ConstPtrAttr>(rtti)) &&
+ "expected GlobalViewAttr or ConstPtrAttr");
+ return rtti;
+
+ case VTableComponent::CK_FunctionPointer: {
+ GlobalDecl gd = component.getGlobalDecl();
+
+ assert(!cir::MissingFeatures::cudaSupport());
+
+ cir::FuncOp fnPtr;
+ if (cast<CXXMethodDecl>(gd.getDecl())->isPureVirtual()) {
+ cgm.errorNYI("getVTableComponent: CK_FunctionPointer: pure virtual");
+ return mlir::Attribute();
+ } else if (cast<CXXMethodDecl>(gd.getDecl())->isDeleted()) {
+ cgm.errorNYI("getVTableComponent: CK_FunctionPointer: deleted virtual");
+ return mlir::Attribute();
+ } else if (nextVTableThunkIndex < layout.vtable_thunks().size() &&
+ layout.vtable_thunks()[nextVTableThunkIndex].first ==
+ componentIndex) {
+ cgm.errorNYI("getVTableComponent: CK_FunctionPointer: thunk");
+ return mlir::Attribute();
+ } else {
+ // Otherwise we can use the method definition directly.
+ cir::FuncType fnTy = cgm.getTypes().getFunctionTypeForVTable(gd);
+ fnPtr = cgm.getAddrOfFunction(gd, fnTy, /*ForVTable=*/true);
+ }
+
+ return cir::GlobalViewAttr::get(
+ builder.getUInt8PtrTy(),
+ mlir::FlatSymbolRefAttr::get(fnPtr.getSymNameAttr()));
+ }
+ }
+
+ llvm_unreachable("Unexpected vtable component kind");
+}
+
+void CIRGenVTables::createVTableInitializer(cir::GlobalOp &vtableOp,
+ const clang::VTableLayout &layout,
+ mlir::Attribute rtti,
+ bool vtableHasLocalLinkage) {
+ mlir::Type componentType = getVTableComponentType();
+
+ const llvm::SmallVectorImpl<unsigned> &addressPoints =
+ layout.getAddressPointIndices();
+ unsigned nextVTableThunkIndex = 0;
+
+ mlir::MLIRContext *mlirContext = &cgm.getMLIRContext();
+
+ SmallVector<mlir::Attribute> vtables;
+ for (auto [vtableIndex, addressPoint] : llvm::enumerate(addressPoints)) {
+ // Build a ConstArrayAttr of the vtable components.
+ size_t vtableStart = layout.getVTableOffset(vtableIndex);
+ size_t vtableEnd = vtableStart + layout.getVTableSize(vtableIndex);
+ llvm::SmallVector<mlir::Attribute> components;
+ components.reserve(vtableEnd - vtableStart);
+ for (size_t componentIndex : llvm::seq(vtableStart, vtableEnd))
+ components.push_back(
+ getVTableComponent(layout, componentIndex, rtti, nextVTableThunkIndex,
+ addressPoint, vtableHasLocalLinkage));
+ // Create a ConstArrayAttr to hold the components.
+ auto arr = cir::ConstArrayAttr::get(
+ cir::ArrayType::get(componentType, components.size()),
+ mlir::ArrayAttr::get(mlirContext, components));
+ vtables.push_back(arr);
+ }
+
+ // Create a ConstRecordAttr to hold the component array.
+ const auto members = mlir::ArrayAttr::get(mlirContext, vtables);
+ cir::ConstRecordAttr record = cgm.getBuilder().getAnonConstRecord(members);
+
+ // Create a VTableAttr
+ auto vtableAttr = cir::VTableAttr::get(record.getType(), record.getMembers());
+
+ // Add the vtable initializer to the vtable global op.
+ cgm.setInitializer(vtableOp, vtableAttr);
+}
+
+/// Compute the required linkage of the vtable for the given class.
+///
+/// Note that we only call this at the end of the translation unit.
+cir::GlobalLinkageKind CIRGenModule::getVTableLinkage(const CXXRecordDecl *rd) {
+ if (!rd->isExternallyVisible())
+ return cir::GlobalLinkageKind::InternalLinkage;
+
+ // We're at the end of the translation unit, so the current key
+ // function is fully correct.
+ const CXXMethodDecl *keyFunction = astContext.getCurrentKeyFunction(rd);
+ if (keyFunction && !rd->hasAttr<DLLImportAttr>()) {
+ // If this class has a key function, use that to determine the
+ // linkage of the vtable.
+ const FunctionDecl *def = nullptr;
+ if (keyFunction->hasBody(def))
+ keyFunction = cast<CXXMethodDecl>(def);
+
+ // All of the cases below do something different with AppleKext enabled.
+ assert(!cir::MissingFeatures::appleKext());
+ switch (keyFunction->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ assert(
+ (def || codeGenOpts.OptimizationLevel > 0 ||
+ codeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo) &&
+ "Shouldn't query vtable linkage without key function, "
+ "optimizations, or debug info");
+ if (!def && codeGenOpts.OptimizationLevel > 0)
+ return cir::GlobalLinkageKind::AvailableExternallyLinkage;
+
+ if (keyFunction->isInlined())
+ return !astContext.getLangOpts().AppleKext
+ ? cir::GlobalLinkageKind::LinkOnceODRLinkage
+ : cir::GlobalLinkageKind::InternalLinkage;
+ return cir::GlobalLinkageKind::ExternalLinkage;
+
+ case TSK_ImplicitInstantiation:
+ return cir::GlobalLinkageKind::LinkOnceODRLinkage;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return cir::GlobalLinkageKind::WeakODRLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ llvm_unreachable("Should not have been asked to emit this");
+ }
+ }
+
+ errorNYI(rd->getSourceRange(), "getVTableLinkage: no key function");
+ return cir::GlobalLinkageKind::ExternalLinkage;
+}
+
+void CIRGenVTables::emitThunks(GlobalDecl gd) {
+ const CXXMethodDecl *md =
+ cast<CXXMethodDecl>(gd.getDecl())->getCanonicalDecl();
+
+ // We don't need to generate thunks for the base destructor.
+ if (isa<CXXDestructorDecl>(md) && gd.getDtorType() == Dtor_Base)
+ return;
+
+ const VTableContextBase::ThunkInfoVectorTy *thunkInfoVector =
+ vtContext->getThunkInfo(gd);
+
+ if (!thunkInfoVector)
+ return;
+
+ cgm.errorNYI(md->getSourceRange(), "emitThunks");
+}
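For orientation, a sketch (mine) of the Itanium vtable components that getVTableComponent switches over, for a minimal dynamic class:

    struct S { virtual void f(); virtual ~S(); };
    // Conceptual vtable group for S under the Itanium ABI:
    //   [0] offset-to-top     -> CK_OffsetToTop  (ConstPtrAttr above)
    //   [1] RTTI pointer      -> CK_RTTI         (GlobalViewAttr/ConstPtrAttr)
    //   [2] S::f              -> CK_FunctionPointer (GlobalViewAttr)
    //   [3] complete S::~S    -> CK_CompleteDtorPointer (still NYI above)
    //   [4] deleting S::~S    -> CK_DeletingDtorPointer (still NYI above)
    // The address point used for S* values sits at entry [2].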
diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h
new file mode 100644
index 0000000..518d7d7
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h
@@ -0,0 +1,74 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENVTABLES_H
+#define CLANG_LIB_CIR_CODEGEN_CIRGENVTABLES_H
+
+#include "mlir/IR/Types.h"
+#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/VTableBuilder.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+
+namespace clang {
+class CXXRecordDecl;
+}
+
+namespace clang::CIRGen {
+class CIRGenModule;
+
+class CIRGenVTables {
+ CIRGenModule &cgm;
+
+ clang::VTableContextBase *vtContext;
+
+ mlir::Attribute
+ getVTableComponent(const VTableLayout &layout, unsigned componentIndex,
+ mlir::Attribute rtti, unsigned &nextVTableThunkIndex,
+ unsigned vtableAddressPoint, bool vtableHasLocalLinkage);
+
+ mlir::Type getVTableComponentType();
+
+public:
+ CIRGenVTables(CIRGenModule &cgm);
+
+ /// Add vtable components for the given vtable layout to the given
+ /// global initializer.
+ void createVTableInitializer(cir::GlobalOp &vtable,
+ const clang::VTableLayout &layout,
+ mlir::Attribute rtti,
+ bool vtableHasLocalLinkage);
+
+ clang::ItaniumVTableContext &getItaniumVTableContext() {
+ return *llvm::cast<clang::ItaniumVTableContext>(vtContext);
+ }
+
+ const clang::ItaniumVTableContext &getItaniumVTableContext() const {
+ return *llvm::cast<clang::ItaniumVTableContext>(vtContext);
+ }
+
+ /// Emit the associated thunks for the given global decl.
+ void emitThunks(GlobalDecl gd);
+
+ /// Generate all the class data required to be generated upon definition of a
+ /// KeyFunction. This includes the vtable, the RTTI data structure (if RTTI
+ /// is enabled) and the VTT (if the class has virtual bases).
+ void generateClassData(const CXXRecordDecl *rd);
+
+ /// Returns the type of a vtable with the given layout. Normally a struct of
+ /// arrays of pointers, with one struct element for each vtable in the vtable
+ /// group.
+ cir::RecordType getVTableType(const clang::VTableLayout &layout);
+};
+
+} // namespace clang::CIRGen
+
+#endif // CLANG_LIB_CIR_CODEGEN_CIRGENVTABLES_H
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index 0832c414..ac7e1cc 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -190,6 +190,7 @@ public:
bool isSimple() const { return lvType == Simple; }
bool isVectorElt() const { return lvType == VectorElt; }
bool isBitField() const { return lvType == BitField; }
+ bool isGlobalReg() const { return lvType == GlobalReg; }
bool isVolatile() const { return quals.hasVolatile(); }
bool isVolatileQualified() const { return quals.hasVolatile(); }
@@ -211,6 +212,14 @@ public:
return Address(getPointer(), elementType, getAlignment());
}
+ void setAddress(Address address) {
+ assert(isSimple());
+ v = address.getPointer();
+ elementType = address.getElementType();
+ alignment = address.getAlignment().getQuantity();
+ assert(!cir::MissingFeatures::addressIsKnownNonNull());
+ }
+
const clang::Qualifiers &getQuals() const { return quals; }
clang::Qualifiers &getQuals() { return quals; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp
index b0357d9..aa4d9eb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp
@@ -163,7 +163,7 @@ void CIRGenerator::HandleCXXStaticMemberVarInstantiation(VarDecl *D) {
if (diags.hasErrorOccurred())
return;
- cgm->errorNYI(D->getSourceRange(), "HandleCXXStaticMemberVarInstantiation");
+ cgm->handleCXXStaticMemberVarInstantiation(D);
}
void CIRGenerator::CompleteTentativeDefinition(VarDecl *d) {
@@ -177,5 +177,5 @@ void CIRGenerator::HandleVTable(CXXRecordDecl *rd) {
if (diags.hasErrorOccurred())
return;
- cgm->errorNYI(rd->getSourceRange(), "HandleVTable");
+ cgm->emitVTable(rd);
}
diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt
index ca3a329..6d7072a 100644
--- a/clang/lib/CIR/CodeGen/CMakeLists.txt
+++ b/clang/lib/CIR/CodeGen/CMakeLists.txt
@@ -8,6 +8,8 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
add_clang_library(clangCIR
CIRGenerator.cpp
+ CIRGenAsm.cpp
+ CIRGenAtomic.cpp
CIRGenBuilder.cpp
CIRGenCall.cpp
CIRGenClass.cpp
@@ -18,10 +20,12 @@ add_clang_library(clangCIR
CIRGenBuiltin.cpp
CIRGenDecl.cpp
CIRGenDeclOpenACC.cpp
+ CIRGenException.cpp
CIRGenExpr.cpp
CIRGenExprAggregate.cpp
CIRGenExprComplex.cpp
CIRGenExprConstant.cpp
+ CIRGenExprCXX.cpp
CIRGenExprScalar.cpp
CIRGenFunction.cpp
CIRGenItaniumCXXABI.cpp
@@ -33,6 +37,7 @@ add_clang_library(clangCIR
CIRGenStmtOpenACC.cpp
CIRGenStmtOpenACCLoop.cpp
CIRGenTypes.cpp
+ CIRGenVTables.cpp
TargetInfo.cpp
DEPENDS
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
index 22750ac..47478f6 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -42,7 +42,47 @@ enum CleanupKind : unsigned {
/// A stack of scopes which respond to exceptions, including cleanups
/// and catch blocks.
class EHScopeStack {
+ friend class CIRGenFunction;
+
public:
+ // TODO(ogcg): Switch to alignof(uint64_t) instead of 8
+ enum { ScopeStackAlignment = 8 };
+
+ /// A saved depth on the scope stack. This is necessary because
+ /// pushing scopes onto the stack invalidates iterators.
+ class stable_iterator {
+ friend class EHScopeStack;
+
+ /// Offset from startOfData to endOfBuffer.
+ ptrdiff_t size = -1;
+
+ explicit stable_iterator(ptrdiff_t size) : size(size) {}
+
+ public:
+ static stable_iterator invalid() { return stable_iterator(-1); }
+ stable_iterator() = default;
+
+ bool isValid() const { return size >= 0; }
+
+    /// Returns true if this scope encloses other.
+    /// Returns false if other is invalid.
+ /// This scope must be valid.
+ bool encloses(stable_iterator other) const { return size <= other.size; }
+
+ /// Returns true if this scope strictly encloses I: that is,
+ /// if it encloses I and is not I.
+    /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool strictlyEncloses(stable_iterator I) const { return size < I.size; }
+
+ friend bool operator==(stable_iterator A, stable_iterator B) {
+ return A.size == B.size;
+ }
+ friend bool operator!=(stable_iterator A, stable_iterator B) {
+ return A.size != B.size;
+ }
+ };
+
/// Information for lazily generating a cleanup. Subclasses must be
/// POD-like: cleanups will not be destructed, and they will be
/// allocated on the cleanup stack and freely copied and moved
@@ -68,30 +108,75 @@ public:
///
// \param flags cleanup kind.
virtual void emit(CIRGenFunction &cgf) = 0;
- };
- // Classic codegen has a finely tuned custom allocator and a complex stack
- // management scheme. We'll probably eventually want to find a way to share
- // that implementation. For now, we will use a very simplified implementation
- // to get cleanups working.
- llvm::SmallVector<std::unique_ptr<Cleanup>, 8> cleanupStack;
+ // This is a placeholder until EHScope is implemented.
+ virtual size_t getSize() const = 0;
+ };
private:
+ // The implementation for this class is in CIRGenCleanup.h and
+ // CIRGenCleanup.cpp; the definition is here because it's used as a
+ // member of CIRGenFunction.
+
+ /// The start of the scope-stack buffer, i.e. the allocated pointer
+ /// for the buffer. All of these pointers are either simultaneously
+ /// null or simultaneously valid.
+ std::unique_ptr<char[]> startOfBuffer;
+
+ /// The end of the buffer.
+ char *endOfBuffer = nullptr;
+
+ /// The first valid entry in the buffer.
+ char *startOfData = nullptr;
+
  /// The CGF this Stack belongs to
CIRGenFunction *cgf = nullptr;
+ // This class uses a custom allocator for maximum efficiency because cleanups
+ // are allocated and freed very frequently. It's basically a bump pointer
+ // allocator, but we can't use LLVM's BumpPtrAllocator because we use offsets
+ // into the buffer as stable iterators.
+ char *allocate(size_t size);
+ void deallocate(size_t size);
+
+ void *pushCleanup(CleanupKind kind, size_t dataSize);
+
public:
EHScopeStack() = default;
~EHScopeStack() = default;
/// Push a lazily-created cleanup on the stack.
template <class T, class... As> void pushCleanup(CleanupKind kind, As... a) {
- cleanupStack.push_back(std::make_unique<T>(a...));
+ static_assert(alignof(T) <= ScopeStackAlignment,
+ "Cleanup's alignment is too large.");
+ void *buffer = pushCleanup(kind, sizeof(T));
+ [[maybe_unused]] Cleanup *obj = new (buffer) T(a...);
}
void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
- size_t getStackDepth() const { return cleanupStack.size(); }
+ /// Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
+ void popCleanup();
+
+ /// Determines whether the exception-scopes stack is empty.
+ bool empty() const { return startOfData == endOfBuffer; }
+
+ /// An unstable reference to a scope-stack depth. Invalidated by
+ /// pushes but not pops.
+ class iterator;
+
+ /// Returns an iterator pointing to the innermost EH scope.
+ iterator begin() const;
+
+ /// Create a stable reference to the top of the EH stack. The
+ /// returned reference is valid until that scope is popped off the
+ /// stack.
+ stable_iterator stable_begin() const {
+ return stable_iterator(endOfBuffer - startOfData);
+ }
+
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() { return stable_iterator(0); }
};
} // namespace clang::CIRGen
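A self-contained analogue (assumptions mine, not CIR code) of why stable_begin() hands out an offset rather than a pointer: a push may reallocate the cleanup buffer, invalidating pointers and iterators, but the distance from the logical top survives until the scope is popped:

    #include <cstddef>
    #include <vector>

    struct ScopeStack {
      std::vector<char> buf;                    // may reallocate on push
      size_t stableDepth() const { return buf.size(); } // offset, not pointer
      void push(char c) { buf.push_back(c); }   // invalidates pointers only
      void popTo(size_t d) { buf.resize(d); }   // restore a saved depth
    };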
diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp
index d2d32bb..62a8c59 100644
--- a/clang/lib/CIR/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp
@@ -6,24 +6,22 @@ using namespace clang::CIRGen;
bool clang::CIRGen::isEmptyRecordForLayout(const ASTContext &context,
QualType t) {
- const RecordType *rt = t->getAs<RecordType>();
- if (!rt)
+ const auto *rd = t->getAsRecordDecl();
+ if (!rd)
return false;
- const RecordDecl *rd = rt->getDecl();
-
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *cxxrd = dyn_cast<CXXRecordDecl>(rd)) {
if (cxxrd->isDynamicClass())
return false;
- for (const auto &I : cxxrd->bases())
- if (!isEmptyRecordForLayout(context, I.getType()))
+ for (const auto &i : cxxrd->bases())
+ if (!isEmptyRecordForLayout(context, i.getType()))
return false;
}
- for (const auto *I : rd->fields())
- if (!isEmptyFieldForLayout(context, I))
+ for (const auto *i : rd->fields())
+ if (!isEmptyFieldForLayout(context, i))
return false;
return true;
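An example (mine) of what the rewritten walk classifies: a record is empty for layout only if it is non-dynamic and every base and field is itself empty for layout:

    struct E {};                        // empty for layout
    struct E2 : E {};                   // still empty: only an empty base
    struct F { int x; };                // non-empty: has a field
    struct Dyn { virtual void f(); };   // dynamic class: never empty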
diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp
index c039bdc..95faad6 100644
--- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp
@@ -16,6 +16,14 @@
#include "llvm/ADT/TypeSwitch.h"
//===-----------------------------------------------------------------===//
+// RecordMembers
+//===-----------------------------------------------------------------===//
+
+static void printRecordMembers(mlir::AsmPrinter &p, mlir::ArrayAttr members);
+static mlir::ParseResult parseRecordMembers(mlir::AsmParser &parser,
+ mlir::ArrayAttr &members);
+
+//===-----------------------------------------------------------------===//
// IntLiteral
//===-----------------------------------------------------------------===//
@@ -68,6 +76,61 @@ void CIRDialect::printAttribute(Attribute attr, DialectAsmPrinter &os) const {
llvm_unreachable("unexpected CIR type kind");
}
+static void printRecordMembers(mlir::AsmPrinter &printer,
+ mlir::ArrayAttr members) {
+ printer << '{';
+ llvm::interleaveComma(members, printer);
+ printer << '}';
+}
+
+static ParseResult parseRecordMembers(mlir::AsmParser &parser,
+ mlir::ArrayAttr &members) {
+ llvm::SmallVector<mlir::Attribute, 4> elts;
+
+ auto delimiter = AsmParser::Delimiter::Braces;
+ auto result = parser.parseCommaSeparatedList(delimiter, [&]() {
+ mlir::TypedAttr attr;
+ if (parser.parseAttribute(attr).failed())
+ return mlir::failure();
+ elts.push_back(attr);
+ return mlir::success();
+ });
+
+ if (result.failed())
+ return mlir::failure();
+
+ members = mlir::ArrayAttr::get(parser.getContext(), elts);
+ return mlir::success();
+}
+
+//===----------------------------------------------------------------------===//
+// ConstRecordAttr definitions
+//===----------------------------------------------------------------------===//
+
+LogicalResult
+ConstRecordAttr::verify(function_ref<InFlightDiagnostic()> emitError,
+ mlir::Type type, ArrayAttr members) {
+ auto sTy = mlir::dyn_cast_if_present<cir::RecordType>(type);
+ if (!sTy)
+ return emitError() << "expected !cir.record type";
+
+ if (sTy.getMembers().size() != members.size())
+ return emitError() << "number of elements must match";
+
+ unsigned attrIdx = 0;
+ for (auto &member : sTy.getMembers()) {
+ auto m = mlir::cast<mlir::TypedAttr>(members[attrIdx]);
+ if (member != m.getType())
+ return emitError() << "element at index " << attrIdx << " has type "
+ << m.getType()
+ << " but the expected type for this element is "
+ << member;
+ attrIdx++;
+ }
+
+ return success();
+}
+
//===----------------------------------------------------------------------===//
// OptInfoAttr definitions
//===----------------------------------------------------------------------===//
@@ -362,6 +425,44 @@ cir::ConstVectorAttr::verify(function_ref<InFlightDiagnostic()> emitError,
}
//===----------------------------------------------------------------------===//
+// CIR VTableAttr
+//===----------------------------------------------------------------------===//
+
+LogicalResult cir::VTableAttr::verify(
+ llvm::function_ref<mlir::InFlightDiagnostic()> emitError, mlir::Type type,
+ mlir::ArrayAttr data) {
+ auto sTy = mlir::dyn_cast_if_present<cir::RecordType>(type);
+ if (!sTy)
+ return emitError() << "expected !cir.record type result";
+ if (sTy.getMembers().empty() || data.empty())
+ return emitError() << "expected record type with one or more subtype";
+
+ if (cir::ConstRecordAttr::verify(emitError, type, data).failed())
+ return failure();
+
+ for (const auto &element : data.getAsRange<mlir::Attribute>()) {
+ const auto &constArrayAttr = mlir::dyn_cast<cir::ConstArrayAttr>(element);
+ if (!constArrayAttr)
+ return emitError() << "expected constant array subtype";
+
+ LogicalResult eltTypeCheck = success();
+ auto arrayElts = mlir::cast<ArrayAttr>(constArrayAttr.getElts());
+ arrayElts.walkImmediateSubElements(
+ [&](mlir::Attribute attr) {
+ if (mlir::isa<ConstPtrAttr, GlobalViewAttr>(attr))
+ return;
+
+ eltTypeCheck = emitError()
+ << "expected GlobalViewAttr or ConstPtrAttr";
+ },
+ [&](mlir::Type type) {});
+ if (eltTypeCheck.failed())
+ return eltTypeCheck;
+ }
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
// CIR Dialect
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp
index d835c40..42d4581 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp
@@ -1,4 +1,6 @@
#include "clang/CIR/Dialect/IR/CIRDataLayout.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "clang/CIR/MissingFeatures.h"
using namespace cir;
@@ -20,3 +22,42 @@ void CIRDataLayout::reset(mlir::DataLayoutSpecInterface spec) {
bigEndian = str == mlir::DLTIDialect::kDataLayoutEndiannessBig;
}
}
+
+llvm::Align CIRDataLayout::getAlignment(mlir::Type ty, bool useABIAlign) const {
+ if (auto recTy = llvm::dyn_cast<cir::RecordType>(ty)) {
+ // Packed record types always have an ABI alignment of one.
+ if (recTy && recTy.getPacked() && useABIAlign)
+ return llvm::Align(1);
+
+ // Get the layout annotation... which is lazily created on demand.
+    llvm_unreachable("getAlignment() for record type is not implemented");
+ }
+
+  // FIXME(cir): This does not account for different address spaces, and relies
+ // on CIR's data layout to give the proper alignment.
+ assert(!cir::MissingFeatures::addressSpace());
+
+ // Fetch type alignment from MLIR's data layout.
+ unsigned align = useABIAlign ? layout.getTypeABIAlignment(ty)
+ : layout.getTypePreferredAlignment(ty);
+ return llvm::Align(align);
+}
+
+// The implementation of this method is provided inline as it is particularly
+// well suited to constant folding when called on a specific Type subclass.
+llvm::TypeSize CIRDataLayout::getTypeSizeInBits(mlir::Type ty) const {
+ assert(cir::isSized(ty) && "Cannot getTypeInfo() on a type that is unsized!");
+
+ if (auto recordTy = llvm::dyn_cast<cir::RecordType>(ty)) {
+    // FIXME(cir): CIR record's data layout implementation doesn't do a good
+    // job of handling the particularities of unions. We should have a
+    // separate union type.
+ return recordTy.getTypeSizeInBits(layout, {});
+ }
+
+ // FIXME(cir): This does not account for different address spaces, and relies
+ // on CIR's data layout to give the proper ABI-specific type width.
+ assert(!cir::MissingFeatures::addressSpace());
+
+ // This is calling mlir::DataLayout::getTypeSizeInBits().
+ return llvm::TypeSize::getFixed(layout.getTypeSizeInBits(ty));
+}
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index d3fcac1..80ca2d3 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -22,6 +22,8 @@
#include "clang/CIR/Dialect/IR/CIROpsDialect.cpp.inc"
#include "clang/CIR/Dialect/IR/CIROpsEnums.cpp.inc"
#include "clang/CIR/MissingFeatures.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/LogicalResult.h"
#include <numeric>
@@ -339,7 +341,9 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType,
}
if (mlir::isa<cir::ConstArrayAttr, cir::ConstVectorAttr,
- cir::ConstComplexAttr, cir::PoisonAttr>(attrType))
+ cir::ConstComplexAttr, cir::ConstRecordAttr,
+ cir::GlobalViewAttr, cir::PoisonAttr, cir::VTableAttr>(
+ attrType))
return success();
assert(isa<TypedAttr>(attrType) && "What else could we be looking at here?");
@@ -1355,11 +1359,14 @@ mlir::LogicalResult cir::GlobalOp::verify() {
void cir::GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState,
llvm::StringRef sym_name, mlir::Type sym_type,
- cir::GlobalLinkageKind linkage) {
+ bool isConstant, cir::GlobalLinkageKind linkage) {
odsState.addAttribute(getSymNameAttrName(odsState.name),
odsBuilder.getStringAttr(sym_name));
odsState.addAttribute(getSymTypeAttrName(odsState.name),
mlir::TypeAttr::get(sym_type));
+ if (isConstant)
+ odsState.addAttribute(getConstantAttrName(odsState.name),
+ odsBuilder.getUnitAttr());
cir::GlobalLinkageKindAttr linkageAttr =
cir::GlobalLinkageKindAttr::get(odsBuilder.getContext(), linkage);
@@ -1444,6 +1451,77 @@ cir::GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
}
//===----------------------------------------------------------------------===//
+// VTableAddrPointOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult
+cir::VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
+ StringRef name = getName();
+
+  // Verify that the result type's underlying pointer type matches the type
+  // of the referenced cir.global.
+ auto op =
+ symbolTable.lookupNearestSymbolFrom<cir::GlobalOp>(*this, getNameAttr());
+ if (!op)
+ return emitOpError("'")
+ << name << "' does not reference a valid cir.global";
+ std::optional<mlir::Attribute> init = op.getInitialValue();
+ if (!init)
+ return success();
+ if (!isa<cir::VTableAttr>(*init))
+ return emitOpError("Expected #cir.vtable in initializer for global '")
+ << name << "'";
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
+// VTTAddrPointOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult
+cir::VTTAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
+ // VTT ptr is not coming from a symbol.
+ if (!getName())
+ return success();
+ StringRef name = *getName();
+
+  // Verify that the result type's underlying pointer type matches the type
+  // of the referenced cir.global op.
+ auto op =
+ symbolTable.lookupNearestSymbolFrom<cir::GlobalOp>(*this, getNameAttr());
+ if (!op)
+ return emitOpError("'")
+ << name << "' does not reference a valid cir.global";
+ std::optional<mlir::Attribute> init = op.getInitialValue();
+ if (!init)
+ return success();
+ if (!isa<cir::ConstArrayAttr>(*init))
+ return emitOpError(
+ "Expected constant array in initializer for global VTT '")
+ << name << "'";
+ return success();
+}
+
+LogicalResult cir::VTTAddrPointOp::verify() {
+ // The operation uses either a symbol or a value to operate, but not both
+ if (getName() && getSymAddr())
+ return emitOpError("should use either a symbol or value, but not both");
+
+ // If not a symbol, stick with the concrete type used for getSymAddr.
+ if (getSymAddr())
+ return success();
+
+ mlir::Type resultType = getAddr().getType();
+ mlir::Type resTy = cir::PointerType::get(
+ cir::PointerType::get(cir::VoidType::get(getContext())));
+
+ if (resultType != resTy)
+ return emitOpError("result type must be ")
+ << resTy << ", but provided result type is " << resultType;
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
// FuncOp
//===----------------------------------------------------------------------===//
@@ -1625,9 +1703,28 @@ void cir::FuncOp::print(OpAsmPrinter &p) {
}
}
-// TODO(CIR): The properties of functions that require verification haven't
-// been implemented yet.
-mlir::LogicalResult cir::FuncOp::verify() { return success(); }
+mlir::LogicalResult cir::FuncOp::verify() {
+
+ llvm::SmallSet<llvm::StringRef, 16> labels;
+ llvm::SmallSet<llvm::StringRef, 16> gotos;
+
+ getOperation()->walk([&](mlir::Operation *op) {
+ if (auto lab = dyn_cast<cir::LabelOp>(op)) {
+ labels.insert(lab.getLabel());
+ } else if (auto goTo = dyn_cast<cir::GotoOp>(op)) {
+ gotos.insert(goTo.getLabel());
+ }
+ });
+
+ if (!labels.empty() || !gotos.empty()) {
+ llvm::SmallSet<llvm::StringRef, 16> mismatched =
+ llvm::set_difference(gotos, labels);
+
+ if (!mismatched.empty())
+ return emitOpError() << "goto/label mismatch";
+ }
+ return success();
+}
//===----------------------------------------------------------------------===//
// BinOp
@@ -1763,6 +1860,19 @@ LogicalResult cir::ShiftOp::verify() {
}
//===----------------------------------------------------------------------===//
+// LabelOp Definitions
+//===----------------------------------------------------------------------===//
+
+LogicalResult cir::LabelOp::verify() {
+ mlir::Operation *op = getOperation();
+ mlir::Block *blk = op->getBlock();
+ if (&blk->front() != op)
+ return emitError() << "must be the first operation in a block";
+
+ return mlir::success();
+}
+
+//===----------------------------------------------------------------------===//
// UnaryOp
//===----------------------------------------------------------------------===//
@@ -2385,6 +2495,227 @@ OpFoldResult RotateOp::fold(FoldAdaptor adaptor) {
}
//===----------------------------------------------------------------------===//
+// InlineAsmOp
+//===----------------------------------------------------------------------===//
+
+void cir::InlineAsmOp::print(OpAsmPrinter &p) {
+ p << '(' << getAsmFlavor() << ", ";
+ p.increaseIndent();
+ p.printNewline();
+
+ llvm::SmallVector<std::string, 3> names{"out", "in", "in_out"};
+ auto *nameIt = names.begin();
+ auto *attrIt = getOperandAttrs().begin();
+
+ for (mlir::OperandRange ops : getAsmOperands()) {
+ p << *nameIt << " = ";
+
+ p << '[';
+ llvm::interleaveComma(llvm::make_range(ops.begin(), ops.end()), p,
+ [&](Value value) {
+ p.printOperand(value);
+ p << " : " << value.getType();
+ if (*attrIt)
+ p << " (maybe_memory)";
+ attrIt++;
+ });
+ p << "],";
+ p.printNewline();
+ ++nameIt;
+ }
+
+ p << "{";
+ p.printString(getAsmString());
+ p << " ";
+ p.printString(getConstraints());
+ p << "}";
+ p.decreaseIndent();
+ p << ')';
+ if (getSideEffects())
+ p << " side_effects";
+
+ std::array elidedAttrs{
+ llvm::StringRef("asm_flavor"), llvm::StringRef("asm_string"),
+ llvm::StringRef("constraints"), llvm::StringRef("operand_attrs"),
+ llvm::StringRef("operands_segments"), llvm::StringRef("side_effects")};
+ p.printOptionalAttrDict(getOperation()->getAttrs(), elidedAttrs);
+
+ if (auto v = getRes())
+ p << " -> " << v.getType();
+}
+
+void cir::InlineAsmOp::build(OpBuilder &odsBuilder, OperationState &odsState,
+ ArrayRef<ValueRange> asmOperands,
+ StringRef asmString, StringRef constraints,
+ bool sideEffects, cir::AsmFlavor asmFlavor,
+ ArrayRef<Attribute> operandAttrs) {
+ // Set up the operands_segments for VariadicOfVariadic
+ SmallVector<int32_t> segments;
+ for (auto operandRange : asmOperands) {
+ segments.push_back(operandRange.size());
+ odsState.addOperands(operandRange);
+ }
+
+ odsState.addAttribute(
+ "operands_segments",
+ DenseI32ArrayAttr::get(odsBuilder.getContext(), segments));
+ odsState.addAttribute("asm_string", odsBuilder.getStringAttr(asmString));
+ odsState.addAttribute("constraints", odsBuilder.getStringAttr(constraints));
+ odsState.addAttribute("asm_flavor",
+ AsmFlavorAttr::get(odsBuilder.getContext(), asmFlavor));
+
+ if (sideEffects)
+ odsState.addAttribute("side_effects", odsBuilder.getUnitAttr());
+
+ odsState.addAttribute("operand_attrs", odsBuilder.getArrayAttr(operandAttrs));
+}
+
+ParseResult cir::InlineAsmOp::parse(OpAsmParser &parser,
+ OperationState &result) {
+ llvm::SmallVector<mlir::Attribute> operandAttrs;
+ llvm::SmallVector<int32_t> operandsGroupSizes;
+ std::string asmString, constraints;
+ Type resType;
+ MLIRContext *ctxt = parser.getBuilder().getContext();
+
+ auto error = [&](const Twine &msg) -> LogicalResult {
+ return parser.emitError(parser.getCurrentLocation(), msg);
+ };
+
+ auto expected = [&](const std::string &c) {
+ return error("expected '" + c + "'");
+ };
+
+ if (parser.parseLParen().failed())
+ return expected("(");
+
+ auto flavor = FieldParser<AsmFlavor, AsmFlavor>::parse(parser);
+ if (failed(flavor))
+ return error("Unknown AsmFlavor");
+
+ if (parser.parseComma().failed())
+ return expected(",");
+
+ auto parseValue = [&](Value &v) {
+ OpAsmParser::UnresolvedOperand op;
+
+ if (parser.parseOperand(op) || parser.parseColon())
+ return error("can't parse operand");
+
+ Type typ;
+ if (parser.parseType(typ).failed())
+ return error("can't parse operand type");
+ llvm::SmallVector<mlir::Value> tmp;
+ if (parser.resolveOperand(op, typ, tmp))
+ return error("can't resolve operand");
+ v = tmp[0];
+ return mlir::success();
+ };
+
+ auto parseOperands = [&](llvm::StringRef name) {
+ if (parser.parseKeyword(name).failed())
+ return error("expected " + name + " operands here");
+ if (parser.parseEqual().failed())
+ return expected("=");
+ if (parser.parseLSquare().failed())
+ return expected("[");
+
+ int size = 0;
+ if (parser.parseOptionalRSquare().succeeded()) {
+ operandsGroupSizes.push_back(size);
+ if (parser.parseComma())
+ return expected(",");
+ return mlir::success();
+ }
+
+ auto parseOperand = [&]() {
+ Value val;
+ if (parseValue(val).succeeded()) {
+ result.operands.push_back(val);
+ size++;
+
+ if (parser.parseOptionalLParen().failed()) {
+ operandAttrs.push_back(mlir::Attribute());
+ return mlir::success();
+ }
+
+ if (parser.parseKeyword("maybe_memory").succeeded()) {
+ operandAttrs.push_back(mlir::UnitAttr::get(ctxt));
+ if (parser.parseRParen())
+ return expected(")");
+ return mlir::success();
+ } else {
+ return expected("maybe_memory");
+ }
+ }
+ return mlir::failure();
+ };
+
+ if (parser.parseCommaSeparatedList(parseOperand).failed())
+ return mlir::failure();
+
+ if (parser.parseRSquare().failed() || parser.parseComma().failed())
+ return expected("]");
+ operandsGroupSizes.push_back(size);
+ return mlir::success();
+ };
+
+ if (parseOperands("out").failed() || parseOperands("in").failed() ||
+ parseOperands("in_out").failed())
+ return error("failed to parse operands");
+
+ if (parser.parseLBrace())
+ return expected("{");
+ if (parser.parseString(&asmString))
+ return error("asm string parsing failed");
+ if (parser.parseString(&constraints))
+ return error("constraints string parsing failed");
+ if (parser.parseRBrace())
+ return expected("}");
+ if (parser.parseRParen())
+ return expected(")");
+
+ if (parser.parseOptionalKeyword("side_effects").succeeded())
+ result.attributes.set("side_effects", UnitAttr::get(ctxt));
+
+ if (parser.parseOptionalArrow().succeeded() &&
+ parser.parseType(resType).failed())
+ return mlir::failure();
+
+ if (parser.parseOptionalAttrDict(result.attributes).failed())
+ return mlir::failure();
+
+ result.attributes.set("asm_flavor", AsmFlavorAttr::get(ctxt, *flavor));
+ result.attributes.set("asm_string", StringAttr::get(ctxt, asmString));
+ result.attributes.set("constraints", StringAttr::get(ctxt, constraints));
+ result.attributes.set("operand_attrs", ArrayAttr::get(ctxt, operandAttrs));
+ result.getOrAddProperties<InlineAsmOp::Properties>().operands_segments =
+ parser.getBuilder().getDenseI32ArrayAttr(operandsGroupSizes);
+ if (resType)
+ result.addTypes(TypeRange{resType});
+
+ return mlir::success();
+}
+
+//===----------------------------------------------------------------------===//
+// ThrowOp
+//===----------------------------------------------------------------------===//
+
+mlir::LogicalResult cir::ThrowOp::verify() {
+ // For the no-rethrow version, it must have at least the exception pointer.
+ if (rethrows())
+ return success();
+
+ if (getNumOperands() != 0) {
+ if (getTypeInfo())
+ return success();
+ return emitOpError() << "'type_info' symbol attribute missing";
+ }
+
+ return failure();
+}
+
+//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//
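To make the new FuncOp verifier concrete (example mine): every cir.goto emitted for a function must name some cir.label in that function, so a non-empty set difference gotos \ labels is what triggers the "goto/label mismatch" error. Source that verifies cleanly:

    int clamp(int x) {
      if (x > 100) goto done;   // emits cir.goto "done"
      x *= 2;
    done:                       // emits cir.label "done"
      return x;
    }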
diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
index 2eaa60c..d41ea0a 100644
--- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
@@ -47,8 +47,8 @@ struct RemoveRedundantBranches : public OpRewritePattern<BrOp> {
Block *block = op.getOperation()->getBlock();
Block *dest = op.getDest();
- assert(!cir::MissingFeatures::labelOp());
-
+ if (isa<cir::LabelOp>(dest->front()))
+ return failure();
// Single edge between blocks: merge it.
if (block->getNumSuccessors() == 1 &&
dest->getSinglePredecessor() == block) {
diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt
index 18beca7..df7a1a3 100644
--- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt
+++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt
@@ -4,6 +4,7 @@ add_clang_library(MLIRCIRTransforms
FlattenCFG.cpp
HoistAllocas.cpp
LoweringPrepare.cpp
+ GotoSolver.cpp
DEPENDS
MLIRCIRPassIncGen
diff --git a/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp
new file mode 100644
index 0000000..c0db984
--- /dev/null
+++ b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp
@@ -0,0 +1,57 @@
+//===- GotoSolver.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "PassDetail.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/Passes.h"
+#include "llvm/Support/TimeProfiler.h"
+#include <memory>
+
+using namespace mlir;
+using namespace cir;
+
+namespace {
+
+struct GotoSolverPass : public GotoSolverBase<GotoSolverPass> {
+ GotoSolverPass() = default;
+ void runOnOperation() override;
+};
+
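+// Rewrite every cir.goto in `func` into a direct cir.br to the block holding
+// the matching cir.label, erasing the labels as they are collected.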
+static void process(cir::FuncOp func) {
+ mlir::OpBuilder rewriter(func.getContext());
+ llvm::StringMap<Block *> labels;
+ llvm::SmallVector<cir::GotoOp, 4> gotos;
+
+ func.getBody().walk([&](mlir::Operation *op) {
+ if (auto lab = dyn_cast<cir::LabelOp>(op)) {
+ // try_emplace constructs an in-place copy of the label string, so the
+ // label op can be safely erased afterwards.
+ labels.try_emplace(lab.getLabel(), lab->getBlock());
+ lab.erase();
+ } else if (auto goTo = dyn_cast<cir::GotoOp>(op)) {
+ gotos.push_back(goTo);
+ }
+ });
+
+ for (auto goTo : gotos) {
+ mlir::OpBuilder::InsertionGuard guard(rewriter);
+ rewriter.setInsertionPoint(goTo);
+ Block *dest = labels.lookup(goTo.getLabel());
+ assert(dest && "goto with no matching label");
+ cir::BrOp::create(rewriter, goTo.getLoc(), dest);
+ goTo.erase();
+ }
+}
+
+void GotoSolverPass::runOnOperation() {
+ llvm::TimeTraceScope scope("Goto Solver");
+ getOperation()->walk(&process);
+}
+
+} // namespace
+
+std::unique_ptr<Pass> mlir::createGotoSolverPass() {
+ return std::make_unique<GotoSolverPass>();
+}
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index 66260eb..c15637d 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -8,7 +8,7 @@
#include "PassDetail.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/CharUnits.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h"
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
@@ -27,6 +27,7 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
void runOnOp(mlir::Operation *op);
void lowerCastOp(cir::CastOp op);
+ void lowerComplexDivOp(cir::ComplexDivOp op);
void lowerComplexMulOp(cir::ComplexMulOp op);
void lowerUnaryOp(cir::UnaryOp op);
void lowerArrayDtor(cir::ArrayDtor op);
@@ -182,6 +183,280 @@ static mlir::Value buildComplexBinOpLibCall(
}
static llvm::StringRef
+getComplexDivLibCallName(llvm::APFloat::Semantics semantics) {
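+ // Assumption: these names follow the compiler-rt/libgcc convention for
+ // complex-division helpers, __div<m>c3, which compute (a+ib)/(c+id); <m>
+ // encodes the mode: h (half), s (single), d (double), x (x87 extended), or
+ // t (quad, also used for PPC double-double).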
+ switch (semantics) {
+ case llvm::APFloat::S_IEEEhalf:
+ return "__divhc3";
+ case llvm::APFloat::S_IEEEsingle:
+ return "__divsc3";
+ case llvm::APFloat::S_IEEEdouble:
+ return "__divdc3";
+ case llvm::APFloat::S_PPCDoubleDouble:
+ return "__divtc3";
+ case llvm::APFloat::S_x87DoubleExtended:
+ return "__divxc3";
+ case llvm::APFloat::S_IEEEquad:
+ return "__divtc3";
+ default:
+ llvm_unreachable("unsupported floating point type");
+ }
+}
+
+static mlir::Value
+buildAlgebraicComplexDiv(CIRBaseBuilderTy &builder, mlir::Location loc,
+ mlir::Value lhsReal, mlir::Value lhsImag,
+ mlir::Value rhsReal, mlir::Value rhsImag) {
+ // (a+bi) / (c+di) = ((ac+bd)/(cc+dd)) + ((bc-ad)/(cc+dd))i
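+ // For example, (1+2i)/(3+4i): ac+bd = 3+8 = 11, cc+dd = 9+16 = 25,
+ // bc-ad = 6-4 = 2, giving (11/25) + (2/25)i.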
+ mlir::Value &a = lhsReal;
+ mlir::Value &b = lhsImag;
+ mlir::Value &c = rhsReal;
+ mlir::Value &d = rhsImag;
+
+ mlir::Value ac = builder.createBinop(loc, a, cir::BinOpKind::Mul, c); // a*c
+ mlir::Value bd = builder.createBinop(loc, b, cir::BinOpKind::Mul, d); // b*d
+ mlir::Value cc = builder.createBinop(loc, c, cir::BinOpKind::Mul, c); // c*c
+ mlir::Value dd = builder.createBinop(loc, d, cir::BinOpKind::Mul, d); // d*d
+ mlir::Value acbd =
+ builder.createBinop(loc, ac, cir::BinOpKind::Add, bd); // ac+bd
+ mlir::Value ccdd =
+ builder.createBinop(loc, cc, cir::BinOpKind::Add, dd); // cc+dd
+ mlir::Value resultReal =
+ builder.createBinop(loc, acbd, cir::BinOpKind::Div, ccdd);
+
+ mlir::Value bc = builder.createBinop(loc, b, cir::BinOpKind::Mul, c); // b*c
+ mlir::Value ad = builder.createBinop(loc, a, cir::BinOpKind::Mul, d); // a*d
+ mlir::Value bcad =
+ builder.createBinop(loc, bc, cir::BinOpKind::Sub, ad); // bc-ad
+ mlir::Value resultImag =
+ builder.createBinop(loc, bcad, cir::BinOpKind::Div, ccdd);
+ return builder.createComplexCreate(loc, resultReal, resultImag);
+}
+
+static mlir::Value
+buildRangeReductionComplexDiv(CIRBaseBuilderTy &builder, mlir::Location loc,
+ mlir::Value lhsReal, mlir::Value lhsImag,
+ mlir::Value rhsReal, mlir::Value rhsImag) {
+ // Implements Smith's algorithm for complex division.
+ // SMITH, R. L. Algorithm 116: Complex division. Commun. ACM 5, 8 (1962).
+
+ // Let:
+ // - lhs := a+bi
+ // - rhs := c+di
+ // - result := lhs / rhs = e+fi
+ //
+ // In pseudocode, the algorithm is:
+ // if fabs(c) >= fabs(d):
+ // r := d / c
+ // tmp := c + r*d
+ // e = (a + b*r) / tmp
+ // f = (b - a*r) / tmp
+ // else:
+ // r := c / d
+ // tmp := d + r*c
+ // e = (a*r + b) / tmp
+ // f = (b*r - a) / tmp
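+ //
+ // Scaling by r (the ratio of the smaller to the larger of |c| and |d|)
+ // keeps the intermediate products near the operands' magnitudes, so the
+ // naive cc+dd term cannot overflow. For example, with c = d = 1e200 in
+ // doubles, the algebraic form's cc+dd overflows to +inf, while here r = 1
+ // and tmp = c + r*d = 2e200 stays finite.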
+
+ mlir::Value &a = lhsReal;
+ mlir::Value &b = lhsImag;
+ mlir::Value &c = rhsReal;
+ mlir::Value &d = rhsImag;
+
+ auto trueBranchBuilder = [&](mlir::OpBuilder &, mlir::Location) {
+ mlir::Value r = builder.createBinop(loc, d, cir::BinOpKind::Div,
+ c); // r := d / c
+ mlir::Value rd = builder.createBinop(loc, r, cir::BinOpKind::Mul, d); // r*d
+ mlir::Value tmp = builder.createBinop(loc, c, cir::BinOpKind::Add,
+ rd); // tmp := c + r*d
+
+ mlir::Value br = builder.createBinop(loc, b, cir::BinOpKind::Mul, r); // b*r
+ mlir::Value abr =
+ builder.createBinop(loc, a, cir::BinOpKind::Add, br); // a + b*r
+ mlir::Value e = builder.createBinop(loc, abr, cir::BinOpKind::Div, tmp);
+
+ mlir::Value ar = builder.createBinop(loc, a, cir::BinOpKind::Mul, r); // a*r
+ mlir::Value bar =
+ builder.createBinop(loc, b, cir::BinOpKind::Sub, ar); // b - a*r
+ mlir::Value f = builder.createBinop(loc, bar, cir::BinOpKind::Div, tmp);
+
+ mlir::Value result = builder.createComplexCreate(loc, e, f);
+ builder.createYield(loc, result);
+ };
+
+ auto falseBranchBuilder = [&](mlir::OpBuilder &, mlir::Location) {
+ mlir::Value r = builder.createBinop(loc, c, cir::BinOpKind::Div,
+ d); // r := c / d
+ mlir::Value rc = builder.createBinop(loc, r, cir::BinOpKind::Mul, c); // r*c
+ mlir::Value tmp = builder.createBinop(loc, d, cir::BinOpKind::Add,
+ rc); // tmp := d + r*c
+
+ mlir::Value ar = builder.createBinop(loc, a, cir::BinOpKind::Mul, r); // a*r
+ mlir::Value arb =
+ builder.createBinop(loc, ar, cir::BinOpKind::Add, b); // a*r + b
+ mlir::Value e = builder.createBinop(loc, arb, cir::BinOpKind::Div, tmp);
+
+ mlir::Value br = builder.createBinop(loc, b, cir::BinOpKind::Mul, r); // b*r
+ mlir::Value bra =
+ builder.createBinop(loc, br, cir::BinOpKind::Sub, a); // b*r - a
+ mlir::Value f = builder.createBinop(loc, bra, cir::BinOpKind::Div, tmp);
+
+ mlir::Value result = builder.createComplexCreate(loc, e, f);
+ builder.createYield(loc, result);
+ };
+
+ auto cFabs = builder.create<cir::FAbsOp>(loc, c);
+ auto dFabs = builder.create<cir::FAbsOp>(loc, d);
+ cir::CmpOp cmpResult =
+ builder.createCompare(loc, cir::CmpOpKind::ge, cFabs, dFabs);
+ auto ternary = builder.create<cir::TernaryOp>(
+ loc, cmpResult, trueBranchBuilder, falseBranchBuilder);
+
+ return ternary.getResult();
+}
+
+static mlir::Type higherPrecisionElementTypeForComplexArithmetic(
+ mlir::MLIRContext &context, clang::ASTContext &cc,
+ CIRBaseBuilderTy &builder, mlir::Type elementType) {
+
+ auto getHigherPrecisionFPType = [&context](mlir::Type type) -> mlir::Type {
+ if (mlir::isa<cir::FP16Type>(type))
+ return cir::SingleType::get(&context);
+
+ if (mlir::isa<cir::SingleType>(type) || mlir::isa<cir::BF16Type>(type))
+ return cir::DoubleType::get(&context);
+
+ if (mlir::isa<cir::DoubleType>(type))
+ return cir::LongDoubleType::get(&context, type);
+
+ return type;
+ };
+
+ auto getFloatTypeSemantics =
+ [&cc](mlir::Type type) -> const llvm::fltSemantics & {
+ const clang::TargetInfo &info = cc.getTargetInfo();
+ if (mlir::isa<cir::FP16Type>(type))
+ return info.getHalfFormat();
+
+ if (mlir::isa<cir::BF16Type>(type))
+ return info.getBFloat16Format();
+
+ if (mlir::isa<cir::SingleType>(type))
+ return info.getFloatFormat();
+
+ if (mlir::isa<cir::DoubleType>(type))
+ return info.getDoubleFormat();
+
+ if (mlir::isa<cir::LongDoubleType>(type)) {
+ if (cc.getLangOpts().OpenMP && cc.getLangOpts().OpenMPIsTargetDevice)
+ llvm_unreachable("NYI Float type semantics with OpenMP");
+ return info.getLongDoubleFormat();
+ }
+
+ if (mlir::isa<cir::FP128Type>(type)) {
+ if (cc.getLangOpts().OpenMP && cc.getLangOpts().OpenMPIsTargetDevice)
+ llvm_unreachable("NYI Float type semantics with OpenMP");
+ return info.getFloat128Format();
+ }
+
+ llvm_unreachable("Unsupported float type semantics");
+ };
+
+ const mlir::Type higherElementType = getHigherPrecisionFPType(elementType);
+ const llvm::fltSemantics &elementTypeSemantics =
+ getFloatTypeSemantics(elementType);
+ const llvm::fltSemantics &higherElementTypeSemantics =
+ getFloatTypeSemantics(higherElementType);
+
+ // Check that the promoted type can represent the intermediate values
+ // without overflowing, i.e. that
+ // (SmallerType.LargestFiniteVal * SmallerType.LargestFiniteVal) * 2 <=
+ // LargerType.LargestFiniteVal.
+ // In terms of exponents: squaring LargestFiniteVal doubles its exponent
+ // and the factor of 2 adds one, so the requirement becomes
+ // 2 * maxExponent(smaller) + 1 <= maxExponent(larger).
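+ // Worked example: promoting IEEE single (max exponent 127) to IEEE double
+ // (max exponent 1023) passes, since 2*127 + 1 = 255 <= 1023; promoting
+ // double to x87 extended (max exponent 16383) also passes, since
+ // 2*1023 + 1 = 2047 <= 16383.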
+ if (llvm::APFloat::semanticsMaxExponent(elementTypeSemantics) * 2 + 1 <=
+ llvm::APFloat::semanticsMaxExponent(higherElementTypeSemantics)) {
+ return higherElementType;
+ }
+
+ // The intermediate values can't be represented in the promoted type
+ // without overflowing.
+ return {};
+}
+
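+// Choose a lowering strategy from the op's complex range kind: Improved uses
+// Smith's range-reduction algorithm, Full calls the __div*c3 library helper,
+// and Promoted widens to a higher-precision element type (when one wide
+// enough exists) before dividing algebraically. Everything else, including
+// non-floating-point element types, falls back to the plain algebraic
+// expansion.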
+static mlir::Value
+lowerComplexDiv(LoweringPreparePass &pass, CIRBaseBuilderTy &builder,
+ mlir::Location loc, cir::ComplexDivOp op, mlir::Value lhsReal,
+ mlir::Value lhsImag, mlir::Value rhsReal, mlir::Value rhsImag,
+ mlir::MLIRContext &mlirCx, clang::ASTContext &cc) {
+ cir::ComplexType complexTy = op.getType();
+ if (mlir::isa<cir::FPTypeInterface>(complexTy.getElementType())) {
+ cir::ComplexRangeKind range = op.getRange();
+ if (range == cir::ComplexRangeKind::Improved)
+ return buildRangeReductionComplexDiv(builder, loc, lhsReal, lhsImag,
+ rhsReal, rhsImag);
+
+ if (range == cir::ComplexRangeKind::Full)
+ return buildComplexBinOpLibCall(pass, builder, &getComplexDivLibCallName,
+ loc, complexTy, lhsReal, lhsImag, rhsReal,
+ rhsImag);
+
+ if (range == cir::ComplexRangeKind::Promoted) {
+ mlir::Type originalElementType = complexTy.getElementType();
+ mlir::Type higherPrecisionElementType =
+ higherPrecisionElementTypeForComplexArithmetic(mlirCx, cc, builder,
+ originalElementType);
+
+ if (!higherPrecisionElementType)
+ return buildRangeReductionComplexDiv(builder, loc, lhsReal, lhsImag,
+ rhsReal, rhsImag);
+
+ cir::CastKind floatingCastKind = cir::CastKind::floating;
+ lhsReal = builder.createCast(floatingCastKind, lhsReal,
+ higherPrecisionElementType);
+ lhsImag = builder.createCast(floatingCastKind, lhsImag,
+ higherPrecisionElementType);
+ rhsReal = builder.createCast(floatingCastKind, rhsReal,
+ higherPrecisionElementType);
+ rhsImag = builder.createCast(floatingCastKind, rhsImag,
+ higherPrecisionElementType);
+
+ mlir::Value algebraicResult = buildAlgebraicComplexDiv(
+ builder, loc, lhsReal, lhsImag, rhsReal, rhsImag);
+
+ mlir::Value resultReal = builder.createComplexReal(loc, algebraicResult);
+ mlir::Value resultImag = builder.createComplexImag(loc, algebraicResult);
+
+ mlir::Value finalReal =
+ builder.createCast(floatingCastKind, resultReal, originalElementType);
+ mlir::Value finalImag =
+ builder.createCast(floatingCastKind, resultImag, originalElementType);
+ return builder.createComplexCreate(loc, finalReal, finalImag);
+ }
+ }
+
+ return buildAlgebraicComplexDiv(builder, loc, lhsReal, lhsImag, rhsReal,
+ rhsImag);
+}
+
+void LoweringPreparePass::lowerComplexDivOp(cir::ComplexDivOp op) {
+ cir::CIRBaseBuilderTy builder(getContext());
+ builder.setInsertionPointAfter(op);
+ mlir::Location loc = op.getLoc();
+ mlir::TypedValue<cir::ComplexType> lhs = op.getLhs();
+ mlir::TypedValue<cir::ComplexType> rhs = op.getRhs();
+ mlir::Value lhsReal = builder.createComplexReal(loc, lhs);
+ mlir::Value lhsImag = builder.createComplexImag(loc, lhs);
+ mlir::Value rhsReal = builder.createComplexReal(loc, rhs);
+ mlir::Value rhsImag = builder.createComplexImag(loc, rhs);
+
+ mlir::Value loweredResult =
+ lowerComplexDiv(*this, builder, loc, op, lhsReal, lhsImag, rhsReal,
+ rhsImag, getContext(), *astCtx);
+ op.replaceAllUsesWith(loweredResult);
+ op.erase();
+}
+
+static llvm::StringRef
getComplexMulLibCallName(llvm::APFloat::Semantics semantics) {
switch (semantics) {
case llvm::APFloat::S_IEEEhalf:
@@ -412,6 +687,8 @@ void LoweringPreparePass::runOnOp(mlir::Operation *op) {
lowerArrayDtor(arrayDtor);
else if (auto cast = mlir::dyn_cast<cir::CastOp>(op))
lowerCastOp(cast);
+ else if (auto complexDiv = mlir::dyn_cast<cir::ComplexDivOp>(op))
+ lowerComplexDivOp(complexDiv);
else if (auto complexMul = mlir::dyn_cast<cir::ComplexMulOp>(op))
lowerComplexMulOp(complexMul);
else if (auto unary = mlir::dyn_cast<cir::UnaryOp>(op))
@@ -427,7 +704,7 @@ void LoweringPreparePass::runOnOperation() {
op->walk([&](mlir::Operation *op) {
if (mlir::isa<cir::ArrayCtor, cir::ArrayDtor, cir::CastOp,
- cir::ComplexMulOp, cir::UnaryOp>(op))
+ cir::ComplexMulOp, cir::ComplexDivOp, cir::UnaryOp>(op))
opsToTransform.push_back(op);
});
diff --git a/clang/lib/CIR/Lowering/CIRPasses.cpp b/clang/lib/CIR/Lowering/CIRPasses.cpp
index bb9781b..ccc8387 100644
--- a/clang/lib/CIR/Lowering/CIRPasses.cpp
+++ b/clang/lib/CIR/Lowering/CIRPasses.cpp
@@ -45,6 +45,7 @@ namespace mlir {
void populateCIRPreLoweringPasses(OpPassManager &pm) {
pm.addPass(createHoistAllocasPass());
pm.addPass(createCIRFlattenCFGPass());
+ pm.addPass(createGotoSolverPass());
}
} // namespace mlir
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 7e1c9fb..f1fdfed 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -201,7 +201,8 @@ public:
mlir::Value visit(mlir::Attribute attr) {
return llvm::TypeSwitch<mlir::Attribute, mlir::Value>(attr)
.Case<cir::IntAttr, cir::FPAttr, cir::ConstComplexAttr,
- cir::ConstArrayAttr, cir::ConstVectorAttr, cir::ConstPtrAttr,
+ cir::ConstArrayAttr, cir::ConstRecordAttr, cir::ConstVectorAttr,
+ cir::ConstPtrAttr, cir::GlobalViewAttr, cir::VTableAttr,
cir::ZeroAttr>([&](auto attrT) { return visitCirAttr(attrT); })
.Default([&](auto attrT) { return mlir::Value(); });
}
@@ -211,7 +212,10 @@ public:
mlir::Value visitCirAttr(cir::ConstComplexAttr complexAttr);
mlir::Value visitCirAttr(cir::ConstPtrAttr ptrAttr);
mlir::Value visitCirAttr(cir::ConstArrayAttr attr);
+ mlir::Value visitCirAttr(cir::ConstRecordAttr attr);
mlir::Value visitCirAttr(cir::ConstVectorAttr attr);
+ mlir::Value visitCirAttr(cir::GlobalViewAttr attr);
+ mlir::Value visitCirAttr(cir::VTableAttr attr);
mlir::Value visitCirAttr(cir::ZeroAttr attr);
private:
@@ -265,6 +269,26 @@ void convertSideEffectForCall(mlir::Operation *callOp, bool isNothrow,
}
}
+static mlir::LLVM::CallIntrinsicOp
+createCallLLVMIntrinsicOp(mlir::ConversionPatternRewriter &rewriter,
+ mlir::Location loc, const llvm::Twine &intrinsicName,
+ mlir::Type resultTy, mlir::ValueRange operands) {
+ auto intrinsicNameAttr =
+ mlir::StringAttr::get(rewriter.getContext(), intrinsicName);
+ return mlir::LLVM::CallIntrinsicOp::create(rewriter, loc, resultTy,
+ intrinsicNameAttr, operands);
+}
+
+static mlir::LLVM::CallIntrinsicOp replaceOpWithCallLLVMIntrinsicOp(
+ mlir::ConversionPatternRewriter &rewriter, mlir::Operation *op,
+ const llvm::Twine &intrinsicName, mlir::Type resultTy,
+ mlir::ValueRange operands) {
+ mlir::LLVM::CallIntrinsicOp callIntrinOp = createCallLLVMIntrinsicOp(
+ rewriter, op->getLoc(), intrinsicName, resultTy, operands);
+ rewriter.replaceOp(op, callIntrinOp.getOperation());
+ return callIntrinOp;
+}
+
/// IntAttr visitor.
mlir::Value CIRAttrToValue::visitCirAttr(cir::IntAttr intAttr) {
mlir::Location loc = parentOp->getLoc();
@@ -364,6 +388,21 @@ mlir::Value CIRAttrToValue::visitCirAttr(cir::ConstArrayAttr attr) {
return result;
}
+/// ConstRecord visitor.
+mlir::Value CIRAttrToValue::visitCirAttr(cir::ConstRecordAttr constRecord) {
+ const mlir::Type llvmTy = converter->convertType(constRecord.getType());
+ const mlir::Location loc = parentOp->getLoc();
+ mlir::Value result = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmTy);
+
+ // Iteratively lower each constant element of the record.
+ for (auto [idx, elt] : llvm::enumerate(constRecord.getMembers())) {
+ mlir::Value init = visit(elt);
+ result = rewriter.create<mlir::LLVM::InsertValueOp>(loc, result, init, idx);
+ }
+
+ return result;
+}
+
/// ConstVectorAttr visitor.
mlir::Value CIRAttrToValue::visitCirAttr(cir::ConstVectorAttr attr) {
const mlir::Type llvmTy = converter->convertType(attr.getType());
@@ -391,6 +430,92 @@ mlir::Value CIRAttrToValue::visitCirAttr(cir::ConstVectorAttr attr) {
mlirValues));
}
+/// GlobalViewAttr visitor.
+mlir::Value CIRAttrToValue::visitCirAttr(cir::GlobalViewAttr globalAttr) {
+ auto moduleOp = parentOp->getParentOfType<mlir::ModuleOp>();
+ mlir::DataLayout dataLayout(moduleOp);
+ mlir::Type sourceType;
+ assert(!cir::MissingFeatures::addressSpace());
+ llvm::StringRef symName;
+ mlir::Operation *sourceSymbol =
+ mlir::SymbolTable::lookupSymbolIn(moduleOp, globalAttr.getSymbol());
+ if (auto llvmSymbol = dyn_cast<mlir::LLVM::GlobalOp>(sourceSymbol)) {
+ sourceType = llvmSymbol.getType();
+ symName = llvmSymbol.getSymName();
+ } else if (auto cirSymbol = dyn_cast<cir::GlobalOp>(sourceSymbol)) {
+ sourceType =
+ convertTypeForMemory(*converter, dataLayout, cirSymbol.getSymType());
+ symName = cirSymbol.getSymName();
+ } else if (auto llvmFun = dyn_cast<mlir::LLVM::LLVMFuncOp>(sourceSymbol)) {
+ sourceType = llvmFun.getFunctionType();
+ symName = llvmFun.getSymName();
+ } else if (auto fun = dyn_cast<cir::FuncOp>(sourceSymbol)) {
+ sourceType = converter->convertType(fun.getFunctionType());
+ symName = fun.getSymName();
+ } else if (auto alias = dyn_cast<mlir::LLVM::AliasOp>(sourceSymbol)) {
+ sourceType = alias.getType();
+ symName = alias.getSymName();
+ } else {
+ llvm_unreachable("Unexpected GlobalOp type");
+ }
+
+ mlir::Location loc = parentOp->getLoc();
+ mlir::Value addrOp = rewriter.create<mlir::LLVM::AddressOfOp>(
+ loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), symName);
+
+ if (globalAttr.getIndices()) {
+ llvm::SmallVector<mlir::LLVM::GEPArg> indices;
+
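+ // Array and struct sources need a leading zero index so the GEP first
+ // steps through the global's address before applying the attribute's own
+ // indices.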
+ if (mlir::isa<mlir::LLVM::LLVMArrayType, mlir::LLVM::LLVMStructType>(
+ sourceType))
+ indices.push_back(0);
+
+ for (mlir::Attribute idx : globalAttr.getIndices()) {
+ auto intAttr = mlir::cast<mlir::IntegerAttr>(idx);
+ indices.push_back(intAttr.getValue().getSExtValue());
+ }
+ mlir::Type resTy = addrOp.getType();
+ mlir::Type eltTy = converter->convertType(sourceType);
+ addrOp = rewriter.create<mlir::LLVM::GEPOp>(
+ loc, resTy, eltTy, addrOp, indices, mlir::LLVM::GEPNoWrapFlags::none);
+ }
+
+ // The incubator has handling here for the attribute having integer type, but
+ // the only test case I could find that reaches it is a direct CIR-to-LLVM IR
+ // lowering with no clear indication of how the CIR might have been generated.
+ // We'll hit the unreachable below if this happens.
+ assert(!cir::MissingFeatures::globalViewIntLowering());
+
+ if (auto ptrTy = mlir::dyn_cast<cir::PointerType>(globalAttr.getType())) {
+ mlir::Type llvmEltTy =
+ convertTypeForMemory(*converter, dataLayout, ptrTy.getPointee());
+
+ if (llvmEltTy == sourceType)
+ return addrOp;
+
+ mlir::Type llvmDstTy = converter->convertType(globalAttr.getType());
+ return rewriter.create<mlir::LLVM::BitcastOp>(parentOp->getLoc(), llvmDstTy,
+ addrOp);
+ }
+
+ llvm_unreachable("Expecting pointer or integer type for GlobalViewAttr");
+}
+
+/// VTableAttr visitor.
+mlir::Value CIRAttrToValue::visitCirAttr(cir::VTableAttr vtableArr) {
+ mlir::Type llvmTy = converter->convertType(vtableArr.getType());
+ mlir::Location loc = parentOp->getLoc();
+ mlir::Value result = mlir::LLVM::UndefOp::create(rewriter, loc, llvmTy);
+
+ for (auto [idx, elt] : llvm::enumerate(vtableArr.getData())) {
+ mlir::Value init = visit(elt);
+ result =
+ mlir::LLVM::InsertValueOp::create(rewriter, loc, result, init, idx);
+ }
+
+ return result;
+}
+
/// ZeroAttr visitor.
mlir::Value CIRAttrToValue::visitCirAttr(cir::ZeroAttr attr) {
mlir::Location loc = parentOp->getLoc();
@@ -460,6 +585,29 @@ mlir::LogicalResult CIRToLLVMAssumeOpLowering::matchAndRewrite(
return mlir::success();
}
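+
+// Lower cir.assume_aligned to llvm.assume with an "align" operand bundle. A
+// sketch of the LLVM IR this produces (spelling assumed):
+//   call void @llvm.assume(i1 true) ["align"(ptr %ptr, i64 <alignment>)]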
+mlir::LogicalResult CIRToLLVMAssumeAlignedOpLowering::matchAndRewrite(
+ cir::AssumeAlignedOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ SmallVector<mlir::Value, 3> opBundleArgs{adaptor.getPointer()};
+
+ auto alignment = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ adaptor.getAlignmentAttr());
+ opBundleArgs.push_back(alignment);
+
+ if (mlir::Value offset = adaptor.getOffset())
+ opBundleArgs.push_back(offset);
+
+ auto cond = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ rewriter.getI1Type(), 1);
+ mlir::LLVM::AssumeOp::create(rewriter, op.getLoc(), cond, "align",
+ opBundleArgs);
+
+ // The llvm.assume operation does not have a result, so we need to replace
+ // all uses of this cir.assume_aligned operation with the input ptr itself.
+ rewriter.replaceOp(op, adaptor.getPointer());
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMAssumeSepStorageOpLowering::matchAndRewrite(
cir::AssumeSepStorageOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -989,8 +1137,7 @@ rewriteCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands,
auto calleeTy = op->getOperands().front().getType();
auto calleePtrTy = cast<cir::PointerType>(calleeTy);
auto calleeFuncTy = cast<cir::FuncType>(calleePtrTy.getPointee());
- calleeFuncTy.dump();
- converter->convertType(calleeFuncTy).dump();
+ llvm::append_range(adjustedCallOperands, callOperands);
llvmFnTy = cast<mlir::LLVM::LLVMFunctionType>(
converter->convertType(calleeFuncTy));
}
@@ -1016,12 +1163,50 @@ mlir::LogicalResult CIRToLLVMCallOpLowering::matchAndRewrite(
getTypeConverter(), op.getCalleeAttr());
}
+mlir::LogicalResult CIRToLLVMReturnAddrOpLowering::matchAndRewrite(
+ cir::ReturnAddrOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext());
+ replaceOpWithCallLLVMIntrinsicOp(rewriter, op, "llvm.returnaddress",
+ llvmPtrTy, adaptor.getOperands());
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMFrameAddrOpLowering::matchAndRewrite(
+ cir::FrameAddrOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext());
+ replaceOpWithCallLLVMIntrinsicOp(rewriter, op, "llvm.frameaddress", llvmPtrTy,
+ adaptor.getOperands());
+ return mlir::success();
+}
+
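+// Map a CIR memory order to an LLVM dialect atomic ordering. Consume is
+// lowered as acquire, since LLVM has no consume ordering; this matches
+// classic codegen.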
+static mlir::LLVM::AtomicOrdering
+getLLVMMemOrder(std::optional<cir::MemOrder> memorder) {
+ if (!memorder)
+ return mlir::LLVM::AtomicOrdering::not_atomic;
+ switch (*memorder) {
+ case cir::MemOrder::Relaxed:
+ return mlir::LLVM::AtomicOrdering::monotonic;
+ case cir::MemOrder::Consume:
+ case cir::MemOrder::Acquire:
+ return mlir::LLVM::AtomicOrdering::acquire;
+ case cir::MemOrder::Release:
+ return mlir::LLVM::AtomicOrdering::release;
+ case cir::MemOrder::AcquireRelease:
+ return mlir::LLVM::AtomicOrdering::acq_rel;
+ case cir::MemOrder::SequentiallyConsistent:
+ return mlir::LLVM::AtomicOrdering::seq_cst;
+ }
+ llvm_unreachable("unknown memory order");
+}
+
mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
cir::LoadOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
const mlir::Type llvmTy =
convertTypeForMemory(*getTypeConverter(), dataLayout, op.getType());
- assert(!cir::MissingFeatures::opLoadStoreMemOrder());
+ mlir::LLVM::AtomicOrdering ordering = getLLVMMemOrder(op.getMemOrder());
std::optional<size_t> opAlign = op.getAlignment();
unsigned alignment =
(unsigned)opAlign.value_or(dataLayout.getTypeABIAlignment(llvmTy));
@@ -1030,11 +1215,10 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
// TODO: nontemporal, syncscope.
assert(!cir::MissingFeatures::opLoadStoreVolatile());
- mlir::LLVM::LoadOp newLoad = rewriter.create<mlir::LLVM::LoadOp>(
- op->getLoc(), llvmTy, adaptor.getAddr(), alignment,
- /*volatile=*/false, /*nontemporal=*/false,
- /*invariant=*/false, /*invariantGroup=*/false,
- mlir::LLVM::AtomicOrdering::not_atomic);
+ mlir::LLVM::LoadOp newLoad = mlir::LLVM::LoadOp::create(
+ rewriter, op->getLoc(), llvmTy, adaptor.getAddr(), alignment,
+ /*isVolatile=*/false, /*isNonTemporal=*/false,
+ /*isInvariant=*/false, /*isInvariantGroup=*/false, ordering);
// Convert adapted result to its original type if needed.
mlir::Value result =
@@ -1047,7 +1231,7 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite(
cir::StoreOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
- assert(!cir::MissingFeatures::opLoadStoreMemOrder());
+ mlir::LLVM::AtomicOrdering memorder = getLLVMMemOrder(op.getMemOrder());
const mlir::Type llvmTy =
getTypeConverter()->convertType(op.getValue().getType());
std::optional<size_t> opAlign = op.getAlignment();
@@ -1061,10 +1245,10 @@ mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite(
op.getValue().getType(), adaptor.getValue());
// TODO: nontemporal, syncscope.
assert(!cir::MissingFeatures::opLoadStoreVolatile());
- mlir::LLVM::StoreOp storeOp = rewriter.create<mlir::LLVM::StoreOp>(
- op->getLoc(), value, adaptor.getAddr(), alignment, /*volatile=*/false,
- /*nontemporal=*/false, /*invariantGroup=*/false,
- mlir::LLVM::AtomicOrdering::not_atomic);
+ mlir::LLVM::StoreOp storeOp = mlir::LLVM::StoreOp::create(
+ rewriter, op->getLoc(), value, adaptor.getAddr(), alignment,
+ /*isVolatile=*/false,
+ /*isNonTemporal=*/false, /*isInvariantGroup=*/false, memorder);
rewriter.replaceOp(op, storeOp);
assert(!cir::MissingFeatures::opLoadStoreTbaa());
return mlir::LogicalResult::success();
@@ -1101,7 +1285,13 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite(
attr = rewriter.getIntegerAttr(typeConverter->convertType(op.getType()),
value);
} else if (mlir::isa<cir::IntType>(op.getType())) {
- assert(!cir::MissingFeatures::opGlobalViewAttr());
+ // A GlobalViewAttr with an integer result type would lower to
+ // llvm.mlir.addressof + llvm.mlir.ptrtoint, but that path isn't
+ // implemented yet; see the comment in visitCirAttr.
+ if (mlir::isa<cir::GlobalViewAttr>(op.getValue())) {
+ assert(!cir::MissingFeatures::globalViewIntLowering());
+ op.emitError() << "global view with integer type";
+ return mlir::failure();
+ }
attr = rewriter.getIntegerAttr(
typeConverter->convertType(op.getType()),
@@ -1119,7 +1309,12 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite(
return mlir::success();
}
}
- assert(!cir::MissingFeatures::opGlobalViewAttr());
+ // Lower GlobalViewAttr to llvm.mlir.addressof
+ if (auto gv = mlir::dyn_cast<cir::GlobalViewAttr>(op.getValue())) {
+ auto newOp = lowerCirAttrAsValue(op, gv, rewriter, getTypeConverter());
+ rewriter.replaceOp(op, newOp);
+ return mlir::success();
+ }
attr = op.getValue();
} else if (const auto arrTy = mlir::dyn_cast<cir::ArrayType>(op.getType())) {
const auto constArr = mlir::dyn_cast<cir::ConstArrayAttr>(op.getValue());
@@ -1142,6 +1337,11 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite(
rewriter.eraseOp(op);
return mlir::success();
}
+ } else if (const auto recordAttr =
+ mlir::dyn_cast<cir::ConstRecordAttr>(op.getValue())) {
+ auto initVal = lowerCirAttrAsValue(op, recordAttr, rewriter, typeConverter);
+ rewriter.replaceOp(op, initVal);
+ return mlir::success();
} else if (const auto vecTy = mlir::dyn_cast<cir::VectorType>(op.getType())) {
rewriter.replaceOp(op, lowerCirAttrAsValue(op, op.getValue(), rewriter,
getTypeConverter()));
@@ -1204,6 +1404,15 @@ mlir::LogicalResult CIRToLLVMExpectOpLowering::matchAndRewrite(
return mlir::success();
}
+mlir::LogicalResult CIRToLLVMFAbsOpLowering::matchAndRewrite(
+ cir::FAbsOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Type resTy = typeConverter->convertType(op.getType());
+ rewriter.replaceOpWithNewOp<mlir::LLVM::FAbsOp>(op, resTy,
+ adaptor.getOperands()[0]);
+ return mlir::success();
+}
+
/// Convert the `cir.func` attributes to `llvm.func` attributes.
/// Only retain those attributes that are not constructed by
/// `LLVMFuncOp::build`. If `filterArgAttrs` is set, also filter out
@@ -1349,7 +1558,7 @@ void CIRToLLVMGlobalOpLowering::setupRegionInitializedLLVMGlobalOp(
// in CIRToLLVMGlobalOpLowering::matchAndRewrite() but that will go
// away when the placeholders are no longer needed.
assert(!cir::MissingFeatures::opGlobalConstant());
- const bool isConst = false;
+ const bool isConst = op.getConstant();
assert(!cir::MissingFeatures::addressSpace());
const unsigned addrSpace = 0;
const bool isDsoLocal = op.getDsoLocal();
@@ -1374,8 +1583,9 @@ CIRToLLVMGlobalOpLowering::matchAndRewriteRegionInitializedGlobal(
cir::GlobalOp op, mlir::Attribute init,
mlir::ConversionPatternRewriter &rewriter) const {
// TODO: Generalize this handling when more types are needed here.
- assert((isa<cir::ConstArrayAttr, cir::ConstVectorAttr, cir::ConstPtrAttr,
- cir::ConstComplexAttr, cir::ZeroAttr>(init)));
+ assert((isa<cir::ConstArrayAttr, cir::ConstRecordAttr, cir::ConstVectorAttr,
+ cir::ConstPtrAttr, cir::ConstComplexAttr, cir::GlobalViewAttr,
+ cir::VTableAttr, cir::ZeroAttr>(init)));
// TODO(cir): once LLVM's dialect has proper equivalent attributes this
// should be updated. For now, we use a custom op to initialize globals
@@ -1428,8 +1638,9 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite(
return mlir::failure();
}
} else if (mlir::isa<cir::ConstArrayAttr, cir::ConstVectorAttr,
- cir::ConstPtrAttr, cir::ConstComplexAttr,
- cir::ZeroAttr>(init.value())) {
+ cir::ConstRecordAttr, cir::ConstPtrAttr,
+ cir::ConstComplexAttr, cir::GlobalViewAttr,
+ cir::VTableAttr, cir::ZeroAttr>(init.value())) {
// TODO(cir): once LLVM's dialect has proper equivalent attributes this
// should be updated. For now, we use a custom op to initialize globals
// to the appropriate value.
@@ -1981,6 +2192,10 @@ static void prepareTypeConverter(mlir::LLVMTypeConverter &converter,
return mlir::LLVM::LLVMPointerType::get(type.getContext(), targetAS);
});
+ converter.addConversion([&](cir::VPtrType type) -> mlir::Type {
+ assert(!cir::MissingFeatures::addressSpace());
+ return mlir::LLVM::LLVMPointerType::get(type.getContext());
+ });
converter.addConversion([&](cir::ArrayType type) -> mlir::Type {
mlir::Type ty =
convertTypeForMemory(converter, dataLayout, type.getElementType());
@@ -2072,6 +2290,9 @@ static void prepareTypeConverter(mlir::LLVMTypeConverter &converter,
return llvmStruct;
});
+ converter.addConversion([&](cir::VoidType type) -> mlir::Type {
+ return mlir::LLVM::LLVMVoidType::get(type.getContext());
+ });
}
// The applyPartialConversion function traverses blocks in the dominance order,
@@ -2143,6 +2364,11 @@ void ConvertCIRToLLVMPass::processCIRAttrs(mlir::ModuleOp module) {
module->getAttr(cir::CIRDialect::getTripleAttrName()))
module->setAttr(mlir::LLVM::LLVMDialect::getTargetTripleAttrName(),
tripleAttr);
+
+ if (mlir::Attribute asmAttr =
+ module->getAttr(cir::CIRDialect::getModuleLevelAsmAttrName()))
+ module->setAttr(mlir::LLVM::LLVMDialect::getModuleLevelAsmAttrName(),
+ asmAttr);
}
void ConvertCIRToLLVMPass::runOnOperation() {
@@ -2165,9 +2391,12 @@ void ConvertCIRToLLVMPass::runOnOperation() {
patterns.add<CIRToLLVMCastOpLowering>(converter, patterns.getContext(), dl);
patterns.add<CIRToLLVMPtrStrideOpLowering>(converter, patterns.getContext(),
dl);
+ patterns.add<CIRToLLVMInlineAsmOpLowering>(converter, patterns.getContext(),
+ dl);
patterns.add<
// clang-format off
CIRToLLVMAssumeOpLowering,
+ CIRToLLVMAssumeAlignedOpLowering,
CIRToLLVMAssumeSepStorageOpLowering,
CIRToLLVMBaseClassAddrOpLowering,
CIRToLLVMBinOpLowering,
@@ -2192,10 +2421,13 @@ void ConvertCIRToLLVMPass::runOnOperation() {
CIRToLLVMComplexSubOpLowering,
CIRToLLVMConstantOpLowering,
CIRToLLVMExpectOpLowering,
+ CIRToLLVMFAbsOpLowering,
+ CIRToLLVMFrameAddrOpLowering,
CIRToLLVMFuncOpLowering,
CIRToLLVMGetBitfieldOpLowering,
CIRToLLVMGetGlobalOpLowering,
CIRToLLVMGetMemberOpLowering,
+ CIRToLLVMReturnAddrOpLowering,
CIRToLLVMRotateOpLowering,
CIRToLLVMSelectOpLowering,
CIRToLLVMSetBitfieldOpLowering,
@@ -2203,8 +2435,13 @@ void ConvertCIRToLLVMPass::runOnOperation() {
CIRToLLVMStackRestoreOpLowering,
CIRToLLVMStackSaveOpLowering,
CIRToLLVMSwitchFlatOpLowering,
+ CIRToLLVMThrowOpLowering,
CIRToLLVMTrapOpLowering,
CIRToLLVMUnaryOpLowering,
+ CIRToLLVMUnreachableOpLowering,
+ CIRToLLVMVAArgOpLowering,
+ CIRToLLVMVAEndOpLowering,
+ CIRToLLVMVAStartOpLowering,
CIRToLLVMVecCmpOpLowering,
CIRToLLVMVecCreateOpLowering,
CIRToLLVMVecExtractOpLowering,
@@ -2213,7 +2450,10 @@ void ConvertCIRToLLVMPass::runOnOperation() {
CIRToLLVMVecShuffleOpLowering,
CIRToLLVMVecSplatOpLowering,
CIRToLLVMVecTernaryOpLowering,
- CIRToLLVMUnreachableOpLowering
+ CIRToLLVMVTableAddrPointOpLowering,
+ CIRToLLVMVTableGetVPtrOpLowering,
+ CIRToLLVMVTableGetVirtualFnAddrOpLowering,
+ CIRToLLVMVTTAddrPointOpLowering
// clang-format on
>(converter, patterns.getContext());
@@ -2276,6 +2516,42 @@ mlir::LogicalResult CIRToLLVMUnreachableOpLowering::matchAndRewrite(
return mlir::success();
}
+void createLLVMFuncOpIfNotExist(mlir::ConversionPatternRewriter &rewriter,
+ mlir::Operation *srcOp, llvm::StringRef fnName,
+ mlir::Type fnTy) {
+ auto modOp = srcOp->getParentOfType<mlir::ModuleOp>();
+ auto enclosingFnOp = srcOp->getParentOfType<mlir::LLVM::LLVMFuncOp>();
+ mlir::Operation *sourceSymbol =
+ mlir::SymbolTable::lookupSymbolIn(modOp, fnName);
+ if (!sourceSymbol) {
+ mlir::OpBuilder::InsertionGuard guard(rewriter);
+ rewriter.setInsertionPoint(enclosingFnOp);
+ rewriter.create<mlir::LLVM::LLVMFuncOp>(srcOp->getLoc(), fnName, fnTy);
+ }
+}
+
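+// Only the rethrow form is lowered here: it becomes a call to the Itanium
+// C++ ABI entry point __cxa_rethrow, declared at module scope on demand.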
+mlir::LogicalResult CIRToLLVMThrowOpLowering::matchAndRewrite(
+ cir::ThrowOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ if (op.rethrows()) {
+ auto voidTy = mlir::LLVM::LLVMVoidType::get(getContext());
+ auto funcTy =
+ mlir::LLVM::LLVMFunctionType::get(getContext(), voidTy, {}, false);
+
+ auto mlirModule = op->getParentOfType<mlir::ModuleOp>();
+ rewriter.setInsertionPointToStart(&mlirModule.getBodyRegion().front());
+
+ const llvm::StringRef functionName = "__cxa_rethrow";
+ createLLVMFuncOpIfNotExist(rewriter, op, functionName, funcTy);
+
+ rewriter.setInsertionPointAfter(op.getOperation());
+ rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
+ op, mlir::TypeRange{}, functionName, mlir::ValueRange{});
+ }
+
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMTrapOpLowering::matchAndRewrite(
cir::TrapOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -2292,6 +2568,106 @@ mlir::LogicalResult CIRToLLVMTrapOpLowering::matchAndRewrite(
return mlir::success();
}
+static mlir::Value
+getValueForVTableSymbol(mlir::Operation *op,
+ mlir::ConversionPatternRewriter &rewriter,
+ const mlir::TypeConverter *converter,
+ mlir::FlatSymbolRefAttr nameAttr, mlir::Type &eltType) {
+ auto module = op->getParentOfType<mlir::ModuleOp>();
+ mlir::Operation *symbol = mlir::SymbolTable::lookupSymbolIn(module, nameAttr);
+ if (auto llvmSymbol = mlir::dyn_cast<mlir::LLVM::GlobalOp>(symbol)) {
+ eltType = llvmSymbol.getType();
+ } else if (auto cirSymbol = mlir::dyn_cast<cir::GlobalOp>(symbol)) {
+ eltType = converter->convertType(cirSymbol.getSymType());
+ } else {
+ op->emitError() << "unexpected symbol type for " << symbol;
+ return {};
+ }
+
+ return mlir::LLVM::AddressOfOp::create(
+ rewriter, op->getLoc(),
+ mlir::LLVM::LLVMPointerType::get(op->getContext()), nameAttr.getValue());
+}
+
+mlir::LogicalResult CIRToLLVMVTableAddrPointOpLowering::matchAndRewrite(
+ cir::VTableAddrPointOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ const mlir::TypeConverter *converter = getTypeConverter();
+ mlir::Type targetType = converter->convertType(op.getType());
+ llvm::SmallVector<mlir::LLVM::GEPArg> offsets;
+ mlir::Type eltType;
+ mlir::Value symAddr = getValueForVTableSymbol(op, rewriter, converter,
+ op.getNameAttr(), eltType);
+ if (!symAddr)
+ return op.emitError() << "unable to get value for vtable symbol";
+
+ offsets = llvm::SmallVector<mlir::LLVM::GEPArg>{
+ 0, op.getAddressPointAttr().getIndex(),
+ op.getAddressPointAttr().getOffset()};
+
+ assert(eltType && "Shouldn't ever be missing an eltType here");
+ mlir::LLVM::GEPNoWrapFlags inboundsNuw =
+ mlir::LLVM::GEPNoWrapFlags::inbounds | mlir::LLVM::GEPNoWrapFlags::nuw;
+ rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(op, targetType, eltType,
+ symAddr, offsets, inboundsNuw);
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMVTableGetVPtrOpLowering::matchAndRewrite(
+ cir::VTableGetVPtrOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ // cir.vtable.get_vptr is equivalent to a bitcast from the source object
+ // pointer to the vptr type. Since the LLVM dialect uses opaque pointers
+ // we can just replace uses of this operation with the original pointer.
+ mlir::Value srcVal = adaptor.getSrc();
+ rewriter.replaceAllUsesWith(op, srcVal);
+ rewriter.eraseOp(op);
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMVTableGetVirtualFnAddrOpLowering::matchAndRewrite(
+ cir::VTableGetVirtualFnAddrOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Type targetType = getTypeConverter()->convertType(op.getType());
+ auto eltType = mlir::LLVM::LLVMPointerType::get(rewriter.getContext());
+ llvm::SmallVector<mlir::LLVM::GEPArg> offsets{op.getIndex()};
+ rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(
+ op, targetType, eltType, adaptor.getVptr(), offsets,
+ mlir::LLVM::GEPNoWrapFlags::inbounds);
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMVTTAddrPointOpLowering::matchAndRewrite(
+ cir::VTTAddrPointOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ const mlir::Type resultType = getTypeConverter()->convertType(op.getType());
+ llvm::SmallVector<mlir::LLVM::GEPArg> offsets;
+ mlir::Type eltType;
+ mlir::Value llvmAddr = adaptor.getSymAddr();
+
+ if (op.getSymAddr()) {
+ if (op.getOffset() == 0) {
+ rewriter.replaceOp(op, {llvmAddr});
+ return mlir::success();
+ }
+
+ offsets.push_back(adaptor.getOffset());
+ eltType = mlir::IntegerType::get(resultType.getContext(), 8,
+ mlir::IntegerType::Signless);
+ } else {
+ llvmAddr = getValueForVTableSymbol(op, rewriter, getTypeConverter(),
+ op.getNameAttr(), eltType);
+ assert(eltType && "Shouldn't ever be missing an eltType here");
+ offsets.push_back(0);
+ offsets.push_back(adaptor.getOffset());
+ }
+ rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(
+ op, resultType, eltType, llvmAddr, offsets,
+ mlir::LLVM::GEPNoWrapFlags::inbounds);
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMStackSaveOpLowering::matchAndRewrite(
cir::StackSaveOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -2797,6 +3173,105 @@ mlir::LogicalResult CIRToLLVMGetBitfieldOpLowering::matchAndRewrite(
return mlir::success();
}
+mlir::LogicalResult CIRToLLVMInlineAsmOpLowering::matchAndRewrite(
+ cir::InlineAsmOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Type llResTy;
+ if (op.getNumResults())
+ llResTy = getTypeConverter()->convertType(op.getType(0));
+
+ cir::AsmFlavor dialect = op.getAsmFlavor();
+ mlir::LLVM::AsmDialect llDialect = dialect == cir::AsmFlavor::x86_att
+ ? mlir::LLVM::AsmDialect::AD_ATT
+ : mlir::LLVM::AsmDialect::AD_Intel;
+
+ SmallVector<mlir::Attribute> opAttrs;
+ StringRef llvmAttrName = mlir::LLVM::InlineAsmOp::getElementTypeAttrName();
+
+ // This matters when the LLVM dialect is lowered further to LLVM IR: if the
+ // operation has no result (i.e. a void result type), the element type
+ // attribute would otherwise be attached to the whole instruction rather
+ // than to the operand.
+ if (!op.getNumResults())
+ opAttrs.push_back(mlir::Attribute());
+
+ SmallVector<mlir::Value> llvmOperands;
+ SmallVector<mlir::Value> cirOperands;
+ for (auto const &[llvmOp, cirOp] :
+ zip(adaptor.getAsmOperands(), op.getAsmOperands())) {
+ append_range(llvmOperands, llvmOp);
+ append_range(cirOperands, cirOp);
+ }
+
+ // For now, we infer the LLVM dialect element type attribute from the
+ // CIR operand type.
+ for (auto const &[cirOpAttr, cirOp] :
+ zip(op.getOperandAttrs(), cirOperands)) {
+ if (!cirOpAttr) {
+ opAttrs.push_back(mlir::Attribute());
+ continue;
+ }
+
+ llvm::SmallVector<mlir::NamedAttribute, 1> attrs;
+ cir::PointerType typ = mlir::cast<cir::PointerType>(cirOp.getType());
+ mlir::TypeAttr typAttr = mlir::TypeAttr::get(convertTypeForMemory(
+ *getTypeConverter(), dataLayout, typ.getPointee()));
+
+ attrs.push_back(rewriter.getNamedAttr(llvmAttrName, typAttr));
+ mlir::DictionaryAttr newDict = rewriter.getDictionaryAttr(attrs);
+ opAttrs.push_back(newDict);
+ }
+
+ rewriter.replaceOpWithNewOp<mlir::LLVM::InlineAsmOp>(
+ op, llResTy, llvmOperands, op.getAsmStringAttr(), op.getConstraintsAttr(),
+ op.getSideEffectsAttr(),
+ /*is_align_stack*/ mlir::UnitAttr(),
+ /*tail_call_kind*/
+ mlir::LLVM::TailCallKindAttr::get(
+ getContext(), mlir::LLVM::tailcallkind::TailCallKind::None),
+ mlir::LLVM::AsmDialectAttr::get(getContext(), llDialect),
+ rewriter.getArrayAttr(opAttrs));
+
+ return mlir::success();
+}
+
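+// The va_* lowerings below forward to the corresponding LLVM intrinsics,
+// passing an opaque pointer to the va_list storage. Target-specific va_arg
+// ABI handling is not implemented yet (note the vaArgABILowering assert in
+// the cir.va_arg lowering).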
+mlir::LogicalResult CIRToLLVMVAStartOpLowering::matchAndRewrite(
+ cir::VAStartOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext());
+ auto vaList = mlir::LLVM::BitcastOp::create(rewriter, op.getLoc(), opaquePtr,
+ adaptor.getArgList());
+ rewriter.replaceOpWithNewOp<mlir::LLVM::VaStartOp>(op, vaList);
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMVAEndOpLowering::matchAndRewrite(
+ cir::VAEndOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext());
+ auto vaList = mlir::LLVM::BitcastOp::create(rewriter, op.getLoc(), opaquePtr,
+ adaptor.getArgList());
+ rewriter.replaceOpWithNewOp<mlir::LLVM::VaEndOp>(op, vaList);
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMVAArgOpLowering::matchAndRewrite(
+ cir::VAArgOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ assert(!cir::MissingFeatures::vaArgABILowering());
+ auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext());
+ auto vaList = mlir::LLVM::BitcastOp::create(rewriter, op.getLoc(), opaquePtr,
+ adaptor.getArgList());
+
+ mlir::Type llvmType =
+ getTypeConverter()->convertType(op->getResultTypes().front());
+ if (!llvmType)
+ return mlir::failure();
+
+ rewriter.replaceOpWithNewOp<mlir::LLVM::VaArgOp>(op, llvmType, vaList);
+ return mlir::success();
+}
+
std::unique_ptr<mlir::Pass> createConvertCIRToLLVMPass() {
return std::make_unique<ConvertCIRToLLVMPass>();
}
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index c5106cb..da7df89 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -44,6 +44,16 @@ public:
mlir::ConversionPatternRewriter &) const override;
};
+class CIRToLLVMAssumeAlignedOpLowering
+ : public mlir::OpConversionPattern<cir::AssumeAlignedOp> {
+public:
+ using mlir::OpConversionPattern<cir::AssumeAlignedOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::AssumeAlignedOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
class CIRToLLVMAssumeSepStorageOpLowering
: public mlir::OpConversionPattern<cir::AssumeSepStorageOp> {
public:
@@ -199,6 +209,26 @@ public:
mlir::ConversionPatternRewriter &rewriter) const override;
};
+class CIRToLLVMReturnAddrOpLowering
+ : public mlir::OpConversionPattern<cir::ReturnAddrOp> {
+public:
+ using mlir::OpConversionPattern<cir::ReturnAddrOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::ReturnAddrOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMFrameAddrOpLowering
+ : public mlir::OpConversionPattern<cir::FrameAddrOp> {
+public:
+ using mlir::OpConversionPattern<cir::FrameAddrOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::FrameAddrOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
class CIRToLLVMAllocaOpLowering
: public mlir::OpConversionPattern<cir::AllocaOp> {
mlir::DataLayout const &dataLayout;
@@ -447,6 +477,47 @@ public:
mlir::ConversionPatternRewriter &) const override;
};
+class CIRToLLVMVTableAddrPointOpLowering
+ : public mlir::OpConversionPattern<cir::VTableAddrPointOp> {
+public:
+ using mlir::OpConversionPattern<cir::VTableAddrPointOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VTableAddrPointOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMVTableGetVPtrOpLowering
+ : public mlir::OpConversionPattern<cir::VTableGetVPtrOp> {
+public:
+ using mlir::OpConversionPattern<cir::VTableGetVPtrOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VTableGetVPtrOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMVTableGetVirtualFnAddrOpLowering
+ : public mlir::OpConversionPattern<cir::VTableGetVirtualFnAddrOp> {
+public:
+ using mlir::OpConversionPattern<
+ cir::VTableGetVirtualFnAddrOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VTableGetVirtualFnAddrOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMVTTAddrPointOpLowering
+ : public mlir::OpConversionPattern<cir::VTTAddrPointOp> {
+public:
+ using mlir::OpConversionPattern<cir::VTTAddrPointOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VTTAddrPointOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
class CIRToLLVMStackSaveOpLowering
: public mlir::OpConversionPattern<cir::StackSaveOp> {
public:
@@ -638,6 +709,72 @@ public:
mlir::ConversionPatternRewriter &) const override;
};
+class CIRToLLVMFAbsOpLowering : public mlir::OpConversionPattern<cir::FAbsOp> {
+public:
+ using mlir::OpConversionPattern<cir::FAbsOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::FAbsOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMInlineAsmOpLowering
+ : public mlir::OpConversionPattern<cir::InlineAsmOp> {
+ mlir::DataLayout const &dataLayout;
+
+public:
+ CIRToLLVMInlineAsmOpLowering(const mlir::TypeConverter &typeConverter,
+ mlir::MLIRContext *context,
+ mlir::DataLayout const &dataLayout)
+ : OpConversionPattern(typeConverter, context), dataLayout(dataLayout) {}
+
+ using mlir::OpConversionPattern<cir::InlineAsmOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::InlineAsmOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMThrowOpLowering
+ : public mlir::OpConversionPattern<cir::ThrowOp> {
+public:
+ using mlir::OpConversionPattern<cir::ThrowOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::ThrowOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMVAStartOpLowering
+ : public mlir::OpConversionPattern<cir::VAStartOp> {
+public:
+ using mlir::OpConversionPattern<cir::VAStartOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VAStartOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMVAEndOpLowering
+ : public mlir::OpConversionPattern<cir::VAEndOp> {
+public:
+ using mlir::OpConversionPattern<cir::VAEndOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VAEndOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMVAArgOpLowering
+ : public mlir::OpConversionPattern<cir::VAArgOp> {
+public:
+ using mlir::OpConversionPattern<cir::VAArgOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VAArgOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
} // namespace direct
} // namespace cir
diff --git a/clang/lib/CodeGen/ABIInfo.cpp b/clang/lib/CodeGen/ABIInfo.cpp
index 3ef430e1..acd6781 100644
--- a/clang/lib/CodeGen/ABIInfo.cpp
+++ b/clang/lib/CodeGen/ABIInfo.cpp
@@ -67,8 +67,7 @@ bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
return false;
Members *= NElements;
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ } else if (const auto *RD = Ty->getAsRecordDecl()) {
if (RD->hasFlexibleArrayMember())
return false;
@@ -244,6 +243,14 @@ ABIInfo::getOptimalVectorMemoryType(llvm::FixedVectorType *T,
return T;
}
+llvm::Value *ABIInfo::createCoercedLoad(Address SrcAddr, const ABIArgInfo &AI,
+ CodeGenFunction &CGF) const {
+ return nullptr;
+}
+
+void ABIInfo::createCoercedStore(llvm::Value *Val, Address DstAddr,
+ const ABIArgInfo &AI, bool DestIsVolatile,
+ CodeGenFunction &CGF) const {}
// Pin the vtable to this file.
SwiftABIInfo::~SwiftABIInfo() = default;
diff --git a/clang/lib/CodeGen/ABIInfo.h b/clang/lib/CodeGen/ABIInfo.h
index 9c7029c..130fcd3 100644
--- a/clang/lib/CodeGen/ABIInfo.h
+++ b/clang/lib/CodeGen/ABIInfo.h
@@ -132,6 +132,12 @@ public:
virtual llvm::FixedVectorType *
getOptimalVectorMemoryType(llvm::FixedVectorType *T,
const LangOptions &Opt) const;
+
+ virtual llvm::Value *createCoercedLoad(Address SrcAddr, const ABIArgInfo &AI,
+ CodeGenFunction &CGF) const;
+ virtual void createCoercedStore(llvm::Value *Val, Address DstAddr,
+ const ABIArgInfo &AI, bool DestIsVolatile,
+ CodeGenFunction &CGF) const;
};
/// Target specific hooks for defining how a type should be passed or returned
diff --git a/clang/lib/CodeGen/ABIInfoImpl.cpp b/clang/lib/CodeGen/ABIInfoImpl.cpp
index 0a612d3..13c837a 100644
--- a/clang/lib/CodeGen/ABIInfoImpl.cpp
+++ b/clang/lib/CodeGen/ABIInfoImpl.cpp
@@ -28,8 +28,8 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
}
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
ASTContext &Context = getContext();
if (const auto *EIT = Ty->getAs<BitIntType>())
@@ -52,8 +52,8 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
return getNaturalAlignIndirect(RetTy, getDataLayout().getAllocaAddrSpace());
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = RetTy->getAsEnumDecl())
+ RetTy = ED->getIntegerType();
if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() >
@@ -105,17 +105,16 @@ llvm::Type *CodeGen::getVAListElementType(CodeGenFunction &CGF) {
CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(const RecordType *RT,
CGCXXABI &CXXABI) {
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD) {
- if (!RT->getDecl()->canPassInRegisters())
- return CGCXXABI::RAA_Indirect;
- return CGCXXABI::RAA_Default;
- }
- return CXXABI.getRecordArgABI(RD);
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ return CXXABI.getRecordArgABI(CXXRD);
+ if (!RD->canPassInRegisters())
+ return CGCXXABI::RAA_Indirect;
+ return CGCXXABI::RAA_Default;
}
CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(QualType T, CGCXXABI &CXXABI) {
- const RecordType *RT = T->getAs<RecordType>();
+ const RecordType *RT = T->getAsCanonical<RecordType>();
if (!RT)
return CGCXXABI::RAA_Default;
return getRecordArgABI(RT, CXXABI);
@@ -125,20 +124,19 @@ bool CodeGen::classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
const ABIInfo &Info) {
QualType Ty = FI.getReturnType();
- if (const auto *RT = Ty->getAs<RecordType>())
- if (!isa<CXXRecordDecl>(RT->getDecl()) &&
- !RT->getDecl()->canPassInRegisters()) {
- FI.getReturnInfo() = Info.getNaturalAlignIndirect(
- Ty, Info.getDataLayout().getAllocaAddrSpace());
- return true;
- }
+ if (const auto *RD = Ty->getAsRecordDecl();
+ RD && !isa<CXXRecordDecl>(RD) && !RD->canPassInRegisters()) {
+ FI.getReturnInfo() = Info.getNaturalAlignIndirect(
+ Ty, Info.getDataLayout().getAllocaAddrSpace());
+ return true;
+ }
return CXXABI.classifyReturnType(FI);
}
QualType CodeGen::useFirstFieldIfTransparentUnion(QualType Ty) {
if (const RecordType *UT = Ty->getAsUnionType()) {
- const RecordDecl *UD = UT->getDecl();
+ const RecordDecl *UD = UT->getOriginalDecl()->getDefinitionOrSelf();
if (UD->hasAttr<TransparentUnionAttr>()) {
assert(!UD->field_empty() && "sema created an empty transparent union");
return UD->field_begin()->getType();
@@ -262,7 +260,7 @@ bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
WasArray = true;
}
- const RecordType *RT = FT->getAs<RecordType>();
+ const RecordType *RT = FT->getAsCanonical<RecordType>();
if (!RT)
return false;
@@ -276,7 +274,7 @@ bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
// according to the Itanium ABI. The exception applies only to records,
// not arrays of records, so we must also check whether we stripped off an
// array type above.
- if (isa<CXXRecordDecl>(RT->getDecl()) &&
+ if (isa<CXXRecordDecl>(RT->getOriginalDecl()) &&
(WasArray || (!AsIfNoUniqueAddr && !FD->hasAttr<NoUniqueAddressAttr>())))
return false;
@@ -285,10 +283,9 @@ bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
bool CodeGen::isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays,
bool AsIfNoUniqueAddr) {
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
+ const auto *RD = T->getAsRecordDecl();
+ if (!RD)
return false;
- const RecordDecl *RD = RT->getDecl();
if (RD->hasFlexibleArrayMember())
return false;
@@ -316,12 +313,10 @@ bool CodeGen::isEmptyFieldForLayout(const ASTContext &Context,
}
bool CodeGen::isEmptyRecordForLayout(const ASTContext &Context, QualType T) {
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
+ const auto *RD = T->getAsRecordDecl();
+ if (!RD)
return false;
- const RecordDecl *RD = RT->getDecl();
-
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (CXXRD->isDynamicClass())
@@ -340,11 +335,10 @@ bool CodeGen::isEmptyRecordForLayout(const ASTContext &Context, QualType T) {
}
const Type *CodeGen::isSingleElementStruct(QualType T, ASTContext &Context) {
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
+ const auto *RD = T->getAsRecordDecl();
+ if (!RD)
return nullptr;
- const RecordDecl *RD = RT->getDecl();
if (RD->hasFlexibleArrayMember())
return nullptr;
@@ -460,10 +454,9 @@ bool CodeGen::isSIMDVectorType(ASTContext &Context, QualType Ty) {
}
bool CodeGen::isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT)
+ const auto *RD = Ty->getAsRecordDecl();
+ if (!RD)
return false;
- const RecordDecl *RD = RT->getDecl();
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
diff --git a/clang/lib/CodeGen/Address.h b/clang/lib/CodeGen/Address.h
index a748dda..4e7f356 100644
--- a/clang/lib/CodeGen/Address.h
+++ b/clang/lib/CodeGen/Address.h
@@ -176,6 +176,11 @@ public:
static Address invalid() { return Address(nullptr); }
bool isValid() const { return Pointer.getPointer() != nullptr; }
+ llvm::Value *getPointerIfNotSigned() const {
+ assert(isValid() && "pointer isn't valid");
+ return !isSigned() ? Pointer.getPointer() : nullptr;
+ }
+
/// This function is used in situations where the caller is doing some sort of
/// opaque "laundering" of the pointer.
void replaceBasePointer(llvm::Value *P) {
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 0b8b824..3f095c0 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -425,7 +425,6 @@ static bool initTargetOptions(const CompilerInstance &CI,
LangOptions::FPModeKind::FPM_Fast ||
LangOpts.getDefaultFPContractMode() ==
LangOptions::FPModeKind::FPM_FastHonorPragmas);
- Options.ApproxFuncFPMath = LangOpts.ApproxFunc;
Options.BBAddrMap = CodeGenOpts.BBAddrMap;
Options.BBSections =
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 0e80522..597127ab 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -188,13 +188,14 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
// Optional copy/dispose helpers.
bool hasInternalHelper = false;
if (blockInfo.NeedsCopyDispose) {
+ auto &Schema = CGM.getCodeGenOpts().PointerAuth.BlockHelperFunctionPointers;
// copy_func_helper_decl
llvm::Constant *copyHelper = buildCopyHelper(CGM, blockInfo);
- elements.add(copyHelper);
+ elements.addSignedPointer(copyHelper, Schema, GlobalDecl(), QualType());
// destroy_func_decl
llvm::Constant *disposeHelper = buildDisposeHelper(CGM, blockInfo);
- elements.add(disposeHelper);
+ elements.addSignedPointer(disposeHelper, Schema, GlobalDecl(), QualType());
if (cast<llvm::Function>(copyHelper->stripPointerCasts())
->hasInternalLinkage() ||
@@ -419,13 +420,11 @@ static void addBlockLayout(CharUnits align, CharUnits size,
/// Determines if the given type is safe for constant capture in C++.
static bool isSafeForCXXConstantCapture(QualType type) {
- const RecordType *recordType =
- type->getBaseElementTypeUnsafe()->getAs<RecordType>();
+ const auto *record = type->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
// Only records can be unsafe.
- if (!recordType) return true;
-
- const auto *record = cast<CXXRecordDecl>(recordType->getDecl());
+ if (!record)
+ return true;
// Maintain semantics for classes with non-trivial dtors or copy ctors.
if (!record->hasTrivialDestructor()) return false;
@@ -567,9 +566,8 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
info.CanBeGlobal = true;
return;
- }
- else if (C.getLangOpts().ObjC &&
- CGM.getLangOpts().getGC() == LangOptions::NonGC)
+ } else if (C.getLangOpts().ObjC &&
+ CGM.getLangOpts().getGC() == LangOptions::NonGC)
info.HasCapturedVariableLayout = true;
if (block->doesNotEscape())
@@ -783,7 +781,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
bool IsOpenCL = CGM.getContext().getLangOpts().OpenCL;
- auto GenVoidPtrTy =
+ llvm::PointerType *GenVoidPtrTy =
IsOpenCL ? CGM.getOpenCLRuntime().getGenericVoidPointerType() : VoidPtrTy;
LangAS GenVoidPtrAddr = IsOpenCL ? LangAS::opencl_generic : LangAS::Default;
auto GenVoidPtrSize = CharUnits::fromQuantity(
@@ -817,9 +815,6 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
: CGM.getNSConcreteStackBlock();
isa = blockISA;
- // Build the block descriptor.
- descriptor = buildBlockDescriptor(CGM, blockInfo);
-
// Compute the initial on-stack block flags.
if (!CGM.getCodeGenOpts().DisableBlockSignatureString)
flags = BLOCK_HAS_SIGNATURE;
@@ -833,6 +828,9 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
flags |= BLOCK_USE_STRET;
if (blockInfo.NoEscape)
flags |= BLOCK_IS_NOESCAPE | BLOCK_IS_GLOBAL;
+
+ // Build the block descriptor.
+ descriptor = buildBlockDescriptor(CGM, blockInfo);
}
auto projectField = [&](unsigned index, const Twine &name) -> Address {
@@ -883,11 +881,25 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
llvm::ConstantInt::get(IntTy, blockInfo.BlockAlign.getQuantity()),
getIntSize(), "block.align");
}
- addHeaderField(blockFn, GenVoidPtrSize, "block.invoke");
- if (!IsOpenCL)
- addHeaderField(descriptor, getPointerSize(), "block.descriptor");
- else if (auto *Helper =
- CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
+
+ if (!IsOpenCL) {
+ llvm::Value *blockFnPtr =
+ llvm::ConstantExpr::getBitCast(InvokeFn, VoidPtrTy);
+ QualType type = blockInfo.getBlockExpr()
+ ->getType()
+ ->castAs<BlockPointerType>()
+ ->getPointeeType();
+ addSignedHeaderField(
+ blockFnPtr,
+ CGM.getCodeGenOpts().PointerAuth.BlockInvocationFunctionPointers,
+ GlobalDecl(), type, getPointerSize(), "block.invoke");
+
+ addSignedHeaderField(
+ descriptor, CGM.getCodeGenOpts().PointerAuth.BlockDescriptorPointers,
+ GlobalDecl(), type, getPointerSize(), "block.descriptor");
+ } else if (auto *Helper =
+ CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
+ addHeaderField(blockFn, GenVoidPtrSize, "block.invoke");
for (auto I : Helper->getCustomFieldValues(*this, blockInfo)) {
addHeaderField(
I.first,
@@ -895,7 +907,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
CGM.getDataLayout().getTypeAllocSize(I.first->getType())),
I.second);
}
- }
+ } else
+ addHeaderField(blockFn, GenVoidPtrSize, "block.invoke");
}
// Finally, capture all the values into the block.
@@ -1166,6 +1179,8 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
ASTContext &Ctx = getContext();
CallArgList Args;
+ llvm::Value *FuncPtr = nullptr;
+
if (getLangOpts().OpenCL) {
// For OpenCL, BlockPtr is already casted to generic block literal.
@@ -1185,7 +1200,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
if (!isa<ParmVarDecl>(E->getCalleeDecl()))
Func = CGM.getOpenCLRuntime().getInvokeFunction(E->getCallee());
else {
- llvm::Value *FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 2);
+ FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 2);
Func = Builder.CreateAlignedLoad(GenericVoidPtrTy, FuncPtr,
getPointerAlign());
}
@@ -1194,7 +1209,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
BlockPtr =
Builder.CreatePointerCast(BlockPtr, UnqualPtrTy, "block.literal");
// Get pointer to the block invoke function
- llvm::Value *FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 3);
+ FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 3);
// First argument is a block literal casted to a void pointer
BlockPtr = Builder.CreatePointerCast(BlockPtr, VoidPtrTy);
@@ -1211,7 +1226,15 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
CGM.getTypes().arrangeBlockFunctionCall(Args, FuncTy);
// Prepare the callee.
- CGCallee Callee(CGCalleeInfo(), Func);
+ CGPointerAuthInfo PointerAuth;
+ if (auto &AuthSchema =
+ CGM.getCodeGenOpts().PointerAuth.BlockInvocationFunctionPointers) {
+ assert(FuncPtr != nullptr && "Missing function pointer for AuthInfo");
+ PointerAuth =
+ EmitPointerAuthInfo(AuthSchema, FuncPtr, GlobalDecl(), FnType);
+ }
+
+ CGCallee Callee(CGCalleeInfo(), Func, PointerAuth);
// And call the block.
return EmitCall(FnInfo, Callee, ReturnValue, Args, CallOrInvoke);
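A reviewer-level sketch of what the hoisted FuncPtr buys: the call can now carry pointer-auth info describing the slot the invoke pointer was loaded from. Assuming a target where the BlockInvocationFunctionPointers schema is enabled and -fblocks is on, source like the following is affected:

    // With invoke-pointer signing on, the indirect call no longer branches on
    // a raw loaded pointer: codegen authenticates the signed block->invoke
    // slot (discriminated by its address) before making the call.
    int (^inc)(int) = ^(int x) { return x + 1; };
    int r = inc(41);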
@@ -1295,14 +1318,15 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
bool IsOpenCL = CGM.getLangOpts().OpenCL;
bool IsWindows = CGM.getTarget().getTriple().isOSWindows();
+ auto &CGOPointerAuth = CGM.getCodeGenOpts().PointerAuth;
if (!IsOpenCL) {
// isa
if (IsWindows)
fields.addNullPointer(CGM.Int8PtrPtrTy);
else
fields.addSignedPointer(CGM.getNSConcreteGlobalBlock(),
- CGM.getCodeGenOpts().PointerAuth.ObjCIsaPointers,
- GlobalDecl(), QualType());
+ CGOPointerAuth.ObjCIsaPointers, GlobalDecl(),
+ QualType());
// __flags
BlockFlags flags = BLOCK_IS_GLOBAL;
@@ -1321,11 +1345,20 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
}
// Function
- fields.add(blockFn);
+ if (auto &Schema = CGOPointerAuth.BlockInvocationFunctionPointers) {
+ QualType FnType = blockInfo.getBlockExpr()
+ ->getType()
+ ->castAs<BlockPointerType>()
+ ->getPointeeType();
+ fields.addSignedPointer(blockFn, Schema, GlobalDecl(), FnType);
+ } else
+ fields.add(blockFn);
if (!IsOpenCL) {
// Descriptor
- fields.add(buildBlockDescriptor(CGM, blockInfo));
+ llvm::Constant *Descriptor = buildBlockDescriptor(CGM, blockInfo);
+ fields.addSignedPointer(Descriptor, CGOPointerAuth.BlockDescriptorPointers,
+ GlobalDecl(), QualType());
} else if (auto *Helper =
CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
for (auto *I : Helper->getCustomFieldValues(CGM, blockInfo)) {
@@ -1995,8 +2028,8 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// it. It's not quite worth the annoyance to avoid creating it in the
// first place.
if (!needsEHCleanup(captureType.isDestructedType()))
- if (auto *I =
- cast_or_null<llvm::Instruction>(dstField.getBasePointer()))
+ if (auto *I = cast_or_null<llvm::Instruction>(
+ dstField.getPointerIfNotSigned()))
I->eraseFromParent();
}
break;
@@ -2730,8 +2763,16 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
unsigned nextHeaderIndex = 0;
CharUnits nextHeaderOffset;
auto storeHeaderField = [&](llvm::Value *value, CharUnits fieldSize,
- const Twine &name) {
+ const Twine &name, bool isFunction = false) {
auto fieldAddr = Builder.CreateStructGEP(addr, nextHeaderIndex, name);
+ if (isFunction) {
+ if (auto &Schema = CGM.getCodeGenOpts()
+ .PointerAuth.BlockByrefHelperFunctionPointers) {
+ auto PointerAuth = EmitPointerAuthInfo(
+ Schema, fieldAddr.emitRawPointer(*this), GlobalDecl(), QualType());
+ value = EmitPointerAuthSign(PointerAuth, value);
+ }
+ }
Builder.CreateStore(value, fieldAddr);
nextHeaderIndex++;
@@ -2814,10 +2855,10 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
storeHeaderField(V, getIntSize(), "byref.size");
if (helpers) {
- storeHeaderField(helpers->CopyHelper, getPointerSize(),
- "byref.copyHelper");
+ storeHeaderField(helpers->CopyHelper, getPointerSize(), "byref.copyHelper",
+ /*isFunction=*/true);
storeHeaderField(helpers->DisposeHelper, getPointerSize(),
- "byref.disposeHelper");
+ "byref.disposeHelper", /*isFunction=*/true);
}
if (ByRefHasLifetime && HasByrefExtendedLayout) {
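The same treatment reaches __block byref headers: storeHeaderField grows an isFunction flag so the copy/dispose helper pointers are signed against the field address when the BlockByrefHelperFunctionPointers schema is enabled. A minimal sketch of source that exercises this path:

    __block int counter = 0;              // gets a heap-movable byref header
    void (^bump)(void) = ^{ ++counter; }; // copying the block copies the byref
    // byref.copyHelper / byref.disposeHelper are now stored signed (schema
    // permitting); consumers must authenticate them before calling.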
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index a648bde..172a521 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -997,9 +997,8 @@ static const FieldDecl *FindFlexibleArrayMemberField(CodeGenFunction &CGF,
/*IgnoreTemplateOrMacroSubstitution=*/true))
return FD;
- if (auto RT = FD->getType()->getAs<RecordType>())
- if (const FieldDecl *FD =
- FindFlexibleArrayMemberField(CGF, Ctx, RT->getAsRecordDecl()))
+ if (const auto *RD = FD->getType()->getAsRecordDecl())
+ if (const FieldDecl *FD = FindFlexibleArrayMemberField(CGF, Ctx, RD))
return FD;
}
@@ -1025,8 +1024,8 @@ static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD,
return true;
}
- if (auto RT = Field->getType()->getAs<RecordType>()) {
- if (GetFieldOffset(Ctx, RT->getAsRecordDecl(), FD, Offset)) {
+ if (const auto *RD = Field->getType()->getAsRecordDecl()) {
+ if (GetFieldOffset(Ctx, RD, FD, Offset)) {
Offset += Layout.getFieldOffset(FieldNo);
return true;
}
@@ -1693,6 +1692,23 @@ getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
llvm_unreachable("invalid interlocking");
}
+static llvm::Value *EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E) {
+ llvm::Value *ArgValue = CGF.EmitScalarExpr(E);
+ llvm::Type *ArgType = ArgValue->getType();
+
+ // Boolean vectors can be bitcast directly to their bitfield representation.
+ // We intentionally do not round up to the next power-of-two size and let
+ // LLVM handle the trailing bits.
+ if (auto *VT = dyn_cast<llvm::FixedVectorType>(ArgType);
+ VT && VT->getElementType()->isIntegerTy(1)) {
+ llvm::Type *StorageType =
+ llvm::Type::getIntNTy(CGF.getLLVMContext(), VT->getNumElements());
+ ArgValue = CGF.Builder.CreateBitCast(ArgValue, StorageType);
+ }
+
+ return ArgValue;
+}
+
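The helper is easiest to see on a boolean vector: an <N x i1> value is bitcast to an iN integer before the counting intrinsic runs. A sketch, assuming the matching Sema change lets the generic bit-count builtins accept boolean vectors:

    typedef bool bool8 __attribute__((ext_vector_type(8)));

    int count_set(bool8 m) {
      // <8 x i1> is bitcast to i8 by EmitBitCountExpr, then lowered to
      // llvm.ctpop.i8; any trailing bits are left for LLVM to handle.
      return __builtin_popcountg(m);
    }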
/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be larger
@@ -2020,7 +2036,7 @@ Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
"Unsupported builtin check kind");
- Value *ArgValue = EmitScalarExpr(E);
+ Value *ArgValue = EmitBitCountExpr(*this, E);
if (!SanOpts.has(SanitizerKind::Builtin))
return ArgValue;
@@ -3326,20 +3342,25 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
case Builtin::BI__builtin_ctzll:
- case Builtin::BI__builtin_ctzg: {
- bool HasFallback = BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg &&
- E->getNumArgs() > 1;
+ case Builtin::BI__builtin_ctzg:
+ case Builtin::BI__builtin_elementwise_cttz: {
+ bool HasFallback =
+ (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg ||
+ BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_cttz) &&
+ E->getNumArgs() > 1;
Value *ArgValue =
- HasFallback ? EmitScalarExpr(E->getArg(0))
+ HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
: EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *ZeroUndef =
- Builder.getInt1(HasFallback || getTarget().isCLZForZeroUndef());
+ // The elementwise builtins always exhibit zero-is-undef behaviour.
+ Value *ZeroUndef = Builder.getInt1(
+ HasFallback || getTarget().isCLZForZeroUndef() ||
+ BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_cttz);
Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
if (Result->getType() != ResultType)
Result =
@@ -3358,20 +3379,25 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_clz:
case Builtin::BI__builtin_clzl:
case Builtin::BI__builtin_clzll:
- case Builtin::BI__builtin_clzg: {
- bool HasFallback = BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg &&
- E->getNumArgs() > 1;
+ case Builtin::BI__builtin_clzg:
+ case Builtin::BI__builtin_elementwise_ctlz: {
+ bool HasFallback =
+ (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg ||
+ BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctlz) &&
+ E->getNumArgs() > 1;
Value *ArgValue =
- HasFallback ? EmitScalarExpr(E->getArg(0))
+ HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
: EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *ZeroUndef =
- Builder.getInt1(HasFallback || getTarget().isCLZForZeroUndef());
+ // The elementwise builtins always exhibit zero-is-undef behaviour.
+ Value *ZeroUndef = Builder.getInt1(
+ HasFallback || getTarget().isCLZForZeroUndef() ||
+ BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctlz);
Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
if (Result->getType() != ResultType)
Result =
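Net effect of these two hunks: __builtin_elementwise_ctlz/cttz share the existing clz/ctz lowering and, per the comments above, always set the zero-is-poison flag on the intrinsic; supplying the optional second argument routes through the fallback path instead. A sketch:

    typedef unsigned uint4 __attribute__((ext_vector_type(4)));

    uint4 lanes_clz(uint4 v) {
      // Lowered to llvm.ctlz.v4i32 with zero-is-poison set; a zero lane is
      // only well-defined in the two-argument (fallback) form.
      return __builtin_elementwise_ctlz(v);
    }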
@@ -3446,7 +3472,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll:
case Builtin::BI__builtin_popcountg: {
- Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ Value *ArgValue = EmitBitCountExpr(*this, E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
@@ -4030,6 +4056,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_elementwise_fma:
return RValue::get(
emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fma));
+ case Builtin::BI__builtin_elementwise_fshl:
+ return RValue::get(
+ emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshl));
+ case Builtin::BI__builtin_elementwise_fshr:
+ return RValue::get(
+ emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshr));
+
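These map one-to-one onto the llvm.fshl/llvm.fshr funnel-shift intrinsics; with both value operands equal they degenerate into rotates, which is the expected common use. A sketch:

    unsigned rotl32(unsigned x, unsigned n) {
      // fshl(x, x, n) shifts the concatenation x:x left by n % 32 and keeps
      // the high word: a rotate-left.
      return __builtin_elementwise_fshl(x, x, n);
    }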
case Builtin::BI__builtin_elementwise_add_sat:
case Builtin::BI__builtin_elementwise_sub_sat: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
@@ -4238,6 +4271,59 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Result);
}
+ case Builtin::BI__builtin_masked_load:
+ case Builtin::BI__builtin_masked_expand_load: {
+ llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Ptr = EmitScalarExpr(E->getArg(1));
+
+ llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
+ CharUnits Align = CGM.getNaturalTypeAlignment(E->getType(), nullptr);
+ llvm::Value *AlignVal =
+ llvm::ConstantInt::get(Int32Ty, Align.getQuantity());
+
+ llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
+ if (E->getNumArgs() > 2)
+ PassThru = EmitScalarExpr(E->getArg(2));
+
+ llvm::Value *Result;
+ if (BuiltinID == Builtin::BI__builtin_masked_load) {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::masked_load, {RetTy, UnqualPtrTy});
+ Result =
+ Builder.CreateCall(F, {Ptr, AlignVal, Mask, PassThru}, "masked_load");
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::masked_expandload, {RetTy});
+ Result =
+ Builder.CreateCall(F, {Ptr, Mask, PassThru}, "masked_expand_load");
+ }
+ return RValue::get(Result);
+ }
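Argument order follows the emission above: mask, then pointer, then an optional pass-through (masked-off lanes are poison without it). A usage sketch with Clang's ext_vector_type extension:

    typedef int  int8v __attribute__((ext_vector_type(8)));
    typedef bool bool8 __attribute__((ext_vector_type(8)));

    int8v load_active(bool8 m, int8v *p, int8v fallback) {
      // Inactive lanes take fallback; memory behind them is never touched.
      return __builtin_masked_load(m, p, fallback);
    }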
+ case Builtin::BI__builtin_masked_store:
+ case Builtin::BI__builtin_masked_compress_store: {
+ llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Val = EmitScalarExpr(E->getArg(1));
+ llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
+
+ QualType ValTy = E->getArg(1)->getType();
+ llvm::Type *ValLLTy = CGM.getTypes().ConvertType(ValTy);
+ llvm::Type *PtrTy = Ptr->getType();
+
+ CharUnits Align = CGM.getNaturalTypeAlignment(ValTy, nullptr);
+ llvm::Value *AlignVal =
+ llvm::ConstantInt::get(Int32Ty, Align.getQuantity());
+
+ if (BuiltinID == Builtin::BI__builtin_masked_store) {
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::masked_store, {ValLLTy, PtrTy});
+ Builder.CreateCall(F, {Val, Ptr, AlignVal, Mask});
+ } else {
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::masked_compressstore, {ValLLTy});
+ Builder.CreateCall(F, {Val, Ptr, Mask});
+ }
+ return RValue::get(nullptr);
+ }
+
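The store-side counterparts take mask, value, pointer and return nothing. Continuing the sketch above:

    void store_active(bool8 m, int8v v, int8v *p) {
      // Writes only lanes with m[i] set; other bytes are left untouched.
      __builtin_masked_store(m, v, p);
    }

    void pack_active(bool8 m, int8v v, int8v *out) {
      // Stores the active lanes contiguously from the start of *out.
      __builtin_masked_compress_store(m, v, out);
    }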
case Builtin::BI__builtin_isinf_sign: {
// isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
@@ -5985,8 +6071,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Create a temporary array to hold the sizes of local pointer arguments
// for the block. \p First is the position of the first size argument.
- auto CreateArrayForSizeVar = [=](unsigned First)
- -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
+ auto CreateArrayForSizeVar =
+ [=](unsigned First) -> std::pair<llvm::Value *, llvm::Value *> {
llvm::APInt ArraySize(32, NumArgs - First);
QualType SizeArrayTy = getContext().getConstantArrayType(
getContext().getSizeType(), ArraySize, nullptr,
@@ -5999,9 +6085,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// actually the Alloca ascasted to the default AS, hence the
// stripPointerCasts()
llvm::Value *Alloca = TmpPtr->stripPointerCasts();
- llvm::Value *TmpSize = EmitLifetimeStart(
- CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), Alloca);
llvm::Value *ElemPtr;
+ EmitLifetimeStart(Alloca);
// Each of the following arguments specifies the size of the corresponding
// argument passed to the enqueued block.
auto *Zero = llvm::ConstantInt::get(IntTy, 0);
@@ -6018,7 +6103,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
// Return the Alloca itself rather than a potential ascast as this is only
// used by the paired EmitLifetimeEnd.
- return {ElemPtr, TmpSize, Alloca};
+ return {ElemPtr, Alloca};
};
// Could have events and/or varargs.
@@ -6030,7 +6115,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *Kernel =
Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- auto [ElemPtr, TmpSize, TmpPtr] = CreateArrayForSizeVar(4);
+ auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4);
// Create a vector of the arguments, as well as a constant value to
// express to the runtime the number of variadic arguments.
@@ -6045,8 +6130,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
auto Call = RValue::get(
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
- if (TmpSize)
- EmitLifetimeEnd(TmpSize, TmpPtr);
+ EmitLifetimeEnd(TmpPtr);
return Call;
}
// Any calls now have event arguments passed.
@@ -6111,15 +6195,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
ArgTys.push_back(Int32Ty);
Name = "__enqueue_kernel_events_varargs";
- auto [ElemPtr, TmpSize, TmpPtr] = CreateArrayForSizeVar(7);
+ auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(7);
Args.push_back(ElemPtr);
ArgTys.push_back(ElemPtr->getType());
llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
auto Call = RValue::get(
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
- if (TmpSize)
- EmitLifetimeEnd(TmpSize, TmpPtr);
+ EmitLifetimeEnd(TmpPtr);
return Call;
}
llvm_unreachable("Unexpected enqueue_kernel signature");
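The lifetime-marker churn here (and throughout CGCall.cpp below) reflects one API shift: EmitLifetimeStart now takes only the alloca, recovering the size itself, and reports whether a marker was emitted. A sketch of the revised idiom, under that assumed signature:

    bool HaveMarker = EmitLifetimeStart(Alloca); // false if markers disabled
    // ... emit the runtime call using Alloca ...
    if (HaveMarker)
      EmitLifetimeEnd(Alloca); // the end marker pairs with the raw alloca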
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index dd26be7..5090a05 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -1131,7 +1131,7 @@ void CGNVCUDARuntime::handleVarRegistration(const VarDecl *D,
// Builtin surfaces and textures and their template arguments are
// also registered with CUDA runtime.
const auto *TD = cast<ClassTemplateSpecializationDecl>(
- D->getType()->castAs<RecordType>()->getDecl());
+ D->getType()->castAsCXXRecordDecl());
const TemplateArgumentList &Args = TD->getTemplateArgs();
if (TD->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>()) {
assert(Args.size() == 2 &&
diff --git a/clang/lib/CodeGen/CGCXX.cpp b/clang/lib/CodeGen/CGCXX.cpp
index 78a7b02..59aeff6 100644
--- a/clang/lib/CodeGen/CGCXX.cpp
+++ b/clang/lib/CodeGen/CGCXX.cpp
@@ -83,8 +83,7 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
if (I.isVirtual()) continue;
// Skip base classes with trivial destructors.
- const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *Base = I.getType()->castAsCXXRecordDecl();
if (Base->hasTrivialDestructor()) continue;
// If we've already found a base class with a non-trivial
@@ -277,19 +276,11 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
/// BuildAppleKextVirtualCall - This routine is to support gcc's kext ABI making
/// indirect call to virtual functions. It makes the call through indexing
/// into the vtable.
-CGCallee
-CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
- NestedNameSpecifier *Qual,
- llvm::Type *Ty) {
- assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
- "BuildAppleKextVirtualCall - bad Qual kind");
-
- const Type *QTy = Qual->getAsType();
- QualType T = QualType(QTy, 0);
- const RecordType *RT = T->getAs<RecordType>();
- assert(RT && "BuildAppleKextVirtualCall - Qual type must be record");
- const auto *RD = cast<CXXRecordDecl>(RT->getDecl());
-
+CGCallee CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier Qual,
+ llvm::Type *Ty) {
+ const CXXRecordDecl *RD = Qual.getAsRecordDecl();
+ assert(RD && "BuildAppleKextVirtualCall - Qual must be record");
if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD))
return BuildAppleKextVirtualDestructorCall(DD, Dtor_Complete, RD);
diff --git a/clang/lib/CodeGen/CGCXXABI.cpp b/clang/lib/CodeGen/CGCXXABI.cpp
index d42e0bb8..30e5dc2 100644
--- a/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/clang/lib/CodeGen/CGCXXABI.cpp
@@ -52,7 +52,7 @@ CGCallee CGCXXABI::EmitLoadOfMemberFunctionPointer(
const auto *RD = MPT->getMostRecentCXXRecordDecl();
ThisPtrForCall =
- CGF.getAsNaturalPointerTo(This, CGF.getContext().getRecordType(RD));
+ CGF.getAsNaturalPointerTo(This, CGF.getContext().getCanonicalTagType(RD));
const FunctionProtoType *FPT =
MPT->getPointeeType()->getAs<FunctionProtoType>();
llvm::Constant *FnPtr = llvm::Constant::getNullValue(
@@ -106,7 +106,7 @@ CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
llvm::Constant *CGCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
return GetBogusMemberPointer(CGM.getContext().getMemberPointerType(
- MD->getType(), /*Qualifier=*/nullptr, MD->getParent()));
+ MD->getType(), /*Qualifier=*/std::nullopt, MD->getParent()));
}
llvm::Constant *CGCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
@@ -165,10 +165,7 @@ bool CGCXXABI::mayNeedDestruction(const VarDecl *VD) const {
// If the variable has an incomplete class type (or array thereof), it
// might need destruction.
const Type *T = VD->getType()->getBaseElementTypeUnsafe();
- if (T->getAs<RecordType>() && T->isIncompleteType())
- return true;
-
- return false;
+ return T->isRecordType() && T->isIncompleteType();
}
bool CGCXXABI::isEmittedWithConstantInitializer(
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index d9bd443..c024f94 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -125,16 +125,16 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
const CXXMethodDecl *MD) {
- QualType RecTy;
+ CanQualType RecTy;
if (RD)
- RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+ RecTy = Context.getCanonicalTagType(RD);
else
RecTy = Context.VoidTy;
if (MD)
- RecTy = Context.getAddrSpaceQualType(
- RecTy, MD->getMethodQualifiers().getAddressSpace());
- return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
+ RecTy = CanQualType::CreateUnsafe(Context.getAddrSpaceQualType(
+ RecTy, MD->getMethodQualifiers().getAddressSpace()));
+ return Context.getPointerType(RecTy);
}
/// Returns the canonical formal type of the given C++ method.
@@ -1005,10 +1005,9 @@ getTypeExpansion(QualType Ty, const ASTContext &Context) {
return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),
AT->getZExtSize());
}
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (const auto *RD = Ty->getAsRecordDecl()) {
SmallVector<const CXXBaseSpecifier *, 1> Bases;
SmallVector<const FieldDecl *, 1> Fields;
- const RecordDecl *RD = RT->getDecl();
assert(!RD->hasFlexibleArrayMember() &&
"Cannot expand structure with flexible array.");
if (RD->isUnion()) {
@@ -1602,6 +1601,7 @@ void ClangToLLVMArgMapping::construct(const ASTContext &Context,
IRArgs.PaddingArgIndex = IRArgNo++;
switch (AI.getKind()) {
+ case ABIArgInfo::TargetSpecific:
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
// FIXME: handle sseregparm someday...
@@ -1712,6 +1712,7 @@ llvm::FunctionType *CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
case ABIArgInfo::IndirectAliased:
llvm_unreachable("Invalid ABI kind for return argument");
+ case ABIArgInfo::TargetSpecific:
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
resultType = retAI.getCoerceToType();
@@ -1784,6 +1785,7 @@ llvm::FunctionType *CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
ArgTypes[FirstIRArg] = llvm::PointerType::get(
getLLVMContext(), ArgInfo.getIndirectAddrSpace());
break;
+ case ABIArgInfo::TargetSpecific:
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
// Fast-isel and the optimizer generally like scalar values better than
@@ -1894,8 +1896,8 @@ bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
// We can't just discard the return value for a record type with a
// complex destructor or a non-trivially copyable type.
if (const RecordType *RT =
- ReturnType.getCanonicalType()->getAs<RecordType>()) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ ReturnType.getCanonicalType()->getAsCanonical<RecordType>()) {
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl()))
return ClassDecl->hasTrivialDestructor();
}
return ReturnType.isTriviallyCopyableType(Context);
@@ -2011,8 +2013,6 @@ static void getTrivialDefaultFunctionAttributes(
FuncAttrs.addAttribute("no-infs-fp-math", "true");
if (LangOpts.NoHonorNaNs)
FuncAttrs.addAttribute("no-nans-fp-math", "true");
- if (LangOpts.ApproxFunc)
- FuncAttrs.addAttribute("approx-func-fp-math", "true");
if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
(LangOpts.getDefaultFPContractMode() ==
@@ -2697,6 +2697,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
else
RetAttrs.addAttribute(llvm::Attribute::NoExt);
[[fallthrough]];
+ case ABIArgInfo::TargetSpecific:
case ABIArgInfo::Direct:
if (RetAI.getInReg())
RetAttrs.addAttribute(llvm::Attribute::InReg);
@@ -2838,6 +2839,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
else
Attrs.addAttribute(llvm::Attribute::NoExt);
[[fallthrough]];
+ case ABIArgInfo::TargetSpecific:
case ABIArgInfo::Direct:
if (ArgNo == 0 && FI.isChainCall())
Attrs.addAttribute(llvm::Attribute::Nest);
@@ -2869,9 +2871,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
// whose destruction / clean-up is carried out within the callee
// (e.g., Obj-C ARC-managed structs, MSVC callee-destroyed objects).
if (!ParamType.isDestructedType() || !ParamType->isRecordType() ||
- ParamType->castAs<RecordType>()
- ->getDecl()
- ->isParamDestroyedInCallee())
+ ParamType->castAsRecordDecl()->isParamDestroyedInCallee())
Attrs.addAttribute(llvm::Attribute::DeadOnReturn);
}
}
@@ -3356,17 +3356,6 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
}
}
- // Struct of fixed-length vectors and struct of array of fixed-length
- // vector in VLS calling convention are coerced to vector tuple
- // type(represented as TargetExtType) and scalable vector type
- // respectively, they're no longer handled as struct.
- if (ArgI.isDirect() && isa<llvm::StructType>(ConvertType(Ty)) &&
- (isa<llvm::TargetExtType>(ArgI.getCoerceToType()) ||
- isa<llvm::ScalableVectorType>(ArgI.getCoerceToType()))) {
- ArgVals.push_back(ParamValue::forDirect(AI));
- break;
- }
-
llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
Address Alloca =
@@ -3507,6 +3496,25 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
break;
}
+ case ABIArgInfo::TargetSpecific: {
+ auto *AI = Fn->getArg(FirstIRArg);
+ AI->setName(Arg->getName() + ".target_coerce");
+ Address Alloca =
+ CreateMemTemp(Ty, getContext().getDeclAlign(Arg), Arg->getName());
+ Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
+ CGM.getABIInfo().createCoercedStore(AI, Ptr, ArgI, false, *this);
+ if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
+ llvm::Value *V =
+ EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
+ if (isPromoted) {
+ V = emitArgumentDemotion(*this, Arg, V);
+ }
+ ArgVals.push_back(ParamValue::forDirect(V));
+ } else {
+ ArgVals.push_back(ParamValue::forIndirect(Alloca));
+ }
+ break;
+ }
case ABIArgInfo::Ignore:
assert(NumIRArgs == 0);
// Initialize the local variable appropriately.
@@ -3828,7 +3836,7 @@ static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
SmallVectorImpl<uint64_t> &Bits) {
ASTContext &Context = CGM.getContext();
int CharWidth = Context.getCharWidth();
- const RecordDecl *RD = RTy->getDecl()->getDefinition();
+ const RecordDecl *RD = RTy->getOriginalDecl()->getDefinition();
const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
@@ -3876,7 +3884,7 @@ static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
// the type `QTy`.
static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
SmallVectorImpl<uint64_t> &Bits) {
- if (const auto *RTy = QTy->getAs<RecordType>())
+ if (const auto *RTy = QTy->getAsCanonical<RecordType>())
return setUsedBits(CGM, RTy, Offset, Bits);
ASTContext &Context = CGM.getContext();
@@ -3920,7 +3928,7 @@ llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
const llvm::DataLayout &DataLayout = CGM.getDataLayout();
int Size = DataLayout.getTypeStoreSize(ITy);
SmallVector<uint64_t, 4> Bits(Size);
- setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
+ setUsedBits(CGM, QTy->castAsCanonical<RecordType>(), 0, Bits);
int CharWidth = CGM.getContext().getCharWidth();
uint64_t Mask =
@@ -3937,7 +3945,7 @@ llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
const llvm::DataLayout &DataLayout = CGM.getDataLayout();
int Size = DataLayout.getTypeStoreSize(ATy);
SmallVector<uint64_t, 16> Bits(Size);
- setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
+ setUsedBits(CGM, QTy->castAsCanonical<RecordType>(), 0, Bits);
// Clear each element of the LLVM array.
int CharWidth = CGM.getContext().getCharWidth();
@@ -4135,6 +4143,11 @@ void CodeGenFunction::EmitFunctionEpilog(
}
break;
}
+ case ABIArgInfo::TargetSpecific: {
+ Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
+ RV = CGM.getABIInfo().createCoercedLoad(V, RetAI, *this);
+ break;
+ }
case ABIArgInfo::Expand:
case ABIArgInfo::IndirectAliased:
llvm_unreachable("Invalid ABI kind for return argument");
@@ -4289,7 +4302,7 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
// Deactivate the cleanup for the callee-destructed param that was pushed.
if (type->isRecordType() && !CurFuncIsThunk &&
- type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
+ type->castAsRecordDecl()->isParamDestroyedInCallee() &&
param->needsDestruction(getContext())) {
EHScopeStack::stable_iterator cleanup =
CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
@@ -4319,10 +4332,7 @@ static void emitWriteback(CodeGenFunction &CGF,
if (writeback.WritebackExpr) {
CGF.EmitIgnoredExpr(writeback.WritebackExpr);
-
- if (writeback.LifetimeSz)
- CGF.EmitLifetimeEnd(writeback.LifetimeSz,
- writeback.Temporary.getBasePointer());
+ CGF.EmitLifetimeEnd(writeback.Temporary.getBasePointer());
return;
}
@@ -4886,7 +4896,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
// However, we still have to push an EH-only cleanup in case we unwind before
// we make it to the call.
if (type->isRecordType() &&
- type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
+ type->castAsRecordDecl()->isParamDestroyedInCallee()) {
// If we're using inalloca, use the argument memory. Otherwise, use a
// temporary.
AggValueSlot Slot = args.isUsingInAlloca()
@@ -5240,9 +5250,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// since otherwise we could be making a conditional call after a check for
// the proper cpu features (and it won't cause code generation issues due to
// function based code generation).
- if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
- (TargetDecl->hasAttr<TargetAttr>() ||
- (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>())))
+ if ((TargetDecl->hasAttr<AlwaysInlineAttr>() &&
+ (TargetDecl->hasAttr<TargetAttr>() ||
+ (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>()))) ||
+ (CurFuncDecl && CurFuncDecl->hasAttr<FlattenAttr>() &&
+ (CurFuncDecl->hasAttr<TargetAttr>() ||
+ TargetDecl->hasAttr<TargetAttr>())))
checkTargetFeatures(Loc, FD);
}
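The widened condition also covers flatten: a flatten-marked caller force-inlines its callees, so a callee built for a different feature set needs the same diagnosis as an always_inline one. A sketch of the newly checked situation (x86 features chosen purely for illustration):

    __attribute__((target("avx2"))) int kernel(void);

    __attribute__((flatten)) int run(void) {
      // Now checked: flatten will try to inline kernel() into run(), so
      // run() must provide kernel()'s required target features.
      return kernel();
    }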
@@ -5282,7 +5295,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
Address SRetPtr = Address::invalid();
- llvm::Value *UnusedReturnSizePtr = nullptr;
+ bool NeedSRetLifetimeEnd = false;
if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
// For virtual function pointer thunks and musttail calls, we must always
// forward an incoming SRet pointer to the callee, because a local alloca
@@ -5296,11 +5309,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
SRetPtr = ReturnValue.getAddress();
} else {
SRetPtr = CreateMemTempWithoutCast(RetTy, "tmp");
- if (HaveInsertPoint() && ReturnValue.isUnused()) {
- llvm::TypeSize size =
- CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
- UnusedReturnSizePtr = EmitLifetimeStart(size, SRetPtr.getBasePointer());
- }
+ if (HaveInsertPoint() && ReturnValue.isUnused())
+ NeedSRetLifetimeEnd = EmitLifetimeStart(SRetPtr.getBasePointer());
}
if (IRFunctionArgs.hasSRetArg()) {
// A mismatch between the allocated return value's AS and the target's
@@ -5484,15 +5494,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Val = Builder.CreateFreeze(Val);
IRCallArgs[FirstIRArg] = Val;
- // Emit lifetime markers for the temporary alloca.
- llvm::TypeSize ByvalTempElementSize =
- CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
- llvm::Value *LifetimeSize =
- EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
-
- // Add cleanup code to emit the end lifetime marker after the call.
- if (LifetimeSize) // In case we disabled lifetime markers.
- CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
+ // Emit lifetime markers for the temporary alloca and add cleanup code to
+ // emit the end lifetime marker after the call.
+ if (EmitLifetimeStart(AI.getPointer()))
+ CallLifetimeEndAfterCall.emplace_back(AI);
// Generate the copy.
I->copyInto(*this, AI);
@@ -5653,9 +5658,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
auto unpaddedCoercionType = ArgInfo.getUnpaddedCoerceAndExpandType();
auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
- llvm::Value *tempSize = nullptr;
Address addr = Address::invalid();
RawAddress AllocaAddr = RawAddress::invalid();
+ bool NeedLifetimeEnd = false;
if (I->isAggregate()) {
addr = I->hasLValue() ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
@@ -5665,7 +5670,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(RV.isScalar()); // complex should always just be direct
llvm::Type *scalarType = RV.getScalarVal()->getType();
- auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(scalarType);
// Materialize to a temporary.
@@ -5674,7 +5678,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
layout->getAlignment(), scalarAlign)),
"tmp",
/*ArraySize=*/nullptr, &AllocaAddr);
- tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
+ NeedLifetimeEnd = EmitLifetimeStart(AllocaAddr.getPointer());
Builder.CreateStore(RV.getScalarVal(), addr);
}
@@ -5699,10 +5703,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
assert(IRArgPos == FirstIRArg + NumIRArgs);
- if (tempSize) {
- EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
- }
-
+ if (NeedLifetimeEnd)
+ EmitLifetimeEnd(AllocaAddr.getPointer());
break;
}
@@ -5712,6 +5714,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(IRArgPos == FirstIRArg + NumIRArgs);
break;
}
+
+ case ABIArgInfo::TargetSpecific: {
+ Address Src = Address::invalid();
+ if (!I->isAggregate()) {
+ Src = CreateMemTemp(I->Ty, "target_coerce");
+ I->copyInto(*this, Src);
+ } else {
+ Src = I->hasLValue() ? I->getKnownLValue().getAddress()
+ : I->getKnownRValue().getAggregateAddress();
+ }
+
+ // If the value is offset in memory, apply the offset now.
+ Src = emitAddressAtOffset(*this, Src, ArgInfo);
+ llvm::Value *Load =
+ CGM.getABIInfo().createCoercedLoad(Src, ArgInfo, *this);
+ IRCallArgs[FirstIRArg] = Load;
+ break;
+ }
}
}
@@ -5871,9 +5891,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// can't depend on being inside of an ExprWithCleanups, so we need to manually
// pop this cleanup later on. Being eager about this is OK, since this
// temporary is 'invisible' outside of the callee.
- if (UnusedReturnSizePtr)
- pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetPtr,
- UnusedReturnSizePtr);
+ if (NeedSRetLifetimeEnd)
+ pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetPtr);
llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
@@ -6007,7 +6026,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// insertion point; this allows the rest of IRGen to discard
// unreachable code.
if (CI->doesNotReturn()) {
- if (UnusedReturnSizePtr)
+ if (NeedSRetLifetimeEnd)
PopCleanupBlock();
// Strip away the noreturn attribute to better diagnose unreachable UB.
@@ -6122,7 +6141,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::InAlloca:
case ABIArgInfo::Indirect: {
RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
- if (UnusedReturnSizePtr)
+ if (NeedSRetLifetimeEnd)
PopCleanupBlock();
return ret;
}
@@ -6198,6 +6217,19 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
return convertTempToRValue(DestPtr, RetTy, SourceLocation());
}
+ case ABIArgInfo::TargetSpecific: {
+ Address DestPtr = ReturnValue.getValue();
+ Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
+ bool DestIsVolatile = ReturnValue.isVolatile();
+ if (!DestPtr.isValid()) {
+ DestPtr = CreateMemTemp(RetTy, "target_coerce");
+ DestIsVolatile = false;
+ }
+ CGM.getABIInfo().createCoercedStore(CI, StorePtr, RetAI, DestIsVolatile,
+ *this);
+ return convertTempToRValue(DestPtr, RetTy, SourceLocation());
+ }
+
case ABIArgInfo::Expand:
case ABIArgInfo::IndirectAliased:
llvm_unreachable("Invalid ABI kind for return argument");
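For orientation, the new ABIArgInfo::TargetSpecific kind threads argument and return lowering through the target's ABIInfo (createCoercedLoad/createCoercedStore) rather than the generic Direct coercion. A hypothetical classifier showing the intent; getTargetSpecific and needsCustomPassing are named here for illustration only, so check ABIArgInfo for the actual factory:

    ABIArgInfo MyTargetABIInfo::classifyArgumentType(QualType Ty) const {
      if (needsCustomPassing(Ty)) // assumed target-specific predicate
        return ABIArgInfo::getTargetSpecific(CGT.ConvertType(Ty));
      return ABIArgInfo::getDirect();
    }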
diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h
index 0b4e3f9..3157b7f 100644
--- a/clang/lib/CodeGen/CGCall.h
+++ b/clang/lib/CodeGen/CGCall.h
@@ -289,9 +289,6 @@ public:
/// An Expression (optional) that performs the writeback with any required
/// casting.
const Expr *WritebackExpr;
-
- // Size for optional lifetime end on the temporary.
- llvm::Value *LifetimeSz;
};
struct CallArgCleanup {
@@ -321,9 +318,8 @@ public:
}
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse,
- const Expr *writebackExpr = nullptr,
- llvm::Value *lifetimeSz = nullptr) {
- Writeback writeback = {srcLV, temporary, toUse, writebackExpr, lifetimeSz};
+ const Expr *writebackExpr = nullptr) {
+ Writeback writeback = {srcLV, temporary, toUse, writebackExpr};
Writebacks.push_back(writeback);
}
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index 4a465e6..bae55aa 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -180,9 +180,7 @@ CharUnits CodeGenModule::computeNonVirtualBaseClassOffset(
// Get the layout.
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- const auto *BaseDecl =
- cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
-
+ const auto *BaseDecl = Base->getType()->castAsCXXRecordDecl();
// Add the offset.
Offset += Layout.getBaseClassOffset(BaseDecl);
@@ -300,8 +298,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
// *start* with a step down to the correct virtual base subobject,
// and hence will not require any further steps.
if ((*Start)->isVirtual()) {
- VBase = cast<CXXRecordDecl>(
- (*Start)->getType()->castAs<RecordType>()->getDecl());
+ VBase = (*Start)->getType()->castAsCXXRecordDecl();
++Start;
}
@@ -326,7 +323,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
llvm::Type *PtrTy = llvm::PointerType::get(
CGM.getLLVMContext(), Value.getType()->getPointerAddressSpace());
- QualType DerivedTy = getContext().getRecordType(Derived);
+ CanQualType DerivedTy = getContext().getCanonicalTagType(Derived);
CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived);
// If the static offset is zero and we don't have a virtual step,
@@ -401,8 +398,7 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
bool NullCheckValue) {
assert(PathBegin != PathEnd && "Base path should not be empty!");
- QualType DerivedTy =
- getContext().getCanonicalType(getContext().getTagDeclType(Derived));
+ CanQualType DerivedTy = getContext().getCanonicalTagType(Derived);
llvm::Type *DerivedValueTy = ConvertType(DerivedTy);
llvm::Value *NonVirtualOffset =
@@ -557,9 +553,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
Address ThisPtr = CGF.LoadCXXThisAddress();
- const Type *BaseType = BaseInit->getBaseClass();
- const auto *BaseClassDecl =
- cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getDecl());
+ const auto *BaseClassDecl = BaseInit->getBaseClass()->castAsCXXRecordDecl();
bool isBaseVirtual = BaseInit->isBaseVirtual();
@@ -638,7 +632,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
QualType FieldType = Field->getType();
llvm::Value *ThisPtr = CGF.LoadCXXThis();
- QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+ CanQualType RecordTy = CGF.getContext().getCanonicalTagType(ClassDecl);
LValue LHS;
// If a base constructor is being emitted, create an LValue that has the
@@ -974,7 +968,7 @@ namespace {
}
CharUnits MemcpySize = getMemcpySize(FirstByteOffset);
- QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+ CanQualType RecordTy = CGF.getContext().getCanonicalTagType(ClassDecl);
Address ThisPtr = CGF.LoadCXXThisAddress();
LValue DestLV = CGF.MakeAddrLValue(ThisPtr, RecordTy);
LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
@@ -1122,7 +1116,7 @@ namespace {
void pushEHDestructors() {
Address ThisPtr = CGF.LoadCXXThisAddress();
- QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+ CanQualType RecordTy = CGF.getContext().getCanonicalTagType(ClassDecl);
LValue LHS = CGF.MakeAddrLValue(ThisPtr, RecordTy);
for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
@@ -1264,9 +1258,7 @@ namespace {
static bool isInitializerOfDynamicClass(const CXXCtorInitializer *BaseInit) {
const Type *BaseType = BaseInit->getBaseClass();
- const auto *BaseClassDecl =
- cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getDecl());
- return BaseClassDecl->isDynamicClass();
+ return BaseType->castAsCXXRecordDecl()->isDynamicClass();
}
/// EmitCtorPrologue - This routine generates necessary code to initialize
@@ -1373,8 +1365,7 @@ HasTrivialDestructorBody(ASTContext &Context,
if (I.isVirtual())
continue;
- const CXXRecordDecl *NonVirtualBase =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *NonVirtualBase = I.getType()->castAsCXXRecordDecl();
if (!HasTrivialDestructorBody(Context, NonVirtualBase,
MostDerivedClassDecl))
return false;
@@ -1383,8 +1374,7 @@ HasTrivialDestructorBody(ASTContext &Context,
if (BaseClassDecl == MostDerivedClassDecl) {
// Check virtual bases.
for (const auto &I : BaseClassDecl->vbases()) {
- const CXXRecordDecl *VirtualBase =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *VirtualBase = I.getType()->castAsCXXRecordDecl();
if (!HasTrivialDestructorBody(Context, VirtualBase,
MostDerivedClassDecl))
return false;
@@ -1400,12 +1390,10 @@ FieldHasTrivialDestructorBody(ASTContext &Context,
{
QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());
- const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
- if (!RT)
+ auto *FieldClassDecl = FieldBaseElementType->getAsCXXRecordDecl();
+ if (!FieldClassDecl)
return true;
- CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
-
// The destructor for an implicit anonymous union member is never invoked.
if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion())
return true;
@@ -1588,7 +1576,7 @@ namespace {
const CXXRecordDecl *ClassDecl = Dtor->getParent();
CGF.EmitDeleteCall(Dtor->getOperatorDelete(),
LoadThisForDtorDelete(CGF, Dtor),
- CGF.getContext().getTagDeclType(ClassDecl));
+ CGF.getContext().getCanonicalTagType(ClassDecl));
}
};
@@ -1606,7 +1594,7 @@ namespace {
const CXXRecordDecl *ClassDecl = Dtor->getParent();
CGF.EmitDeleteCall(Dtor->getOperatorDelete(),
LoadThisForDtorDelete(CGF, Dtor),
- CGF.getContext().getTagDeclType(ClassDecl));
+ CGF.getContext().getCanonicalTagType(ClassDecl));
assert(Dtor->getOperatorDelete()->isDestroyingOperatorDelete() ==
ReturnAfterDelete &&
"unexpected value for ReturnAfterDelete");
@@ -1647,7 +1635,8 @@ namespace {
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Find the address of the field.
Address thisValue = CGF.LoadCXXThisAddress();
- QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
+ CanQualType RecordTy =
+ CGF.getContext().getCanonicalTagType(field->getParent());
LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
LValue LV = CGF.EmitLValueForField(ThisLV, field);
assert(LV.isSimple());
@@ -1870,7 +1859,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
const CXXRecordDecl *ClassDecl = DD->getParent();
EmitDeleteCall(DD->getOperatorDelete(),
LoadThisForDtorDelete(*this, DD),
- getContext().getTagDeclType(ClassDecl));
+ getContext().getCanonicalTagType(ClassDecl));
EmitBranchThroughCleanup(ReturnBlock);
} else {
EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
@@ -1897,9 +1886,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
// We push them in the forward order so that they'll be popped in
// the reverse order.
for (const auto &Base : ClassDecl->vbases()) {
- auto *BaseClassDecl =
- cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
-
+ auto *BaseClassDecl = Base.getType()->castAsCXXRecordDecl();
if (BaseClassDecl->hasTrivialDestructor()) {
// Under SanitizeMemoryUseAfterDtor, poison the trivial base class
// memory. For non-trival base classes the same is done in the class
@@ -1964,7 +1951,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
// Anonymous union members do not have their destructors called.
const RecordType *RT = type->getAsUnionType();
- if (RT && RT->getDecl()->isAnonymousStructOrUnion())
+ if (RT && RT->getOriginalDecl()->isAnonymousStructOrUnion())
continue;
CleanupKind cleanupKind = getCleanupKind(dtorKind);
@@ -2057,7 +2044,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
//
// Note that these are complete objects and so we don't need to
// use the non-virtual size or alignment.
- QualType type = getContext().getTypeDeclType(ctor->getParent());
+ CanQualType type = getContext().getCanonicalTagType(ctor->getParent());
CharUnits eltAlignment =
arrayBase.getAlignment()
.alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
@@ -2118,9 +2105,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
Address addr,
QualType type) {
- const RecordType *rtype = type->castAs<RecordType>();
- const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
- const CXXDestructorDecl *dtor = record->getDestructor();
+ const CXXDestructorDecl *dtor = type->castAsCXXRecordDecl()->getDestructor();
assert(!dtor->isTrivial());
CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
/*Delegating=*/false, addr, type);
@@ -2158,7 +2143,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
const Expr *Arg = E->getArg(0);
LValue Src = EmitLValue(Arg);
- QualType DestTy = getContext().getTypeDeclType(D->getParent());
+ CanQualType DestTy = getContext().getCanonicalTagType(D->getParent());
LValue Dest = MakeAddrLValue(This, DestTy);
EmitAggregateCopyCtor(Dest, Src, ThisAVS.mayOverlap());
return;
@@ -2210,7 +2195,8 @@ void CodeGenFunction::EmitCXXConstructorCall(
if (!NewPointerIsChecked)
EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, Loc, This,
- getContext().getRecordType(ClassDecl), CharUnits::Zero());
+ getContext().getCanonicalTagType(ClassDecl),
+ CharUnits::Zero());
if (D->isTrivial() && D->isDefaultConstructor()) {
assert(Args.size() == 1 && "trivial default ctor with args");
@@ -2226,7 +2212,7 @@ void CodeGenFunction::EmitCXXConstructorCall(
Address Src = makeNaturalAddressForPointer(
Args[1].getRValue(*this).getScalarVal(), SrcTy);
LValue SrcLVal = MakeAddrLValue(Src, SrcTy);
- QualType DestTy = getContext().getTypeDeclType(ClassDecl);
+ CanQualType DestTy = getContext().getCanonicalTagType(ClassDecl);
LValue DestLVal = MakeAddrLValue(This, DestTy);
EmitAggregateCopyCtor(DestLVal, SrcLVal, Overlap);
return;
@@ -2638,9 +2624,7 @@ void CodeGenFunction::getVTablePointers(BaseSubobject Base,
// Traverse bases.
for (const auto &I : RD->bases()) {
- auto *BaseDecl =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
-
+ auto *BaseDecl = I.getType()->castAsCXXRecordDecl();
// Ignore classes without a vtable.
if (!BaseDecl->isDynamicClass())
continue;
@@ -2772,7 +2756,7 @@ void CodeGenFunction::EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
// Don't insert type test assumes if we are forcing public
// visibility.
!CGM.AlwaysHasLTOVisibilityPublic(RD)) {
- QualType Ty = QualType(RD->getTypeForDecl(), 0);
+ CanQualType Ty = CGM.getContext().getCanonicalTagType(RD);
llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(Ty);
llvm::Value *TypeId =
llvm::MetadataAsValue::get(CGM.getLLVMContext(), MD);
@@ -2835,12 +2819,10 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, Address Derived,
if (!getLangOpts().CPlusPlus)
return;
- auto *ClassTy = T->getAs<RecordType>();
- if (!ClassTy)
+ const auto *ClassDecl = T->getAsCXXRecordDecl();
+ if (!ClassDecl)
return;
- const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(ClassTy->getDecl());
-
if (!ClassDecl->isCompleteDefinition() || !ClassDecl->isDynamicClass())
return;
@@ -2896,8 +2878,8 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
EmitSanitizerStatReport(SSK);
- llvm::Metadata *MD =
- CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ CanQualType T = CGM.getContext().getCanonicalTagType(RD);
+ llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(T);
llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
llvm::Value *TypeTest = Builder.CreateCall(
@@ -2906,7 +2888,7 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
llvm::Constant *StaticData[] = {
llvm::ConstantInt::get(Int8Ty, TCK),
EmitCheckSourceLocation(Loc),
- EmitCheckTypeDescriptor(QualType(RD->getTypeForDecl(), 0)),
+ EmitCheckTypeDescriptor(T),
};
auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
@@ -2956,8 +2938,8 @@ llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
EmitSanitizerStatReport(llvm::SanStat_CFI_VCall);
- llvm::Metadata *MD =
- CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ CanQualType T = CGM.getContext().getCanonicalTagType(RD);
+ llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(T);
llvm::Value *TypeId = llvm::MetadataAsValue::get(CGM.getLLVMContext(), MD);
auto CheckedLoadIntrinsic = CGM.getVTables().useRelativeLayout()
@@ -3039,7 +3021,8 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
// Start building arguments for forwarding call
CallArgList CallArgs;
- QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
+ CanQualType ThisType =
+ getContext().getPointerType(getContext().getCanonicalTagType(Lambda));
Address ThisPtr = GetAddrOfBlockDecl(variable);
CallArgs.add(RValue::get(getAsNaturalPointerTo(ThisPtr, ThisType)), ThisType);
@@ -3066,8 +3049,8 @@ void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
// Start building arguments for forwarding call
CallArgList CallArgs;
- QualType LambdaType = getContext().getRecordType(Lambda);
- QualType ThisType = getContext().getPointerType(LambdaType);
+ CanQualType LambdaType = getContext().getCanonicalTagType(Lambda);
+ CanQualType ThisType = getContext().getPointerType(LambdaType);
Address ThisPtr = CreateMemTemp(LambdaType, "unused.capture");
CallArgs.add(RValue::get(ThisPtr.emitRawPointer(*this)), ThisType);
@@ -3118,8 +3101,8 @@ void CodeGenFunction::EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD) {
// Forward %this argument.
CallArgList CallArgs;
- QualType LambdaType = getContext().getRecordType(MD->getParent());
- QualType ThisType = getContext().getPointerType(LambdaType);
+ CanQualType LambdaType = getContext().getCanonicalTagType(MD->getParent());
+ CanQualType ThisType = getContext().getPointerType(LambdaType);
llvm::Value *ThisArg = CurFn->getArg(0);
CallArgs.add(RValue::get(ThisArg), ThisType);
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index 2b469f2..0385dbda 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -366,7 +366,7 @@ llvm::DIScope *CGDebugInfo::getContextDescriptor(const Decl *Context,
if (const auto *RDecl = dyn_cast<RecordDecl>(Context))
if (!RDecl->isDependentType())
- return getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
+ return getOrCreateType(CGM.getContext().getCanonicalTagType(RDecl),
TheCU->getFile());
return Default;
}
@@ -1285,7 +1285,7 @@ static bool needsTypeIdentifier(const TagDecl *TD, CodeGenModule &CGM,
static SmallString<256> getTypeIdentifier(const TagType *Ty, CodeGenModule &CGM,
llvm::DICompileUnit *TheCU) {
SmallString<256> Identifier;
- const TagDecl *TD = Ty->getDecl();
+ const TagDecl *TD = Ty->getOriginalDecl()->getDefinitionOrSelf();
if (!needsTypeIdentifier(TD, CGM, TheCU))
return Identifier;
@@ -1321,8 +1321,8 @@ static llvm::dwarf::Tag getTagForRecord(const RecordDecl *RD) {
llvm::DICompositeType *
CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
llvm::DIScope *Ctx) {
- const RecordDecl *RD = Ty->getDecl();
- if (llvm::DIType *T = getTypeOrNull(CGM.getContext().getRecordType(RD)))
+ const RecordDecl *RD = Ty->getOriginalDecl()->getDefinitionOrSelf();
+ if (llvm::DIType *T = getTypeOrNull(QualType(Ty, 0)))
return cast<llvm::DICompositeType>(T);
llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation());
const unsigned Line =
@@ -2015,6 +2015,8 @@ void CGDebugInfo::CollectRecordNestedType(
const TypeDecl *TD, SmallVectorImpl<llvm::Metadata *> &elements) {
QualType Ty = CGM.getContext().getTypeDeclType(TD);
// Injected class names are not considered nested records.
+ // FIXME: Is this supposed to be testing for injected class name declarations
+ // instead?
if (isa<InjectedClassNameType>(Ty))
return;
SourceLocation Loc = TD->getLocation();
@@ -2356,7 +2358,9 @@ void CGDebugInfo::CollectCXXBasesAux(
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
for (const auto &BI : Bases) {
const auto *Base =
- cast<CXXRecordDecl>(BI.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ BI.getType()->castAsCanonical<RecordType>()->getOriginalDecl())
+ ->getDefinition();
if (!SeenTypes.insert(Base).second)
continue;
auto *BaseTy = getOrCreateType(BI.getType(), Unit);
@@ -2825,12 +2829,12 @@ void CGDebugInfo::addHeapAllocSiteMetadata(llvm::CallBase *CI,
void CGDebugInfo::completeType(const EnumDecl *ED) {
if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
- QualType Ty = CGM.getContext().getEnumType(ED);
+ CanQualType Ty = CGM.getContext().getCanonicalTagType(ED);
void *TyPtr = Ty.getAsOpaquePtr();
auto I = TypeCache.find(TyPtr);
if (I == TypeCache.end() || !cast<llvm::DIType>(I->second)->isForwardDecl())
return;
- llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<EnumType>());
+ llvm::DIType *Res = CreateTypeDefinition(cast<EnumType>(Ty));
assert(!Res->isForwardDecl());
TypeCache[TyPtr].reset(Res);
}
@@ -2900,7 +2904,7 @@ void CGDebugInfo::completeClassData(const RecordDecl *RD) {
void CGDebugInfo::completeClass(const RecordDecl *RD) {
if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
- QualType Ty = CGM.getContext().getRecordType(RD);
+ CanQualType Ty = CGM.getContext().getCanonicalTagType(RD);
void *TyPtr = Ty.getAsOpaquePtr();
auto I = TypeCache.find(TyPtr);
if (I != TypeCache.end() && !cast<llvm::DIType>(I->second)->isForwardDecl())
@@ -2909,7 +2913,7 @@ void CGDebugInfo::completeClass(const RecordDecl *RD) {
// We want the canonical definition of the structure to not
// be the typedef. Since that would lead to circular typedef
// metadata.
- auto [Res, PrefRes] = CreateTypeDefinition(Ty->castAs<RecordType>());
+ auto [Res, PrefRes] = CreateTypeDefinition(cast<RecordType>(Ty));
assert(!Res->isForwardDecl());
TypeCache[TyPtr].reset(Res);
}
@@ -3013,14 +3017,14 @@ void CGDebugInfo::completeRequiredType(const RecordDecl *RD) {
if (shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD, CGM.getLangOpts()))
return;
- QualType Ty = CGM.getContext().getRecordType(RD);
+ CanQualType Ty = CGM.getContext().getCanonicalTagType(RD);
llvm::DIType *T = getTypeOrNull(Ty);
if (T && T->isForwardDecl())
completeClassData(RD);
}
llvm::DIType *CGDebugInfo::CreateType(const RecordType *Ty) {
- RecordDecl *RD = Ty->getDecl();
+ RecordDecl *RD = Ty->getOriginalDecl()->getDefinitionOrSelf();
llvm::DIType *T = cast_or_null<llvm::DIType>(getTypeOrNull(QualType(Ty, 0)));
if (T || shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD,
CGM.getLangOpts())) {
@@ -3048,7 +3052,7 @@ llvm::DIType *CGDebugInfo::GetPreferredNameType(const CXXRecordDecl *RD,
std::pair<llvm::DIType *, llvm::DIType *>
CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
- RecordDecl *RD = Ty->getDecl();
+ RecordDecl *RD = Ty->getOriginalDecl()->getDefinitionOrSelf();
// Get overall information about the record type for the debug info.
llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation());
@@ -3070,7 +3074,7 @@ CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
// Push the struct on region stack.
LexicalBlockStack.emplace_back(&*FwdDecl);
- RegionMap[Ty->getDecl()].reset(FwdDecl);
+ RegionMap[RD].reset(FwdDecl);
// Convert all the elements.
SmallVector<llvm::Metadata *, 16> EltTys;
@@ -3092,7 +3096,7 @@ CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
CollectCXXMemberFunctions(CXXDecl, DefUnit, EltTys, FwdDecl);
LexicalBlockStack.pop_back();
- RegionMap.erase(Ty->getDecl());
+ RegionMap.erase(RD);
llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
DBuilder.replaceArrays(FwdDecl, Elements);
@@ -3101,7 +3105,7 @@ CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
FwdDecl =
llvm::MDNode::replaceWithPermanent(llvm::TempDICompositeType(FwdDecl));
- RegionMap[Ty->getDecl()].reset(FwdDecl);
+ RegionMap[RD].reset(FwdDecl);
if (CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB)
if (auto *PrefDI = GetPreferredNameType(CXXDecl, DefUnit))
@@ -3651,8 +3655,9 @@ llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
}
}
- llvm::DIType *ClassType = getOrCreateType(
- QualType(Ty->getMostRecentCXXRecordDecl()->getTypeForDecl(), 0), U);
+ CanQualType T =
+ CGM.getContext().getCanonicalTagType(Ty->getMostRecentCXXRecordDecl());
+ llvm::DIType *ClassType = getOrCreateType(T, U);
if (Ty->isMemberDataPointerType())
return DBuilder.createMemberPointerType(
getOrCreateType(Ty->getPointeeType(), U), ClassType, Size, /*Align=*/0,
@@ -3687,17 +3692,21 @@ llvm::DIType *CGDebugInfo::CreateType(const HLSLInlineSpirvType *Ty,
return nullptr;
}
-llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
- const EnumDecl *ED = Ty->getDecl();
+static auto getEnumInfo(CodeGenModule &CGM, llvm::DICompileUnit *TheCU,
+ const EnumType *Ty) {
+ const EnumDecl *ED = Ty->getOriginalDecl()->getDefinitionOrSelf();
uint64_t Size = 0;
uint32_t Align = 0;
- if (!ED->getTypeForDecl()->isIncompleteType()) {
- Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
+ if (ED->isComplete()) {
+ Size = CGM.getContext().getTypeSize(QualType(Ty, 0));
Align = getDeclAlignIfRequired(ED, CGM.getContext());
}
+ return std::make_tuple(ED, Size, Align, getTypeIdentifier(Ty, CGM, TheCU));
+}
- SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
+llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
+ auto [ED, Size, Align, Identifier] = getEnumInfo(CGM, TheCU, Ty);
bool isImportedFromModule =
DebugTypeExtRefs && ED->isFromASTFile() && ED->getDefinition();
@@ -3732,15 +3741,7 @@ llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
}
llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
- const EnumDecl *ED = Ty->getDecl();
- uint64_t Size = 0;
- uint32_t Align = 0;
- if (!ED->getTypeForDecl()->isIncompleteType()) {
- Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
- Align = getDeclAlignIfRequired(ED, CGM.getContext());
- }
-
- SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
+ auto [ED, Size, Align, Identifier] = getEnumInfo(CGM, TheCU, Ty);
SmallVector<llvm::Metadata *, 16> Enumerators;
ED = ED->getDefinition();
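CreateEnumType and CreateTypeDefinition previously duplicated the size/align/identifier setup; the new getEnumInfo helper returns the shared locals as a std::tuple that both callers unpack with structured bindings. The shape of that deduplication, reduced to a self-contained sketch (names and fields are illustrative):

  #include <string>
  #include <tuple>

  static auto getInfo(unsigned Size, unsigned Align) {
    // One definition of the shared locals; each caller unpacks all of them.
    return std::make_tuple(Size, Align, std::string("identifier"));
  }

  void caller() {
    auto [Size, Align, Id] = getInfo(32, 4); // mirrors getEnumInfo's callers
    (void)Size; (void)Align; (void)Id;
  }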
@@ -3815,6 +3816,11 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
switch (T->getTypeClass()) {
default:
return C.getQualifiedType(T.getTypePtr(), Quals);
+ case Type::Enum:
+ case Type::Record:
+ case Type::InjectedClassName:
+ return C.getQualifiedType(T->getCanonicalTypeUnqualified().getTypePtr(),
+ Quals);
case Type::TemplateSpecialization: {
const auto *Spec = cast<TemplateSpecializationType>(T);
if (Spec->isTypeAlias())
@@ -3843,11 +3849,8 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
case Type::CountAttributed:
T = cast<CountAttributedType>(T)->desugar();
break;
- case Type::Elaborated:
- T = cast<ElaboratedType>(T)->getNamedType();
- break;
case Type::Using:
- T = cast<UsingType>(T)->getUnderlyingType();
+ T = cast<UsingType>(T)->desugar();
break;
case Type::Paren:
T = cast<ParenType>(T)->getInnerType();
@@ -3906,7 +3909,8 @@ void CGDebugInfo::completeUnusedClass(const CXXRecordDecl &D) {
completeClassData(&D);
// In case this type has no member function definitions being emitted, ensure
// it is retained
- RetainedTypes.push_back(CGM.getContext().getRecordType(&D).getAsOpaquePtr());
+ RetainedTypes.push_back(
+ CGM.getContext().getCanonicalTagType(&D).getAsOpaquePtr());
}
llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit) {
@@ -4051,7 +4055,6 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::Adjusted:
case Type::Decayed:
case Type::DeducedTemplateSpecialization:
- case Type::Elaborated:
case Type::Using:
case Type::Paren:
case Type::MacroQualified:
@@ -4094,7 +4097,7 @@ CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty) {
// TODO: Currently used for context chains when limiting debug info.
llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
- RecordDecl *RD = Ty->getDecl();
+ RecordDecl *RD = Ty->getOriginalDecl()->getDefinitionOrSelf();
// Get overall information about the record type for the debug info.
StringRef RDName = getClassName(RD);
@@ -4111,7 +4114,7 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
// If we ended up creating the type during the context chain construction,
// just return that.
auto *T = cast_or_null<llvm::DICompositeType>(
- getTypeOrNull(CGM.getContext().getRecordType(RD)));
+ getTypeOrNull(CGM.getContext().getCanonicalTagType(RD)));
if (T && (!T->isForwardDecl() || !RD->getDefinition()))
return T;
@@ -4181,7 +4184,14 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
break;
}
- RegionMap[Ty->getDecl()].reset(RealDecl);
+ if (auto *CTSD =
+ dyn_cast<ClassTemplateSpecializationDecl>(Ty->getOriginalDecl())) {
+ CXXRecordDecl *TemplateDecl =
+ CTSD->getSpecializedTemplate()->getTemplatedDecl();
+ RegionMap[TemplateDecl].reset(RealDecl);
+ } else {
+ RegionMap[RD].reset(RealDecl);
+ }
TypeCache[QualType(Ty, 0).getAsOpaquePtr()].reset(RealDecl);
if (const auto *TSpecial = dyn_cast<ClassTemplateSpecializationDecl>(RD))
@@ -4205,8 +4215,8 @@ void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
else
break;
}
- ContainingType = getOrCreateType(QualType(PBase->getTypeForDecl(), 0),
- getOrCreateFile(RD->getLocation()));
+ CanQualType T = CGM.getContext().getCanonicalTagType(PBase);
+ ContainingType = getOrCreateType(T, getOrCreateFile(RD->getLocation()));
} else if (RD->isDynamicClass())
ContainingType = RealDecl;
@@ -4412,9 +4422,10 @@ llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
// we would otherwise do to get a type for a pointee. (forward declarations in
// limited debug info, full definitions (if the type definition is available)
// in unlimited debug info)
- if (const auto *TD = dyn_cast<TypeDecl>(D))
- return getOrCreateType(CGM.getContext().getTypeDeclType(TD),
- getOrCreateFile(TD->getLocation()));
+ if (const auto *TD = dyn_cast<TypeDecl>(D)) {
+ QualType Ty = CGM.getContext().getTypeDeclType(TD);
+ return getOrCreateType(Ty, getOrCreateFile(TD->getLocation()));
+ }
auto I = DeclCache.find(D->getCanonicalDecl());
if (I != DeclCache.end()) {
@@ -5076,7 +5087,7 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
} else if (const auto *RT = dyn_cast<RecordType>(VD->getType())) {
// If VD is an anonymous union then Storage represents value for
// all union fields.
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isUnion() && RD->isAnonymousStructOrUnion()) {
// GDB has trouble finding local variables in anonymous unions, so we emit
// artificial local variables for each of the members.
@@ -5536,7 +5547,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
cast_or_null<CXXMethodDecl>(blockDecl->getNonClosureContext()))
type = Method->getThisType();
else if (auto *RDecl = dyn_cast<CXXRecordDecl>(blockDecl->getParent()))
- type = QualType(RDecl->getTypeForDecl(), 0);
+ type = CGM.getContext().getCanonicalTagType(RDecl);
else
llvm_unreachable("unexpected block declcontext");
@@ -5626,8 +5637,9 @@ llvm::DIGlobalVariableExpression *CGDebugInfo::CollectAnonRecordDecls(
// Ignore unnamed fields, but recurse into anonymous records.
if (FieldName.empty()) {
if (const auto *RT = dyn_cast<RecordType>(Field->getType()))
- GVE = CollectAnonRecordDecls(RT->getDecl(), Unit, LineNo, LinkageName,
- Var, DContext);
+ GVE =
+ CollectAnonRecordDecls(RT->getOriginalDecl()->getDefinitionOrSelf(),
+ Unit, LineNo, LinkageName, Var, DContext);
continue;
}
// Use VarDecl's Tag, Scope and Line number.
@@ -5646,7 +5658,7 @@ static bool ReferencesAnonymousEntity(RecordType *RT) {
// But so long as it's not one of those, it doesn't matter if some sub-type
// of the record (a template parameter) can't be reconstituted - because the
// un-reconstitutable type itself will carry its own name.
- const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ const auto *RD = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl());
if (!RD)
return false;
if (!RD->getIdentifier())
@@ -5705,15 +5717,15 @@ struct ReconstitutableType : public RecursiveASTVisitor<ReconstitutableType> {
}
return true;
}
- bool TraverseEnumType(EnumType *ET) {
+ bool TraverseEnumType(EnumType *ET, bool = false) {
// Unnamed enums can't be reconstituted due to a lack of column info we
// produce in the DWARF, so we can't get Clang's full name back.
- if (const auto *ED = dyn_cast<EnumDecl>(ET->getDecl())) {
+ if (const auto *ED = dyn_cast<EnumDecl>(ET->getOriginalDecl())) {
if (!ED->getIdentifier()) {
Reconstitutable = false;
return false;
}
- if (!ED->isExternallyVisible()) {
+ if (!ED->getDefinitionOrSelf()->isExternallyVisible()) {
Reconstitutable = false;
return false;
}
@@ -5726,7 +5738,7 @@ struct ReconstitutableType : public RecursiveASTVisitor<ReconstitutableType> {
Reconstitutable &= !FT->getNoReturnAttr();
return Reconstitutable;
}
- bool VisitRecordType(RecordType *RT) {
+ bool VisitRecordType(RecordType *RT, bool = false) {
if (ReferencesAnonymousEntity(RT)) {
Reconstitutable = false;
return false;
@@ -5909,7 +5921,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
// variable for each member of the anonymous union so that it's possible
// to find the name of any field in the union.
if (T->isUnionType() && DeclName.empty()) {
- const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
+ const auto *RD = T->castAsRecordDecl();
assert(RD->isAnonymousStructOrUnion() &&
"unnamed non-anonymous struct or union?");
GVE = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
@@ -5956,8 +5968,6 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
if (const auto *ECD = dyn_cast<EnumConstantDecl>(VD)) {
const auto *ED = cast<EnumDecl>(ECD->getDeclContext());
- assert(isa<EnumType>(ED->getTypeForDecl()) && "Enum without EnumType?");
-
if (CGM.getCodeGenOpts().EmitCodeView) {
// If CodeView, emit enums as global variables, unless they are defined
// inside a class. We do this because MSVC doesn't emit S_CONSTANTs for
@@ -5969,10 +5979,9 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
// If not CodeView, emit DW_TAG_enumeration_type if necessary. For
// example: for "enum { ZERO };", a DW_TAG_enumeration_type is created the
// first time `ZERO` is referenced in a function.
- llvm::DIType *EDTy =
- getOrCreateType(QualType(ED->getTypeForDecl(), 0), Unit);
- assert (EDTy->getTag() == llvm::dwarf::DW_TAG_enumeration_type);
- (void)EDTy;
+ CanQualType T = CGM.getContext().getCanonicalTagType(ED);
+ [[maybe_unused]] llvm::DIType *EDTy = getOrCreateType(T, Unit);
+ assert(EDTy->getTag() == llvm::dwarf::DW_TAG_enumeration_type);
return;
}
}
@@ -5991,7 +6000,7 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
// FIXME: This is probably unnecessary, since Ty should reference RD
// through its scope.
RetainedTypes.push_back(
- CGM.getContext().getRecordType(RD).getAsOpaquePtr());
+ CGM.getContext().getCanonicalTagType(RD).getAsOpaquePtr());
return;
}
diff --git a/clang/lib/CodeGen/CGDebugInfo.h b/clang/lib/CodeGen/CGDebugInfo.h
index 497d3a6..ff9c3cd 100644
--- a/clang/lib/CodeGen/CGDebugInfo.h
+++ b/clang/lib/CodeGen/CGDebugInfo.h
@@ -977,6 +977,8 @@ public:
ApplyInlineDebugLocation(CodeGenFunction &CGF, GlobalDecl InlinedFn);
/// Restore everything back to the original state.
~ApplyInlineDebugLocation();
+ ApplyInlineDebugLocation(const ApplyInlineDebugLocation &) = delete;
+ ApplyInlineDebugLocation &operator=(const ApplyInlineDebugLocation &) = delete;
};
class SanitizerDebugLocation {
@@ -988,6 +990,8 @@ public:
ArrayRef<SanitizerKind::SanitizerOrdinal> Ordinals,
SanitizerHandler Handler);
~SanitizerDebugLocation();
+ SanitizerDebugLocation(const SanitizerDebugLocation &) = delete;
+ SanitizerDebugLocation &operator=(const SanitizerDebugLocation &) = delete;
};
} // namespace CodeGen
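Deleting the copy operations on these two guards closes a real hole: a copied guard would run its restoring destructor twice. The idiom in isolation, assuming nothing about the upstream classes beyond their RAII shape (ScopedDebugLoc is an illustrative stand-in):

  #include "llvm/IR/IRBuilder.h"

  class ScopedDebugLoc {
    llvm::IRBuilderBase &B;
    llvm::DebugLoc Saved;

  public:
    ScopedDebugLoc(llvm::IRBuilderBase &B, llvm::DebugLoc L)
        : B(B), Saved(B.getCurrentDebugLocation()) {
      B.SetCurrentDebugLocation(std::move(L));
    }
    ~ScopedDebugLoc() { B.SetCurrentDebugLocation(Saved); } // restore exactly once
    ScopedDebugLoc(const ScopedDebugLoc &) = delete;
    ScopedDebugLoc &operator=(const ScopedDebugLoc &) = delete;
  };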
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index ff2dada..29193e0 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -113,12 +113,14 @@ void CodeGenFunction::EmitDecl(const Decl &D, bool EvaluateConditionDecl) {
case Decl::CXXRecord: // struct/union/class X; [C++]
if (CGDebugInfo *DI = getDebugInfo())
if (cast<RecordDecl>(D).getDefinition())
- DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<RecordDecl>(&D)));
return;
case Decl::Enum: // enum X;
if (CGDebugInfo *DI = getDebugInfo())
if (cast<EnumDecl>(D).getDefinition())
- DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<EnumDecl>(&D)));
return;
case Decl::Function: // void X();
case Decl::EnumConstant: // enum ? { X = ? }
@@ -1351,30 +1353,27 @@ void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
}
/// Emit a lifetime.begin marker if some criteria are satisfied.
-/// \return a pointer to the temporary size Value if a marker was emitted, null
-/// otherwise
-llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
- llvm::Value *Addr) {
+/// \return whether the marker was emitted.
+bool CodeGenFunction::EmitLifetimeStart(llvm::Value *Addr) {
if (!ShouldEmitLifetimeMarkers)
- return nullptr;
+ return false;
assert(Addr->getType()->getPointerAddressSpace() ==
CGM.getDataLayout().getAllocaAddrSpace() &&
"Pointer should be in alloca address space");
- llvm::Value *SizeV = llvm::ConstantInt::get(
- Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
- llvm::CallInst *C =
- Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
+ llvm::CallInst *C = Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {Addr});
C->setDoesNotThrow();
- return SizeV;
+ return true;
}
-void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
+void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Addr) {
+ if (!ShouldEmitLifetimeMarkers)
+ return;
+
assert(Addr->getType()->getPointerAddressSpace() ==
CGM.getDataLayout().getAllocaAddrSpace() &&
"Pointer should be in alloca address space");
- llvm::CallInst *C =
- Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
+ llvm::CallInst *C = Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Addr});
C->setDoesNotThrow();
}
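This hunk follows the IR-side change that llvm.lifetime.start/end no longer carry an explicit size operand: a marker now covers its whole alloca, so the only state CodeGen needs to thread through is whether a marker was emitted at all. The old and new contracts in miniature (plain functions standing in for the CodeGenFunction members; illustrative only):

  #include <cstdio>

  // Old: return the size Value* (or null) and pass it back to the end marker.
  // New: return bool; the end marker needs only the address.
  bool lifetimeStart(void *Addr, bool MarkersEnabled) {
    if (!MarkersEnabled)
      return false;
    std::printf("lifetime.start(%p)\n", Addr); // no size operand anymore
    return true;
  }

  void lifetimeEnd(void *Addr, bool MarkersEnabled) {
    if (!MarkersEnabled)
      return;
    std::printf("lifetime.end(%p)\n", Addr);
  }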
@@ -1564,16 +1563,14 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// The named return value optimization: allocate this variable in the
// return slot, so that we can elide the copy when returning this
// variable (C++0x [class.copy]p34).
- address = ReturnValue;
AllocaAddr =
RawAddress(ReturnValue.emitRawPointer(*this),
ReturnValue.getElementType(), ReturnValue.getAlignment());
- ;
+ address = MaybeCastStackAddressSpace(AllocaAddr, Ty.getAddressSpace());
- if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
- const auto *RD = RecordTy->getDecl();
- const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
- if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
+ if (const auto *RD = Ty->getAsRecordDecl()) {
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
+ (CXXRD && !CXXRD->hasTrivialDestructor()) ||
RD->isNonTrivialToPrimitiveDestroy()) {
// Create a flag that is used to indicate when the NRVO was applied
// to this variable. Set it to zero to indicate that NRVO was not
@@ -1632,9 +1629,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// is rare.
if (!Bypasses.IsBypassed(&D) &&
!(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
- llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
- emission.SizeForLifetimeMarkers =
- EmitLifetimeStart(Size, AllocaAddr.getPointer());
+ emission.UseLifetimeMarkers =
+ EmitLifetimeStart(AllocaAddr.getPointer());
}
} else {
assert(!emission.useLifetimeMarkers());
@@ -1727,9 +1723,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// Make sure we call @llvm.lifetime.end.
if (emission.useLifetimeMarkers())
- EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
- emission.getOriginalAllocatedAddress(),
- emission.getSizeForLifetimeMarkers());
+ EHStack.pushCleanup<CallLifetimeEnd>(
+ NormalEHLifetimeMarker, emission.getOriginalAllocatedAddress());
// Analogous to lifetime markers, we use a 'cleanup' to emit fake.use
// calls for local variables. We are exempting volatile variables and
@@ -2732,7 +2727,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// Don't push a cleanup in a thunk for a method that will also emit a
// cleanup.
if (Ty->isRecordType() && !CurFuncIsThunk &&
- Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
+ Ty->castAsRecordDecl()->isParamDestroyedInCallee()) {
if (QualType::DestructionKind DtorKind =
D.needsDestruction(getContext())) {
assert((DtorKind == QualType::DK_cxx_destructor ||
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index ed35a05..844b445 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -16,6 +16,7 @@
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
+#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
@@ -107,13 +108,10 @@ CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
return RawAddress(Alloca, Ty, Align, KnownNonNull);
}
-RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, LangAS DestLangAS,
- CharUnits Align, const Twine &Name,
- llvm::Value *ArraySize,
- RawAddress *AllocaAddr) {
- RawAddress Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
- if (AllocaAddr)
- *AllocaAddr = Alloca;
+RawAddress CodeGenFunction::MaybeCastStackAddressSpace(RawAddress Alloca,
+ LangAS DestLangAS,
+ llvm::Value *ArraySize) {
llvm::Value *V = Alloca.getPointer();
// Alloca always returns a pointer in alloca address space, which may
// be different from the type defined by the language. For example,
@@ -133,7 +131,18 @@ RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, LangAS DestLangAS,
/*IsNonNull=*/true);
}
- return RawAddress(V, Ty, Align, KnownNonNull);
+ return RawAddress(V, Alloca.getElementType(), Alloca.getAlignment(),
+ KnownNonNull);
+}
+
+RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, LangAS DestLangAS,
+ CharUnits Align, const Twine &Name,
+ llvm::Value *ArraySize,
+ RawAddress *AllocaAddr) {
+ RawAddress Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
+ if (AllocaAddr)
+ *AllocaAddr = Alloca;
+ return MaybeCastStackAddressSpace(Alloca, DestLangAS, ArraySize);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
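Splitting the addrspacecast out of CreateTempAlloca matters because allocas always live in the target's alloca address space, which may differ from the address space the language-level type expects; the NRVO path in CGDecl.cpp reuses the split to re-cast the return slot without creating a second alloca. The resulting call shape, using the signatures from this patch:

  RawAddress Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  // Cast from the alloca address space to DestLangAS only when they differ.
  RawAddress Usable = MaybeCastStackAddressSpace(Alloca, DestLangAS, ArraySize);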
@@ -405,13 +414,11 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
case SD_Static:
case SD_Thread: {
CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
- if (const RecordType *RT =
- E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
+ if (const auto *ClassDecl =
+ E->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ ClassDecl && !ClassDecl->hasTrivialDestructor())
// Get the destructor for the reference temporary.
- if (auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl());
- ClassDecl && !ClassDecl->hasTrivialDestructor())
- ReferenceTemporaryDtor = ClassDecl->getDestructor();
- }
+ ReferenceTemporaryDtor = ClassDecl->getDestructor();
if (!ReferenceTemporaryDtor)
return;
@@ -588,11 +595,9 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
} else {
switch (M->getStorageDuration()) {
case SD_Automatic:
- if (auto *Size = EmitLifetimeStart(
- CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
- Alloca.getPointer())) {
+ if (EmitLifetimeStart(Alloca.getPointer())) {
pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
- Alloca, Size);
+ Alloca);
}
break;
@@ -623,11 +628,8 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
Block, llvm::BasicBlock::iterator(Block->back())));
}
- if (auto *Size = EmitLifetimeStart(
- CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
- Alloca.getPointer())) {
- pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
- Size);
+ if (EmitLifetimeStart(Alloca.getPointer())) {
+ pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca);
}
if (OldConditional) {
@@ -1209,9 +1211,10 @@ llvm::Value *CodeGenFunction::GetCountedByFieldExprGEP(
return nullptr;
Indices.push_back(Builder.getInt32(0));
- return Builder.CreateInBoundsGEP(
- ConvertType(QualType(RD->getTypeForDecl(), 0)), Res,
- RecIndicesTy(llvm::reverse(Indices)), "counted_by.gep");
+ CanQualType T = CGM.getContext().getCanonicalTagType(RD);
+ return Builder.CreateInBoundsGEP(ConvertType(T), Res,
+ RecIndicesTy(llvm::reverse(Indices)),
+ "counted_by.gep");
}
/// This method is typically called in contexts where we can't generate
@@ -1319,6 +1322,57 @@ void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
// LValue Expression Emission
//===----------------------------------------------------------------------===//
+static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx,
+ CharUnits eltSize) {
+ // If we have a constant index, we can use the exact offset of the
+ // element we're accessing.
+ if (auto *constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
+ CharUnits offset = constantIdx->getZExtValue() * eltSize;
+ return arrayAlign.alignmentAtOffset(offset);
+ }
+
+ // Otherwise, use the worst-case alignment for any element.
+ return arrayAlign.alignmentOfArrayElement(eltSize);
+}
+
+/// Emit pointer + index arithmetic.
+static Address emitPointerArithmetic(CodeGenFunction &CGF,
+ const BinaryOperator *BO,
+ LValueBaseInfo *BaseInfo,
+ TBAAAccessInfo *TBAAInfo,
+ KnownNonNull_t IsKnownNonNull) {
+ assert(BO->isAdditiveOp() && "Expect an addition or subtraction.");
+ Expr *pointerOperand = BO->getLHS();
+ Expr *indexOperand = BO->getRHS();
+ bool isSubtraction = BO->getOpcode() == BO_Sub;
+
+ Address BaseAddr = Address::invalid();
+ llvm::Value *index = nullptr;
+ // In a subtraction, the LHS is always the pointer.
+ // Note: do not change the evaluation order.
+ if (!isSubtraction && !pointerOperand->getType()->isAnyPointerType()) {
+ std::swap(pointerOperand, indexOperand);
+ index = CGF.EmitScalarExpr(indexOperand);
+ BaseAddr = CGF.EmitPointerWithAlignment(pointerOperand, BaseInfo, TBAAInfo,
+ NotKnownNonNull);
+ } else {
+ BaseAddr = CGF.EmitPointerWithAlignment(pointerOperand, BaseInfo, TBAAInfo,
+ NotKnownNonNull);
+ index = CGF.EmitScalarExpr(indexOperand);
+ }
+
+ llvm::Value *pointer = BaseAddr.getBasePointer();
+ llvm::Value *Res = CGF.EmitPointerArithmetic(
+ BO, pointerOperand, pointer, indexOperand, index, isSubtraction);
+ QualType PointeeTy = BO->getType()->getPointeeType();
+ CharUnits Align =
+ getArrayElementAlign(BaseAddr.getAlignment(), index,
+ CGF.getContext().getTypeSizeInChars(PointeeTy));
+ return Address(Res, CGF.ConvertTypeForMem(PointeeTy), Align,
+ CGF.CGM.getPointerAuthInfoForPointeeType(PointeeTy),
+ /*Offset=*/nullptr, IsKnownNonNull);
+}
+
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
TBAAAccessInfo *TBAAInfo,
KnownNonNull_t IsKnownNonNull,
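getArrayElementAlign is why constant indices yield better-aligned accesses: with a known byte offset, the alignment is the largest power of two dividing both the base alignment and that offset, while a variable index must assume the element-size worst case. A self-contained numeric sketch of the same computation (plain integers standing in for CharUnits; minAlign mirrors llvm::MinAlign):

  #include <cstdint>

  // Largest power of two dividing both A and B; for B == 0 this returns A,
  // so index 0 keeps the array's full base alignment.
  uint64_t minAlign(uint64_t A, uint64_t B) { return (A | B) & (~(A | B) + 1); }

  uint64_t elementAlign(uint64_t BaseAlign, uint64_t Index, uint64_t EltSize) {
    return minAlign(BaseAlign, Index * EltSize);
  }

  // elementAlign(16, 0, 4) == 16, elementAlign(16, 1, 4) == 4,
  // elementAlign(16, 4, 4) == 16; an unknown index gets minAlign(16, 4) == 4.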
@@ -1381,6 +1435,7 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
if (CE->getCastKind() == CK_AddressSpaceConversion)
Addr = CGF.Builder.CreateAddrSpaceCast(
Addr, CGF.ConvertType(E->getType()), ElemTy);
+
return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
CE->getType());
}
@@ -1441,6 +1496,12 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
}
}
+ // Pointer arithmetic: pointer +/- index.
+ if (auto *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->isAdditiveOp())
+ return emitPointerArithmetic(CGF, BO, BaseInfo, TBAAInfo, IsKnownNonNull);
+ }
+
// TODO: conditional operators, comma.
// Otherwise, use the alignment of the type.
@@ -1759,9 +1820,11 @@ static bool isConstantEmittableObjectType(QualType type) {
// Otherwise, all object types satisfy this except C++ classes with
// mutable subobjects or non-trivial copy/destroy behavior.
if (const auto *RT = dyn_cast<RecordType>(type))
- if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl())) {
+ RD = RD->getDefinitionOrSelf();
if (RD->hasMutableFields() || !RD->isTrivial())
return false;
+ }
return true;
}
@@ -1921,9 +1984,9 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
llvm::APInt &Min, llvm::APInt &End,
bool StrictEnums, bool IsBool) {
- const EnumType *ET = Ty->getAs<EnumType>();
- bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
- ET && !ET->getDecl()->isFixed();
+ const auto *ED = Ty->getAsEnumDecl();
+ bool IsRegularCPlusPlusEnum =
+ CGF.getLangOpts().CPlusPlus && StrictEnums && ED && !ED->isFixed();
if (!IsBool && !IsRegularCPlusPlusEnum)
return false;
@@ -1931,7 +1994,6 @@ static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
} else {
- const EnumDecl *ED = ET->getDecl();
ED->getValueRange(End, Min);
}
return true;
@@ -1971,7 +2033,7 @@ bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
bool IsBool = (Ty->hasBooleanRepresentation() && !Ty->isVectorType()) ||
NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
bool NeedsBoolCheck = HasBoolCheck && IsBool;
- bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
+ bool NeedsEnumCheck = HasEnumCheck && Ty->isEnumeralType();
if (!NeedsBoolCheck && !NeedsEnumCheck)
return false;
@@ -3720,7 +3782,7 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
void CodeGenFunction::EmitCheck(
ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
- ArrayRef<llvm::Value *> DynamicArgs) {
+ ArrayRef<llvm::Value *> DynamicArgs, const TrapReason *TR) {
assert(IsSanitizerScope);
assert(Checked.size() > 0);
assert(CheckHandler >= 0 &&
@@ -3759,7 +3821,7 @@ void CodeGenFunction::EmitCheck(
}
if (TrapCond)
- EmitTrapCheck(TrapCond, CheckHandler, NoMerge);
+ EmitTrapCheck(TrapCond, CheckHandler, NoMerge, TR);
if (!FatalCond && !RecoverableCond)
return;
@@ -4071,7 +4133,7 @@ void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
SanitizerHandler CheckHandlerID,
- bool NoMerge) {
+ bool NoMerge, const TrapReason *TR) {
llvm::BasicBlock *Cont = createBasicBlock("cont");
// If we're optimizing, collapse all calls to trap down to just one per
@@ -4082,12 +4144,25 @@ void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
- llvm::StringRef TrapMessage = GetUBSanTrapForHandler(CheckHandlerID);
+ llvm::StringRef TrapMessage;
+ llvm::StringRef TrapCategory;
+ auto DebugTrapReasonKind = CGM.getCodeGenOpts().getSanitizeDebugTrapReasons();
+ if (TR && !TR->isEmpty() &&
+ DebugTrapReasonKind ==
+ CodeGenOptions::SanitizeDebugTrapReasonKind::Detailed) {
+ TrapMessage = TR->getMessage();
+ TrapCategory = TR->getCategory();
+ } else {
+ TrapMessage = GetUBSanTrapForHandler(CheckHandlerID);
+ TrapCategory = "Undefined Behavior Sanitizer";
+ }
if (getDebugInfo() && !TrapMessage.empty() &&
- CGM.getCodeGenOpts().SanitizeDebugTrapReasons && TrapLocation) {
+ DebugTrapReasonKind !=
+ CodeGenOptions::SanitizeDebugTrapReasonKind::None &&
+ TrapLocation) {
TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
- TrapLocation, "Undefined Behavior Sanitizer", TrapMessage);
+ TrapLocation, TrapCategory, TrapMessage);
}
NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel ||
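The message selection above, condensed: a per-expression trap reason wins only when one was built and the trap-reason mode is Detailed; otherwise the generic per-handler UBSan string and category apply. A sketch under those assumptions (the enum and helper are illustrative; TrapReason's accessors are the ones used above):

  enum class TrapReasonKind { None, Basic, Detailed };

  struct TrapMsg { llvm::StringRef Category, Message; };

  TrapMsg pickTrapMessage(TrapReasonKind K, const TrapReason *TR,
                          llvm::StringRef HandlerMsg) {
    if (TR && !TR->isEmpty() && K == TrapReasonKind::Detailed)
      return {TR->getCategory(), TR->getMessage()};
    return {"Undefined Behavior Sanitizer", HandlerMsg};
  }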
@@ -4227,21 +4302,6 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
}
}
-static CharUnits getArrayElementAlign(CharUnits arrayAlign,
- llvm::Value *idx,
- CharUnits eltSize) {
- // If we have a constant index, we can use the exact offset of the
- // element we're accessing.
- if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
- CharUnits offset = constantIdx->getZExtValue() * eltSize;
- return arrayAlign.alignmentAtOffset(offset);
-
- // Otherwise, use the worst-case alignment for any element.
- } else {
- return arrayAlign.alignmentOfArrayElement(eltSize);
- }
-}
-
static QualType getFixedSizeElementType(const ASTContext &ctx,
const VariableArrayType *vla) {
QualType eltType;
@@ -4307,7 +4367,9 @@ static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
const auto *PointeeT = PtrT->getPointeeType()
->getUnqualifiedDesugaredType();
if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
- return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
+ return RecT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<BPFPreserveAccessIndexAttr>();
return false;
}
@@ -4532,6 +4594,15 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
LHS.getBaseInfo(), TBAAAccessInfo());
}
+ // The HLSL runtime handles subscript expressions on global resource arrays.
+ if (getLangOpts().HLSL && (E->getType()->isHLSLResourceRecord() ||
+ E->getType()->isHLSLResourceRecordArray())) {
+ std::optional<LValue> LV =
+ CGM.getHLSLRuntime().emitResourceArraySubscriptExpr(E, *this);
+ if (LV.has_value())
+ return *LV;
+ }
+
// All the other cases basically behave like simple offsetting.
// Handle the extvector case we ignored above.
@@ -5073,10 +5144,12 @@ LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
Address Base = GetAddressOfBaseClass(
LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
- LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
+ CanQualType T = getContext().getCanonicalTagType(LambdaTy);
+ LambdaLV = MakeAddrLValue(Base, T);
}
} else {
- QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
+ CanQualType LambdaTagType =
+ getContext().getCanonicalTagType(Field->getParent());
LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
}
return EmitLValueForField(LambdaLV, Field);
@@ -5201,7 +5274,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base, const FieldDecl *field,
}
} else {
llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
- getContext().getRecordType(rec), rec->getLocation());
+ getContext().getCanonicalTagType(rec), rec->getLocation());
Addr = Builder.CreatePreserveStructAccessIndex(
Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
DbgInfo);
@@ -5661,10 +5734,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
- const auto *DerivedClassTy =
- E->getSubExpr()->getType()->castAs<RecordType>();
- auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
-
+ auto *DerivedClassDecl = E->getSubExpr()->getType()->castAsCXXRecordDecl();
LValue LV = EmitLValue(E->getSubExpr());
Address This = LV.getAddress();
@@ -5682,9 +5752,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_ToUnion:
return EmitAggExprToLValue(E);
case CK_BaseToDerived: {
- const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
- auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
-
+ auto *DerivedClassDecl = E->getType()->castAsCXXRecordDecl();
LValue LV = EmitLValue(E->getSubExpr());
// Perform the base-to-derived conversion
@@ -5784,13 +5852,10 @@ LValue CodeGenFunction::EmitHLSLOutArgExpr(const HLSLOutArgExpr *E,
llvm::Value *Addr = TempLV.getAddress().getBasePointer();
llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
- llvm::TypeSize Sz = CGM.getDataLayout().getTypeAllocSize(ElTy);
-
- llvm::Value *LifetimeSize = EmitLifetimeStart(Sz, Addr);
+ EmitLifetimeStart(Addr);
Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
- Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast(),
- LifetimeSize);
+ Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast());
Args.add(RValue::get(TmpAddr, *this), Ty);
return TempLV;
}
@@ -6741,7 +6806,7 @@ void CodeGenFunction::FlattenAccessAndType(
WorkList.emplace_back(CAT->getElementType(), IdxListCopy);
}
} else if (const auto *RT = dyn_cast<RecordType>(T)) {
- const RecordDecl *Record = RT->getDecl();
+ const RecordDecl *Record = RT->getOriginalDecl()->getDefinitionOrSelf();
assert(!Record->isUnion() && "Union types not supported in flat cast.");
const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record);
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index cad6731..b8150a2 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -268,11 +268,11 @@ void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
// Only record types have members that might require garbage collection.
- const RecordType *RecordTy = T->getAs<RecordType>();
- if (!RecordTy) return false;
+ const auto *Record = T->getAsRecordDecl();
+ if (!Record)
+ return false;
// Don't mess with non-trivial C++ types.
- RecordDecl *Record = RecordTy->getDecl();
if (isa<CXXRecordDecl>(Record) &&
(cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
!cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
@@ -300,16 +300,12 @@ void AggExprEmitter::withReturnValueSlot(
Address RetAddr = Address::invalid();
EHScopeStack::stable_iterator LifetimeEndBlock;
- llvm::Value *LifetimeSizePtr = nullptr;
llvm::IntrinsicInst *LifetimeStartInst = nullptr;
if (!UseTemp) {
RetAddr = Dest.getAddress();
} else {
RetAddr = CGF.CreateMemTempWithoutCast(RetTy, "tmp");
- llvm::TypeSize Size =
- CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
- LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAddr.getBasePointer());
- if (LifetimeSizePtr) {
+ if (CGF.EmitLifetimeStart(RetAddr.getBasePointer())) {
LifetimeStartInst =
cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
assert(LifetimeStartInst->getIntrinsicID() ==
@@ -317,7 +313,7 @@ void AggExprEmitter::withReturnValueSlot(
"Last insertion wasn't a lifetime.start?");
CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
- NormalEHLifetimeMarker, RetAddr, LifetimeSizePtr);
+ NormalEHLifetimeMarker, RetAddr);
LifetimeEndBlock = CGF.EHStack.stable_begin();
}
}
@@ -338,7 +334,7 @@ void AggExprEmitter::withReturnValueSlot(
// Since we're not guaranteed to be in an ExprWithCleanups, clean up
// eagerly.
CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
- CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAddr.getBasePointer());
+ CGF.EmitLifetimeEnd(RetAddr.getBasePointer());
}
}
@@ -428,7 +424,7 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
assert(ArrayType && "std::initializer_list constructed from non-array");
- RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
+ auto *Record = E->getType()->castAsRecordDecl();
RecordDecl::field_iterator Field = Record->field_begin();
assert(Field != Record->field_end() &&
Ctx.hasSameType(Field->getType()->getPointeeType(),
@@ -1810,7 +1806,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
// the disadvantage is that the generated code is more difficult for
// the optimizer, especially with bitfields.
unsigned NumInitElements = InitExprs.size();
- RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *record = ExprToVisit->getType()->castAsRecordDecl();
// We'll need to enter cleanup scopes in case any of the element
// initializers throws an exception.
@@ -2118,9 +2114,9 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
// InitListExprs for structs have to be handled carefully. If there are
// reference members, we need to consider the size of the reference, not the
// referencee. InitListExprs for unions and arrays can't have references.
- if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
+ if (const RecordType *RT = E->getType()->getAsCanonical<RecordType>()) {
if (!RT->isUnionType()) {
- RecordDecl *SD = RT->getDecl();
+ RecordDecl *SD = RT->getOriginalDecl()->getDefinitionOrSelf();
CharUnits NumNonZeroBytes = CharUnits::Zero();
unsigned ILEElement = 0;
@@ -2171,8 +2167,9 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
// C++ objects with a user-declared constructor don't need zero'ing.
if (CGF.getLangOpts().CPlusPlus)
if (const RecordType *RT = CGF.getContext()
- .getBaseElementType(E->getType())->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ .getBaseElementType(E->getType())
+ ->getAsCanonical<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getOriginalDecl());
if (RD->hasUserDeclaredConstructor())
return;
}
@@ -2292,8 +2289,7 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
Address SrcPtr = Src.getAddress();
if (getLangOpts().CPlusPlus) {
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
+ if (const auto *Record = Ty->getAsCXXRecordDecl()) {
assert((Record->hasTrivialCopyConstructor() ||
Record->hasTrivialCopyAssignment() ||
Record->hasTrivialMoveConstructor() ||
@@ -2376,8 +2372,7 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
// Don't do any of the memmove_collectable tests if GC isn't set.
if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
// fall through
- } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
- RecordDecl *Record = RecordTy->getDecl();
+ } else if (const auto *Record = Ty->getAsRecordDecl()) {
if (Record->hasObjectMember()) {
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
SizeVal);
@@ -2385,8 +2380,8 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
}
} else if (Ty->isArrayType()) {
QualType BaseType = getContext().getBaseElementType(Ty);
- if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
- if (RecordTy->getDecl()->hasObjectMember()) {
+ if (const auto *Record = BaseType->getAsRecordDecl()) {
+ if (Record->hasObjectMember()) {
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
SizeVal);
return;
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index 49d5d8a..1e4c72a 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -180,8 +180,7 @@ static CXXRecordDecl *getCXXRecord(const Expr *E) {
QualType T = E->getType();
if (const PointerType *PTy = T->getAs<PointerType>())
T = PTy->getPointeeType();
- const RecordType *Ty = T->castAs<RecordType>();
- return cast<CXXRecordDecl>(Ty->getDecl());
+ return T->castAsCXXRecordDecl();
}
// Note: This function also emit constructor calls to support a MSVC
@@ -206,7 +205,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
}
bool HasQualifier = ME->hasQualifier();
- NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
+ NestedNameSpecifier Qualifier = ME->getQualifier();
bool IsArrow = ME->isArrow();
const Expr *Base = ME->getBase();
@@ -217,7 +216,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
- bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
+ bool HasQualifier, NestedNameSpecifier Qualifier, bool IsArrow,
const Expr *Base, llvm::CallBase **CallOrInvoke) {
assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));
@@ -361,7 +360,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (sanitizePerformTypeCheck())
EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
This.emitRawPointer(*this),
- C.getRecordType(CalleeDecl->getParent()),
+ C.getCanonicalTagType(CalleeDecl->getParent()),
/*Alignment=*/CharUnits::Zero(), SkippedChecks);
// C++ [class.virtual]p12:
@@ -461,9 +460,9 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
else
This = EmitLValue(BaseExpr, KnownNonNull).getAddress();
- EmitTypeCheck(
- TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
- QualType(MPT->getMostRecentCXXRecordDecl()->getTypeForDecl(), 0));
+ CanQualType ClassType = CGM.getContext().getCanonicalTagType(RD);
+ EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
+ ClassType);
// Get the member function pointer.
llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
@@ -476,8 +475,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
CallArgList Args;
- QualType ThisType =
- getContext().getPointerType(getContext().getTagDeclType(RD));
+ QualType ThisType = getContext().getPointerType(ClassType);
// Push the this ptr.
Args.add(RValue::get(ThisPtrForCall), ThisType);
@@ -498,7 +496,7 @@ RValue CodeGenFunction::EmitCXXOperatorMemberCallExpr(
assert(MD->isImplicitObjectMemberFunction() &&
"Trying to emit a member call expr on a static method!");
return EmitCXXMemberOrOperatorMemberCallExpr(
- E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
+ E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/std::nullopt,
/*IsArrow=*/false, E->getArg(0), CallOrInvoke);
}
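These hunks track NestedNameSpecifier becoming a value type in this series: an absent qualifier is an empty value, spelled std::nullopt at call sites, rather than a null pointer. The new call-site pattern, assuming a MemberExpr *ME:

  NestedNameSpecifier Qualifier = ME->getQualifier(); // empty if none written
  bool HasQualifier = ME->hasQualifier();
  // Call sites that previously passed /*Qualifier=*/nullptr now pass
  // /*Qualifier=*/std::nullopt.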
@@ -1236,12 +1234,14 @@ void CodeGenFunction::EmitNewArrayInitializer(
// If we have a struct whose every field is value-initialized, we can
// usually use memset.
if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
- if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
- if (RType->getDecl()->isStruct()) {
+ if (const RecordType *RType =
+ ILE->getType()->getAsCanonical<RecordType>()) {
+ if (RType->getOriginalDecl()->isStruct()) {
+ const RecordDecl *RD = RType->getOriginalDecl()->getDefinitionOrSelf();
unsigned NumElements = 0;
- if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
NumElements = CXXRD->getNumBases();
- for (auto *Field : RType->getDecl()->fields())
+ for (auto *Field : RD->fields())
if (!Field->isUnnamedBitField())
++NumElements;
// FIXME: Recurse into nested InitListExprs.
@@ -1688,8 +1688,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
if (allocatorType->getNumParams() > IndexOfAlignArg) {
AlignValT = allocatorType->getParamType(IndexOfAlignArg);
assert(getContext().hasSameUnqualifiedType(
- AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
- sizeType) &&
+ AlignValT->castAsEnumDecl()->getIntegerType(), sizeType) &&
"wrong type for alignment parameter");
++ParamsToSkip;
} else {
@@ -1971,8 +1970,7 @@ static bool EmitObjectDelete(CodeGenFunction &CGF,
// Find the destructor for the type, if applicable. If the
// destructor is virtual, we'll just emit the vcall and return.
const CXXDestructorDecl *Dtor = nullptr;
- if (const RecordType *RT = ElementType->getAs<RecordType>()) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (const auto *RD = ElementType->getAsCXXRecordDecl()) {
if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
Dtor = RD->getDestructor();
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
index 715bd39..b44dd9e 100644
--- a/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -714,7 +714,7 @@ static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
}
bool ConstStructBuilder::Build(const InitListExpr *ILE, bool AllowOverwrite) {
- RecordDecl *RD = ILE->getType()->castAs<RecordType>()->getDecl();
+ auto *RD = ILE->getType()->castAsRecordDecl();
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
unsigned FieldNo = -1;
@@ -873,8 +873,9 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
for (const BaseInfo &Base : Bases) {
bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
- Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
- VTableClass, Offset + Base.Offset);
+ if (!Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
+ VTableClass, Offset + Base.Offset))
+ return false;
}
}
@@ -977,7 +978,7 @@ bool ConstStructBuilder::DoZeroInitPadding(const ASTRecordLayout &Layout,
llvm::Constant *ConstStructBuilder::Finalize(QualType Type) {
Type = Type.getNonReferenceType();
- RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
+ auto *RD = Type->castAsRecordDecl();
llvm::Type *ValTy = CGM.getTypes().ConvertType(Type);
return Builder.build(ValTy, RD->hasFlexibleArrayMember());
}
@@ -1000,7 +1001,7 @@ llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
ConstantAggregateBuilder Const(Emitter.CGM);
ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());
- const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
+ const auto *RD = ValTy->castAsRecordDecl();
const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
if (!Builder.Build(Val, RD, false, CD, CharUnits::Zero()))
return nullptr;
@@ -1505,8 +1506,8 @@ public:
llvm::Type *ValTy = CGM.getTypes().ConvertType(destType);
bool HasFlexibleArray = false;
- if (const auto *RT = destType->getAs<RecordType>())
- HasFlexibleArray = RT->getDecl()->hasFlexibleArrayMember();
+ if (const auto *RD = destType->getAsRecordDecl())
+ HasFlexibleArray = RD->hasFlexibleArrayMember();
return Const.build(ValTy, HasFlexibleArray);
}
@@ -1620,7 +1621,7 @@ llvm::Constant *ConstantEmitter::tryEmitConstantExpr(const ConstantExpr *CE) {
if (CE->isGLValue())
RetType = CGM.getContext().getLValueReferenceType(RetType);
- return emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(), RetType);
+ return tryEmitAbstract(CE->getAPValueResult(), RetType);
}
llvm::Constant *
@@ -2639,9 +2640,7 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
continue;
}
- const CXXRecordDecl *base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
-
+ const auto *base = I.getType()->castAsCXXRecordDecl();
// Ignore empty bases.
if (isEmptyRecordForLayout(CGM.getContext(), I.getType()) ||
CGM.getContext()
@@ -2679,9 +2678,7 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
// Fill in the virtual bases, if we're working with the complete object.
if (CXXR && asCompleteObject) {
for (const auto &I : CXXR->vbases()) {
- const CXXRecordDecl *base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
-
+ const auto *base = I.getType()->castAsCXXRecordDecl();
// Ignore empty bases.
if (isEmptyRecordForLayout(CGM.getContext(), I.getType()))
continue;
@@ -2745,8 +2742,9 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
return llvm::ConstantArray::get(ATy, Array);
}
- if (const RecordType *RT = T->getAs<RecordType>())
- return ::EmitNullConstant(*this, RT->getDecl(), /*complete object*/ true);
+ if (const auto *RD = T->getAsRecordDecl())
+ return ::EmitNullConstant(*this, RD,
+ /*asCompleteObject=*/true);
assert(T->isMemberDataPointerType() &&
"Should only see pointers to data members here!");
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 44931d0..2eff3a3 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -21,6 +21,7 @@
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
+#include "TrapReasonBuilder.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
@@ -29,6 +30,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/DiagnosticTrap.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/Argument.h"
@@ -1813,6 +1815,7 @@ void ScalarExprEmitter::EmitBinOpCheck(
SanitizerHandler Check;
SmallVector<llvm::Constant *, 4> StaticData;
SmallVector<llvm::Value *, 2> DynamicData;
+ TrapReason TR;
BinaryOperatorKind Opcode = Info.Opcode;
if (BinaryOperator::isCompoundAssignmentOp(Opcode))
@@ -1839,19 +1842,43 @@ void ScalarExprEmitter::EmitBinOpCheck(
StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
} else {
// Arithmetic overflow (+, -, *).
+ int ArithOverflowKind = 0;
switch (Opcode) {
- case BO_Add: Check = SanitizerHandler::AddOverflow; break;
- case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
- case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
- default: llvm_unreachable("unexpected opcode for bin op check");
+ case BO_Add: {
+ Check = SanitizerHandler::AddOverflow;
+ ArithOverflowKind = diag::UBSanArithKind::Add;
+ break;
+ }
+ case BO_Sub: {
+ Check = SanitizerHandler::SubOverflow;
+ ArithOverflowKind = diag::UBSanArithKind::Sub;
+ break;
+ }
+ case BO_Mul: {
+ Check = SanitizerHandler::MulOverflow;
+ ArithOverflowKind = diag::UBSanArithKind::Mul;
+ break;
+ }
+ default:
+ llvm_unreachable("unexpected opcode for bin op check");
}
StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
+ if (CGF.CGM.getCodeGenOpts().SanitizeTrap.has(
+ SanitizerKind::UnsignedIntegerOverflow) ||
+ CGF.CGM.getCodeGenOpts().SanitizeTrap.has(
+ SanitizerKind::SignedIntegerOverflow)) {
+ // Only pay the cost for constructing the trap diagnostic if they are
+ // going to be used.
+ CGF.CGM.BuildTrapReason(diag::trap_ubsan_arith_overflow, TR)
+ << Info.Ty->isSignedIntegerOrEnumerationType() << ArithOverflowKind
+ << Info.E;
+ }
}
DynamicData.push_back(Info.LHS);
DynamicData.push_back(Info.RHS);
}
- CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
+ CGF.EmitCheck(Checks, Check, StaticData, DynamicData, &TR);
}
//===----------------------------------------------------------------------===//
@@ -3515,7 +3542,7 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
case OffsetOfNode::Field: {
FieldDecl *MemberDecl = ON.getField();
- RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
+ auto *RD = CurrentType->castAsRecordDecl();
const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
// Compute the index of the field in its parent.
@@ -3548,15 +3575,14 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
continue;
}
- RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
- const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(
+ CurrentType->castAsCanonical<RecordType>()->getOriginalDecl());
// Save the element type.
CurrentType = ON.getBase()->getType();
// Compute the offset to the base.
- auto *BaseRT = CurrentType->castAs<RecordType>();
- auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
+ auto *BaseRD = CurrentType->castAsCXXRecordDecl();
CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
break;
@@ -4183,9 +4209,10 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
return phi;
}
-/// Emit pointer + index arithmetic.
-static Value *emitPointerArithmetic(CodeGenFunction &CGF,
- const BinOpInfo &op,
+/// BO_Add/BO_Sub are handled by EmitPointerWithAlignment to preserve alignment
+/// information.
+/// This function is used for BO_AddAssign/BO_SubAssign.
+static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
bool isSubtraction) {
// Must have binary (not unary) expr here. Unary pointer
// increment/decrement doesn't use this path.
@@ -4202,11 +4229,19 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
std::swap(pointerOperand, indexOperand);
}
+ return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer, indexOperand,
+ index, isSubtraction);
+}
+
+/// Emit pointer + index arithmetic.
+llvm::Value *CodeGenFunction::EmitPointerArithmetic(
+ const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
+ Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
- auto &DL = CGF.CGM.getDataLayout();
- auto PtrTy = cast<llvm::PointerType>(pointer->getType());
+ auto &DL = CGM.getDataLayout();
+ auto *PtrTy = cast<llvm::PointerType>(pointer->getType());
// Some versions of glibc and gcc use idioms (particularly in their malloc
// routines) that add a pointer-sized integer (known to be a pointer value)
@@ -4227,79 +4262,77 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
//
// Note that we do not suppress the pointer overflow check in this case.
if (BinaryOperator::isNullPointerArithmeticExtension(
- CGF.getContext(), op.Opcode, expr->getLHS(), expr->getRHS())) {
- Value *Ptr = CGF.Builder.CreateIntToPtr(index, pointer->getType());
- if (CGF.getLangOpts().PointerOverflowDefined ||
- !CGF.SanOpts.has(SanitizerKind::PointerOverflow) ||
- NullPointerIsDefined(CGF.Builder.GetInsertBlock()->getParent(),
+ getContext(), BO->getOpcode(), pointerOperand, indexOperand)) {
+ llvm::Value *Ptr = Builder.CreateIntToPtr(index, pointer->getType());
+ if (getLangOpts().PointerOverflowDefined ||
+ !SanOpts.has(SanitizerKind::PointerOverflow) ||
+ NullPointerIsDefined(Builder.GetInsertBlock()->getParent(),
PtrTy->getPointerAddressSpace()))
return Ptr;
// The inbounds GEP of null is valid iff the index is zero.
auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
auto CheckHandler = SanitizerHandler::PointerOverflow;
- SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
- Value *IsZeroIndex = CGF.Builder.CreateIsNull(index);
- llvm::Constant *StaticArgs[] = {
- CGF.EmitCheckSourceLocation(op.E->getExprLoc())};
+ SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
+ llvm::Value *IsZeroIndex = Builder.CreateIsNull(index);
+ llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(BO->getExprLoc())};
llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
- Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
- Value *ComputedGEP = CGF.Builder.CreateZExtOrTrunc(index, IntPtrTy);
- Value *DynamicArgs[] = {IntPtr, ComputedGEP};
- CGF.EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
- DynamicArgs);
+ llvm::Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
+ llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(index, IntPtrTy);
+ llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
+ EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
+ DynamicArgs);
return Ptr;
}
if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
// Zero-extend or sign-extend the pointer value according to
// whether the index is signed or not.
- index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
- "idx.ext");
+ index = Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
+ "idx.ext");
}
// If this is subtraction, negate the index.
if (isSubtraction)
- index = CGF.Builder.CreateNeg(index, "idx.neg");
+ index = Builder.CreateNeg(index, "idx.neg");
- if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
- CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
- /*Accessed*/ false);
+ if (SanOpts.has(SanitizerKind::ArrayBounds))
+ EmitBoundsCheck(BO, pointerOperand, index, indexOperand->getType(),
+ /*Accessed*/ false);
- const PointerType *pointerType
- = pointerOperand->getType()->getAs<PointerType>();
+ const PointerType *pointerType =
+ pointerOperand->getType()->getAs<PointerType>();
if (!pointerType) {
QualType objectType = pointerOperand->getType()
- ->castAs<ObjCObjectPointerType>()
- ->getPointeeType();
- llvm::Value *objectSize
- = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
+ ->castAs<ObjCObjectPointerType>()
+ ->getPointeeType();
+ llvm::Value *objectSize =
+ CGM.getSize(getContext().getTypeSizeInChars(objectType));
- index = CGF.Builder.CreateMul(index, objectSize);
+ index = Builder.CreateMul(index, objectSize);
- Value *result =
- CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
- return CGF.Builder.CreateBitCast(result, pointer->getType());
+ llvm::Value *result = Builder.CreateGEP(Int8Ty, pointer, index, "add.ptr");
+ return Builder.CreateBitCast(result, pointer->getType());
}
QualType elementType = pointerType->getPointeeType();
- if (const VariableArrayType *vla
- = CGF.getContext().getAsVariableArrayType(elementType)) {
+ if (const VariableArrayType *vla =
+ getContext().getAsVariableArrayType(elementType)) {
// The element count here is the total number of non-VLA elements.
- llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
+ llvm::Value *numElements = getVLASize(vla).NumElts;
// Effectively, the multiply by the VLA size is part of the GEP.
// GEP indexes are signed, and scaling an index isn't permitted to
// signed-overflow, so we use the same semantics for our explicit
// multiply. We suppress this if overflow is not undefined behavior.
- llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
- if (CGF.getLangOpts().PointerOverflowDefined) {
- index = CGF.Builder.CreateMul(index, numElements, "vla.index");
- pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
+ llvm::Type *elemTy = ConvertTypeForMem(vla->getElementType());
+ if (getLangOpts().PointerOverflowDefined) {
+ index = Builder.CreateMul(index, numElements, "vla.index");
+ pointer = Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
} else {
- index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
- pointer = CGF.EmitCheckedInBoundsGEP(
- elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
- "add.ptr");
+ index = Builder.CreateNSWMul(index, numElements, "vla.index");
+ pointer =
+ EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
+ isSubtraction, BO->getExprLoc(), "add.ptr");
}
return pointer;
}
@@ -4309,16 +4342,15 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// future proof.
llvm::Type *elemTy;
if (elementType->isVoidType() || elementType->isFunctionType())
- elemTy = CGF.Int8Ty;
+ elemTy = Int8Ty;
else
- elemTy = CGF.ConvertTypeForMem(elementType);
+ elemTy = ConvertTypeForMem(elementType);
- if (CGF.getLangOpts().PointerOverflowDefined)
- return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
+ if (getLangOpts().PointerOverflowDefined)
+ return Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
- return CGF.EmitCheckedInBoundsGEP(
- elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
- "add.ptr");
+ return EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction,
+ BO->getExprLoc(), "add.ptr");
}
// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
@@ -5551,8 +5583,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// OpenCL: If the condition is a vector, we can treat this condition like
// the select function.
- if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
- condExpr->getType()->isExtVectorType()) {
+ if (CGF.getLangOpts().OpenCL && (condExpr->getType()->isVectorType() ||
+ condExpr->getType()->isExtVectorType())) {
CGF.incrementProfileCounter(E);
llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
@@ -5601,9 +5633,16 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Type *CondType = ConvertType(condExpr->getType());
auto *VecTy = cast<llvm::VectorType>(CondType);
- llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
- CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
+ if (VecTy->getElementType()->isIntegerTy(1))
+ return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
+
+ // OpenCL uses the MSB of the mask vector.
+ llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
+ if (condExpr->getType()->isExtVectorType())
+ CondV = Builder.CreateICmpSLT(CondV, ZeroVec, "vector_cond");
+ else
+ CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
}
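A sketch (not part of the patch) of the OpenCL semantics the change above
implements: a vector condition selects per lane on its most significant bit.

    // OpenCL C sketch: vector comparisons yield -1 (all bits set) for true
    // lanes and 0 for false lanes, so testing 'cond < 0' (the MSB) picks the
    // true-lane operand, matching the new CreateICmpSLT lowering.
    int4   cond = (a > b);      // -1 or 0 per lane
    float4 r    = cond ? x : y; // per-lane select on the MSB of 'cond'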
diff --git a/clang/lib/CodeGen/CGHLSLBuiltins.cpp b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
index 5816518..32e930b 100644
--- a/clang/lib/CodeGen/CGHLSLBuiltins.cpp
+++ b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
@@ -335,15 +335,9 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
Value *RangeOp = EmitScalarExpr(E->getArg(3));
Value *IndexOp = EmitScalarExpr(E->getArg(4));
Value *Name = EmitScalarExpr(E->getArg(5));
- // FIXME: NonUniformResourceIndex bit is not yet implemented
- // (llvm/llvm-project#135452)
- Value *NonUniform =
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(getLLVMContext()), false);
-
llvm::Intrinsic::ID IntrinsicID =
CGM.getHLSLRuntime().getCreateHandleFromBindingIntrinsic();
- SmallVector<Value *> Args{SpaceOp, RegisterOp, RangeOp,
- IndexOp, NonUniform, Name};
+ SmallVector<Value *> Args{SpaceOp, RegisterOp, RangeOp, IndexOp, Name};
return Builder.CreateIntrinsic(HandleTy, IntrinsicID, Args);
}
case Builtin::BI__builtin_hlsl_resource_handlefromimplicitbinding: {
@@ -353,15 +347,9 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
Value *IndexOp = EmitScalarExpr(E->getArg(3));
Value *OrderID = EmitScalarExpr(E->getArg(4));
Value *Name = EmitScalarExpr(E->getArg(5));
- // FIXME: NonUniformResourceIndex bit is not yet implemented
- // (llvm/llvm-project#135452)
- Value *NonUniform =
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(getLLVMContext()), false);
-
llvm::Intrinsic::ID IntrinsicID =
CGM.getHLSLRuntime().getCreateHandleFromImplicitBindingIntrinsic();
- SmallVector<Value *> Args{OrderID, SpaceOp, RangeOp,
- IndexOp, NonUniform, Name};
+ SmallVector<Value *> Args{OrderID, SpaceOp, RangeOp, IndexOp, Name};
return Builder.CreateIntrinsic(HandleTy, IntrinsicID, Args);
}
case Builtin::BI__builtin_hlsl_all: {
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp
index f64ac20..f32d01a 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Basic/TargetOptions.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/HLSL/RootSignatureMetadata.h"
#include "llvm/IR/Constants.h"
@@ -84,6 +85,111 @@ void addRootSignature(llvm::dxbc::RootSignatureVersion RootSigVer,
RootSignatureValMD->addOperand(MDVals);
}
+// Find array variable declaration from nested array subscript AST nodes
+static const ValueDecl *getArrayDecl(const ArraySubscriptExpr *ASE) {
+ const Expr *E = nullptr;
+ while (ASE != nullptr) {
+ E = ASE->getBase()->IgnoreImpCasts();
+ if (!E)
+ return nullptr;
+ ASE = dyn_cast<ArraySubscriptExpr>(E);
+ }
+ if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(E))
+ return DRE->getDecl();
+ return nullptr;
+}
+
+// Get the total size of the array, or -1 if the array is unbounded.
+static int getTotalArraySize(ASTContext &AST, const clang::Type *Ty) {
+ Ty = Ty->getUnqualifiedDesugaredType();
+ assert(Ty->isArrayType() && "expected array type");
+ if (Ty->isIncompleteArrayType())
+ return -1;
+ return AST.getConstantArrayElementCount(cast<ConstantArrayType>(Ty));
+}
+
+// Find constructor decl for a specific resource record type and binding
+// (implicit vs. explicit). The constructor has 5 parameters.
+// For explicit binding the signature is:
+// void(unsigned, unsigned, int, unsigned, const char *).
+// For implicit binding the signature is:
+// void(unsigned, int, unsigned, unsigned, const char *).
+static CXXConstructorDecl *findResourceConstructorDecl(ASTContext &AST,
+ QualType ResTy,
+ bool ExplicitBinding) {
+ std::array<QualType, 5> ExpParmTypes = {
+ AST.UnsignedIntTy, AST.UnsignedIntTy, AST.UnsignedIntTy,
+ AST.UnsignedIntTy, AST.getPointerType(AST.CharTy.withConst())};
+ ExpParmTypes[ExplicitBinding ? 2 : 1] = AST.IntTy;
+
+ CXXRecordDecl *ResDecl = ResTy->getAsCXXRecordDecl();
+ for (auto *Ctor : ResDecl->ctors()) {
+ if (Ctor->getNumParams() != ExpParmTypes.size())
+ continue;
+ auto *ParmIt = Ctor->param_begin();
+ auto ExpTyIt = ExpParmTypes.begin();
+ for (; ParmIt != Ctor->param_end() && ExpTyIt != ExpParmTypes.end();
+ ++ParmIt, ++ExpTyIt) {
+ if ((*ParmIt)->getType() != *ExpTyIt)
+ break;
+ }
+ if (ParmIt == Ctor->param_end())
+ return Ctor;
+ }
+ llvm_unreachable("did not find constructor for resource class");
+}
+
+static Value *buildNameForResource(llvm::StringRef BaseName,
+ CodeGenModule &CGM) {
+ llvm::SmallString<64> GlobalName = {BaseName, ".str"};
+ return CGM.GetAddrOfConstantCString(BaseName.str(), GlobalName.c_str())
+ .getPointer();
+}
+
+static void createResourceCtorArgs(CodeGenModule &CGM, CXXConstructorDecl *CD,
+ llvm::Value *ThisPtr, llvm::Value *Range,
+ llvm::Value *Index, StringRef Name,
+ HLSLResourceBindingAttr *RBA,
+ HLSLVkBindingAttr *VkBinding,
+ CallArgList &Args) {
+  assert((VkBinding || RBA) && "at least one binding attribute expected");
+
+ std::optional<uint32_t> RegisterSlot;
+ uint32_t SpaceNo = 0;
+ if (VkBinding) {
+ RegisterSlot = VkBinding->getBinding();
+ SpaceNo = VkBinding->getSet();
+ } else {
+ if (RBA->hasRegisterSlot())
+ RegisterSlot = RBA->getSlotNumber();
+ SpaceNo = RBA->getSpaceNumber();
+ }
+
+ ASTContext &AST = CD->getASTContext();
+ Value *NameStr = buildNameForResource(Name, CGM);
+ Value *Space = llvm::ConstantInt::get(CGM.IntTy, SpaceNo);
+
+ Args.add(RValue::get(ThisPtr), CD->getThisType());
+ if (RegisterSlot.has_value()) {
+ // explicit binding
+ auto *RegSlot = llvm::ConstantInt::get(CGM.IntTy, RegisterSlot.value());
+ Args.add(RValue::get(RegSlot), AST.UnsignedIntTy);
+ Args.add(RValue::get(Space), AST.UnsignedIntTy);
+ Args.add(RValue::get(Range), AST.IntTy);
+ Args.add(RValue::get(Index), AST.UnsignedIntTy);
+
+ } else {
+ // implicit binding
+ auto *OrderID =
+ llvm::ConstantInt::get(CGM.IntTy, RBA->getImplicitBindingOrderID());
+ Args.add(RValue::get(Space), AST.UnsignedIntTy);
+ Args.add(RValue::get(Range), AST.IntTy);
+ Args.add(RValue::get(Index), AST.UnsignedIntTy);
+ Args.add(RValue::get(OrderID), AST.UnsignedIntTy);
+ }
+ Args.add(RValue::get(NameStr), AST.getPointerType(AST.CharTy.withConst()));
+}
+
} // namespace
llvm::Type *
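A sketch (not part of the patch) of how a source-level binding feeds the
argument list assembled by createResourceCtorArgs, assuming the explicit-binding
constructor signature documented above findResourceConstructorDecl:

    // HLSL sketch: for this declaration, the explicit-binding constructor is
    // called with roughly (this, /*RegSlot=*/3, /*Space=*/1, /*Range=*/12,
    // Index, "Buf"), where 12 is the flattened element count of the array.
    RWBuffer<float> Buf[4][3] : register(u3, space1);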
@@ -103,13 +209,6 @@ llvm::Triple::ArchType CGHLSLRuntime::getArch() {
return CGM.getTarget().getTriple().getArch();
}
-// Returns true if the type is an HLSL resource class or an array of them
-static bool isResourceRecordTypeOrArrayOf(const clang::Type *Ty) {
- while (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
- Ty = CAT->getArrayElementTypeNoTypeQual();
- return Ty->isHLSLResourceRecord();
-}
-
// Emits constant global variables for buffer constants declarations
// and creates metadata linking the constant globals with the buffer global.
void CGHLSLRuntime::emitBufferGlobalsAndMetadata(const HLSLBufferDecl *BufDecl,
@@ -146,7 +245,7 @@ void CGHLSLRuntime::emitBufferGlobalsAndMetadata(const HLSLBufferDecl *BufDecl,
if (VDTy.getAddressSpace() != LangAS::hlsl_constant) {
if (VD->getStorageClass() == SC_Static ||
VDTy.getAddressSpace() == LangAS::hlsl_groupshared ||
- isResourceRecordTypeOrArrayOf(VDTy.getTypePtr())) {
+ VDTy->isHLSLResourceRecord() || VDTy->isHLSLResourceRecordArray()) {
// Emit static and groupshared variables and resource classes inside
// cbuffer as regular globals
CGM.EmitGlobal(VD);
@@ -186,8 +285,7 @@ static const clang::HLSLAttributedResourceType *
createBufferHandleType(const HLSLBufferDecl *BufDecl) {
ASTContext &AST = BufDecl->getASTContext();
QualType QT = AST.getHLSLAttributedResourceType(
- AST.HLSLResourceTy,
- QualType(BufDecl->getLayoutStruct()->getTypeForDecl(), 0),
+ AST.HLSLResourceTy, AST.getCanonicalTagType(BufDecl->getLayoutStruct()),
HLSLAttributedResourceType::Attributes(ResourceClass::CBuffer));
return cast<HLSLAttributedResourceType>(QT.getTypePtr());
}
@@ -597,19 +695,10 @@ static void initializeBuffer(CodeGenModule &CGM, llvm::GlobalVariable *GV,
CGM.AddCXXGlobalInit(InitResFunc);
}
-static Value *buildNameForResource(llvm::StringRef BaseName,
- CodeGenModule &CGM) {
- std::string Str(BaseName);
- std::string GlobalName(Str + ".str");
- return CGM.GetAddrOfConstantCString(Str, GlobalName.c_str()).getPointer();
-}
-
void CGHLSLRuntime::initializeBufferFromBinding(const HLSLBufferDecl *BufDecl,
llvm::GlobalVariable *GV,
HLSLVkBindingAttr *VkBinding) {
assert(VkBinding && "expect a nonnull binding attribute");
- llvm::Type *Int1Ty = llvm::Type::getInt1Ty(CGM.getLLVMContext());
- auto *NonUniform = llvm::ConstantInt::get(Int1Ty, false);
auto *Index = llvm::ConstantInt::get(CGM.IntTy, 0);
auto *RangeSize = llvm::ConstantInt::get(CGM.IntTy, 1);
auto *Set = llvm::ConstantInt::get(CGM.IntTy, VkBinding->getSet());
@@ -618,7 +707,7 @@ void CGHLSLRuntime::initializeBufferFromBinding(const HLSLBufferDecl *BufDecl,
llvm::Intrinsic::ID IntrinsicID =
CGM.getHLSLRuntime().getCreateHandleFromBindingIntrinsic();
- SmallVector<Value *> Args{Set, Binding, RangeSize, Index, NonUniform, Name};
+ SmallVector<Value *> Args{Set, Binding, RangeSize, Index, Name};
initializeBuffer(CGM, GV, IntrinsicID, Args);
}
@@ -626,34 +715,26 @@ void CGHLSLRuntime::initializeBufferFromBinding(const HLSLBufferDecl *BufDecl,
llvm::GlobalVariable *GV,
HLSLResourceBindingAttr *RBA) {
assert(RBA && "expect a nonnull binding attribute");
- llvm::Type *Int1Ty = llvm::Type::getInt1Ty(CGM.getLLVMContext());
- auto *NonUniform = llvm::ConstantInt::get(Int1Ty, false);
auto *Index = llvm::ConstantInt::get(CGM.IntTy, 0);
auto *RangeSize = llvm::ConstantInt::get(CGM.IntTy, 1);
auto *Space = llvm::ConstantInt::get(CGM.IntTy, RBA->getSpaceNumber());
- Value *Name = nullptr;
+ Value *Name = buildNameForResource(BufDecl->getName(), CGM);
llvm::Intrinsic::ID IntrinsicID =
RBA->hasRegisterSlot()
? CGM.getHLSLRuntime().getCreateHandleFromBindingIntrinsic()
: CGM.getHLSLRuntime().getCreateHandleFromImplicitBindingIntrinsic();
- std::string Str(BufDecl->getName());
- std::string GlobalName(Str + ".str");
- Name = CGM.GetAddrOfConstantCString(Str, GlobalName.c_str()).getPointer();
-
// buffer with explicit binding
if (RBA->hasRegisterSlot()) {
auto *RegSlot = llvm::ConstantInt::get(CGM.IntTy, RBA->getSlotNumber());
- SmallVector<Value *> Args{Space, RegSlot, RangeSize,
- Index, NonUniform, Name};
+ SmallVector<Value *> Args{Space, RegSlot, RangeSize, Index, Name};
initializeBuffer(CGM, GV, IntrinsicID, Args);
} else {
// buffer with implicit binding
auto *OrderID =
llvm::ConstantInt::get(CGM.IntTy, RBA->getImplicitBindingOrderID());
- SmallVector<Value *> Args{OrderID, Space, RangeSize,
- Index, NonUniform, Name};
+ SmallVector<Value *> Args{OrderID, Space, RangeSize, Index, Name};
initializeBuffer(CGM, GV, IntrinsicID, Args);
}
}
@@ -708,3 +789,91 @@ void CGHLSLRuntime::emitInitListOpaqueValues(CodeGenFunction &CGF,
}
}
}
+
+std::optional<LValue> CGHLSLRuntime::emitResourceArraySubscriptExpr(
+ const ArraySubscriptExpr *ArraySubsExpr, CodeGenFunction &CGF) {
+  assert((ArraySubsExpr->getType()->isHLSLResourceRecord() ||
+          ArraySubsExpr->getType()->isHLSLResourceRecordArray()) &&
+         "expected resource array subscript expression");
+
+  // Let Clang's default codegen handle local resource array subscripts.
+  const VarDecl *ArrayDecl =
+      dyn_cast_or_null<VarDecl>(getArrayDecl(ArraySubsExpr));
+ if (!ArrayDecl || !ArrayDecl->hasGlobalStorage())
+ return std::nullopt;
+
+ if (ArraySubsExpr->getType()->isArrayType())
+ // FIXME: this is not yet implemented (llvm/llvm-project#145426)
+ llvm_unreachable(
+ "indexing of sub-arrays of multidimensional arrays not supported yet");
+
+  // Get the resource array type.
+ ASTContext &AST = ArrayDecl->getASTContext();
+ const Type *ResArrayTy = ArrayDecl->getType().getTypePtr();
+ assert(ResArrayTy->isHLSLResourceRecordArray() &&
+ "expected array of resource classes");
+
+ // Iterate through all nested array subscript expressions to calculate
+ // the index in the flattened resource array (if this is a multi-
+ // dimensional array). The index is calculated as a sum of all indices
+ // multiplied by the total size of the array at that level.
+ Value *Index = nullptr;
+ const ArraySubscriptExpr *ASE = ArraySubsExpr;
+ while (ASE != nullptr) {
+ Value *SubIndex = CGF.EmitScalarExpr(ASE->getIdx());
+ if (const auto *ArrayTy =
+ dyn_cast<ConstantArrayType>(ASE->getType().getTypePtr())) {
+ Value *Multiplier = llvm::ConstantInt::get(
+ CGM.IntTy, AST.getConstantArrayElementCount(ArrayTy));
+ SubIndex = CGF.Builder.CreateMul(SubIndex, Multiplier);
+ }
+
+ Index = Index ? CGF.Builder.CreateAdd(Index, SubIndex) : SubIndex;
+ ASE = dyn_cast<ArraySubscriptExpr>(ASE->getBase()->IgnoreParenImpCasts());
+ }
+
+  // Find binding info for the resource array. For implicit binding, an
+  // HLSLResourceBindingAttr should have been added by SemaHLSL.
+ QualType ResourceTy = ArraySubsExpr->getType();
+ HLSLVkBindingAttr *VkBinding = ArrayDecl->getAttr<HLSLVkBindingAttr>();
+ HLSLResourceBindingAttr *RBA = ArrayDecl->getAttr<HLSLResourceBindingAttr>();
+ assert((VkBinding || RBA) && "resource array must have a binding attribute");
+
+  // Look up the resource class constructor based on the resource type and
+  // binding kind.
+ CXXConstructorDecl *CD = findResourceConstructorDecl(
+ AST, ResourceTy, VkBinding || RBA->hasRegisterSlot());
+
+  // Create a temporary variable for the resource class instance (we need to
+  // return an LValue).
+ RawAddress TmpVar = CGF.CreateMemTemp(ResourceTy);
+ if (CGF.EmitLifetimeStart(TmpVar.getPointer()))
+ CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
+ NormalEHLifetimeMarker, TmpVar);
+
+ AggValueSlot ValueSlot = AggValueSlot::forAddr(
+ TmpVar, Qualifiers(), AggValueSlot::IsDestructed_t(true),
+ AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsAliased_t(false),
+ AggValueSlot::DoesNotOverlap);
+
+ Address ThisAddress = ValueSlot.getAddress();
+ llvm::Value *ThisPtr = CGF.getAsNaturalPointerTo(
+ ThisAddress, CD->getThisType()->getPointeeType());
+
+  // Get the total array size (= range size).
+ llvm::Value *Range =
+ llvm::ConstantInt::get(CGM.IntTy, getTotalArraySize(AST, ResArrayTy));
+
+  // Assemble the constructor arguments.
+ CallArgList Args;
+ createResourceCtorArgs(CGM, CD, ThisPtr, Range, Index, ArrayDecl->getName(),
+ RBA, VkBinding, Args);
+
+  // Call the constructor.
+ CGF.EmitCXXConstructorCall(CD, Ctor_Complete, false, false, ThisAddress, Args,
+ ValueSlot.mayOverlap(),
+ ArraySubsExpr->getExprLoc(),
+ ValueSlot.isSanitizerChecked());
+
+ return CGF.MakeAddrLValue(TmpVar, ArraySubsExpr->getType(),
+ AlignmentSource::Decl);
+}
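A sketch (not part of the patch) of the index flattening computed by the loop
above, for a hypothetical two-dimensional resource array:

    // For 'RWBuffer<float> Buf[4][3]', the subscript 'Buf[i][j]' lowers to one
    // index into a range of 12: each outer index is scaled by the element
    // count of the sub-array it selects, then the inner index is added.
    unsigned flattenIndex(unsigned i, unsigned j) {
      return i * 3 + j;
    }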
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h b/clang/lib/CodeGen/CGHLSLRuntime.h
index 31d1728..b872f9e 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.h
+++ b/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -68,6 +68,7 @@ class Type;
class RecordType;
class DeclContext;
class HLSLPackOffsetAttr;
+class ArraySubscriptExpr;
class FunctionDecl;
@@ -75,6 +76,7 @@ namespace CodeGen {
class CodeGenModule;
class CodeGenFunction;
+class LValue;
class CGHLSLRuntime {
public:
@@ -164,6 +166,10 @@ public:
llvm::TargetExtType *LayoutTy);
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E);
+ std::optional<LValue>
+ emitResourceArraySubscriptExpr(const ArraySubscriptExpr *E,
+ CodeGenFunction &CGF);
+
private:
void emitBufferGlobalsAndMetadata(const HLSLBufferDecl *BufDecl,
llvm::GlobalVariable *BufGV);
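A sketch (not part of the patch) of the intended caller shape for the new entry
point, given that std::nullopt means "fall back to default subscript lowering";
the surrounding code here is hypothetical:

    // Try the HLSL resource-array path first; a nullopt result (e.g. a local
    // resource array) falls through to the usual ArraySubscriptExpr codegen.
    if (std::optional<LValue> LV =
            CGM.getHLSLRuntime().emitResourceArraySubscriptExpr(E, CGF))
      return *LV;
    // ... default array subscript lowering ...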
diff --git a/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index e0983ef..2d70e4c 100644
--- a/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -39,7 +39,7 @@ template <class Derived> struct StructVisitor {
template <class... Ts>
void visitStructFields(QualType QT, CharUnits CurStructOffset, Ts... Args) {
- const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
+ const auto *RD = QT->castAsRecordDecl();
// Iterate over the fields of the struct.
for (const FieldDecl *FD : RD->fields()) {
@@ -464,7 +464,8 @@ template <class Derived> struct GenFuncBase {
if (WrongType) {
std::string FuncName = std::string(F->getName());
- SourceLocation Loc = QT->castAs<RecordType>()->getDecl()->getLocation();
+ SourceLocation Loc =
+ QT->castAs<RecordType>()->getOriginalDecl()->getLocation();
CGM.Error(Loc, "special function " + FuncName +
" for non-trivial C struct has incorrect type");
return nullptr;
@@ -560,7 +561,8 @@ struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
if (FD->isZeroLengthBitField())
return;
- QualType RT = QualType(FD->getParent()->getTypeForDecl(), 0);
+ CanQualType RT =
+ this->CGF->getContext().getCanonicalTagType(FD->getParent());
llvm::Type *Ty = this->CGF->ConvertType(RT);
Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], Offset);
LValue DstBase =
@@ -671,7 +673,8 @@ struct GenDefaultInitialize
CharUnits Size = Ctx.getTypeSizeInChars(QualType(AT, 0));
QualType EltTy = Ctx.getBaseElementType(QualType(AT, 0));
- if (Size < CharUnits::fromQuantity(16) || EltTy->getAs<RecordType>()) {
+ if (Size < CharUnits::fromQuantity(16) ||
+ EltTy->getAsCanonical<RecordType>()) {
GenFuncBaseTy::visitArray(FK, AT, IsVolatile, FD, CurStructOffset, Addrs);
return;
}
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index 24b6ce7..b01d547 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -999,8 +999,8 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
// Compute whether the ivar has strong members.
if (CGM.getLangOpts().getGC())
- if (const RecordType *recordType = ivarType->getAs<RecordType>())
- HasStrong = recordType->getDecl()->hasObjectMember();
+ if (const auto *RD = ivarType->getAsRecordDecl())
+ HasStrong = RD->hasObjectMember();
// We can never access structs with object members with a native
// access, because we need to use write barriers. This is what
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index 8c66176..60f30a1 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -2315,7 +2315,7 @@ void IvarLayoutBuilder::visitBlock(const CGBlockInfo &blockInfo) {
}
assert(!type->isArrayType() && "array variable should not be caught");
- if (const RecordType *record = type->getAs<RecordType>()) {
+ if (const RecordType *record = type->getAsCanonical<RecordType>()) {
visitRecord(record, fieldOffset);
continue;
}
@@ -2409,7 +2409,7 @@ void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
if (FQT->isUnionType())
HasUnion = true;
- BuildRCBlockVarRecordLayout(FQT->castAs<RecordType>(),
+ BuildRCBlockVarRecordLayout(FQT->castAsCanonical<RecordType>(),
BytePos + FieldOffset, HasUnion);
continue;
}
@@ -2426,7 +2426,7 @@ void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
}
if (FQT->isRecordType() && ElCount) {
int OldIndex = RunSkipBlockVars.size() - 1;
- auto *RT = FQT->castAs<RecordType>();
+ auto *RT = FQT->castAsCanonical<RecordType>();
BuildRCBlockVarRecordLayout(RT, BytePos + FieldOffset, HasUnion);
// Replicate layout information for each array element. Note that
@@ -2495,7 +2495,7 @@ void CGObjCCommonMac::BuildRCBlockVarRecordLayout(const RecordType *RT,
CharUnits BytePos,
bool &HasUnion,
bool ByrefLayout) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
SmallVector<const FieldDecl *, 16> Fields(RD->fields());
llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
const llvm::StructLayout *RecLayout =
@@ -2831,7 +2831,7 @@ void CGObjCCommonMac::fillRunSkipBlockVars(CodeGenModule &CGM,
assert(!type->isArrayType() && "array variable should not be caught");
if (!CI.isByRef())
- if (const RecordType *record = type->getAs<RecordType>()) {
+ if (const auto *record = type->getAsCanonical<RecordType>()) {
BuildRCBlockVarRecordLayout(record, fieldOffset, hasUnion);
continue;
}
@@ -2865,7 +2865,7 @@ llvm::Constant *CGObjCCommonMac::BuildByrefLayout(CodeGen::CodeGenModule &CGM,
CharUnits fieldOffset;
RunSkipBlockVars.clear();
bool hasUnion = false;
- if (const RecordType *record = T->getAs<RecordType>()) {
+ if (const auto *record = T->getAsCanonical<RecordType>()) {
BuildRCBlockVarRecordLayout(record, fieldOffset, hasUnion,
true /*ByrefLayout */);
llvm::Constant *Result = getBitmapBlockLayout(true);
@@ -3353,8 +3353,8 @@ static bool hasWeakMember(QualType type) {
return true;
}
- if (auto recType = type->getAs<RecordType>()) {
- for (auto *field : recType->getDecl()->fields()) {
+ if (auto *RD = type->getAsRecordDecl()) {
+ for (auto *field : RD->fields()) {
if (hasWeakMember(field->getType()))
return true;
}
@@ -5184,7 +5184,7 @@ CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
}
void IvarLayoutBuilder::visitRecord(const RecordType *RT, CharUnits offset) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// If this is a union, remember that we had one, because it might mess
// up the ordering of layout entries.
@@ -5246,7 +5246,7 @@ void IvarLayoutBuilder::visitField(const FieldDecl *field,
return;
// Recurse if the base element type is a record type.
- if (auto recType = fieldType->getAs<RecordType>()) {
+ if (const auto *recType = fieldType->getAsCanonical<RecordType>()) {
size_t oldEnd = IvarsInfo.size();
visitRecord(recType, fieldOffset);
@@ -5670,7 +5670,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
nullptr, false, ICIS_NoInit));
RD->completeDefinition();
- SuperCTy = Ctx.getTagDeclType(RD);
+ SuperCTy = Ctx.getCanonicalTagType(RD);
SuperPtrCTy = Ctx.getPointerType(SuperCTy);
SuperTy = cast<llvm::StructType>(Types.ConvertType(SuperCTy));
@@ -6016,7 +6016,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(
false, ICIS_NoInit));
RD->completeDefinition();
- MessageRefCTy = Ctx.getTagDeclType(RD);
+ MessageRefCTy = Ctx.getCanonicalTagType(RD);
MessageRefCPtrTy = Ctx.getPointerType(MessageRefCTy);
MessageRefTy = cast<llvm::StructType>(Types.ConvertType(MessageRefCTy));
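Most of the mechanical churn in this file follows one migration pattern; a
sketch, not part of the patch:

    // Old: const RecordDecl *RD = RT->getDecl();
    // New: start from the declaration the type was written against, then walk
    // to the definition when one exists.
    const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();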
diff --git a/clang/lib/CodeGen/CGObjCRuntime.cpp b/clang/lib/CodeGen/CGObjCRuntime.cpp
index 6e2f320..76e0054 100644
--- a/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -439,8 +439,8 @@ void CGObjCRuntime::destroyCalleeDestroyedArguments(CodeGenFunction &CGF,
CGF.EmitARCRelease(RV.getScalarVal(), ARCImpreciseLifetime);
} else {
QualType QT = param->getType();
- auto *RT = QT->getAs<RecordType>();
- if (RT && RT->getDecl()->isParamDestroyedInCallee()) {
+ auto *RD = QT->getAsRecordDecl();
+ if (RD && RD->isParamDestroyedInCallee()) {
RValue RV = I->getRValue(CGF);
QualType::DestructionKind DtorKind = QT.isDestructedType();
switch (DtorKind) {
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 91237cf..b38eb54 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1845,11 +1845,11 @@ void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
}
-void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond,
- llvm::Value *NumThreads) {
+void CGOpenMPRuntime::emitParallelCall(
+ CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond,
+ llvm::Value *NumThreads, OpenMPNumThreadsClauseModifier NumThreadsModifier,
+ OpenMPSeverityClauseKind Severity, const Expr *Message) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
@@ -2372,9 +2372,8 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
void CGOpenMPRuntime::emitErrorCall(CodeGenFunction &CGF, SourceLocation Loc,
Expr *ME, bool IsFatal) {
- llvm::Value *MVL =
- ME ? CGF.EmitStringLiteralLValue(cast<StringLiteral>(ME)).getPointer(CGF)
- : llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ llvm::Value *MVL = ME ? CGF.EmitScalarExpr(ME)
+ : llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
// Build call void __kmpc_error(ident_t *loc, int severity, const char
// *message)
llvm::Value *Args[] = {
@@ -2699,18 +2698,54 @@ llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
CGF.getContext().BoolTy, Loc);
}
-void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
- llvm::Value *NumThreads,
- SourceLocation Loc) {
+llvm::Value *CGOpenMPRuntime::emitMessageClause(CodeGenFunction &CGF,
+ const Expr *Message) {
+ if (!Message)
+ return llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ return CGF.EmitScalarExpr(Message);
+}
+
+llvm::Value *
+CGOpenMPRuntime::emitMessageClause(CodeGenFunction &CGF,
+ const OMPMessageClause *MessageClause) {
+ return emitMessageClause(
+ CGF, MessageClause ? MessageClause->getMessageString() : nullptr);
+}
+
+llvm::Value *
+CGOpenMPRuntime::emitSeverityClause(OpenMPSeverityClauseKind Severity) {
+ // OpenMP 6.0, 10.4: "If no severity clause is specified then the effect is
+ // as if sev-level is fatal."
+ return llvm::ConstantInt::get(CGM.Int32Ty,
+ Severity == OMPC_SEVERITY_warning ? 1 : 2);
+}
+
+llvm::Value *
+CGOpenMPRuntime::emitSeverityClause(const OMPSeverityClause *SeverityClause) {
+ return emitSeverityClause(SeverityClause ? SeverityClause->getSeverityKind()
+ : OMPC_SEVERITY_unknown);
+}
+
+void CGOpenMPRuntime::emitNumThreadsClause(
+ CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc,
+ OpenMPNumThreadsClauseModifier Modifier, OpenMPSeverityClauseKind Severity,
+ const Expr *Message) {
if (!CGF.HaveInsertPoint())
return;
+ llvm::SmallVector<llvm::Value *, 4> Args(
+ {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)});
// Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
- llvm::Value *Args[] = {
- emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
- CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_push_num_threads),
- Args);
+ // or __kmpc_push_num_threads_strict(&loc, global_tid, num_threads, severity,
+  // message) if the strict modifier is used.
+ RuntimeFunction FnID = OMPRTL___kmpc_push_num_threads;
+ if (Modifier == OMPC_NUMTHREADS_strict) {
+ FnID = OMPRTL___kmpc_push_num_threads_strict;
+ Args.push_back(emitSeverityClause(Severity));
+ Args.push_back(emitMessageClause(CGF, Message));
+ }
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), FnID), Args);
}
void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
@@ -2915,7 +2950,7 @@ createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
addFieldToRecordDecl(C, UD, KmpInt32Ty);
addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
UD->completeDefinition();
- QualType KmpCmplrdataTy = C.getRecordType(UD);
+ CanQualType KmpCmplrdataTy = C.getCanonicalTagType(UD);
RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
RD->startDefinition();
addFieldToRecordDecl(C, RD, C.VoidPtrTy);
@@ -2950,7 +2985,7 @@ createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
RD->startDefinition();
addFieldToRecordDecl(C, RD, KmpTaskTQTy);
if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
- addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
+ addFieldToRecordDecl(C, RD, C.getCanonicalTagType(PrivateRD));
RD->completeDefinition();
return RD;
}
@@ -3006,10 +3041,10 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
CGF.GetAddrOfLocalVar(&TaskTypeArg),
KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
const auto *KmpTaskTWithPrivatesQTyRD =
- cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
+ KmpTaskTWithPrivatesQTy->castAsRecordDecl();
LValue Base =
CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
- const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
+ const auto *KmpTaskTQTyRD = KmpTaskTQTy->castAsRecordDecl();
auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
llvm::Value *PartidParam = PartIdLVal.getPointer(CGF);
@@ -3104,11 +3139,10 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
CGF.GetAddrOfLocalVar(&TaskTypeArg),
KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
const auto *KmpTaskTWithPrivatesQTyRD =
- cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
+ KmpTaskTWithPrivatesQTy->castAsRecordDecl();
auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
Base = CGF.EmitLValueForField(Base, *FI);
- for (const auto *Field :
- cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
+ for (const auto *Field : FI->getType()->castAsRecordDecl()->fields()) {
if (QualType::DestructionKind DtorKind =
Field->getType().isDestructedType()) {
LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
@@ -3212,7 +3246,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
LValue Base = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
TaskPrivatesArg.getType()->castAs<PointerType>());
- const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
+ const auto *PrivatesQTyRD = PrivatesQTy->castAsRecordDecl();
Counter = 0;
for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
@@ -3259,7 +3293,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
CGF.ConvertTypeForMem(SharedsTy)),
SharedsTy);
}
- FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
+ FI = FI->getType()->castAsRecordDecl()->field_begin();
for (const PrivateDataTy &Pair : Privates) {
// Do not initialize private locals.
if (Pair.second.isLocalPrivate()) {
@@ -3582,7 +3616,7 @@ static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy) {
addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getSizeType());
addFieldToRecordDecl(C, KmpAffinityInfoRD, FlagsTy);
KmpAffinityInfoRD->completeDefinition();
- KmpTaskAffinityInfoTy = C.getRecordType(KmpAffinityInfoRD);
+ KmpTaskAffinityInfoTy = C.getCanonicalTagType(KmpAffinityInfoRD);
}
}
@@ -3640,7 +3674,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
// Build type kmp_task_t (if not built yet).
if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
if (SavedKmpTaskloopTQTy.isNull()) {
- SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
+ SavedKmpTaskloopTQTy = C.getCanonicalTagType(createKmpTaskTRecordDecl(
CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
}
KmpTaskTQTy = SavedKmpTaskloopTQTy;
@@ -3650,16 +3684,17 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
"Expected taskloop, task or target directive");
if (SavedKmpTaskTQTy.isNull()) {
- SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
+ SavedKmpTaskTQTy = C.getCanonicalTagType(createKmpTaskTRecordDecl(
CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
}
KmpTaskTQTy = SavedKmpTaskTQTy;
}
- const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
+ const auto *KmpTaskTQTyRD = KmpTaskTQTy->castAsRecordDecl();
// Build particular struct kmp_task_t for the given task.
const RecordDecl *KmpTaskTWithPrivatesQTyRD =
createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
- QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
+ CanQualType KmpTaskTWithPrivatesQTy =
+ C.getCanonicalTagType(KmpTaskTWithPrivatesQTyRD);
QualType KmpTaskTWithPrivatesPtrQTy =
C.getPointerType(KmpTaskTWithPrivatesQTy);
llvm::Type *KmpTaskTWithPrivatesPtrTy = CGF.Builder.getPtrTy(0);
@@ -3914,7 +3949,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
// Fill the data in the resulting kmp_task_t record.
// Copy shareds if there are any.
Address KmpTaskSharedsPtr = Address::invalid();
- if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
+ if (!SharedsTy->castAsRecordDecl()->field_empty()) {
KmpTaskSharedsPtr = Address(
CGF.EmitLoadOfScalar(
CGF.EmitLValueForField(
@@ -3944,8 +3979,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
enum { Priority = 0, Destructors = 1 };
// Provide pointer to function with destructors for privates.
auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
- const RecordDecl *KmpCmplrdataUD =
- (*FI)->getType()->getAsUnionType()->getDecl();
+ const auto *KmpCmplrdataUD = (*FI)->getType()->castAsRecordDecl();
+ assert(KmpCmplrdataUD->isUnion());
if (NeedsCleanup) {
llvm::Value *DestructorFn = emitDestructorsFunction(
CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
@@ -4015,7 +4050,7 @@ static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy,
addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
KmpDependInfoRD->completeDefinition();
- KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
+ KmpDependInfoTy = C.getCanonicalTagType(KmpDependInfoRD);
}
}
@@ -4025,8 +4060,7 @@ CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
ASTContext &C = CGM.getContext();
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
- RecordDecl *KmpDependInfoRD =
- cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ auto *KmpDependInfoRD = KmpDependInfoTy->castAsRecordDecl();
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
LValue Base = CGF.EmitLoadOfPointerLValue(
DepobjLVal.getAddress().withElementType(
@@ -4054,8 +4088,7 @@ static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
ASTContext &C = CGM.getContext();
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
- RecordDecl *KmpDependInfoRD =
- cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ auto *KmpDependInfoRD = KmpDependInfoTy->castAsRecordDecl();
llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
OMPIteratorGeneratorScope IteratorScope(
@@ -4326,8 +4359,7 @@ Address CGOpenMPRuntime::emitDepobjDependClause(
unsigned NumDependencies = Dependencies.DepExprs.size();
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
- RecordDecl *KmpDependInfoRD =
- cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ auto *KmpDependInfoRD = KmpDependInfoTy->castAsRecordDecl();
llvm::Value *Size;
// Define type kmp_depend_info[<Dependencies.size()>];
@@ -4435,8 +4467,7 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
ASTContext &C = CGM.getContext();
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
- RecordDecl *KmpDependInfoRD =
- cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ auto *KmpDependInfoRD = KmpDependInfoTy->castAsRecordDecl();
llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
llvm::Value *NumDeps;
LValue Base;
@@ -5714,7 +5745,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
const FieldDecl *FlagsFD = addFieldToRecordDecl(
C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
RD->completeDefinition();
- QualType RDType = C.getRecordType(RD);
+ CanQualType RDType = C.getCanonicalTagType(RD);
unsigned Size = Data.ReductionVars.size();
llvm::APInt ArraySize(/*numBits=*/64, Size);
QualType ArrayRDType =
@@ -7487,7 +7518,32 @@ private:
// dimension.
uint64_t DimSize = 1;
- bool IsNonContiguous = CombinedInfo.NonContigInfo.IsNonContiguous;
+ // Detects non-contiguous updates due to strided accesses.
+ // Sets the 'IsNonContiguous' flag so that the 'MapType' bits are set
+ // correctly when generating information to be passed to the runtime. The
+    // flag is set to true if any array section has a constant stride not
+    // equal to 1. Sections with no stride expression, or whose stride is not
+    // a constant expression, are not flagged by this check.
+ bool IsNonContiguous =
+ CombinedInfo.NonContigInfo.IsNonContiguous ||
+ any_of(Components, [&](const auto &Component) {
+ const auto *OASE =
+ dyn_cast<ArraySectionExpr>(Component.getAssociatedExpression());
+ if (!OASE)
+ return false;
+
+ const Expr *StrideExpr = OASE->getStride();
+ if (!StrideExpr)
+ return false;
+
+ const auto Constant =
+ StrideExpr->getIntegerConstantExpr(CGF.getContext());
+ if (!Constant)
+ return false;
+
+ return !Constant->isOne();
+ });
+
bool IsPrevMemberReference = false;
bool IsPartialMapped =
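A sketch (not part of the patch) of a strided array section that the predicate
above flags as non-contiguous, using the [lower-bound : length : stride]
section syntax:

    // The section touches every other element of 'arr', so its constant
    // stride (2) is not 1 and the map is marked non-contiguous.
    #pragma omp target update to(arr[0:50:2])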
@@ -10703,7 +10759,7 @@ static unsigned evaluateCDTSize(const FunctionDecl *FD,
unsigned Offset = 0;
if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (ParamAttrs[Offset].Kind == Vector)
- CDT = C.getPointerType(C.getRecordType(MD->getParent()));
+ CDT = C.getPointerType(C.getCanonicalTagType(MD->getParent()));
++Offset;
}
if (CDT.isNull()) {
@@ -11285,9 +11341,9 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
addFieldToRecordDecl(C, RD, Int64Ty);
addFieldToRecordDecl(C, RD, Int64Ty);
RD->completeDefinition();
- KmpDimTy = C.getRecordType(RD);
+ KmpDimTy = C.getCanonicalTagType(RD);
} else {
- RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
+ RD = KmpDimTy->castAsRecordDecl();
}
llvm::APInt Size(/*numBits=*/32, NumIterations.size());
QualType ArrayTy = C.getConstantArrayType(KmpDimTy, Size, nullptr,
@@ -11781,7 +11837,7 @@ Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF,
VDField = addFieldToRecordDecl(C, RD, VD->getType().getNonReferenceType());
FiredField = addFieldToRecordDecl(C, RD, C.CharTy);
RD->completeDefinition();
- NewType = C.getRecordType(RD);
+ NewType = C.getCanonicalTagType(RD);
Address Addr = CGF.CreateMemTemp(NewType, C.getDeclAlign(VD), VD->getName());
BaseLVal = CGF.MakeAddrLValue(Addr, NewType, AlignmentSource::Decl);
I->getSecond().try_emplace(VD, NewType, VDField, FiredField, BaseLVal);
@@ -12093,12 +12149,11 @@ llvm::Function *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
llvm_unreachable("Not supported in SIMD-only mode");
}
-void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
- SourceLocation Loc,
- llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond,
- llvm::Value *NumThreads) {
+void CGOpenMPSIMDRuntime::emitParallelCall(
+ CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond,
+ llvm::Value *NumThreads, OpenMPNumThreadsClauseModifier NumThreadsModifier,
+ OpenMPSeverityClauseKind Severity, const Expr *Message) {
llvm_unreachable("Not supported in SIMD-only mode");
}
@@ -12201,9 +12256,10 @@ llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF,
llvm_unreachable("Not supported in SIMD-only mode");
}
-void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
- llvm::Value *NumThreads,
- SourceLocation Loc) {
+void CGOpenMPSIMDRuntime::emitNumThreadsClause(
+ CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc,
+ OpenMPNumThreadsClauseModifier Modifier, OpenMPSeverityClauseKind Severity,
+ const Expr *Message) {
llvm_unreachable("Not supported in SIMD-only mode");
}
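A sketch (not part of the patch) of the directive shape this lowers, assuming
OpenMP 6.0 clause spellings for the strict modifier:

    // With 'strict', failing to provide exactly 8 threads is reported with the
    // given severity and message instead of being silently adjusted.
    #pragma omp parallel num_threads(strict: 8) severity(warning) \
        message("could not deliver 8 threads")
    { /* ... */ }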
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.h b/clang/lib/CodeGen/CGOpenMPRuntime.h
index 5be48b4..eb04ece 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -777,11 +777,22 @@ public:
/// specified, nullptr otherwise.
/// \param NumThreads The value corresponding to the num_threads clause, if
/// any, or nullptr.
+  /// \param NumThreadsModifier The modifier of the num_threads clause, if
+  /// any; ignored otherwise.
+  /// \param Severity The severity from the 'severity' clause, if any; the
+  /// effect defaults to fatal.
+  /// \param Message The message string from the 'message' clause, if any,
+  /// or nullptr.
///
- virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond, llvm::Value *NumThreads);
+ virtual void
+ emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ llvm::Function *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond,
+ llvm::Value *NumThreads,
+ OpenMPNumThreadsClauseModifier NumThreadsModifier =
+ OMPC_NUMTHREADS_unknown,
+ OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal,
+ const Expr *Message = nullptr);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
@@ -1037,13 +1048,28 @@ public:
Address IL, Address LB,
Address UB, Address ST);
+ virtual llvm::Value *emitMessageClause(CodeGenFunction &CGF,
+ const Expr *Message);
+ virtual llvm::Value *emitMessageClause(CodeGenFunction &CGF,
+ const OMPMessageClause *MessageClause);
+
+ virtual llvm::Value *emitSeverityClause(OpenMPSeverityClauseKind Severity);
+ virtual llvm::Value *
+ emitSeverityClause(const OMPSeverityClause *SeverityClause);
+
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
+ /// If the modifier 'strict' is given:
+ /// Emits call to void __kmpc_push_num_threads_strict(ident_t *loc, kmp_int32
+ /// global_tid, kmp_int32 num_threads, int severity, const char *message) to
+ /// generate code for 'num_threads' clause with 'strict' modifier.
/// \param NumThreads An integer value of threads.
- virtual void emitNumThreadsClause(CodeGenFunction &CGF,
- llvm::Value *NumThreads,
- SourceLocation Loc);
+ virtual void emitNumThreadsClause(
+ CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc,
+ OpenMPNumThreadsClauseModifier Modifier = OMPC_NUMTHREADS_unknown,
+ OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal,
+ const Expr *Message = nullptr);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
@@ -1737,11 +1763,21 @@ public:
/// specified, nullptr otherwise.
/// \param NumThreads The value corresponding to the num_threads clause, if
/// any, or nullptr.
+  /// \param NumThreadsModifier The modifier of the num_threads clause, if
+  /// any; ignored otherwise.
+  /// \param Severity The severity from the 'severity' clause, if any; the
+  /// effect defaults to fatal.
+  /// \param Message The message string from the 'message' clause, if any,
+  /// or nullptr.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond, llvm::Value *NumThreads) override;
+ const Expr *IfCond, llvm::Value *NumThreads,
+ OpenMPNumThreadsClauseModifier NumThreadsModifier =
+ OMPC_NUMTHREADS_unknown,
+ OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal,
+ const Expr *Message = nullptr) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
@@ -1911,9 +1947,16 @@ public:
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
+ /// If the modifier 'strict' is given:
+ /// Emits call to void __kmpc_push_num_threads_strict(ident_t *loc, kmp_int32
+ /// global_tid, kmp_int32 num_threads, int severity, const char *message) to
+ /// generate code for 'num_threads' clause with 'strict' modifier.
/// \param NumThreads An integer value of threads.
- void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
- SourceLocation Loc) override;
+ void emitNumThreadsClause(
+ CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc,
+ OpenMPNumThreadsClauseModifier Modifier = OMPC_NUMTHREADS_unknown,
+ OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal,
+ const Expr *Message = nullptr) override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 04c9192..a80d9fd 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -769,7 +769,7 @@ void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
"_openmp_teams_reduction_type_$_", RecordDecl::TagKind::Union);
StaticRD->startDefinition();
for (const RecordDecl *TeamReductionRec : TeamsReductions) {
- QualType RecTy = C.getRecordType(TeamReductionRec);
+ CanQualType RecTy = C.getCanonicalTagType(TeamReductionRec);
auto *Field = FieldDecl::Create(
C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
@@ -779,7 +779,7 @@ void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
StaticRD->addDecl(Field);
}
StaticRD->completeDefinition();
- QualType StaticTy = C.getRecordType(StaticRD);
+ CanQualType StaticTy = C.getCanonicalTagType(StaticRD);
llvm::Type *LLVMReductionsBufferTy =
CGM.getTypes().ConvertTypeForMem(StaticTy);
const auto &DL = CGM.getModule().getDataLayout();
@@ -899,9 +899,10 @@ void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
// Nothing to do.
}
-void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
- llvm::Value *NumThreads,
- SourceLocation Loc) {
+void CGOpenMPRuntimeGPU::emitNumThreadsClause(
+ CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc,
+ OpenMPNumThreadsClauseModifier Modifier, OpenMPSeverityClauseKind Severity,
+ const Expr *Message) {
// Nothing to do.
}
@@ -1201,18 +1202,17 @@ void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
}
-void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
- SourceLocation Loc,
- llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond,
- llvm::Value *NumThreads) {
+void CGOpenMPRuntimeGPU::emitParallelCall(
+ CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond,
+ llvm::Value *NumThreads, OpenMPNumThreadsClauseModifier NumThreadsModifier,
+ OpenMPSeverityClauseKind Severity, const Expr *Message) {
if (!CGF.HaveInsertPoint())
return;
- auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars, IfCond,
- NumThreads](CodeGenFunction &CGF,
- PrePostActionTy &Action) {
+ auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars, IfCond, NumThreads,
+ NumThreadsModifier, Severity, Message](
+ CodeGenFunction &CGF, PrePostActionTy &Action) {
CGBuilderTy &Bld = CGF.Builder;
llvm::Value *NumThreadsVal = NumThreads;
llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
@@ -1260,21 +1260,22 @@ void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
NumThreadsVal = Bld.CreateZExtOrTrunc(NumThreadsVal, CGF.Int32Ty);
assert(IfCondVal && "Expected a value");
+ RuntimeFunction FnID = OMPRTL___kmpc_parallel_51;
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- llvm::Value *Args[] = {
- RTLoc,
- getThreadID(CGF, Loc),
- IfCondVal,
- NumThreadsVal,
- llvm::ConstantInt::get(CGF.Int32Ty, -1),
- FnPtr,
- ID,
- Bld.CreateBitOrPointerCast(CapturedVarsAddrs.emitRawPointer(CGF),
- CGF.VoidPtrPtrTy),
- llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_parallel_51),
- Args);
+ llvm::SmallVector<llvm::Value *, 10> Args(
+ {RTLoc, getThreadID(CGF, Loc), IfCondVal, NumThreadsVal,
+ llvm::ConstantInt::get(CGF.Int32Ty, -1), FnPtr, ID,
+ Bld.CreateBitOrPointerCast(CapturedVarsAddrs.emitRawPointer(CGF),
+ CGF.VoidPtrPtrTy),
+ llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())});
+ if (NumThreadsModifier == OMPC_NUMTHREADS_strict) {
+ FnID = OMPRTL___kmpc_parallel_60;
+ Args.append({llvm::ConstantInt::get(CGM.Int32Ty, true),
+ emitSeverityClause(Severity),
+ emitMessageClause(CGF, Message)});
+ }
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), FnID), Args);
};
RegionCodeGenTy RCG(ParallelGen);
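
With this hunk the device lowering selects __kmpc_parallel_60 only when the num_threads clause carries the 'strict' modifier, appending the strict flag, severity, and message arguments; otherwise the existing __kmpc_parallel_51 call is emitted unchanged. A minimal source illustration (OpenMP 6.0 clause spellings; the thread count and message text are made up):

    #pragma omp parallel num_threads(strict: 64) severity(warning) \
        message("could not create 64 threads")
    { /* parallel region body */ }
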
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
index b59f43a..3e36708 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
@@ -165,9 +165,16 @@ public:
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
+ /// If the modifier 'strict' is given:
+ /// Emits call to void __kmpc_push_num_threads_strict(ident_t *loc, kmp_int32
+ /// global_tid, kmp_int32 num_threads, int severity, const char *message) to
+ /// generate code for 'num_threads' clause with 'strict' modifier.
  /// \param NumThreads An integer value for the number of threads.
- void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
- SourceLocation Loc) override;
+ void emitNumThreadsClause(
+ CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc,
+ OpenMPNumThreadsClauseModifier Modifier = OMPC_NUMTHREADS_unknown,
+ OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal,
+ const Expr *Message = nullptr) override;
  /// This function ought to emit, in the general case, a call to
  /// the OpenMP runtime kmpc_push_num_teams. In the NVPTX backend it is not needed
@@ -229,12 +236,21 @@ public:
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param NumThreads The value corresponding to the num_threads clause, if
- /// any,
- /// or nullptr.
+ /// any, or nullptr.
+ /// \param NumThreadsModifier The modifier of the num_threads clause, if
+ /// any, ignored otherwise.
+ /// \param Severity The severity corresponding to the num_threads clause, if
+ /// any, ignored otherwise.
+ /// \param Message The message string corresponding to the num_threads clause,
+ /// if any, or nullptr.
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond, llvm::Value *NumThreads) override;
+ const Expr *IfCond, llvm::Value *NumThreads,
+ OpenMPNumThreadsClauseModifier NumThreadsModifier =
+ OMPC_NUMTHREADS_unknown,
+ OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal,
+ const Expr *Message = nullptr) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
diff --git a/clang/lib/CodeGen/CGPointerAuth.cpp b/clang/lib/CodeGen/CGPointerAuth.cpp
index dcef01a..375f87a 100644
--- a/clang/lib/CodeGen/CGPointerAuth.cpp
+++ b/clang/lib/CodeGen/CGPointerAuth.cpp
@@ -466,6 +466,14 @@ llvm::Constant *CodeGenModule::getConstantSignedPointer(
OtherDiscriminator);
}
+llvm::Constant *
+CodeGen::getConstantSignedPointer(CodeGenModule &CGM, llvm::Constant *Pointer,
+ unsigned Key, llvm::Constant *StorageAddress,
+ llvm::ConstantInt *OtherDiscriminator) {
+ return CGM.getConstantSignedPointer(Pointer, Key, StorageAddress,
+ OtherDiscriminator);
+}
+
/// If applicable, sign a given constant function pointer with the ABI rules for
/// functionType.
llvm::Constant *CodeGenModule::getFunctionPointer(llvm::Constant *Pointer,
@@ -531,7 +539,7 @@ llvm::Constant *CodeGenModule::getMemberFunctionPointer(llvm::Constant *Pointer,
llvm::Constant *CodeGenModule::getMemberFunctionPointer(const FunctionDecl *FD,
llvm::Type *Ty) {
QualType FT = FD->getType();
- FT = getContext().getMemberPointerType(FT, /*Qualifier=*/nullptr,
+ FT = getContext().getMemberPointerType(FT, /*Qualifier=*/std::nullopt,
cast<CXXMethodDecl>(FD)->getParent());
return getMemberFunctionPointer(getRawFunctionPointer(FD, Ty), FT);
}
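
The new CodeGen::getConstantSignedPointer free function simply forwards to the CodeGenModule member of the same name, making it reachable from callers that only hold a CodeGenModule reference. A hedged usage sketch (the key constant and discriminator value are illustrative):

    // Sign Ptr with an illustrative key and an address-independent
    // extra discriminator.
    llvm::Constant *Signed = CodeGen::getConstantSignedPointer(
        CGM, Ptr, /*Key=*/2, /*StorageAddress=*/nullptr,
        /*OtherDiscriminator=*/Disc);
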
diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index e1310ae..5f6136c 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -1027,8 +1027,11 @@ void CGRecordLowering::fillOutputFields() {
if (Member.FD)
Fields[Member.FD->getCanonicalDecl()] = FieldTypes.size() - 1;
// A field without storage must be a bitfield.
- if (!Member.Data)
+ if (!Member.Data) {
+ assert(Member.FD &&
+ "Member.Data is a nullptr so Member.FD should not be");
setBitFieldInfo(Member.FD, Member.Offset, FieldTypes.back());
+ }
} else if (Member.Kind == MemberInfo::Base)
NonVirtualBases[Member.RD] = FieldTypes.size() - 1;
else if (Member.Kind == MemberInfo::VBase)
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 1a8c6f0..031ef73 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -3279,7 +3279,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
const RecordDecl *RD = S.getCapturedRecordDecl();
- QualType RecordTy = getContext().getRecordType(RD);
+ CanQualType RecordTy = getContext().getCanonicalTagType(RD);
// Initialize the captured struct.
LValue SlotLV =
@@ -3359,7 +3359,7 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
// Initialize variable-length arrays.
LValue Base = MakeNaturalAlignRawAddrLValue(
- CapturedStmtInfo->getContextValue(), Ctx.getTagDeclType(RD));
+ CapturedStmtInfo->getContextValue(), Ctx.getCanonicalTagType(RD));
for (auto *FD : RD->fields()) {
if (FD->hasCapturedVLAType()) {
auto *ExprArg =
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 5822e0f..1074ee5 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -1608,6 +1608,11 @@ static void emitCommonOMPParallelDirective(
const CodeGenBoundParametersTy &CodeGenBoundParameters) {
const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
llvm::Value *NumThreads = nullptr;
+ OpenMPNumThreadsClauseModifier Modifier = OMPC_NUMTHREADS_unknown;
+ // OpenMP 6.0, 10.4: "If no severity clause is specified then the effect is as
+ // if sev-level is fatal."
+ OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal;
+ clang::Expr *Message = nullptr;
llvm::Function *OutlinedFn =
CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
CGF, S, *CS->getCapturedDecl()->param_begin(), InnermostKind,
@@ -1616,8 +1621,14 @@ static void emitCommonOMPParallelDirective(
CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
/*IgnoreResultAssign=*/true);
+ Modifier = NumThreadsClause->getModifier();
+ if (const auto *MessageClause = S.getSingleClause<OMPMessageClause>())
+ Message = MessageClause->getMessageString();
+ if (const auto *SeverityClause = S.getSingleClause<OMPSeverityClause>())
+ Severity = SeverityClause->getSeverityKind();
CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
- CGF, NumThreads, NumThreadsClause->getBeginLoc());
+ CGF, NumThreads, NumThreadsClause->getBeginLoc(), Modifier, Severity,
+ Message);
}
if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
@@ -1642,7 +1653,8 @@ static void emitCommonOMPParallelDirective(
CodeGenBoundParameters(CGF, S, CapturedVars);
CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
- CapturedVars, IfCond, NumThreads);
+ CapturedVars, IfCond, NumThreads,
+ Modifier, Severity, Message);
}
static bool isAllocatableDecl(const VarDecl *VD) {
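
Per the OpenMP 6.0, 10.4 wording quoted above, initializing Severity to OMPC_SEVERITY_fatal makes an omitted severity clause behave exactly like an explicit severity(fatal); as a sketch, these two directives lower identically:

    #pragma omp parallel num_threads(strict: 8)
    #pragma omp parallel num_threads(strict: 8) severity(fatal)
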
@@ -5273,7 +5285,8 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
// Emit outlined function for task construct.
const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
- QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
+ CanQualType SharedsTy =
+ getContext().getCanonicalTagType(CS->getCapturedRecordDecl());
auto I = CS->getCapturedDecl()->param_begin();
auto PartId = std::next(I);
auto TaskT = std::next(I, 4);
@@ -5507,7 +5520,8 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
// Emit outlined function for task construct.
const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
- QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
+ CanQualType SharedsTy =
+ getContext().getCanonicalTagType(CS->getCapturedRecordDecl());
const Expr *IfCond = nullptr;
for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
if (C->getNameModifier() == OMPD_unknown ||
@@ -7890,7 +7904,8 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
CapturedStruct = GenerateCapturedStmtArgument(*CS);
}
- QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
+ CanQualType SharedsTy =
+ getContext().getCanonicalTagType(CS->getCapturedRecordDecl());
const Expr *IfCond = nullptr;
for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
if (C->getNameModifier() == OMPD_unknown ||
diff --git a/clang/lib/CodeGen/CGVTables.cpp b/clang/lib/CodeGen/CGVTables.cpp
index 0b6e830..e14e883 100644
--- a/clang/lib/CodeGen/CGVTables.cpp
+++ b/clang/lib/CodeGen/CGVTables.cpp
@@ -971,7 +971,7 @@ llvm::GlobalVariable *CodeGenVTables::GenerateConstructionVTable(
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(
- CGM.getContext().getTagDeclType(Base.getBase()));
+ CGM.getContext().getCanonicalTagType(Base.getBase()));
// Create and set the initializer.
ConstantInitBuilder builder(CGM);
@@ -1382,8 +1382,8 @@ void CodeGenModule::EmitVTableTypeMetadata(const CXXRecordDecl *RD,
AP.second.AddressPointIndex,
{}};
llvm::raw_string_ostream Stream(N.TypeName);
- getCXXABI().getMangleContext().mangleCanonicalTypeName(
- QualType(N.Base->getTypeForDecl(), 0), Stream);
+ CanQualType T = getContext().getCanonicalTagType(N.Base);
+ getCXXABI().getMangleContext().mangleCanonicalTypeName(T, Stream);
AddressPoints.push_back(std::move(N));
}
@@ -1404,7 +1404,7 @@ void CodeGenModule::EmitVTableTypeMetadata(const CXXRecordDecl *RD,
continue;
llvm::Metadata *MD = CreateMetadataIdentifierForVirtualMemPtrType(
Context.getMemberPointerType(Comps[I].getFunctionDecl()->getType(),
- /*Qualifier=*/nullptr, AP.Base));
+ /*Qualifier=*/std::nullopt, AP.Base));
VTable->addTypeMetadata((ComponentWidth * I).getQuantity(), MD);
}
}
diff --git a/clang/lib/CodeGen/CMakeLists.txt b/clang/lib/CodeGen/CMakeLists.txt
index 0f2a352..ad9ef91 100644
--- a/clang/lib/CodeGen/CMakeLists.txt
+++ b/clang/lib/CodeGen/CMakeLists.txt
@@ -154,6 +154,7 @@ add_clang_library(clangCodeGen
Targets/WebAssembly.cpp
Targets/X86.cpp
Targets/XCore.cpp
+ TrapReasonBuilder.cpp
VarBypassDetector.cpp
DEPENDS
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index ab345a5..b2fe917 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -2221,12 +2221,9 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
void
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
// Ignore empty classes in C++.
- if (getLangOpts().CPlusPlus) {
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
- return;
- }
- }
+ if (getLangOpts().CPlusPlus)
+ if (const auto *RD = Ty->getAsCXXRecordDecl(); RD && RD->isEmpty())
+ return;
if (DestPtr.getElementType() != Int8Ty)
DestPtr = DestPtr.withElementType(Int8Ty);
@@ -2494,10 +2491,6 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::PredefinedSugar:
llvm_unreachable("type class is never variably-modified!");
- case Type::Elaborated:
- type = cast<ElaboratedType>(ty)->getNamedType();
- break;
-
case Type::Adjusted:
type = cast<AdjustedType>(ty)->getAdjustedType();
break;
@@ -2829,6 +2822,9 @@ void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
if (!FD)
return;
+ bool IsAlwaysInline = TargetDecl->hasAttr<AlwaysInlineAttr>();
+ bool IsFlatten = FD && FD->hasAttr<FlattenAttr>();
+
// Grab the required features for the call. For a builtin this is listed in
// the td file with the default cpu, for an always_inline function this is any
// listed cpu and any listed features.
@@ -2871,25 +2867,39 @@ void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
if (F.getValue())
ReqFeatures.push_back(F.getKey());
}
- if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
- if (!CallerFeatureMap.lookup(Feature)) {
- MissingFeature = Feature.str();
- return false;
- }
- return true;
- }) && !IsHipStdPar)
- CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
- << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
+ if (!llvm::all_of(ReqFeatures,
+ [&](StringRef Feature) {
+ if (!CallerFeatureMap.lookup(Feature)) {
+ MissingFeature = Feature.str();
+ return false;
+ }
+ return true;
+ }) &&
+ !IsHipStdPar) {
+ if (IsAlwaysInline)
+ CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
+ << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
+ else if (IsFlatten)
+ CGM.getDiags().Report(Loc, diag::err_flatten_function_needs_feature)
+ << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
+ }
+
} else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
llvm::StringMap<bool> CalleeFeatureMap;
CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
for (const auto &F : CalleeFeatureMap) {
- if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) ||
- !CallerFeatureMap.find(F.getKey())->getValue()) &&
- !IsHipStdPar)
- CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
- << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
+ if (F.getValue() &&
+ (!CallerFeatureMap.lookup(F.getKey()) ||
+ !CallerFeatureMap.find(F.getKey())->getValue()) &&
+ !IsHipStdPar) {
+ if (IsAlwaysInline)
+ CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
+ << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
+ else if (IsFlatten)
+ CGM.getDiags().Report(Loc, diag::err_flatten_function_needs_feature)
+ << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
+ }
}
}
}
@@ -2905,10 +2915,16 @@ void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
void CodeGenFunction::EmitKCFIOperandBundle(
const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
- const FunctionProtoType *FP =
- Callee.getAbstractInfo().getCalleeFunctionProtoType();
- if (FP)
- Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar()));
+ const CGCalleeInfo &CI = Callee.getAbstractInfo();
+ const FunctionProtoType *FP = CI.getCalleeFunctionProtoType();
+ if (!FP)
+ return;
+
+ StringRef Salt;
+ if (const auto &Info = FP->getExtraAttributeInfo())
+ Salt = Info.CFISalt;
+
+ Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar(), Salt));
}
llvm::Value *
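
The checkTargetFeatures change in this file extends the missing-feature diagnostic, previously tied to always_inline callees, to calls inside flatten functions via the new err_flatten_function_needs_feature. A sketch of the pattern it diagnoses (the feature name is illustrative):

    __attribute__((target("avx2"))) int callee(void);
    __attribute__((flatten)) int caller(void) {
      return callee(); // flatten would force inlining, but caller lacks 'avx2'
    }                  // -> err_flatten_function_needs_feature
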
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 6c32c98..c02ac18 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -701,14 +701,12 @@ public:
bool isRedundantBeforeReturn() override { return true; }
llvm::Value *Addr;
- llvm::Value *Size;
public:
- CallLifetimeEnd(RawAddress addr, llvm::Value *size)
- : Addr(addr.getPointer()), Size(size) {}
+ CallLifetimeEnd(RawAddress addr) : Addr(addr.getPointer()) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
- CGF.EmitLifetimeEnd(Size, Addr);
+ CGF.EmitLifetimeEnd(Addr);
}
};
@@ -727,7 +725,7 @@ public:
};
/// Header for data within LifetimeExtendedCleanupStack.
- struct LifetimeExtendedCleanupHeader {
+ struct alignas(uint64_t) LifetimeExtendedCleanupHeader {
/// The size of the following cleanup object.
unsigned Size;
/// The kind of cleanup to push.
@@ -949,7 +947,8 @@ public:
LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
(Header.IsConditional ? sizeof(ActiveFlag) : 0));
- static_assert(sizeof(Header) % alignof(T) == 0,
+ static_assert((alignof(LifetimeExtendedCleanupHeader) == alignof(T)) &&
+ (alignof(T) == alignof(RawAddress)),
"Cleanup will be allocated on misaligned address");
char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
new (Buffer) LifetimeExtendedCleanupHeader(Header);
@@ -2805,6 +2804,13 @@ public:
AllocaTracker Tracker;
};
+private:
+ /// If \p Alloca is not in the same address space as \p DestLangAS, insert an
+ /// address space cast and return a new RawAddress based on this value.
+ RawAddress MaybeCastStackAddressSpace(RawAddress Alloca, LangAS DestLangAS,
+ llvm::Value *ArraySize = nullptr);
+
+public:
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder. The caller is responsible for setting an
@@ -2973,10 +2979,8 @@ public:
/// hasVolatileMember - returns true if aggregate type has a volatile
/// member.
bool hasVolatileMember(QualType T) {
- if (const RecordType *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+ if (const auto *RD = T->getAsRecordDecl())
return RD->hasVolatileMember();
- }
return false;
}
@@ -3233,8 +3237,8 @@ public:
void EmitSehTryScopeBegin();
void EmitSehTryScopeEnd();
- llvm::Value *EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr);
- void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
+ bool EmitLifetimeStart(llvm::Value *Addr);
+ void EmitLifetimeEnd(llvm::Value *Addr);
llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
@@ -3417,8 +3421,8 @@ public:
/// initializer.
bool IsConstantAggregate;
- /// Non-null if we should use lifetime annotations.
- llvm::Value *SizeForLifetimeMarkers;
+ /// True if lifetime markers should be used.
+ bool UseLifetimeMarkers;
/// Address with original alloca instruction. Invalid if the variable was
/// emitted as a global constant.
@@ -3432,20 +3436,14 @@ public:
AutoVarEmission(const VarDecl &variable)
: Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
IsEscapingByRef(false), IsConstantAggregate(false),
- SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {}
+ UseLifetimeMarkers(false), AllocaAddr(RawAddress::invalid()) {}
bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
public:
static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
- bool useLifetimeMarkers() const {
- return SizeForLifetimeMarkers != nullptr;
- }
- llvm::Value *getSizeForLifetimeMarkers() const {
- assert(useLifetimeMarkers());
- return SizeForLifetimeMarkers;
- }
+ bool useLifetimeMarkers() const { return UseLifetimeMarkers; }
/// Returns the raw, allocated address, which is not necessarily
/// the address of the object itself. It is casted to default
@@ -4560,7 +4558,7 @@ public:
ArrayRef<llvm::Value *> args);
CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
- NestedNameSpecifier *Qual, llvm::Type *Ty);
+ NestedNameSpecifier Qual, llvm::Type *Ty);
CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
CXXDtorType Type,
@@ -4665,7 +4663,7 @@ public:
llvm::CallBase **CallOrInvoke = nullptr);
RValue EmitCXXMemberOrOperatorMemberCallExpr(
const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
- bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
+ bool HasQualifier, NestedNameSpecifier Qualifier, bool IsArrow,
const Expr *Base, llvm::CallBase **CallOrInvoke);
// Compute the object pointer.
Address EmitCXXMemberDataPointerAddress(
@@ -5220,6 +5218,12 @@ public:
/// operation is a subtraction.
enum { NotSubtraction = false, IsSubtraction = true };
+ /// Emit pointer + index arithmetic.
+ llvm::Value *EmitPointerArithmetic(const BinaryOperator *BO,
+ Expr *pointerOperand, llvm::Value *pointer,
+ Expr *indexOperand, llvm::Value *index,
+ bool isSubtraction);
+
/// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
/// detect undefined behavior when the pointer overflow sanitizer is enabled.
/// \p SignedIndices indicates whether any of the GEP indices are signed.
@@ -5274,7 +5278,8 @@ public:
EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
Checked,
SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
- ArrayRef<llvm::Value *> DynamicArgs);
+ ArrayRef<llvm::Value *> DynamicArgs,
+ const TrapReason *TR = nullptr);
/// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
/// if Cond if false.
@@ -5290,7 +5295,7 @@ public:
/// Create a basic block that will call the trap intrinsic, and emit a
/// conditional branch to it, for the -ftrapv checks.
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID,
- bool NoMerge = false);
+ bool NoMerge = false, const TrapReason *TR = nullptr);
/// Emit a call to trap or debugtrap and attach function attribute
/// "trap-func-name" if specified.
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 834b1c0..323823c 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -2335,7 +2335,40 @@ llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
}
-llvm::ConstantInt *CodeGenModule::CreateKCFITypeId(QualType T) {
+// Generalize pointer types to a void pointer with the qualifiers of the
+// originally pointed-to type, e.g. 'const char *' and 'char * const *'
+// generalize to 'const void *' while 'char *' and 'const char **' generalize to
+// 'void *'.
+static QualType GeneralizeType(ASTContext &Ctx, QualType Ty) {
+ if (!Ty->isPointerType())
+ return Ty;
+
+ return Ctx.getPointerType(
+ QualType(Ctx.VoidTy)
+ .withCVRQualifiers(Ty->getPointeeType().getCVRQualifiers()));
+}
+
+// Apply type generalization to a FunctionType's return and argument types
+static QualType GeneralizeFunctionType(ASTContext &Ctx, QualType Ty) {
+ if (auto *FnType = Ty->getAs<FunctionProtoType>()) {
+ SmallVector<QualType, 8> GeneralizedParams;
+ for (auto &Param : FnType->param_types())
+ GeneralizedParams.push_back(GeneralizeType(Ctx, Param));
+
+ return Ctx.getFunctionType(GeneralizeType(Ctx, FnType->getReturnType()),
+ GeneralizedParams, FnType->getExtProtoInfo());
+ }
+
+ if (auto *FnType = Ty->getAs<FunctionNoProtoType>())
+ return Ctx.getFunctionNoProtoType(
+ GeneralizeType(Ctx, FnType->getReturnType()));
+
+ llvm_unreachable("Encountered unknown FunctionType");
+}
+
+llvm::ConstantInt *CodeGenModule::CreateKCFITypeId(QualType T, StringRef Salt) {
+ if (getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
+ T = GeneralizeFunctionType(getContext(), T);
if (auto *FnType = T->getAs<FunctionProtoType>())
T = getContext().getFunctionType(
FnType->getReturnType(), FnType->getParamTypes(),
@@ -2346,8 +2379,13 @@ llvm::ConstantInt *CodeGenModule::CreateKCFITypeId(QualType T) {
getCXXABI().getMangleContext().mangleCanonicalTypeName(
T, Out, getCodeGenOpts().SanitizeCfiICallNormalizeIntegers);
+ if (!Salt.empty())
+ Out << "." << Salt;
+
if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers)
Out << ".normalized";
+ if (getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
+ Out << ".generalized";
return llvm::ConstantInt::get(Int32Ty,
static_cast<uint32_t>(llvm::xxHash64(OutName)));
@@ -2793,7 +2831,7 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
for (const CXXRecordDecl *Base : getMostBaseClasses(MD->getParent())) {
llvm::Metadata *Id =
CreateMetadataIdentifierForType(Context.getMemberPointerType(
- MD->getType(), /*Qualifier=*/nullptr, Base));
+ MD->getType(), /*Qualifier=*/std::nullopt, Base));
F->addTypeMetadata(0, Id);
}
}
@@ -3012,9 +3050,15 @@ void CodeGenModule::createFunctionTypeMetadataForIcall(const FunctionDecl *FD,
void CodeGenModule::setKCFIType(const FunctionDecl *FD, llvm::Function *F) {
llvm::LLVMContext &Ctx = F->getContext();
llvm::MDBuilder MDB(Ctx);
+ llvm::StringRef Salt;
+
+ if (const auto *FP = FD->getType()->getAs<FunctionProtoType>())
+ if (const auto &Info = FP->getExtraAttributeInfo())
+ Salt = Info.CFISalt;
+
F->setMetadata(llvm::LLVMContext::MD_kcfi_type,
- llvm::MDNode::get(
- Ctx, MDB.createConstant(CreateKCFITypeId(FD->getType()))));
+ llvm::MDNode::get(Ctx, MDB.createConstant(CreateKCFITypeId(
+ FD->getType(), Salt))));
}
static bool allowKCFIIdentifier(StringRef Name) {
@@ -4151,10 +4195,13 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// Check if T is a class type with a destructor that's not dllimport.
static bool HasNonDllImportDtor(QualType T) {
- if (const auto *RT = T->getBaseElementTypeUnsafe()->getAs<RecordType>())
- if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (const auto *RT =
+ T->getBaseElementTypeUnsafe()->getAsCanonical<RecordType>())
+ if (auto *RD = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl())) {
+ RD = RD->getDefinitionOrSelf();
if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>())
return true;
+ }
return false;
}
@@ -5750,11 +5797,16 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
(D->getType()->isCUDADeviceBuiltinSurfaceType() ||
D->getType()->isCUDADeviceBuiltinTextureType());
if (getLangOpts().CUDA &&
- (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar))
+ (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar)) {
Init = llvm::UndefValue::get(getTypes().ConvertTypeForMem(ASTTy));
- else if (D->hasAttr<LoaderUninitializedAttr>())
+ } else if (getLangOpts().HLSL &&
+ (D->getType()->isHLSLResourceRecord() ||
+ D->getType()->isHLSLResourceRecordArray())) {
+ Init = llvm::PoisonValue::get(getTypes().ConvertType(ASTTy));
+ NeedsGlobalCtor = D->getType()->isHLSLResourceRecord();
+ } else if (D->hasAttr<LoaderUninitializedAttr>()) {
Init = llvm::UndefValue::get(getTypes().ConvertTypeForMem(ASTTy));
- else if (!InitExpr) {
+ } else if (!InitExpr) {
// This is a tentative definition; tentative definitions are
// implicitly initialized with { 0 }.
//
@@ -5775,11 +5827,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
if (D->getType()->isReferenceType())
T = D->getType();
- if (getLangOpts().HLSL &&
- D->getType().getTypePtr()->isHLSLResourceRecord()) {
- Init = llvm::PoisonValue::get(getTypes().ConvertType(ASTTy));
- NeedsGlobalCtor = true;
- } else if (getLangOpts().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
Init = EmitNullConstant(T);
if (!IsDefinitionAvailableExternally)
NeedsGlobalCtor = true;
@@ -6028,8 +6076,7 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context,
if (Context.isAlignmentRequired(VarType))
return true;
- if (const auto *RT = VarType->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ if (const auto *RD = VarType->getAsRecordDecl()) {
for (const FieldDecl *FD : RD->fields()) {
if (FD->isBitField())
continue;
@@ -6738,7 +6785,7 @@ QualType CodeGenModule::getObjCFastEnumerationStateType() {
}
D->completeDefinition();
- ObjCFastEnumerationStateType = Context.getTagDeclType(D);
+ ObjCFastEnumerationStateType = Context.getCanonicalTagType(D);
}
return ObjCFastEnumerationStateType;
@@ -7248,7 +7295,8 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
CXXRecordDecl *CRD = cast<CXXRecordDecl>(D);
if (CGDebugInfo *DI = getModuleDebugInfo()) {
if (CRD->hasDefinition())
- DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<RecordDecl>(D)));
if (auto *ES = D->getASTContext().getExternalSource())
if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
DI->completeUnusedClass(*CRD);
@@ -7467,22 +7515,28 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::Typedef:
case Decl::TypeAlias: // using foo = bar; [C++11]
if (CGDebugInfo *DI = getModuleDebugInfo())
- DI->EmitAndRetainType(
- getContext().getTypedefType(cast<TypedefNameDecl>(D)));
+ DI->EmitAndRetainType(getContext().getTypedefType(
+ ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
+ cast<TypedefNameDecl>(D)));
break;
case Decl::Record:
if (CGDebugInfo *DI = getModuleDebugInfo())
if (cast<RecordDecl>(D)->getDefinition())
- DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<RecordDecl>(D)));
break;
case Decl::Enum:
if (CGDebugInfo *DI = getModuleDebugInfo())
if (cast<EnumDecl>(D)->getDefinition())
- DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<EnumDecl>(D)));
break;
+ case Decl::HLSLRootSignature:
+    // Handled by the function the root signature is attached to.
+ break;
case Decl::HLSLBuffer:
getHLSLRuntime().addBuffer(cast<HLSLBufferDecl>(D));
break;
@@ -7880,38 +7934,6 @@ CodeGenModule::CreateMetadataIdentifierForVirtualMemPtrType(QualType T) {
return CreateMetadataIdentifierImpl(T, VirtualMetadataIdMap, ".virtual");
}
-// Generalize pointer types to a void pointer with the qualifiers of the
-// originally pointed-to type, e.g. 'const char *' and 'char * const *'
-// generalize to 'const void *' while 'char *' and 'const char **' generalize to
-// 'void *'.
-static QualType GeneralizeType(ASTContext &Ctx, QualType Ty) {
- if (!Ty->isPointerType())
- return Ty;
-
- return Ctx.getPointerType(
- QualType(Ctx.VoidTy).withCVRQualifiers(
- Ty->getPointeeType().getCVRQualifiers()));
-}
-
-// Apply type generalization to a FunctionType's return and argument types
-static QualType GeneralizeFunctionType(ASTContext &Ctx, QualType Ty) {
- if (auto *FnType = Ty->getAs<FunctionProtoType>()) {
- SmallVector<QualType, 8> GeneralizedParams;
- for (auto &Param : FnType->param_types())
- GeneralizedParams.push_back(GeneralizeType(Ctx, Param));
-
- return Ctx.getFunctionType(
- GeneralizeType(Ctx, FnType->getReturnType()),
- GeneralizedParams, FnType->getExtProtoInfo());
- }
-
- if (auto *FnType = Ty->getAs<FunctionNoProtoType>())
- return Ctx.getFunctionNoProtoType(
- GeneralizeType(Ctx, FnType->getReturnType()));
-
- llvm_unreachable("Encountered unknown FunctionType");
-}
-
llvm::Metadata *CodeGenModule::CreateMetadataIdentifierGeneralized(QualType T) {
return CreateMetadataIdentifierImpl(GeneralizeFunctionType(getContext(), T),
GeneralizedMetadataIdMap, ".generalized");
@@ -7934,8 +7956,8 @@ bool CodeGenModule::NeedAllVtablesTypeId() const {
void CodeGenModule::AddVTableTypeMetadata(llvm::GlobalVariable *VTable,
CharUnits Offset,
const CXXRecordDecl *RD) {
- llvm::Metadata *MD =
- CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ CanQualType T = getContext().getCanonicalTagType(RD);
+ llvm::Metadata *MD = CreateMetadataIdentifierForType(T);
VTable->addTypeMetadata(Offset.getQuantity(), MD);
if (CodeGenOpts.SanitizeCfiCrossDso)
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index cb013fe..b4b3a17 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -17,6 +17,7 @@
#include "CodeGenTypeCache.h"
#include "CodeGenTypes.h"
#include "SanitizerMetadata.h"
+#include "TrapReasonBuilder.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
@@ -1621,7 +1622,7 @@ public:
llvm::ConstantInt *CreateCrossDsoCfiTypeId(llvm::Metadata *MD);
/// Generate a KCFI type identifier for T.
- llvm::ConstantInt *CreateKCFITypeId(QualType T);
+ llvm::ConstantInt *CreateKCFITypeId(QualType T, StringRef Salt);
/// Create a metadata identifier for the given type. This may either be an
/// MDString (for external identifiers) or a distinct unnamed MDNode (for
@@ -1824,6 +1825,11 @@ public:
return PAlign;
}
+  /// Helper function to construct a TrapReasonBuilder.
+ TrapReasonBuilder BuildTrapReason(unsigned DiagID, TrapReason &TR) {
+ return TrapReasonBuilder(&getDiags(), DiagID, TR);
+ }
+
private:
bool shouldDropDLLAttribute(const Decl *D, const llvm::GlobalValue *GV) const;
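
A hedged sketch of how the new helper is meant to compose with the EmitCheck/EmitTrapCheck changes in CodeGenFunction.h (the streaming form mirrors DiagnosticBuilder and is an assumption; only the signatures shown in this patch are taken as given):

    TrapReason TR;
    CGM.BuildTrapReason(DiagID, TR) << FailingExpr; // assumed streaming interface
    CGF.EmitTrapCheck(Checked, Handler, /*NoMerge=*/false, &TR);
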
diff --git a/clang/lib/CodeGen/CodeGenTBAA.cpp b/clang/lib/CodeGen/CodeGenTBAA.cpp
index 90eafe2..f8c7d64 100644
--- a/clang/lib/CodeGen/CodeGenTBAA.cpp
+++ b/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -142,10 +142,9 @@ static bool TypeHasMayAlias(QualType QTy) {
/// Check if the given type is a valid base type to be used in access tags.
static bool isValidBaseType(QualType QTy) {
- if (const RecordType *TTy = QTy->getAs<RecordType>()) {
- const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ if (const auto *RD = QTy->getAsRecordDecl()) {
// Incomplete types are not valid base access types.
- if (!RD)
+ if (!RD->isCompleteDefinition())
return false;
if (RD->hasFlexibleArrayMember())
return false;
@@ -296,7 +295,7 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
// Be conservative if the type isn't a RecordType. We are specifically
// required to do this for member pointers until we implement the
// similar-types rule.
- const auto *RT = Ty->getAs<RecordType>();
+ const auto *RT = Ty->getAsCanonical<RecordType>();
if (!RT)
return getAnyPtr(PtrDepth);
@@ -311,7 +310,7 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
// This also covers anonymous structs and unions, which have a different
// compatibility rule, but it doesn't matter because you can never have a
// pointer to an anonymous struct or union.
- if (!RT->getDecl()->getDeclName())
+ if (!RT->getOriginalDecl()->getDeclName())
return getAnyPtr(PtrDepth);
// For non-builtin types use the mangled name of the canonical type.
@@ -333,14 +332,15 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
// Enum types are distinct types. In C++ they have "underlying types",
// however they aren't related for TBAA.
if (const EnumType *ETy = dyn_cast<EnumType>(Ty)) {
+ const EnumDecl *ED = ETy->getOriginalDecl()->getDefinitionOrSelf();
if (!Features.CPlusPlus)
- return getTypeInfo(ETy->getDecl()->getIntegerType());
+ return getTypeInfo(ED->getIntegerType());
// In C++ mode, types have linkage, so we can rely on the ODR and
// on their mangled names, if they're external.
// TODO: Is there a way to get a program-wide unique name for a
// decl with local linkage or no linkage?
- if (!ETy->getDecl()->isExternallyVisible())
+ if (!ED->isExternallyVisible())
return getChar();
SmallString<256> OutName;
@@ -424,7 +424,7 @@ CodeGenTBAA::CollectFields(uint64_t BaseOffset,
bool MayAlias) {
/* Things not handled yet include: C++ base classes, bitfields, */
- if (const RecordType *TTy = QTy->getAs<RecordType>()) {
+ if (const auto *TTy = QTy->getAsCanonical<RecordType>()) {
if (TTy->isUnionType()) {
uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
llvm::MDNode *TBAAType = getChar();
@@ -433,7 +433,7 @@ CodeGenTBAA::CollectFields(uint64_t BaseOffset,
llvm::MDBuilder::TBAAStructField(BaseOffset, Size, TBAATag));
return true;
}
- const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ const RecordDecl *RD = TTy->getOriginalDecl()->getDefinition();
if (RD->hasFlexibleArrayMember())
return false;
@@ -514,7 +514,7 @@ CodeGenTBAA::getTBAAStructInfo(QualType QTy) {
llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
if (auto *TTy = dyn_cast<RecordType>(Ty)) {
- const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ const RecordDecl *RD = TTy->getOriginalDecl()->getDefinition();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
using TBAAStructField = llvm::MDBuilder::TBAAStructField;
SmallVector<TBAAStructField, 4> Fields;
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
index c98503e..3ffe999 100644
--- a/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -229,12 +229,13 @@ bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
+ CanQualType T = CGM.getContext().getCanonicalTagType(TD);
// If this is an enum being completed, then we flush all non-struct types from
// the cache. This allows function types and other things that may be derived
// from the enum to be recomputed.
if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
// Only flush the cache if we've actually already converted this type.
- if (TypeCache.count(ED->getTypeForDecl())) {
+ if (TypeCache.count(T->getTypePtr())) {
// Okay, we formed some types based on this. We speculated that the enum
// would be lowered to i32, so we only need to flush the cache if this
// didn't happen.
@@ -255,7 +256,7 @@ void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
// Only complete it if we converted it already. If we haven't converted it
// yet, we'll just do it lazily.
- if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
+ if (RecordDeclTypes.count(T.getTypePtr()))
ConvertRecordDeclType(RD);
// If necessary, provide the full definition of a type only used with a
@@ -265,7 +266,7 @@ void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
}
void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
- QualType T = Context.getRecordType(RD);
+ CanQualType T = Context.getCanonicalTagType(RD);
T = Context.getCanonicalType(T);
const Type *Ty = T.getTypePtr();
@@ -310,12 +311,12 @@ llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
// Force conversion of all the relevant record types, to make sure
// we re-convert the FunctionType when appropriate.
- if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
- ConvertRecordDeclType(RT->getDecl());
+ if (const auto *RD = FT->getReturnType()->getAsRecordDecl())
+ ConvertRecordDeclType(RD);
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
- if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
- ConvertRecordDeclType(RT->getDecl());
+ if (const auto *RD = FPT->getParamType(i)->getAsRecordDecl())
+ ConvertRecordDeclType(RD);
SkippedLayout = true;
@@ -373,7 +374,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
// RecordTypes are cached and processed specially.
if (const RecordType *RT = dyn_cast<RecordType>(Ty))
- return ConvertRecordDeclType(RT->getDecl());
+ return ConvertRecordDeclType(RT->getOriginalDecl()->getDefinitionOrSelf());
llvm::Type *CachedType = nullptr;
auto TCI = TypeCache.find(Ty);
@@ -699,7 +700,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
break;
case Type::Enum: {
- const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
+ const auto *ED = Ty->castAsEnumDecl();
if (ED->isCompleteDefinition() || ED->isFixed())
return ConvertType(ED->getIntegerType());
// Return a placeholder 'i32' type. This can be changed later when the
@@ -725,8 +726,10 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case Type::MemberPointer: {
auto *MPTy = cast<MemberPointerType>(Ty);
if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
- auto *C = MPTy->getMostRecentCXXRecordDecl()->getTypeForDecl();
- auto Insertion = RecordsWithOpaqueMemberPointers.try_emplace(C);
+ CanQualType T = CGM.getContext().getCanonicalTagType(
+ MPTy->getMostRecentCXXRecordDecl());
+ auto Insertion =
+ RecordsWithOpaqueMemberPointers.try_emplace(T.getTypePtr());
if (Insertion.second)
Insertion.first->second = llvm::StructType::create(getLLVMContext());
ResultType = Insertion.first->second;
@@ -789,7 +792,7 @@ bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
// TagDecl's are not necessarily unique, instead use the (clang)
// type connected to the decl.
- const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+ const Type *Key = Context.getCanonicalTagType(RD).getTypePtr();
llvm::StructType *&Entry = RecordDeclTypes[Key];
@@ -810,7 +813,7 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const auto &I : CRD->bases()) {
if (I.isVirtual()) continue;
- ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
+ ConvertRecordDeclType(I.getType()->castAsRecordDecl());
}
}
@@ -830,7 +833,7 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
- const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+ const Type *Key = Context.getCanonicalTagType(RD).getTypePtr();
auto I = CGRecordLayouts.find(Key);
if (I != CGRecordLayouts.end())
@@ -868,10 +871,8 @@ bool CodeGenTypes::isZeroInitializable(QualType T) {
// Records are non-zero-initializable if they contain any
// non-zero-initializable subobjects.
- if (const RecordType *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ if (const auto *RD = T->getAsRecordDecl())
return isZeroInitializable(RD);
- }
// We have to ask the ABI about member pointers.
if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
diff --git a/clang/lib/CodeGen/CoverageMappingGen.cpp b/clang/lib/CodeGen/CoverageMappingGen.cpp
index 38aaceb..05fb137 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -2269,6 +2269,11 @@ struct CounterCoverageMappingBuilder
// Track LHS True/False Decision.
const auto DecisionLHS = MCDCBuilder.pop();
+ if (auto Gap =
+ findGapAreaBetween(getEnd(E->getLHS()), getStart(E->getRHS()))) {
+ fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), getRegionCounter(E));
+ }
+
// Counter tracks the right hand side of a logical and operator.
extendRegion(E->getRHS());
propagateCounts(getRegionCounter(E), E->getRHS());
@@ -2330,6 +2335,11 @@ struct CounterCoverageMappingBuilder
// Track LHS True/False Decision.
const auto DecisionLHS = MCDCBuilder.pop();
+ if (auto Gap =
+ findGapAreaBetween(getEnd(E->getLHS()), getStart(E->getRHS()))) {
+ fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), getRegionCounter(E));
+ }
+
// Counter tracks the right hand side of a logical or operator.
extendRegion(E->getRHS());
propagateCounts(getRegionCounter(E), E->getRHS());
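
The two symmetric additions map the gap between the operands of && and || (the operator token and surrounding whitespace) to the count of the whole expression, instead of leaving that range unmapped. Roughly:

    bool r = lhs()   // LHS region ends here
             &&      // gap region: now filled with getRegionCounter(E)
             rhs();  // RHS region starts here
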
diff --git a/clang/lib/CodeGen/EHScopeStack.h b/clang/lib/CodeGen/EHScopeStack.h
index ed11dc2..54f6cea 100644
--- a/clang/lib/CodeGen/EHScopeStack.h
+++ b/clang/lib/CodeGen/EHScopeStack.h
@@ -143,7 +143,7 @@ public:
///
/// Cleanup implementations should generally be declared in an
/// anonymous namespace.
- class Cleanup {
+ class alignas(uint64_t) Cleanup {
// Anchor the construction vtable.
virtual void anchor();
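
This alignas, together with the one on LifetimeExtendedCleanupHeader and the revised static_assert in CodeGenFunction.h, enforces a single alignment class for everything placement-new'ed into the cleanup byte buffer. A sketch of the layout being protected (names abridged):

    char *Buf = &LifetimeExtendedCleanupStack[OldSize]; // suitably aligned slab
    new (Buf) LifetimeExtendedCleanupHeader(Header);    // alignas(uint64_t)
    new (Buf + sizeof(Header)) T(...);                  // safe: alignof(T) == alignof(Header)
    // an optional RawAddress active flag follows T in the same buffer
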
diff --git a/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp b/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp
index 1ed3389..a21feaa 100644
--- a/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp
@@ -85,23 +85,22 @@ llvm::TargetExtType *HLSLBufferLayoutBuilder::createLayoutType(
Layout.push_back(0);
// iterate over all fields of the record, including fields on base classes
- llvm::SmallVector<const RecordType *> RecordTypes;
- RecordTypes.push_back(RT);
- while (RecordTypes.back()->getAsCXXRecordDecl()->getNumBases()) {
- CXXRecordDecl *D = RecordTypes.back()->getAsCXXRecordDecl();
+ llvm::SmallVector<CXXRecordDecl *> RecordDecls;
+ RecordDecls.push_back(RT->castAsCXXRecordDecl());
+ while (RecordDecls.back()->getNumBases()) {
+ CXXRecordDecl *D = RecordDecls.back();
assert(D->getNumBases() == 1 &&
"HLSL doesn't support multiple inheritance");
- RecordTypes.push_back(D->bases_begin()->getType()->getAs<RecordType>());
+ RecordDecls.push_back(D->bases_begin()->getType()->castAsCXXRecordDecl());
}
unsigned FieldOffset;
llvm::Type *FieldType;
- while (!RecordTypes.empty()) {
- const RecordType *RT = RecordTypes.back();
- RecordTypes.pop_back();
+ while (!RecordDecls.empty()) {
+ const CXXRecordDecl *RD = RecordDecls.pop_back_val();
- for (const auto *FD : RT->getDecl()->fields()) {
+ for (const auto *FD : RD->fields()) {
assert((!PackOffsets || Index < PackOffsets->size()) &&
"number of elements in layout struct does not match number of "
"packoffset annotations");
@@ -147,7 +146,7 @@ llvm::TargetExtType *HLSLBufferLayoutBuilder::createLayoutType(
// create the layout struct type; anonymous struct have empty name but
// non-empty qualified name
- const CXXRecordDecl *Decl = RT->getAsCXXRecordDecl();
+ const auto *Decl = RT->castAsCXXRecordDecl();
std::string Name =
Decl->getName().empty() ? "anon" : Decl->getQualifiedNameAsString();
llvm::StructType *StructTy =
@@ -202,8 +201,8 @@ bool HLSLBufferLayoutBuilder::layoutField(const FieldDecl *FD,
// For array of structures, create a new array with a layout type
// instead of the structure type.
if (Ty->isStructureOrClassType()) {
- llvm::Type *NewTy =
- cast<llvm::TargetExtType>(createLayoutType(Ty->getAs<RecordType>()));
+ llvm::Type *NewTy = cast<llvm::TargetExtType>(
+ createLayoutType(Ty->getAsCanonical<RecordType>()));
if (!NewTy)
return false;
assert(isa<llvm::TargetExtType>(NewTy) && "expected target type");
@@ -221,8 +220,8 @@ bool HLSLBufferLayoutBuilder::layoutField(const FieldDecl *FD,
} else if (FieldTy->isStructureOrClassType()) {
// Create a layout type for the structure
- ElemLayoutTy =
- createLayoutType(cast<RecordType>(FieldTy->getAs<RecordType>()));
+ ElemLayoutTy = createLayoutType(
+ cast<RecordType>(FieldTy->getAsCanonical<RecordType>()));
if (!ElemLayoutTy)
return false;
assert(isa<llvm::TargetExtType>(ElemLayoutTy) && "expected target type");
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 5ffc1ed..885b700 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -831,7 +831,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
getContext().getMemberPointerType(MPT->getPointeeType(),
- /*Qualifier=*/nullptr,
+ /*Qualifier=*/std::nullopt,
Base->getCanonicalDecl()));
llvm::Value *TypeId =
llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
@@ -1241,7 +1241,7 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) {
llvm::Constant *Src = BuildMemberPointer(MD, ThisAdjustment);
QualType SrcType = getContext().getMemberPointerType(
- MD->getType(), /*Qualifier=*/nullptr, MD->getParent());
+ MD->getType(), /*Qualifier=*/std::nullopt, MD->getParent());
return pointerAuthResignMemberFunctionPointer(Src, MPType, SrcType, CGM);
}
@@ -1397,8 +1397,7 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// to pass to the deallocation function.
// Grab the vtable pointer as an intptr_t*.
- auto *ClassDecl =
- cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
+ auto *ClassDecl = ElementType->castAsCXXRecordDecl();
llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);
// Track back to entry -2 and pull out the offset there.
@@ -1483,20 +1482,18 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
// The address of the destructor. If the exception type has a
// trivial destructor (or isn't a record), we just pass null.
llvm::Constant *Dtor = nullptr;
- if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
- if (!Record->hasTrivialDestructor()) {
- // __cxa_throw is declared to take its destructor as void (*)(void *). We
- // must match that if function pointers can be authenticated with a
- // discriminator based on their type.
- const ASTContext &Ctx = getContext();
- QualType DtorTy = Ctx.getFunctionType(Ctx.VoidTy, {Ctx.VoidPtrTy},
- FunctionProtoType::ExtProtoInfo());
-
- CXXDestructorDecl *DtorD = Record->getDestructor();
- Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
- Dtor = CGM.getFunctionPointer(Dtor, DtorTy);
- }
+ if (const auto *Record = ThrowType->getAsCXXRecordDecl();
+ Record && !Record->hasTrivialDestructor()) {
+ // __cxa_throw is declared to take its destructor as void (*)(void *). We
+ // must match that if function pointers can be authenticated with a
+ // discriminator based on their type.
+ const ASTContext &Ctx = getContext();
+ QualType DtorTy = Ctx.getFunctionType(Ctx.VoidTy, {Ctx.VoidPtrTy},
+ FunctionProtoType::ExtProtoInfo());
+
+ CXXDestructorDecl *DtorD = Record->getDestructor();
+ Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
+ Dtor = CGM.getFunctionPointer(Dtor, DtorTy);
}
if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
@@ -1610,8 +1607,7 @@ llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
QualType SrcRecordTy,
Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) {
- auto *ClassDecl =
- cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
+ auto *ClassDecl = SrcRecordTy->castAsCXXRecordDecl();
llvm::Value *Value = CGF.GetVTablePtr(ThisPtr, CGM.GlobalsInt8PtrTy,
ClassDecl);
@@ -1783,8 +1779,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
Address ThisAddr,
QualType SrcRecordTy) {
- auto *ClassDecl =
- cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
+ auto *ClassDecl = SrcRecordTy->castAsCXXRecordDecl();
llvm::Value *OffsetToTop;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Get the vtable pointer.
@@ -2037,7 +2032,7 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
llvm::Constant *RTTI =
- CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
+ CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getCanonicalTagType(RD));
// Create and set the initializer.
ConstantInitBuilder builder(CGM);
@@ -3776,7 +3771,8 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
if (!Context.getLangOpts().RTTI) return false;
if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(RecordTy->getOriginalDecl())->getDefinitionOrSelf();
if (!RD->hasDefinition())
return false;
@@ -3810,7 +3806,9 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
/// IsIncompleteClassType - Returns whether the given record type is incomplete.
static bool IsIncompleteClassType(const RecordType *RecordTy) {
- return !RecordTy->getDecl()->isCompleteDefinition();
+ return !RecordTy->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isCompleteDefinition();
}
/// ContainsIncompleteClassType - Returns whether the given type contains an
@@ -3836,9 +3834,7 @@ static bool ContainsIncompleteClassType(QualType Ty) {
if (const MemberPointerType *MemberPointerTy =
dyn_cast<MemberPointerType>(Ty)) {
// Check if the class type is incomplete.
- const auto *ClassType = cast<RecordType>(
- MemberPointerTy->getMostRecentCXXRecordDecl()->getTypeForDecl());
- if (IsIncompleteClassType(ClassType))
+ if (!MemberPointerTy->getMostRecentCXXRecordDecl()->hasDefinition())
return true;
return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
@@ -3867,8 +3863,7 @@ static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
return false;
// Check that the class is dynamic iff the base is.
- auto *BaseDecl =
- cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
+ auto *BaseDecl = Base->getType()->castAsCXXRecordDecl();
if (!BaseDecl->isEmpty() &&
BaseDecl->isDynamicClass() != RD->isDynamicClass())
return false;
@@ -3947,7 +3942,8 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty,
case Type::Record: {
const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!RD->hasDefinition() || !RD->getNumBases()) {
VTableName = ClassTypeInfo;
@@ -4069,7 +4065,8 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
return llvm::GlobalValue::LinkOnceODRLinkage;
if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(Record->getOriginalDecl())->getDefinitionOrSelf();
if (RD->hasAttr<WeakAttr>())
return llvm::GlobalValue::WeakODRLinkage;
if (CGM.getTriple().isWindowsItaniumEnvironment())
@@ -4233,7 +4230,8 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
case Type::Record: {
const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!RD->hasDefinition() || !RD->getNumBases()) {
// We don't need to emit any fields.
break;
@@ -4280,7 +4278,8 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
if (CGM.getTarget().hasPS4DLLImportExport() &&
GVDLLStorageClass != llvm::GlobalVariable::DLLExportStorageClass) {
if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (RD->hasAttr<DLLExportAttr>() ||
CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
@@ -4384,9 +4383,7 @@ static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
unsigned Flags = 0;
- auto *BaseDecl =
- cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
-
+ auto *BaseDecl = Base->getType()->castAsCXXRecordDecl();
if (Base->isVirtual()) {
// Mark the virtual base as seen.
if (!Bases.VirtualBases.insert(BaseDecl).second) {
@@ -4484,9 +4481,7 @@ void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
// The __base_type member points to the RTTI for the base type.
Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
- auto *BaseDecl =
- cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
-
+ auto *BaseDecl = Base.getType()->castAsCXXRecordDecl();
int64_t OffsetFlags = 0;
// All but the lower 8 bits of __offset_flags are a signed offset.
@@ -4575,9 +4570,8 @@ ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
// attributes of the type pointed to.
unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
- const auto *ClassType =
- cast<RecordType>(Ty->getMostRecentCXXRecordDecl()->getTypeForDecl());
- if (IsIncompleteClassType(ClassType))
+ const auto *RD = Ty->getMostRecentCXXRecordDecl();
+ if (!RD->hasDefinition())
Flags |= PTI_ContainingClassIncomplete;
llvm::Type *UnsignedIntLTy =
@@ -4595,8 +4589,8 @@ ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
// __context is a pointer to an abi::__class_type_info corresponding to the
// class type containing the member pointed to
// (e.g., the "A" in "int A::*").
- Fields.push_back(
- ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
+ CanQualType T = CGM.getContext().getCanonicalTagType(RD);
+ Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(T));
}
llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
@@ -5176,7 +5170,7 @@ ItaniumCXXABI::getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD) {
.getDecl());
llvm::Constant *thunk = getOrCreateVirtualFunctionPointerThunk(origMD);
QualType funcType = CGM.getContext().getMemberPointerType(
- MD->getType(), /*Qualifier=*/nullptr, MD->getParent());
+ MD->getType(), /*Qualifier=*/std::nullopt, MD->getParent());
return CGM.getMemberFunctionPointer(thunk, funcType);
}
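
Both RTTI sites now query the declaration chain directly (hasDefinition) instead of round-tripping through the class's RecordType. A sketch of the case the flag encodes:

    struct S;             // never defined in this TU
    using PM = int S::*;  // typeid(PM): flags gain PTI_ContainingClassIncomplete
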
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index e8d2451..88f0648 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -876,7 +876,8 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
// it indirectly. Prior to MSVC version 19.14, passing overaligned
// arguments was not supported and resulted in a compiler error. In 19.14
// and later versions, such arguments are now passed indirectly.
- TypeInfo Info = getContext().getTypeInfo(RD->getTypeForDecl());
+ TypeInfo Info =
+ getContext().getTypeInfo(getContext().getCanonicalTagType(RD));
if (Info.isAlignRequired() && Info.Align > 4)
return RAA_Indirect;
@@ -2924,15 +2925,15 @@ llvm::Constant *MicrosoftCXXABI::EmitMemberPointer(const APValue &MP,
if (!FD)
FD = cast<FieldDecl>(*cast<IndirectFieldDecl>(MPD)->chain_begin());
const CXXRecordDecl *RD = cast<CXXRecordDecl>(FD->getParent());
- RD = RD->getMostRecentNonInjectedDecl();
+ RD = RD->getMostRecentDecl();
C = EmitMemberDataPointer(RD, FieldOffset);
}
if (!MemberPointerPath.empty()) {
const CXXRecordDecl *SrcRD = cast<CXXRecordDecl>(MPD->getDeclContext());
const MemberPointerType *SrcTy =
- Ctx.getMemberPointerType(DstTy->getPointeeType(), /*Qualifier=*/nullptr,
- SrcRD)
+ Ctx.getMemberPointerType(DstTy->getPointeeType(),
+ /*Qualifier=*/std::nullopt, SrcRD)
->castAs<MemberPointerType>();
bool DerivedMember = MP.isMemberPointerToDerivedMember();
@@ -2969,7 +2970,7 @@ MicrosoftCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
assert(MD->isInstance() && "Member function must not be static!");
CharUnits NonVirtualBaseAdjustment = CharUnits::Zero();
- const CXXRecordDecl *RD = MD->getParent()->getMostRecentNonInjectedDecl();
+ const CXXRecordDecl *RD = MD->getParent()->getMostRecentDecl();
CodeGenTypes &Types = CGM.getTypes();
unsigned VBTableIndex = 0;
@@ -3690,7 +3691,7 @@ struct MSRTTIBuilder {
MSRTTIBuilder(MicrosoftCXXABI &ABI, const CXXRecordDecl *RD)
: CGM(ABI.CGM), Context(CGM.getContext()),
VMContext(CGM.getLLVMContext()), Module(CGM.getModule()), RD(RD),
- Linkage(getLinkageForRTTI(CGM.getContext().getTagDeclType(RD))),
+ Linkage(getLinkageForRTTI(CGM.getContext().getCanonicalTagType(RD))),
ABI(ABI) {}
llvm::GlobalVariable *getBaseClassDescriptor(const MSRTTIClass &Classes);
@@ -3864,7 +3865,7 @@ MSRTTIBuilder::getBaseClassDescriptor(const MSRTTIClass &Class) {
// Initialize the BaseClassDescriptor.
llvm::Constant *Fields[] = {
ABI.getImageRelativeConstant(
- ABI.getAddrOfRTTIDescriptor(Context.getTypeDeclType(Class.RD))),
+ ABI.getAddrOfRTTIDescriptor(Context.getCanonicalTagType(Class.RD))),
llvm::ConstantInt::get(CGM.IntTy, Class.NumBases),
llvm::ConstantInt::get(CGM.IntTy, Class.OffsetInVBase),
llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset),
@@ -3911,7 +3912,7 @@ MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo &Info) {
llvm::ConstantInt::get(CGM.IntTy, OffsetToTop),
llvm::ConstantInt::get(CGM.IntTy, VFPtrOffset),
ABI.getImageRelativeConstant(
- CGM.GetAddrOfRTTIDescriptor(Context.getTypeDeclType(RD))),
+ CGM.GetAddrOfRTTIDescriptor(Context.getCanonicalTagType(RD))),
ABI.getImageRelativeConstant(getClassHierarchyDescriptor()),
ABI.getImageRelativeConstant(COL),
};
@@ -4082,7 +4083,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeMSCtorClosure(CD, CT);
llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
const CXXRecordDecl *RD = CD->getParent();
- QualType RecordTy = getContext().getRecordType(RD);
+ CanQualType RecordTy = getContext().getCanonicalTagType(RD);
llvm::Function *ThunkFn = llvm::Function::Create(
ThunkTy, getLinkageForRTTI(RecordTy), ThunkName.str(), &CGM.getModule());
ThunkFn->setCallingConv(static_cast<llvm::CallingConv::ID>(
@@ -4318,7 +4319,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
// Turn our record back into a pointer if the exception object is a
// pointer.
- QualType RTTITy = QualType(Class.RD->getTypeForDecl(), 0);
+ CanQualType RTTITy = Context.getCanonicalTagType(Class.RD);
if (IsPointer)
RTTITy = Context.getPointerType(RTTITy);
CatchableTypes.insert(getCatchableType(RTTITy, Class.OffsetInVBase,
@@ -4469,8 +4470,8 @@ void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
std::pair<llvm::Value *, const CXXRecordDecl *>
MicrosoftCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *RD) {
- std::tie(This, std::ignore, RD) =
- performBaseAdjustment(CGF, This, QualType(RD->getTypeForDecl(), 0));
+ CanQualType T = CGF.getContext().getCanonicalTagType(RD);
+ std::tie(This, std::ignore, RD) = performBaseAdjustment(CGF, This, T);
return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}
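The Microsoft ABI hunks make one further substitution worth calling out: rather than rebuilding a QualType by hand from a declaration's type pointer, they ask the ASTContext for the canonical tag type, which also yields the stronger CanQualType. A hedged sketch, assuming an ASTContext &Ctx and a CXXRecordDecl *RD in scope:

    // Before: reconstruct an unqualified QualType manually.
    QualType T0 = QualType(RD->getTypeForDecl(), 0);

    // After: let the context produce the canonical tag type.
    CanQualType T1 = Ctx.getCanonicalTagType(RD);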
diff --git a/clang/lib/CodeGen/SwiftCallingConv.cpp b/clang/lib/CodeGen/SwiftCallingConv.cpp
index 10f9f20b..4d894fd 100644
--- a/clang/lib/CodeGen/SwiftCallingConv.cpp
+++ b/clang/lib/CodeGen/SwiftCallingConv.cpp
@@ -65,10 +65,10 @@ void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
// Deal with various aggregate types as special cases:
// Record types.
- if (auto recType = type->getAs<RecordType>()) {
- addTypedData(recType->getDecl(), begin);
+ if (auto recType = type->getAsCanonical<RecordType>()) {
+ addTypedData(recType->getOriginalDecl(), begin);
- // Array types.
+ // Array types.
} else if (type->isArrayType()) {
// Incomplete array types (flexible array members?) don't provide
// data to lay out, and the other cases shouldn't be possible.
@@ -814,7 +814,7 @@ static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
bool forReturn) {
unsigned IndirectAS = CGM.getDataLayout().getAllocaAddrSpace();
if (auto recordType = dyn_cast<RecordType>(type)) {
- auto record = recordType->getDecl();
+ auto record = recordType->getOriginalDecl();
auto &layout = CGM.getContext().getASTRecordLayout(record);
if (mustPassRecordIndirectly(CGM, record))
@@ -822,7 +822,8 @@ static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
/*AddrSpace=*/IndirectAS, /*byval=*/false);
SwiftAggLowering lowering(CGM);
- lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
+ lowering.addTypedData(recordType->getOriginalDecl(), CharUnits::Zero(),
+ layout);
lowering.finish();
return classifyExpandedType(lowering, forReturn, layout.getAlignment(),
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index dad1f95..433d76b 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -504,6 +504,13 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, { ResultType });
return Builder.CreateCall(F, { Src });
}
+ case AMDGPU::BI__builtin_amdgcn_inverse_ballot_w32:
+ case AMDGPU::BI__builtin_amdgcn_inverse_ballot_w64: {
+ llvm::Value *Src = EmitScalarExpr(E->getArg(0));
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::amdgcn_inverse_ballot, {Src->getType()});
+ return Builder.CreateCall(F, {Src});
+ }
case AMDGPU::BI__builtin_amdgcn_tanhf:
case AMDGPU::BI__builtin_amdgcn_tanhh:
case AMDGPU::BI__builtin_amdgcn_tanh_bf16:
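The new inverse_ballot cases are the dual of the ballot builtins handled just above: ballot packs a per-lane predicate into a scalar mask, and inverse_ballot unpacks a scalar mask back into a per-lane bool. A hedged usage sketch (device code, wave32, with a hypothetical predicate pred):

    // Round-trip a predicate through a ballot mask.
    unsigned mask = __builtin_amdgcn_ballot_w32(pred);
    bool active = __builtin_amdgcn_inverse_ballot_w32(mask);
    // 'active' now mirrors 'pred' in every lane of the wave.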
diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index 980f7eb..60413e7 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -358,7 +358,7 @@ static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
NeonTypeFlags TypeFlags,
- bool HasLegalHalfType = true,
+ bool HasFastHalfType = true,
bool V1Ty = false,
bool AllowBFloatArgsAndRet = true) {
int IsQuad = TypeFlags.isQuad();
@@ -376,7 +376,7 @@ static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
else
return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Float16:
- if (HasLegalHalfType)
+ if (HasFastHalfType)
return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
else
return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
@@ -1754,12 +1754,12 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
const bool Usgn = Type.isUnsigned();
const bool Quad = Type.isQuad();
const bool Floating = Type.isFloatingPoint();
- const bool HasLegalHalfType = getTarget().hasLegalHalfType();
+ const bool HasFastHalfType = getTarget().hasFastHalfType();
const bool AllowBFloatArgsAndRet =
getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
llvm::FixedVectorType *VTy =
- GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
+ GetNeonType(this, Type, HasFastHalfType, false, AllowBFloatArgsAndRet);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
@@ -1886,7 +1886,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvtq_f32_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
- HasLegalHalfType);
+ HasFastHalfType);
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_f16_s16:
@@ -1895,7 +1895,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvtq_f16_u16:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
- HasLegalHalfType);
+ HasFastHalfType);
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_n_f16_s16:
@@ -3211,7 +3211,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
bool rightShift = false;
llvm::FixedVectorType *VTy =
- GetNeonType(this, Type, getTarget().hasLegalHalfType(), false,
+ GetNeonType(this, Type, getTarget().hasFastHalfType(), false,
getTarget().hasBFloat16Type());
llvm::Type *Ty = VTy;
if (!Ty)
diff --git a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
index 270e9fc..ba65cf1 100644
--- a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
@@ -1152,10 +1152,13 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
CallOps.push_back(Acc);
}
if (BuiltinID == PPC::BI__builtin_mma_dmmr ||
- BuiltinID == PPC::BI__builtin_mma_dmxor) {
+ BuiltinID == PPC::BI__builtin_mma_dmxor ||
+ BuiltinID == PPC::BI__builtin_mma_disassemble_dmr) {
Address Addr = EmitPointerWithAlignment(E->getArg(1));
Ops[1] = Builder.CreateLoad(Addr);
}
+ if (BuiltinID == PPC::BI__builtin_mma_disassemble_dmr)
+ return Builder.CreateAlignedStore(Ops[1], Ops[0], MaybeAlign());
for (unsigned i=1; i<Ops.size(); i++)
CallOps.push_back(Ops[i]);
llvm::Function *F = CGM.getIntrinsic(ID);
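The added branch gives __builtin_mma_disassemble_dmr an early exit: once the accumulator operand has been loaded, the builtin reduces to a plain store of the 1024-bit DMR value through the destination pointer, with no intrinsic call emitted. A hedged usage sketch (spill_dmr is a hypothetical helper; the __dmr1024 type is assumed from the dense-math builtins):

    // Spill a dense-math accumulator out to a byte buffer.
    void spill_dmr(unsigned char *buf, __dmr1024 *acc) {
      __builtin_mma_disassemble_dmr(buf, acc);
    }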
diff --git a/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp b/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp
index b08a058..8e67f8e 100644
--- a/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp
@@ -20,6 +20,951 @@ using namespace clang;
using namespace CodeGen;
using namespace llvm;
+// The 0th bit simulates the `vta` of RVV
+// The 1st bit simulates the `vma` of RVV
+static constexpr unsigned RVV_VTA = 0x1;
+static constexpr unsigned RVV_VMA = 0x2;
+
+// RISC-V Vector builtin helper functions are marked NOINLINE to prevent
+// excessive inlining in CodeGenFunction::EmitRISCVBuiltinExpr's large switch
+// statement, which would significantly increase compilation time.
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVVLEFFBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ if (IsMasked) {
+ // Move mask to right before vl.
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ IntrinsicTypes = {ResultType, Ops[4]->getType(), Ops[2]->getType()};
+ } else {
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ IntrinsicTypes = {ResultType, Ops[3]->getType(), Ops[1]->getType()};
+ }
+ Value *NewVL = Ops[2];
+ Ops.erase(Ops.begin() + 2);
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
+ llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
+ // Store new_vl.
+ clang::CharUnits Align;
+ if (IsMasked)
+ Align = CGM.getNaturalPointeeTypeAlignment(
+ E->getArg(E->getNumArgs() - 2)->getType());
+ else
+ Align = CGM.getNaturalPointeeTypeAlignment(E->getArg(1)->getType());
+ llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {1});
+ Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
+ return V;
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVVSSEBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ if (IsMasked) {
+    // Builtin: (mask, ptr, stride, value, vl).
+    // Intrinsic: (value, ptr, stride, mask, vl)
+ std::swap(Ops[0], Ops[3]);
+ } else {
+ // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
+ std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
+ }
+ if (IsMasked)
+ IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[4]->getType()};
+ else
+ IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVIndexedStoreBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
+ if (IsMasked) {
+ // Builtin: (mask, ptr, index, value, vl).
+ // Intrinsic: (value, ptr, index, mask, vl)
+ std::swap(Ops[0], Ops[3]);
+ } else {
+ // Builtin: (ptr, index, value, vl).
+ // Intrinsic: (value, ptr, index, vl)
+ std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
+ }
+ if (IsMasked)
+ IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
+ Ops[4]->getType()};
+ else
+ IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
+ Ops[3]->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVPseudoUnaryBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ if (IsMasked) {
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ } else {
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ }
+ auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
+ Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
+ if (IsMasked) {
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ // maskedoff, op1, op2, mask, vl, policy
+ IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()};
+ } else {
+ // passthru, op1, op2, vl
+ IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()};
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVPseudoVNotBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ if (IsMasked) {
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ } else {
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ }
+ auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
+ Ops.insert(Ops.begin() + 2, llvm::Constant::getAllOnesValue(ElemTy));
+ if (IsMasked) {
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+    // maskedoff, op1, op2, mask, vl, policy
+ IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()};
+ } else {
+ // passthru, op1, op2, vl
+ IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()};
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVPseudoMaskBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ // op1, vl
+ IntrinsicTypes = {ResultType, Ops[1]->getType()};
+ Ops.insert(Ops.begin() + 1, Ops[0]);
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVPseudoVFUnaryBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ if (IsMasked) {
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ Ops.insert(Ops.begin() + 2, Ops[1]);
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+    // maskedoff, op1, op2, mask, vl, policy
+ IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
+ } else {
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+    // op1, op2, vl
+ IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()};
+ Ops.insert(Ops.begin() + 2, Ops[1]);
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVPseudoVWCVTBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
+ if (IsMasked) {
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ } else {
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ }
+ auto ElemTy = cast<llvm::VectorType>(Ops[1]->getType())->getElementType();
+ Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
+ if (IsMasked) {
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ // maskedoff, op1, op2, mask, vl, policy
+ IntrinsicTypes = {ResultType, Ops[1]->getType(), ElemTy, Ops[4]->getType()};
+ } else {
+    // passthru, op1, op2, vl
+ IntrinsicTypes = {ResultType, Ops[1]->getType(), ElemTy, Ops[3]->getType()};
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVPseudoVNCVTBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
+ if (IsMasked) {
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ } else {
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ }
+ Ops.insert(Ops.begin() + 2,
+ llvm::Constant::getNullValue(Ops.back()->getType()));
+ if (IsMasked) {
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+    // maskedoff, op1, xlen, mask, vl, policy
+ IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[4]->getType(),
+ Ops[4]->getType()};
+ } else {
+ // passthru, op1, xlen, vl
+ IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType(),
+ Ops[3]->getType()};
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVVlenbBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ LLVMContext &Context = CGM.getLLVMContext();
+ llvm::MDBuilder MDHelper(Context);
+ llvm::Metadata *OpsMD[] = {llvm::MDString::get(Context, "vlenb")};
+ llvm::MDNode *RegName = llvm::MDNode::get(Context, OpsMD);
+ llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::read_register, {CGF->SizeTy});
+ return Builder.CreateCall(F, Metadata);
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVVsetvliBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::Function *F = CGM.getIntrinsic(ID, {ResultType});
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVVSEMaskBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ if (IsMasked) {
+ // Builtin: (mask, ptr, value, vl).
+ // Intrinsic: (value, ptr, mask, vl)
+ std::swap(Ops[0], Ops[2]);
+ } else {
+ // Builtin: (ptr, value, vl).
+ // Intrinsic: (value, ptr, vl)
+ std::swap(Ops[0], Ops[1]);
+ }
+ if (IsMasked)
+ IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
+ else
+ IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVUnitStridedSegLoadTupleBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
+ bool NoPassthru =
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
+ (!IsMasked && (PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
+ if (IsMasked)
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[0]->getType(),
+ Ops.back()->getType()};
+ else
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
+ Ops.back()->getType()};
+ if (IsMasked)
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if (NoPassthru)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ if (IsMasked)
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
+ if (ReturnValue.isNull())
+ return LoadValue;
+ return Builder.CreateStore(LoadValue, ReturnValue.getValue());
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVUnitStridedSegStoreTupleBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
+ // Masked
+ // Builtin: (mask, ptr, v_tuple, vl)
+ // Intrinsic: (tuple, ptr, mask, vl, SegInstSEW)
+ // Unmasked
+ // Builtin: (ptr, v_tuple, vl)
+ // Intrinsic: (tuple, ptr, vl, SegInstSEW)
+ if (IsMasked)
+ std::swap(Ops[0], Ops[2]);
+ else
+ std::swap(Ops[0], Ops[1]);
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
+ if (IsMasked)
+ IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
+ Ops[3]->getType()};
+ else
+ IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVUnitStridedSegLoadFFTupleBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
+ bool NoPassthru =
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
+ (!IsMasked && (PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
+ if (IsMasked)
+ IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[Offset]->getType(),
+ Ops[0]->getType()};
+ else
+ IntrinsicTypes = {ResultType, Ops.back()->getType(),
+ Ops[Offset]->getType()};
+ if (IsMasked)
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if (NoPassthru)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ if (IsMasked)
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
+ Value *NewVL = Ops[2];
+ Ops.erase(Ops.begin() + 2);
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
+ // Get alignment from the new vl operand
+ clang::CharUnits Align =
+ CGM.getNaturalPointeeTypeAlignment(E->getArg(Offset + 1)->getType());
+ llvm::Value *ReturnTuple = Builder.CreateExtractValue(LoadValue, 0);
+ // Store new_vl
+ llvm::Value *V = Builder.CreateExtractValue(LoadValue, 1);
+ Builder.CreateStore(V, Address(NewVL, V->getType(), Align));
+ if (ReturnValue.isNull())
+ return ReturnTuple;
+ return Builder.CreateStore(ReturnTuple, ReturnValue.getValue());
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVStridedSegLoadTupleBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
+ bool NoPassthru =
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
+ (!IsMasked && (PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
+ if (IsMasked)
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops.back()->getType(),
+ Ops[0]->getType()};
+ else
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
+ Ops.back()->getType()};
+ if (IsMasked)
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if (NoPassthru)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ if (IsMasked)
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
+ if (ReturnValue.isNull())
+ return LoadValue;
+ return Builder.CreateStore(LoadValue, ReturnValue.getValue());
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVStridedSegStoreTupleBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
+ // Masked
+ // Builtin: (mask, ptr, stride, v_tuple, vl)
+ // Intrinsic: (tuple, ptr, stride, mask, vl, SegInstSEW)
+ // Unmasked
+ // Builtin: (ptr, stride, v_tuple, vl)
+ // Intrinsic: (tuple, ptr, stride, vl, SegInstSEW)
+ if (IsMasked)
+ std::swap(Ops[0], Ops[3]);
+ else
+ std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
+ if (IsMasked)
+ IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[4]->getType(),
+ Ops[3]->getType()};
+ else
+ IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVAveragingBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl,
+ // policy)
+
+ bool HasMaskedOff =
+ !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+
+ if (IsMasked)
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+
+ if (!HasMaskedOff)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+
+ if (IsMasked)
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ llvm::Function *F = CGM.getIntrinsic(
+ ID, {ResultType, Ops[2]->getType(), Ops.back()->getType()});
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVNarrowingClipBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl,
+ // policy)
+
+ bool HasMaskedOff =
+ !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+
+ if (IsMasked)
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+
+ if (!HasMaskedOff)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+
+ if (IsMasked)
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ llvm::Function *F =
+ CGM.getIntrinsic(ID, {ResultType, Ops[1]->getType(), Ops[2]->getType(),
+ Ops.back()->getType()});
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingPointBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+ bool HasMaskedOff =
+ !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp =
+ IsMasked ? (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5)
+ : (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+ if (!HasRoundModeOp)
+ Ops.insert(Ops.end() - 1,
+ ConstantInt::get(Ops.back()->getType(), 7)); // frm
+
+ if (IsMasked)
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+
+ if (!HasMaskedOff)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+
+ if (IsMasked)
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ llvm::Function *F = CGM.getIntrinsic(
+ ID, {ResultType, Ops[2]->getType(), Ops.back()->getType()});
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVWideningFloatingPointBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+ bool HasMaskedOff =
+ !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp =
+ IsMasked ? (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5)
+ : (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+ if (!HasRoundModeOp)
+ Ops.insert(Ops.end() - 1,
+ ConstantInt::get(Ops.back()->getType(), 7)); // frm
+
+ if (IsMasked)
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+
+ if (!HasMaskedOff)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+
+ if (IsMasked)
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ llvm::Function *F =
+ CGM.getIntrinsic(ID, {ResultType, Ops[1]->getType(), Ops[2]->getType(),
+ Ops.back()->getType()});
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVIndexedSegLoadTupleBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 5> IntrinsicTypes;
+
+ bool NoPassthru =
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
+ (!IsMasked && (PolicyAttrs & RVV_VTA));
+
+ if (IsMasked)
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if (NoPassthru)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+
+ if (IsMasked)
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
+
+ if (IsMasked)
+ IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType(),
+ Ops[3]->getType(), Ops[4]->getType()};
+ else
+ IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType(),
+ Ops[3]->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
+
+ if (ReturnValue.isNull())
+ return LoadValue;
+ return Builder.CreateStore(LoadValue, ReturnValue.getValue());
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVIndexedSegStoreTupleBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 5> IntrinsicTypes;
+ // Masked
+ // Builtin: (mask, ptr, index, v_tuple, vl)
+ // Intrinsic: (tuple, ptr, index, mask, vl, SegInstSEW)
+ // Unmasked
+ // Builtin: (ptr, index, v_tuple, vl)
+ // Intrinsic: (tuple, ptr, index, vl, SegInstSEW)
+
+ if (IsMasked)
+ std::swap(Ops[0], Ops[3]);
+ else
+ std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
+
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
+
+ if (IsMasked)
+ IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
+ Ops[3]->getType(), Ops[4]->getType()};
+ else
+ IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
+ Ops[3]->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVFMABuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ // LLVM intrinsic
+ // Unmasked: (vector_in, vector_in/scalar_in, vector_in, round_mode,
+ // vl, policy)
+ // Masked: (vector_in, vector_in/scalar_in, vector_in, mask, frm,
+ // vl, policy)
+
+ bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
+
+ if (!HasRoundModeOp)
+ Ops.insert(Ops.end() - 1,
+ ConstantInt::get(Ops.back()->getType(), 7)); // frm
+
+ if (IsMasked)
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ llvm::Function *F = CGM.getIntrinsic(
+ ID, {ResultType, Ops[1]->getType(), Ops.back()->getType()});
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVWideningFMABuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ // LLVM intrinsic
+  // Unmasked: (vector_in, vector_in/scalar_in, vector_in, round_mode,
+  // vl, policy)
+  // Masked: (vector_in, vector_in/scalar_in, vector_in, mask, frm,
+  // vl, policy)
+
+ bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
+
+ if (!HasRoundModeOp)
+ Ops.insert(Ops.end() - 1,
+ ConstantInt::get(Ops.back()->getType(), 7)); // frm
+
+ if (IsMasked)
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 4);
+
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ llvm::Function *F =
+ CGM.getIntrinsic(ID, {ResultType, Ops[1]->getType(), Ops[2]->getType(),
+ Ops.back()->getType()});
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingUnaryBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, round_mode, vl)
+ // Masked: (passthru, op0, mask, frm, vl, policy)
+
+ bool HasMaskedOff =
+ !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp =
+ IsMasked ? (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4)
+ : (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
+
+ if (!HasRoundModeOp)
+ Ops.insert(Ops.end() - 1,
+ ConstantInt::get(Ops.back()->getType(), 7)); // frm
+
+ if (IsMasked)
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+
+ if (!HasMaskedOff)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+
+ if (IsMasked)
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingConvBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, frm, vl)
+ // Masked: (passthru, op0, mask, frm, vl, policy)
+ bool HasMaskedOff =
+ !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp =
+ IsMasked ? (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4)
+ : (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
+
+ if (!HasRoundModeOp)
+ Ops.insert(Ops.end() - 1,
+ ConstantInt::get(Ops.back()->getType(), 7)); // frm
+
+ if (IsMasked)
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+
+ if (!HasMaskedOff)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+
+ if (IsMasked)
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ llvm::Function *F = CGM.getIntrinsic(
+ ID, {ResultType, Ops[1]->getType(), Ops.back()->getType()});
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingReductionBuiltin(
+ CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
+ llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+ llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+ bool HasMaskedOff =
+ !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp =
+ IsMasked ? (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5)
+ : (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+ if (!HasRoundModeOp)
+ Ops.insert(Ops.end() - 1,
+ ConstantInt::get(Ops.back()->getType(), 7)); // frm
+
+ if (IsMasked)
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+
+ if (!HasMaskedOff)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+
+ llvm::Function *F = CGM.getIntrinsic(
+ ID, {ResultType, Ops[1]->getType(), Ops.back()->getType()});
+ return Builder.CreateCall(F, Ops, "");
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVReinterpretBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto &CGM = CGF->CGM;
+
+ if (ResultType->isIntOrIntVectorTy(1) ||
+ Ops[0]->getType()->isIntOrIntVectorTy(1)) {
+ assert(isa<ScalableVectorType>(ResultType) &&
+ isa<ScalableVectorType>(Ops[0]->getType()));
+
+ LLVMContext &Context = CGM.getLLVMContext();
+ ScalableVectorType *Boolean64Ty =
+ ScalableVectorType::get(llvm::Type::getInt1Ty(Context), 64);
+
+ if (ResultType->isIntOrIntVectorTy(1)) {
+ // Casting from m1 vector integer -> vector boolean
+ // Ex: <vscale x 8 x i8>
+ // --(bitcast)--------> <vscale x 64 x i1>
+ // --(vector_extract)-> <vscale x 8 x i1>
+ llvm::Value *BitCast = Builder.CreateBitCast(Ops[0], Boolean64Ty);
+ return Builder.CreateExtractVector(ResultType, BitCast,
+ ConstantInt::get(CGF->Int64Ty, 0));
+ } else {
+ // Casting from vector boolean -> m1 vector integer
+ // Ex: <vscale x 1 x i1>
+ // --(vector_insert)-> <vscale x 64 x i1>
+ // --(bitcast)-------> <vscale x 8 x i8>
+ llvm::Value *Boolean64Val = Builder.CreateInsertVector(
+ Boolean64Ty, llvm::PoisonValue::get(Boolean64Ty), Ops[0],
+ ConstantInt::get(CGF->Int64Ty, 0));
+ return Builder.CreateBitCast(Boolean64Val, ResultType);
+ }
+ }
+ return Builder.CreateBitCast(Ops[0], ResultType);
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVGetBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ auto *VecTy = cast<ScalableVectorType>(ResultType);
+ if (auto *OpVecTy = dyn_cast<ScalableVectorType>(Ops[0]->getType())) {
+ unsigned MaxIndex =
+ OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
+ assert(isPowerOf2_32(MaxIndex));
+ // Mask to only valid indices.
+ Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
+ Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
+ Ops[1] =
+ Builder.CreateMul(Ops[1], ConstantInt::get(Ops[1]->getType(),
+ VecTy->getMinNumElements()));
+ return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
+ }
+
+ return Builder.CreateIntrinsic(
+ Intrinsic::riscv_tuple_extract, {ResultType, Ops[0]->getType()},
+ {Ops[0], Builder.CreateTrunc(Ops[1], Builder.getInt32Ty())});
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVSetBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ if (auto *ResVecTy = dyn_cast<ScalableVectorType>(ResultType)) {
+ auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
+ unsigned MaxIndex =
+ ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
+ assert(isPowerOf2_32(MaxIndex));
+ // Mask to only valid indices.
+ Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
+ Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
+ Ops[1] =
+ Builder.CreateMul(Ops[1], ConstantInt::get(Ops[1]->getType(),
+ VecTy->getMinNumElements()));
+ return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
+ }
+
+ return Builder.CreateIntrinsic(
+ Intrinsic::riscv_tuple_insert, {ResultType, Ops[2]->getType()},
+ {Ops[0], Ops[2], Builder.CreateTrunc(Ops[1], Builder.getInt32Ty())});
+}
+
+static LLVM_ATTRIBUTE_NOINLINE Value *
+emitRVVCreateBuiltin(CodeGenFunction *CGF, const CallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::Type *ResultType,
+ Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
+ int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
+ auto &Builder = CGF->Builder;
+ llvm::Value *ReturnVector = llvm::PoisonValue::get(ResultType);
+ auto *VecTy = cast<ScalableVectorType>(Ops[0]->getType());
+ for (unsigned I = 0, N = Ops.size(); I < N; ++I) {
+ if (isa<ScalableVectorType>(ResultType)) {
+ llvm::Value *Idx = ConstantInt::get(Builder.getInt64Ty(),
+ VecTy->getMinNumElements() * I);
+ ReturnVector =
+ Builder.CreateInsertVector(ResultType, ReturnVector, Ops[I], Idx);
+ } else {
+ llvm::Value *Idx = ConstantInt::get(Builder.getInt32Ty(), I);
+ ReturnVector = Builder.CreateIntrinsic(Intrinsic::riscv_tuple_insert,
+ {ResultType, Ops[I]->getType()},
+ {ReturnVector, Ops[I], Idx});
+ }
+ }
+ return ReturnVector;
+}
+
Value *CodeGenFunction::EmitRISCVCpuInit() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {VoidPtrTy}, false);
llvm::FunctionCallee Func =
@@ -180,10 +1125,6 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
}
Intrinsic::ID ID = Intrinsic::not_intrinsic;
- // The 0th bit simulates the `vta` of RVV
- // The 1st bit simulates the `vma` of RVV
- constexpr unsigned RVV_VTA = 0x1;
- constexpr unsigned RVV_VMA = 0x2;
int PolicyAttrs = 0;
bool IsMasked = false;
  // This is used by segment load/store to determine its LLVM type.
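Two conventions recur through the helpers added above: PolicyAttrs packs the RVV policy into two bits (RVV_VTA = 0x1 for tail-agnostic, RVV_VMA = 0x2 for mask-agnostic), and the masked paths reorder the builtin's operands into the intrinsic's order with std::rotate. A standalone sketch of that rotate step, with strings standing in for llvm::Value* operands:

    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      // Masked builtin order: (mask, maskedoff, op1, op2, vl).
      std::vector<std::string> Ops = {"mask", "maskedoff", "op1", "op2", "vl"};
      // "Move mask to right before vl", as the masked paths above do.
      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
      // Intrinsic order: (maskedoff, op1, op2, mask, vl).
      assert(Ops[3] == "mask" && Ops.back() == "vl");
      return 0;
    }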
diff --git a/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp b/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
index 33a8d8f..1a1889a 100644
--- a/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
@@ -246,35 +246,26 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
llvm::FunctionType *LLVMFuncTy =
cast<llvm::FunctionType>(ConvertType(QualType(FuncTy, 0)));
+ bool VarArg = LLVMFuncTy->isVarArg();
unsigned NParams = LLVMFuncTy->getNumParams();
std::vector<Value *> Args;
- Args.reserve(NParams + 3);
+ Args.reserve(NParams + 3 + VarArg);
// The only real argument is the FuncRef
Args.push_back(FuncRef);
// Add the type information
- auto addType = [this, &Args](llvm::Type *T) {
- if (T->isVoidTy()) {
- // Do nothing
- } else if (T->isFloatingPointTy()) {
- Args.push_back(ConstantFP::get(T, 0));
- } else if (T->isIntegerTy()) {
- Args.push_back(ConstantInt::get(T, 0));
- } else if (T->isPointerTy()) {
- Args.push_back(ConstantPointerNull::get(llvm::PointerType::get(
- getLLVMContext(), T->getPointerAddressSpace())));
- } else {
- // TODO: Handle reference types. For now, we reject them in Sema.
- llvm_unreachable("Unhandled type");
- }
- };
-
- addType(LLVMFuncTy->getReturnType());
+ llvm::Type *RetType = LLVMFuncTy->getReturnType();
+ if (!RetType->isVoidTy()) {
+ Args.push_back(PoisonValue::get(RetType));
+ }
// The token type indicates the boundary between return types and param
// types.
Args.push_back(PoisonValue::get(llvm::Type::getTokenTy(getLLVMContext())));
for (unsigned i = 0; i < NParams; i++) {
- addType(LLVMFuncTy->getParamType(i));
+ Args.push_back(PoisonValue::get(LLVMFuncTy->getParamType(i)));
+ }
+ if (VarArg) {
+ Args.push_back(PoisonValue::get(Builder.getPtrTy()));
}
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_test_func);
return Builder.CreateCall(Callee, Args);
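The rewritten marshaling encodes the tested signature entirely in the argument types: a poison value of each non-void return and parameter type, a token separating returns from parameters, and, new in this change, a trailing poison pointer when the signature is variadic. A minimal sketch of the scheme, assuming an IRBuilder and llvm::FunctionType are already in hand:

    // Sketch: build the wasm.ref.test.func argument list for FTy.
    static llvm::SmallVector<llvm::Value *, 8>
    encodeSignature(llvm::Value *FuncRef, llvm::FunctionType *FTy,
                    llvm::IRBuilder<> &B) {
      llvm::SmallVector<llvm::Value *, 8> Args;
      Args.push_back(FuncRef); // the only real operand
      if (!FTy->getReturnType()->isVoidTy())
        Args.push_back(llvm::PoisonValue::get(FTy->getReturnType()));
      // The token marks the return/parameter boundary.
      Args.push_back(
          llvm::PoisonValue::get(llvm::Type::getTokenTy(B.getContext())));
      for (llvm::Type *P : FTy->params())
        Args.push_back(llvm::PoisonValue::get(P));
      if (FTy->isVarArg()) // trailing ptr flags the varargs case
        Args.push_back(llvm::PoisonValue::get(B.getPtrTy()));
      return Args;
    }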
diff --git a/clang/lib/CodeGen/TargetBuiltins/X86.cpp b/clang/lib/CodeGen/TargetBuiltins/X86.cpp
index e23d19d..a4974e4 100644
--- a/clang/lib/CodeGen/TargetBuiltins/X86.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/X86.cpp
@@ -1051,18 +1051,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vfmsubsd3_mask3:
return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
/*NegAcc*/ true);
- case X86::BI__builtin_ia32_vfmaddph:
- case X86::BI__builtin_ia32_vfmaddps:
- case X86::BI__builtin_ia32_vfmaddpd:
- case X86::BI__builtin_ia32_vfmaddph256:
- case X86::BI__builtin_ia32_vfmaddps256:
- case X86::BI__builtin_ia32_vfmaddpd256:
case X86::BI__builtin_ia32_vfmaddph512_mask:
case X86::BI__builtin_ia32_vfmaddph512_maskz:
case X86::BI__builtin_ia32_vfmaddph512_mask3:
- case X86::BI__builtin_ia32_vfmaddbf16128:
- case X86::BI__builtin_ia32_vfmaddbf16256:
- case X86::BI__builtin_ia32_vfmaddbf16512:
case X86::BI__builtin_ia32_vfmaddps512_mask:
case X86::BI__builtin_ia32_vfmaddps512_maskz:
case X86::BI__builtin_ia32_vfmaddps512_mask3:
@@ -1941,10 +1932,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return SI;
}
// Rotate is a special case of funnel shift - 1st 2 args are the same.
- case X86::BI__builtin_ia32_vprotb:
- case X86::BI__builtin_ia32_vprotw:
- case X86::BI__builtin_ia32_vprotd:
- case X86::BI__builtin_ia32_vprotq:
case X86::BI__builtin_ia32_vprotbi:
case X86::BI__builtin_ia32_vprotwi:
case X86::BI__builtin_ia32_vprotdi:
@@ -1955,12 +1942,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_prolq128:
case X86::BI__builtin_ia32_prolq256:
case X86::BI__builtin_ia32_prolq512:
- case X86::BI__builtin_ia32_prolvd128:
- case X86::BI__builtin_ia32_prolvd256:
- case X86::BI__builtin_ia32_prolvd512:
- case X86::BI__builtin_ia32_prolvq128:
- case X86::BI__builtin_ia32_prolvq256:
- case X86::BI__builtin_ia32_prolvq512:
return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
case X86::BI__builtin_ia32_prord128:
case X86::BI__builtin_ia32_prord256:
@@ -1968,12 +1949,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_prorq128:
case X86::BI__builtin_ia32_prorq256:
case X86::BI__builtin_ia32_prorq512:
- case X86::BI__builtin_ia32_prorvd128:
- case X86::BI__builtin_ia32_prorvd256:
- case X86::BI__builtin_ia32_prorvd512:
- case X86::BI__builtin_ia32_prorvq128:
- case X86::BI__builtin_ia32_prorvq256:
- case X86::BI__builtin_ia32_prorvq512:
return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
case X86::BI__builtin_ia32_selectb_128:
case X86::BI__builtin_ia32_selectb_256:
@@ -2208,15 +2183,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateBitCast(Res, Ops[0]->getType());
}
- case X86::BI__builtin_ia32_vplzcntd_128:
- case X86::BI__builtin_ia32_vplzcntd_256:
- case X86::BI__builtin_ia32_vplzcntd_512:
- case X86::BI__builtin_ia32_vplzcntq_128:
- case X86::BI__builtin_ia32_vplzcntq_256:
- case X86::BI__builtin_ia32_vplzcntq_512: {
- Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
- }
case X86::BI__builtin_ia32_sqrtss:
case X86::BI__builtin_ia32_sqrtsd: {
Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
@@ -2366,29 +2332,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// Ops 0 and 1 are swapped.
return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
- case X86::BI__builtin_ia32_vpshldvd128:
- case X86::BI__builtin_ia32_vpshldvd256:
- case X86::BI__builtin_ia32_vpshldvd512:
- case X86::BI__builtin_ia32_vpshldvq128:
- case X86::BI__builtin_ia32_vpshldvq256:
- case X86::BI__builtin_ia32_vpshldvq512:
- case X86::BI__builtin_ia32_vpshldvw128:
- case X86::BI__builtin_ia32_vpshldvw256:
- case X86::BI__builtin_ia32_vpshldvw512:
- return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
-
- case X86::BI__builtin_ia32_vpshrdvd128:
- case X86::BI__builtin_ia32_vpshrdvd256:
- case X86::BI__builtin_ia32_vpshrdvd512:
- case X86::BI__builtin_ia32_vpshrdvq128:
- case X86::BI__builtin_ia32_vpshrdvq256:
- case X86::BI__builtin_ia32_vpshrdvq512:
- case X86::BI__builtin_ia32_vpshrdvw128:
- case X86::BI__builtin_ia32_vpshrdvw256:
- case X86::BI__builtin_ia32_vpshrdvw512:
- // Ops 0 and 1 are swapped.
- return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
-
// Reductions
case X86::BI__builtin_ia32_reduce_fadd_pd512:
case X86::BI__builtin_ia32_reduce_fadd_ps512:
@@ -2850,8 +2793,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
// f16c half2float intrinsics
- case X86::BI__builtin_ia32_vcvtph2ps:
- case X86::BI__builtin_ia32_vcvtph2ps256:
case X86::BI__builtin_ia32_vcvtph2ps_mask:
case X86::BI__builtin_ia32_vcvtph2ps256_mask:
case X86::BI__builtin_ia32_vcvtph2ps512_mask: {
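The surviving rotate cases still lower through EmitX86FunnelShift with both data operands equal; that works because a funnel shift whose two inputs coincide is exactly a rotate. For 32 bits, the identity the lowering relies on:

    #include <cstdint>
    // fshl(x, x, n) == rotate-left by n (mod 32).
    uint32_t rotl32(uint32_t x, uint32_t n) {
      n &= 31;
      return n ? (x << n) | (x >> (32 - n)) : x;
    }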
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 277d69d..1e58c3f 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -63,6 +63,13 @@ LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
OS << "CoerceAndExpand Type=";
getCoerceAndExpandType()->print(OS);
break;
+ case TargetSpecific:
+ OS << "TargetSpecific Type=";
+ if (llvm::Type *Ty = getCoerceToType())
+ Ty->print(OS);
+ else
+ OS << "null";
+ break;
}
OS << ")\n";
}
diff --git a/clang/lib/CodeGen/Targets/AArch64.cpp b/clang/lib/CodeGen/Targets/AArch64.cpp
index b82c469..d7deece 100644
--- a/clang/lib/CodeGen/Targets/AArch64.cpp
+++ b/clang/lib/CodeGen/Targets/AArch64.cpp
@@ -374,8 +374,8 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
if (!passAsAggregateType(Ty)) {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
@@ -493,10 +493,9 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
auto ContainsOnlyPointers = [&](const auto &Self, QualType Ty) {
if (isEmptyRecord(getContext(), Ty, true))
return false;
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT)
+ const auto *RD = Ty->getAsRecordDecl();
+ if (!RD)
return false;
- const RecordDecl *RD = RT->getDecl();
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const auto &I : CXXRD->bases())
if (!Self(Self, I.getType()))
@@ -547,8 +546,8 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
if (!passAsAggregateType(RetTy)) {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = RetTy->getAsEnumDecl())
+ RetTy = ED->getIntegerType();
if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
@@ -737,14 +736,14 @@ bool AArch64ABIInfo::passAsPureScalableType(
return true;
}
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (const RecordType *RT = Ty->getAsCanonical<RecordType>()) {
// If the record cannot be passed in registers, then it's not a PST.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
RAA != CGCXXABI::RAA_Default)
return false;
// Pure scalable types are never unions and never contain unions.
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isUnion())
return false;
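
The AArch64 hunks above show the migration pattern that repeats across every target below: rather than fetching a sugared EnumType/RecordType node and then calling getDecl(), callers either ask the QualType for the declaration directly, or, when the canonical type node itself is still needed (e.g. for getRecordArgABI), take it via getAsCanonical<> and map back to a defining declaration. A minimal sketch of the three shapes, with Ty as a placeholder QualType and UseIndirect as a placeholder consumer:

    // Old shape: type node first, then its declaration.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // New shape: the declaration in one step.
    if (const auto *ED = Ty->getAsEnumDecl())
      Ty = ED->getIntegerType();

    // New shape when the RecordType node is still required: canonical node
    // first, then the defining declaration.
    if (const RecordType *RT = Ty->getAsCanonical<RecordType>()) {
      const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
      UseIndirect = RD->hasFlexibleArrayMember();
    }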
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index 47a552a..0fcbf7e 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -95,8 +95,7 @@ unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
return EltNumRegs * VT->getNumElements();
}
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ if (const auto *RD = Ty->getAsRecordDecl()) {
assert(!RD->hasFlexibleArrayMember());
for (const FieldDecl *Field : RD->fields()) {
@@ -152,11 +151,9 @@ ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
- if (const RecordType *RT = RetTy->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return DefaultABIInfo::classifyReturnType(RetTy);
- }
+ if (const auto *RD = RetTy->getAsRecordDecl();
+ RD && RD->hasFlexibleArrayMember())
+ return DefaultABIInfo::classifyReturnType(RetTy);
// Pack aggregates <= 4 bytes into single VGPR or pair.
uint64_t Size = getContext().getTypeSize(RetTy);
@@ -245,11 +242,9 @@ ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, bool Variadic,
if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return DefaultABIInfo::classifyArgumentType(Ty);
- }
+ if (const auto *RD = Ty->getAsRecordDecl();
+ RD && RD->hasFlexibleArrayMember())
+ return DefaultABIInfo::classifyArgumentType(Ty);
// Pack aggregates <= 8 bytes into single VGPR or pair.
uint64_t Size = getContext().getTypeSize(Ty);
diff --git a/clang/lib/CodeGen/Targets/ARC.cpp b/clang/lib/CodeGen/Targets/ARC.cpp
index c8db7e8..6727587 100644
--- a/clang/lib/CodeGen/Targets/ARC.cpp
+++ b/clang/lib/CodeGen/Targets/ARC.cpp
@@ -94,7 +94,7 @@ RValue ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
uint8_t FreeRegs) const {
// Handle the generic C++ ABI.
- const RecordType *RT = Ty->getAs<RecordType>();
+ const RecordType *RT = Ty->getAsCanonical<RecordType>();
if (RT) {
CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
if (RAA == CGCXXABI::RAA_Indirect)
@@ -105,14 +105,15 @@ ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
}
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;
if (isAggregateTypeForABI(Ty)) {
// Structures with flexible arrays are always indirect.
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ if (RT &&
+ RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
return getIndirectByValue(Ty);
// Ignore empty structs/unions.
diff --git a/clang/lib/CodeGen/Targets/ARM.cpp b/clang/lib/CodeGen/Targets/ARM.cpp
index 68f9e01..c84c9f2 100644
--- a/clang/lib/CodeGen/Targets/ARM.cpp
+++ b/clang/lib/CodeGen/Targets/ARM.cpp
@@ -316,7 +316,7 @@ ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
// Base can be a floating-point or a vector.
if (const VectorType *VT = Base->getAs<VectorType>()) {
// FP16 vectors should be converted to integer vectors
- if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
+ if (!getTarget().hasFastHalfType() && containsAnyFP16Vectors(Ty)) {
uint64_t Size = getContext().getTypeSize(VT);
auto *NewVecTy = llvm::FixedVectorType::get(
llvm::Type::getInt32Ty(getVMContext()), Size / 32);
@@ -382,8 +382,8 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl()) {
+ Ty = ED->getIntegerType();
}
if (const auto *EIT = Ty->getAs<BitIntType>())
@@ -512,11 +512,11 @@ static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
// above, but they are not.
// Otherwise, it must be a record type.
- const RecordType *RT = Ty->getAs<RecordType>();
+ const RecordType *RT = Ty->getAsCanonical<RecordType>();
if (!RT) return false;
// Ignore records with flexible arrays.
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return false;
@@ -582,7 +582,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
getDataLayout().getAllocaAddrSpace());
// TODO: FP16/BF16 vectors should be converted to integer vectors
// This check is similar to isIllegalVectorType - refactor?
- if ((!getTarget().hasLegalHalfType() &&
+ if ((!getTarget().hasFastHalfType() &&
(VT->getElementType()->isFloat16Type() ||
VT->getElementType()->isHalfType())) ||
(IsFloatABISoftFP &&
@@ -592,8 +592,8 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = RetTy->getAsEnumDecl())
+ RetTy = ED->getIntegerType();
if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
@@ -678,9 +678,9 @@ bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
// into float, and we don't want the ABI to depend on whether or not they
// are supported in hardware. Thus return false to coerce vectors of these
// types into integer vectors.
- // We do not depend on hasLegalHalfType for bfloat as it is a
+ // We do not depend on hasFastHalfType for bfloat as it is a
// separate IR type.
- if ((!getTarget().hasLegalHalfType() &&
+ if ((!getTarget().hasFastHalfType() &&
(VT->getElementType()->isFloat16Type() ||
VT->getElementType()->isHalfType())) ||
(IsFloatABISoftFP &&
@@ -717,9 +717,8 @@ bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
if (NElements == 0)
return false;
return containsAnyFP16Vectors(AT->getElementType());
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
-
+ }
+ if (const auto *RD = Ty->getAsRecordDecl()) {
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
diff --git a/clang/lib/CodeGen/Targets/BPF.cpp b/clang/lib/CodeGen/Targets/BPF.cpp
index 880a8910..3a7af34 100644
--- a/clang/lib/CodeGen/Targets/BPF.cpp
+++ b/clang/lib/CodeGen/Targets/BPF.cpp
@@ -47,8 +47,8 @@ public:
}
}
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
ASTContext &Context = getContext();
if (const auto *EIT = Ty->getAs<BitIntType>())
@@ -69,8 +69,8 @@ public:
getDataLayout().getAllocaAddrSpace());
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = RetTy->getAsEnumDecl())
+ RetTy = ED->getIntegerType();
ASTContext &Context = getContext();
if (const auto *EIT = RetTy->getAs<BitIntType>())
diff --git a/clang/lib/CodeGen/Targets/CSKY.cpp b/clang/lib/CodeGen/Targets/CSKY.cpp
index ef26d48..b925420 100644
--- a/clang/lib/CodeGen/Targets/CSKY.cpp
+++ b/clang/lib/CodeGen/Targets/CSKY.cpp
@@ -91,7 +91,7 @@ ABIArgInfo CSKYABIInfo::classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
- if (!Ty->getAsUnionType())
+ if (!Ty->isUnionType())
if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
@@ -115,8 +115,8 @@ ABIArgInfo CSKYABIInfo::classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
// All integral types are promoted to XLen width, unless passed on the
// stack.
diff --git a/clang/lib/CodeGen/Targets/DirectX.cpp b/clang/lib/CodeGen/Targets/DirectX.cpp
index 96a1284..b4cebb9 100644
--- a/clang/lib/CodeGen/Targets/DirectX.cpp
+++ b/clang/lib/CodeGen/Targets/DirectX.cpp
@@ -77,7 +77,8 @@ llvm::Type *DirectXTargetCodeGenInfo::getHLSLType(
llvm::Type *BufferLayoutTy =
HLSLBufferLayoutBuilder(CGM, "dx.Layout")
- .createLayoutType(ContainedTy->getAsStructureType(), Packoffsets);
+ .createLayoutType(ContainedTy->castAsCanonical<RecordType>(),
+ Packoffsets);
if (!BufferLayoutTy)
return nullptr;
diff --git a/clang/lib/CodeGen/Targets/Hexagon.cpp b/clang/lib/CodeGen/Targets/Hexagon.cpp
index 2976657..97a9300 100644
--- a/clang/lib/CodeGen/Targets/Hexagon.cpp
+++ b/clang/lib/CodeGen/Targets/Hexagon.cpp
@@ -97,8 +97,8 @@ ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
unsigned *RegsLeft) const {
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 64)
@@ -160,8 +160,8 @@ ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = RetTy->getAsEnumDecl())
+ RetTy = ED->getIntegerType();
if (Size > 64 && RetTy->isBitIntType())
return getNaturalAlignIndirect(
diff --git a/clang/lib/CodeGen/Targets/Lanai.cpp b/clang/lib/CodeGen/Targets/Lanai.cpp
index 6f75bd5..e76431a 100644
--- a/clang/lib/CodeGen/Targets/Lanai.cpp
+++ b/clang/lib/CodeGen/Targets/Lanai.cpp
@@ -88,7 +88,7 @@ ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
CCState &State) const {
// Check with the C++ ABI first.
- const RecordType *RT = Ty->getAs<RecordType>();
+ const RecordType *RT = Ty->getAsCanonical<RecordType>();
if (RT) {
CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
if (RAA == CGCXXABI::RAA_Indirect) {
@@ -102,7 +102,8 @@ ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
if (isAggregateTypeForABI(Ty)) {
// Structures with flexible arrays are always indirect.
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ if (RT &&
+ RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
return getIndirectResult(Ty, /*ByVal=*/true, State);
// Ignore empty structs/unions.
@@ -124,8 +125,8 @@ ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
}
// Treat an enum type as its underlying type.
- if (const auto *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
bool InReg = shouldUseInReg(Ty, State);
diff --git a/clang/lib/CodeGen/Targets/LoongArch.cpp b/clang/lib/CodeGen/Targets/LoongArch.cpp
index 7640f37..1f344d6 100644
--- a/clang/lib/CodeGen/Targets/LoongArch.cpp
+++ b/clang/lib/CodeGen/Targets/LoongArch.cpp
@@ -149,8 +149,8 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
QualType EltTy = ATy->getElementType();
// Non-zero-length arrays of empty records make the struct ineligible to be
// passed via FARs in C++.
- if (const auto *RTy = EltTy->getAs<RecordType>()) {
- if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
+ if (const auto *RTy = EltTy->getAsCanonical<RecordType>()) {
+ if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getOriginalDecl()) &&
isEmptyRecord(getContext(), EltTy, true, true))
return false;
}
@@ -164,12 +164,12 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
return true;
}
- if (const auto *RTy = Ty->getAs<RecordType>()) {
+ if (const auto *RTy = Ty->getAsCanonical<RecordType>()) {
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are not eligible for the FP calling convention.
if (getRecordArgABI(Ty, CGT.getCXXABI()))
return false;
- const RecordDecl *RD = RTy->getDecl();
+ const RecordDecl *RD = RTy->getOriginalDecl()->getDefinitionOrSelf();
if (isEmptyRecord(getContext(), Ty, true, true) &&
(!RD->isUnion() || !isa<CXXRecordDecl>(RD)))
return true;
@@ -180,8 +180,7 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const CXXBaseSpecifier &B : CXXRD->bases()) {
- const auto *BDecl =
- cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
+ const auto *BDecl = B.getType()->castAsCXXRecordDecl();
if (!detectFARsEligibleStructHelper(
B.getType(), CurOff + Layout.getBaseClassOffset(BDecl),
Field1Ty, Field1Off, Field2Ty, Field2Off))
@@ -368,8 +367,8 @@ ABIArgInfo LoongArchABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
// All integral types are promoted to GRLen width.
if (Size < GRLen && Ty->isIntegralOrEnumerationType())
diff --git a/clang/lib/CodeGen/Targets/Mips.cpp b/clang/lib/CodeGen/Targets/Mips.cpp
index c025f73..f26ab97 100644
--- a/clang/lib/CodeGen/Targets/Mips.cpp
+++ b/clang/lib/CodeGen/Targets/Mips.cpp
@@ -153,7 +153,7 @@ llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
if (Ty->isComplexType())
return CGT.ConvertType(Ty);
- const RecordType *RT = Ty->getAs<RecordType>();
+ const RecordType *RT = Ty->getAsCanonical<RecordType>();
// Unions/vectors are passed in integer registers.
if (!RT || !RT->isStructureOrClassType()) {
@@ -161,7 +161,7 @@ llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
return llvm::StructType::get(getVMContext(), ArgList);
}
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
@@ -241,8 +241,8 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
}
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
// Make sure we pass indirectly things that are too large.
if (const auto *EIT = Ty->getAs<BitIntType>())
@@ -261,11 +261,11 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
- const RecordType *RT = RetTy->getAs<RecordType>();
+ const RecordType *RT = RetTy->getAsCanonical<RecordType>();
SmallVector<llvm::Type*, 8> RTList;
if (RT && RT->isStructureOrClassType()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
unsigned FieldCnt = Layout.getFieldCount();
@@ -332,8 +332,8 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
}
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = RetTy->getAsEnumDecl())
+ RetTy = ED->getIntegerType();
// Make sure we pass indirectly things that are too large.
if (const auto *EIT = RetTy->getAs<BitIntType>())
diff --git a/clang/lib/CodeGen/Targets/NVPTX.cpp b/clang/lib/CodeGen/Targets/NVPTX.cpp
index 82bdfe2..53f2fc4 100644
--- a/clang/lib/CodeGen/Targets/NVPTX.cpp
+++ b/clang/lib/CodeGen/Targets/NVPTX.cpp
@@ -87,10 +87,6 @@ public:
static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
int Operand);
- static void
- addGridConstantNVVMMetadata(llvm::GlobalValue *GV,
- const SmallVectorImpl<int> &GridConstantArgs);
-
private:
static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
LValue Src) {
@@ -130,10 +126,9 @@ bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
return true;
if (const auto *AT = T->getAsArrayTypeUnsafe())
return isUnsupportedType(AT->getElementType());
- const auto *RT = T->getAs<RecordType>();
- if (!RT)
+ const auto *RD = T->getAsRecordDecl();
+ if (!RD)
return false;
- const RecordDecl *RD = RT->getDecl();
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
@@ -173,8 +168,8 @@ ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect();
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = RetTy->getAsEnumDecl())
+ RetTy = ED->getIntegerType();
return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
: ABIArgInfo::getDirect());
@@ -182,8 +177,8 @@ ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
// Return aggregates type as indirect by value
if (isAggregateTypeForABI(Ty)) {
@@ -266,27 +261,24 @@ void NVPTXTargetCodeGenInfo::setTargetAttributes(
// By default, all functions are device functions
if (FD->hasAttr<DeviceKernelAttr>() || FD->hasAttr<CUDAGlobalAttr>()) {
// OpenCL/CUDA kernel functions get kernel metadata
- // Create !{<func-ref>, metadata !"kernel", i32 1} node
// And kernel functions are not subject to inlining
F->addFnAttr(llvm::Attribute::NoInline);
if (FD->hasAttr<CUDAGlobalAttr>()) {
- SmallVector<int, 10> GCI;
+ F->setCallingConv(llvm::CallingConv::PTX_Kernel);
+
for (auto IV : llvm::enumerate(FD->parameters()))
if (IV.value()->hasAttr<CUDAGridConstantAttr>())
- // For some reason arg indices are 1-based in NVVM
- GCI.push_back(IV.index() + 1);
- // Create !{<func-ref>, metadata !"kernel", i32 1} node
- F->setCallingConv(llvm::CallingConv::PTX_Kernel);
- addGridConstantNVVMMetadata(F, GCI);
+ F->addParamAttr(
+ IV.index(),
+ llvm::Attribute::get(F->getContext(), "nvvm.grid_constant"));
}
if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>())
M.handleCUDALaunchBoundsAttr(F, Attr);
}
}
// Attach kernel metadata directly if compiling for NVPTX.
- if (FD->hasAttr<DeviceKernelAttr>()) {
+ if (FD->hasAttr<DeviceKernelAttr>())
F->setCallingConv(llvm::CallingConv::PTX_Kernel);
- }
}
void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
@@ -306,29 +298,6 @@ void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
-void NVPTXTargetCodeGenInfo::addGridConstantNVVMMetadata(
- llvm::GlobalValue *GV, const SmallVectorImpl<int> &GridConstantArgs) {
-
- llvm::Module *M = GV->getParent();
- llvm::LLVMContext &Ctx = M->getContext();
-
- // Get "nvvm.annotations" metadata node
- llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
-
- SmallVector<llvm::Metadata *, 5> MDVals = {llvm::ConstantAsMetadata::get(GV)};
- if (!GridConstantArgs.empty()) {
- SmallVector<llvm::Metadata *, 10> GCM;
- for (int I : GridConstantArgs)
- GCM.push_back(llvm::ConstantAsMetadata::get(
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), I)));
- MDVals.append({llvm::MDString::get(Ctx, "grid_constant"),
- llvm::MDNode::get(Ctx, GCM)});
- }
-
- // Append metadata to nvvm.annotations
- MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
-}
-
bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
return false;
}
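
Net effect of the NVPTX change above: the "grid_constant" entry in the !nvvm.annotations named metadata (with its 1-based argument indices) is gone, and __grid_constant__ kernel parameters are instead tagged in place with a string attribute, which also makes the parameter index explicit. Condensed from the hunk, with F and FD as in setTargetAttributes:

    F->setCallingConv(llvm::CallingConv::PTX_Kernel);
    for (auto IV : llvm::enumerate(FD->parameters()))
      if (IV.value()->hasAttr<CUDAGridConstantAttr>())
        F->addParamAttr(IV.index(), // 0-based; the old metadata was 1-based
                        llvm::Attribute::get(F->getContext(),
                                             "nvvm.grid_constant"));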
diff --git a/clang/lib/CodeGen/Targets/PPC.cpp b/clang/lib/CodeGen/Targets/PPC.cpp
index 4df4c9f..380e8c0 100644
--- a/clang/lib/CodeGen/Targets/PPC.cpp
+++ b/clang/lib/CodeGen/Targets/PPC.cpp
@@ -153,8 +153,8 @@ public:
// extended to 32/64 bits.
bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
if (getContext().isPromotableIntegerType(Ty))
@@ -294,8 +294,7 @@ void AIXTargetCodeGenInfo::setTargetAttributes(
ASTContext &Context = D->getASTContext();
unsigned Alignment = Context.toBits(Context.getDeclAlign(D)) / 8;
const auto *Ty = VarD->getType().getTypePtr();
- const RecordDecl *RDecl =
- Ty->isRecordType() ? Ty->getAs<RecordType>()->getDecl() : nullptr;
+ const RecordDecl *RDecl = Ty->getAsRecordDecl();
bool EmitDiagnostic = UserSpecifiedTOC && GV->hasExternalLinkage();
auto reportUnsupportedWarning = [&](bool ShouldEmitWarning, StringRef Msg) {
@@ -706,8 +705,8 @@ public:
bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
if (isPromotableIntegerTypeForABI(Ty))
diff --git a/clang/lib/CodeGen/Targets/RISCV.cpp b/clang/lib/CodeGen/Targets/RISCV.cpp
index a7f9298..0ef39b6 100644
--- a/clang/lib/CodeGen/Targets/RISCV.cpp
+++ b/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -8,6 +8,7 @@
#include "ABIInfoImpl.h"
#include "TargetInfo.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/TargetParser/RISCVTargetParser.h"
using namespace clang;
@@ -73,6 +74,11 @@ public:
raw_ostream &Out) const override;
void appendAttributeMangling(StringRef AttrStr,
raw_ostream &Out) const override;
+ llvm::Value *createCoercedLoad(Address SrcAddr, const ABIArgInfo &AI,
+ CodeGenFunction &CGF) const override;
+ void createCoercedStore(llvm::Value *Val, Address DstAddr,
+ const ABIArgInfo &AI, bool DestIsVolatile,
+ CodeGenFunction &CGF) const override;
};
} // end anonymous namespace
@@ -227,8 +233,8 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
QualType EltTy = ATy->getElementType();
// Non-zero-length arrays of empty records make the struct ineligible for
// the FP calling convention in C++.
- if (const auto *RTy = EltTy->getAs<RecordType>()) {
- if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
+ if (const auto *RTy = EltTy->getAsCanonical<RecordType>()) {
+ if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getOriginalDecl()) &&
isEmptyRecord(getContext(), EltTy, true, true))
return false;
}
@@ -243,14 +249,14 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
return true;
}
- if (const auto *RTy = Ty->getAs<RecordType>()) {
+ if (const auto *RTy = Ty->getAsCanonical<RecordType>()) {
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are not eligible for the FP calling convention.
if (getRecordArgABI(Ty, CGT.getCXXABI()))
return false;
if (isEmptyRecord(getContext(), Ty, true, true))
return true;
- const RecordDecl *RD = RTy->getDecl();
+ const RecordDecl *RD = RTy->getOriginalDecl()->getDefinitionOrSelf();
// Unions aren't eligible unless they're empty (which is caught above).
if (RD->isUnion())
return false;
@@ -258,8 +264,7 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const CXXBaseSpecifier &B : CXXRD->bases()) {
- const auto *BDecl =
- cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
+ const auto *BDecl = B.getType()->castAsCXXRecordDecl();
CharUnits BaseOff = Layout.getBaseClassOffset(BDecl);
bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
Field1Ty, Field1Off, Field2Ty,
@@ -648,7 +653,7 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
if (IsFixed && Ty->isStructureOrClassType()) {
llvm::Type *VLSType = nullptr;
if (detectVLSCCEligibleStruct(Ty, ABIVLen, VLSType))
- return ABIArgInfo::getDirect(VLSType);
+ return ABIArgInfo::getTargetSpecific(VLSType);
}
uint64_t NeededAlign = getContext().getTypeAlign(Ty);
@@ -672,8 +677,8 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
// All integral types are promoted to XLen width
if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
@@ -780,6 +785,175 @@ ABIArgInfo RISCVABIInfo::extendType(QualType Ty, llvm::Type *CoerceTy) const {
return ABIArgInfo::getExtend(Ty, CoerceTy);
}
+llvm::Value *RISCVABIInfo::createCoercedLoad(Address Src, const ABIArgInfo &AI,
+ CodeGenFunction &CGF) const {
+ llvm::Type *Ty = AI.getCoerceToType();
+ llvm::Type *SrcTy = Src.getElementType();
+ llvm::StructType *SrcSTy = cast<llvm::StructType>(SrcTy);
+ assert((Ty->isScalableTy() || Ty->isTargetExtTy()) &&
+ "Only scalable vector type and vector tuple type are allowed for load "
+ "type.");
+ if (llvm::TargetExtType *TupTy = dyn_cast<llvm::TargetExtType>(Ty)) {
+ // In RISC-V VLS calling convention, struct of fixed vectors or struct of
+ // array of fixed vector of length >1 might be lowered using vector tuple
+ // type, so we consider it a valid load, e.g.
+ // struct i32x4x2 {
+ // __attribute__((vector_size(16))) int i;
+ // __attribute__((vector_size(16))) int i;
+ // };
+ // or
+ // struct i32x4 {
+ // __attribute__((vector_size(16))) int i[2];
+ // };
+ // is lowered to target("riscv.vector.tuple", <vscale x 8 x i8>, 2)
+ // when ABI_VLEN = 128 bits, please check out
+ // clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
+ // for more information.
+ assert(TupTy->getName() == "riscv.vector.tuple");
+ llvm::Type *EltTy = TupTy->getTypeParameter(0);
+ unsigned NumElts = TupTy->getIntParameter(0);
+
+ if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(SrcSTy->getElementType(0)))
+ Src = Src.withElementType(ArrayTy);
+
+ // Perform extract element and load
+ llvm::Value *TupleVal = llvm::PoisonValue::get(Ty);
+ auto *Load = CGF.Builder.CreateLoad(Src);
+ for (unsigned i = 0; i < NumElts; ++i) {
+ // Extract from struct
+ llvm::Value *ExtractFromLoad = CGF.Builder.CreateExtractValue(Load, i);
+ // Element in vector tuple type is always i8, so we need to cast back to
+ // its original element type.
+ EltTy =
+ cast<llvm::ScalableVectorType>(llvm::VectorType::getWithSizeAndScalar(
+ cast<llvm::VectorType>(EltTy), ExtractFromLoad->getType()));
+ llvm::Value *VectorVal = llvm::PoisonValue::get(EltTy);
+ // Insert to scalable vector
+ VectorVal = CGF.Builder.CreateInsertVector(
+ EltTy, VectorVal, ExtractFromLoad, uint64_t(0), "cast.scalable");
+ // Insert scalable vector to vector tuple
+ llvm::Value *Idx = CGF.Builder.getInt32(i);
+ TupleVal =
+ CGF.Builder.CreateIntrinsic(llvm::Intrinsic::riscv_tuple_insert,
+ {Ty, EltTy}, {TupleVal, VectorVal, Idx});
+ }
+ return TupleVal;
+ }
+
+ // In RISC-V VLS calling convention, struct of fixed vector or struct of
+ // fixed vector array of length 1 might be lowered using scalable vector,
+ // so we consider it a valid load, e.g.
+ // struct i32x4 {
+ // __attribute__((vector_size(16))) int i;
+ // };
+ // or
+ // struct i32x4 {
+ // __attribute__((vector_size(16))) int i[1];
+ // };
+ // is lowered to <vscale x 2 x i32>
+ // when ABI_VLEN = 128 bits, please check out
+ // clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
+ // for more information.
+ auto *ScalableDstTy = cast<llvm::ScalableVectorType>(Ty);
+ SrcTy = SrcSTy->getElementType(0);
+ if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(SrcTy))
+ SrcTy = ArrayTy->getElementType();
+ Src = Src.withElementType(SrcTy);
+ [[maybe_unused]] auto *FixedSrcTy = cast<llvm::FixedVectorType>(SrcTy);
+ assert(ScalableDstTy->getElementType() == FixedSrcTy->getElementType());
+ auto *Load = CGF.Builder.CreateLoad(Src);
+ auto *VectorVal = llvm::PoisonValue::get(ScalableDstTy);
+ llvm::Value *Result = CGF.Builder.CreateInsertVector(
+ ScalableDstTy, VectorVal, Load, uint64_t(0), "cast.scalable");
+ return Result;
+}
+
+void RISCVABIInfo::createCoercedStore(llvm::Value *Val, Address Dst,
+ const ABIArgInfo &AI, bool DestIsVolatile,
+ CodeGenFunction &CGF) const {
+ llvm::Type *SrcTy = Val->getType();
+ llvm::StructType *DstSTy = cast<llvm::StructType>(Dst.getElementType());
+ assert((SrcTy->isScalableTy() || SrcTy->isTargetExtTy()) &&
+ "Only scalable vector type and vector tuple type are allowed for "
+ "store value.");
+ if (llvm::TargetExtType *TupTy = dyn_cast<llvm::TargetExtType>(SrcTy)) {
+ // In RISC-V VLS calling convention, struct of fixed vectors or struct
+ // of array of fixed vector of length >1 might be lowered using vector
+ // tuple type, so we consider it a valid store, e.g.
+ // struct i32x4x2 {
+ // __attribute__((vector_size(16))) int i;
+ // __attribute__((vector_size(16))) int j;
+ // };
+ // or
+ // struct i32x4 {
+ // __attribute__((vector_size(16))) int i[2];
+ // };
+ // is lowered to target("riscv.vector.tuple", <vscale x 8 x i8>, 2)
+ // when ABI_VLEN = 128 bits, please check out
+ // clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
+ // for more information.
+ assert(TupTy->getName() == "riscv.vector.tuple");
+ llvm::Type *EltTy = TupTy->getTypeParameter(0);
+ unsigned NumElts = TupTy->getIntParameter(0);
+
+ llvm::Type *FixedVecTy = DstSTy->getElementType(0);
+ if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(DstSTy->getElementType(0))) {
+ Dst = Dst.withElementType(ArrayTy);
+ FixedVecTy = ArrayTy->getArrayElementType();
+ }
+
+ // Perform extract element and store
+ for (unsigned i = 0; i < NumElts; ++i) {
+ // Element in vector tuple type is always i8, so we need to cast back
+ // to its original element type.
+ EltTy =
+ cast<llvm::ScalableVectorType>(llvm::VectorType::getWithSizeAndScalar(
+ cast<llvm::VectorType>(EltTy), FixedVecTy));
+ // Extract scalable vector from tuple
+ llvm::Value *Idx = CGF.Builder.getInt32(i);
+ auto *TupleElement = CGF.Builder.CreateIntrinsic(
+ llvm::Intrinsic::riscv_tuple_extract, {EltTy, TupTy}, {Val, Idx});
+
+ // Extract fixed vector from scalable vector
+ auto *ExtractVec = CGF.Builder.CreateExtractVector(
+ FixedVecTy, TupleElement, uint64_t(0));
+ // Store fixed vector to corresponding address
+ Address EltPtr = Address::invalid();
+ if (Dst.getElementType()->isStructTy())
+ EltPtr = CGF.Builder.CreateStructGEP(Dst, i);
+ else
+ EltPtr = CGF.Builder.CreateConstArrayGEP(Dst, i);
+ auto *I = CGF.Builder.CreateStore(ExtractVec, EltPtr, DestIsVolatile);
+ CGF.addInstToCurrentSourceAtom(I, ExtractVec);
+ }
+ return;
+ }
+
+ // In RISC-V VLS calling convention, struct of fixed vector or struct of
+ // fixed vector array of length 1 might be lowered using scalable
+ // vector, so we consider it a valid store, e.g.
+ // struct i32x4 {
+ // __attribute__((vector_size(16))) int i;
+ // };
+ // or
+ // struct i32x4 {
+ // __attribute__((vector_size(16))) int i[1];
+ // };
+ // is lowered to <vscale x 2 x i32>
+ // when ABI_VLEN = 128 bits, please check out
+ // clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
+ // for more information.
+ llvm::Type *EltTy = DstSTy->getElementType(0);
+ if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(EltTy)) {
+ assert(ArrayTy->getNumElements() == 1);
+ EltTy = ArrayTy->getElementType();
+ }
+ auto *Coerced = CGF.Builder.CreateExtractVector(
+ cast<llvm::FixedVectorType>(EltTy), Val, uint64_t(0));
+ auto *I = CGF.Builder.CreateStore(Coerced, Dst, DestIsVolatile);
+ CGF.addInstToCurrentSourceAtom(I, Val);
+}
+
namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
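
To make the createCoercedLoad/createCoercedStore comments concrete, here is a source-level sketch of the two VLS calling-convention lowerings they describe (assuming ABI_VLEN = 128; the typedef and field names are placeholders):

    typedef int v4i32 __attribute__((vector_size(16)));

    // One fixed vector, or an array of length 1, inside a struct is lowered
    // to a single scalable vector: <vscale x 2 x i32>.
    struct i32x4  { v4i32 i; };
    struct i32x4a { v4i32 i[1]; };

    // Two fixed vectors, or an array of length >1, are lowered to a vector
    // tuple: target("riscv.vector.tuple", <vscale x 8 x i8>, 2).
    struct i32x4x2 { v4i32 i; v4i32 j; };
    struct i32x4a2 { v4i32 i[2]; };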
diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index d952c6e..5380624 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -118,11 +118,9 @@ ABIArgInfo SPIRVABIInfo::classifyReturnType(QualType RetTy) const {
if (!isAggregateTypeForABI(RetTy) || getRecordArgABI(RetTy, getCXXABI()))
return DefaultABIInfo::classifyReturnType(RetTy);
- if (const RecordType *RT = RetTy->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return DefaultABIInfo::classifyReturnType(RetTy);
- }
+ if (const auto *RD = RetTy->getAsRecordDecl();
+ RD && RD->hasFlexibleArrayMember())
+ return DefaultABIInfo::classifyReturnType(RetTy);
// TODO: The AMDGPU ABI is non-trivial to represent in SPIR-V; in order to
// avoid encoding various architecture specific bits here we return everything
@@ -186,11 +184,9 @@ ABIArgInfo SPIRVABIInfo::classifyArgumentType(QualType Ty) const {
return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
RAA == CGCXXABI::RAA_DirectInMemory);
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return DefaultABIInfo::classifyArgumentType(Ty);
- }
+ if (const auto *RD = Ty->getAsRecordDecl();
+ RD && RD->hasFlexibleArrayMember())
+ return DefaultABIInfo::classifyArgumentType(Ty);
return ABIArgInfo::getDirect(CGT.ConvertType(Ty), 0u, nullptr, false);
}
@@ -431,8 +427,7 @@ static llvm::Type *getInlineSpirvType(CodeGenModule &CGM,
}
case SpirvOperandKind::TypeId: {
QualType TypeOperand = Operand.getResultType();
- if (auto *RT = TypeOperand->getAs<RecordType>()) {
- auto *RD = RT->getDecl();
+ if (const auto *RD = TypeOperand->getAsRecordDecl()) {
assert(RD->isCompleteDefinition() &&
"Type completion should have been required in Sema");
@@ -505,7 +500,8 @@ llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(
llvm::Type *BufferLayoutTy =
HLSLBufferLayoutBuilder(CGM, "spirv.Layout")
- .createLayoutType(ContainedTy->getAsStructureType(), Packoffsets);
+ .createLayoutType(ContainedTy->castAsCanonical<RecordType>(),
+ Packoffsets);
uint32_t StorageClass = /* Uniform storage class */ 2;
return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {BufferLayoutTy},
{StorageClass, false});
@@ -517,14 +513,65 @@ llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(
return nullptr;
}
+static unsigned
+getImageFormat(const LangOptions &LangOpts,
+ const HLSLAttributedResourceType::Attributes &attributes,
+ llvm::Type *SampledType, QualType Ty, unsigned NumChannels) {
+ // For images with `Sampled` operand equal to 2, there are restrictions on
+ // using the Unknown image format. To avoid these restrictions in common
+ // cases, we guess an image format for them based on the sampled type and the
+ // number of channels. This is intended to match the behaviour of DXC.
+ if (LangOpts.HLSLSpvUseUnknownImageFormat ||
+ attributes.ResourceClass != llvm::dxil::ResourceClass::UAV) {
+ return 0; // Unknown
+ }
+
+ if (SampledType->isIntegerTy(32)) {
+ if (Ty->isSignedIntegerType()) {
+ if (NumChannels == 1)
+ return 24; // R32i
+ if (NumChannels == 2)
+ return 25; // Rg32i
+ if (NumChannels == 4)
+ return 21; // Rgba32i
+ } else {
+ if (NumChannels == 1)
+ return 33; // R32ui
+ if (NumChannels == 2)
+ return 35; // Rg32ui
+ if (NumChannels == 4)
+ return 30; // Rgba32ui
+ }
+ } else if (SampledType->isIntegerTy(64)) {
+ if (NumChannels == 1) {
+ if (Ty->isSignedIntegerType()) {
+ return 41; // R64i
+ }
+ return 40; // R64ui
+ }
+ } else if (SampledType->isFloatTy()) {
+ if (NumChannels == 1)
+ return 3; // R32f
+ if (NumChannels == 2)
+ return 6; // Rg32f
+ if (NumChannels == 4)
+ return 1; // Rgba32f
+ }
+
+ return 0; // Unknown
+}
+
llvm::Type *CommonSPIRTargetCodeGenInfo::getSPIRVImageTypeFromHLSLResource(
const HLSLAttributedResourceType::Attributes &attributes, QualType Ty,
CodeGenModule &CGM) const {
llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+ unsigned NumChannels = 1;
Ty = Ty->getCanonicalTypeUnqualified();
- if (const VectorType *V = dyn_cast<VectorType>(Ty))
+ if (const VectorType *V = dyn_cast<VectorType>(Ty)) {
+ NumChannels = V->getNumElements();
Ty = V->getElementType();
+ }
assert(!Ty->isVectorType() && "We still have a vector type.");
llvm::Type *SampledType = CGM.getTypes().ConvertTypeForMem(Ty);
@@ -560,8 +607,8 @@ llvm::Type *CommonSPIRTargetCodeGenInfo::getSPIRVImageTypeFromHLSLResource(
attributes.ResourceClass == llvm::dxil::ResourceClass::UAV ? 2 : 1;
// Image format.
- // Setting to unknown for now.
- IntParams[5] = 0;
+ IntParams[5] = getImageFormat(CGM.getLangOpts(), attributes, SampledType, Ty,
+ NumChannels);
llvm::TargetExtType *ImageType =
llvm::TargetExtType::get(Ctx, Name, {SampledType}, IntParams);
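
The codes returned by getImageFormat are raw SPIR-V ImageFormat operand values. A standalone model of the table, runnable outside of Clang so the mapping can be sanity-checked in isolation (illustrative only; the real helper additionally consults LangOpts and the resource class):

    #include <cassert>

    enum class Sampled { SInt32, UInt32, SInt64, UInt64, Float32 };

    static unsigned guessFormat(Sampled S, unsigned N) {
      switch (S) {
      case Sampled::SInt32:  return N == 1 ? 24 : N == 2 ? 25 : N == 4 ? 21 : 0;
      case Sampled::UInt32:  return N == 1 ? 33 : N == 2 ? 35 : N == 4 ? 30 : 0;
      case Sampled::SInt64:  return N == 1 ? 41 : 0;
      case Sampled::UInt64:  return N == 1 ? 40 : 0;
      case Sampled::Float32: return N == 1 ? 3  : N == 2 ? 6  : N == 4 ? 1  : 0;
      }
      return 0; // Unknown
    }

    int main() {
      assert(guessFormat(Sampled::Float32, 4) == 1);  // Rgba32f
      assert(guessFormat(Sampled::SInt32, 2) == 25);  // Rg32i
      assert(guessFormat(Sampled::UInt64, 1) == 40);  // R64ui
    }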
diff --git a/clang/lib/CodeGen/Targets/Sparc.cpp b/clang/lib/CodeGen/Targets/Sparc.cpp
index 9642196..5f3c15d 100644
--- a/clang/lib/CodeGen/Targets/Sparc.cpp
+++ b/clang/lib/CodeGen/Targets/Sparc.cpp
@@ -237,8 +237,8 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
/*ByVal=*/false);
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
// Integer types smaller than a register are extended.
if (Size < 64 && Ty->isIntegerType())
@@ -303,6 +303,7 @@ RValue SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
case ABIArgInfo::Expand:
case ABIArgInfo::CoerceAndExpand:
case ABIArgInfo::InAlloca:
+ case ABIArgInfo::TargetSpecific:
llvm_unreachable("Unsupported ABI kind for va_arg");
case ABIArgInfo::Extend: {
diff --git a/clang/lib/CodeGen/Targets/SystemZ.cpp b/clang/lib/CodeGen/Targets/SystemZ.cpp
index 6ea6c7a..9b6b72b1 100644
--- a/clang/lib/CodeGen/Targets/SystemZ.cpp
+++ b/clang/lib/CodeGen/Targets/SystemZ.cpp
@@ -145,8 +145,8 @@ public:
bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
@@ -208,10 +208,8 @@ llvm::Type *SystemZABIInfo::getFPArgumentType(QualType Ty,
}
QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
- const RecordType *RT = Ty->getAs<RecordType>();
-
- if (RT && RT->isStructureOrClassType()) {
- const RecordDecl *RD = RT->getDecl();
+ const auto *RD = Ty->getAsRecordDecl();
+ if (RD && RD->isStructureOrClass()) {
QualType Found;
// If this is a C++ record, check the bases first.
@@ -452,10 +450,9 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
/*ByVal=*/false);
// Handle small structures.
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (const auto *RD = Ty->getAsRecordDecl()) {
// Structures with flexible arrays have variable length, so really
// fail the size test above.
- const RecordDecl *RD = RT->getDecl();
if (RD->hasFlexibleArrayMember())
return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
/*ByVal=*/false);
@@ -525,8 +522,7 @@ bool SystemZTargetCodeGenInfo::isVectorTypeBased(const Type *Ty,
if (Ty->isVectorType() && Ctx.getTypeSize(Ty) / 8 >= 16)
return true;
- if (const auto *RecordTy = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RecordTy->getDecl();
+ if (const auto *RD = Ty->getAsRecordDecl()) {
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
if (CXXRD->hasDefinition())
for (const auto &I : CXXRD->bases())
diff --git a/clang/lib/CodeGen/Targets/WebAssembly.cpp b/clang/lib/CodeGen/Targets/WebAssembly.cpp
index 9217c78..ebe996a 100644
--- a/clang/lib/CodeGen/Targets/WebAssembly.cpp
+++ b/clang/lib/CodeGen/Targets/WebAssembly.cpp
@@ -115,10 +115,9 @@ ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
// For the experimental multivalue ABI, fully expand all other aggregates
if (Kind == WebAssemblyABIKind::ExperimentalMV) {
- const RecordType *RT = Ty->getAs<RecordType>();
- assert(RT);
+ const auto *RD = Ty->castAsRecordDecl();
bool HasBitField = false;
- for (auto *Field : RT->getDecl()->fields()) {
+ for (auto *Field : RD->fields()) {
if (Field->isBitField()) {
HasBitField = true;
break;
diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp
index abb9148..71db63b 100644
--- a/clang/lib/CodeGen/Targets/X86.cpp
+++ b/clang/lib/CodeGen/Targets/X86.cpp
@@ -352,14 +352,15 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
return shouldReturnTypeInRegister(AT->getElementType(), Context);
// Otherwise, it must be a record type.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT) return false;
+ const auto *RD = Ty->getAsRecordDecl();
+ if (!RD)
+ return false;
// FIXME: Traverse bases here too.
// Structure types are passed in register if all fields would be
// passed in a register.
- for (const auto *FD : RT->getDecl()->fields()) {
+ for (const auto *FD : RD->fields()) {
// Empty fields are ignored.
if (isEmptyField(Context, FD, true))
continue;
@@ -426,12 +427,11 @@ static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
// We can only expand structure types.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT)
+ const RecordDecl *RD = Ty->getAsRecordDecl();
+ if (!RD)
return false;
- const RecordDecl *RD = RT->getDecl();
uint64_t Size = 0;
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (!IsWin32StructABI) {
// On non-Windows, we have to conservatively match our old bitcode
// prototypes in order to be ABI-compatible at the bitcode level.
@@ -507,11 +507,10 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
}
if (isAggregateTypeForABI(RetTy)) {
- if (const RecordType *RT = RetTy->getAs<RecordType>()) {
+ if (const auto *RD = RetTy->getAsRecordDecl();
+ RD && RD->hasFlexibleArrayMember())
// Structures with flexible arrays are always indirect.
- if (RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectReturnResult(RetTy, State);
- }
+ return getIndirectReturnResult(RetTy, State);
// If specified, structs and unions are always indirect.
if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
@@ -553,8 +552,8 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
}
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = RetTy->getAsEnumDecl())
+ RetTy = ED->getIntegerType();
if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
@@ -753,7 +752,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
TypeInfo TI = getContext().getTypeInfo(Ty);
// Check with the C++ ABI first.
- const RecordType *RT = Ty->getAs<RecordType>();
+ const RecordType *RT = Ty->getAsCanonical<RecordType>();
if (RT) {
CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
if (RAA == CGCXXABI::RAA_Indirect) {
@@ -796,7 +795,8 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
if (isAggregateTypeForABI(Ty)) {
// Structures with flexible arrays are always indirect.
// FIXME: This should not be byval!
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ if (RT &&
+ RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
return getIndirectResult(Ty, true, State);
// Ignore empty structs/unions on non-Windows.
@@ -831,7 +831,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
unsigned AlignInBits = 0;
if (RT) {
const ASTRecordLayout &Layout =
- getContext().getASTRecordLayout(RT->getDecl());
+ getContext().getASTRecordLayout(RT->getOriginalDecl());
AlignInBits = getContext().toBits(Layout.getRequiredAlignment());
} else if (TI.isAlignRequired()) {
AlignInBits = TI.Align;
@@ -881,9 +881,8 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
return ABIArgInfo::getDirect();
}
-
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
bool InReg = shouldPrimitiveUseInReg(Ty, State);
@@ -1008,6 +1007,7 @@ static bool isArgInAlloca(const ABIArgInfo &Info) {
return true;
case ABIArgInfo::Ignore:
case ABIArgInfo::IndirectAliased:
+ case ABIArgInfo::TargetSpecific:
return false;
case ABIArgInfo::Indirect:
case ABIArgInfo::Direct:
@@ -1845,9 +1845,9 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
return;
}
- if (const EnumType *ET = Ty->getAs<EnumType>()) {
+ if (const auto *ED = Ty->getAsEnumDecl()) {
// Classify the underlying integer type.
- classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
+ classify(ED->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
return;
}
@@ -2039,7 +2039,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
return;
}
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (const RecordType *RT = Ty->getAsCanonical<RecordType>()) {
uint64_t Size = getContext().getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
@@ -2053,7 +2053,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
if (getRecordArgABI(RT, getCXXABI()))
return;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// Assume variable sized types are passed in memory.
if (RD->hasFlexibleArrayMember())
@@ -2069,9 +2069,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
for (const auto &I : CXXRD->bases()) {
assert(!I.isVirtual() && !I.getType()->isDependentType() &&
"Unexpected base class!");
- const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
-
+ const auto *Base = I.getType()->castAsCXXRecordDecl();
// Classify this field.
//
// AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
@@ -2183,8 +2181,8 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
// place naturally.
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
if (Ty->isBitIntType())
return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace());
@@ -2225,8 +2223,8 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
!Ty->isBitIntType()) {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
: ABIArgInfo::getDirect());
@@ -2346,8 +2344,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
return true;
}
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ if (const auto *RD = Ty->getAsRecordDecl()) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
// If this is a C++ record, check the bases first.
@@ -2355,8 +2352,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
for (const auto &I : CXXRD->bases()) {
assert(!I.isVirtual() && !I.getType()->isDependentType() &&
"Unexpected base class!");
- const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *Base = I.getType()->castAsCXXRecordDecl();
// If the base is after the span we care about, ignore it.
unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
@@ -2636,8 +2632,8 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const {
// so that the parameter gets the right LLVM IR attributes.
if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = RetTy->getAsEnumDecl())
+ RetTy = ED->getIntegerType();
if (RetTy->isIntegralOrEnumerationType() &&
isPromotableIntegerTypeForABI(RetTy))
@@ -2786,8 +2782,8 @@ X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
// so that the parameter gets the right LLVM IR attributes.
if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
// Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
if (Ty->isIntegralOrEnumerationType() &&
isPromotableIntegerTypeForABI(Ty))
@@ -2866,14 +2862,15 @@ ABIArgInfo
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
unsigned &NeededSSE,
unsigned &MaxVectorWidth) const {
- auto RT = Ty->getAs<RecordType>();
- assert(RT && "classifyRegCallStructType only valid with struct types");
+ auto *RD = cast<RecordType>(Ty.getCanonicalType())
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
- if (RT->getDecl()->hasFlexibleArrayMember())
+ if (RD->hasFlexibleArrayMember())
return getIndirectReturnResult(Ty);
// Sum up bases
- if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (auto CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (CXXRD->isDynamicClass()) {
NeededInt = NeededSSE = 0;
return getIndirectReturnResult(Ty);
@@ -2889,7 +2886,7 @@ X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
}
// Sum up members
- for (const auto *FD : RT->getDecl()->fields()) {
+ for (const auto *FD : RD->fields()) {
QualType MTy = FD->getType();
if (MTy->isRecordType() && !MTy->isUnionType()) {
if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
@@ -3312,14 +3309,14 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
if (Ty->isVoidType())
return ABIArgInfo::getIgnore();
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ if (const auto *ED = Ty->getAsEnumDecl())
+ Ty = ED->getIntegerType();
TypeInfo Info = getContext().getTypeInfo(Ty);
uint64_t Width = Info.Width;
CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
- const RecordType *RT = Ty->getAs<RecordType>();
+ const RecordType *RT = Ty->getAsCanonical<RecordType>();
if (RT) {
if (!IsReturnType) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
@@ -3327,7 +3324,7 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
RAA == CGCXXABI::RAA_DirectInMemory);
}
- if (RT->getDecl()->hasFlexibleArrayMember())
+ if (RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
/*ByVal=*/false);
}
diff --git a/clang/lib/CodeGen/Targets/XCore.cpp b/clang/lib/CodeGen/Targets/XCore.cpp
index b7824bd..ab01154 100644
--- a/clang/lib/CodeGen/Targets/XCore.cpp
+++ b/clang/lib/CodeGen/Targets/XCore.cpp
@@ -157,6 +157,7 @@ RValue XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
case ABIArgInfo::Expand:
case ABIArgInfo::CoerceAndExpand:
case ABIArgInfo::InAlloca:
+ case ABIArgInfo::TargetSpecific:
llvm_unreachable("Unsupported ABI kind for va_arg");
case ABIArgInfo::Ignore:
Val = Address(llvm::UndefValue::get(ArgPtrTy), ArgTy, TypeAlign);
@@ -379,7 +380,7 @@ static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
// We collect all encoded fields and order as necessary.
bool IsRecursive = false;
- const RecordDecl *RD = RT->getDecl()->getDefinition();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinition();
if (RD && !RD->field_empty()) {
// An incomplete TypeString stub is placed in the cache for this RecordType
// so that recursive calls to this RecordType will use it whilst building a
@@ -428,7 +429,7 @@ static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
Enc += "){";
// We collect all encoded enumerations and order them alphanumerically.
- if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
+ if (const EnumDecl *ED = ET->getOriginalDecl()->getDefinition()) {
SmallVector<FieldEncoding, 16> FE;
for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
++I) {
@@ -614,13 +615,10 @@ static bool appendType(SmallStringEnc &Enc, QualType QType,
if (const PointerType *PT = QT->getAs<PointerType>())
return appendPointerType(Enc, PT, CGM, TSC);
- if (const EnumType *ET = QT->getAs<EnumType>())
+ if (const EnumType *ET = QT->getAsCanonical<EnumType>())
return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
- if (const RecordType *RT = QT->getAsStructureType())
- return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
-
- if (const RecordType *RT = QT->getAsUnionType())
+ if (const RecordType *RT = QT->getAsCanonical<RecordType>())
return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
if (const FunctionType *FT = QT->getAs<FunctionType>())
diff --git a/clang/lib/CodeGen/TrapReasonBuilder.cpp b/clang/lib/CodeGen/TrapReasonBuilder.cpp
new file mode 100644
index 0000000..5881229
--- /dev/null
+++ b/clang/lib/CodeGen/TrapReasonBuilder.cpp
@@ -0,0 +1,50 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements TrapReasonBuilder and related classes.
+///
+//===----------------------------------------------------------------------===//
+#include "TrapReasonBuilder.h"
+
+namespace clang {
+namespace CodeGen {
+
+TrapReasonBuilder::TrapReasonBuilder(DiagnosticsEngine *DiagObj,
+ unsigned DiagID, TrapReason &TR)
+ : DiagnosticBuilder(DiagObj, SourceLocation(), DiagID), TR(TR) {
+ assert(DiagObj->getDiagnosticIDs()->isTrapDiag(DiagID));
+}
+
+TrapReasonBuilder::~TrapReasonBuilder() {
+ // Store the trap message and category into the TrapReason object.
+ getMessage(TR.Message);
+ TR.Category = getCategory();
+
+ // Make sure that when `DiagnosticBuilder::~DiagnosticBuilder()`
+ // calls `Emit()` that it does nothing.
+ Clear();
+}
+
+void TrapReasonBuilder::getMessage(SmallVectorImpl<char> &Storage) {
+ // Render the Diagnostic
+ Diagnostic Info(getDiagnosticsEngine(), *this);
+ Info.FormatDiagnostic(Storage);
+}
+
+StringRef TrapReasonBuilder::getCategory() {
+ auto CategoryID =
+ getDiagnosticsEngine()->getDiagnosticIDs()->getCategoryNumberForDiag(
+ getDiagID());
+ if (CategoryID == 0)
+ return "";
+ return getDiagnosticsEngine()->getDiagnosticIDs()->getCategoryNameFromID(
+ CategoryID);
+}
+} // namespace CodeGen
+} // namespace clang
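
A hedged sketch of the intended flow, using the CodeGenModule::BuildTrapReason helper named in the header below; the diagnostic ID, SomeExpr, and the final attach step are placeholders, not APIs introduced by this patch:

    TrapReason TR; // stack-allocated, as the header recommends
    CGM.BuildTrapReason(diag::trap_some_check, TR) << 0 << SomeExpr;
    // The builder's destructor ran at the end of the full expression: TR now
    // holds the formatted message and category, and nothing was emitted to
    // the normal diagnostic consumers.
    if (!TR.isEmpty())
      attachToTrap(TrapInst, TR.getMessage(), TR.getCategory()); // hypothetical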
diff --git a/clang/lib/CodeGen/TrapReasonBuilder.h b/clang/lib/CodeGen/TrapReasonBuilder.h
new file mode 100644
index 0000000..b16cae4
--- /dev/null
+++ b/clang/lib/CodeGen/TrapReasonBuilder.h
@@ -0,0 +1,112 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declaration of TrapReasonBuilder and related classes.
+///
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_CODEGEN_TRAP_REASON_BUILDER_H
+#define LLVM_CLANG_CODEGEN_TRAP_REASON_BUILDER_H
+#include "clang/Basic/Diagnostic.h"
+
+namespace clang {
+namespace CodeGen {
+
+/// Helper class for \class TrapReasonBuilder. \class TrapReason stores the
+/// "trap reason" built by \class TrapReasonBuilder, which consists of
+/// a trap message and a trap category.
+///
+/// It is intended that this object be allocated on the stack.
+class TrapReason {
+public:
+ TrapReason() = default;
+  /// \return The trap message. Note that the underlying storage for the
+  /// returned StringRef is owned by this object, so the StringRef must not be
+  /// used after this object is destroyed.
+ StringRef getMessage() const { return Message; }
+
+ /// \return the trap category (e.g. "Undefined Behavior Sanitizer")
+ StringRef getCategory() const { return Category; }
+
+ bool isEmpty() const {
+    // Check both Message and Category: it is legitimate for the Message to be
+    // empty while the Category is non-empty, when the trap category is known
+    // but the specific reason is not available during codegen.
+    return Message.empty() && Category.empty();
+ }
+
+private:
+ llvm::SmallString<64> Message;
+ // The Category doesn't need its own storage because the StringRef points
+ // to a global constant string.
+ StringRef Category;
+
+ // Only this class can set the private fields.
+ friend class TrapReasonBuilder;
+};
+
+/// Class to make it convenient to initialize TrapReason objects which can be
+/// used to attach the "trap reason" to trap instructions.
+///
+/// Although this class inherits from \class DiagnosticBuilder, it has
+/// slightly different semantics:
+///
+/// * This class should only be used with trap diagnostics (declared in
+///   `DiagnosticTrapKinds.td`).
+/// * Unlike normal diagnostic builders, `TrapReasonBuilder` does not emit
+///   diagnostics to the normal diagnostic consumers on destruction.
+///   Instead, on destruction it assigns to the TrapReason object passed
+///   into the constructor.
+///
+/// Given that this class inherits from `DiagnosticBuilder`, it inherits all
+/// of its abilities to format diagnostic messages and consume various Clang
+/// AST types (e.g. Type, Expr). This makes it particularly suited to printing
+/// types and expressions from the AST while codegen-ing runtime checks.
+///
+/// Example use via the `CodeGenModule::BuildTrapReason` helper.
+///
+/// \code
+/// {
+/// TrapReason TR;
+/// CGM.BuildTrapReason(diag::trap_diagnostic, TR) << 0 << SomeExpr;
+/// consume(&TR);
+/// }
+/// \endcode
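+///
+/// The TrapReason object is populated when the temporary TrapReasonBuilder
+/// is destroyed at the end of the statement, so `TR` must outlive the
+/// builder expression and should only be read afterwards.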
+///
+class TrapReasonBuilder : public DiagnosticBuilder {
+public:
+ TrapReasonBuilder(DiagnosticsEngine *DiagObj, unsigned DiagID,
+ TrapReason &TR);
+ ~TrapReasonBuilder();
+
+  // Prevent accidental copying and assignment.
+ TrapReasonBuilder &operator=(const TrapReasonBuilder &) = delete;
+ TrapReasonBuilder &operator=(const TrapReasonBuilder &&) = delete;
+ TrapReasonBuilder(const TrapReasonBuilder &) = delete;
+ TrapReasonBuilder(const TrapReasonBuilder &&) = delete;
+
+private:
+  /// Format the trap message into `Storage`.
+ void getMessage(SmallVectorImpl<char> &Storage);
+
+  /// \return The trap category, i.e. the `CategoryName` property of the
+  /// `trap` diagnostic declared in `DiagnosticTrapKinds.td`.
+ StringRef getCategory();
+
+private:
+ TrapReason &TR;
+};
+
+} // namespace CodeGen
+} // namespace clang
+
+#endif
diff --git a/clang/lib/Driver/Action.cpp b/clang/lib/Driver/Action.cpp
index ec09726..e19daa9 100644
--- a/clang/lib/Driver/Action.cpp
+++ b/clang/lib/Driver/Action.cpp
@@ -52,6 +52,8 @@ const char *Action::getClassName(ActionClass AC) {
return "binary-analyzer";
case BinaryTranslatorJobClass:
return "binary-translator";
+ case ObjcopyJobClass:
+ return "objcopy";
}
llvm_unreachable("invalid class");
@@ -467,3 +469,8 @@ void BinaryTranslatorJobAction::anchor() {}
BinaryTranslatorJobAction::BinaryTranslatorJobAction(Action *Input,
types::ID Type)
: JobAction(BinaryTranslatorJobClass, Input, Type) {}
+
+void ObjcopyJobAction::anchor() {}
+
+ObjcopyJobAction::ObjcopyJobAction(Action *Input, types::ID Type)
+ : JobAction(ObjcopyJobClass, Input, Type) {}
diff --git a/clang/lib/Driver/CMakeLists.txt b/clang/lib/Driver/CMakeLists.txt
index 45782cb..7c4f70b 100644
--- a/clang/lib/Driver/CMakeLists.txt
+++ b/clang/lib/Driver/CMakeLists.txt
@@ -98,5 +98,6 @@ add_clang_library(clangDriver
LINK_LIBS
clangBasic
+ clangLex
${system_libs}
)
diff --git a/clang/lib/Driver/Compilation.cpp b/clang/lib/Driver/Compilation.cpp
index a39952e..4e30031 100644
--- a/clang/lib/Driver/Compilation.cpp
+++ b/clang/lib/Driver/Compilation.cpp
@@ -232,11 +232,6 @@ static bool ActionFailed(const Action *A,
return false;
}
-static bool InputsOk(const Command &C,
- const FailingCommandList &FailingCommands) {
- return !ActionFailed(&C.getSource(), FailingCommands);
-}
-
void Compilation::ExecuteJobs(const JobList &Jobs,
FailingCommandList &FailingCommands,
bool LogOnly) const {
@@ -245,7 +240,7 @@ void Compilation::ExecuteJobs(const JobList &Jobs,
// In all but CLMode, execute all the jobs unless the necessary inputs for the
// job is missing due to previous failures.
for (const auto &Job : Jobs) {
- if (!InputsOk(Job, FailingCommands))
+ if (ActionFailed(&Job.getSource(), FailingCommands))
continue;
const Command *FailingCommand = nullptr;
if (int Res = ExecuteCommand(Job, FailingCommand, LogOnly)) {
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index 8c0bba9..f110dba 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -66,6 +66,7 @@
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "clang/Driver/Types.h"
+#include "clang/Lex/DependencyDirectivesScanner.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
@@ -4188,6 +4189,11 @@ void Driver::handleArguments(Compilation &C, DerivedArgList &Args,
YcArg = nullptr;
}
+ if (Args.hasArgNoClaim(options::OPT_fmodules_driver))
+ // TODO: Check against all incompatible -fmodules-driver arguments
+ if (!ModulesModeCXX20 && !Args.hasArgNoClaim(options::OPT_fmodules))
+ Args.eraseArg(options::OPT_fmodules_driver);
+
Arg *FinalPhaseArg;
phases::ID FinalPhase = getFinalPhase(Args, &FinalPhaseArg);
@@ -4314,6 +4320,33 @@ void Driver::handleArguments(Compilation &C, DerivedArgList &Args,
}
}
+static bool hasCXXModuleInputType(const Driver::InputList &Inputs) {
+ const auto IsTypeCXXModule = [](const auto &Input) -> bool {
+ const auto TypeID = Input.first;
+ return (TypeID == types::TY_CXXModule);
+ };
+ return llvm::any_of(Inputs, IsTypeCXXModule);
+}
+
+llvm::ErrorOr<bool>
+Driver::ScanInputsForCXX20ModulesUsage(const InputList &Inputs) const {
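+  // Only C++ inputs can use C++20 modules, so restrict the scan to them.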
+ const auto CXXInputs = llvm::make_filter_range(
+ Inputs, [](const auto &Input) { return types::isCXX(Input.first); });
+ for (const auto &Input : CXXInputs) {
+ StringRef Filename = Input.second->getSpelling();
+ auto ErrOrBuffer = VFS->getBufferForFile(Filename);
+ if (!ErrOrBuffer)
+ return ErrOrBuffer.getError();
+ const auto Buffer = std::move(*ErrOrBuffer);
+
+ if (scanInputForCXX20ModulesUsage(Buffer->getBuffer())) {
+ Diags.Report(diag::remark_found_cxx20_module_usage) << Filename;
+ return true;
+ }
+ }
+ return false;
+}
+
void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
const InputList &Inputs, ActionList &Actions) const {
llvm::PrettyStackTraceString CrashInfo("Building compilation actions");
@@ -4325,6 +4358,33 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
handleArguments(C, Args, Inputs, Actions);
+ if (Args.hasFlag(options::OPT_fmodules_driver,
+ options::OPT_fno_modules_driver, false)) {
+ // TODO: Move the logic for implicitly enabling explicit-module-builds out
+ // of -fmodules-driver once it is no longer experimental.
+ // Currently, this serves diagnostic purposes only.
+ bool UsesCXXModules = hasCXXModuleInputType(Inputs);
+ if (!UsesCXXModules) {
+ const auto ErrOrScanResult = ScanInputsForCXX20ModulesUsage(Inputs);
+ if (!ErrOrScanResult) {
+ Diags.Report(diag::err_cannot_open_file)
+ << ErrOrScanResult.getError().message();
+ return;
+ }
+ UsesCXXModules = *ErrOrScanResult;
+ }
+ if (UsesCXXModules || Args.hasArg(options::OPT_fmodules))
+ BuildDriverManagedModuleBuildActions(C, Args, Inputs, Actions);
+ return;
+ }
+
+ BuildDefaultActions(C, Args, Inputs, Actions);
+}
+
+void Driver::BuildDefaultActions(Compilation &C, DerivedArgList &Args,
+ const InputList &Inputs,
+ ActionList &Actions) const {
+
bool UseNewOffloadingDriver =
C.isOffloadingHostKind(Action::OFK_OpenMP) ||
C.isOffloadingHostKind(Action::OFK_SYCL) ||
@@ -4582,16 +4642,28 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
}
}
- // Call validator for dxil when -Vd not in Args.
if (C.getDefaultToolChain().getTriple().isDXIL()) {
- // Only add action when needValidation.
const auto &TC =
static_cast<const toolchains::HLSLToolChain &>(C.getDefaultToolChain());
+
+ // Call objcopy for manipulation of the unvalidated DXContainer when an
+ // option in Args requires it.
+ if (TC.requiresObjcopy(Args)) {
+ Action *LastAction = Actions.back();
+      // llvm-objcopy expects an unvalidated DXIL container (types::TY_Object).
+ if (LastAction->getType() == types::TY_Object)
+ Actions.push_back(
+ C.MakeAction<ObjcopyJobAction>(LastAction, types::TY_Object));
+ }
+
+ // Call validator for dxil when -Vd not in Args.
if (TC.requiresValidation(Args)) {
Action *LastAction = Actions.back();
Actions.push_back(C.MakeAction<BinaryAnalyzeJobAction>(
LastAction, types::TY_DX_CONTAINER));
}
+
+ // Call metal-shaderconverter when targeting metal.
if (TC.requiresBinaryTranslation(Args)) {
Action *LastAction = Actions.back();
// Metal shader converter runs on DXIL containers, which can either be
@@ -4608,6 +4680,12 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
Args.ClaimAllArgs(options::OPT_cl_ignored_Group);
}
+void Driver::BuildDriverManagedModuleBuildActions(
+ Compilation &C, llvm::opt::DerivedArgList &Args, const InputList &Inputs,
+ ActionList &Actions) const {
+ Diags.Report(diag::remark_performing_driver_managed_module_build);
+}
+
/// Returns the canonical name for the offloading architecture when using a HIP
/// or CUDA architecture.
static StringRef getCanonicalArchString(Compilation &C,
@@ -6187,8 +6265,9 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
C.getArgs().hasArg(options::OPT_dxc_Fo)) ||
JA.getType() == types::TY_DX_CONTAINER) {
StringRef FoValue = C.getArgs().getLastArgValue(options::OPT_dxc_Fo);
- // If we are targeting DXIL and not validating or translating, we should set
- // the final result file. Otherwise we should emit to a temporary.
+ // If we are targeting DXIL and not validating/translating/objcopying, we
+ // should set the final result file. Otherwise we should emit to a
+ // temporary.
if (C.getDefaultToolChain().getTriple().isDXIL()) {
const auto &TC = static_cast<const toolchains::HLSLToolChain &>(
C.getDefaultToolChain());
diff --git a/clang/lib/Driver/OffloadBundler.cpp b/clang/lib/Driver/OffloadBundler.cpp
index e83aee0..f69ac41 100644
--- a/clang/lib/Driver/OffloadBundler.cpp
+++ b/clang/lib/Driver/OffloadBundler.cpp
@@ -1936,8 +1936,7 @@ Error OffloadBundler::UnbundleArchive() {
/// Write out an archive for each target
for (auto &Target : BundlerConfig.TargetNames) {
StringRef FileName = TargetOutputFileNameMap[Target];
- StringMapIterator<std::vector<llvm::NewArchiveMember>> CurArchiveMembers =
- OutputArchivesMap.find(Target);
+ auto CurArchiveMembers = OutputArchivesMap.find(Target);
if (CurArchiveMembers != OutputArchivesMap.end()) {
if (Error WriteErr = writeArchive(FileName, CurArchiveMembers->getValue(),
SymtabWritingMode::NormalSymtab,
diff --git a/clang/lib/Driver/SanitizerArgs.cpp b/clang/lib/Driver/SanitizerArgs.cpp
index 98793a5..7ce1afe 100644
--- a/clang/lib/Driver/SanitizerArgs.cpp
+++ b/clang/lib/Driver/SanitizerArgs.cpp
@@ -851,6 +851,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
if (AllAddedKinds & SanitizerKind::KCFI) {
+ CfiICallGeneralizePointers =
+ Args.hasArg(options::OPT_fsanitize_cfi_icall_generalize_pointers);
CfiICallNormalizeIntegers =
Args.hasArg(options::OPT_fsanitize_cfi_icall_normalize_integers);
@@ -1382,11 +1384,7 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
CmdArgs.push_back(Args.MakeArgString("-fsanitize-annotate-debug-info=" +
toString(AnnotateDebugInfo)));
- if (const Arg *A =
- Args.getLastArg(options::OPT_fsanitize_debug_trap_reasons,
- options::OPT_fno_sanitize_debug_trap_reasons)) {
- CmdArgs.push_back(Args.MakeArgString(A->getAsString(Args)));
- }
+ Args.AddLastArg(CmdArgs, options::OPT_fsanitize_debug_trap_reasons_EQ);
addSpecialCaseListOpt(Args, CmdArgs,
"-fsanitize-ignorelist=", UserIgnorelistFiles);
diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp
index 7667dbd..49c89ab 100644
--- a/clang/lib/Driver/ToolChain.cpp
+++ b/clang/lib/Driver/ToolChain.cpp
@@ -652,6 +652,7 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
case Action::VerifyDebugInfoJobClass:
case Action::BinaryAnalyzeJobClass:
case Action::BinaryTranslatorJobClass:
+ case Action::ObjcopyJobClass:
llvm_unreachable("Invalid tool kind.");
case Action::CompileJobClass:
@@ -1409,13 +1410,6 @@ void ToolChain::addSystemFrameworkInclude(const llvm::opt::ArgList &DriverArgs,
CC1Args.push_back(DriverArgs.MakeArgString(Path));
}
-/// Utility function to add a system include directory to CC1 arguments.
-void ToolChain::addSystemInclude(const ArgList &DriverArgs,
- ArgStringList &CC1Args, const Twine &Path) {
- CC1Args.push_back("-internal-isystem");
- CC1Args.push_back(DriverArgs.MakeArgString(Path));
-}
-
/// Utility function to add a system include directory with extern "C"
/// semantics to CC1 arguments.
///
@@ -1438,6 +1432,14 @@ void ToolChain::addExternCSystemIncludeIfExists(const ArgList &DriverArgs,
addExternCSystemInclude(DriverArgs, CC1Args, Path);
}
+/// Utility function to add a system include directory to CC1 arguments.
+/*static*/ void ToolChain::addSystemInclude(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ const Twine &Path) {
+ CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(Path));
+}
+
/// Utility function to add a list of system framework directories to CC1.
void ToolChain::addSystemFrameworkIncludes(const ArgList &DriverArgs,
ArgStringList &CC1Args,
diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index 418f9fd..98f5efb 100644
--- a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -52,6 +52,22 @@ std::string aarch64::getAArch64TargetCPU(const ArgList &Args,
return "apple-m1";
}
+ if (Triple.getOS() == llvm::Triple::IOS) {
+ assert(!Triple.isSimulatorEnvironment() && "iossim should be mac-like");
+ // iOS 26 only runs on apple-a12 and later CPUs.
+ if (!Triple.isOSVersionLT(26))
+ return "apple-a12";
+ }
+
+ if (Triple.isWatchOS()) {
+ assert(!Triple.isSimulatorEnvironment() && "watchossim should be mac-like");
+ // arm64_32/arm64e watchOS requires S4 before watchOS 26, S6 after.
+ if (Triple.getArch() == llvm::Triple::aarch64_32 || Triple.isArm64e())
+ return Triple.isOSVersionLT(26) ? "apple-s4" : "apple-s6";
+ // arm64 (non-e, non-32) watchOS comes later, and requires S6 anyway.
+ return "apple-s6";
+ }
+
if (Triple.isXROS()) {
// The xrOS simulator runs on M1 as well, it should have been covered above.
assert(!Triple.isSimulatorEnvironment() && "xrossim should be mac-like");
diff --git a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
index baa2c8c..76dde0d 100644
--- a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -273,9 +273,12 @@ std::string riscv::getRISCVArch(const llvm::opt::ArgList &Args,
// Clang does not yet support MULTILIB_REUSE, so we use `rv{XLEN}imafdc`
// instead of `rv{XLEN}gc` though they are (currently) equivalent.
- // 1. If `-march=` is specified, use it.
- if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
- return A->getValue();
+ // 1. If `-march=` is specified, use it unless the value is "unset".
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ StringRef MArch = A->getValue();
+ if (MArch != "unset")
+ return MArch.str();
+ }
// 2. Get march (isa string) based on `-mcpu=`
if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
@@ -300,7 +303,7 @@ std::string riscv::getRISCVArch(const llvm::opt::ArgList &Args,
StringRef MArch = llvm::RISCV::getMArchFromMcpu(CPU);
// Bypass if target cpu's default march is empty.
- if (MArch != "")
+ if (!MArch.empty())
return MArch.str();
}
diff --git a/clang/lib/Driver/ToolChains/BareMetal.cpp b/clang/lib/Driver/ToolChains/BareMetal.cpp
index 25a16fe..9b7f58c 100644
--- a/clang/lib/Driver/ToolChains/BareMetal.cpp
+++ b/clang/lib/Driver/ToolChains/BareMetal.cpp
@@ -586,11 +586,18 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const Driver &D = getToolChain().getDriver();
const llvm::Triple::ArchType Arch = TC.getArch();
const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
+ const bool IsStaticPIE = getStaticPIE(Args, TC);
if (!D.SysRoot.empty())
CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
CmdArgs.push_back("-Bstatic");
+ if (IsStaticPIE) {
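+    // Static PIEs are position-independent but load with no dynamic linker;
+    // "-z text" additionally disallows dynamic relocations in read-only
+    // sections.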
+ CmdArgs.push_back("-pie");
+ CmdArgs.push_back("--no-dynamic-linker");
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("text");
+ }
if (const char *LDMOption = getLDMOption(TC.getTriple(), Args)) {
CmdArgs.push_back("-m");
@@ -620,14 +627,18 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *CRTBegin, *CRTEnd;
if (NeedCRTs) {
- if (!Args.hasArg(options::OPT_r))
- CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crt0.o")));
+ if (!Args.hasArg(options::OPT_r)) {
+ const char *crt = "crt0.o";
+ if (IsStaticPIE)
+ crt = "rcrt1.o";
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath(crt)));
+ }
if (TC.hasValidGCCInstallation() || detectGCCToolchainAdjacent(D)) {
auto RuntimeLib = TC.GetRuntimeLibType(Args);
switch (RuntimeLib) {
case (ToolChain::RLT_Libgcc): {
- CRTBegin = "crtbegin.o";
- CRTEnd = "crtend.o";
+ CRTBegin = IsStaticPIE ? "crtbeginS.o" : "crtbegin.o";
+ CRTEnd = IsStaticPIE ? "crtendS.o" : "crtend.o";
break;
}
case (ToolChain::RLT_CompilerRT): {
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index 6eb77610..21e45c6 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -1736,7 +1736,6 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
options::OPT_fno_ptrauth_objc_interface_sel);
Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_objc_class_ro,
options::OPT_fno_ptrauth_objc_class_ro);
-
if (Triple.getEnvironment() == llvm::Triple::PAuthTest)
handlePAuthABI(Args, CmdArgs);
@@ -3802,7 +3801,9 @@ static void RenderHLSLOptions(const ArgList &Args, ArgStringList &CmdArgs,
options::OPT_disable_llvm_passes,
options::OPT_fnative_half_type,
options::OPT_hlsl_entrypoint,
- options::OPT_fdx_rootsignature_version};
+ options::OPT_fdx_rootsignature_define,
+ options::OPT_fdx_rootsignature_version,
+ options::OPT_fhlsl_spv_use_unknown_image_format};
if (!types::isHLSL(InputType))
return;
for (const auto &Arg : ForwardedArguments)
@@ -4394,10 +4395,15 @@ static void renderDwarfFormat(const Driver &D, const llvm::Triple &T,
static void
renderDebugOptions(const ToolChain &TC, const Driver &D, const llvm::Triple &T,
- const ArgList &Args, bool IRInput, ArgStringList &CmdArgs,
- const InputInfo &Output,
+ const ArgList &Args, types::ID InputType,
+ ArgStringList &CmdArgs, const InputInfo &Output,
llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
DwarfFissionKind &DwarfFission) {
+ bool IRInput = isLLVMIR(InputType);
+ bool PlainCOrCXX = isDerivedFromC(InputType) && !isCuda(InputType) &&
+ !isHIP(InputType) && !isObjC(InputType) &&
+ !isOpenCL(InputType);
+
if (Args.hasFlag(options::OPT_fdebug_info_for_profiling,
options::OPT_fno_debug_info_for_profiling, false) &&
checkDebugInfoOption(
@@ -4591,8 +4597,15 @@ renderDebugOptions(const ToolChain &TC, const Driver &D, const llvm::Triple &T,
CmdArgs.push_back("-gembed-source");
}
+ // Enable Key Instructions by default if we're emitting DWARF, the language is
+ // plain C or C++, and optimisations are enabled.
+ Arg *OptLevel = Args.getLastArg(options::OPT_O_Group);
+ bool KeyInstructionsOnByDefault =
+ EmitDwarf && PlainCOrCXX && OptLevel &&
+ !OptLevel->getOption().matches(options::OPT_O0);
if (Args.hasFlag(options::OPT_gkey_instructions,
- options::OPT_gno_key_instructions, false))
+ options::OPT_gno_key_instructions,
+ KeyInstructionsOnByDefault))
CmdArgs.push_back("-gkey-instructions");
if (EmitCodeView) {
@@ -5982,12 +5995,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fno_knr_functions);
- // This is a coarse approximation of what llvm-gcc actually does, both
- // -fasynchronous-unwind-tables and -fnon-call-exceptions interact in more
- // complicated ways.
auto SanitizeArgs = TC.getSanitizerArgs(Args);
Args.AddLastArg(CmdArgs,
options::OPT_fallow_runtime_check_skip_hot_cutoff_EQ);
+
+ // This is a coarse approximation of what llvm-gcc actually does, both
+ // -fasynchronous-unwind-tables and -fnon-call-exceptions interact in more
+ // complicated ways.
bool IsAsyncUnwindTablesDefault =
TC.getDefaultUnwindTableLevel(Args) == ToolChain::UnwindTableLevel::Asynchronous;
bool IsSyncUnwindTablesDefault =
@@ -6059,8 +6073,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
llvm::codegenoptions::DebugInfoKind DebugInfoKind =
llvm::codegenoptions::NoDebugInfo;
DwarfFissionKind DwarfFission = DwarfFissionKind::None;
- renderDebugOptions(TC, D, RawTriple, Args, types::isLLVMIR(InputType),
- CmdArgs, Output, DebugInfoKind, DwarfFission);
+ renderDebugOptions(TC, D, RawTriple, Args, InputType, CmdArgs, Output,
+ DebugInfoKind, DwarfFission);
// Add the split debug info name to the command lines here so we
// can propagate it to the backend.
@@ -9117,10 +9131,16 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA,
OPT_flto_partitions_EQ,
OPT_flto_EQ};
const llvm::DenseSet<unsigned> LinkerOptions{OPT_mllvm, OPT_Zlinker_input};
- auto ShouldForward = [&](const llvm::DenseSet<unsigned> &Set, Arg *A) {
- return Set.contains(A->getOption().getID()) ||
- (A->getOption().getGroup().isValid() &&
- Set.contains(A->getOption().getGroup().getID()));
+ auto ShouldForwardForToolChain = [&](Arg *A, const ToolChain &TC) {
+ // Don't forward -mllvm to toolchains that don't support LLVM.
+ return TC.HasNativeLLVMSupport() || A->getOption().getID() != OPT_mllvm;
+ };
+ auto ShouldForward = [&](const llvm::DenseSet<unsigned> &Set, Arg *A,
+ const ToolChain &TC) {
+ return (Set.contains(A->getOption().getID()) ||
+ (A->getOption().getGroup().isValid() &&
+ Set.contains(A->getOption().getGroup().getID()))) &&
+ ShouldForwardForToolChain(A, TC);
};
ArgStringList CmdArgs;
@@ -9139,9 +9159,9 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA,
for (Arg *A : ToolChainArgs) {
if (A->getOption().matches(OPT_Zlinker_input))
LinkerArgs.emplace_back(A->getValue());
- else if (ShouldForward(CompilerOptions, A))
+ else if (ShouldForward(CompilerOptions, A, *TC))
A->render(Args, CompilerArgs);
- else if (ShouldForward(LinkerOptions, A))
+ else if (ShouldForward(LinkerOptions, A, *TC))
A->render(Args, LinkerArgs);
}
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 53fd525..2994223 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -2132,6 +2132,18 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
return std::make_tuple(RelocM, 0U, false);
}
+bool tools::getStaticPIE(const ArgList &Args, const ToolChain &TC) {
+ bool HasStaticPIE = Args.hasArg(options::OPT_static_pie);
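+  // -static-pie cannot be combined with -no-pie; diagnose any attempt to mix
+  // the two.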
+ if (HasStaticPIE && Args.hasArg(options::OPT_no_pie)) {
+ const Driver &D = TC.getDriver();
+ const llvm::opt::OptTable &Opts = D.getOpts();
+ StringRef StaticPIEName = Opts.getOptionName(options::OPT_static_pie);
+ StringRef NoPIEName = Opts.getOptionName(options::OPT_nopie);
+ D.Diag(diag::err_drv_cannot_mix_options) << StaticPIEName << NoPIEName;
+ }
+ return HasStaticPIE;
+}
+
// `-falign-functions` indicates that the functions should be aligned to the
// backend's preferred alignment.
//
@@ -2974,7 +2986,8 @@ void tools::addHIPRuntimeLibArgs(const ToolChain &TC, Compilation &C,
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) {
if ((C.getActiveOffloadKinds() & Action::OFK_HIP) &&
- !Args.hasArg(options::OPT_nostdlib) &&
+ (!Args.hasArg(options::OPT_nostdlib) ||
+ TC.getTriple().isKnownWindowsMSVCEnvironment()) &&
!Args.hasArg(options::OPT_no_hip_rt) && !Args.hasArg(options::OPT_r)) {
TC.AddHIPRuntimeLibArgs(Args, CmdArgs);
} else {
@@ -3113,6 +3126,8 @@ void tools::addMCModel(const Driver &D, const llvm::opt::ArgList &Args,
else if (CM == "medany")
CM = "large";
Ok = CM == "small" || CM == "medium" || CM == "large";
+ } else if (Triple.getArch() == llvm::Triple::lanai) {
+ Ok = llvm::is_contained({"small", "medium", "large"}, CM);
}
if (Ok) {
CmdArgs.push_back(Args.MakeArgString("-mcmodel=" + CM));
@@ -3316,14 +3331,8 @@ void tools::handleVectorizeSLPArgs(const ArgList &Args,
void tools::handleInterchangeLoopsArgs(const ArgList &Args,
ArgStringList &CmdArgs) {
- // FIXME: instead of relying on shouldEnableVectorizerAtOLevel, we may want to
- // implement a separate function to infer loop interchange from opt level.
- // For now, enable loop-interchange at the same opt levels as loop-vectorize.
- bool EnableInterchange = shouldEnableVectorizerAtOLevel(Args, false);
- OptSpecifier InterchangeAliasOption =
- EnableInterchange ? options::OPT_O_Group : options::OPT_floop_interchange;
- if (Args.hasFlag(options::OPT_floop_interchange, InterchangeAliasOption,
- options::OPT_fno_loop_interchange, EnableInterchange))
+ if (Args.hasFlag(options::OPT_floop_interchange,
+ options::OPT_fno_loop_interchange, false))
CmdArgs.push_back("-floop-interchange");
}
diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp
index fdfcea8..327cb51 100644
--- a/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -611,9 +611,12 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(
"--pxtas-path=" + Args.getLastArgValue(options::OPT_ptxas_path_EQ)));
- if (Args.hasArg(options::OPT_cuda_path_EQ))
- CmdArgs.push_back(Args.MakeArgString(
- "--cuda-path=" + Args.getLastArgValue(options::OPT_cuda_path_EQ)));
+ if (Args.hasArg(options::OPT_cuda_path_EQ) || TC.CudaInstallation.isValid()) {
+ StringRef CudaPath = Args.getLastArgValue(
+ options::OPT_cuda_path_EQ,
+ llvm::sys::path::parent_path(TC.CudaInstallation.getBinPath()));
+ CmdArgs.push_back(Args.MakeArgString("--cuda-path=" + CudaPath));
+ }
// Add paths specified in LIBRARY_PATH environment variable as -L options.
addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");
diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
index e5075cb..234683f 100644
--- a/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -3187,28 +3187,46 @@ void MachO::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
ToolChain::addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadKind);
- // On arm64e, enable pointer authentication (for the return address and
- // indirect calls), as well as usage of the intrinsics.
- if (getArchName() == "arm64e") {
+ // On arm64e, we enable all the features required for the Darwin userspace
+ // ABI
+ if (getTriple().isArm64e()) {
+ // Core platform ABI
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_calls,
+ options::OPT_fno_ptrauth_calls))
+ CC1Args.push_back("-fptrauth-calls");
if (!DriverArgs.hasArg(options::OPT_fptrauth_returns,
options::OPT_fno_ptrauth_returns))
CC1Args.push_back("-fptrauth-returns");
-
if (!DriverArgs.hasArg(options::OPT_fptrauth_intrinsics,
options::OPT_fno_ptrauth_intrinsics))
CC1Args.push_back("-fptrauth-intrinsics");
-
- if (!DriverArgs.hasArg(options::OPT_fptrauth_calls,
- options::OPT_fno_ptrauth_calls))
- CC1Args.push_back("-fptrauth-calls");
-
if (!DriverArgs.hasArg(options::OPT_fptrauth_indirect_gotos,
options::OPT_fno_ptrauth_indirect_gotos))
CC1Args.push_back("-fptrauth-indirect-gotos");
-
if (!DriverArgs.hasArg(options::OPT_fptrauth_auth_traps,
options::OPT_fno_ptrauth_auth_traps))
CC1Args.push_back("-fptrauth-auth-traps");
+
+ // C++ v-table ABI
+ if (!DriverArgs.hasArg(
+ options::OPT_fptrauth_vtable_pointer_address_discrimination,
+ options::OPT_fno_ptrauth_vtable_pointer_address_discrimination))
+ CC1Args.push_back("-fptrauth-vtable-pointer-address-discrimination");
+ if (!DriverArgs.hasArg(
+ options::OPT_fptrauth_vtable_pointer_type_discrimination,
+ options::OPT_fno_ptrauth_vtable_pointer_type_discrimination))
+ CC1Args.push_back("-fptrauth-vtable-pointer-type-discrimination");
+
+ // Objective-C ABI
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_objc_isa,
+ options::OPT_fno_ptrauth_objc_isa))
+ CC1Args.push_back("-fptrauth-objc-isa");
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_objc_class_ro,
+ options::OPT_fno_ptrauth_objc_class_ro))
+ CC1Args.push_back("-fptrauth-objc-class-ro");
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_objc_interface_sel,
+ options::OPT_fno_ptrauth_objc_interface_sel))
+ CC1Args.push_back("-fptrauth-objc-interface-sel");
}
}
diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp
index 7ab41e9..1535f4c 100644
--- a/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/clang/lib/Driver/ToolChains/Flang.cpp
@@ -178,6 +178,8 @@ void Flang::addCodegenOptions(const ArgList &Args,
options::OPT_fstack_repack_arrays, options::OPT_fno_stack_repack_arrays,
options::OPT_ftime_report, options::OPT_ftime_report_EQ,
options::OPT_funroll_loops, options::OPT_fno_unroll_loops});
+ if (Args.hasArg(clang::driver::options::OPT_fcoarray))
+ CmdArgs.push_back("-fcoarray");
}
void Flang::addPicOptions(const ArgList &Args, ArgStringList &CmdArgs) const {
@@ -532,7 +534,14 @@ void Flang::addTargetOptions(const ArgList &Args,
}
Args.addAllArgs(CmdArgs,
- {options::OPT_fverbose_asm, options::OPT_fno_verbose_asm});
+ {options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
+ options::OPT_fatomic_ignore_denormal_mode,
+ options::OPT_fno_atomic_ignore_denormal_mode,
+ options::OPT_fatomic_fine_grained_memory,
+ options::OPT_fno_atomic_fine_grained_memory,
+ options::OPT_fatomic_remote_memory,
+ options::OPT_fno_atomic_remote_memory,
+ options::OPT_munsafe_fp_atomics});
}
void Flang::addOffloadOptions(Compilation &C, const InputInfoList &Inputs,
@@ -937,6 +946,8 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_fopenmp_force_usm))
CmdArgs.push_back("-fopenmp-force-usm");
+ Args.AddLastArg(CmdArgs, options::OPT_fopenmp_simd,
+ options::OPT_fno_openmp_simd);
// FIXME: Clang supports a whole bunch more flags here.
break;
@@ -952,6 +963,9 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
<< A->getSpelling() << A->getValue();
break;
}
+ } else {
+ Args.AddLastArg(CmdArgs, options::OPT_fopenmp_simd,
+ options::OPT_fno_openmp_simd);
}
// Pass the path to compiler resource files.
diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp
index f5e2655..3dade2b 100644
--- a/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -219,18 +219,6 @@ void tools::gcc::Linker::RenderExtraToolArgs(const JobAction &JA,
// The types are (hopefully) good enough.
}
-static bool getStaticPIE(const ArgList &Args, const ToolChain &TC) {
- bool HasStaticPIE = Args.hasArg(options::OPT_static_pie);
- if (HasStaticPIE && Args.hasArg(options::OPT_no_pie)) {
- const Driver &D = TC.getDriver();
- const llvm::opt::OptTable &Opts = D.getOpts();
- StringRef StaticPIEName = Opts.getOptionName(options::OPT_static_pie);
- StringRef NoPIEName = Opts.getOptionName(options::OPT_nopie);
- D.Diag(diag::err_drv_cannot_mix_options) << StaticPIEName << NoPIEName;
- }
- return HasStaticPIE;
-}
-
static bool getStatic(const ArgList &Args) {
return Args.hasArg(options::OPT_static) &&
!Args.hasArg(options::OPT_static_pie);
@@ -2135,10 +2123,11 @@ void Generic_GCC::GCCInstallationDetector::init(
StringRef TripleText =
llvm::sys::path::filename(llvm::sys::path::parent_path(InstallDir));
- Version = GCCVersion::Parse(VersionText);
- GCCTriple.setTriple(TripleText);
- GCCInstallPath = std::string(InstallDir);
- GCCParentLibPath = GCCInstallPath + "/../../..";
+ SelectedInstallation.Version = GCCVersion::Parse(VersionText);
+ SelectedInstallation.GCCTriple.setTriple(TripleText);
+ SelectedInstallation.GCCInstallPath = std::string(InstallDir);
+ SelectedInstallation.GCCParentLibPath =
+ SelectedInstallation.GCCInstallPath + "/../../..";
IsValid = true;
}
return;
@@ -2198,7 +2187,7 @@ void Generic_GCC::GCCInstallationDetector::init(
// Loop over the various components which exist and select the best GCC
// installation available. GCC installs are ranked by version number.
const GCCVersion VersionZero = GCCVersion::Parse("0.0.0");
- Version = VersionZero;
+ SelectedInstallation.Version = VersionZero;
for (const std::string &Prefix : Prefixes) {
auto &VFS = D.getVFS();
if (!VFS.exists(Prefix))
@@ -2226,7 +2215,7 @@ void Generic_GCC::GCCInstallationDetector::init(
}
// Skip other prefixes once a GCC installation is found.
- if (Version > VersionZero)
+ if (SelectedInstallation.Version > VersionZero)
break;
}
}
@@ -2235,14 +2224,17 @@ void Generic_GCC::GCCInstallationDetector::print(raw_ostream &OS) const {
for (const auto &InstallPath : CandidateGCCInstallPaths)
OS << "Found candidate GCC installation: " << InstallPath << "\n";
- if (!GCCInstallPath.empty())
- OS << "Selected GCC installation: " << GCCInstallPath << "\n";
+ if (!SelectedInstallation.GCCInstallPath.empty())
+ OS << "Selected GCC installation: " << SelectedInstallation.GCCInstallPath
+ << "\n";
for (const auto &Multilib : Multilibs)
OS << "Candidate multilib: " << Multilib << "\n";
- if (Multilibs.size() != 0 || !SelectedMultilib.isDefault())
- OS << "Selected multilib: " << SelectedMultilib << "\n";
+ if (Multilibs.size() != 0 ||
+ !SelectedInstallation.SelectedMultilib.isDefault())
+ OS << "Selected multilib: " << SelectedInstallation.SelectedMultilib
+ << "\n";
}
bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
@@ -2780,14 +2772,50 @@ bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs(
}
Multilibs = Detected.Multilibs;
- SelectedMultilib = Detected.SelectedMultilibs.empty()
- ? Multilib()
- : Detected.SelectedMultilibs.back();
+ SelectedInstallation.SelectedMultilib =
+ Detected.SelectedMultilibs.empty() ? Multilib()
+ : Detected.SelectedMultilibs.back();
BiarchSibling = Detected.BiarchSibling;
return true;
}
+bool Generic_GCC::GCCInstallationDetector::SelectGCCInstallationDirectory(
+ const SmallVector<Generic_GCC::GCCInstallCandidate, 3> &Installations,
+ const ArgList &Args,
+ Generic_GCC::GCCInstallCandidate &SelectedInstallation) const {
+ if (Installations.empty())
+ return false;
+
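+  // Pick the newest installation by GCC version number.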
+ SelectedInstallation =
+ *max_element(Installations, [](const auto &Max, const auto &I) {
+ return I.Version > Max.Version;
+ });
+
+ // FIXME Start selecting installation with libstdc++ in clang 22,
+ // using the current way of selecting the installation as a fallback
+ // only. For now, warn if the installation with libstdc++ differs
+ // from SelectedInstallation.
+ const GCCInstallCandidate *InstallWithIncludes = nullptr;
+ for (const auto &I : Installations) {
+ if ((!InstallWithIncludes || I.Version > InstallWithIncludes->Version) &&
+ GCCInstallationHasLibStdcxxIncludePaths(I, Args))
+ InstallWithIncludes = &I;
+ }
+
+ if (InstallWithIncludes && SelectedInstallation.GCCInstallPath !=
+ InstallWithIncludes->GCCInstallPath)
+ D.Diag(diag::warn_drv_gcc_install_dir_libstdcxx)
+ << InstallWithIncludes->GCCInstallPath
+ << SelectedInstallation.GCCInstallPath;
+
+ // TODO Warn if SelectedInstallation does not contain libstdc++ includes
+ // although compiler flags indicate that it is required (C++ compilation,
+ // libstdc++ not explicitly disabled).
+
+ return true;
+}
+
void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
const llvm::Triple &TargetTriple, const ArgList &Args,
const std::string &LibDir, StringRef CandidateTriple,
@@ -2817,6 +2845,7 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
TargetTriple.getVendor() == llvm::Triple::Freescale ||
TargetTriple.getVendor() == llvm::Triple::OpenEmbedded}};
+ SmallVector<GCCInstallCandidate, 3> Installations;
for (auto &Suffix : Suffixes) {
if (!Suffix.Active)
continue;
@@ -2834,23 +2863,31 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
continue; // Saw this path before; no need to look at it again.
if (CandidateVersion.isOlderThan(4, 1, 1))
continue;
- if (CandidateVersion <= Version)
+ if (CandidateVersion <= SelectedInstallation.Version && IsValid)
continue;
if (!ScanGCCForMultilibs(TargetTriple, Args, LI->path(),
NeedsBiarchSuffix))
continue;
- Version = CandidateVersion;
- GCCTriple.setTriple(CandidateTriple);
+ GCCInstallCandidate Installation;
+ Installation.Version = CandidateVersion;
+ Installation.GCCTriple.setTriple(CandidateTriple);
// FIXME: We hack together the directory name here instead of
// using LI to ensure stable path separators across Windows and
// Linux.
- GCCInstallPath = (LibDir + "/" + LibSuffix + "/" + VersionText).str();
- GCCParentLibPath = (GCCInstallPath + "/../" + Suffix.ReversePath).str();
- IsValid = true;
+ Installation.GCCInstallPath =
+ (LibDir + "/" + LibSuffix + "/" + VersionText).str();
+ Installation.GCCParentLibPath =
+ (Installation.GCCInstallPath + "/../" + Suffix.ReversePath).str();
+ Installation.SelectedMultilib = getMultilib();
+
+ Installations.push_back(Installation);
}
}
+
+ IsValid |=
+ SelectGCCInstallationDirectory(Installations, Args, SelectedInstallation);
}
bool Generic_GCC::GCCInstallationDetector::ScanGentooConfigs(
@@ -2928,10 +2965,12 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
NeedsBiarchSuffix))
continue;
- Version = GCCVersion::Parse(ActiveVersion.second);
- GCCInstallPath = GentooPath;
- GCCParentLibPath = GentooPath + std::string("/../../..");
- GCCTriple.setTriple(ActiveVersion.first);
+ SelectedInstallation.Version =
+ GCCVersion::Parse(ActiveVersion.second);
+ SelectedInstallation.GCCInstallPath = GentooPath;
+ SelectedInstallation.GCCParentLibPath =
+ GentooPath + std::string("/../../..");
+ SelectedInstallation.GCCTriple.setTriple(ActiveVersion.first);
IsValid = true;
return true;
}
@@ -3134,8 +3173,9 @@ void Generic_GCC::AddMultilibIncludeArgs(const ArgList &DriverArgs,
// gcc TOOL_INCLUDE_DIR.
const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
std::string LibPath(GCCInstallation.getParentLibPath());
- addSystemInclude(DriverArgs, CC1Args,
- Twine(LibPath) + "/../" + GCCTriple.str() + "/include");
+ ToolChain::addSystemInclude(DriverArgs, CC1Args,
+ Twine(LibPath) + "/../" + GCCTriple.str() +
+ "/include");
const auto &Callback = Multilibs.includeDirsCallback();
if (Callback) {
@@ -3222,12 +3262,14 @@ Generic_GCC::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
return;
}
-bool Generic_GCC::addLibStdCXXIncludePaths(Twine IncludeDir, StringRef Triple,
- Twine IncludeSuffix,
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- bool DetectDebian) const {
- if (!getVFS().exists(IncludeDir))
+static bool addLibStdCXXIncludePaths(llvm::vfs::FileSystem &vfs,
+ Twine IncludeDir, StringRef Triple,
+ Twine IncludeSuffix,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ bool DetectDebian = false) {
+
+ if (!vfs.exists(IncludeDir))
return false;
// Debian native gcc uses g++-multiarch-incdir.diff which uses
@@ -3239,39 +3281,48 @@ bool Generic_GCC::addLibStdCXXIncludePaths(Twine IncludeDir, StringRef Triple,
std::string Path =
(Include + "/" + Triple + Dir.substr(Include.size()) + IncludeSuffix)
.str();
- if (DetectDebian && !getVFS().exists(Path))
+ if (DetectDebian && !vfs.exists(Path))
return false;
// GPLUSPLUS_INCLUDE_DIR
- addSystemInclude(DriverArgs, CC1Args, IncludeDir);
+ ToolChain::addSystemInclude(DriverArgs, CC1Args, IncludeDir);
// GPLUSPLUS_TOOL_INCLUDE_DIR. If Triple is not empty, add a target-dependent
// include directory.
if (DetectDebian)
- addSystemInclude(DriverArgs, CC1Args, Path);
+ ToolChain::addSystemInclude(DriverArgs, CC1Args, Path);
else if (!Triple.empty())
- addSystemInclude(DriverArgs, CC1Args,
- IncludeDir + "/" + Triple + IncludeSuffix);
+ ToolChain::addSystemInclude(DriverArgs, CC1Args,
+ IncludeDir + "/" + Triple + IncludeSuffix);
// GPLUSPLUS_BACKWARD_INCLUDE_DIR
- addSystemInclude(DriverArgs, CC1Args, IncludeDir + "/backward");
+ ToolChain::addSystemInclude(DriverArgs, CC1Args, IncludeDir + "/backward");
return true;
}
-bool Generic_GCC::addGCCLibStdCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
- StringRef DebianMultiarch) const {
- assert(GCCInstallation.isValid());
+bool Generic_GCC::addLibStdCXXIncludePaths(Twine IncludeDir, StringRef Triple,
+ Twine IncludeSuffix,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ bool DetectDebian) const {
+ return ::addLibStdCXXIncludePaths(getVFS(), IncludeDir, Triple, IncludeSuffix,
+ DriverArgs, CC1Args, DetectDebian);
+}
+
+bool Generic_GCC::GCCInstallCandidate::addGCCLibStdCxxIncludePaths(
+ llvm::vfs::FileSystem &vfs, const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args, StringRef DebianMultiarch) const {
// By default, look for the C++ headers in an include directory adjacent to
// the lib directory of the GCC installation. Note that this is expected to be
// equivalent to '/usr/include/c++/X.Y' in almost all cases.
- StringRef LibDir = GCCInstallation.getParentLibPath();
- StringRef InstallDir = GCCInstallation.getInstallPath();
- StringRef TripleStr = GCCInstallation.getTriple().str();
- const Multilib &Multilib = GCCInstallation.getMultilib();
- const GCCVersion &Version = GCCInstallation.getVersion();
+ StringRef LibDir = getParentLibPath();
+ StringRef InstallDir = getInstallPath();
+ StringRef TripleStr = getTriple().str();
+ const Multilib &Multilib = getMultilib();
+ const GCCVersion &Version = getVersion();
// Try /../$triple/include/c++/$version (gcc --print-multiarch is not empty).
- if (addLibStdCXXIncludePaths(
+ if (::addLibStdCXXIncludePaths(
+ vfs,
LibDir.str() + "/../" + TripleStr + "/include/c++/" + Version.Text,
TripleStr, Multilib.includeSuffix(), DriverArgs, CC1Args))
return true;
@@ -3279,22 +3330,24 @@ bool Generic_GCC::addGCCLibStdCxxIncludePaths(
// Try /gcc/$triple/$version/include/c++/ (gcc --print-multiarch is not
// empty). Like above but for GCC built with
// --enable-version-specific-runtime-libs.
- if (addLibStdCXXIncludePaths(LibDir.str() + "/gcc/" + TripleStr + "/" +
- Version.Text + "/include/c++/",
- TripleStr, Multilib.includeSuffix(), DriverArgs,
- CC1Args))
+ if (::addLibStdCXXIncludePaths(vfs,
+ LibDir.str() + "/gcc/" + TripleStr + "/" +
+ Version.Text + "/include/c++/",
+ TripleStr, Multilib.includeSuffix(),
+ DriverArgs, CC1Args))
return true;
// Detect Debian g++-multiarch-incdir.diff.
- if (addLibStdCXXIncludePaths(LibDir.str() + "/../include/c++/" + Version.Text,
- DebianMultiarch, Multilib.includeSuffix(),
- DriverArgs, CC1Args, /*Debian=*/true))
+ if (::addLibStdCXXIncludePaths(
+ vfs, LibDir.str() + "/../include/c++/" + Version.Text,
+ DebianMultiarch, Multilib.includeSuffix(), DriverArgs, CC1Args,
+ /*Debian=*/true))
return true;
// Try /../include/c++/$version (gcc --print-multiarch is empty).
- if (addLibStdCXXIncludePaths(LibDir.str() + "/../include/c++/" + Version.Text,
- TripleStr, Multilib.includeSuffix(), DriverArgs,
- CC1Args))
+ if (::addLibStdCXXIncludePaths(
+ vfs, LibDir.str() + "/../include/c++/" + Version.Text, TripleStr,
+ Multilib.includeSuffix(), DriverArgs, CC1Args))
return true;
// Otherwise, fall back on a bunch of options which don't use multiarch
@@ -3309,20 +3362,50 @@ bool Generic_GCC::addGCCLibStdCxxIncludePaths(
};
for (const auto &IncludePath : LibStdCXXIncludePathCandidates) {
- if (addLibStdCXXIncludePaths(IncludePath, TripleStr,
- Multilib.includeSuffix(), DriverArgs, CC1Args))
+ if (::addLibStdCXXIncludePaths(vfs, IncludePath, TripleStr,
+ Multilib.includeSuffix(), DriverArgs,
+ CC1Args))
return true;
}
return false;
}
+bool Generic_GCC::GCCInstallationDetector::
+ GCCInstallationHasLibStdcxxIncludePaths(
+ const GCCInstallCandidate &GCCInstallation,
+ const llvm::opt::ArgList &DriverArgs) const {
+ StringRef DebianMultiarch =
+ TripleToDebianMultiarch(GCCInstallation.getTriple());
+
+ // The following function checks for libstdc++ include paths and
+ // adds them to the provided argument list. Here we just need the
+ // check.
+ llvm::opt::ArgStringList dummyCC1Args;
+ return GCCInstallation.addGCCLibStdCxxIncludePaths(
+ D.getVFS(), DriverArgs, dummyCC1Args, DebianMultiarch);
+}
+
+bool Generic_GCC::addGCCLibStdCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ assert(GCCInstallation.isValid());
+
+ // Detect Debian g++-multiarch-incdir.diff.
+ StringRef DebianMultiarch =
+ GCCInstallation.TripleToDebianMultiarch(GCCInstallation.getTriple());
+
+ return GCCInstallation.getSelectedInstallation().addGCCLibStdCxxIncludePaths(
+ getVFS(), DriverArgs, CC1Args, DebianMultiarch);
+}
+
void
Generic_GCC::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
- if (GCCInstallation.isValid()) {
- addGCCLibStdCxxIncludePaths(DriverArgs, CC1Args,
- GCCInstallation.getTriple().str());
- }
+ if (!GCCInstallation.isValid())
+ return;
+
+ GCCInstallation.getSelectedInstallation().addGCCLibStdCxxIncludePaths(
+ getVFS(), DriverArgs, CC1Args, GCCInstallation.getTriple().str());
}
llvm::opt::DerivedArgList *
diff --git a/clang/lib/Driver/ToolChains/Gnu.h b/clang/lib/Driver/ToolChains/Gnu.h
index 4c42a5e5..5fe143b 100644
--- a/clang/lib/Driver/ToolChains/Gnu.h
+++ b/clang/lib/Driver/ToolChains/Gnu.h
@@ -184,6 +184,39 @@ public:
bool operator>=(const GCCVersion &RHS) const { return !(*this < RHS); }
};
+ struct GCCInstallCandidate {
+ // FIXME: These might be better as path objects.
+ std::string GCCInstallPath;
+ std::string GCCParentLibPath;
+
+ llvm::Triple GCCTriple;
+
+ /// The primary multilib appropriate for the given flags.
+ Multilib SelectedMultilib;
+
+ GCCVersion Version;
+
+ /// Get the GCC triple for the detected install.
+ const llvm::Triple &getTriple() const { return GCCTriple; }
+
+ /// Get the detected GCC installation path.
+ StringRef getInstallPath() const { return GCCInstallPath; }
+
+ /// Get the detected GCC parent lib path.
+ StringRef getParentLibPath() const { return GCCParentLibPath; }
+
+ /// Get the detected Multilib
+ const Multilib &getMultilib() const { return SelectedMultilib; }
+
+ /// Get the detected GCC version string.
+ const GCCVersion &getVersion() const { return Version; }
+
+ bool addGCCLibStdCxxIncludePaths(llvm::vfs::FileSystem &vfs,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ StringRef DebianMultiarch) const;
+ };
+
/// This is a class to find a viable GCC installation for Clang to
/// use.
///
@@ -192,21 +225,15 @@ public:
/// Driver, and has logic for fuzzing that where appropriate.
class GCCInstallationDetector {
bool IsValid;
- llvm::Triple GCCTriple;
+
const Driver &D;
- // FIXME: These might be better as path objects.
- std::string GCCInstallPath;
- std::string GCCParentLibPath;
+ GCCInstallCandidate SelectedInstallation;
- /// The primary multilib appropriate for the given flags.
- Multilib SelectedMultilib;
/// On Biarch systems, this corresponds to the default multilib when
/// targeting the non-default multilib. Otherwise, it is empty.
std::optional<Multilib> BiarchSibling;
- GCCVersion Version;
-
// We retain the list of install paths that were considered and rejected in
// order to print out detailed information in verbose mode.
std::set<std::string> CandidateGCCInstallPaths;
@@ -218,23 +245,50 @@ public:
const std::string GentooConfigDir = "/etc/env.d/gcc";
public:
+ /// Function for converting a triple to a Debian multiarch. The
+    /// toolchains use this to adjust the target-specific component of
+ /// include paths for Debian.
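+    /// By default the triple string itself is used; toolchains may install a
+    /// different mapping where Debian's multiarch name differs from the
+    /// triple.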
+ std::function<StringRef(const llvm::Triple &)> TripleToDebianMultiarch =
+ [](const llvm::Triple &T) {
+ StringRef S = T.str();
+ return S;
+ };
+
explicit GCCInstallationDetector(const Driver &D) : IsValid(false), D(D) {}
+
void init(const llvm::Triple &TargetTriple, const llvm::opt::ArgList &Args);
+    // TODO: Replace isValid by changing SelectedInstallation into
+    // std::optional<GCCInstallCandidate> and move all accessors for fields of
+    // GCCInstallCandidate into that struct.
+
/// Check whether we detected a valid GCC install.
bool isValid() const { return IsValid; }
+ const GCCInstallCandidate &getSelectedInstallation() const {
+ return SelectedInstallation;
+ }
+
/// Get the GCC triple for the detected install.
- const llvm::Triple &getTriple() const { return GCCTriple; }
+ const llvm::Triple &getTriple() const {
+ return SelectedInstallation.GCCTriple;
+ }
/// Get the detected GCC installation path.
- StringRef getInstallPath() const { return GCCInstallPath; }
+ StringRef getInstallPath() const {
+ return SelectedInstallation.GCCInstallPath;
+ }
/// Get the detected GCC parent lib path.
- StringRef getParentLibPath() const { return GCCParentLibPath; }
+ StringRef getParentLibPath() const {
+ return SelectedInstallation.GCCParentLibPath;
+ }
/// Get the detected Multilib
- const Multilib &getMultilib() const { return SelectedMultilib; }
+ const Multilib &getMultilib() const {
+ return SelectedInstallation.SelectedMultilib;
+ }
/// Get the whole MultilibSet
const MultilibSet &getMultilibs() const { return Multilibs; }
@@ -244,7 +298,9 @@ public:
bool getBiarchSibling(Multilib &M) const;
/// Get the detected GCC version string.
- const GCCVersion &getVersion() const { return Version; }
+ const GCCVersion &getVersion() const {
+ return SelectedInstallation.Version;
+ }
/// Print information about the detected GCC installation.
void print(raw_ostream &OS) const;
@@ -262,9 +318,21 @@ public:
SmallVectorImpl<std::string> &Prefixes,
StringRef SysRoot);
+ /// Checks if the \p GCCInstallation has libstdc++ include
+ /// directories.
+ bool GCCInstallationHasLibStdcxxIncludePaths(
+ const GCCInstallCandidate &GCCInstallation,
+ const llvm::opt::ArgList &DriverArgs) const;
+
+ /// Select a GCC installation directory from \p Installations and
+ /// set \p SelectedInstallation accordingly.
+ bool SelectGCCInstallationDirectory(
+ const SmallVector<GCCInstallCandidate, 3> &Installations,
+ const llvm::opt::ArgList &Args,
+ GCCInstallCandidate &SelectedInstallation) const;
+
bool ScanGCCForMultilibs(const llvm::Triple &TargetTriple,
- const llvm::opt::ArgList &Args,
- StringRef Path,
+ const llvm::opt::ArgList &Args, StringRef Path,
bool NeedsBiarchSuffix = false);
void ScanLibDirForGCCTriple(const llvm::Triple &TargetArch,
@@ -349,8 +417,7 @@ protected:
llvm::opt::ArgStringList &CC1Args) const;
bool addGCCLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- StringRef DebianMultiarch) const;
+                                   llvm::opt::ArgStringList &CC1Args) const;
bool addLibStdCXXIncludePaths(Twine IncludeDir, StringRef Triple,
Twine IncludeSuffix,
diff --git a/clang/lib/Driver/ToolChains/HLSL.cpp b/clang/lib/Driver/ToolChains/HLSL.cpp
index 38f4643a..6606619 100644
--- a/clang/lib/Driver/ToolChains/HLSL.cpp
+++ b/clang/lib/Driver/ToolChains/HLSL.cpp
@@ -173,24 +173,71 @@ bool isLegalValidatorVersion(StringRef ValVersionStr, const Driver &D) {
return true;
}
-std::string getSpirvExtArg(ArrayRef<std::string> SpvExtensionArgs) {
+void getSpirvExtOperand(StringRef SpvExtensionArg, raw_ostream &out) {
+ // The extensions that are commented out are supported in DXC, but the SPIR-V
+ // backend does not know about them yet.
+ static const std::vector<StringRef> DxcSupportedExtensions = {
+ "SPV_KHR_16bit_storage", "SPV_KHR_device_group",
+ "SPV_KHR_fragment_shading_rate", "SPV_KHR_multiview",
+ "SPV_KHR_post_depth_coverage", "SPV_KHR_non_semantic_info",
+ "SPV_KHR_shader_draw_parameters", "SPV_KHR_ray_tracing",
+ "SPV_KHR_shader_clock", "SPV_EXT_demote_to_helper_invocation",
+ "SPV_EXT_descriptor_indexing", "SPV_EXT_fragment_fully_covered",
+ "SPV_EXT_fragment_invocation_density",
+ "SPV_EXT_fragment_shader_interlock", "SPV_EXT_mesh_shader",
+ "SPV_EXT_shader_stencil_export", "SPV_EXT_shader_viewport_index_layer",
+ // "SPV_AMD_shader_early_and_late_fragment_tests",
+ "SPV_GOOGLE_hlsl_functionality1", "SPV_GOOGLE_user_type",
+ "SPV_KHR_ray_query", "SPV_EXT_shader_image_int64",
+ "SPV_KHR_fragment_shader_barycentric", "SPV_KHR_physical_storage_buffer",
+ "SPV_KHR_vulkan_memory_model",
+ // "SPV_KHR_compute_shader_derivatives",
+ // "SPV_KHR_maximal_reconvergence",
+ "SPV_KHR_float_controls", "SPV_NV_shader_subgroup_partitioned",
+ // "SPV_KHR_quad_control"
+ };
+
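+  // Extensions named explicitly ("SPV_*") are forwarded with a '+' prefix;
+  // the special value "DXC" (case-insensitive) expands to every extension DXC
+  // supports; anything else (e.g. "KHR") is passed through unchanged.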
+ if (SpvExtensionArg.starts_with("SPV_")) {
+ out << "+" << SpvExtensionArg;
+ return;
+ }
+
+ if (SpvExtensionArg.compare_insensitive("DXC") == 0) {
+ bool first = true;
+ for (StringRef E : DxcSupportedExtensions) {
+ if (!first)
+ out << ",";
+ else
+ first = false;
+ out << "+" << E;
+ }
+ return;
+ }
+ out << SpvExtensionArg;
+}
+
+SmallString<1024> getSpirvExtArg(ArrayRef<std::string> SpvExtensionArgs) {
if (SpvExtensionArgs.empty()) {
- return "-spirv-ext=all";
+ return StringRef("-spirv-ext=all");
}
- std::string LlvmOption =
- (Twine("-spirv-ext=+") + SpvExtensionArgs.front()).str();
+ llvm::SmallString<1024> LlvmOption;
+ raw_svector_ostream out(LlvmOption);
+
+ out << "-spirv-ext=";
+ getSpirvExtOperand(SpvExtensionArgs[0], out);
+
SpvExtensionArgs = SpvExtensionArgs.slice(1);
- for (auto Extension : SpvExtensionArgs) {
- if (Extension != "KHR")
- Extension = (Twine("+") + Extension).str();
- LlvmOption = (Twine(LlvmOption) + "," + Extension).str();
+ for (StringRef Extension : SpvExtensionArgs) {
+ out << ",";
+ getSpirvExtOperand(Extension, out);
}
return LlvmOption;
}
bool isValidSPIRVExtensionName(const std::string &str) {
- std::regex pattern("KHR|SPV_[a-zA-Z0-9_]+");
+ std::regex pattern("dxc|DXC|khr|KHR|SPV_[a-zA-Z0-9_]+");
return std::regex_match(str, pattern);
}
@@ -247,6 +294,32 @@ void tools::hlsl::MetalConverter::ConstructJob(
Exec, CmdArgs, Inputs, Input));
}
+void tools::hlsl::LLVMObjcopy::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+
+ std::string ObjcopyPath = getToolChain().GetProgramPath("llvm-objcopy");
+ const char *Exec = Args.MakeArgString(ObjcopyPath);
+
+ ArgStringList CmdArgs;
+ assert(Inputs.size() == 1 && "Unable to handle multiple inputs.");
+ const InputInfo &Input = Inputs[0];
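+  // llvm-objcopy is invoked as `llvm-objcopy <input> <output> [options]`.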
+ CmdArgs.push_back(Input.getFilename());
+ CmdArgs.push_back(Output.getFilename());
+
+ if (Args.hasArg(options::OPT_dxc_strip_rootsignature)) {
+ const char *Frs = Args.MakeArgString("--remove-section=RTS0");
+ CmdArgs.push_back(Frs);
+ }
+
+ assert(CmdArgs.size() > 2 && "Unnecessary invocation of objcopy.");
+
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs, Input));
+}
+
/// DirectX Toolchain
HLSLToolChain::HLSLToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
@@ -267,6 +340,10 @@ Tool *clang::driver::toolchains::HLSLToolChain::getTool(
if (!MetalConverter)
MetalConverter.reset(new tools::hlsl::MetalConverter(*this));
return MetalConverter.get();
+ case Action::ObjcopyJobClass:
+ if (!LLVMObjcopy)
+ LLVMObjcopy.reset(new tools::hlsl::LLVMObjcopy(*this));
+ return LLVMObjcopy.get();
default:
return ToolChain::getTool(AC);
}
@@ -304,6 +381,13 @@ HLSLToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
A->claim();
continue;
}
+ if (A->getOption().getID() == options::OPT_dxc_rootsig_define) {
+ DAL->AddJoinedArg(nullptr,
+ Opts.getOption(options::OPT_fdx_rootsignature_define),
+ A->getValue());
+ A->claim();
+ continue;
+ }
if (A->getOption().getID() == options::OPT__SLASH_O) {
StringRef OStr = A->getValue();
if (OStr == "d") {
@@ -371,7 +455,7 @@ HLSLToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
std::vector<std::string> SpvExtensionArgs =
Args.getAllArgValues(options::OPT_fspv_extension_EQ);
if (checkExtensionArgsAreValid(SpvExtensionArgs, getDriver())) {
- std::string LlvmOption = getSpirvExtArg(SpvExtensionArgs);
+ SmallString<1024> LlvmOption = getSpirvExtArg(SpvExtensionArgs);
DAL->AddSeparateArg(nullptr, Opts.getOption(options::OPT_mllvm),
LlvmOption);
}
@@ -404,16 +488,22 @@ bool HLSLToolChain::requiresBinaryTranslation(DerivedArgList &Args) const {
return Args.hasArg(options::OPT_metal) && Args.hasArg(options::OPT_dxc_Fo);
}
+bool HLSLToolChain::requiresObjcopy(DerivedArgList &Args) const {
+ return Args.hasArg(options::OPT_dxc_Fo) &&
+ Args.hasArg(options::OPT_dxc_strip_rootsignature);
+}
+
bool HLSLToolChain::isLastJob(DerivedArgList &Args,
Action::ActionClass AC) const {
- bool HasTranslation = requiresBinaryTranslation(Args);
- bool HasValidation = requiresValidation(Args);
- // If translation and validation are not required, we should only have one
- // action.
- if (!HasTranslation && !HasValidation)
- return true;
- if ((HasTranslation && AC == Action::BinaryTranslatorJobClass) ||
- (!HasTranslation && HasValidation && AC == Action::BinaryAnalyzeJobClass))
- return true;
- return false;
+  // Note: we check in the reverse order of execution.
+  if (requiresBinaryTranslation(Args))
+    return AC == Action::BinaryTranslatorJobClass;
+  if (requiresValidation(Args))
+    return AC == Action::BinaryAnalyzeJobClass;
+  if (requiresObjcopy(Args))
+    return AC == Action::ObjcopyJobClass;
+
+  // No translation, validation, or objcopy is required, so this action must
+  // output to the result file.
+ return true;
}
diff --git a/clang/lib/Driver/ToolChains/HLSL.h b/clang/lib/Driver/ToolChains/HLSL.h
index 3824b42..3aed904 100644
--- a/clang/lib/Driver/ToolChains/HLSL.h
+++ b/clang/lib/Driver/ToolChains/HLSL.h
@@ -42,6 +42,20 @@ public:
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
+
+class LLVM_LIBRARY_VISIBILITY LLVMObjcopy : public Tool {
+public:
+ LLVMObjcopy(const ToolChain &TC)
+ : Tool("hlsl::LLVMObjcopy", "llvm-objcopy", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
} // namespace hlsl
} // namespace tools
@@ -65,6 +79,13 @@ public:
static std::optional<std::string> parseTargetProfile(StringRef TargetProfile);
bool requiresValidation(llvm::opt::DerivedArgList &Args) const;
bool requiresBinaryTranslation(llvm::opt::DerivedArgList &Args) const;
+ bool requiresObjcopy(llvm::opt::DerivedArgList &Args) const;
+
+ /// If we are targeting DXIL then the last job should output the DXContainer
+ /// to the specified output file with /Fo. Otherwise, we will emit to a
+ /// temporary file for the next job to use.
+ ///
+ /// Returns true if we should output to the final result file.
bool isLastJob(llvm::opt::DerivedArgList &Args, Action::ActionClass AC) const;
// Set default DWARF version to 4 for DXIL uses version 4.
@@ -73,6 +94,7 @@ public:
private:
mutable std::unique_ptr<tools::hlsl::Validator> Validator;
mutable std::unique_ptr<tools::hlsl::MetalConverter> MetalConverter;
+ mutable std::unique_ptr<tools::hlsl::LLVMObjcopy> LLVMObjcopy;
};
} // end namespace toolchains
diff --git a/clang/lib/Driver/ToolChains/Hurd.cpp b/clang/lib/Driver/ToolChains/Hurd.cpp
index a22a8fa..8bcc7e6 100644
--- a/clang/lib/Driver/ToolChains/Hurd.cpp
+++ b/clang/lib/Driver/ToolChains/Hurd.cpp
@@ -71,6 +71,13 @@ static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
Hurd::Hurd(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
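+  // Debian-style multiarch: x86 Hurd uses the "i386-gnu" directory name.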
+ GCCInstallation.TripleToDebianMultiarch = [](const llvm::Triple &T) {
+ StringRef TripleStr = T.str();
+ StringRef DebianMultiarch =
+ T.getArch() == llvm::Triple::x86 ? "i386-gnu" : TripleStr;
+ return DebianMultiarch;
+ };
+
GCCInstallation.init(Triple, Args);
Multilibs = GCCInstallation.getMultilibs();
SelectedMultilibs.assign({GCCInstallation.getMultilib()});
@@ -207,12 +214,7 @@ void Hurd::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
if (!GCCInstallation.isValid())
return;
- StringRef TripleStr = GCCInstallation.getTriple().str();
- StringRef DebianMultiarch =
- GCCInstallation.getTriple().getArch() == llvm::Triple::x86 ? "i386-gnu"
- : TripleStr;
-
- addGCCLibStdCxxIncludePaths(DriverArgs, CC1Args, DebianMultiarch);
+ addGCCLibStdCxxIncludePaths(DriverArgs, CC1Args);
}
void Hurd::addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const {
diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
index 8ac8d4e..16e35b0 100644
--- a/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
@@ -211,6 +211,13 @@ static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
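+  // Detect Debian g++-multiarch-incdir.diff: x86 headers live under the
+  // "i386-linux-gnu" multiarch directory rather than the full triple.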
+ GCCInstallation.TripleToDebianMultiarch = [](const llvm::Triple &T) {
+ StringRef TripleStr = T.str();
+ StringRef DebianMultiarch =
+ T.getArch() == llvm::Triple::x86 ? "i386-linux-gnu" : TripleStr;
+ return DebianMultiarch;
+ };
+
GCCInstallation.init(Triple, Args);
Multilibs = GCCInstallation.getMultilibs();
SelectedMultilibs.assign({GCCInstallation.getMultilib()});
@@ -693,22 +700,15 @@ void Linux::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
if (!GCCInstallation.isValid())
return;
- // Detect Debian g++-multiarch-incdir.diff.
- StringRef TripleStr = GCCInstallation.getTriple().str();
- StringRef DebianMultiarch =
- GCCInstallation.getTriple().getArch() == llvm::Triple::x86
- ? "i386-linux-gnu"
- : TripleStr;
-
// Try generic GCC detection first.
- if (Generic_GCC::addGCCLibStdCxxIncludePaths(DriverArgs, CC1Args,
- DebianMultiarch))
+ if (Generic_GCC::addGCCLibStdCxxIncludePaths(DriverArgs, CC1Args))
return;
StringRef LibDir = GCCInstallation.getParentLibPath();
const Multilib &Multilib = GCCInstallation.getMultilib();
const GCCVersion &Version = GCCInstallation.getVersion();
+ StringRef TripleStr = GCCInstallation.getTriple().str();
const std::string LibStdCXXIncludePathCandidates[] = {
// Android standalone toolchain has C++ headers in yet another place.
LibDir.str() + "/../" + TripleStr.str() + "/include/c++/" + Version.Text,
diff --git a/clang/lib/Driver/ToolChains/Managarm.cpp b/clang/lib/Driver/ToolChains/Managarm.cpp
index 0f56f0f..da4a9072 100644
--- a/clang/lib/Driver/ToolChains/Managarm.cpp
+++ b/clang/lib/Driver/ToolChains/Managarm.cpp
@@ -193,10 +193,8 @@ void Managarm::addLibStdCxxIncludePaths(
if (!GCCInstallation.isValid())
return;
- StringRef TripleStr = GCCInstallation.getTriple().str();
-
// Try generic GCC detection.
- Generic_GCC::addGCCLibStdCxxIncludePaths(DriverArgs, CC1Args, TripleStr);
+ addGCCLibStdCxxIncludePaths(DriverArgs, CC1Args);
}
SanitizerMask Managarm::getSupportedSanitizers() const {
diff --git a/clang/lib/Driver/ToolChains/OpenBSD.h b/clang/lib/Driver/ToolChains/OpenBSD.h
index 11b873c..ad0f9e6 100644
--- a/clang/lib/Driver/ToolChains/OpenBSD.h
+++ b/clang/lib/Driver/ToolChains/OpenBSD.h
@@ -79,6 +79,11 @@ public:
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
+ bool IsAArch64OutlineAtomicsDefault(
+ const llvm::opt::ArgList &Args) const override {
+ return true;
+ }
+
std::string getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
FileType Type = ToolChain::FT_Static,
bool IsFortran = false) const override;
diff --git a/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
index 627a1d6..40f83482 100644
--- a/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -1011,7 +1011,7 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
if ((MK == NSAPI::NSNumberWithInteger ||
MK == NSAPI::NSNumberWithUnsignedInteger) &&
!isTruncated) {
- if (OrigTy->getAs<EnumType>() || isEnumConstant(OrigArg))
+ if (OrigTy->isEnumeralType() || isEnumConstant(OrigArg))
break;
if ((MK==NSAPI::NSNumberWithInteger) == OrigTy->isSignedIntegerType() &&
OrigTySize >= Ctx.getTypeSize(Ctx.IntTy))
diff --git a/clang/lib/ExtractAPI/DeclarationFragments.cpp b/clang/lib/ExtractAPI/DeclarationFragments.cpp
index 51a6f6b..541af6d 100644
--- a/clang/lib/ExtractAPI/DeclarationFragments.cpp
+++ b/clang/lib/ExtractAPI/DeclarationFragments.cpp
@@ -205,45 +205,39 @@ DeclarationFragments::getStructureTypeFragment(const RecordDecl *Record) {
// Build declaration fragments for NNS recursively so that we have the USR for
// every part in a qualified name, and also leaves the actual underlying type
// cleaner for its own fragment.
-DeclarationFragments
-DeclarationFragmentsBuilder::getFragmentsForNNS(const NestedNameSpecifier *NNS,
- ASTContext &Context,
- DeclarationFragments &After) {
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForNNS(
+ NestedNameSpecifier NNS, ASTContext &Context, DeclarationFragments &After) {
DeclarationFragments Fragments;
- if (NNS->getPrefix())
- Fragments.append(getFragmentsForNNS(NNS->getPrefix(), Context, After));
-
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- Fragments.append(NNS->getAsIdentifier()->getName(),
- DeclarationFragments::FragmentKind::Identifier);
- break;
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ return Fragments;
- case NestedNameSpecifier::Namespace: {
- const NamespaceBaseDecl *NS = NNS->getAsNamespace();
- if (const auto *Namespace = dyn_cast<NamespaceDecl>(NS);
- Namespace && Namespace->isAnonymousNamespace())
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = NNS.getAsNamespaceAndPrefix();
+ Fragments.append(getFragmentsForNNS(Prefix, Context, After));
+ if (const auto *NS = dyn_cast<NamespaceDecl>(Namespace);
+ NS && NS->isAnonymousNamespace())
return Fragments;
SmallString<128> USR;
- index::generateUSRForDecl(NS, USR);
- Fragments.append(NS->getName(),
- DeclarationFragments::FragmentKind::Identifier, USR, NS);
+ index::generateUSRForDecl(Namespace, USR);
+ Fragments.append(Namespace->getName(),
+ DeclarationFragments::FragmentKind::Identifier, USR,
+ Namespace);
break;
}
- case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Kind::Global:
// The global specifier `::` at the beginning. No stored value.
break;
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
// Microsoft's `__super` specifier.
Fragments.append("__super", DeclarationFragments::FragmentKind::Keyword);
break;
- case NestedNameSpecifier::TypeSpec: {
- const Type *T = NNS->getAsType();
+ case NestedNameSpecifier::Kind::Type: {
// FIXME: Handle C++ template specialization type
- Fragments.append(getFragmentsForType(T, Context, After));
+ Fragments.append(getFragmentsForType(NNS.getAsType(), Context, After));
break;
}
}
@@ -273,26 +267,6 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
return Fragments;
}
- // An ElaboratedType is a sugar for types that are referred to using an
- // elaborated keyword, e.g., `struct S`, `enum E`, or (in C++) via a
- // qualified name, e.g., `N::M::type`, or both.
- if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(T)) {
- ElaboratedTypeKeyword Keyword = ET->getKeyword();
- if (Keyword != ElaboratedTypeKeyword::None) {
- Fragments
- .append(ElaboratedType::getKeywordName(Keyword),
- DeclarationFragments::FragmentKind::Keyword)
- .appendSpace();
- }
-
- if (const NestedNameSpecifier *NNS = ET->getQualifier())
- Fragments.append(getFragmentsForNNS(NNS, Context, After));
-
- // After handling the elaborated keyword or qualified name, build
- // declaration fragments for the desugared underlying type.
- return Fragments.append(getFragmentsForType(ET->desugar(), Context, After));
- }
-
// If the type is a typedefed type, get the underlying TypedefNameDecl for a
// direct reference to the typedef instead of the wrapped type.
@@ -303,7 +277,18 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
TypedefUnderlyingTypeResolver TypedefResolver(Context);
std::string USR = TypedefResolver.getUSRForType(QualType(T, 0));
- if (T->isObjCIdType()) {
+ if (ElaboratedTypeKeyword Keyword = TypedefTy->getKeyword();
+ Keyword != ElaboratedTypeKeyword::None) {
+ Fragments
+ .append(KeywordHelpers::getKeywordName(Keyword),
+ DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ }
+
+ Fragments.append(
+ getFragmentsForNNS(TypedefTy->getQualifier(), Context, After));
+
+ if (TypedefTy->isObjCIdType()) {
return Fragments.append(Decl->getName(),
DeclarationFragments::FragmentKind::Keyword);
}
@@ -396,14 +381,26 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
if (const TemplateSpecializationType *TemplSpecTy =
dyn_cast<TemplateSpecializationType>(T)) {
- const auto TemplName = TemplSpecTy->getTemplateName();
+ if (ElaboratedTypeKeyword Keyword = TemplSpecTy->getKeyword();
+ Keyword != ElaboratedTypeKeyword::None)
+ Fragments
+ .append(KeywordHelpers::getKeywordName(Keyword),
+ DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+
+ auto TemplName = TemplSpecTy->getTemplateName();
std::string Str;
raw_string_ostream Stream(Str);
TemplName.print(Stream, Context.getPrintingPolicy(),
TemplateName::Qualified::AsWritten);
SmallString<64> USR("");
+ if (const auto *QTN = TemplName.getAsQualifiedTemplateName()) {
+ Fragments.append(getFragmentsForNNS(QTN->getQualifier(), Context, After));
+ TemplName = QTN->getUnderlyingTemplate();
+ }
if (const auto *TemplDecl = TemplName.getAsTemplateDecl())
index::generateUSRForDecl(TemplDecl, USR);
+ // FIXME: Handle other kinds of TemplateNames.
return Fragments
.append(Str, DeclarationFragments::FragmentKind::TypeIdentifier, USR)
@@ -413,14 +410,19 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
.append(">", DeclarationFragments::FragmentKind::Text);
}
- // Everything we care about has been handled now, reduce to the canonical
- // unqualified base type.
- QualType Base = T->getCanonicalTypeUnqualified();
-
// If the base type is a TagType (struct/interface/union/class/enum), let's
// get the underlying Decl for better names and USRs.
- if (const TagType *TagTy = dyn_cast<TagType>(Base)) {
- const TagDecl *Decl = TagTy->getDecl();
+ if (const TagType *TagTy = dyn_cast<TagType>(T)) {
+ if (ElaboratedTypeKeyword Keyword = TagTy->getKeyword();
+ Keyword != ElaboratedTypeKeyword::None)
+ Fragments
+ .append(KeywordHelpers::getKeywordName(Keyword),
+ DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+
+ Fragments.append(getFragmentsForNNS(TagTy->getQualifier(), Context, After));
+
+ const TagDecl *Decl = TagTy->getOriginalDecl();
// Anonymous decl, skip this fragment.
if (Decl->getName().empty())
return Fragments.append("{ ... }",
@@ -432,6 +434,10 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
TagUSR, Decl);
}
+ // Everything we care about has been handled now, reduce to the canonical
+ // unqualified base type.
+ QualType Base = T->getCanonicalTypeUnqualified();
+
// If the base type is an ObjCInterfaceType, use the underlying
// ObjCInterfaceDecl for the true USR.
if (const auto *ObjCIT = dyn_cast<ObjCInterfaceType>(Base)) {
diff --git a/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp b/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
index 41e4e0c..5adbbc6 100644
--- a/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
+++ b/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
@@ -26,7 +26,7 @@ TypedefUnderlyingTypeResolver::getUnderlyingTypeDecl(QualType Type) const {
if (TypedefTy)
TypeDecl = TypedefTy->getDecl();
if (const TagType *TagTy = Type->getAs<TagType>()) {
- TypeDecl = TagTy->getDecl();
+ TypeDecl = TagTy->getOriginalDecl();
} else if (const ObjCInterfaceType *ObjCITy =
Type->getAs<ObjCInterfaceType>()) {
TypeDecl = ObjCITy->getDecl();
diff --git a/clang/lib/Format/CMakeLists.txt b/clang/lib/Format/CMakeLists.txt
index 9f49398..24f435d 100644
--- a/clang/lib/Format/CMakeLists.txt
+++ b/clang/lib/Format/CMakeLists.txt
@@ -13,6 +13,7 @@ add_clang_library(clangFormat
MacroExpander.cpp
MatchFilePath.cpp
NamespaceEndCommentsFixer.cpp
+ NumericLiteralInfo.cpp
ObjCPropertyAttributeOrderFixer.cpp
QualifierAlignmentFixer.cpp
SortJavaScriptImports.cpp
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index 9a10403..888d0fa 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -629,9 +629,16 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
// name.
!Style.isJavaScript() && Previous.isNot(tok::kw_template) &&
CurrentState.BreakBeforeParameter) {
- for (const auto *Tok = &Previous; Tok; Tok = Tok->Previous)
- if (Tok->FirstAfterPPLine || Tok->is(TT_LineComment))
+ for (const auto *Tok = &Previous; Tok; Tok = Tok->Previous) {
+ if (Tok->is(TT_LineComment))
return false;
+ if (Tok->is(TT_TemplateCloser)) {
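+          // Skip the tokens inside a template argument list; resume the
+          // backward scan at the matching '<'.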
+ Tok = Tok->MatchingParen;
+ assert(Tok);
+ }
+ if (Tok->FirstAfterPPLine)
+ return false;
+ }
return true;
}
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 0637807..e3b22cd 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -763,6 +763,15 @@ struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensStyle> {
}
};
+template <>
+struct ScalarEnumerationTraits<FormatStyle::SpaceInEmptyBracesStyle> {
+ static void enumeration(IO &IO, FormatStyle::SpaceInEmptyBracesStyle &Value) {
+ IO.enumCase(Value, "Always", FormatStyle::SIEB_Always);
+ IO.enumCase(Value, "Block", FormatStyle::SIEB_Block);
+ IO.enumCase(Value, "Never", FormatStyle::SIEB_Never);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::SpacesInAnglesStyle> {
static void enumeration(IO &IO, FormatStyle::SpacesInAnglesStyle &Value) {
IO.enumCase(Value, "Never", FormatStyle::SIAS_Never);
@@ -931,6 +940,7 @@ template <> struct MappingTraits<FormatStyle> {
bool DeriveLineEnding = true;
bool UseCRLF = false;
+ bool SpaceInEmptyBlock = false;
bool SpaceInEmptyParentheses = false;
bool SpacesInConditionalStatement = false;
bool SpacesInCStyleCastParentheses = false;
@@ -960,6 +970,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("PointerBindsToType", Style.PointerAlignment);
IO.mapOptional("SpaceAfterControlStatementKeyword",
Style.SpaceBeforeParens);
+ IO.mapOptional("SpaceInEmptyBlock", SpaceInEmptyBlock);
IO.mapOptional("SpaceInEmptyParentheses", SpaceInEmptyParentheses);
IO.mapOptional("SpacesInConditionalStatement",
SpacesInConditionalStatement);
@@ -1193,7 +1204,7 @@ template <> struct MappingTraits<FormatStyle> {
Style.SpaceBeforeRangeBasedForLoopColon);
IO.mapOptional("SpaceBeforeSquareBrackets",
Style.SpaceBeforeSquareBrackets);
- IO.mapOptional("SpaceInEmptyBlock", Style.SpaceInEmptyBlock);
+ IO.mapOptional("SpaceInEmptyBraces", Style.SpaceInEmptyBraces);
IO.mapOptional("SpacesBeforeTrailingComments",
Style.SpacesBeforeTrailingComments);
IO.mapOptional("SpacesInAngles", Style.SpacesInAngles);
@@ -1276,6 +1287,13 @@ template <> struct MappingTraits<FormatStyle> {
Style.LineEnding = FormatStyle::LE_DeriveCRLF;
}
+ // If SpaceInEmptyBlock was specified but SpaceInEmptyBraces was not,
+ // initialize the latter from the former for backward compatibility.
+ if (SpaceInEmptyBlock &&
+ Style.SpaceInEmptyBraces == FormatStyle::SIEB_Never) {
+ Style.SpaceInEmptyBraces = FormatStyle::SIEB_Block;
+ }
+
if (Style.SpacesInParens != FormatStyle::SIPO_Custom &&
(SpacesInParentheses || SpaceInEmptyParentheses ||
SpacesInConditionalStatement || SpacesInCStyleCastParentheses)) {
@@ -1677,7 +1695,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SpaceBeforeParensOptions.AfterIfMacros = true;
LLVMStyle.SpaceBeforeRangeBasedForLoopColon = true;
LLVMStyle.SpaceBeforeSquareBrackets = false;
- LLVMStyle.SpaceInEmptyBlock = false;
+ LLVMStyle.SpaceInEmptyBraces = FormatStyle::SIEB_Never;
LLVMStyle.SpacesBeforeTrailingComments = 1;
LLVMStyle.SpacesInAngles = FormatStyle::SIAS_Never;
LLVMStyle.SpacesInContainerLiterals = true;
@@ -1984,7 +2002,7 @@ FormatStyle getWebKitStyle() {
Style.ObjCSpaceAfterProperty = true;
Style.PointerAlignment = FormatStyle::PAS_Left;
Style.SpaceBeforeCpp11BracedList = true;
- Style.SpaceInEmptyBlock = true;
+ Style.SpaceInEmptyBraces = FormatStyle::SIEB_Always;
return Style;
}
diff --git a/clang/lib/Format/NumericLiteralInfo.cpp b/clang/lib/Format/NumericLiteralInfo.cpp
new file mode 100644
index 0000000..81e6dd5
--- /dev/null
+++ b/clang/lib/Format/NumericLiteralInfo.cpp
@@ -0,0 +1,65 @@
+//===--- NumericLiteralInfo.cpp ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the functionality of getting information about a
+/// numeric literal string, including 0-based positions of the base letter, the
+/// decimal/hexadecimal point, the exponent letter, and the suffix, or npos if
+/// absent.
+///
+//===----------------------------------------------------------------------===//
+
+#include "NumericLiteralInfo.h"
+#include "llvm/ADT/StringExtras.h"
+
+namespace clang {
+namespace format {
+
+using namespace llvm;
+
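+// Worked example (with the default '\'' separator): for "0x1.8p3f",
+// BaseLetterPos = 1 ('x'), DotPos = 3, ExponentLetterPos = 5 ('p'), and
+// SuffixPos = 7 ('f'); for "1e2" only ExponentLetterPos (1) is set and the
+// other members remain npos.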
+NumericLiteralInfo::NumericLiteralInfo(StringRef Text, char Separator) {
+ if (Text.size() < 2)
+ return;
+
+ bool IsHex = false;
+ if (Text[0] == '0') {
+ switch (Text[1]) {
+ case 'x':
+ case 'X':
+ IsHex = true;
+ [[fallthrough]];
+ case 'b':
+ case 'B':
+ case 'o': // JavaScript octal.
+ case 'O':
+ BaseLetterPos = 1; // e.g. 0xF
+ break;
+ }
+ }
+
+ DotPos = Text.find('.', BaseLetterPos + 1); // e.g. 0x.1 or .1
+
+ // e.g. 1.e2 or 0xFp2
+ const auto Pos = DotPos != StringRef::npos ? DotPos + 1 : BaseLetterPos + 2;
+
+ ExponentLetterPos =
+ // Trim C++ user-defined suffix as in `1_Pa`.
+ (Separator == '\'' ? Text.take_front(Text.find('_')) : Text)
+ .find_insensitive(IsHex ? 'p' : 'e', Pos);
+
+ const bool HasExponent = ExponentLetterPos != StringRef::npos;
+ SuffixPos = Text.find_if_not(
+ [&](char C) {
+ return (HasExponent || !IsHex ? isDigit : isHexDigit)(C) ||
+ C == Separator;
+ },
+ HasExponent ? ExponentLetterPos + 2 : Pos); // e.g. 1e-2f
+}
+
+} // namespace format
+} // namespace clang
diff --git a/clang/lib/Format/NumericLiteralInfo.h b/clang/lib/Format/NumericLiteralInfo.h
new file mode 100644
index 0000000..0210f6c
--- /dev/null
+++ b/clang/lib/Format/NumericLiteralInfo.h
@@ -0,0 +1,29 @@
+//===--- NumericLiteralInfo.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_NUMERICLITERALINFO_H
+#define LLVM_CLANG_LIB_FORMAT_NUMERICLITERALINFO_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+namespace format {
+
+struct NumericLiteralInfo {
+ size_t BaseLetterPos = llvm::StringRef::npos; // as in 0b1, 0xF, etc.
+ size_t DotPos = llvm::StringRef::npos; // pos of decimal/hex point
+ size_t ExponentLetterPos = llvm::StringRef::npos; // as in 9e9 and 0xFp9
+ size_t SuffixPos = llvm::StringRef::npos; // starting pos of suffix
+
+ NumericLiteralInfo(llvm::StringRef Text, char Separator = '\'');
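+  // e.g. NumericLiteralInfo("0b1010'0101u") yields BaseLetterPos == 1 and
+  // SuffixPos == 11, with the other members left at npos.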
+};
+
+} // end namespace format
+} // end namespace clang
+
+#endif
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 4801d27..bbb7ef2 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -1045,6 +1045,14 @@ private:
}
}
// Parse the [DagArgList] part
+ return parseTableGenDAGArgList(Opener, BreakInside);
+ }
+
+ // DagArgList ::= "," DagArg [DagArgList]
+ // This parses SimpleValue 6's [DagArgList] part.
+ bool parseTableGenDAGArgList(FormatToken *Opener, bool BreakInside) {
+ ScopedContextCreator ContextCreator(*this, tok::l_paren, 0);
+ Contexts.back().IsTableGenDAGArgList = true;
bool FirstDAGArgListElm = true;
while (CurrentToken) {
if (!FirstDAGArgListElm && CurrentToken->is(tok::comma)) {
@@ -1101,6 +1109,9 @@ private:
// SimpleValue6 ::= "(" DagArg [DagArgList] ")"
if (Tok->is(tok::l_paren)) {
Tok->setType(TT_TableGenDAGArgOpener);
+ // Nested DAGArg requires space before '(' as separator.
+ if (Contexts.back().IsTableGenDAGArgList)
+ Tok->SpacesRequiredBefore = 1;
return parseTableGenDAGArgAndList(Tok);
}
// SimpleValue 9: Bang operator
@@ -2138,7 +2149,7 @@ private:
// Whether the braces may mean concatenation instead of structure or array
// literal.
bool VerilogMayBeConcatenation = false;
- bool IsTableGenDAGArg = false;
+ bool IsTableGenDAGArgList = false;
bool IsTableGenBangOpe = false;
bool IsTableGenCondOpe = false;
enum {
@@ -2590,6 +2601,9 @@ private:
if (!Tok.Previous || Tok.isNot(tok::identifier) || Tok.is(TT_ClassHeadName))
return false;
+ if (Tok.endsSequence(Keywords.kw_final, TT_ClassHeadName))
+ return false;
+
if ((Style.isJavaScript() || Style.isJava()) && Tok.is(Keywords.kw_extends))
return false;
@@ -4009,7 +4023,7 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
auto *Tok = Line.Last->Previous;
while (Tok->isNot(tok::r_brace))
Tok = Tok->Previous;
- if (auto *LBrace = Tok->MatchingParen; LBrace) {
+ if (auto *LBrace = Tok->MatchingParen; LBrace && LBrace->is(TT_Unknown)) {
assert(LBrace->is(tok::l_brace));
Tok->setBlockKind(BK_Block);
LBrace->setBlockKind(BK_Block);
@@ -4513,16 +4527,9 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return Left.is(tok::hash);
if (Left.isOneOf(tok::hashhash, tok::hash))
return Right.is(tok::hash);
- if (Left.is(BK_Block) && Right.is(tok::r_brace) &&
- Right.MatchingParen == &Left && Line.Children.empty()) {
- return Style.SpaceInEmptyBlock;
- }
if (Style.SpacesInParens == FormatStyle::SIPO_Custom) {
- if ((Left.is(tok::l_paren) && Right.is(tok::r_paren)) ||
- (Left.is(tok::l_brace) && Left.isNot(BK_Block) &&
- Right.is(tok::r_brace) && Right.isNot(BK_Block))) {
+ if (Left.is(tok::l_paren) && Right.is(tok::r_paren))
return Style.SpacesInParensOptions.InEmptyParentheses;
- }
if (Style.SpacesInParensOptions.ExceptDoubleParentheses &&
Left.is(tok::r_paren) && Right.is(tok::r_paren)) {
auto *InnerLParen = Left.MatchingParen;
@@ -4800,8 +4807,6 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
Right.is(TT_ArraySubscriptLSquare))) {
return false;
}
- if (Left.is(tok::l_brace) && Right.is(tok::r_brace))
- return !Left.Children.empty(); // No spaces in "{}".
if ((Left.is(tok::l_brace) && Left.isNot(BK_Block)) ||
(Right.is(tok::r_brace) && Right.MatchingParen &&
Right.MatchingParen->isNot(BK_Block))) {
@@ -4983,6 +4988,17 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(tok::star) && Right.is(tok::comment))
return true;
+ if (Left.is(tok::l_brace) && Right.is(tok::r_brace) &&
+ Left.Children.empty()) {
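+    // Empty "{}": a block obeys SpaceInEmptyBraces directly; with
+    // Cpp11BracedListStyle an empty braced list behaves like "()" and obeys
+    // SpacesInParensOptions.InEmptyParentheses instead.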
+ if (Left.is(BK_Block))
+ return Style.SpaceInEmptyBraces != FormatStyle::SIEB_Never;
+ if (Style.Cpp11BracedListStyle) {
+ return Style.SpacesInParens == FormatStyle::SIPO_Custom &&
+ Style.SpacesInParensOptions.InEmptyParentheses;
+ }
+ return Style.SpaceInEmptyBraces == FormatStyle::SIEB_Always;
+ }
+
const auto *BeforeLeft = Left.Previous;
if (IsCpp) {
@@ -6269,7 +6285,8 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
}
if (Right.is(tok::colon) &&
- !Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon)) {
+ !Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon,
+ TT_BitFieldColon)) {
return false;
}
if (Left.is(tok::colon) && Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
diff --git a/clang/lib/Format/UnwrappedLineFormatter.cpp b/clang/lib/Format/UnwrappedLineFormatter.cpp
index 0adf7ee..2a7bfd1 100644
--- a/clang/lib/Format/UnwrappedLineFormatter.cpp
+++ b/clang/lib/Format/UnwrappedLineFormatter.cpp
@@ -251,10 +251,13 @@ private:
: Limit - TheLine->Last->TotalLength;
if (TheLine->Last->is(TT_FunctionLBrace) &&
- TheLine->First == TheLine->Last &&
- !Style.BraceWrapping.SplitEmptyFunction &&
- NextLine.First->is(tok::r_brace)) {
- return tryMergeSimpleBlock(I, E, Limit);
+ TheLine->First == TheLine->Last) {
+ const bool EmptyFunctionBody = NextLine.First->is(tok::r_brace);
+ if ((EmptyFunctionBody && !Style.BraceWrapping.SplitEmptyFunction) ||
+ (!EmptyFunctionBody &&
+ Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Always)) {
+ return tryMergeSimpleBlock(I, E, Limit);
+ }
}
const auto *PreviousLine = I != AnnotatedLines.begin() ? I[-1] : nullptr;
@@ -864,7 +867,8 @@ private:
if (ShouldMerge()) {
// We merge empty blocks even if the line exceeds the column limit.
Tok->SpacesRequiredBefore =
- (Style.SpaceInEmptyBlock || Line.Last->is(tok::comment)) ? 1 : 0;
+ Style.SpaceInEmptyBraces != FormatStyle::SIEB_Never ||
+ Line.Last->is(tok::comment);
Tok->CanBreakBefore = true;
return 1;
} else if (Limit != 0 && !Line.startsWithNamespace() &&
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index 91b8fdc..f4bbfcf 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -1182,10 +1182,8 @@ void UnwrappedLineParser::parsePPDefine() {
if (MaybeIncludeGuard && !eof())
IncludeGuard = IG_Rejected;
- if (FormatTok->Tok.getKind() == tok::l_paren &&
- !FormatTok->hasWhitespaceBefore()) {
+ if (FormatTok->is(tok::l_paren) && !FormatTok->hasWhitespaceBefore())
parseParens();
- }
if (Style.IndentPPDirectives != FormatStyle::PPDIS_None)
Line->Level += PPBranchLevel + 1;
addUnwrappedLine();
@@ -1193,23 +1191,31 @@ void UnwrappedLineParser::parsePPDefine() {
Line->PPLevel = PPBranchLevel + (IncludeGuard == IG_Defined ? 0 : 1);
assert((int)Line->PPLevel >= 0);
+
+ if (eof())
+ return;
+
Line->InMacroBody = true;
- if (Style.SkipMacroDefinitionBody) {
- while (!eof()) {
- FormatTok->Finalized = true;
- FormatTok = Tokens->getNextToken();
- }
- addUnwrappedLine();
+ if (!Style.SkipMacroDefinitionBody) {
+ // Errors during a preprocessor directive can only affect the layout of the
+ // preprocessor directive, and thus we ignore them. An alternative approach
+ // would be to use the same approach we use on the file level (no
+ // re-indentation if there was a structural error) within the macro
+ // definition.
+ parseFile();
return;
}
- // Errors during a preprocessor directive can only affect the layout of the
- // preprocessor directive, and thus we ignore them. An alternative approach
- // would be to use the same approach we use on the file level (no
- // re-indentation if there was a structural error) within the macro
- // definition.
- parseFile();
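+  // SkipMacroDefinitionBody: finalize any queued comments and the remaining
+  // tokens so the body is passed through unformatted.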
+ for (auto *Comment : CommentsBeforeNextToken)
+ Comment->Finalized = true;
+
+ do {
+ FormatTok->Finalized = true;
+ FormatTok = Tokens->getNextToken();
+ } while (!eof());
+
+ addUnwrappedLine();
}
void UnwrappedLineParser::parsePPPragma() {
diff --git a/clang/lib/Frontend/ASTConsumers.cpp b/clang/lib/Frontend/ASTConsumers.cpp
index ab8a35a..67c8761 100644
--- a/clang/lib/Frontend/ASTConsumers.cpp
+++ b/clang/lib/Frontend/ASTConsumers.cpp
@@ -97,6 +97,7 @@ namespace {
Out << "Not a DeclContext\n";
} else if (OutputKind == Print) {
PrintingPolicy Policy(D->getASTContext().getLangOpts());
+ Policy.IncludeTagDefinition = true;
D->print(Out, Policy, /*Indentation=*/0, /*PrintInstantiation=*/true);
} else if (OutputKind != None) {
D->dump(Out, OutputKind == DumpFull, OutputFormat);
@@ -112,8 +113,10 @@ namespace {
// FIXME: Support combining -ast-dump-decl-types with -ast-dump-lookups.
if (auto *VD = dyn_cast<ValueDecl>(InnerD))
VD->getType().dump(Out, VD->getASTContext());
- if (auto *TD = dyn_cast<TypeDecl>(InnerD))
- TD->getTypeForDecl()->dump(Out, TD->getASTContext());
+ if (auto *TD = dyn_cast<TypeDecl>(InnerD)) {
+ const ASTContext &Ctx = TD->getASTContext();
+ Ctx.getTypeDeclType(TD)->dump(Out, Ctx);
+ }
}
}
diff --git a/clang/lib/Frontend/ASTUnit.cpp b/clang/lib/Frontend/ASTUnit.cpp
index a407825..03b08cd 100644
--- a/clang/lib/Frontend/ASTUnit.cpp
+++ b/clang/lib/Frontend/ASTUnit.cpp
@@ -396,7 +396,7 @@ void ASTUnit::CacheCodeCompletionResults() {
// Keep track of the type of this completion in an ASTContext-agnostic
// way.
- QualType UsageType = getDeclUsageType(*Ctx, R.Declaration);
+ QualType UsageType = getDeclUsageType(*Ctx, R.Qualifier, R.Declaration);
if (UsageType.isNull()) {
CachedResult.TypeClass = STC_Void;
CachedResult.Type = 0;
diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp
index 9f99edad..b2c566f 100644
--- a/clang/lib/Frontend/CompilerInstance.cpp
+++ b/clang/lib/Frontend/CompilerInstance.cpp
@@ -1473,16 +1473,18 @@ static bool compileModuleAndReadASTImpl(CompilerInstance &ImportingInstance,
SourceLocation ModuleNameLoc,
Module *Module,
StringRef ModuleFileName) {
- auto Instance = ImportingInstance.cloneForModuleCompile(ModuleNameLoc, Module,
- ModuleFileName);
-
- if (!ImportingInstance.compileModule(ModuleNameLoc,
- Module->getTopLevelModuleName(),
- ModuleFileName, *Instance)) {
- ImportingInstance.getDiagnostics().Report(ModuleNameLoc,
- diag::err_module_not_built)
- << Module->Name << SourceRange(ImportLoc, ModuleNameLoc);
- return false;
+ {
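+    // Scope the cloned instance so it is destroyed before we touch the
+    // module file's timestamp below.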
+ auto Instance = ImportingInstance.cloneForModuleCompile(
+ ModuleNameLoc, Module, ModuleFileName);
+
+ if (!ImportingInstance.compileModule(ModuleNameLoc,
+ Module->getTopLevelModuleName(),
+ ModuleFileName, *Instance)) {
+ ImportingInstance.getDiagnostics().Report(ModuleNameLoc,
+ diag::err_module_not_built)
+ << Module->Name << SourceRange(ImportLoc, ModuleNameLoc);
+ return false;
+ }
}
// The module is built successfully, we can update its timestamp now.
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index 9f77e62..29f9cf3 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -640,6 +640,10 @@ static bool FixupInvocation(CompilerInvocation &Invocation,
Diags.Report(diag::err_drv_argument_not_allowed_with)
<< "-fdx-rootsignature-version" << GetInputKindName(IK);
+ if (Args.hasArg(OPT_fdx_rootsignature_define) && !LangOpts.HLSL)
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << "-fdx-rootsignature-define" << GetInputKindName(IK);
+
if (Args.hasArg(OPT_fgpu_allow_device_init) && !LangOpts.HIP)
Diags.Report(diag::warn_ignored_hip_only_option)
<< Args.getLastArg(OPT_fgpu_allow_device_init)->getAsString(Args);
@@ -1542,6 +1546,17 @@ void CompilerInvocation::setDefaultPointerAuthOptions(
Discrimination::Constant, InitFiniPointerConstantDiscriminator);
}
+ Opts.BlockInvocationFunctionPointers =
+ PointerAuthSchema(Key::ASIA, true, Discrimination::None);
+ Opts.BlockHelperFunctionPointers =
+ PointerAuthSchema(Key::ASIA, true, Discrimination::None);
+ Opts.BlockByrefHelperFunctionPointers =
+ PointerAuthSchema(Key::ASIA, true, Discrimination::None);
+ if (LangOpts.PointerAuthBlockDescriptorPointers)
+ Opts.BlockDescriptorPointers =
+ PointerAuthSchema(Key::ASDA, true, Discrimination::Constant,
+ BlockDescriptorConstantDiscriminator);
+
Opts.ObjCMethodListFunctionPointers =
PointerAuthSchema(Key::ASIA, true, Discrimination::None);
Opts.ObjCMethodListPointer =
@@ -3598,6 +3613,8 @@ static void GeneratePointerAuthArgs(const LangOptions &Opts,
GenerateArg(Consumer, OPT_fptrauth_objc_interface_sel);
if (Opts.PointerAuthObjcClassROPointers)
GenerateArg(Consumer, OPT_fptrauth_objc_class_ro);
+ if (Opts.PointerAuthBlockDescriptorPointers)
+ GenerateArg(Consumer, OPT_fptrauth_block_descriptor_pointers);
}
static void ParsePointerAuthArgs(LangOptions &Opts, ArgList &Args,
@@ -3621,7 +3638,8 @@ static void ParsePointerAuthArgs(LangOptions &Opts, ArgList &Args,
Opts.PointerAuthELFGOT = Args.hasArg(OPT_fptrauth_elf_got);
Opts.AArch64JumpTableHardening =
Args.hasArg(OPT_faarch64_jump_table_hardening);
-
+ Opts.PointerAuthBlockDescriptorPointers =
+ Args.hasArg(OPT_fptrauth_block_descriptor_pointers);
Opts.PointerAuthObjcIsa = Args.hasArg(OPT_fptrauth_objc_isa);
Opts.PointerAuthObjcClassROPointers = Args.hasArg(OPT_fptrauth_objc_class_ro);
Opts.PointerAuthObjcInterfaceSel =
@@ -3936,47 +3954,18 @@ void CompilerInvocationBase::GenerateLangArgs(const LangOptions &Opts,
GenerateArg(Consumer, OPT_fsanitize_ignorelist_EQ, F);
switch (Opts.getClangABICompat()) {
- case LangOptions::ClangABI::Ver3_8:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "3.8");
- break;
- case LangOptions::ClangABI::Ver4:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "4.0");
- break;
- case LangOptions::ClangABI::Ver6:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "6.0");
- break;
- case LangOptions::ClangABI::Ver7:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "7.0");
+#define ABI_VER_MAJOR_MINOR(Major, Minor) \
+ case LangOptions::ClangABI::Ver##Major##_##Minor: \
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, #Major "." #Minor); \
break;
- case LangOptions::ClangABI::Ver9:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "9.0");
+#define ABI_VER_MAJOR(Major) \
+ case LangOptions::ClangABI::Ver##Major: \
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, #Major ".0"); \
break;
- case LangOptions::ClangABI::Ver11:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "11.0");
- break;
- case LangOptions::ClangABI::Ver12:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "12.0");
- break;
- case LangOptions::ClangABI::Ver14:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "14.0");
- break;
- case LangOptions::ClangABI::Ver15:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "15.0");
- break;
- case LangOptions::ClangABI::Ver17:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "17.0");
- break;
- case LangOptions::ClangABI::Ver18:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "18.0");
- break;
- case LangOptions::ClangABI::Ver19:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "19.0");
- break;
- case LangOptions::ClangABI::Ver20:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "20.0");
- break;
- case LangOptions::ClangABI::Latest:
+#define ABI_VER_LATEST(Latest) \
+ case LangOptions::ClangABI::Latest: \
break;
+#include "clang/Basic/ABIVersions.def"
}
if (Opts.getSignReturnAddressScope() ==
@@ -4470,7 +4459,7 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
StringRef Ver = A->getValue();
std::pair<StringRef, StringRef> VerParts = Ver.split('.');
- unsigned Major, Minor = 0;
+ int Major, Minor = 0;
// Check the version number is valid: either 3.x (0 <= x <= 9) or
// y or y.0 (4 <= y <= current version).
@@ -4482,32 +4471,18 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
!VerParts.second.getAsInteger(10, Minor)
: VerParts.first.size() == Ver.size() || VerParts.second == "0")) {
// Got a valid version number.
- if (Major == 3 && Minor <= 8)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver3_8);
- else if (Major <= 4)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver4);
- else if (Major <= 6)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver6);
- else if (Major <= 7)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver7);
- else if (Major <= 9)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver9);
- else if (Major <= 11)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver11);
- else if (Major <= 12)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver12);
- else if (Major <= 14)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver14);
- else if (Major <= 15)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver15);
- else if (Major <= 17)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver17);
- else if (Major <= 18)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver18);
- else if (Major <= 19)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver19);
- else if (Major <= 20)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver20);
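+// The macros below expand to a cascading if/else-if chain over
+// ABIVersions.def: each arm ends in a dangling `else`, so the first listed
+// version at or above the requested one is selected.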
+#define ABI_VER_MAJOR_MINOR(Major_, Minor_) \
+ if (std::tuple(Major, Minor) <= std::tuple(Major_, Minor_)) \
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver##Major_##_##Minor_); \
+ else
+#define ABI_VER_MAJOR(Major_) \
+ if (Major <= Major_) \
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver##Major_); \
+ else
+#define ABI_VER_LATEST(Latest) \
+ { /* Equivalent to latest version - do nothing */ \
+ }
+#include "clang/Basic/ABIVersions.def"
} else if (Ver != "latest") {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
diff --git a/clang/lib/Frontend/FrontendActions.cpp b/clang/lib/Frontend/FrontendActions.cpp
index 685a9bb..ccda2c4 100644
--- a/clang/lib/Frontend/FrontendActions.cpp
+++ b/clang/lib/Frontend/FrontendActions.cpp
@@ -22,6 +22,7 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
+#include "clang/Parse/ParseHLSLRootSignature.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
@@ -1241,3 +1242,85 @@ void GetDependenciesByModuleNameAction::ExecuteAction() {
PPCallbacks *CB = PP.getPPCallbacks();
CB->moduleImport(SourceLocation(), Path, ModResult);
}
+
+//===----------------------------------------------------------------------===//
+// HLSL Specific Actions
+//===----------------------------------------------------------------------===//
+
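+/// Watches for the macro named by the root-signature define option; when the
+/// macro is defined, its string-literal body is parsed as a root signature
+/// and the resulting declaration is registered with Sema.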
+class InjectRootSignatureCallback : public PPCallbacks {
+private:
+ Sema &Actions;
+ StringRef RootSigName;
+ llvm::dxbc::RootSignatureVersion Version;
+
+ std::optional<StringLiteral *> processStringLiteral(ArrayRef<Token> Tokens) {
+ for (Token Tok : Tokens)
+ if (!tok::isStringLiteral(Tok.getKind()))
+ return std::nullopt;
+
+ ExprResult StringResult = Actions.ActOnUnevaluatedStringLiteral(Tokens);
+ if (StringResult.isInvalid())
+ return std::nullopt;
+
+    if (auto *Signature = dyn_cast<StringLiteral>(StringResult.get()))
+ return Signature;
+
+ return std::nullopt;
+ }
+
+public:
+ void MacroDefined(const Token &MacroNameTok,
+ const MacroDirective *MD) override {
+ if (RootSigName != MacroNameTok.getIdentifierInfo()->getName())
+ return;
+
+ const MacroInfo *MI = MD->getMacroInfo();
+ auto Signature = processStringLiteral(MI->tokens());
+ if (!Signature.has_value()) {
+ Actions.getDiagnostics().Report(MI->getDefinitionLoc(),
+ diag::err_expected_string_literal)
+ << /*in attributes...*/ 4 << "RootSignature";
+ return;
+ }
+
+ IdentifierInfo *DeclIdent =
+ hlsl::ParseHLSLRootSignature(Actions, Version, *Signature);
+ Actions.HLSL().SetRootSignatureOverride(DeclIdent);
+ }
+
+ InjectRootSignatureCallback(Sema &Actions, StringRef RootSigName,
+ llvm::dxbc::RootSignatureVersion Version)
+ : PPCallbacks(), Actions(Actions), RootSigName(RootSigName),
+ Version(Version) {}
+};
+
+void HLSLFrontendAction::ExecuteAction() {
+  // Without an ASTContext and a Preprocessor there is nothing to hook into;
+  // fall back to the wrapped action directly.
+ CompilerInstance &CI = getCompilerInstance();
+ if (!CI.hasASTContext() || !CI.hasPreprocessor())
+ return WrapperFrontendAction::ExecuteAction();
+
+  // InjectRootSignatureCallback needs Sema to look up and register a root
+  // signature declaration. The wrapped action must account for this by only
+  // creating a Sema if one doesn't already exist (as we do here and as
+  // ASTFrontendAction::ExecuteAction does).
+ if (!CI.hasSema())
+ CI.createSema(getTranslationUnitKind(),
+ /*CodeCompleteConsumer=*/nullptr);
+ Sema &S = CI.getSema();
+
+  // Register HLSL-specific preprocessor callbacks.
+  const auto &LangOpts = CI.getLangOpts();
+ auto MacroCallback = std::make_unique<InjectRootSignatureCallback>(
+ S, LangOpts.HLSLRootSigOverride, LangOpts.HLSLRootSigVer);
+
+ Preprocessor &PP = CI.getPreprocessor();
+ PP.addPPCallbacks(std::move(MacroCallback));
+
+ // Invoke as normal
+ WrapperFrontendAction::ExecuteAction();
+}
+
+HLSLFrontendAction::HLSLFrontendAction(
+ std::unique_ptr<FrontendAction> WrappedAction)
+ : WrapperFrontendAction(std::move(WrappedAction)) {}
diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp
index 008a35d..4865c0b 100644
--- a/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/clang/lib/Frontend/InitPreprocessor.cpp
@@ -1519,6 +1519,13 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (TI.getTriple().isOSBinFormatELF())
Builder.defineMacro("__ELF__");
+ if (LangOpts.Sanitize.has(SanitizerKind::Address))
+ Builder.defineMacro("__SANITIZE_ADDRESS__");
+ if (LangOpts.Sanitize.has(SanitizerKind::HWAddress))
+ Builder.defineMacro("__SANITIZE_HWADDRESS__");
+ if (LangOpts.Sanitize.has(SanitizerKind::Thread))
+ Builder.defineMacro("__SANITIZE_THREAD__");
+
// Target OS macro definitions.
if (PPOpts.DefineTargetOSMacros) {
const llvm::Triple &Triple = TI.getTriple();
@@ -1528,6 +1535,9 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
#undef TARGET_OS
}
+ if (LangOpts.PointerAuthIntrinsics)
+ Builder.defineMacro("__PTRAUTH__");
+
// Get other target #defines.
TI.getTargetDefines(LangOpts, Builder);
}
diff --git a/clang/lib/Frontend/LayoutOverrideSource.cpp b/clang/lib/Frontend/LayoutOverrideSource.cpp
index a1866ec..0a60e00 100644
--- a/clang/lib/Frontend/LayoutOverrideSource.cpp
+++ b/clang/lib/Frontend/LayoutOverrideSource.cpp
@@ -8,6 +8,7 @@
#include "clang/Frontend/LayoutOverrideSource.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/Support/raw_ostream.h"
#include <fstream>
diff --git a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index 8f27553..42f2d65 100644
--- a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -852,7 +852,7 @@ RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) {
IvarT = GetGroupRecordTypeForObjCIvarBitfield(D);
if (!IvarT->getAs<TypedefType>() && IvarT->isRecordType()) {
- RecordDecl *RD = IvarT->castAs<RecordType>()->getDecl();
+ RecordDecl *RD = IvarT->castAsCanonical<RecordType>()->getOriginalDecl();
RD = RD->getDefinition();
if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
// decltype(((Foo_IMPL*)0)->bar) *
@@ -865,7 +865,8 @@ RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) {
RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get(RecName));
- QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType PtrStructIMPL =
+ Context->getPointerType(Context->getCanonicalTagType(RD));
unsigned UnsignedIntSize =
static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
Expr *Zero = IntegerLiteral::Create(*Context,
@@ -2999,7 +3000,7 @@ QualType RewriteModernObjC::getSuperStructType() {
SuperStructDecl->completeDefinition();
}
- return Context->getTagDeclType(SuperStructDecl);
+ return Context->getCanonicalTagType(SuperStructDecl);
}
QualType RewriteModernObjC::getConstantStringStructType() {
@@ -3032,7 +3033,7 @@ QualType RewriteModernObjC::getConstantStringStructType() {
ConstantStringDecl->completeDefinition();
}
- return Context->getTagDeclType(ConstantStringDecl);
+ return Context->getCanonicalTagType(ConstantStringDecl);
}
/// getFunctionSourceLocation - returns start location of a function
@@ -3637,7 +3638,7 @@ bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type,
return RewriteObjCFieldDeclType(ElemTy, Result);
}
else if (Type->isRecordType()) {
- RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
+ auto *RD = Type->castAsRecordDecl();
if (RD->isCompleteDefinition()) {
if (RD->isStruct())
Result += "\n\tstruct ";
@@ -3658,27 +3659,26 @@ bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type,
Result += "\t} ";
return true;
}
- }
- else if (Type->isEnumeralType()) {
- EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
- if (ED->isCompleteDefinition()) {
- Result += "\n\tenum ";
- Result += ED->getName();
- if (GlobalDefinedTags.count(ED)) {
- // Enum is globall defined, use it.
- Result += " ";
- return true;
- }
-
- Result += " {\n";
- for (const auto *EC : ED->enumerators()) {
- Result += "\t"; Result += EC->getName(); Result += " = ";
- Result += toString(EC->getInitVal(), 10);
- Result += ",\n";
- }
- Result += "\t} ";
+ } else if (auto *ED = Type->getAsEnumDecl();
+ ED && ED->isCompleteDefinition()) {
+ Result += "\n\tenum ";
+ Result += ED->getName();
+ if (GlobalDefinedTags.count(ED)) {
+      // Enum is globally defined, use it.
+ Result += " ";
return true;
}
+
+ Result += " {\n";
+ for (const auto *EC : ED->enumerators()) {
+ Result += "\t";
+ Result += EC->getName();
+ Result += " = ";
+ Result += toString(EC->getInitVal(), 10);
+ Result += ",\n";
+ }
+ Result += "\t} ";
+ return true;
}
Result += "\t";
@@ -3730,15 +3730,7 @@ void RewriteModernObjC::RewriteLocallyDefinedNamedAggregates(FieldDecl *fieldDec
auto *IDecl = dyn_cast<ObjCContainerDecl>(fieldDecl->getDeclContext());
- TagDecl *TD = nullptr;
- if (Type->isRecordType()) {
- TD = Type->castAs<RecordType>()->getDecl();
- }
- else if (Type->isEnumeralType()) {
- TD = Type->castAs<EnumType>()->getDecl();
- }
-
- if (TD) {
+ if (auto *TD = Type->getAsTagDecl()) {
if (GlobalDefinedTags.count(TD))
return;
@@ -3793,7 +3785,7 @@ QualType RewriteModernObjC::SynthesizeBitfieldGroupStructType(
false, ICIS_NoInit));
}
RD->completeDefinition();
- return Context->getTagDeclType(RD);
+ return Context->getCanonicalTagType(RD);
}
QualType RewriteModernObjC::GetGroupRecordTypeForObjCIvarBitfield(ObjCIvarDecl *IV) {
@@ -4572,7 +4564,7 @@ Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp
RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("__block_impl"));
- QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType PtrBlock = Context->getPointerType(Context->getCanonicalTagType(RD));
// Generate a funky cast.
SmallVector<QualType, 8> ArgTypes;
@@ -5316,7 +5308,8 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(), II);
assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl");
- QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType castT =
+ Context->getPointerType(Context->getCanonicalTagType(RD));
FD = SynthBlockInitFunctionDecl(ND->getName());
Exp = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
@@ -5719,7 +5712,7 @@ void RewriteModernObjC::HandleDeclInMainFile(Decl *D) {
}
}
} else if (VD->getType()->isRecordType()) {
- RecordDecl *RD = VD->getType()->castAs<RecordType>()->getDecl();
+ auto *RD = VD->getType()->castAsRecordDecl();
if (RD->isCompleteDefinition())
RewriteRecordBody(RD);
}
@@ -7460,7 +7453,8 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
IvarT = GetGroupRecordTypeForObjCIvarBitfield(D);
if (!IvarT->getAs<TypedefType>() && IvarT->isRecordType()) {
- RecordDecl *RD = IvarT->castAs<RecordType>()->getDecl();
+ RecordDecl *RD =
+ IvarT->castAsCanonical<RecordType>()->getOriginalDecl();
RD = RD->getDefinition();
if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
// decltype(((Foo_IMPL*)0)->bar) *
@@ -7473,7 +7467,8 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
RecordDecl *RD = RecordDecl::Create(
*Context, TagTypeKind::Struct, TUDecl, SourceLocation(),
SourceLocation(), &Context->Idents.get(RecName));
- QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType PtrStructIMPL =
+ Context->getPointerType(Context->getCanonicalTagType(RD));
unsigned UnsignedIntSize =
static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
Expr *Zero = IntegerLiteral::Create(*Context,
diff --git a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
index f49ccf7..b9c025d 100644
--- a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -2358,7 +2358,7 @@ void RewriteObjC::SynthMsgSendSuperFunctionDecl() {
RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("objc_super"));
- QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType argT = Context->getPointerType(Context->getCanonicalTagType(RD));
assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
ArgTys.push_back(argT);
argT = Context->getObjCSelType();
@@ -2401,7 +2401,7 @@ void RewriteObjC::SynthMsgSendSuperStretFunctionDecl() {
RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("objc_super"));
- QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType argT = Context->getPointerType(Context->getCanonicalTagType(RD));
assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
ArgTys.push_back(argT);
argT = Context->getObjCSelType();
@@ -2552,7 +2552,7 @@ QualType RewriteObjC::getSuperStructType() {
SuperStructDecl->completeDefinition();
}
- return Context->getTagDeclType(SuperStructDecl);
+ return Context->getCanonicalTagType(SuperStructDecl);
}
QualType RewriteObjC::getConstantStringStructType() {
@@ -2585,7 +2585,7 @@ QualType RewriteObjC::getConstantStringStructType() {
ConstantStringDecl->completeDefinition();
}
- return Context->getTagDeclType(ConstantStringDecl);
+ return Context->getCanonicalTagType(ConstantStringDecl);
}
CallExpr *RewriteObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
@@ -3750,7 +3750,7 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("__block_impl"));
- QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType PtrBlock = Context->getPointerType(Context->getCanonicalTagType(RD));
// Generate a funky cast.
SmallVector<QualType, 8> ArgTypes;
@@ -4468,7 +4468,8 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(), II);
assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl");
- QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType castT =
+ Context->getPointerType(Context->getCanonicalTagType(RD));
FD = SynthBlockInitFunctionDecl((*I)->getName());
Exp = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
@@ -4834,7 +4835,7 @@ void RewriteObjC::HandleDeclInMainFile(Decl *D) {
}
}
} else if (VD->getType()->isRecordType()) {
- RecordDecl *RD = VD->getType()->castAs<RecordType>()->getDecl();
+ auto *RD = VD->getType()->castAsRecordDecl();
if (RD->isCompleteDefinition())
RewriteRecordBody(RD);
}
@@ -5804,7 +5805,8 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(), II);
assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
- QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType castT =
+ Context->getPointerType(Context->getCanonicalTagType(RD));
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
CK_BitCast,
IV->getBase());
@@ -5845,7 +5847,8 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(), II);
assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
- QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType castT =
+ Context->getPointerType(Context->getCanonicalTagType(RD));
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
CK_BitCast,
IV->getBase());
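
Aside for readers tracking the RewriteObjC.cpp hunks above: every change follows one mechanical pattern, swapping getTagDeclType for getCanonicalTagType when building a pointer-to-struct type from a freshly synthesized RecordDecl. A minimal sketch of that pattern, assuming the usual clang AST headers; the helper name buildTagPtrType is hypothetical:

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/Decl.h"
    #include <cassert>
    using namespace clang;

    // Hypothetical helper mirroring the replacements above: ask the context
    // for the canonical TagType of the decl, then wrap it in a pointer.
    static QualType buildTagPtrType(ASTContext &Ctx, RecordDecl *RD) {
      QualType T = Ctx.getPointerType(Ctx.getCanonicalTagType(RD));
      assert(!T.isNull() && "Can't build pointer-to-struct type");
      return T;
    }

The payoff is that the resulting QualType is already canonical, so downstream comparisons need no desugaring step.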
diff --git a/clang/lib/Frontend/TextDiagnostic.cpp b/clang/lib/Frontend/TextDiagnostic.cpp
index ccdd59d..5888571 100644
--- a/clang/lib/Frontend/TextDiagnostic.cpp
+++ b/clang/lib/Frontend/TextDiagnostic.cpp
@@ -1095,6 +1095,8 @@ prepareAndFilterRanges(const SmallVectorImpl<CharSourceRange> &Ranges,
unsigned StartColumn = SM.getExpansionColumnNumber(Begin);
unsigned EndColumn = SM.getExpansionColumnNumber(End);
+ assert(StartColumn && "StartColumn must be valid, 0 is invalid");
+ assert(EndColumn && "EndColumn must be valid, 0 is invalid");
if (R.isTokenRange())
EndColumn += Lexer::MeasureTokenLength(End, SM, LangOpts);
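
One note on the two asserts added above: SourceManager::getExpansionColumnNumber returns 1-based columns, with 0 reserved as the invalid sentinel, so a zero here would indicate a bogus SourceLocation rather than a real first column. A tiny sketch of the invariant, assuming a SourceManager SM and a valid SourceLocation Loc:

    // Columns are 1-based; 0 is the "invalid" sentinel, never a real column.
    unsigned Col = SM.getExpansionColumnNumber(Loc);
    assert(Col != 0 && "0 would mean Loc did not resolve to a file position");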
diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 443eb4f..9a6844d 100644
--- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -181,6 +181,9 @@ CreateFrontendAction(CompilerInstance &CI) {
const FrontendOptions &FEOpts = CI.getFrontendOpts();
+ if (CI.getLangOpts().HLSL)
+ Act = std::make_unique<HLSLFrontendAction>(std::move(Act));
+
if (FEOpts.FixAndRecompile) {
Act = std::make_unique<FixItRecompile>(std::move(Act));
}
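
The ExecuteCompilerInvocation.cpp hunk unconditionally wraps whatever action was selected for an HLSL compile. The standard mechanism for this is clang's WrapperFrontendAction, which forwards every hook to the wrapped action unless overridden; a sketch under the assumption that HLSLFrontendAction (named in the hunk, body not shown here) follows that idiom:

    #include "clang/Frontend/FrontendAction.h"
    #include <memory>
    using namespace clang;

    // Illustrative only: delegates everything to the wrapped action, with
    // room to layer HLSL-specific setup in the overridable hooks.
    class HLSLFrontendActionSketch : public WrapperFrontendAction {
    public:
      explicit HLSLFrontendActionSketch(std::unique_ptr<FrontendAction> Act)
          : WrapperFrontendAction(std::move(Act)) {}
      // Override e.g. BeginSourceFileAction() here for HLSL-specific work.
    };

Because the wrapping happens before the FixAndRecompile check, any later wrapper such as FixItRecompile wraps the HLSL layer rather than displacing it.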
diff --git a/clang/lib/Headers/avx10_2_512bf16intrin.h b/clang/lib/Headers/avx10_2_512bf16intrin.h
index 75290d2..95e9bd7a 100644
--- a/clang/lib/Headers/avx10_2_512bf16intrin.h
+++ b/clang/lib/Headers/avx10_2_512bf16intrin.h
@@ -441,8 +441,8 @@ _mm512_maskz_sqrt_pbh(__mmask32 __U, __m512bh __A) {
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fmadd_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
- return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, (__v32bf)__B,
- (__v32bf)__C);
+ return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, (__v32bf)__B,
+ (__v32bf)__C);
}
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
@@ -469,8 +469,8 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_maskz_fmadd_pbh(
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fmsub_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
- return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, (__v32bf)__B,
- -(__v32bf)__C);
+ return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, (__v32bf)__B,
+ -(__v32bf)__C);
}
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
@@ -497,8 +497,8 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_maskz_fmsub_pbh(
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fnmadd_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
- return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, -(__v32bf)__B,
- (__v32bf)__C);
+ return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, -(__v32bf)__B,
+ (__v32bf)__C);
}
static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_mask_fnmadd_pbh(
@@ -527,8 +527,8 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_maskz_fnmadd_pbh(
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fnmsub_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
- return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, -(__v32bf)__B,
- -(__v32bf)__C);
+ return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, -(__v32bf)__B,
+ -(__v32bf)__C);
}
static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_mask_fnmsub_pbh(
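
The bf16 FMA hunks above (and their 256/128-bit counterparts in the next file) all rest on the same identities: fmsub(a,b,c) = fma(a,b,-c), fnmadd(a,b,c) = fma(a,-b,c), and fnmsub(a,b,c) = fma(a,-b,-c), with the unary minus applied lane-wise to the vector operand. A standalone illustration with plain float vectors rather than __bf16, since the builtin is type-generic:

    // __builtin_elementwise_fma computes a*b + c per lane; negating the
    // operands yields the sub/negated flavors, exactly as in the hunks.
    typedef float f32x4 __attribute__((vector_size(16)));

    f32x4 fmsub_sketch(f32x4 a, f32x4 b, f32x4 c) {
      return __builtin_elementwise_fma(a, b, -c);   // a*b - c
    }
    f32x4 fnmsub_sketch(f32x4 a, f32x4 b, f32x4 c) {
      return __builtin_elementwise_fma(a, -b, -c);  // -(a*b) - c
    }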
diff --git a/clang/lib/Headers/avx10_2bf16intrin.h b/clang/lib/Headers/avx10_2bf16intrin.h
index 66797ae..0c7f381 100644
--- a/clang/lib/Headers/avx10_2bf16intrin.h
+++ b/clang/lib/Headers/avx10_2bf16intrin.h
@@ -852,8 +852,8 @@ _mm_maskz_sqrt_pbh(__mmask8 __U, __m128bh __A) {
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fmadd_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
- return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, (__v16bf)__B,
- (__v16bf)__C);
+ return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, (__v16bf)__B,
+ (__v16bf)__C);
}
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
@@ -880,8 +880,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fmadd_pbh(
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fmsub_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
- return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, (__v16bf)__B,
- -(__v16bf)__C);
+ return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, (__v16bf)__B,
+ -(__v16bf)__C);
}
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
@@ -908,8 +908,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsub_pbh(
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fnmadd_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
- return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, -(__v16bf)__B,
- (__v16bf)__C);
+ return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, -(__v16bf)__B,
+ (__v16bf)__C);
}
static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_mask_fnmadd_pbh(
@@ -938,8 +938,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmadd_pbh(
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fnmsub_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
- return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, -(__v16bf)__B,
- -(__v16bf)__C);
+ return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, -(__v16bf)__B,
+ -(__v16bf)__C);
}
static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_mask_fnmsub_pbh(
@@ -969,8 +969,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmsub_pbh(
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fmadd_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
- return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, (__v8bf)__B,
- (__v8bf)__C);
+ return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, (__v8bf)__B,
+ (__v8bf)__C);
}
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
@@ -997,8 +997,8 @@ _mm_maskz_fmadd_pbh(__mmask8 __U, __m128bh __A, __m128bh __B, __m128bh __C) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fmsub_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
- return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, (__v8bf)__B,
- -(__v8bf)__C);
+ return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, (__v8bf)__B,
+ -(__v8bf)__C);
}
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
@@ -1025,8 +1025,8 @@ _mm_maskz_fmsub_pbh(__mmask8 __U, __m128bh __A, __m128bh __B, __m128bh __C) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fnmadd_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
- return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, -(__v8bf)__B,
- (__v8bf)__C);
+ return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, -(__v8bf)__B,
+ (__v8bf)__C);
}
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
@@ -1053,8 +1053,8 @@ _mm_maskz_fnmadd_pbh(__mmask8 __U, __m128bh __A, __m128bh __B, __m128bh __C) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fnmsub_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
- return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, -(__v8bf)__B,
- -(__v8bf)__C);
+ return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, -(__v8bf)__B,
+ -(__v8bf)__C);
}
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h
index dc9fc07..384faa3 100644
--- a/clang/lib/Headers/avx2intrin.h
+++ b/clang/lib/Headers/avx2intrin.h
@@ -31,6 +31,14 @@
__min_vector_width__(128)))
#endif
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
+#else
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
+#endif
+
/* SSE4 Multiple Packed Sums of Absolute Difference. */
/// Computes sixteen sum of absolute difference (SAD) operations on sets of
/// four unsigned 8-bit integers from the 256-bit integer vectors \a X and
@@ -104,10 +112,9 @@
/// \param __a
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi8(__m256i __a)
-{
- return (__m256i)__builtin_elementwise_abs((__v32qs)__a);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_abs_epi8(__m256i __a) {
+ return (__m256i)__builtin_elementwise_abs((__v32qs)__a);
}
/// Computes the absolute value of each signed 16-bit element in the 256-bit
@@ -121,10 +128,9 @@ _mm256_abs_epi8(__m256i __a)
/// \param __a
/// A 256-bit vector of [16 x i16].
/// \returns A 256-bit vector of [16 x i16] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi16(__m256i __a)
-{
- return (__m256i)__builtin_elementwise_abs((__v16hi)__a);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_abs_epi16(__m256i __a) {
+ return (__m256i)__builtin_elementwise_abs((__v16hi)__a);
}
/// Computes the absolute value of each signed 32-bit element in the 256-bit
@@ -138,10 +144,9 @@ _mm256_abs_epi16(__m256i __a)
/// \param __a
/// A 256-bit vector of [8 x i32].
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi32(__m256i __a)
-{
- return (__m256i)__builtin_elementwise_abs((__v8si)__a);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_abs_epi32(__m256i __a) {
+ return (__m256i)__builtin_elementwise_abs((__v8si)__a);
}
/// Converts the elements of two 256-bit vectors of [16 x i16] to 8-bit
@@ -359,9 +364,8 @@ _mm256_add_epi64(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit integer vector containing one of the source operands.
/// \returns A 256-bit integer vector containing the sums.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epi8(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_adds_epi8(__m256i __a, __m256i __b) {
return (__m256i)__builtin_elementwise_add_sat((__v32qs)__a, (__v32qs)__b);
}
@@ -377,9 +381,8 @@ _mm256_adds_epi8(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the sums.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epi16(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_adds_epi16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_elementwise_add_sat((__v16hi)__a, (__v16hi)__b);
}
@@ -396,9 +399,8 @@ _mm256_adds_epi16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit integer vector containing one of the source operands.
/// \returns A 256-bit integer vector containing the sums.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epu8(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_adds_epu8(__m256i __a, __m256i __b) {
return (__m256i)__builtin_elementwise_add_sat((__v32qu)__a, (__v32qu)__b);
}
@@ -414,9 +416,8 @@ _mm256_adds_epu8(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the sums.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epu16(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_adds_epu16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_elementwise_add_sat((__v16hu)__a, (__v16hu)__b);
}
@@ -460,7 +461,7 @@ _mm256_adds_epu16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_and_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a & (__v4du)__b);
@@ -478,7 +479,7 @@ _mm256_and_si256(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_andnot_si256(__m256i __a, __m256i __b)
{
return (__m256i)(~(__v4du)__a & (__v4du)__b);
@@ -633,7 +634,7 @@ _mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
/// \param __b
/// A 256-bit integer vector containing one of the inputs.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cmpeq_epi8(__m256i __a, __m256i __b)
{
return (__m256i)((__v32qi)__a == (__v32qi)__b);
@@ -659,7 +660,7 @@ _mm256_cmpeq_epi8(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the inputs.
/// \returns A 256-bit vector of [16 x i16] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cmpeq_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hi)__a == (__v16hi)__b);
@@ -685,7 +686,7 @@ _mm256_cmpeq_epi16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the inputs.
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cmpeq_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8si)__a == (__v8si)__b);
@@ -711,7 +712,7 @@ _mm256_cmpeq_epi32(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [4 x i64] containing one of the inputs.
/// \returns A 256-bit vector of [4 x i64] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cmpeq_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4di)__a == (__v4di)__b);
@@ -737,7 +738,7 @@ _mm256_cmpeq_epi64(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit integer vector containing one of the inputs.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
{
/* This function always performs a signed comparison, but __v32qi is a char
@@ -765,7 +766,7 @@ _mm256_cmpgt_epi8(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the inputs.
/// \returns A 256-bit vector of [16 x i16] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cmpgt_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hi)__a > (__v16hi)__b);
@@ -791,7 +792,7 @@ _mm256_cmpgt_epi16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the inputs.
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cmpgt_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8si)__a > (__v8si)__b);
@@ -817,7 +818,7 @@ _mm256_cmpgt_epi32(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [4 x i64] containing one of the inputs.
/// \returns A 256-bit vector of [4 x i64] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cmpgt_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4di)__a > (__v4di)__b);
@@ -1363,9 +1364,8 @@ _mm256_movemask_epi8(__m256i __a)
/// A 128-bit integer vector containing the source bytes.
/// \returns A 256-bit vector of [16 x i16] containing the sign-extended
/// values.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi8_epi16(__m128i __V)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepi8_epi16(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi);
@@ -1391,9 +1391,8 @@ _mm256_cvtepi8_epi16(__m128i __V)
/// A 128-bit integer vector containing the source bytes.
/// \returns A 256-bit vector of [8 x i32] containing the sign-extended
/// values.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi8_epi32(__m128i __V)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepi8_epi32(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
@@ -1418,9 +1417,8 @@ _mm256_cvtepi8_epi32(__m128i __V)
/// A 128-bit integer vector containing the source bytes.
/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
/// values.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi8_epi64(__m128i __V)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepi8_epi64(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di);
@@ -1446,9 +1444,8 @@ _mm256_cvtepi8_epi64(__m128i __V)
/// A 128-bit vector of [8 x i16] containing the source values.
/// \returns A 256-bit vector of [8 x i32] containing the sign-extended
/// values.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi16_epi32(__m128i __V)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepi16_epi32(__m128i __V) {
return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);
}
@@ -1471,9 +1468,8 @@ _mm256_cvtepi16_epi32(__m128i __V)
/// A 128-bit vector of [8 x i16] containing the source values.
/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
/// values.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi16_epi64(__m128i __V)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepi16_epi64(__m128i __V) {
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di);
}
@@ -1496,9 +1492,8 @@ _mm256_cvtepi16_epi64(__m128i __V)
/// A 128-bit vector of [4 x i32] containing the source values.
/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
/// values.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi32_epi64(__m128i __V)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepi32_epi64(__m128i __V) {
return (__m256i)__builtin_convertvector((__v4si)__V, __v4di);
}
@@ -1522,9 +1517,8 @@ _mm256_cvtepi32_epi64(__m128i __V)
/// A 128-bit integer vector containing the source bytes.
/// \returns A 256-bit vector of [16 x i16] containing the zero-extended
/// values.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu8_epi16(__m128i __V)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepu8_epi16(__m128i __V) {
return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi);
}
@@ -1548,9 +1542,8 @@ _mm256_cvtepu8_epi16(__m128i __V)
/// A 128-bit integer vector containing the source bytes.
/// \returns A 256-bit vector of [8 x i32] containing the zero-extended
/// values.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu8_epi32(__m128i __V)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepu8_epi32(__m128i __V) {
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
}
@@ -1573,9 +1566,8 @@ _mm256_cvtepu8_epi32(__m128i __V)
/// A 128-bit integer vector containing the source bytes.
/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
/// values.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu8_epi64(__m128i __V)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepu8_epi64(__m128i __V) {
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);
}
@@ -1599,9 +1591,8 @@ _mm256_cvtepu8_epi64(__m128i __V)
/// A 128-bit vector of [8 x i16] containing the source values.
/// \returns A 256-bit vector of [8 x i32] containing the zero-extended
/// values.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu16_epi32(__m128i __V)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepu16_epi32(__m128i __V) {
return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);
}
@@ -1624,9 +1615,8 @@ _mm256_cvtepu16_epi32(__m128i __V)
/// A 128-bit vector of [8 x i16] containing the source values.
/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
/// values.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu16_epi64(__m128i __V)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepu16_epi64(__m128i __V) {
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di);
}
@@ -1649,9 +1639,8 @@ _mm256_cvtepu16_epi64(__m128i __V)
/// A 128-bit vector of [4 x i32] containing the source values.
/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
/// values.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu32_epi64(__m128i __V)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepu32_epi64(__m128i __V) {
return (__m256i)__builtin_convertvector((__v4su)__V, __v4di);
}
@@ -1675,9 +1664,8 @@ _mm256_cvtepu32_epi64(__m128i __V)
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \returns A 256-bit vector of [4 x i64] containing the products.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mul_epi32(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mul_epi32(__m256i __a, __m256i __b) {
return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);
}
@@ -1721,10 +1709,10 @@ _mm256_mulhrs_epi16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the products.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mulhi_epu16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
+ return (__m256i)__builtin_ia32_pmulhuw256((__v16hu)__a, (__v16hu)__b);
}
/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
@@ -1740,7 +1728,7 @@ _mm256_mulhi_epu16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the products.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mulhi_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
@@ -1759,7 +1747,7 @@ _mm256_mulhi_epi16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the products.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mullo_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hu)__a * (__v16hu)__b);
@@ -1804,9 +1792,8 @@ _mm256_mullo_epi32 (__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \returns A 256-bit vector of [4 x i64] containing the products.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mul_epu32(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mul_epu32(__m256i __a, __m256i __b) {
return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
}
@@ -1822,7 +1809,7 @@ _mm256_mul_epu32(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_or_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a | (__v4du)__b);
@@ -2134,9 +2121,8 @@ _mm256_sign_epi32(__m256i __a, __m256i __b)
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [16 x i16] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_slli_epi16(__m256i __a, int __count)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_slli_epi16(__m256i __a, int __count) {
return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);
}
@@ -2174,9 +2160,8 @@ _mm256_sll_epi16(__m256i __a, __m128i __count)
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_slli_epi32(__m256i __a, int __count)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_slli_epi32(__m256i __a, int __count) {
return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);
}
@@ -2214,9 +2199,8 @@ _mm256_sll_epi32(__m256i __a, __m128i __count)
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [4 x i64] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_slli_epi64(__m256i __a, int __count)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_slli_epi64(__m256i __a, int __count) {
return __builtin_ia32_psllqi256((__v4di)__a, __count);
}
@@ -2255,9 +2239,8 @@ _mm256_sll_epi64(__m256i __a, __m128i __count)
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [16 x i16] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi16(__m256i __a, int __count)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_srai_epi16(__m256i __a, int __count) {
return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);
}
@@ -2297,9 +2280,8 @@ _mm256_sra_epi16(__m256i __a, __m128i __count)
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi32(__m256i __a, int __count)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_srai_epi32(__m256i __a, int __count) {
return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);
}
@@ -2378,9 +2360,8 @@ _mm256_sra_epi32(__m256i __a, __m128i __count)
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [16 x i16] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srli_epi16(__m256i __a, int __count)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_srli_epi16(__m256i __a, int __count) {
return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);
}
@@ -2418,9 +2399,8 @@ _mm256_srl_epi16(__m256i __a, __m128i __count)
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srli_epi32(__m256i __a, int __count)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_srli_epi32(__m256i __a, int __count) {
return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);
}
@@ -2458,9 +2438,8 @@ _mm256_srl_epi32(__m256i __a, __m128i __count)
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [4 x i64] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srli_epi64(__m256i __a, int __count)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_srli_epi64(__m256i __a, int __count) {
return __builtin_ia32_psrlqi256((__v4di)__a, __count);
}
@@ -2611,9 +2590,8 @@ _mm256_sub_epi64(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit integer vector containing the subtrahends.
/// \returns A 256-bit integer vector containing the differences.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epi8(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_subs_epi8(__m256i __a, __m256i __b) {
return (__m256i)__builtin_elementwise_sub_sat((__v32qs)__a, (__v32qs)__b);
}
@@ -2637,9 +2615,8 @@ _mm256_subs_epi8(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing the subtrahends.
/// \returns A 256-bit vector of [16 x i16] containing the differences.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epi16(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_subs_epi16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_elementwise_sub_sat((__v16hi)__a, (__v16hi)__b);
}
@@ -2664,9 +2641,8 @@ _mm256_subs_epi16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit integer vector containing the subtrahends.
/// \returns A 256-bit integer vector containing the differences.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epu8(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_subs_epu8(__m256i __a, __m256i __b) {
return (__m256i)__builtin_elementwise_sub_sat((__v32qu)__a, (__v32qu)__b);
}
@@ -2690,9 +2666,8 @@ _mm256_subs_epu8(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing the subtrahends.
/// \returns A 256-bit vector of [16 x i16] containing the differences.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epu16(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_subs_epu16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_elementwise_sub_sat((__v16hu)__a, (__v16hu)__b);
}
@@ -2724,9 +2699,8 @@ _mm256_subs_epu16(__m256i __a, __m256i __b)
/// A 256-bit integer vector used as the source for the odd-numbered bytes
/// of the result.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi8(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_unpackhi_epi8(__m256i __a, __m256i __b) {
return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
}
@@ -2759,9 +2733,8 @@ _mm256_unpackhi_epi8(__m256i __a, __m256i __b)
/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [16 x i16] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi16(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_unpackhi_epi16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
}
@@ -2793,9 +2766,8 @@ _mm256_unpackhi_epi16(__m256i __a, __m256i __b)
/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi32(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_unpackhi_epi32(__m256i __a, __m256i __b) {
return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
}
@@ -2823,9 +2795,8 @@ _mm256_unpackhi_epi32(__m256i __a, __m256i __b)
/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [4 x i64] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi64(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_unpackhi_epi64(__m256i __a, __m256i __b) {
return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3);
}
@@ -2857,9 +2828,8 @@ _mm256_unpackhi_epi64(__m256i __a, __m256i __b)
/// A 256-bit integer vector used as the source for the odd-numbered bytes
/// of the result.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi8(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_unpacklo_epi8(__m256i __a, __m256i __b) {
return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
}
@@ -2892,9 +2862,8 @@ _mm256_unpacklo_epi8(__m256i __a, __m256i __b)
/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [16 x i16] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi16(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_unpacklo_epi16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
}
@@ -2926,9 +2895,8 @@ _mm256_unpacklo_epi16(__m256i __a, __m256i __b)
/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi32(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_unpacklo_epi32(__m256i __a, __m256i __b) {
return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
}
@@ -2956,9 +2924,8 @@ _mm256_unpacklo_epi32(__m256i __a, __m256i __b)
/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [4 x i64] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi64(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_unpacklo_epi64(__m256i __a, __m256i __b) {
return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2);
}
@@ -2974,7 +2941,7 @@ _mm256_unpacklo_epi64(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_xor_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a ^ (__v4du)__b);
@@ -3009,9 +2976,8 @@ _mm256_stream_load_si256(const void *__V)
/// \param __X
/// A 128-bit vector of [4 x float] whose low element will be broadcast.
/// \returns A 128-bit vector of [4 x float] containing the result.
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_broadcastss_ps(__m128 __X)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_broadcastss_ps(__m128 __X) {
return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
}
@@ -3026,9 +2992,8 @@ _mm_broadcastss_ps(__m128 __X)
/// \param __a
/// A 128-bit vector of [2 x double] whose low element will be broadcast.
/// \returns A 128-bit vector of [2 x double] containing the result.
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_broadcastsd_pd(__m128d __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_broadcastsd_pd(__m128d __a) {
return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
}
@@ -3043,9 +3008,8 @@ _mm_broadcastsd_pd(__m128d __a)
/// \param __X
/// A 128-bit vector of [4 x float] whose low element will be broadcast.
/// \returns A 256-bit vector of [8 x float] containing the result.
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_broadcastss_ps(__m128 __X)
-{
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_broadcastss_ps(__m128 __X) {
return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
@@ -3060,9 +3024,8 @@ _mm256_broadcastss_ps(__m128 __X)
/// \param __X
/// A 128-bit vector of [2 x double] whose low element will be broadcast.
/// \returns A 256-bit vector of [4 x double] containing the result.
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_broadcastsd_pd(__m128d __X)
-{
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_broadcastsd_pd(__m128d __X) {
return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
}
@@ -3076,9 +3039,8 @@ _mm256_broadcastsd_pd(__m128d __X)
/// \param __X
/// A 128-bit integer vector to be broadcast.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastsi128_si256(__m128i __X)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_broadcastsi128_si256(__m128i __X) {
return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
}
@@ -3168,9 +3130,8 @@ _mm256_broadcastsi128_si256(__m128i __X)
/// \param __X
/// A 128-bit integer vector whose low byte will be broadcast.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastb_epi8(__m128i __X)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_broadcastb_epi8(__m128i __X) {
return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
@@ -3184,9 +3145,8 @@ _mm256_broadcastb_epi8(__m128i __X)
/// \param __X
/// A 128-bit vector of [8 x i16] whose low element will be broadcast.
/// \returns A 256-bit vector of [16 x i16] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastw_epi16(__m128i __X)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_broadcastw_epi16(__m128i __X) {
return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
@@ -3200,9 +3160,8 @@ _mm256_broadcastw_epi16(__m128i __X)
/// \param __X
/// A 128-bit vector of [4 x i32] whose low element will be broadcast.
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastd_epi32(__m128i __X)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_broadcastd_epi32(__m128i __X) {
return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
@@ -3216,9 +3175,8 @@ _mm256_broadcastd_epi32(__m128i __X)
/// \param __X
/// A 128-bit vector of [2 x i64] whose low element will be broadcast.
/// \returns A 256-bit vector of [4 x i64] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastq_epi64(__m128i __X)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_broadcastq_epi64(__m128i __X) {
return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0);
}
@@ -3232,9 +3190,8 @@ _mm256_broadcastq_epi64(__m128i __X)
/// \param __X
/// A 128-bit integer vector whose low byte will be broadcast.
/// \returns A 128-bit integer vector containing the result.
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastb_epi8(__m128i __X)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_broadcastb_epi8(__m128i __X) {
return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
@@ -3248,9 +3205,8 @@ _mm_broadcastb_epi8(__m128i __X)
/// \param __X
/// A 128-bit vector of [8 x i16] whose low element will be broadcast.
/// \returns A 128-bit vector of [8 x i16] containing the result.
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastw_epi16(__m128i __X)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_broadcastw_epi16(__m128i __X) {
return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
@@ -3264,9 +3220,8 @@ _mm_broadcastw_epi16(__m128i __X)
/// \param __X
/// A 128-bit vector of [4 x i32] whose low element will be broadcast.
/// \returns A 128-bit vector of [4 x i32] containing the result.
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastd_epi32(__m128i __X)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_broadcastd_epi32(__m128i __X) {
return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
}
@@ -3280,9 +3235,8 @@ _mm_broadcastd_epi32(__m128i __X)
/// \param __X
/// A 128-bit vector of [2 x i64] whose low element will be broadcast.
/// \returns A 128-bit vector of [2 x i64] containing the result.
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastq_epi64(__m128i __X)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_broadcastq_epi64(__m128i __X) {
return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0);
}
@@ -3756,7 +3710,7 @@ _mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
/// bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_sllv_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
@@ -3778,7 +3732,7 @@ _mm256_sllv_epi32(__m256i __X, __m256i __Y)
/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
/// bits).
/// \returns A 128-bit vector of [4 x i32] containing the result.
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_sllv_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
@@ -3800,7 +3754,7 @@ _mm_sllv_epi32(__m128i __X, __m128i __Y)
/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in
/// bits).
/// \returns A 256-bit vector of [4 x i64] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_sllv_epi64(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);
@@ -3822,7 +3776,7 @@ _mm256_sllv_epi64(__m256i __X, __m256i __Y)
/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in
/// bits).
/// \returns A 128-bit vector of [2 x i64] containing the result.
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_sllv_epi64(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y);
@@ -3845,7 +3799,7 @@ _mm_sllv_epi64(__m128i __X, __m128i __Y)
/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
/// bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_srav_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
@@ -3868,7 +3822,7 @@ _mm256_srav_epi32(__m256i __X, __m256i __Y)
/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
/// bits).
/// \returns A 128-bit vector of [4 x i32] containing the result.
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_srav_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
@@ -3890,7 +3844,7 @@ _mm_srav_epi32(__m128i __X, __m128i __Y)
/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
/// bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_srlv_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
@@ -3912,7 +3866,7 @@ _mm256_srlv_epi32(__m256i __X, __m256i __Y)
/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
/// bits).
/// \returns A 128-bit vector of [4 x i32] containing the result.
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_srlv_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
@@ -3934,7 +3888,7 @@ _mm_srlv_epi32(__m128i __X, __m128i __Y)
/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in
/// bits).
/// \returns A 256-bit vector of [4 x i64] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_srlv_epi64(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);
@@ -3956,7 +3910,7 @@ _mm256_srlv_epi64(__m256i __X, __m256i __Y)
/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in
/// bits).
/// \returns A 128-bit vector of [2 x i64] containing the result.
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_srlv_epi64(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y);
@@ -5289,5 +5243,7 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
#undef __DEFAULT_FN_ATTRS256
#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
#endif /* __AVX2INTRIN_H */
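
All of the avx2intrin.h churn above serves one goal: selected intrinsics gain constexpr when the header is compiled as C++11 or later, and remain plain inline functions in C. The guard pattern is reusable on its own; a self-contained sketch with hypothetical names:

    #if defined(__cplusplus) && (__cplusplus >= 201103L)
    #define MY_FN_ATTRS_CONSTEXPR static inline constexpr
    #else
    #define MY_FN_ATTRS_CONSTEXPR static inline
    #endif

    MY_FN_ATTRS_CONSTEXPR int add2(int x) { return x + 2; }

    #if defined(__cplusplus) && (__cplusplus >= 201103L)
    // With the macro expanding to constexpr, calls fold at compile time.
    static_assert(add2(40) == 42, "usable in constant expressions");
    #endif

The brace reflow in the touched definitions is purely cosmetic; the __v16hu casts in _mm256_mulhi_epu16 presumably make the unsigned element type explicit so constant evaluation performs the unsigned high-multiply.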
diff --git a/clang/lib/Headers/avx512bitalgintrin.h b/clang/lib/Headers/avx512bitalgintrin.h
index 3c446b3..5cc3207 100644
--- a/clang/lib/Headers/avx512bitalgintrin.h
+++ b/clang/lib/Headers/avx512bitalgintrin.h
@@ -20,48 +20,42 @@
__target__("avx512bitalg,evex512"), \
__min_vector_width__(512)))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_popcnt_epi16(__m512i __A)
-{
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_popcnt_epi16(__m512i __A) {
return (__m512i)__builtin_elementwise_popcount((__v32hu)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_popcnt_epi16(__m512i __A, __mmask32 __U, __m512i __B)
-{
- return (__m512i) __builtin_ia32_selectw_512((__mmask32) __U,
- (__v32hi) _mm512_popcnt_epi16(__B),
- (__v32hi) __A);
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_mask_popcnt_epi16(__m512i __A, __mmask32 __U, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512(
+ (__mmask32)__U, (__v32hi)_mm512_popcnt_epi16(__B), (__v32hi)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_popcnt_epi16(__mmask32 __U, __m512i __B)
-{
- return _mm512_mask_popcnt_epi16((__m512i) _mm512_setzero_si512(),
- __U,
- __B);
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_maskz_popcnt_epi16(__mmask32 __U, __m512i __B) {
+ return _mm512_mask_popcnt_epi16((__m512i)_mm512_setzero_si512(), __U, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_popcnt_epi8(__m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_popcnt_epi8(__m512i __A) {
return (__m512i)__builtin_elementwise_popcount((__v64qu)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_popcnt_epi8(__m512i __A, __mmask64 __U, __m512i __B)
-{
- return (__m512i) __builtin_ia32_selectb_512((__mmask64) __U,
- (__v64qi) _mm512_popcnt_epi8(__B),
- (__v64qi) __A);
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_mask_popcnt_epi8(__m512i __A, __mmask64 __U, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectb_512(
+ (__mmask64)__U, (__v64qi)_mm512_popcnt_epi8(__B), (__v64qi)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_popcnt_epi8(__mmask64 __U, __m512i __B)
-{
- return _mm512_mask_popcnt_epi8((__m512i) _mm512_setzero_si512(),
- __U,
- __B);
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_maskz_popcnt_epi8(__mmask64 __U, __m512i __B) {
+ return _mm512_mask_popcnt_epi8((__m512i)_mm512_setzero_si512(), __U, __B);
}
static __inline__ __mmask64 __DEFAULT_FN_ATTRS
@@ -80,7 +74,7 @@ _mm512_bitshuffle_epi64_mask(__m512i __A, __m512i __B)
__B);
}
-
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#endif
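
avx512bitalgintrin.h gets the same conditional-constexpr treatment; its popcount intrinsics are built on __builtin_elementwise_popcount, which counts set bits per lane of any integer vector. A minimal standalone example:

    // Per-lane population count on a generic 4 x u32 vector; for
    // {1, 3, 255, 0} the result is {1, 2, 8, 0}.
    typedef unsigned int u32x4 __attribute__((vector_size(16)));

    u32x4 lane_popcount(u32x4 v) {
      return __builtin_elementwise_popcount(v);
    }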
diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h
index c854720..eabe215 100644
--- a/clang/lib/Headers/avx512bwintrin.h
+++ b/clang/lib/Headers/avx512bwintrin.h
@@ -25,6 +25,14 @@ typedef unsigned long long __mmask64;
__attribute__((__always_inline__, __nodebug__, \
__target__("avx512bw,no-evex512")))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512 constexpr
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
static __inline __mmask32 __DEFAULT_FN_ATTRS
_knot_mask32(__mmask32 __M)
{
@@ -438,7 +446,7 @@ _mm512_maskz_sub_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_mullo_epi16 (__m512i __A, __m512i __B) {
return (__m512i) ((__v32hu) __A * (__v32hu) __B);
}
@@ -473,45 +481,39 @@ _mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
(__v32hi) __A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi8 (__m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_abs_epi8(__m512i __A) {
return (__m512i)__builtin_elementwise_abs((__v64qs)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_abs_epi8(__m512i __W, __mmask64 __U, __m512i __A) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_abs_epi8(__A),
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_abs_epi8(__mmask64 __U, __m512i __A) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_abs_epi8(__A),
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi16 (__m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_abs_epi16(__m512i __A) {
return (__m512i)__builtin_elementwise_abs((__v32hi)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_abs_epi16(__m512i __W, __mmask32 __U, __m512i __A) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_abs_epi16(__A),
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_abs_epi16(__mmask32 __U, __m512i __A) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_abs_epi16(__A),
(__v32hi)_mm512_setzero_si512());
@@ -605,9 +607,8 @@ _mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epi8 (__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_adds_epi8(__m512i __A, __m512i __B) {
return (__m512i)__builtin_elementwise_add_sat((__v64qs)__A, (__v64qs)__B);
}
@@ -627,9 +628,8 @@ _mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epi16 (__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_adds_epi16(__m512i __A, __m512i __B) {
return (__m512i)__builtin_elementwise_add_sat((__v32hi)__A, (__v32hi)__B);
}
@@ -649,9 +649,8 @@ _mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epu8 (__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_adds_epu8(__m512i __A, __m512i __B) {
return (__m512i)__builtin_elementwise_add_sat((__v64qu) __A, (__v64qu) __B);
}
@@ -671,9 +670,8 @@ _mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epu16 (__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_adds_epu16(__m512i __A, __m512i __B) {
return (__m512i)__builtin_elementwise_add_sat((__v32hu) __A, (__v32hu) __B);
}
@@ -938,9 +936,8 @@ _mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epi8 (__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_subs_epi8(__m512i __A, __m512i __B) {
return (__m512i)__builtin_elementwise_sub_sat((__v64qs)__A, (__v64qs)__B);
}
@@ -960,9 +957,8 @@ _mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epi16 (__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_subs_epi16(__m512i __A, __m512i __B) {
return (__m512i)__builtin_elementwise_sub_sat((__v32hi)__A, (__v32hi)__B);
}
@@ -982,9 +978,8 @@ _mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epu8 (__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_subs_epu8(__m512i __A, __m512i __B) {
return (__m512i)__builtin_elementwise_sub_sat((__v64qu) __A, (__v64qu) __B);
}
@@ -1004,9 +999,8 @@ _mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epu16 (__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_subs_epu16(__m512i __A, __m512i __B) {
return (__m512i)__builtin_elementwise_sub_sat((__v32hu) __A, (__v32hu) __B);
}
@@ -1082,49 +1076,40 @@ _mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhi_epi16(__m512i __A, __m512i __B)
-{
- return (__m512i)__builtin_ia32_pmulhw512((__v32hi) __A, (__v32hi) __B);
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mulhi_epi16(__m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_pmulhw512((__v32hi)__A, (__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhi_epi16(__m512i __W, __mmask32 __U, __m512i __A,
- __m512i __B)
-{
- return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
- (__v32hi)_mm512_mulhi_epi16(__A, __B),
- (__v32hi)__W);
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_mulhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512(
+ (__mmask32)__U, (__v32hi)_mm512_mulhi_epi16(__A, __B), (__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
- return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
- (__v32hi)_mm512_mulhi_epi16(__A, __B),
- (__v32hi)_mm512_setzero_si512());
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512(
+ (__mmask32)__U, (__v32hi)_mm512_mulhi_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhi_epu16(__m512i __A, __m512i __B)
-{
- return (__m512i)__builtin_ia32_pmulhuw512((__v32hi) __A, (__v32hi) __B);
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mulhi_epu16(__m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_pmulhuw512((__v32hu)__A, (__v32hu)__B);
}
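
The cast change in _mm512_mulhi_epu16 above (from __v32hi to __v32hu) is not cosmetic: the high half of an unsigned 16-bit product generally differs from the signed one, and the constant evaluator needs the correct lane signedness. A one-lane scalar model (a sketch, not the header's code):

// High 16 bits of the full 32-bit unsigned product, as VPMULHUW computes
// per lane.
static inline unsigned short mulhi_u16(unsigned short a, unsigned short b) {
  return (unsigned short)(((unsigned)a * (unsigned)b) >> 16);
}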
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhi_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
- return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
- (__v32hi)_mm512_mulhi_epu16(__A, __B),
- (__v32hi)__W);
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_mulhi_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512(
+ (__mmask32)__U, (__v32hi)_mm512_mulhi_epu16(__A, __B), (__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
- return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
- (__v32hi)_mm512_mulhi_epu16(__A, __B),
- (__v32hi)_mm512_setzero_si512());
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_mulhi_epu16(__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512(
+ (__mmask32)__U, (__v32hi)_mm512_mulhi_epu16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1247,7 +1232,7 @@ _mm512_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
__builtin_ia32_pmovuswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_unpackhi_epi8(__m512i __A, __m512i __B) {
return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
8, 64+8, 9, 64+9,
@@ -1282,7 +1267,7 @@ _mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_unpackhi_epi16(__m512i __A, __m512i __B) {
return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
4, 32+4, 5, 32+5,
@@ -1309,7 +1294,7 @@ _mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_unpacklo_epi8(__m512i __A, __m512i __B) {
return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
0, 64+0, 1, 64+1,
@@ -1344,7 +1329,7 @@ _mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_unpacklo_epi16(__m512i __A, __m512i __B) {
return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
0, 32+0, 1, 32+1,
@@ -1371,9 +1356,8 @@ _mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi16(__m256i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepi8_epi16(__m256i __A) {
/* This function always performs a signed extension, but __v32qi is a char
which may be signed or unsigned, so use __v32qs. */
return (__m512i)__builtin_convertvector((__v32qs)__A, __v32hi);
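
As the header's own comment notes, __builtin_convertvector applies the ordinary C conversion lane by lane, so the source lane type decides whether the widening sign-extends; casting to __v32qs forces signed lanes regardless of the platform's char signedness. A small-scale model of the same builtin:

typedef signed char v4qs __attribute__((vector_size(4)));
typedef short v4hi __attribute__((vector_size(8)));

// Per-lane conversion follows C rules: -1 stays -1 (sign-extended).
static inline v4hi sext4(v4qs v) {
  return __builtin_convertvector(v, v4hi);
}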
@@ -1395,9 +1379,8 @@ _mm512_maskz_cvtepi8_epi16(__mmask32 __U, __m256i __A)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi16(__m256i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepu8_epi16(__m256i __A) {
return (__m512i)__builtin_convertvector((__v32qu)__A, __v32hi);
}
@@ -1494,24 +1477,21 @@ _mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi16(__m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_slli_epi16(__m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, (int)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
- unsigned int __B)
-{
+ unsigned int __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_slli_epi16(__A, __B),
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_slli_epi16(__A, __B),
(__v32hi)_mm512_setzero_si512());
@@ -1586,24 +1566,21 @@ _mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi16(__m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_srai_epi16(__m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, (int)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A,
- unsigned int __B)
-{
+ unsigned int __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srai_epi16(__A, __B),
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srai_epi16(__A, __B),
(__v32hi)_mm512_setzero_si512());
@@ -1631,24 +1608,21 @@ _mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi16(__m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_srli_epi16(__m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, (int)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
- unsigned int __B)
-{
+ unsigned int __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srli_epi16(__A, __B),
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srli_epi16(__A, (unsigned int)__B),
(__v32hi)_mm512_setzero_si512());
@@ -1883,9 +1857,8 @@ _mm512_movm_epi16 (__mmask32 __A)
return (__m512i) __builtin_ia32_cvtmask2w512 (__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastb_epi8 (__m128i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcastb_epi8(__m128i __A) {
return (__m512i)__builtin_shufflevector((__v16qi) __A, (__v16qi) __A,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1925,9 +1898,8 @@ _mm512_maskz_set1_epi16 (__mmask32 __M, short __A)
(__v32hi) _mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastw_epi16 (__m128i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcastw_epi16(__m128i __A) {
return (__m512i)__builtin_shufflevector((__v8hi) __A, (__v8hi) __A,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
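
Broadcasts like _mm512_broadcastw_epi16 above are plain shuffles whose index list names lane 0 repeatedly; because the indices are compile-time constants, the whole operation is constant-foldable, which is what makes the constexpr annotation viable. A four-lane model using the same builtin:

typedef int v4si __attribute__((vector_size(16)));

// Replicate lane 0 across all lanes: a shuffle with an all-zero index list.
static inline v4si splat0(v4si v) {
  return __builtin_shufflevector(v, v, 0, 0, 0, 0);
}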
@@ -2010,5 +1982,7 @@ _mm512_sad_epu8 (__m512i __A, __m512i __B)
#undef __DEFAULT_FN_ATTRS512
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS512_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#endif
diff --git a/clang/lib/Headers/avx512cdintrin.h b/clang/lib/Headers/avx512cdintrin.h
index 33b552f..39e7671 100644
--- a/clang/lib/Headers/avx512cdintrin.h
+++ b/clang/lib/Headers/avx512cdintrin.h
@@ -19,6 +19,12 @@
__attribute__((__always_inline__, __nodebug__, \
__target__("avx512cd,evex512"), __min_vector_width__(512)))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
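This guard is the pattern every header in the patch adds: constexpr exists only from C++11, and these headers are shared with C, so the attribute macro degrades to the plain inline form elsewhere. The same idea in miniature, with hypothetical names, valid as both C and C++:

#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define MY_CONSTEXPR constexpr
#else
#define MY_CONSTEXPR /* nothing: C and pre-C++11 see a plain function */
#endif

// Single-return body keeps this a valid C++11 constexpr function.
MY_CONSTEXPR static inline int add_sat8(int a, int b) {
  return (a + b) > 127 ? 127 : ((a + b) < -128 ? -128 : (a + b));
}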
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_conflict_epi64 (__m512i __A)
{
@@ -63,45 +69,41 @@ _mm512_maskz_conflict_epi32 (__mmask16 __U, __m512i __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_lzcnt_epi32 (__m512i __A)
-{
- return (__m512i) __builtin_ia32_vplzcntd_512 ((__v16si) __A);
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_lzcnt_epi32(__m512i __A) {
+ return (__m512i)__builtin_elementwise_ctlz((__v16si)__A,
+ (__v16si)_mm512_set1_epi32(32));
}
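
Replacing the target-specific __builtin_ia32_vplzcntd_512 with the generic __builtin_elementwise_ctlz makes the function constant-evaluable. As the diff's own usage shows, the second operand supplies the per-lane result for zero inputs, where a bare count-leading-zeros would be undefined; it matches VPLZCNTD's defined lzcnt(0) == 32. A one-lane scalar model:

// Scalar model of one 32-bit lane: __builtin_clz alone is undefined for 0,
// hence the explicit fallback, mirroring the builtin's second argument.
static inline int lzcnt32(unsigned x) {
  return x ? __builtin_clz(x) : 32;
}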
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_lzcnt_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_mask_lzcnt_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_lzcnt_epi32(__A),
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_lzcnt_epi32 (__mmask16 __U, __m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_maskz_lzcnt_epi32(__mmask16 __U, __m512i __A) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_lzcnt_epi32(__A),
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_lzcnt_epi64 (__m512i __A)
-{
- return (__m512i) __builtin_ia32_vplzcntq_512 ((__v8di) __A);
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_lzcnt_epi64(__m512i __A) {
+ return (__m512i)__builtin_elementwise_ctlz(
+ (__v8di)__A, (__v8di)_mm512_set1_epi64((long long)64));
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_lzcnt_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_mask_lzcnt_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_lzcnt_epi64(__A),
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_lzcnt_epi64 (__mmask8 __U, __m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_maskz_lzcnt_epi64(__mmask8 __U, __m512i __A) {
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_lzcnt_epi64(__A),
(__v8di)_mm512_setzero_si512());
@@ -121,5 +123,6 @@ _mm512_broadcastmw_epi32 (__mmask16 __A)
}
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#endif
diff --git a/clang/lib/Headers/avx512dqintrin.h b/clang/lib/Headers/avx512dqintrin.h
index 88b48e3..87d16b47 100644
--- a/clang/lib/Headers/avx512dqintrin.h
+++ b/clang/lib/Headers/avx512dqintrin.h
@@ -20,6 +20,14 @@
__attribute__((__always_inline__, __nodebug__, \
__target__("avx512dq,no-evex512")))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512 constexpr
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
static __inline __mmask8 __DEFAULT_FN_ATTRS
_knot_mask8(__mmask8 __M)
{
@@ -167,7 +175,7 @@ _mm512_maskz_mullo_epi64(__mmask8 __U, __m512i __A, __m512i __B) {
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_xor_pd(__m512d __A, __m512d __B) {
return (__m512d)((__v8du)__A ^ (__v8du)__B);
}
@@ -186,7 +194,7 @@ _mm512_maskz_xor_pd(__mmask8 __U, __m512d __A, __m512d __B) {
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_xor_ps (__m512 __A, __m512 __B) {
return (__m512)((__v16su)__A ^ (__v16su)__B);
}
@@ -205,7 +213,7 @@ _mm512_maskz_xor_ps(__mmask16 __U, __m512 __A, __m512 __B) {
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_or_pd(__m512d __A, __m512d __B) {
return (__m512d)((__v8du)__A | (__v8du)__B);
}
@@ -224,7 +232,7 @@ _mm512_maskz_or_pd(__mmask8 __U, __m512d __A, __m512d __B) {
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_or_ps(__m512 __A, __m512 __B) {
return (__m512)((__v16su)__A | (__v16su)__B);
}
@@ -243,7 +251,7 @@ _mm512_maskz_or_ps(__mmask16 __U, __m512 __A, __m512 __B) {
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_and_pd(__m512d __A, __m512d __B) {
return (__m512d)((__v8du)__A & (__v8du)__B);
}
@@ -262,7 +270,7 @@ _mm512_maskz_and_pd(__mmask8 __U, __m512d __A, __m512d __B) {
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_and_ps(__m512 __A, __m512 __B) {
return (__m512)((__v16su)__A & (__v16su)__B);
}
@@ -281,7 +289,7 @@ _mm512_maskz_and_ps(__mmask16 __U, __m512 __A, __m512 __B) {
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_andnot_pd(__m512d __A, __m512d __B) {
return (__m512d)(~(__v8du)__A & (__v8du)__B);
}
@@ -300,7 +308,7 @@ _mm512_maskz_andnot_pd(__mmask8 __U, __m512d __A, __m512d __B) {
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_andnot_ps(__m512 __A, __m512 __B) {
return (__m512)(~(__v16su)__A & (__v16su)__B);
}
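
The _pd/_ps logic ops above never perform floating-point arithmetic: the header puns the values to unsigned integer vectors of the same width, operates, and puns back, which is also why they can be constexpr. A scalar sketch of the same bit-punning (an illustrative helper, not the header's code):

#include <cstring>

// XOR the bit patterns of two floats; the result is exact since no FP
// arithmetic occurs. AND with ~sign-bit is the fabs mask; XOR with the
// sign bit negates.
static inline float xor1_ps(float a, float b) {
  unsigned ua, ub;
  std::memcpy(&ua, &a, sizeof ua);
  std::memcpy(&ub, &b, sizeof ub);
  ua ^= ub;
  std::memcpy(&a, &ua, sizeof a);
  return a;
}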
@@ -1076,10 +1084,8 @@ _mm512_movepi64_mask (__m512i __A)
return (__mmask8) __builtin_ia32_cvtq2mask512 ((__v8di) __A);
}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_broadcast_f32x2 (__m128 __A)
-{
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcast_f32x2(__m128 __A) {
return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 1);
@@ -1101,9 +1107,8 @@ _mm512_maskz_broadcast_f32x2 (__mmask16 __M, __m128 __A)
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_broadcast_f32x8(__m256 __A)
-{
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcast_f32x8(__m256 __A) {
return (__m512)__builtin_shufflevector((__v8sf)__A, (__v8sf)__A,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7);
@@ -1125,9 +1130,8 @@ _mm512_maskz_broadcast_f32x8(__mmask16 __M, __m256 __A)
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_broadcast_f64x2(__m128d __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcast_f64x2(__m128d __A) {
return (__m512d)__builtin_shufflevector((__v2df)__A, (__v2df)__A,
0, 1, 0, 1, 0, 1, 0, 1);
}
@@ -1148,9 +1152,8 @@ _mm512_maskz_broadcast_f64x2(__mmask8 __M, __m128d __A)
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcast_i32x2 (__m128i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcast_i32x2(__m128i __A) {
return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 1);
@@ -1172,9 +1175,8 @@ _mm512_maskz_broadcast_i32x2 (__mmask16 __M, __m128i __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcast_i32x8(__m256i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcast_i32x8(__m256i __A) {
return (__m512i)__builtin_shufflevector((__v8si)__A, (__v8si)__A,
0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7);
@@ -1196,9 +1198,8 @@ _mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcast_i64x2(__m128i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcast_i64x2(__m128i __A) {
return (__m512i)__builtin_shufflevector((__v2di)__A, (__v2di)__A,
0, 1, 0, 1, 0, 1, 0, 1);
}
@@ -1375,5 +1376,7 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A)
#undef __DEFAULT_FN_ATTRS512
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS512_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#endif
diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index 45e7eeb..0006e33 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -180,9 +180,9 @@ typedef enum
#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512 constexpr
#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
#else
-#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS128
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512
-#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
#endif
/* Create vectors with repeated elements */
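This hunk fixes cross-wired fallbacks: outside C++11, __DEFAULT_FN_ATTRS_CONSTEXPR previously expanded to the 128-bit attribute set and __DEFAULT_FN_ATTRS128_CONSTEXPR to the plain one, silently swapping __min_vector_width__ and target features between functions in C builds. The failure mode in miniature, with hypothetical names:

#define ATTRS    __attribute__((__min_vector_width__(512)))
#define ATTRS128 __attribute__((__min_vector_width__(128)))
/* Buggy fallback: ATTRS_CONSTEXPR must degrade to ATTRS, not ATTRS128,
   or every pre-C++11 build silently picks up the wrong attribute set. */
#define ATTRS_CONSTEXPR ATTRS128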
@@ -218,9 +218,8 @@ _mm512_undefined_epi32(void)
return (__m512i)__builtin_ia32_undef512();
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastd_epi32 (__m128i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcastd_epi32(__m128i __A) {
return (__m512i)__builtin_shufflevector((__v4si) __A, (__v4si) __A,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
@@ -241,9 +240,8 @@ _mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A)
(__v16si) _mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastq_epi64 (__m128i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcastq_epi64(__m128i __A) {
return (__m512i)__builtin_shufflevector((__v2di) __A, (__v2di) __A,
0, 0, 0, 0, 0, 0, 0, 0);
}
@@ -277,20 +275,20 @@ _mm512_setzero_pd(void) {
return __extension__(__m512d){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_set1_ps(float __w)
{
return __extension__ (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
__w, __w, __w, __w, __w, __w, __w, __w };
}
-static __inline __m512d __DEFAULT_FN_ATTRS512
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_set1_pd(double __w)
{
return __extension__ (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_set1_epi8(char __w)
{
return __extension__ (__m512i)(__v64qi){
@@ -304,7 +302,7 @@ _mm512_set1_epi8(char __w)
__w, __w, __w, __w, __w, __w, __w, __w };
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_set1_epi16(short __w)
{
return __extension__ (__m512i)(__v32hi){
@@ -314,7 +312,7 @@ _mm512_set1_epi16(short __w)
__w, __w, __w, __w, __w, __w, __w, __w };
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_set1_epi32(int __s)
{
return __extension__ (__m512i)(__v16si){
@@ -330,7 +328,7 @@ _mm512_maskz_set1_epi32(__mmask16 __M, int __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_set1_epi64(long long __d)
{
return __extension__(__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
@@ -344,39 +342,33 @@ _mm512_maskz_set1_epi64(__mmask8 __M, long long __A)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_broadcastss_ps(__m128 __A)
-{
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcastss_ps(__m128 __A) {
return (__m512)__builtin_shufflevector((__v4sf) __A, (__v4sf) __A,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set4_epi32 (int __A, int __B, int __C, int __D)
-{
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_set4_epi32(int __A, int __B, int __C, int __D) {
return __extension__ (__m512i)(__v16si)
{ __D, __C, __B, __A, __D, __C, __B, __A,
__D, __C, __B, __A, __D, __C, __B, __A };
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set4_epi64 (long long __A, long long __B, long long __C,
- long long __D)
-{
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_set4_epi64(long long __A, long long __B, long long __C, long long __D) {
return __extension__ (__m512i) (__v8di)
{ __D, __C, __B, __A, __D, __C, __B, __A };
}
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_set4_pd (double __A, double __B, double __C, double __D)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_set4_pd(double __A, double __B, double __C, double __D) {
return __extension__ (__m512d)
{ __D, __C, __B, __A, __D, __C, __B, __A };
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_set4_ps (float __A, float __B, float __C, float __D)
-{
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_set4_ps(float __A, float __B, float __C, float __D) {
return __extension__ (__m512)
{ __D, __C, __B, __A, __D, __C, __B, __A,
__D, __C, __B, __A, __D, __C, __B, __A };
@@ -394,9 +386,8 @@ _mm512_set4_ps (float __A, float __B, float __C, float __D)
#define _mm512_setr4_ps(e0,e1,e2,e3) \
_mm512_set4_ps((e3),(e2),(e1),(e0))
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_broadcastsd_pd(__m128d __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcastsd_pd(__m128d __A) {
return (__m512d)__builtin_shufflevector((__v2df) __A, (__v2df) __A,
0, 0, 0, 0, 0, 0, 0, 0);
}
@@ -435,9 +426,8 @@ _mm512_castps512_ps128(__m512 __a)
return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
}
-static __inline __m256 __DEFAULT_FN_ATTRS512
-_mm512_castps512_ps256 (__m512 __A)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_castps512_ps256(__m512 __A) {
return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, 4, 5, 6, 7);
}
@@ -516,9 +506,8 @@ _mm512_castsi512_si128 (__m512i __A)
return (__m128i)__builtin_shufflevector(__A, __A , 0, 1);
}
-static __inline __m256i __DEFAULT_FN_ATTRS512
-_mm512_castsi512_si256 (__m512i __A)
-{
+static __inline __m256i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_castsi512_si256(__m512i __A) {
return (__m256i)__builtin_shufflevector(__A, __A , 0, 1, 2, 3);
}
@@ -645,15 +634,14 @@ _mm512_zextsi256_si512(__m256i __a)
}
/* Bitwise operators */
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_and_epi32(__m512i __a, __m512i __b)
{
return (__m512i)((__v16su)__a & (__v16su)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
(__v16si) _mm512_and_epi32(__a, __b),
(__v16si) __src);
@@ -666,18 +654,16 @@ _mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)
__k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_and_epi64(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a & (__v8du)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
-{
- return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __k,
- (__v8di) _mm512_and_epi64(__a, __b),
- (__v8di) __src);
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b) {
+ return (__m512i)__builtin_ia32_selectq_512(
+ (__mmask8)__k, (__v8di)_mm512_and_epi64(__a, __b), (__v8di)__src);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -687,13 +673,13 @@ _mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b)
__k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_andnot_si512 (__m512i __A, __m512i __B)
{
return (__m512i)(~(__v8du)__A & (__v8du)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_andnot_epi32 (__m512i __A, __m512i __B)
{
return (__m512i)(~(__v16su)__A & (__v16su)__B);
@@ -714,7 +700,7 @@ _mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B)
__U, __A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_andnot_epi64(__m512i __A, __m512i __B)
{
return (__m512i)(~(__v8du)__A & (__v8du)__B);
@@ -735,7 +721,7 @@ _mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B)
__U, __A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_or_epi32(__m512i __a, __m512i __b)
{
return (__m512i)((__v16su)__a | (__v16su)__b);
@@ -755,7 +741,7 @@ _mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b)
return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_or_epi64(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a | (__v8du)__b);
@@ -775,7 +761,7 @@ _mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b)
return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_xor_epi32(__m512i __a, __m512i __b)
{
return (__m512i)((__v16su)__a ^ (__v16su)__b);
@@ -795,7 +781,7 @@ _mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b)
return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_xor_epi64(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a ^ (__v8du)__b);
@@ -815,19 +801,19 @@ _mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b)
return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_and_si512(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a & (__v8du)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_or_si512(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a | (__v8du)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_xor_si512(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a ^ (__v8du)__b);
@@ -835,45 +821,38 @@ _mm512_xor_si512(__m512i __a, __m512i __b)
/* Arithmetic */
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_add_pd(__m512d __a, __m512d __b)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_add_pd(__m512d __a, __m512d __b) {
return (__m512d)((__v8df)__a + (__v8df)__b);
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_add_ps(__m512 __a, __m512 __b)
-{
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_add_ps(__m512 __a, __m512 __b) {
return (__m512)((__v16sf)__a + (__v16sf)__b);
}
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mul_pd(__m512d __a, __m512d __b)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mul_pd(__m512d __a, __m512d __b) {
return (__m512d)((__v8df)__a * (__v8df)__b);
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mul_ps(__m512 __a, __m512 __b)
-{
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mul_ps(__m512 __a, __m512 __b) {
return (__m512)((__v16sf)__a * (__v16sf)__b);
}
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_sub_pd(__m512d __a, __m512d __b)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_sub_pd(__m512d __a, __m512d __b) {
return (__m512d)((__v8df)__a - (__v8df)__b);
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_sub_ps(__m512 __a, __m512 __b)
-{
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_sub_ps(__m512 __a, __m512 __b) {
return (__m512)((__v16sf)__a - (__v16sf)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi64 (__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_add_epi64(__m512i __A, __m512i __B) {
return (__m512i) ((__v8du) __A + (__v8du) __B);
}
@@ -1429,9 +1408,8 @@ _mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
(__v8di)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mul_epi32(__m512i __X, __m512i __Y)
-{
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mul_epi32(__m512i __X, __m512i __Y) {
return (__m512i)__builtin_ia32_pmuldq512((__v16si)__X, (__v16si) __Y);
}
@@ -1451,9 +1429,8 @@ _mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y)
(__v8di)_mm512_setzero_si512 ());
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mul_epu32(__m512i __X, __m512i __Y)
-{
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mul_epu32(__m512i __X, __m512i __Y) {
return (__m512i)__builtin_ia32_pmuludq512((__v16si)__X, (__v16si)__Y);
}
@@ -1866,45 +1843,39 @@ _mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi64(__m512i __A)
-{
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_abs_epi64(__m512i __A) {
return (__m512i)__builtin_elementwise_abs((__v8di)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_abs_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_abs_epi64(__A),
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_abs_epi64(__mmask8 __U, __m512i __A) {
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_abs_epi64(__A),
(__v8di)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi32(__m512i __A)
-{
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_abs_epi32(__m512i __A) {
return (__m512i)__builtin_elementwise_abs((__v16si) __A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_abs_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
return (__m512i)__builtin_ia32_selectd_512(__U,
(__v16si)_mm512_abs_epi32(__A),
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_abs_epi32(__mmask16 __U, __m512i __A) {
return (__m512i)__builtin_ia32_selectd_512(__U,
(__v16si)_mm512_abs_epi32(__A),
(__v16si)_mm512_setzero_si512());
@@ -2315,9 +2286,8 @@ _mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U), (int)(R)))
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_div_pd(__m512d __a, __m512d __b)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_div_pd(__m512d __a, __m512d __b) {
return (__m512d)((__v8df)__a/(__v8df)__b);
}
@@ -2335,9 +2305,8 @@ _mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) {
(__v8df)_mm512_setzero_pd());
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_div_ps(__m512 __a, __m512 __b)
-{
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_div_ps(__m512 __a, __m512 __b) {
return (__m512)((__v16sf)__a/(__v16sf)__b);
}
@@ -3615,115 +3584,99 @@ _mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A)
(__v16sf)_mm512_setzero_ps(), \
(__mmask16)(U), (int)(R)))
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_ps (__m512i __A)
-{
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepu32_ps(__m512i __A) {
return (__m512)__builtin_convertvector((__v16su)__A, __v16sf);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A)
-{
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_cvtepu32_ps(__m512 __W, __mmask16 __U, __m512i __A) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_cvtepu32_ps(__A),
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A)
-{
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_cvtepu32_ps(__mmask16 __U, __m512i __A) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_cvtepu32_ps(__A),
(__v16sf)_mm512_setzero_ps());
}
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_pd(__m256i __A)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepi32_pd(__m256i __A) {
return (__m512d)__builtin_convertvector((__v8si)__A, __v8df);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_cvtepi32_pd(__m512d __W, __mmask8 __U, __m256i __A) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
(__v8df)_mm512_cvtepi32_pd(__A),
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_cvtepi32_pd(__mmask8 __U, __m256i __A) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
(__v8df)_mm512_cvtepi32_pd(__A),
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32lo_pd(__m512i __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepi32lo_pd(__m512i __A) {
return (__m512d) _mm512_cvtepi32_pd(_mm512_castsi512_si256(__A));
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32lo_pd(__m512d __W, __mmask8 __U,__m512i __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_cvtepi32lo_pd(__m512d __W, __mmask8 __U, __m512i __A) {
return (__m512d) _mm512_mask_cvtepi32_pd(__W, __U, _mm512_castsi512_si256(__A));
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_ps (__m512i __A)
-{
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepi32_ps(__m512i __A) {
return (__m512)__builtin_convertvector((__v16si)__A, __v16sf);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A)
-{
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_cvtepi32_ps(__m512 __W, __mmask16 __U, __m512i __A) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_cvtepi32_ps(__A),
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A)
-{
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_cvtepi32_ps(__mmask16 __U, __m512i __A) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_cvtepi32_ps(__A),
(__v16sf)_mm512_setzero_ps());
}
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_pd(__m256i __A)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepu32_pd(__m256i __A) {
return (__m512d)__builtin_convertvector((__v8su)__A, __v8df);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_cvtepu32_pd(__m512d __W, __mmask8 __U, __m256i __A) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
(__v8df)_mm512_cvtepu32_pd(__A),
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_cvtepu32_pd(__mmask8 __U, __m256i __A) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
(__v8df)_mm512_cvtepu32_pd(__A),
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32lo_pd(__m512i __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepu32lo_pd(__m512i __A) {
return (__m512d) _mm512_cvtepu32_pd(_mm512_castsi512_si256(__A));
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U,__m512i __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U, __m512i __A) {
return (__m512d) _mm512_mask_cvtepu32_pd(__W, __U, _mm512_castsi512_si256(__A));
}
@@ -4123,9 +4076,8 @@ _mm512_cvtss_f32(__m512 __a)
/* Unpack and Interleave */
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_pd(__m512d __a, __m512d __b)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_unpackhi_pd(__m512d __a, __m512d __b) {
return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
}
@@ -4146,9 +4098,8 @@ _mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B)
(__v8df)_mm512_setzero_pd());
}
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_pd(__m512d __a, __m512d __b)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_unpacklo_pd(__m512d __a, __m512d __b) {
return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
}
@@ -4169,9 +4120,8 @@ _mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B)
(__v8df)_mm512_setzero_pd());
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_ps(__m512 __a, __m512 __b)
-{
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_unpackhi_ps(__m512 __a, __m512 __b) {
return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
2, 18, 3, 19,
2+4, 18+4, 3+4, 19+4,
@@ -4195,9 +4145,8 @@ _mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B)
(__v16sf)_mm512_setzero_ps());
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_ps(__m512 __a, __m512 __b)
-{
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_unpacklo_ps(__m512 __a, __m512 __b) {
return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
0, 16, 1, 17,
0+4, 16+4, 1+4, 17+4,
@@ -4221,9 +4170,8 @@ _mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B)
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi32(__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_unpackhi_epi32(__m512i __A, __m512i __B) {
return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
2, 18, 3, 19,
2+4, 18+4, 3+4, 19+4,
@@ -4247,9 +4195,8 @@ _mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A, __m512i __B)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi32(__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_unpacklo_epi32(__m512i __A, __m512i __B) {
return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
0, 16, 1, 17,
0+4, 16+4, 1+4, 17+4,
@@ -4273,9 +4220,8 @@ _mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A, __m512i __B)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi64(__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_unpackhi_epi64(__m512i __A, __m512i __B) {
return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
}
@@ -4296,9 +4242,8 @@ _mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A, __m512i __B)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi64 (__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_unpacklo_epi64(__m512i __A, __m512i __B) {
return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
}
@@ -4727,9 +4672,8 @@ _mm512_knot(__mmask16 __M)
#define _mm512_mask_cmpneq_epu64_mask(k, A, B) \
_mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi32(__m128i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepi8_epi32(__m128i __A) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
return (__m512i)__builtin_convertvector((__v16qs)__A, __v16si);
@@ -4751,9 +4695,8 @@ _mm512_maskz_cvtepi8_epi32(__mmask16 __U, __m128i __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi64(__m128i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepi8_epi64(__m128i __A) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__A, (__v16qs)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di);
@@ -4775,9 +4718,8 @@ _mm512_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
(__v8di)_mm512_setzero_si512 ());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_epi64(__m256i __X)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepi32_epi64(__m256i __X) {
return (__m512i)__builtin_convertvector((__v8si)__X, __v8di);
}
@@ -4797,9 +4739,8 @@ _mm512_maskz_cvtepi32_epi64(__mmask8 __U, __m256i __X)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_epi32(__m256i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepi16_epi32(__m256i __A) {
return (__m512i)__builtin_convertvector((__v16hi)__A, __v16si);
}
@@ -4819,9 +4760,8 @@ _mm512_maskz_cvtepi16_epi32(__mmask16 __U, __m256i __A)
(__v16si)_mm512_setzero_si512 ());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_epi64(__m128i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepi16_epi64(__m128i __A) {
return (__m512i)__builtin_convertvector((__v8hi)__A, __v8di);
}
@@ -4841,9 +4781,8 @@ _mm512_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi32(__m128i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepu8_epi32(__m128i __A) {
return (__m512i)__builtin_convertvector((__v16qu)__A, __v16si);
}
@@ -4863,9 +4802,8 @@ _mm512_maskz_cvtepu8_epi32(__mmask16 __U, __m128i __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi64(__m128i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepu8_epi64(__m128i __A) {
return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__A, (__v16qu)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di);
}
@@ -4885,9 +4823,8 @@ _mm512_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_epi64(__m256i __X)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepu32_epi64(__m256i __X) {
return (__m512i)__builtin_convertvector((__v8su)__X, __v8di);
}
@@ -4907,9 +4844,8 @@ _mm512_maskz_cvtepu32_epi64(__mmask8 __U, __m256i __X)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu16_epi32(__m256i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepu16_epi32(__m256i __A) {
return (__m512i)__builtin_convertvector((__v16hu)__A, __v16si);
}
@@ -4929,9 +4865,8 @@ _mm512_maskz_cvtepu16_epi32(__mmask16 __U, __m256i __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu16_epi64(__m128i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtepu16_epi64(__m128i __A) {
return (__m512i)__builtin_convertvector((__v8hu)__A, __v8di);
}
@@ -4954,7 +4889,7 @@ _mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_rorv_epi32 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_prorvd512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_fshr((__v16su)__A, (__v16su)__A, (__v16su)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -4976,7 +4911,7 @@ _mm512_maskz_rorv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_rorv_epi64 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_prorvq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_fshr((__v8du)__A, (__v8du)__A, (__v8du)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5066,7 +5001,7 @@ _mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_rolv_epi32 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_prolvd512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_fshl((__v16su)__A, (__v16su)__A, (__v16su)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5088,7 +5023,7 @@ _mm512_maskz_rolv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_rolv_epi64 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_prolvq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_fshl((__v8du)__A, (__v8du)__A, (__v8du)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
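
The rorv/rolv conversions above lower the rotates to the generic funnel-shift builtins with the input duplicated: a funnel shift of (x, x) is exactly a rotate, and unlike the retired __builtin_ia32_prorv*/__builtin_ia32_prolv* builtins it can be constant-evaluated. The identity in scalar form:

// fshr(x, x, n) == rotate-right: bits shifted out of the low copy re-enter
// from the high copy. Mask n and guard n == 0 to avoid a shift by 32.
static inline unsigned rotr32(unsigned x, unsigned n) {
  n &= 31;
  return n ? (x >> n) | (x << (32 - n)) : x;
}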
@@ -5133,91 +5068,81 @@ _mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
(__v8di)_mm512_ror_epi64((A), (B)), \
(__v8di)_mm512_setzero_si512()))
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi32(__m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_slli_epi32(__m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, (int)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
- unsigned int __B)
-{
+ unsigned int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_slli_epi32(__A, __B),
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_slli_epi32(__A, __B),
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi64(__m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_slli_epi64(__m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, (int)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A,
+ unsigned int __B) {
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_slli_epi64(__A, __B),
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_slli_epi64(__A, __B),
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi32(__m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_srli_epi32(__m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, (int)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
- unsigned int __B)
-{
+ unsigned int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_srli_epi32(__A, __B),
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_srli_epi32(__A, __B),
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi64(__m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_srli_epi64(__m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, (int)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A,
- unsigned int __B)
-{
+ unsigned int __B) {
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_srli_epi64(__A, __B),
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A,
- unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_srli_epi64(__A, __B),
(__v8di)_mm512_setzero_si512());
@@ -5303,7 +5228,7 @@ _mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A)
(__mmask8) __U);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_movedup_pd (__m512d __A)
{
return (__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A,
@@ -6622,46 +6547,41 @@ _mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
(__mmask8)(U), \
(int)(R)))
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi32(__m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_srai_epi32(__m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_psradi512((__v16si)__A, (int)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A,
- unsigned int __B)
-{
+ unsigned int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_srai_epi32(__A, __B),
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A,
- unsigned int __B) {
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_srai_epi32(__A, __B),
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi64(__m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_srai_epi64(__m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, (int)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A,
+ unsigned int __B) {
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_srai_epi64(__A, __B),
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_srai_epi64(__A, __B),
(__v8di)_mm512_setzero_si512());
@@ -6827,9 +6747,8 @@ _mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
(__v4sf)_mm_setzero_ps(), \
(__mmask8)(U), (int)(R)))
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_broadcast_f32x4(__m128 __A)
-{
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcast_f32x4(__m128 __A) {
return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
0, 1, 2, 3, 0, 1, 2, 3,
0, 1, 2, 3, 0, 1, 2, 3);
@@ -6851,9 +6770,8 @@ _mm512_maskz_broadcast_f32x4(__mmask16 __M, __m128 __A)
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_broadcast_f64x4(__m256d __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcast_f64x4(__m256d __A) {
return (__m512d)__builtin_shufflevector((__v4df)__A, (__v4df)__A,
0, 1, 2, 3, 0, 1, 2, 3);
}
@@ -6874,9 +6792,8 @@ _mm512_maskz_broadcast_f64x4(__mmask8 __M, __m256d __A)
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcast_i32x4(__m128i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcast_i32x4(__m128i __A) {
return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
0, 1, 2, 3, 0, 1, 2, 3,
0, 1, 2, 3, 0, 1, 2, 3);
@@ -6898,9 +6815,8 @@ _mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcast_i64x4(__m256i __A)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_broadcast_i64x4(__m256i __A) {
return (__m512i)__builtin_shufflevector((__v4di)__A, (__v4di)__A,
0, 1, 2, 3, 0, 1, 2, 3);
}
@@ -8665,7 +8581,7 @@ _mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
_mm512_setzero_si512());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_movehdup_ps (__m512 __A)
{
return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
@@ -8688,7 +8604,7 @@ _mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A)
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_moveldup_ps (__m512 __A)
{
return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
@@ -8941,70 +8857,57 @@ _mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A)
(__v8df)_mm512_setzero_pd(), \
(__mmask8)(U), (int)(R)))
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtps_pd (__m256 __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtps_pd(__m256 __A) {
return (__m512d) __builtin_convertvector((__v8sf)__A, __v8df);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtps_pd (__m512d __W, __mmask8 __U, __m256 __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_cvtps_pd(__m512d __W, __mmask8 __U, __m256 __A) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_cvtps_pd(__A),
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_cvtps_pd(__mmask8 __U, __m256 __A) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_cvtps_pd(__A),
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtpslo_pd (__m512 __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_cvtpslo_pd(__m512 __A) {
return (__m512d) _mm512_cvtps_pd(_mm512_castps512_ps256(__A));
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpslo_pd (__m512d __W, __mmask8 __U, __m512 __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_cvtpslo_pd(__m512d __W, __mmask8 __U, __m512 __A) {
return (__m512d) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A));
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
- return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
- (__v8df) __A,
- (__v8df) __W);
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_mov_pd(__m512d __W, __mmask8 __U, __m512d __A) {
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, (__v8df)__A,
+ (__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_pd (__mmask8 __U, __m512d __A)
-{
- return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
- (__v8df) __A,
- (__v8df) _mm512_setzero_pd ());
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_mov_pd(__mmask8 __U, __m512d __A) {
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, (__v8df)__A,
+ (__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
- return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
- (__v16sf) __A,
- (__v16sf) __W);
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_mov_ps(__m512 __W, __mmask16 __U, __m512 __A) {
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, (__v16sf)__A,
+ (__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_ps (__mmask16 __U, __m512 __A)
-{
- return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
- (__v16sf) __A,
- (__v16sf) _mm512_setzero_ps ());
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_mov_ps(__mmask16 __U, __m512 __A) {
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, (__v16sf)__A,
+ (__v16sf)_mm512_setzero_ps());
}
static __inline__ void __DEFAULT_FN_ATTRS512
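The masked moves above all reduce to the same per-lane select: bit i of the mask chooses between the source lane and the fallback (either __W or zero). A scalar model of a single lane, as a sketch (the helper name is hypothetical):

static inline double __select_lane(unsigned char __mask, int __i,
                                   double __a, double __w) {
  return ((__mask >> __i) & 1) ? __a : __w;  // 1: take __A; 0: keep __W
}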
@@ -9204,18 +9107,18 @@ _mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
(__v8di) __O);
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi8 (char __e63, char __e62, char __e61, char __e60, char __e59,
- char __e58, char __e57, char __e56, char __e55, char __e54, char __e53,
- char __e52, char __e51, char __e50, char __e49, char __e48, char __e47,
- char __e46, char __e45, char __e44, char __e43, char __e42, char __e41,
- char __e40, char __e39, char __e38, char __e37, char __e36, char __e35,
- char __e34, char __e33, char __e32, char __e31, char __e30, char __e29,
- char __e28, char __e27, char __e26, char __e25, char __e24, char __e23,
- char __e22, char __e21, char __e20, char __e19, char __e18, char __e17,
- char __e16, char __e15, char __e14, char __e13, char __e12, char __e11,
- char __e10, char __e9, char __e8, char __e7, char __e6, char __e5,
- char __e4, char __e3, char __e2, char __e1, char __e0) {
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_set_epi8(
+ char __e63, char __e62, char __e61, char __e60, char __e59, char __e58,
+ char __e57, char __e56, char __e55, char __e54, char __e53, char __e52,
+ char __e51, char __e50, char __e49, char __e48, char __e47, char __e46,
+ char __e45, char __e44, char __e43, char __e42, char __e41, char __e40,
+ char __e39, char __e38, char __e37, char __e36, char __e35, char __e34,
+ char __e33, char __e32, char __e31, char __e30, char __e29, char __e28,
+ char __e27, char __e26, char __e25, char __e24, char __e23, char __e22,
+ char __e21, char __e20, char __e19, char __e18, char __e17, char __e16,
+ char __e15, char __e14, char __e13, char __e12, char __e11, char __e10,
+ char __e9, char __e8, char __e7, char __e6, char __e5, char __e4, char __e3,
+ char __e2, char __e1, char __e0) {
return __extension__ (__m512i)(__v64qi)
{__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7,
@@ -9228,14 +9131,13 @@ _mm512_set_epi8 (char __e63, char __e62, char __e61, char __e60, char __e59,
__e56, __e57, __e58, __e59, __e60, __e61, __e62, __e63};
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi16(short __e31, short __e30, short __e29, short __e28,
- short __e27, short __e26, short __e25, short __e24, short __e23,
- short __e22, short __e21, short __e20, short __e19, short __e18,
- short __e17, short __e16, short __e15, short __e14, short __e13,
- short __e12, short __e11, short __e10, short __e9, short __e8,
- short __e7, short __e6, short __e5, short __e4, short __e3,
- short __e2, short __e1, short __e0) {
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_set_epi16(
+ short __e31, short __e30, short __e29, short __e28, short __e27,
+ short __e26, short __e25, short __e24, short __e23, short __e22,
+ short __e21, short __e20, short __e19, short __e18, short __e17,
+ short __e16, short __e15, short __e14, short __e13, short __e12,
+ short __e11, short __e10, short __e9, short __e8, short __e7, short __e6,
+ short __e5, short __e4, short __e3, short __e2, short __e1, short __e0) {
return __extension__ (__m512i)(__v32hi)
{__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7,
__e8, __e9, __e10, __e11, __e12, __e13, __e14, __e15,
@@ -9243,12 +9145,9 @@ _mm512_set_epi16(short __e31, short __e30, short __e29, short __e28,
__e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31 };
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi32 (int __A, int __B, int __C, int __D,
- int __E, int __F, int __G, int __H,
- int __I, int __J, int __K, int __L,
- int __M, int __N, int __O, int __P)
-{
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_set_epi32(
+ int __A, int __B, int __C, int __D, int __E, int __F, int __G, int __H,
+ int __I, int __J, int __K, int __L, int __M, int __N, int __O, int __P) {
return __extension__ (__m512i)(__v16si)
{ __P, __O, __N, __M, __L, __K, __J, __I,
__H, __G, __F, __E, __D, __C, __B, __A };
@@ -9259,11 +9158,9 @@ _mm512_set_epi32 (int __A, int __B, int __C, int __D,
_mm512_set_epi32((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6), \
(e5),(e4),(e3),(e2),(e1),(e0))
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi64 (long long __A, long long __B, long long __C,
- long long __D, long long __E, long long __F,
- long long __G, long long __H)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_set_epi64(long long __A, long long __B, long long __C, long long __D,
+ long long __E, long long __F, long long __G, long long __H) {
return __extension__ (__m512i) (__v8di)
{ __H, __G, __F, __E, __D, __C, __B, __A };
}
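Note the argument order in the set family: arguments are listed from the highest element down, so the initializer reverses them, while the setr macros reverse at the call site instead. Assuming standard Intel semantics:

__m512i __v = _mm512_set_epi64(7, 6, 5, 4, 3, 2, 1, 0);
// __v[0] == 0 and __v[7] == 7; _mm512_setr_epi64(0,1,2,3,4,5,6,7)
// produces the identical vector.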
@@ -9271,10 +9168,9 @@ _mm512_set_epi64 (long long __A, long long __B, long long __C,
#define _mm512_setr_epi64(e0,e1,e2,e3,e4,e5,e6,e7) \
_mm512_set_epi64((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_set_pd (double __A, double __B, double __C, double __D,
- double __E, double __F, double __G, double __H)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_set_pd(double __A, double __B, double __C, double __D, double __E,
+ double __F, double __G, double __H) {
return __extension__ (__m512d)
{ __H, __G, __F, __E, __D, __C, __B, __A };
}
@@ -9282,12 +9178,10 @@ _mm512_set_pd (double __A, double __B, double __C, double __D,
#define _mm512_setr_pd(e0,e1,e2,e3,e4,e5,e6,e7) \
_mm512_set_pd((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_set_ps (float __A, float __B, float __C, float __D,
- float __E, float __F, float __G, float __H,
- float __I, float __J, float __K, float __L,
- float __M, float __N, float __O, float __P)
-{
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_set_ps(float __A, float __B, float __C, float __D, float __E, float __F,
+ float __G, float __H, float __I, float __J, float __K, float __L,
+ float __M, float __N, float __O, float __P) {
return __extension__ (__m512)
{ __P, __O, __N, __M, __L, __K, __J, __I,
__H, __G, __F, __E, __D, __C, __B, __A };
@@ -9297,27 +9191,23 @@ _mm512_set_ps (float __A, float __B, float __C, float __D,
_mm512_set_ps((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6),(e5), \
(e4),(e3),(e2),(e1),(e0))
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_abs_ps(__m512 __A)
-{
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_abs_ps(__m512 __A) {
return (__m512)_mm512_and_epi32(_mm512_set1_epi32(0x7FFFFFFF), (__m512i)__A);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_ps(__m512 __W, __mmask16 __K, __m512 __A)
-{
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_abs_ps(__m512 __W, __mmask16 __K, __m512 __A) {
return (__m512)_mm512_mask_and_epi32((__m512i)__W, __K, _mm512_set1_epi32(0x7FFFFFFF), (__m512i)__A);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_abs_pd(__m512d __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_abs_pd(__m512d __A) {
return (__m512d)_mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF), (__v8di)__A);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
-{
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A) {
return (__m512d)_mm512_mask_and_epi64((__v8di)__W, __K, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF), (__v8di)__A);
}
@@ -9337,19 +9227,23 @@ _mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
* This takes log2(n) steps where n is the number of elements in the vector.
*/
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
+static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_reduce_add_epi64(__m512i __W) {
return __builtin_reduce_add((__v8di)__W);
}
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) {
+static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_reduce_mul_epi64(__m512i __W) {
return __builtin_reduce_mul((__v8di)__W);
}
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
+static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_reduce_and_epi64(__m512i __W) {
return __builtin_reduce_and((__v8di)__W);
}
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
+static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_reduce_or_epi64(__m512i __W) {
return __builtin_reduce_or((__v8di)__W);
}
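The log2(n)-step reduction mentioned in the comment above can be modeled in scalar code; a sketch for the 8 x 64-bit case (hypothetical helper, not part of the header):

static inline long long __reduce_add8(const long long __v[8]) {
  long long __t[8];
  for (int __i = 0; __i < 8; ++__i)
    __t[__i] = __v[__i];
  for (int __s = 4; __s >= 1; __s /= 2)      // 8 -> 4 -> 2 -> 1 lanes
    for (int __i = 0; __i < __s; ++__i)
      __t[__i] += __t[__i + __s];
  return __t[0];
}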
@@ -9400,22 +9294,22 @@ _mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) {
return __builtin_ia32_reduce_fmul_pd512(1.0, __W);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_add_epi32(__m512i __W) {
return __builtin_reduce_add((__v16si)__W);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_mul_epi32(__m512i __W) {
return __builtin_reduce_mul((__v16si)__W);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_and_epi32(__m512i __W) {
return __builtin_reduce_and((__v16si)__W);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_or_epi32(__m512i __W) {
return __builtin_reduce_or((__v16si)__W);
}
@@ -9466,22 +9360,22 @@ _mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) {
return __builtin_ia32_reduce_fmul_ps512(1.0f, __W);
}
-static __inline__ long long __DEFAULT_FN_ATTRS512
+static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_max_epi64(__m512i __V) {
return __builtin_reduce_max((__v8di)__V);
}
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_max_epu64(__m512i __V) {
return __builtin_reduce_max((__v8du)__V);
}
-static __inline__ long long __DEFAULT_FN_ATTRS512
+static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_min_epi64(__m512i __V) {
return __builtin_reduce_min((__v8di)__V);
}
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_min_epu64(__m512i __V) {
return __builtin_reduce_min((__v8du)__V);
}
@@ -9509,22 +9403,22 @@ _mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
__V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __V);
return __builtin_reduce_min((__v8du)__V);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_max_epi32(__m512i __V) {
return __builtin_reduce_max((__v16si)__V);
}
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
+static __inline__ unsigned int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_max_epu32(__m512i __V) {
return __builtin_reduce_max((__v16su)__V);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_min_epi32(__m512i __V) {
return __builtin_reduce_min((__v16si)__V);
}
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
+static __inline__ unsigned int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_min_epu32(__m512i __V) {
return __builtin_reduce_min((__v16su)__V);
}
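Across the masked reductions above, masked-off lanes are first replaced with the operation's identity so they cannot affect the result: 0 for add/or, 1 for mul, all-ones for and and unsigned min, and the extreme representable value for signed min/max. A scalar sketch of the unsigned-min case (helper name hypothetical):

static inline unsigned long long
__masked_min_u64(unsigned char __m, const unsigned long long __v[8]) {
  unsigned long long __r = ~0ULL;            // identity for unsigned min
  for (int __i = 0; __i < 8; ++__i)
    if (((__m >> __i) & 1) && __v[__i] < __r)
      __r = __v[__i];
  return __r;
}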
diff --git a/clang/lib/Headers/avx512fp16intrin.h b/clang/lib/Headers/avx512fp16intrin.h
index 92df320..6989b86 100644
--- a/clang/lib/Headers/avx512fp16intrin.h
+++ b/clang/lib/Headers/avx512fp16intrin.h
@@ -33,15 +33,26 @@ typedef _Float16 __m512h_u __attribute__((__vector_size__(64), __aligned__(1)));
__target__("avx512fp16,no-evex512"), \
__min_vector_width__(128)))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512 constexpr
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
+#else
+#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
+#endif
+
static __inline__ _Float16 __DEFAULT_FN_ATTRS512 _mm512_cvtsh_h(__m512h __a) {
return __a[0];
}
-static __inline __m128h __DEFAULT_FN_ATTRS128 _mm_setzero_ph(void) {
+static __inline __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_setzero_ph(void) {
return (__m128h){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
}
-static __inline __m256h __DEFAULT_FN_ATTRS256 _mm256_setzero_ph(void) {
+static __inline __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_setzero_ph(void) {
return (__m256h){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
}
@@ -50,7 +61,8 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_undefined_ph(void) {
return (__m256h)__builtin_ia32_undef256();
}
-static __inline __m512h __DEFAULT_FN_ATTRS512 _mm512_setzero_ph(void) {
+static __inline __m512h __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_setzero_ph(void) {
return (__m512h){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
@@ -64,14 +76,15 @@ static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_undefined_ph(void) {
return (__m512h)__builtin_ia32_undef512();
}
-static __inline __m512h __DEFAULT_FN_ATTRS512 _mm512_set1_ph(_Float16 __h) {
+static __inline __m512h __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_set1_ph(_Float16 __h) {
return (__m512h)(__v32hf){__h, __h, __h, __h, __h, __h, __h, __h,
__h, __h, __h, __h, __h, __h, __h, __h,
__h, __h, __h, __h, __h, __h, __h, __h,
__h, __h, __h, __h, __h, __h, __h, __h};
}
-static __inline __m512h __DEFAULT_FN_ATTRS512
+static __inline __m512h __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4,
_Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8,
_Float16 __h9, _Float16 __h10, _Float16 __h11, _Float16 __h12,
@@ -548,7 +561,8 @@ _mm512_maskz_max_ph(__mmask32 __U, __m512h __A, __m512h __B) {
(__mmask32)(U), (__v32hf)_mm512_max_round_ph((A), (B), (R)), \
(__v32hf)_mm512_setzero_ph()))
-static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_abs_ph(__m512h __A) {
+static __inline__ __m512h __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_abs_ph(__m512h __A) {
return (__m512h)_mm512_and_epi32(_mm512_set1_epi32(0x7FFF7FFF), (__m512i)__A);
}
@@ -3348,6 +3362,9 @@ _mm512_permutexvar_ph(__m512i __A, __m512h __B) {
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
#undef __DEFAULT_FN_ATTRS512
+#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS512_CONSTEXPR
#endif
#endif
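The same __cplusplus >= 201103L gate recurs in each header below: under C and pre-C++11 C++ the _CONSTEXPR macros degrade to the plain attribute sets, so existing translation units are unaffected, while C++11 and later gain compile-time evaluation. Assumed usage once the gate is active:

#if defined(__cplusplus) && __cplusplus >= 201103L
constexpr __m128h __z = _mm_setzero_ph();    // now folds at compile time
#endif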
diff --git a/clang/lib/Headers/avx512vbmi2intrin.h b/clang/lib/Headers/avx512vbmi2intrin.h
index 11598c8..f9a5f82 100644
--- a/clang/lib/Headers/avx512vbmi2intrin.h
+++ b/clang/lib/Headers/avx512vbmi2intrin.h
@@ -215,8 +215,8 @@ _mm512_maskz_expandloadu_epi8(__mmask64 __U, void const *__P)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_shldv_epi64(__m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i)__builtin_ia32_vpshldvq512((__v8di)__A, (__v8di)__B,
- (__v8di)__C);
+ return (__m512i)__builtin_elementwise_fshl((__v8du)__A, (__v8du)__B,
+ (__v8du)__C);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -238,8 +238,8 @@ _mm512_maskz_shldv_epi64(__mmask8 __U, __m512i __A, __m512i __B, __m512i __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_shldv_epi32(__m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i)__builtin_ia32_vpshldvd512((__v16si)__A, (__v16si)__B,
- (__v16si)__C);
+ return (__m512i)__builtin_elementwise_fshl((__v16su)__A, (__v16su)__B,
+ (__v16su)__C);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -261,8 +261,8 @@ _mm512_maskz_shldv_epi32(__mmask16 __U, __m512i __A, __m512i __B, __m512i __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_shldv_epi16(__m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i)__builtin_ia32_vpshldvw512((__v32hi)__A, (__v32hi)__B,
- (__v32hi)__C);
+ return (__m512i)__builtin_elementwise_fshl((__v32hu)__A, (__v32hu)__B,
+ (__v32hu)__C);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -284,8 +284,9 @@ _mm512_maskz_shldv_epi16(__mmask32 __U, __m512i __A, __m512i __B, __m512i __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_shrdv_epi64(__m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i)__builtin_ia32_vpshrdvq512((__v8di)__A, (__v8di)__B,
- (__v8di)__C);
+  // __A and __B are swapped: the intrinsic shifts the concatenation (__B:__A).
+ return (__m512i)__builtin_elementwise_fshr((__v8du)__B, (__v8du)__A,
+ (__v8du)__C);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -307,8 +308,9 @@ _mm512_maskz_shrdv_epi64(__mmask8 __U, __m512i __A, __m512i __B, __m512i __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_shrdv_epi32(__m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i)__builtin_ia32_vpshrdvd512((__v16si)__A, (__v16si)__B,
- (__v16si)__C);
+  // __A and __B are swapped: the intrinsic shifts the concatenation (__B:__A).
+ return (__m512i)__builtin_elementwise_fshr((__v16su)__B, (__v16su)__A,
+ (__v16su)__C);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -330,8 +332,9 @@ _mm512_maskz_shrdv_epi32(__mmask16 __U, __m512i __A, __m512i __B, __m512i __C)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_shrdv_epi16(__m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i)__builtin_ia32_vpshrdvw512((__v32hi)__A, (__v32hi)__B,
- (__v32hi)__C);
+  // __A and __B are swapped: the intrinsic shifts the concatenation (__B:__A).
+ return (__m512i)__builtin_elementwise_fshr((__v32hu)__B, (__v32hu)__A,
+ (__v32hu)__C);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
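The fshl/fshr rewrites in this file follow the funnel-shift definition: fshl(a, b, c) returns the high word of (a:b) << c, and fshr(a, b, c) the low word of (a:b) >> c. Because VPSHRDV shifts the concatenation (__B:__A), the operands must be passed to fshr in swapped order, as the comments note. A scalar model of one 64-bit lane (sketch; the helper name is hypothetical):

static inline unsigned long long
__fshr64(unsigned long long __hi, unsigned long long __lo, unsigned __c) {
  __c &= 63;                                 // shift count is taken mod 64
  return __c ? (__lo >> __c) | (__hi << (64 - __c)) : __lo;
}
// _mm512_shrdv_epi64(A, B, C), lane i:  __fshr64(B[i], A[i], C[i])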
diff --git a/clang/lib/Headers/avx512vlbitalgintrin.h b/clang/lib/Headers/avx512vlbitalgintrin.h
index 1b01fe0..21bf858 100644
--- a/clang/lib/Headers/avx512vlbitalgintrin.h
+++ b/clang/lib/Headers/avx512vlbitalgintrin.h
@@ -24,92 +24,76 @@
__target__("avx512vl,avx512bitalg,no-evex512"), \
__min_vector_width__(256)))
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_popcnt_epi16(__m256i __A)
-{
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
+#else
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
+#endif
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_popcnt_epi16(__m256i __A) {
return (__m256i)__builtin_elementwise_popcount((__v16hu)__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_popcnt_epi16(__m256i __A, __mmask16 __U, __m256i __B)
-{
- return (__m256i) __builtin_ia32_selectw_256((__mmask16) __U,
- (__v16hi) _mm256_popcnt_epi16(__B),
- (__v16hi) __A);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_popcnt_epi16(__m256i __A, __mmask16 __U, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_popcnt_epi16(__B), (__v16hi)__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_popcnt_epi16(__mmask16 __U, __m256i __B)
-{
- return _mm256_mask_popcnt_epi16((__m256i) _mm256_setzero_si256(),
- __U,
- __B);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_popcnt_epi16(__mmask16 __U, __m256i __B) {
+ return _mm256_mask_popcnt_epi16((__m256i)_mm256_setzero_si256(), __U, __B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_popcnt_epi16(__m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_popcnt_epi16(__m128i __A) {
return (__m128i)__builtin_elementwise_popcount((__v8hu)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_popcnt_epi16(__m128i __A, __mmask8 __U, __m128i __B)
-{
- return (__m128i) __builtin_ia32_selectw_128((__mmask8) __U,
- (__v8hi) _mm_popcnt_epi16(__B),
- (__v8hi) __A);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_popcnt_epi16(__m128i __A, __mmask8 __U, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_popcnt_epi16(__B), (__v8hi)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_popcnt_epi16(__mmask8 __U, __m128i __B)
-{
- return _mm_mask_popcnt_epi16((__m128i) _mm_setzero_si128(),
- __U,
- __B);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_popcnt_epi16(__mmask8 __U, __m128i __B) {
+ return _mm_mask_popcnt_epi16((__m128i)_mm_setzero_si128(), __U, __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_popcnt_epi8(__m256i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_popcnt_epi8(__m256i __A) {
return (__m256i)__builtin_elementwise_popcount((__v32qu)__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_popcnt_epi8(__m256i __A, __mmask32 __U, __m256i __B)
-{
- return (__m256i) __builtin_ia32_selectb_256((__mmask32) __U,
- (__v32qi) _mm256_popcnt_epi8(__B),
- (__v32qi) __A);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_popcnt_epi8(__m256i __A, __mmask32 __U, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_popcnt_epi8(__B), (__v32qi)__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_popcnt_epi8(__mmask32 __U, __m256i __B)
-{
- return _mm256_mask_popcnt_epi8((__m256i) _mm256_setzero_si256(),
- __U,
- __B);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_popcnt_epi8(__mmask32 __U, __m256i __B) {
+ return _mm256_mask_popcnt_epi8((__m256i)_mm256_setzero_si256(), __U, __B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_popcnt_epi8(__m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_popcnt_epi8(__m128i __A) {
return (__m128i)__builtin_elementwise_popcount((__v16qu)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_popcnt_epi8(__m128i __A, __mmask16 __U, __m128i __B)
-{
- return (__m128i) __builtin_ia32_selectb_128((__mmask16) __U,
- (__v16qi) _mm_popcnt_epi8(__B),
- (__v16qi) __A);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_popcnt_epi8(__m128i __A, __mmask16 __U, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_popcnt_epi8(__B), (__v16qi)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_popcnt_epi8(__mmask16 __U, __m128i __B)
-{
- return _mm_mask_popcnt_epi8((__m128i) _mm_setzero_si128(),
- __U,
- __B);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_popcnt_epi8(__mmask16 __U, __m128i __B) {
+ return _mm_mask_popcnt_epi8((__m128i)_mm_setzero_si128(), __U, __B);
}
static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
@@ -147,5 +131,7 @@ _mm_bitshuffle_epi64_mask(__m128i __A, __m128i __B)
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
#endif
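The popcount conversions above swap a target-specific builtin for the generic __builtin_elementwise_popcount, which counts set bits per lane and is constant-foldable. One lane, modeled in scalar code (sketch):

static inline unsigned short __popcnt16(unsigned short __x) {
  unsigned short __n = 0;
  while (__x) {
    __x &= (unsigned short)(__x - 1);        // clear the lowest set bit
    ++__n;
  }
  return __n;
}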
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 9aedba0..ea61440 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -24,6 +24,14 @@
__target__("avx512vl,avx512bw,no-evex512"), \
__min_vector_width__(256)))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
+#else
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
+#endif
+
/* Integer compare */
#define _mm_cmp_epi8_mask(a, b, p) \
@@ -478,65 +486,57 @@ _mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W)
(__v16hi) __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_abs_epi8(__A),
(__v16qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_abs_epi8(__mmask16 __U, __m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_abs_epi8(__mmask16 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_abs_epi8(__A),
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
(__v32qi)_mm256_abs_epi8(__A),
(__v32qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_abs_epi8 (__mmask32 __U, __m256i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_abs_epi8(__mmask32 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
(__v32qi)_mm256_abs_epi8(__A),
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_abs_epi16(__A),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_abs_epi16(__mmask8 __U, __m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_abs_epi16(__mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_abs_epi16(__A),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_abs_epi16(__A),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_abs_epi16(__A),
(__v16hi)_mm256_setzero_si256());
@@ -1592,67 +1592,62 @@ _mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_mulhi_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_mulhi_epu16(__A, __B),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_mulhi_epu16(__A, __B), (__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_mulhi_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhi_epu16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_mulhi_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_mulhi_epu16(__A, __B),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_mulhi_epu16(__A, __B), (__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_mulhi_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_mulhi_epu16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_mulhi_epu16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_mulhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_mulhi_epi16(__A, __B),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_mulhi_epi16(__A, __B), (__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_mulhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhi_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_mulhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_mulhi_epi16(__A, __B),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_mulhi_epi16(__A, __B), (__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_mulhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_mulhi_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_mulhi_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_unpackhi_epi8(__A, __B),
- (__v16qi)__W);
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_unpackhi_epi8(__A, __B), (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -1960,18 +1955,16 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, unsigned int __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A,
- unsigned int __B)
-{
+ unsigned int __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_slli_epi16(__A, (int)__B),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_slli_epi16(__A, (int)__B),
(__v16hi)_mm256_setzero_si256());
@@ -2097,34 +2090,30 @@ _mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_srai_epi16(__A, (int)__B),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_srai_epi16(__A, (int)__B),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A,
- unsigned int __B)
-{
+ unsigned int __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_srai_epi16(__A, (int)__B),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_srai_epi16(__A, (int)__B),
(__v16hi)_mm256_setzero_si256());
@@ -2162,103 +2151,90 @@ _mm256_maskz_srl_epi16(__mmask16 __U, __m256i __A, __m128i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_srli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_srli_epi16(__A, __B),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi16 (__mmask8 __U, __m128i __A, int __B)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_srli_epi16(__mmask8 __U, __m128i __A, int __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_srli_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_srli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_srli_epi16(__A, __B),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi16(__mmask16 __U, __m256i __A, int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_srli_epi16(__mmask16 __U, __m256i __A, int __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_srli_epi16(__A, __B),
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mov_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_mov_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
(__v8hi) __A,
(__v8hi) __W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_epi16 (__mmask8 __U, __m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_mov_epi16(__mmask8 __U, __m128i __A) {
return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
(__v8hi) __A,
(__v8hi) _mm_setzero_si128 ());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_mov_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
(__v16hi) __A,
(__v16hi) __W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_epi16 (__mmask16 __U, __m256i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_mov_epi16(__mmask16 __U, __m256i __A) {
return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
(__v16hi) __A,
(__v16hi) _mm256_setzero_si256 ());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mov_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_mov_epi8(__m128i __W, __mmask16 __U, __m128i __A) {
return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
(__v16qi) __A,
(__v16qi) __W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_epi8 (__mmask16 __U, __m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_mov_epi8(__mmask16 __U, __m128i __A) {
return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
(__v16qi) __A,
(__v16qi) _mm_setzero_si128 ());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_mov_epi8(__m256i __W, __mmask32 __U, __m256i __A) {
return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
(__v32qi) __A,
(__v32qi) __W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_epi8 (__mmask32 __U, __m256i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_mov_epi8(__mmask32 __U, __m256i __A) {
return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
(__v32qi) __A,
(__v32qi) _mm256_setzero_si256 ());
}
-
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A)
{
@@ -2809,353 +2785,353 @@ _mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
(__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
(__v16hi)_mm256_setzero_si256()))
-static __inline__ short __DEFAULT_FN_ATTRS128
+static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_add_epi16(__m128i __W) {
return __builtin_reduce_add((__v8hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS128
+static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_mul_epi16(__m128i __W) {
return __builtin_reduce_mul((__v8hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS128
+static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_and_epi16(__m128i __W) {
return __builtin_reduce_and((__v8hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS128
+static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_or_epi16(__m128i __W) {
return __builtin_reduce_or((__v8hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS128
-_mm_mask_reduce_add_epi16( __mmask8 __M, __m128i __W) {
+static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_reduce_add_epi16(__mmask8 __M, __m128i __W) {
__W = _mm_maskz_mov_epi16(__M, __W);
return __builtin_reduce_add((__v8hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS128
-_mm_mask_reduce_mul_epi16( __mmask8 __M, __m128i __W) {
+static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_reduce_mul_epi16(__mmask8 __M, __m128i __W) {
__W = _mm_mask_mov_epi16(_mm_set1_epi16(1), __M, __W);
return __builtin_reduce_mul((__v8hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS128
-_mm_mask_reduce_and_epi16( __mmask8 __M, __m128i __W) {
+static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_reduce_and_epi16(__mmask8 __M, __m128i __W) {
__W = _mm_mask_mov_epi16(_mm_set1_epi16(-1), __M, __W);
return __builtin_reduce_and((__v8hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS128
+static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_or_epi16(__mmask8 __M, __m128i __W) {
__W = _mm_maskz_mov_epi16(__M, __W);
return __builtin_reduce_or((__v8hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS128
+static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_max_epi16(__m128i __V) {
return __builtin_reduce_max((__v8hi)__V);
}
-static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_max_epu16(__m128i __V) {
return __builtin_reduce_max((__v8hu)__V);
}
-static __inline__ short __DEFAULT_FN_ATTRS128
+static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_min_epi16(__m128i __V) {
return __builtin_reduce_min((__v8hi)__V);
}
-static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_min_epu16(__m128i __V) {
return __builtin_reduce_min((__v8hu)__V);
}
-static __inline__ short __DEFAULT_FN_ATTRS128
+static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_max_epi16(__mmask16 __M, __m128i __V) {
__V = _mm_mask_mov_epi16(_mm_set1_epi16(-32767-1), __M, __V);
return __builtin_reduce_max((__v8hi)__V);
}
-static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_max_epu16(__mmask16 __M, __m128i __V) {
__V = _mm_maskz_mov_epi16(__M, __V);
return __builtin_reduce_max((__v8hu)__V);
}
-static __inline__ short __DEFAULT_FN_ATTRS128
+static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_min_epi16(__mmask16 __M, __m128i __V) {
__V = _mm_mask_mov_epi16(_mm_set1_epi16(32767), __M, __V);
return __builtin_reduce_min((__v8hi)__V);
}
-static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_min_epu16(__mmask16 __M, __m128i __V) {
__V = _mm_mask_mov_epi16(_mm_set1_epi16(-1), __M, __V);
return __builtin_reduce_min((__v8hu)__V);
}
-static __inline__ short __DEFAULT_FN_ATTRS256
+static __inline__ short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_add_epi16(__m256i __W) {
return __builtin_reduce_add((__v16hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS256
+static __inline__ short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_mul_epi16(__m256i __W) {
return __builtin_reduce_mul((__v16hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS256
+static __inline__ short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_and_epi16(__m256i __W) {
return __builtin_reduce_and((__v16hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS256
+static __inline__ short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_or_epi16(__m256i __W) {
return __builtin_reduce_or((__v16hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS256
-_mm256_mask_reduce_add_epi16( __mmask16 __M, __m256i __W) {
+static __inline__ short __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_reduce_add_epi16(__mmask16 __M, __m256i __W) {
__W = _mm256_maskz_mov_epi16(__M, __W);
return __builtin_reduce_add((__v16hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS256
-_mm256_mask_reduce_mul_epi16( __mmask16 __M, __m256i __W) {
+static __inline__ short __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_reduce_mul_epi16(__mmask16 __M, __m256i __W) {
__W = _mm256_mask_mov_epi16(_mm256_set1_epi16(1), __M, __W);
return __builtin_reduce_mul((__v16hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS256
-_mm256_mask_reduce_and_epi16( __mmask16 __M, __m256i __W) {
+static __inline__ short __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_reduce_and_epi16(__mmask16 __M, __m256i __W) {
__W = _mm256_mask_mov_epi16(_mm256_set1_epi16(-1), __M, __W);
return __builtin_reduce_and((__v16hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS256
+static __inline__ short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_or_epi16(__mmask16 __M, __m256i __W) {
__W = _mm256_maskz_mov_epi16(__M, __W);
return __builtin_reduce_or((__v16hi)__W);
}
-static __inline__ short __DEFAULT_FN_ATTRS256
+static __inline__ short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_max_epi16(__m256i __V) {
return __builtin_reduce_max((__v16hi)__V);
}
-static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_max_epu16(__m256i __V) {
return __builtin_reduce_max((__v16hu)__V);
}
-static __inline__ short __DEFAULT_FN_ATTRS256
+static __inline__ short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_min_epi16(__m256i __V) {
return __builtin_reduce_min((__v16hi)__V);
}
-static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_min_epu16(__m256i __V) {
return __builtin_reduce_min((__v16hu)__V);
}
-static __inline__ short __DEFAULT_FN_ATTRS256
+static __inline__ short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_max_epi16(__mmask16 __M, __m256i __V) {
__V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-32767-1), __M, __V);
return __builtin_reduce_max((__v16hi)__V);
}
-static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_max_epu16(__mmask16 __M, __m256i __V) {
__V = _mm256_maskz_mov_epi16(__M, __V);
return __builtin_reduce_max((__v16hu)__V);
}
-static __inline__ short __DEFAULT_FN_ATTRS256
+static __inline__ short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_min_epi16(__mmask16 __M, __m256i __V) {
__V = _mm256_mask_mov_epi16(_mm256_set1_epi16(32767), __M, __V);
return __builtin_reduce_min((__v16hi)__V);
}
-static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_min_epu16(__mmask16 __M, __m256i __V) {
__V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-1), __M, __V);
return __builtin_reduce_min((__v16hu)__V);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS128
+static __inline__ signed char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_add_epi8(__m128i __W) {
return __builtin_reduce_add((__v16qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS128
+static __inline__ signed char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_mul_epi8(__m128i __W) {
return __builtin_reduce_mul((__v16qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS128
+static __inline__ signed char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_and_epi8(__m128i __W) {
return __builtin_reduce_and((__v16qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS128
+static __inline__ signed char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_or_epi8(__m128i __W) {
return __builtin_reduce_or((__v16qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS128
+static __inline__ signed char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_add_epi8(__mmask16 __M, __m128i __W) {
__W = _mm_maskz_mov_epi8(__M, __W);
return __builtin_reduce_add((__v16qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS128
+static __inline__ signed char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_mul_epi8(__mmask16 __M, __m128i __W) {
__W = _mm_mask_mov_epi8(_mm_set1_epi8(1), __M, __W);
return __builtin_reduce_mul((__v16qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS128
+static __inline__ signed char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_and_epi8(__mmask16 __M, __m128i __W) {
__W = _mm_mask_mov_epi8(_mm_set1_epi8(-1), __M, __W);
return __builtin_reduce_and((__v16qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS128
+static __inline__ signed char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_or_epi8(__mmask16 __M, __m128i __W) {
__W = _mm_maskz_mov_epi8(__M, __W);
return __builtin_reduce_or((__v16qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS128
+static __inline__ signed char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_max_epi8(__m128i __V) {
return __builtin_reduce_max((__v16qs)__V);
}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_max_epu8(__m128i __V) {
return __builtin_reduce_max((__v16qu)__V);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS128
+static __inline__ signed char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_min_epi8(__m128i __V) {
return __builtin_reduce_min((__v16qs)__V);
}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_min_epu8(__m128i __V) {
return __builtin_reduce_min((__v16qu)__V);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS128
+static __inline__ signed char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_max_epi8(__mmask16 __M, __m128i __V) {
__V = _mm_mask_mov_epi8(_mm_set1_epi8(-127-1), __M, __V);
return __builtin_reduce_max((__v16qs)__V);
}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_max_epu8(__mmask16 __M, __m128i __V) {
__V = _mm_maskz_mov_epi8(__M, __V);
return __builtin_reduce_max((__v16qu)__V);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS128
+static __inline__ signed char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_min_epi8(__mmask16 __M, __m128i __V) {
__V = _mm_mask_mov_epi8(_mm_set1_epi8(127), __M, __V);
return __builtin_reduce_min((__v16qs)__V);
}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_min_epu8(__mmask16 __M, __m128i __V) {
__V = _mm_mask_mov_epi8(_mm_set1_epi8(-1), __M, __V);
return __builtin_reduce_min((__v16qu)__V);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS256
+static __inline__ signed char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_add_epi8(__m256i __W) {
return __builtin_reduce_add((__v32qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS256
+static __inline__ signed char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_mul_epi8(__m256i __W) {
return __builtin_reduce_mul((__v32qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS256
+static __inline__ signed char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_and_epi8(__m256i __W) {
return __builtin_reduce_and((__v32qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS256
+static __inline__ signed char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_or_epi8(__m256i __W) {
return __builtin_reduce_or((__v32qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS256
+static __inline__ signed char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_add_epi8(__mmask32 __M, __m256i __W) {
__W = _mm256_maskz_mov_epi8(__M, __W);
return __builtin_reduce_add((__v32qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS256
+static __inline__ signed char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_mul_epi8(__mmask32 __M, __m256i __W) {
__W = _mm256_mask_mov_epi8(_mm256_set1_epi8(1), __M, __W);
return __builtin_reduce_mul((__v32qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS256
+static __inline__ signed char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_and_epi8(__mmask32 __M, __m256i __W) {
__W = _mm256_mask_mov_epi8(_mm256_set1_epi8(-1), __M, __W);
return __builtin_reduce_and((__v32qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS256
+static __inline__ signed char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_or_epi8(__mmask32 __M, __m256i __W) {
__W = _mm256_maskz_mov_epi8(__M, __W);
return __builtin_reduce_or((__v32qs)__W);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS256
+static __inline__ signed char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_max_epi8(__m256i __V) {
return __builtin_reduce_max((__v32qs)__V);
}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_max_epu8(__m256i __V) {
return __builtin_reduce_max((__v32qu)__V);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS256
+static __inline__ signed char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_min_epi8(__m256i __V) {
return __builtin_reduce_min((__v32qs)__V);
}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_reduce_min_epu8(__m256i __V) {
return __builtin_reduce_min((__v32qu)__V);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS256
+static __inline__ signed char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_max_epi8(__mmask32 __M, __m256i __V) {
__V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-127-1), __M, __V);
return __builtin_reduce_max((__v32qs)__V);
}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_max_epu8(__mmask32 __M, __m256i __V) {
__V = _mm256_maskz_mov_epi8(__M, __V);
return __builtin_reduce_max((__v32qu)__V);
}
-static __inline__ signed char __DEFAULT_FN_ATTRS256
+static __inline__ signed char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_min_epi8(__mmask32 __M, __m256i __V) {
__V = _mm256_mask_mov_epi8(_mm256_set1_epi8(127), __M, __V);
return __builtin_reduce_min((__v32qs)__V);
}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_min_epu8(__mmask32 __M, __m256i __V) {
__V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-1), __M, __V);
return __builtin_reduce_min((__v32qu)__V);
@@ -3163,5 +3139,7 @@ _mm256_mask_reduce_min_epu8(__mmask32 __M, __m256i __V) {
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
#endif /* __AVX512VLBWINTRIN_H */
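Assumed usage of the masked reductions above, now also usable in constant expressions under C++11 and later: only mask-selected lanes contribute.

__m128i __v = _mm_set1_epi16(5);
short __s = _mm_mask_reduce_add_epi16(0x0F, __v);  // lanes 0..3: 4 * 5 == 20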
diff --git a/clang/lib/Headers/avx512vlcdintrin.h b/clang/lib/Headers/avx512vlcdintrin.h
index 923e2c5..8f42675 100644
--- a/clang/lib/Headers/avx512vlcdintrin.h
+++ b/clang/lib/Headers/avx512vlcdintrin.h
@@ -23,6 +23,14 @@
__target__("avx512vl,avx512cd,no-evex512"), \
__min_vector_width__(256)))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
+#else
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
+#endif
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastmb_epi64 (__mmask8 __A)
{
@@ -136,89 +144,81 @@ _mm256_maskz_conflict_epi32 (__mmask8 __U, __m256i __A)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_lzcnt_epi32 (__m128i __A)
-{
- return (__m128i) __builtin_ia32_vplzcntd_128 ((__v4si) __A);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_lzcnt_epi32(__m128i __A) {
+ return (__m128i)__builtin_elementwise_ctlz((__v4si)__A,
+ (__v4si)_mm_set1_epi32(32));
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_lzcnt_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_lzcnt_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_lzcnt_epi32(__A),
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_lzcnt_epi32 (__mmask8 __U, __m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_lzcnt_epi32(__mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_lzcnt_epi32(__A),
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_lzcnt_epi32 (__m256i __A)
-{
- return (__m256i) __builtin_ia32_vplzcntd_256 ((__v8si) __A);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_lzcnt_epi32(__m256i __A) {
+ return (__m256i)__builtin_elementwise_ctlz((__v8si)__A,
+ (__v8si)_mm256_set1_epi32(32));
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_lzcnt_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_lzcnt_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_lzcnt_epi32(__A),
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_lzcnt_epi32 (__mmask8 __U, __m256i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_lzcnt_epi32(__mmask8 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_lzcnt_epi32(__A),
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_lzcnt_epi64 (__m128i __A)
-{
- return (__m128i) __builtin_ia32_vplzcntq_128 ((__v2di) __A);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_lzcnt_epi64(__m128i __A) {
+ return (__m128i)__builtin_elementwise_ctlz(
+ (__v2di)__A, (__v2di)_mm_set1_epi64x((long long)64));
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_lzcnt_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_lzcnt_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_lzcnt_epi64(__A),
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_lzcnt_epi64 (__mmask8 __U, __m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_lzcnt_epi64(__mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_lzcnt_epi64(__A),
(__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_lzcnt_epi64 (__m256i __A)
-{
- return (__m256i) __builtin_ia32_vplzcntq_256 ((__v4di) __A);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_lzcnt_epi64(__m256i __A) {
+ return (__m256i)__builtin_elementwise_ctlz(
+ (__v4di)__A, (__v4di)_mm256_set1_epi64x((long long)64));
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_lzcnt_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_lzcnt_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_lzcnt_epi64(__A),
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_lzcnt_epi64 (__mmask8 __U, __m256i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_lzcnt_epi64(__mmask8 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_lzcnt_epi64(__A),
(__v4di)_mm256_setzero_si256());
@@ -226,5 +226,7 @@ _mm256_maskz_lzcnt_epi64 (__mmask8 __U, __m256i __A)
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
#endif /* __AVX512VLCDINTRIN_H */
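The lzcnt rewrites above trade the target-specific vplzcnt builtins for a generic two-operand form of __builtin_elementwise_ctlz; as used here, the second operand supplies the per-element result when an input element is zero (plain count-leading-zeros being undefined for 0), matching VPLZCNT returning the element width. A scalar model of that contract, with an illustrative name:

// Scalar model of lzcnt-with-fallback: count leading zeros, with an
// explicit, defined result for a zero input.
static unsigned lzcnt32(unsigned x) {
  return x ? (unsigned)__builtin_clz(x)  // __builtin_clz(0) is undefined
           : 32u;                        // VPLZCNTD yields the bit width
}

The _mm_set1_epi32(32) and _mm_set1_epi64x(64) vectors in the calls above are exactly this zero-input fallback.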
diff --git a/clang/lib/Headers/avx512vldqintrin.h b/clang/lib/Headers/avx512vldqintrin.h
index 272cdd8..ceebd09 100644
--- a/clang/lib/Headers/avx512vldqintrin.h
+++ b/clang/lib/Headers/avx512vldqintrin.h
@@ -24,6 +24,14 @@
__target__("avx512vl,avx512dq,no-evex512"), \
__min_vector_width__(256)))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
+#else
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
+#endif
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mullo_epi64 (__m256i __A, __m256i __B) {
return (__m256i) ((__v4du) __A * (__v4du) __B);
@@ -956,9 +964,8 @@ _mm256_movepi64_mask (__m256i __A)
return (__mmask8) __builtin_ia32_cvtq2mask256 ((__v4di) __A);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_broadcast_f32x2 (__m128 __A)
-{
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_broadcast_f32x2(__m128 __A) {
return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
0, 1, 0, 1, 0, 1, 0, 1);
}
@@ -979,9 +986,8 @@ _mm256_maskz_broadcast_f32x2 (__mmask8 __M, __m128 __A)
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_broadcast_f64x2(__m128d __A)
-{
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_broadcast_f64x2(__m128d __A) {
return (__m256d)__builtin_shufflevector((__v2df)__A, (__v2df)__A,
0, 1, 0, 1);
}
@@ -1002,9 +1008,8 @@ _mm256_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A)
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcast_i32x2 (__m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_broadcast_i32x2(__m128i __A) {
return (__m128i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
0, 1, 0, 1);
}
@@ -1025,9 +1030,8 @@ _mm_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcast_i32x2 (__m128i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_broadcast_i32x2(__m128i __A) {
return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
0, 1, 0, 1, 0, 1, 0, 1);
}
@@ -1048,9 +1052,8 @@ _mm256_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcast_i64x2(__m128i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_broadcast_i64x2(__m128i __A) {
return (__m256i)__builtin_shufflevector((__v2di)__A, (__v2di)__A,
0, 1, 0, 1);
}
@@ -1169,5 +1172,7 @@ _mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
#endif
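The broadcast intrinsics above now reduce to __builtin_shufflevector, whose index list selects lanes from the concatenation of its two vector operands; repeating indices 0 and 1 tiles the low element pair across the destination, and as a pure lane shuffle it is usable in constant expressions. A minimal clang-only sketch with illustrative typedefs:

// Broadcasting a 2-lane pair across a wider vector with
// __builtin_shufflevector (a clang extension).
typedef float f32x4 __attribute__((__vector_size__(16)));
typedef float f32x8 __attribute__((__vector_size__(32)));

static f32x8 broadcast_f32x2(f32x4 a) {
  // Repeat lanes {0,1} of a four times: {a0,a1,a0,a1,a0,a1,a0,a1}.
  return __builtin_shufflevector(a, a, 0, 1, 0, 1, 0, 1, 0, 1);
}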
diff --git a/clang/lib/Headers/avx512vlfp16intrin.h b/clang/lib/Headers/avx512vlfp16intrin.h
index a12acb7..98ad9b5 100644
--- a/clang/lib/Headers/avx512vlfp16intrin.h
+++ b/clang/lib/Headers/avx512vlfp16intrin.h
@@ -26,6 +26,14 @@
__target__("avx512fp16,avx512vl,no-evex512"), \
__min_vector_width__(128)))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
+#else
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
+#endif
+
static __inline__ _Float16 __DEFAULT_FN_ATTRS128 _mm_cvtsh_h(__m128h __a) {
return __a[0];
}
@@ -315,11 +323,13 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_max_ph(__mmask8 __U,
(__v8hf)_mm_setzero_ph());
}
-static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_abs_ph(__m256h __A) {
+static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_abs_ph(__m256h __A) {
return (__m256h)_mm256_and_epi32(_mm256_set1_epi32(0x7FFF7FFF), (__m256i)__A);
}
-static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_abs_ph(__m128h __A) {
+static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_abs_ph(__m128h __A) {
return (__m128h)_mm_and_epi32(_mm_set1_epi32(0x7FFF7FFF), (__m128i)__A);
}
@@ -1419,8 +1429,8 @@ _mm256_maskz_cvtxps_ph(__mmask8 __U, __m256 __A) {
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_ph(__m128h __A,
__m128h __B,
__m128h __C) {
- return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B,
- (__v8hf)__C);
+ return (__m128h)__builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B,
+ (__v8hf)__C);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_ph(__m128h __A,
@@ -1429,7 +1439,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_ph(__m128h __A,
__m128h __C) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
(__v8hf)__A);
}
@@ -1437,7 +1447,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
(__v8hf)__C);
}
@@ -1445,15 +1455,15 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
(__v8hf)_mm_setzero_ph());
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsub_ph(__m128h __A,
__m128h __B,
__m128h __C) {
- return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B,
- -(__v8hf)__C);
+ return (__m128h)__builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B,
+ -(__v8hf)__C);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_ph(__m128h __A,
@@ -1476,7 +1486,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
(__v8hf)__C);
}
@@ -1484,7 +1494,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
(__v8hf)_mm_setzero_ph());
}
@@ -1492,22 +1502,22 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
+ __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
(__v8hf)_mm_setzero_ph());
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmadd_ph(__m256h __A,
__m256h __B,
__m256h __C) {
- return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B,
- (__v16hf)__C);
+ return (__m256h)__builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B,
+ (__v16hf)__C);
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
(__v16hf)__A);
}
@@ -1515,7 +1525,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask3_fmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
(__v16hf)__C);
}
@@ -1523,22 +1533,22 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
(__v16hf)_mm256_setzero_ph());
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmsub_ph(__m256h __A,
__m256h __B,
__m256h __C) {
- return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B,
- -(__v16hf)__C);
+ return (__m256h)__builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B,
+ -(__v16hf)__C);
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
(__v16hf)__A);
}
@@ -1546,7 +1556,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
(__v16hf)_mm256_setzero_ph());
}
@@ -1554,7 +1564,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
(__v16hf)__C);
}
@@ -1562,7 +1572,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
(__v16hf)_mm256_setzero_ph());
}
@@ -1570,7 +1580,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
(__v16hf)_mm256_setzero_ph());
}
@@ -1684,7 +1694,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
(__v8hf)__C);
}
@@ -1692,7 +1702,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask3_fmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
(__v16hf)__C);
}
@@ -1715,45 +1725,45 @@ _mm256_mask3_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmadd_ph(__m128h __A,
__m128h __B,
__m128h __C) {
- return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B,
- (__v8hf)__C);
+ return (__m128h)__builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B,
+ (__v8hf)__C);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C),
(__v8hf)__A);
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmadd_ph(__m256h __A,
__m256h __B,
__m256h __C) {
- return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B,
- (__v16hf)__C);
+ return (__m256h)__builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B,
+ (__v16hf)__C);
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fnmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, (__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, (__v16hf)__C),
(__v16hf)__A);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmsub_ph(__m128h __A,
__m128h __B,
__m128h __C) {
- return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B,
- -(__v8hf)__C);
+ return (__m128h)__builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B,
+ -(__v8hf)__C);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
(__v8hf)__A);
}
@@ -1761,22 +1771,22 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
(__v8hf)__C);
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmsub_ph(__m256h __A,
__m256h __B,
__m256h __C) {
- return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B,
- -(__v16hf)__C);
+ return (__m256h)__builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B,
+ -(__v16hf)__C);
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fnmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
(__v16hf)__A);
}
@@ -1784,7 +1794,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
(__v16hf)__C);
}
@@ -2066,6 +2076,8 @@ _mm_reduce_min_ph(__m128h __V) {
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
#endif
#endif
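All of the fp16 fused forms above now funnel through a single call, __builtin_elementwise_fma(a, b, c), with fmsub/fnmadd/fnmsub derived by negating operands; IEEE negation is exact, so each variant still incurs only the single rounding of the fma. A scalar model of the sign-flip encoding:

// Scalar model of the operand-negation encoding: every fused form is
// one fma with signs folded into its inputs (fmaf from <math.h>).
#include <math.h>
static float fmadd (float a, float b, float c) { return fmaf( a, b,  c); }
static float fmsub (float a, float b, float c) { return fmaf( a, b, -c); }
static float fnmadd(float a, float b, float c) { return fmaf(-a, b,  c); }
static float fnmsub(float a, float b, float c) { return fmaf(-a, b, -c); }

Note that the hunks above spell fnmadd both as fma(-a, b, c) and as fma(a, -b, c); the two are equivalent, since only the sign of the product changes.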
diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h
index 2a5f7b4..6e16d2d 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -23,6 +23,14 @@
__target__("avx512vl,no-evex512"), \
__min_vector_width__(256)))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
+#else
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
+#endif
+
typedef short __v2hi __attribute__((__vector_size__(4)));
typedef char __v4qi __attribute__((__vector_size__(4)));
typedef char __v2qi __attribute__((__vector_size__(2)));
@@ -453,9 +461,8 @@ _mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
(__v4si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_and_epi32(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_and_epi32(__m256i __a, __m256i __b) {
return (__m256i)((__v8su)__a & (__v8su)__b);
}
@@ -473,9 +480,8 @@ _mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B)
return (__m256i)_mm256_mask_and_epi32(_mm256_setzero_si256(), __U, __A, __B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_and_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_and_epi32(__m128i __a, __m128i __b) {
return (__m128i)((__v4su)__a & (__v4su)__b);
}
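A side note on the macro block each header above now repeats: guarding the constexpr keyword behind __cplusplus >= 201103L lets the same declaration serve C, C++98, and constexpr-capable C++ alike, so intrinsics like the _mm_and_epi32 just rewritten become usable in constant expressions under C++11 and later without a second declaration. A minimal sketch of the pattern, with an illustrative macro name:

// constexpr-guard pattern: the declaration gains constexpr only when
// the translation unit is C++11 or later; C sees a plain inline.
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define ATTRS_CONSTEXPR static inline constexpr
#else
#define ATTRS_CONSTEXPR static inline
#endif

ATTRS_CONSTEXPR int add1(int x) { return x + 1; } // constant-evaluable
                                                  // in C++11 and later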
@@ -899,321 +905,289 @@ _mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C),
- (__v2df) __A);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
+ (__v2df)__A);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C),
- (__v2df) __C);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
+ (__v2df)__C);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C),
- (__v2df)_mm_setzero_pd());
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C),
- (__v2df) __A);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
+ (__v2df)__A);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C),
- (__v2df)_mm_setzero_pd());
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd (-(__v2df) __A,
- (__v2df) __B,
- (__v2df) __C),
- (__v2df) __C);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, (__v2df)__C),
+ (__v2df)__C);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd (-(__v2df) __A,
- (__v2df) __B,
- (__v2df) __C),
- (__v2df)_mm_setzero_pd());
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, (__v2df)__C),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd (-(__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C),
- (__v2df)_mm_setzero_pd());
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, -(__v2df)__C),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C),
- (__v4df) __A);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
+ (__v4df)__A);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C),
- (__v4df) __C);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
+ (__v4df)__C);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C),
- (__v4df)_mm256_setzero_pd());
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- (__v4df) __B,
- -(__v4df) __C),
- (__v4df) __A);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
+ (__v4df)__A);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- (__v4df) __B,
- -(__v4df) __C),
- (__v4df)_mm256_setzero_pd());
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
- (__v4df) __B,
- (__v4df) __C),
- (__v4df) __C);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, (__v4df)__C),
+ (__v4df)__C);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
- (__v4df) __B,
- (__v4df) __C),
- (__v4df)_mm256_setzero_pd());
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, (__v4df)__C),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
- (__v4df) __B,
- -(__v4df) __C),
- (__v4df)_mm256_setzero_pd());
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, -(__v4df)__C),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C),
- (__v4sf) __A);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+ (__v4sf)__A);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C),
- (__v4sf) __C);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+ (__v4sf)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C),
- (__v4sf)_mm_setzero_ps());
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C),
- (__v4sf) __A);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
+ (__v4sf)__A);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C),
- (__v4sf)_mm_setzero_ps());
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps (-(__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C),
- (__v4sf) __C);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+ (__v4sf)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps (-(__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C),
- (__v4sf)_mm_setzero_ps());
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps (-(__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C),
- (__v4sf)_mm_setzero_ps());
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C),
- (__v8sf) __A);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+ (__v8sf)__A);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C),
- (__v8sf) __C);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+ (__v8sf)__C);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C),
- (__v8sf)_mm256_setzero_ps());
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- (__v8sf) __B,
- -(__v8sf) __C),
- (__v8sf) __A);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
+ (__v8sf)__A);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- (__v8sf) __B,
- -(__v8sf) __C),
- (__v8sf)_mm256_setzero_ps());
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C),
- (__v8sf) __C);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+ (__v8sf)__C);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C),
- (__v8sf)_mm256_setzero_ps());
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
- (__v8sf) __B,
- -(__v8sf) __C),
- (__v8sf)_mm256_setzero_ps());
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
@@ -1420,41 +1394,37 @@ _mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C),
- (__v2df) __C);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
+ (__v2df)__C);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- (__v4df) __B,
- -(__v4df) __C),
- (__v4df) __C);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
+ (__v4df)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C),
- (__v4sf) __C);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
+ (__v4sf)__C);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- (__v8sf) __B,
- -(__v8sf) __C),
- (__v8sf) __C);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
+ (__v8sf)__C);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
@@ -1500,121 +1470,109 @@ _mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- -(__v2df) __B,
- (__v2df) __C),
- (__v2df) __A);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, (__v2df)__C),
+ (__v2df)__A);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- -(__v4df) __B,
- (__v4df) __C),
- (__v4df) __A);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, (__v4df)__C),
+ (__v4df)__A);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- -(__v4sf) __B,
- (__v4sf) __C),
- (__v4sf) __A);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C),
+ (__v4sf)__A);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- -(__v8sf) __B,
- (__v8sf) __C),
- (__v8sf) __A);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, (__v8sf)__C),
+ (__v8sf)__A);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- -(__v2df) __B,
- -(__v2df) __C),
- (__v2df) __A);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, -(__v2df)__C),
+ (__v2df)__A);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- -(__v2df) __B,
- -(__v2df) __C),
- (__v2df) __C);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, -(__v2df)__C),
+ (__v2df)__C);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- -(__v4df) __B,
- -(__v4df) __C),
- (__v4df) __A);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, -(__v4df)__C),
+ (__v4df)__A);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- -(__v4df) __B,
- -(__v4df) __C),
- (__v4df) __C);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, -(__v4df)__C),
+ (__v4df)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- -(__v4sf) __B,
- -(__v4sf) __C),
- (__v4sf) __A);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C),
+ (__v4sf)__A);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- -(__v4sf) __B,
- -(__v4sf) __C),
- (__v4sf) __C);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C),
+ (__v4sf)__C);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- -(__v8sf) __B,
- -(__v8sf) __C),
- (__v8sf) __A);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, -(__v8sf)__C),
+ (__v8sf)__A);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- -(__v8sf) __B,
- -(__v8sf) __C),
- (__v8sf) __C);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, -(__v8sf)__C),
+ (__v8sf)__C);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
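Every _mask/_mask3/_maskz variant in the hunk above wraps the unmasked computation in a __builtin_ia32_select* call, a per-lane blend steered by the mask bits: the _mask_ form keeps the accumulator __A in masked-off lanes, _mask3_ keeps __C, and _maskz_ zeroes them. A scalar model of that blend, with illustrative names:

// Scalar model of the per-lane select the _mask*/_maskz wrappers build
// on: lane i takes the computed value when mask bit i is set, and the
// per-variant fallback otherwise.
typedef float f32x4 __attribute__((__vector_size__(16)));

static f32x4 select_ps(unsigned char mask, f32x4 ontrue, f32x4 onfalse) {
  f32x4 r;
  for (int i = 0; i < 4; ++i)
    r[i] = (mask & (1u << i)) ? ontrue[i] : onfalse[i];
  return r; // _mask_* passes __A as onfalse, _mask3_* passes __C,
            // _maskz_* passes a zero vector.
}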
@@ -2964,67 +2922,67 @@ _mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) {
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_abs_epi32(__A),
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_abs_epi32(__A),
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_abs_epi32(__A),
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_abs_epi32(__A),
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_abs_epi64 (__m128i __A) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_abs_epi64(__m128i __A) {
return (__m128i)__builtin_elementwise_abs((__v2di)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_abs_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_abs_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_abs_epi64(__A),
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_abs_epi64(__mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_abs_epi64(__A),
(__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi64 (__m256i __A) {
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_abs_epi64(__m256i __A) {
return (__m256i)__builtin_elementwise_abs((__v4di)__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_abs_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_abs_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_abs_epi64(__A),
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A) {
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_abs_epi64(__mmask8 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_abs_epi64(__A),
(__v4di)_mm256_setzero_si256());
@@ -4358,7 +4316,7 @@ _mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_rolv_epi32 (__m128i __A, __m128i __B)
{
- return (__m128i)__builtin_ia32_prolvd128((__v4si)__A, (__v4si)__B);
+ return (__m128i)__builtin_elementwise_fshl((__v4su)__A, (__v4su)__A, (__v4su)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -4380,7 +4338,7 @@ _mm_maskz_rolv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_rolv_epi32 (__m256i __A, __m256i __B)
{
- return (__m256i)__builtin_ia32_prolvd256((__v8si)__A, (__v8si)__B);
+ return (__m256i)__builtin_elementwise_fshl((__v8su)__A, (__v8su)__A, (__v8su)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -4402,7 +4360,7 @@ _mm256_maskz_rolv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_rolv_epi64 (__m128i __A, __m128i __B)
{
- return (__m128i)__builtin_ia32_prolvq128((__v2di)__A, (__v2di)__B);
+ return (__m128i)__builtin_elementwise_fshl((__v2du)__A, (__v2du)__A, (__v2du)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -4424,7 +4382,7 @@ _mm_maskz_rolv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_rolv_epi64 (__m256i __A, __m256i __B)
{
- return (__m256i)__builtin_ia32_prolvq256((__v4di)__A, (__v4di)__B);
+ return (__m256i)__builtin_elementwise_fshl((__v4du)__A, (__v4du)__A, (__v4du)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -4543,17 +4501,16 @@ _mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A,
+ unsigned int __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_slli_epi32(__A, (int)__B),
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_slli_epi32(__A, (int)__B),
(__v8si)_mm256_setzero_si256());
@@ -4607,17 +4564,16 @@ _mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
(__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A,
+ unsigned int __B) {
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_slli_epi64(__A, (int)__B),
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B) {
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_slli_epi64(__A, (int)__B),
(__v4di)_mm256_setzero_si256());
@@ -4626,7 +4582,7 @@ _mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_rorv_epi32 (__m128i __A, __m128i __B)
{
- return (__m128i)__builtin_ia32_prorvd128((__v4si)__A, (__v4si)__B);
+ return (__m128i)__builtin_elementwise_fshr((__v4su)__A, (__v4su)__A, (__v4su)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -4648,7 +4604,7 @@ _mm_maskz_rorv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_rorv_epi32 (__m256i __A, __m256i __B)
{
- return (__m256i)__builtin_ia32_prorvd256((__v8si)__A, (__v8si)__B);
+ return (__m256i)__builtin_elementwise_fshr((__v8su)__A, (__v8su)__A, (__v8su)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -4670,7 +4626,7 @@ _mm256_maskz_rorv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_rorv_epi64 (__m128i __A, __m128i __B)
{
- return (__m128i)__builtin_ia32_prorvq128((__v2di)__A, (__v2di)__B);
+ return (__m128i)__builtin_elementwise_fshr((__v2du)__A, (__v2du)__A, (__v2du)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -4692,7 +4648,7 @@ _mm_maskz_rorv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_rorv_epi64 (__m256i __A, __m256i __B)
{
- return (__m256i)__builtin_ia32_prorvq256((__v4di)__A, (__v4di)__B);
+ return (__m256i)__builtin_elementwise_fshr((__v4du)__A, (__v4du)__A, (__v4du)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
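The variable-rotate rewrites above rest on the funnel-shift identity: with both data operands equal, fshl(x, x, n) is a left rotate and fshr(x, x, n) a right rotate, with the shift amount taken modulo the element width as with LLVM's fshl/fshr intrinsics. A scalar model for a 32-bit lane, with an illustrative name:

// Scalar model of fshl(x, x, n) == rotl(x, n); the count is reduced
// modulo the width, and the n == 0 case is split off to avoid the
// undefined 32-bit shift in the general expression.
static unsigned rotl32(unsigned x, unsigned n) {
  n &= 31;
  return n ? (x << n) | (x >> (32 - n)) : x;
}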
@@ -4711,7 +4667,7 @@ _mm256_maskz_rorv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -4719,7 +4675,7 @@ _mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -4727,7 +4683,7 @@ _mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
(__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -4735,7 +4691,7 @@ _mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -4743,7 +4699,7 @@ _mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4751,7 +4707,7 @@ _mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4759,7 +4715,7 @@ _mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4767,7 +4723,7 @@ _mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4775,7 +4731,7 @@ _mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -4783,7 +4739,7 @@ _mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -4791,7 +4747,7 @@ _mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
(__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -4799,7 +4755,7 @@ _mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -4807,7 +4763,7 @@ _mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4815,7 +4771,7 @@ _mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4823,7 +4779,7 @@ _mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4831,7 +4787,7 @@ _mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4887,17 +4843,16 @@ _mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A,
+ unsigned int __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_srli_epi32(__A, (int)__B),
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_srli_epi32(__A, (int)__B),
(__v8si)_mm256_setzero_si256());
@@ -4951,23 +4906,22 @@ _mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
(__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A,
+ unsigned int __B) {
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_srli_epi64(__A, (int)__B),
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B) {
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_srli_epi64(__A, (int)__B),
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4975,7 +4929,7 @@ _mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4983,7 +4937,7 @@ _mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4991,7 +4945,7 @@ _mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -6410,33 +6364,30 @@ _mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_srai_epi32(__A, (int)__B),
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_srai_epi32(__A, (int)__B),
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A,
+ unsigned int __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_srai_epi32(__A, (int)__B),
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_srai_epi32(__A, (int)__B),
(__v8si)_mm256_setzero_si256());
@@ -6486,46 +6437,40 @@ _mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srai_epi64(__m128i __A, unsigned int __imm)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_srai_epi64(__m128i __A, unsigned int __imm) {
return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, (int)__imm);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srai_epi64(
+ __m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm) {
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
(__v2di)_mm_srai_epi64(__A, __imm), \
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm) {
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
(__v2di)_mm_srai_epi64(__A, __imm), \
(__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi64(__m256i __A, unsigned int __imm)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_srai_epi64(__m256i __A, unsigned int __imm) {
return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, (int)__imm);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A,
- unsigned int __imm)
-{
+ unsigned int __imm) {
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
(__v4di)_mm256_srai_epi64(__A, __imm), \
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm) {
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
(__v4di)_mm256_srai_epi64(__A, __imm), \
(__v4di)_mm256_setzero_si256());
@@ -6792,9 +6737,8 @@ _mm256_maskz_rsqrt14_ps (__mmask8 __U, __m256 __A)
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_broadcast_f32x4(__m128 __A)
-{
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_broadcast_f32x4(__m128 __A) {
return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
0, 1, 2, 3, 0, 1, 2, 3);
}
@@ -6815,9 +6759,8 @@ _mm256_maskz_broadcast_f32x4 (__mmask8 __M, __m128 __A)
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcast_i32x4(__m128i __A)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_broadcast_i32x4(__m128i __A) {
return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
0, 1, 2, 3, 0, 1, 2, 3);
}
@@ -8306,68 +8249,52 @@ _mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A)
(__v4si)_mm_shuffle_epi32((A), (I)), \
(__v4si)_mm_setzero_si128()))
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A)
-{
- return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
- (__v2df) __A,
- (__v2df) __W);
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_mov_pd(__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, (__v2df)__A,
+ (__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_pd (__mmask8 __U, __m128d __A)
-{
- return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
- (__v2df) __A,
- (__v2df) _mm_setzero_pd ());
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_mov_pd(__mmask8 __U, __m128d __A) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, (__v2df)__A,
+ (__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_pd (__m256d __W, __mmask8 __U, __m256d __A)
-{
- return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
- (__v4df) __A,
- (__v4df) __W);
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_mov_pd(__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, (__v4df)__A,
+ (__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_pd (__mmask8 __U, __m256d __A)
-{
- return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
- (__v4df) __A,
- (__v4df) _mm256_setzero_pd ());
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_mov_pd(__mmask8 __U, __m256d __A) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, (__v4df)__A,
+ (__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_mov_ps (__m128 __W, __mmask8 __U, __m128 __A)
-{
- return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
- (__v4sf) __A,
- (__v4sf) __W);
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_mov_ps(__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, (__v4sf)__A,
+ (__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_ps (__mmask8 __U, __m128 __A)
-{
- return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
- (__v4sf) __A,
- (__v4sf) _mm_setzero_ps ());
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_maskz_mov_ps(__mmask8 __U, __m128 __A) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, (__v4sf)__A,
+ (__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_ps (__m256 __W, __mmask8 __U, __m256 __A)
-{
- return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
- (__v8sf) __A,
- (__v8sf) __W);
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_mov_ps(__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, (__v8sf)__A,
+ (__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_ps (__mmask8 __U, __m256 __A)
-{
- return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
- (__v8sf) __A,
- (__v8sf) _mm256_setzero_ps ());
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_mov_ps(__mmask8 __U, __m256 __A) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, (__v8sf)__A,
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -8430,8 +8357,9 @@ _mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
#define _mm256_mask_cvtps_ph _mm256_mask_cvt_roundps_ph
#define _mm256_maskz_cvtps_ph _mm256_maskz_cvt_roundps_ph
-
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
#endif /* __AVX512VLINTRIN_H */
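A quick note on the convention driving most of the hunks above and below: the `*_CONSTEXPR` attribute macros exist so the same intrinsic bodies can be declared `constexpr` for C++11 and later while staying plain always-inline functions in C. A minimal sketch of the pattern, using the macro names these headers define (the exact attribute contents vary per header, and `__DEFAULT_FN_ATTRS128` is assumed to be defined earlier in the file):

#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
#else
#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
#endif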
diff --git a/clang/lib/Headers/avx512vlvbmi2intrin.h b/clang/lib/Headers/avx512vlvbmi2intrin.h
index 77af2d5..04db52c 100644
--- a/clang/lib/Headers/avx512vlvbmi2intrin.h
+++ b/clang/lib/Headers/avx512vlvbmi2intrin.h
@@ -415,8 +415,8 @@ _mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shldv_epi64(__m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i)__builtin_ia32_vpshldvq256((__v4di)__A, (__v4di)__B,
- (__v4di)__C);
+ return (__m256i)__builtin_elementwise_fshl((__v4du)__A, (__v4du)__B,
+ (__v4du)__C);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -438,8 +438,8 @@ _mm256_maskz_shldv_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_shldv_epi64(__m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i)__builtin_ia32_vpshldvq128((__v2di)__A, (__v2di)__B,
- (__v2di)__C);
+ return (__m128i)__builtin_elementwise_fshl((__v2du)__A, (__v2du)__B,
+ (__v2du)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -461,8 +461,8 @@ _mm_maskz_shldv_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shldv_epi32(__m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i)__builtin_ia32_vpshldvd256((__v8si)__A, (__v8si)__B,
- (__v8si)__C);
+ return (__m256i)__builtin_elementwise_fshl((__v8su)__A, (__v8su)__B,
+ (__v8su)__C);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -484,8 +484,8 @@ _mm256_maskz_shldv_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_shldv_epi32(__m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i)__builtin_ia32_vpshldvd128((__v4si)__A, (__v4si)__B,
- (__v4si)__C);
+ return (__m128i)__builtin_elementwise_fshl((__v4su)__A, (__v4su)__B,
+ (__v4su)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -507,8 +507,8 @@ _mm_maskz_shldv_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shldv_epi16(__m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i)__builtin_ia32_vpshldvw256((__v16hi)__A, (__v16hi)__B,
- (__v16hi)__C);
+ return (__m256i)__builtin_elementwise_fshl((__v16hu)__A, (__v16hu)__B,
+ (__v16hu)__C);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -530,8 +530,8 @@ _mm256_maskz_shldv_epi16(__mmask16 __U, __m256i __A, __m256i __B, __m256i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_shldv_epi16(__m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i)__builtin_ia32_vpshldvw128((__v8hi)__A, (__v8hi)__B,
- (__v8hi)__C);
+ return (__m128i)__builtin_elementwise_fshl((__v8hu)__A, (__v8hu)__B,
+ (__v8hu)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -553,8 +553,9 @@ _mm_maskz_shldv_epi16(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shrdv_epi64(__m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i)__builtin_ia32_vpshrdvq256((__v4di)__A, (__v4di)__B,
- (__v4di)__C);
+ // Ops __A and __B are swapped.
+ return (__m256i)__builtin_elementwise_fshr((__v4du)__B, (__v4du)__A,
+ (__v4du)__C);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
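The "Ops __A and __B are swapped" comments added to these shrdv conversions are worth unpacking. LLVM's element-wise funnel shifts take the operand supplying the high bits first, while VPSHRDV concatenates __B above __A, so passing __B first preserves the intrinsic's semantics. A scalar model of one 64-bit lane, assuming the documented fshl/fshr semantics (shift amounts reduced modulo the lane width):

#include <stdint.h>

/* One lane of _mm_shrdv_epi64(__A, __B, __C): the low 64 bits of the
   128-bit value (b:a) shifted right by c. fshr takes (high, low, amount),
   so b goes first -- hence the swap in the header. */
static uint64_t shrdv_lane(uint64_t a, uint64_t b, uint64_t c) {
  c &= 63; /* lane-width modulo, matching fshr's behavior */
  return c ? (a >> c) | (b << (64 - c)) : a;
}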
@@ -576,8 +577,9 @@ _mm256_maskz_shrdv_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_shrdv_epi64(__m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i)__builtin_ia32_vpshrdvq128((__v2di)__A, (__v2di)__B,
- (__v2di)__C);
+ // Ops __A and __B are swapped.
+ return (__m128i)__builtin_elementwise_fshr((__v2du)__B, (__v2du)__A,
+ (__v2du)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -599,8 +601,9 @@ _mm_maskz_shrdv_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shrdv_epi32(__m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i)__builtin_ia32_vpshrdvd256((__v8si)__A, (__v8si)__B,
- (__v8si)__C);
+ // Ops __A and __B are swapped.
+ return (__m256i)__builtin_elementwise_fshr((__v8su)__B, (__v8su)__A,
+ (__v8su)__C);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -622,8 +625,9 @@ _mm256_maskz_shrdv_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_shrdv_epi32(__m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i)__builtin_ia32_vpshrdvd128((__v4si)__A, (__v4si)__B,
- (__v4si)__C);
+ // Ops __A and __B are swapped.
+ return (__m128i)__builtin_elementwise_fshr((__v4su)__B, (__v4su)__A,
+ (__v4su)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -645,8 +649,9 @@ _mm_maskz_shrdv_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shrdv_epi16(__m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i)__builtin_ia32_vpshrdvw256((__v16hi)__A, (__v16hi)__B,
- (__v16hi)__C);
+ // Ops __A and __B are swapped.
+ return (__m256i)__builtin_elementwise_fshr((__v16hu)__B, (__v16hu)__A,
+ (__v16hu)__C);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -668,8 +673,9 @@ _mm256_maskz_shrdv_epi16(__mmask16 __U, __m256i __A, __m256i __B, __m256i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_shrdv_epi16(__m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i)__builtin_ia32_vpshrdvw128((__v8hi)__A, (__v8hi)__B,
- (__v8hi)__C);
+ // Ops __A and __B are swapped.
+ return (__m128i)__builtin_elementwise_fshr((__v8hu)__B, (__v8hu)__A,
+ (__v8hu)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
diff --git a/clang/lib/Headers/avx512vpopcntdqintrin.h b/clang/lib/Headers/avx512vpopcntdqintrin.h
index e24c2c5..ac71808 100644
--- a/clang/lib/Headers/avx512vpopcntdqintrin.h
+++ b/clang/lib/Headers/avx512vpopcntdqintrin.h
@@ -16,19 +16,19 @@
#define __AVX512VPOPCNTDQINTRIN_H
/* Define the default attributes for the functions in this file. */
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, \
__target__("avx512vpopcntdq,evex512"), \
- __min_vector_width__(512)))
-
-#if defined(__cplusplus) && (__cplusplus >= 201103L)
-#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+ __min_vector_width__(512))) constexpr
#else
-#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vpopcntdq,evex512"), \
+ __min_vector_width__(512)))
#endif
-static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
-_mm512_popcnt_epi64(__m512i __A) {
+static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_popcnt_epi64(__m512i __A) {
return (__m512i)__builtin_elementwise_popcount((__v8du)__A);
}
@@ -43,8 +43,7 @@ _mm512_maskz_popcnt_epi64(__mmask8 __U, __m512i __A) {
return _mm512_mask_popcnt_epi64((__m512i)_mm512_setzero_si512(), __U, __A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
-_mm512_popcnt_epi32(__m512i __A) {
+static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_popcnt_epi32(__m512i __A) {
return (__m512i)__builtin_elementwise_popcount((__v16su)__A);
}
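Here the header drops the two-layer macro scheme altogether: because every definition left in the file is constant-evaluable, `constexpr` is folded directly into `__DEFAULT_FN_ATTRS` under the same C++11 guard, and the separate `__DEFAULT_FN_ATTRS_CONSTEXPR` name disappears.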
diff --git a/clang/lib/Headers/avx512vpopcntdqvlintrin.h b/clang/lib/Headers/avx512vpopcntdqvlintrin.h
index b6c819b..bed951b 100644
--- a/clang/lib/Headers/avx512vpopcntdqvlintrin.h
+++ b/clang/lib/Headers/avx512vpopcntdqvlintrin.h
@@ -16,6 +16,17 @@
#define __AVX512VPOPCNTDQVLINTRIN_H
/* Define the default attributes for the functions in this file. */
+
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vpopcntdq,avx512vl,no-evex512"), \
+ __min_vector_width__(128))) constexpr
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vpopcntdq,avx512vl,no-evex512"), \
+ __min_vector_width__(256))) constexpr
+#else
#define __DEFAULT_FN_ATTRS128 \
__attribute__((__always_inline__, __nodebug__, \
__target__("avx512vpopcntdq,avx512vl,no-evex512"), \
@@ -24,17 +35,9 @@
__attribute__((__always_inline__, __nodebug__, \
__target__("avx512vpopcntdq,avx512vl,no-evex512"), \
__min_vector_width__(256)))
-
-#if defined(__cplusplus) && (__cplusplus >= 201103L)
-#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
-#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
-#else
-#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
-#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
#endif
-static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_popcnt_epi64(__m128i __A) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_popcnt_epi64(__m128i __A) {
return (__m128i)__builtin_elementwise_popcount((__v2du)__A);
}
@@ -49,8 +52,7 @@ _mm_maskz_popcnt_epi64(__mmask8 __U, __m128i __A) {
return _mm_mask_popcnt_epi64((__m128i)_mm_setzero_si128(), __U, __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_popcnt_epi32(__m128i __A) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_popcnt_epi32(__m128i __A) {
return (__m128i)__builtin_elementwise_popcount((__v4su)__A);
}
@@ -65,7 +67,7 @@ _mm_maskz_popcnt_epi32(__mmask8 __U, __m128i __A) {
return _mm_mask_popcnt_epi32((__m128i)_mm_setzero_si128(), __U, __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_popcnt_epi64(__m256i __A) {
return (__m256i)__builtin_elementwise_popcount((__v4du)__A);
}
@@ -81,7 +83,7 @@ _mm256_maskz_popcnt_epi64(__mmask8 __U, __m256i __A) {
return _mm256_mask_popcnt_epi64((__m256i)_mm256_setzero_si256(), __U, __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_popcnt_epi32(__m256i __A) {
return (__m256i)__builtin_elementwise_popcount((__v8su)__A);
}
diff --git a/clang/lib/Headers/avxintrin.h b/clang/lib/Headers/avxintrin.h
index b9ca013..26096da9 100644
--- a/clang/lib/Headers/avxintrin.h
+++ b/clang/lib/Headers/avxintrin.h
@@ -70,8 +70,8 @@ typedef __bf16 __m256bh __attribute__((__vector_size__(32), __aligned__(32)));
#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
#else
-#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS128
-#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
#endif
/* Arithmetic */
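The macro hunk above is a straight bug fix: in the pre-C++11 fallback the two names were cross-wired, so `__DEFAULT_FN_ATTRS_CONSTEXPR` expanded to the 128-bit attribute set and `__DEFAULT_FN_ATTRS128_CONSTEXPR` to the default one. For C builds that presumably mismatched target attributes and minimum vector widths between the two intrinsic families; the fix restores the identity mapping.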
@@ -87,9 +87,8 @@ typedef __bf16 __m256bh __attribute__((__vector_size__(32), __aligned__(32)));
/// A 256-bit vector of [4 x double] containing one of the source operands.
/// \returns A 256-bit vector of [4 x double] containing the sums of both
/// operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_add_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_add_pd(__m256d __a, __m256d __b) {
return (__m256d)((__v4df)__a+(__v4df)__b);
}
@@ -105,9 +104,8 @@ _mm256_add_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing one of the source operands.
/// \returns A 256-bit vector of [8 x float] containing the sums of both
/// operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_add_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_add_ps(__m256 __a,
+ __m256 __b) {
return (__m256)((__v8sf)__a+(__v8sf)__b);
}
@@ -123,9 +121,8 @@ _mm256_add_ps(__m256 __a, __m256 __b)
/// A 256-bit vector of [4 x double] containing the subtrahend.
/// \returns A 256-bit vector of [4 x double] containing the differences between
/// both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_sub_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_sub_pd(__m256d __a, __m256d __b) {
return (__m256d)((__v4df)__a-(__v4df)__b);
}
@@ -141,9 +138,8 @@ _mm256_sub_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing the subtrahend.
/// \returns A 256-bit vector of [8 x float] containing the differences between
/// both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_sub_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_sub_ps(__m256 __a,
+ __m256 __b) {
return (__m256)((__v8sf)__a-(__v8sf)__b);
}
@@ -197,9 +193,8 @@ _mm256_addsub_ps(__m256 __a, __m256 __b)
/// A 256-bit vector of [4 x double] containing the divisor.
/// \returns A 256-bit vector of [4 x double] containing the quotients of both
/// operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_div_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_div_pd(__m256d __a, __m256d __b) {
return (__m256d)((__v4df)__a/(__v4df)__b);
}
@@ -215,9 +210,8 @@ _mm256_div_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing the divisor.
/// \returns A 256-bit vector of [8 x float] containing the quotients of both
/// operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_div_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_div_ps(__m256 __a,
+ __m256 __b) {
return (__m256)((__v8sf)__a/(__v8sf)__b);
}
@@ -317,9 +311,8 @@ _mm256_min_ps(__m256 __a, __m256 __b)
/// A 256-bit vector of [4 x double] containing one of the operands.
/// \returns A 256-bit vector of [4 x double] containing the products of both
/// operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_mul_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_mul_pd(__m256d __a, __m256d __b) {
return (__m256d)((__v4df)__a * (__v4df)__b);
}
@@ -335,9 +328,8 @@ _mm256_mul_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing one of the operands.
/// \returns A 256-bit vector of [8 x float] containing the products of both
/// operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_mul_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_mul_ps(__m256 __a,
+ __m256 __b) {
return (__m256)((__v8sf)__a * (__v8sf)__b);
}
@@ -555,7 +547,7 @@ _mm256_rcp_ps(__m256 __a)
/// A 256-bit vector of [4 x double] containing one of the source operands.
/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the
/// values between both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_and_pd(__m256d __a, __m256d __b)
{
return (__m256d)((__v4du)__a & (__v4du)__b);
@@ -573,7 +565,7 @@ _mm256_and_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing one of the source operands.
/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the
/// values between both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_and_ps(__m256 __a, __m256 __b)
{
return (__m256)((__v8su)__a & (__v8su)__b);
@@ -594,7 +586,7 @@ _mm256_and_ps(__m256 __a, __m256 __b)
/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the
/// values of the second operand and the one's complement of the first
/// operand.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_andnot_pd(__m256d __a, __m256d __b)
{
return (__m256d)(~(__v4du)__a & (__v4du)__b);
@@ -615,7 +607,7 @@ _mm256_andnot_pd(__m256d __a, __m256d __b)
/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the
/// values of the second operand and the one's complement of the first
/// operand.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_andnot_ps(__m256 __a, __m256 __b)
{
return (__m256)(~(__v8su)__a & (__v8su)__b);
@@ -633,7 +625,7 @@ _mm256_andnot_ps(__m256 __a, __m256 __b)
/// A 256-bit vector of [4 x double] containing one of the source operands.
/// \returns A 256-bit vector of [4 x double] containing the bitwise OR of the
/// values between both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_or_pd(__m256d __a, __m256d __b)
{
return (__m256d)((__v4du)__a | (__v4du)__b);
@@ -651,7 +643,7 @@ _mm256_or_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing one of the source operands.
/// \returns A 256-bit vector of [8 x float] containing the bitwise OR of the
/// values between both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_or_ps(__m256 __a, __m256 __b)
{
return (__m256)((__v8su)__a | (__v8su)__b);
@@ -669,7 +661,7 @@ _mm256_or_ps(__m256 __a, __m256 __b)
/// A 256-bit vector of [4 x double] containing one of the source operands.
/// \returns A 256-bit vector of [4 x double] containing the bitwise XOR of the
/// values between both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_xor_pd(__m256d __a, __m256d __b)
{
return (__m256d)((__v4du)__a ^ (__v4du)__b);
@@ -687,7 +679,7 @@ _mm256_xor_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing one of the source operands.
/// \returns A 256-bit vector of [8 x float] containing the bitwise XOR of the
/// values between both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_xor_ps(__m256 __a, __m256 __b)
{
return (__m256)((__v8su)__a ^ (__v8su)__b);
@@ -2190,9 +2182,8 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// \param __a
/// A 128-bit integer vector of [4 x i32].
/// \returns A 256-bit vector of [4 x double] containing the converted values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_cvtepi32_pd(__m128i __a)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_cvtepi32_pd(__m128i __a) {
return (__m256d)__builtin_convertvector((__v4si)__a, __v4df);
}
@@ -2205,9 +2196,8 @@ _mm256_cvtepi32_pd(__m128i __a)
/// \param __a
/// A 256-bit integer vector.
/// \returns A 256-bit vector of [8 x float] containing the converted values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_cvtepi32_ps(__m256i __a)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_cvtepi32_ps(__m256i __a) {
return (__m256)__builtin_convertvector((__v8si)__a, __v8sf);
}
@@ -2256,9 +2246,8 @@ _mm256_cvtps_epi32(__m256 __a)
/// \param __a
/// A 128-bit vector of [4 x float].
/// \returns A 256-bit vector of [4 x double] containing the converted values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_cvtps_pd(__m128 __a)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_cvtps_pd(__m128 __a) {
return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df);
}
@@ -2392,7 +2381,7 @@ _mm256_cvtss_f32(__m256 __a)
/// return value.
/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated
/// values.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_movehdup_ps(__m256 __a)
{
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7);
@@ -2417,7 +2406,7 @@ _mm256_movehdup_ps(__m256 __a)
/// return value.
/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated
/// values.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_moveldup_ps(__m256 __a)
{
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6);
@@ -2439,7 +2428,7 @@ _mm256_moveldup_ps(__m256 __a)
/// the return value.
/// \returns A 256-bit vector of [4 x double] containing the moved and
/// duplicated values.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_movedup_pd(__m256d __a)
{
return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2);
@@ -2462,9 +2451,8 @@ _mm256_movedup_pd(__m256d __a)
/// Bits [127:64] are written to bits [127:64] of the return value. \n
/// Bits [255:192] are written to bits [255:192] of the return value. \n
/// \returns A 256-bit vector of [4 x double] containing the interleaved values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_unpackhi_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_unpackhi_pd(__m256d __a, __m256d __b) {
return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2);
}
@@ -2484,9 +2472,8 @@ _mm256_unpackhi_pd(__m256d __a, __m256d __b)
/// Bits [63:0] are written to bits [127:64] of the return value. \n
/// Bits [191:128] are written to bits [255:192] of the return value. \n
/// \returns A 256-bit vector of [4 x double] containing the interleaved values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_unpacklo_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_unpacklo_pd(__m256d __a, __m256d __b) {
return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2);
}
@@ -2511,9 +2498,8 @@ _mm256_unpacklo_pd(__m256d __a, __m256d __b)
/// Bits [223:192] are written to bits [191:160] of the return value. \n
/// Bits [255:224] are written to bits [255:224] of the return value.
/// \returns A 256-bit vector of [8 x float] containing the interleaved values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_unpackhi_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_unpackhi_ps(__m256 __a, __m256 __b) {
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
}
@@ -2538,9 +2524,8 @@ _mm256_unpackhi_ps(__m256 __a, __m256 __b)
/// Bits [159:128] are written to bits [191:160] of the return value. \n
/// Bits [191:160] are written to bits [255:224] of the return value.
/// \returns A 256-bit vector of [8 x float] containing the interleaved values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_unpacklo_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_unpacklo_ps(__m256 __a, __m256 __b) {
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
}
@@ -4379,7 +4364,7 @@ _mm256_setzero_si256(void) {
/// A 256-bit floating-point vector of [4 x double].
/// \returns A 256-bit floating-point vector of [8 x float] containing the same
/// bitwise pattern as the parameter.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castpd_ps(__m256d __a)
{
return (__m256)__a;
@@ -4396,7 +4381,7 @@ _mm256_castpd_ps(__m256d __a)
/// A 256-bit floating-point vector of [4 x double].
/// \returns A 256-bit integer vector containing the same bitwise pattern as the
/// parameter.
-static __inline __m256i __DEFAULT_FN_ATTRS
+static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castpd_si256(__m256d __a)
{
return (__m256i)__a;
@@ -4413,7 +4398,7 @@ _mm256_castpd_si256(__m256d __a)
/// A 256-bit floating-point vector of [8 x float].
/// \returns A 256-bit floating-point vector of [4 x double] containing the same
/// bitwise pattern as the parameter.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castps_pd(__m256 __a)
{
return (__m256d)__a;
@@ -4430,7 +4415,7 @@ _mm256_castps_pd(__m256 __a)
/// A 256-bit floating-point vector of [8 x float].
/// \returns A 256-bit integer vector containing the same bitwise pattern as the
/// parameter.
-static __inline __m256i __DEFAULT_FN_ATTRS
+static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castps_si256(__m256 __a)
{
return (__m256i)__a;
@@ -4447,7 +4432,7 @@ _mm256_castps_si256(__m256 __a)
/// A 256-bit integer vector.
/// \returns A 256-bit floating-point vector of [8 x float] containing the same
/// bitwise pattern as the parameter.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castsi256_ps(__m256i __a)
{
return (__m256)__a;
@@ -4464,7 +4449,7 @@ _mm256_castsi256_ps(__m256i __a)
/// A 256-bit integer vector.
/// \returns A 256-bit floating-point vector of [4 x double] containing the same
/// bitwise pattern as the parameter.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castsi256_pd(__m256i __a)
{
return (__m256d)__a;
@@ -4481,7 +4466,7 @@ _mm256_castsi256_pd(__m256i __a)
/// A 256-bit floating-point vector of [4 x double].
/// \returns A 128-bit floating-point vector of [2 x double] containing the
/// lower 128 bits of the parameter.
-static __inline __m128d __DEFAULT_FN_ATTRS
+static __inline __m128d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castpd256_pd128(__m256d __a)
{
return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1);
@@ -4498,7 +4483,7 @@ _mm256_castpd256_pd128(__m256d __a)
/// A 256-bit floating-point vector of [8 x float].
/// \returns A 128-bit floating-point vector of [4 x float] containing the
/// lower 128 bits of the parameter.
-static __inline __m128 __DEFAULT_FN_ATTRS
+static __inline __m128 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castps256_ps128(__m256 __a)
{
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3);
@@ -4514,7 +4499,7 @@ _mm256_castps256_ps128(__m256 __a)
/// A 256-bit integer vector.
/// \returns A 128-bit integer vector containing the lower 128 bits of the
/// parameter.
-static __inline __m128i __DEFAULT_FN_ATTRS
+static __inline __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castsi256_si128(__m256i __a)
{
return __builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1);
diff --git a/clang/lib/Headers/cpuid.h b/clang/lib/Headers/cpuid.h
index 52addb7..ce8c79e 100644
--- a/clang/lib/Headers/cpuid.h
+++ b/clang/lib/Headers/cpuid.h
@@ -345,10 +345,15 @@ static __inline int __get_cpuid_count (unsigned int __leaf,
// In some configurations, __cpuidex is defined as a builtin (primarily
// -fms-extensions) which will conflict with the __cpuidex definition below.
#if !(__has_builtin(__cpuidex))
+// In some cases, offloading sets the host as the aux triple, on which the
+// builtin is defined. Since __has_builtin does not detect builtins on aux
+// triples, we need to check for such offloading cases explicitly.
+#ifndef __NVPTX__
static __inline void __cpuidex(int __cpu_info[4], int __leaf, int __subleaf) {
__cpuid_count(__leaf, __subleaf, __cpu_info[0], __cpu_info[1], __cpu_info[2],
__cpu_info[3]);
}
#endif
+#endif
#endif /* __CPUID_H */
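For context on the cpuid.h guard: in offloading builds the device-side pass still parses host headers, with the host as the aux triple, and builtins such as `__cpuidex` may exist only on that aux triple. Because `__has_builtin` consults the primary triple alone, the fallback definition would otherwise be emitted and clash with the aux-triple builtin. The `__NVPTX__` check covers the CUDA device pass (for example, the device compilation of `clang++ -x cuda file.cu`, given here purely as an illustration); other offload targets would need their own guards if they hit the same conflict.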
diff --git a/clang/lib/Headers/emmintrin.h b/clang/lib/Headers/emmintrin.h
index 78e8a42..e15a260 100644
--- a/clang/lib/Headers/emmintrin.h
+++ b/clang/lib/Headers/emmintrin.h
@@ -17,7 +17,6 @@
#include <xmmintrin.h>
typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));
-typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));
typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
typedef long long __m128i_u
@@ -25,13 +24,9 @@ typedef long long __m128i_u
/* Type defines. */
typedef double __v2df __attribute__((__vector_size__(16)));
-typedef long long __v2di __attribute__((__vector_size__(16)));
-typedef short __v8hi __attribute__((__vector_size__(16)));
-typedef char __v16qi __attribute__((__vector_size__(16)));
/* Unsigned types */
typedef unsigned long long __v2du __attribute__((__vector_size__(16)));
-typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
/* We need an explicitly signed variant for char. Note that this shouldn't
@@ -67,6 +62,9 @@ typedef __bf16 __m128bh __attribute__((__vector_size__(16), __aligned__(16)));
#define __trunc64(x) \
(__m64) __builtin_shufflevector((__v2di)(x), __extension__(__v2di){}, 0)
+#define __zext128(x) \
+ (__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \
+ 1, 2, 3)
#define __anyext128(x) \
(__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \
1, -1, -1)
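The new `__zext128` differs from `__anyext128` only in the upper lanes: indices 2 and 3 select from the zero vector, giving defined zero lanes, whereas the -1 indices of `__anyext128` leave them undefined. Undefined lanes are fine for instruction selection but not for constant evaluation, which is presumably why the now-constexpr `_mm_mul_su32` further down switches to the zero-extending form. Sketched on 32-bit lanes for a 64-bit value x = {x0, x1}:

/* __anyext128(x) -> { x0, x1, undef, undef }   (-1 indices: don't care)
   __zext128(x)   -> { x0, x1, 0,     0     }   (indices 2,3 pick zeros) */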
@@ -2127,8 +2125,9 @@ _mm_add_epi32(__m128i __a, __m128i __b) {
/// \param __b
/// A 64-bit integer.
/// \returns A 64-bit integer containing the sum of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_add_si64(__m64 __a, __m64 __b) {
- return (__m64)(((unsigned long long)__a) + ((unsigned long long)__b));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_si64(__m64 __a,
+ __m64 __b) {
+ return (__m64)(((__v1du)__a)[0] + ((__v1du)__b)[0]);
}
/// Adds the corresponding elements of two 128-bit vectors of [2 x i64],
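The scalar MMX helpers `_mm_add_si64` and, further down, `_mm_sub_si64` get the same treatment by a different route: instead of bit-casting the whole `__m64` to `unsigned long long`, they subscript element 0 of a one-element vector (`((__v1du)__a)[0]`). The diff only shows the mechanical change; the likely motivation is that vector subscripting folds cleanly in the constant evaluator, whereas the vector-to-scalar cast form did not.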
@@ -2169,8 +2168,8 @@ _mm_add_epi64(__m128i __a, __m128i __b) {
/// A 128-bit signed [16 x i8] vector.
/// \returns A 128-bit signed [16 x i8] vector containing the saturated sums of
/// both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_adds_epi8(__m128i __a, __m128i __b) {
return (__m128i)__builtin_elementwise_add_sat((__v16qs)__a, (__v16qs)__b);
}
@@ -2191,8 +2190,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a,
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the saturated sums of
/// both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_adds_epi16(__m128i __a, __m128i __b) {
return (__m128i)__builtin_elementwise_add_sat((__v8hi)__a, (__v8hi)__b);
}
@@ -2213,8 +2212,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a,
/// A 128-bit unsigned [16 x i8] vector.
/// \returns A 128-bit unsigned [16 x i8] vector containing the saturated sums
/// of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_adds_epu8(__m128i __a, __m128i __b) {
return (__m128i)__builtin_elementwise_add_sat((__v16qu)__a, (__v16qu)__b);
}
@@ -2235,8 +2234,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a,
/// A 128-bit unsigned [8 x i16] vector.
/// \returns A 128-bit unsigned [8 x i16] vector containing the saturated sums
/// of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_adds_epu16(__m128i __a, __m128i __b) {
return (__m128i)__builtin_elementwise_add_sat((__v8hu)__a, (__v8hu)__b);
}
@@ -2393,8 +2392,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu8(__m128i __a,
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of
/// each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mulhi_epi16(__m128i __a, __m128i __b) {
return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
}
@@ -2412,9 +2411,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a,
/// A 128-bit unsigned [8 x i16] vector.
/// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits
/// of each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a,
- __m128i __b) {
- return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mulhi_epu16(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_pmulhuw128((__v8hu)__a, (__v8hu)__b);
}
/// Multiplies the corresponding elements of two signed [8 x i16]
@@ -2431,8 +2430,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a,
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of
/// each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mullo_epi16(__m128i __a, __m128i __b) {
return (__m128i)((__v8hu)__a * (__v8hu)__b);
}
@@ -2449,9 +2448,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a,
/// \param __b
/// A 64-bit integer containing one of the source operands.
/// \returns A 64-bit integer vector containing the product of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_mul_su32(__m64 __a, __m64 __b) {
- return __trunc64(__builtin_ia32_pmuludq128((__v4si)__anyext128(__a),
- (__v4si)__anyext128(__b)));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_su32(__m64 __a,
+ __m64 __b) {
+ return __trunc64(__builtin_ia32_pmuludq128((__v4si)__zext128(__a),
+ (__v4si)__zext128(__b)));
}
/// Multiplies 32-bit unsigned integer values contained in the lower
@@ -2467,8 +2467,8 @@ static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_mul_su32(__m64 __a, __m64 __b) {
/// \param __b
/// A [2 x i64] vector containing one of the source operands.
/// \returns A [2 x i64] vector containing the product of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epu32(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mul_epu32(__m128i __a, __m128i __b) {
return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
}
@@ -2557,8 +2557,9 @@ _mm_sub_epi32(__m128i __a, __m128i __b) {
/// A 64-bit integer vector containing the subtrahend.
/// \returns A 64-bit integer vector containing the difference of the values in
/// the operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sub_si64(__m64 __a, __m64 __b) {
- return (__m64)((unsigned long long)__a - (unsigned long long)__b);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_si64(__m64 __a,
+ __m64 __b) {
+ return (__m64)(((__v1du)__a)[0] - ((__v1du)__b)[0]);
}
/// Subtracts the corresponding elements of two [2 x i64] vectors.
@@ -2595,8 +2596,8 @@ _mm_sub_epi64(__m128i __a, __m128i __b) {
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_subs_epi8(__m128i __a, __m128i __b) {
return (__m128i)__builtin_elementwise_sub_sat((__v16qs)__a, (__v16qs)__b);
}
@@ -2617,8 +2618,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a,
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_subs_epi16(__m128i __a, __m128i __b) {
return (__m128i)__builtin_elementwise_sub_sat((__v8hi)__a, (__v8hi)__b);
}
@@ -2638,8 +2639,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a,
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the unsigned integer
/// differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_subs_epu8(__m128i __a, __m128i __b) {
return (__m128i)__builtin_elementwise_sub_sat((__v16qu)__a, (__v16qu)__b);
}
@@ -2659,8 +2660,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a,
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the unsigned integer
/// differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_subs_epu16(__m128i __a, __m128i __b) {
return (__m128i)__builtin_elementwise_sub_sat((__v8hu)__a, (__v8hu)__b);
}
@@ -2676,8 +2677,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a,
/// A 128-bit integer vector containing one of the source operands.
/// \returns A 128-bit integer vector containing the bitwise AND of the values
/// in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_and_si128(__m128i __a, __m128i __b) {
return (__m128i)((__v2du)__a & (__v2du)__b);
}
@@ -2695,8 +2696,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a,
/// A 128-bit vector containing the right source operand.
/// \returns A 128-bit integer vector containing the bitwise AND of the one's
/// complement of the first operand and the values in the second operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_andnot_si128(__m128i __a, __m128i __b) {
return (__m128i)(~(__v2du)__a & (__v2du)__b);
}
/// Performs a bitwise OR of two 128-bit integer vectors.
@@ -2711,8 +2712,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a,
/// A 128-bit integer vector containing one of the source operands.
/// \returns A 128-bit integer vector containing the bitwise OR of the values
/// in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_or_si128(__m128i __a, __m128i __b) {
return (__m128i)((__v2du)__a | (__v2du)__b);
}
@@ -2728,8 +2729,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a,
/// A 128-bit integer vector containing one of the source operands.
/// \returns A 128-bit integer vector containing the bitwise exclusive OR of the
/// values in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_xor_si128(__m128i __a, __m128i __b) {
return (__m128i)((__v2du)__a ^ (__v2du)__b);
}
@@ -2771,8 +2772,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a,
/// An integer value specifying the number of bits to left-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi16(__m128i __a,
- int __count) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_slli_epi16(__m128i __a, int __count) {
return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
}
@@ -2807,8 +2808,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a,
/// An integer value specifying the number of bits to left-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi32(__m128i __a,
- int __count) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_slli_epi32(__m128i __a, int __count) {
return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
}
@@ -2843,8 +2844,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a,
/// An integer value specifying the number of bits to left-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi64(__m128i __a,
- int __count) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_slli_epi64(__m128i __a, int __count) {
return __builtin_ia32_psllqi128((__v2di)__a, __count);
}
@@ -2880,8 +2881,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a,
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi16(__m128i __a,
- int __count) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_srai_epi16(__m128i __a, int __count) {
return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
}
@@ -2918,8 +2919,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a,
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi32(__m128i __a,
- int __count) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_srai_epi32(__m128i __a, int __count) {
return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
}
@@ -2980,8 +2981,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a,
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi16(__m128i __a,
- int __count) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_srli_epi16(__m128i __a, int __count) {
return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
}
@@ -3016,8 +3017,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a,
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi32(__m128i __a,
- int __count) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_srli_epi32(__m128i __a, int __count) {
return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
}
@@ -3052,8 +3053,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a,
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi64(__m128i __a,
- int __count) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_srli_epi64(__m128i __a, int __count) {
return __builtin_ia32_psrlqi128((__v2di)__a, __count);
}
@@ -3089,8 +3090,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a,
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cmpeq_epi8(__m128i __a, __m128i __b) {
return (__m128i)((__v16qi)__a == (__v16qi)__b);
}
@@ -3108,8 +3109,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a,
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cmpeq_epi16(__m128i __a, __m128i __b) {
return (__m128i)((__v8hi)__a == (__v8hi)__b);
}
@@ -3127,8 +3128,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a,
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cmpeq_epi32(__m128i __a, __m128i __b) {
return (__m128i)((__v4si)__a == (__v4si)__b);
}
@@ -3147,8 +3148,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a,
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cmpgt_epi8(__m128i __a, __m128i __b) {
/* This function always performs a signed comparison, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
return (__m128i)((__v16qs)__a > (__v16qs)__b);
@@ -3169,8 +3170,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a,
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cmpgt_epi16(__m128i __a, __m128i __b) {
return (__m128i)((__v8hi)__a > (__v8hi)__b);
}
@@ -3189,8 +3190,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a,
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cmpgt_epi32(__m128i __a, __m128i __b) {
return (__m128i)((__v4si)__a > (__v4si)__b);
}
@@ -3209,8 +3210,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a,
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cmplt_epi8(__m128i __a, __m128i __b) {
return _mm_cmpgt_epi8(__b, __a);
}
@@ -3229,8 +3230,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a,
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cmplt_epi16(__m128i __a, __m128i __b) {
return _mm_cmpgt_epi16(__b, __a);
}
@@ -3249,8 +3250,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a,
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi32(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cmplt_epi32(__m128i __a, __m128i __b) {
return _mm_cmpgt_epi32(__b, __a);
}
@@ -3379,7 +3380,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a) {
/// \param __a
/// A 32-bit signed integer operand.
/// \returns A 128-bit vector of [4 x i32].
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi32_si128(int __a) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtsi32_si128(int __a) {
return __extension__(__m128i)(__v4si){__a, 0, 0, 0};
}
@@ -3394,7 +3396,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi32_si128(int __a) {
/// \param __a
/// A 64-bit signed integer operand containing the value to be converted.
/// \returns A 128-bit vector of [2 x i64] containing the converted value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi64_si128(long long __a) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtsi64_si128(long long __a) {
return __extension__(__m128i)(__v2di){__a, 0};
}
@@ -3409,7 +3412,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi64_si128(long long __a) {
/// A vector of [4 x i32]. The least significant 32 bits are moved to the
/// destination.
/// \returns A 32-bit signed integer containing the moved value.
-static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsi128_si32(__m128i __a) {
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtsi128_si32(__m128i __a) {
__v4si __b = (__v4si)__a;
return __b[0];
}
@@ -3425,7 +3429,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsi128_si32(__m128i __a) {
/// A vector of [2 x i64]. The least significant 64 bits are moved to the
/// destination.
/// \returns A 64-bit signed integer containing the moved value.
-static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsi128_si64(__m128i __a) {
+static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtsi128_si64(__m128i __a) {
return __a[0];
}
@@ -4415,8 +4420,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a) {
/// Bits [119:112] are written to bits [111:104] of the result. \n
/// Bits [127:120] are written to bits [127:120] of the result.
/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi8(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_unpackhi_epi8(__m128i __a, __m128i __b) {
return (__m128i)__builtin_shufflevector(
(__v16qi)__a, (__v16qi)__b, 8, 16 + 8, 9, 16 + 9, 10, 16 + 10, 11,
16 + 11, 12, 16 + 12, 13, 16 + 13, 14, 16 + 14, 15, 16 + 15);
@@ -4443,8 +4448,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi8(__m128i __a,
/// Bits [111:96] are written to bits [95:80] of the result. \n
/// Bits [127:112] are written to bits [127:112] of the result.
/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_unpackhi_epi16(__m128i __a, __m128i __b) {
return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8 + 4, 5,
8 + 5, 6, 8 + 6, 7, 8 + 7);
}
@@ -4466,8 +4471,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi16(__m128i __a,
/// Bits [95:64] are written to bits [63:32] of the destination. \n
/// Bits [127:96] are written to bits [127:96] of the destination.
/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_unpackhi_epi32(__m128i __a, __m128i __b) {
return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4 + 2, 3,
4 + 3);
}
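For reference, the shufflevector index lists in these unpack intrinsics encode the interleave directly; a lane-level sketch for the 32-bit case (illustration only, not part of the patch):

    // _mm_unpackhi_epi32({a0,a1,a2,a3}, {b0,b1,b2,b3}) uses indices
    // {2, 4+2, 3, 4+3}: the high halves interleaved, i.e. {a2, b2, a3, b3}.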
@@ -4487,8 +4492,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a,
/// A 128-bit vector of [2 x i64]. \n
/// Bits [127:64] are written to bits [127:64] of the destination.
/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_unpackhi_epi64(__m128i __a, __m128i __b) {
return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2 + 1);
}
@@ -4521,8 +4526,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a,
/// Bits [55:48] are written to bits [111:104] of the result. \n
/// Bits [63:56] are written to bits [127:120] of the result.
/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi8(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_unpacklo_epi8(__m128i __a, __m128i __b) {
return (__m128i)__builtin_shufflevector(
(__v16qi)__a, (__v16qi)__b, 0, 16 + 0, 1, 16 + 1, 2, 16 + 2, 3, 16 + 3, 4,
16 + 4, 5, 16 + 5, 6, 16 + 6, 7, 16 + 7);
@@ -4550,8 +4555,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi8(__m128i __a,
/// Bits [47:32] are written to bits [95:80] of the result. \n
/// Bits [63:48] are written to bits [127:112] of the result.
/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_unpacklo_epi16(__m128i __a, __m128i __b) {
return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8 + 0, 1,
8 + 1, 2, 8 + 2, 3, 8 + 3);
}
@@ -4573,8 +4578,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi16(__m128i __a,
/// Bits [31:0] are written to bits [63:32] of the destination. \n
/// Bits [63:32] are written to bits [127:96] of the destination.
/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_unpacklo_epi32(__m128i __a, __m128i __b) {
return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4 + 0, 1,
4 + 1);
}
@@ -4594,8 +4599,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a,
/// A 128-bit vector of [2 x i64]. \n
/// Bits [63:0] are written to bits [127:64] of the destination. \n
/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi64(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_unpacklo_epi64(__m128i __a, __m128i __b) {
return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2 + 0);
}
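The emmintrin.h hunks above are pure re-wraps around the attribute rename: each intrinsic moves from __DEFAULT_FN_ATTRS to __DEFAULT_FN_ATTRS_CONSTEXPR, making it usable in constant expressions in C++11 and later. A minimal sketch of what that enables, assuming _mm_set_epi32 is already constexpr in this tree (as in recent Clang releases); lhs, rhs, and gt are hypothetical names:

    #include <emmintrin.h>
    // Compile-time use of the newly constexpr comparison/conversion intrinsics.
    constexpr __m128i lhs = _mm_set_epi32(7, 3, 5, 1); // lane 0 holds 1
    constexpr __m128i rhs = _mm_set_epi32(8, 6, 4, 2); // lane 0 holds 2
    constexpr __m128i gt  = _mm_cmpgt_epi32(lhs, rhs); // folded at compile time
    static_assert(_mm_cvtsi128_si32(gt) == 0, "1 > 2 is false, so lane 0 is 0");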
diff --git a/clang/lib/Headers/f16cintrin.h b/clang/lib/Headers/f16cintrin.h
index 94a662c..ede67af 100644
--- a/clang/lib/Headers/f16cintrin.h
+++ b/clang/lib/Headers/f16cintrin.h
@@ -38,9 +38,7 @@
static __inline float __DEFAULT_FN_ATTRS128
_cvtsh_ss(unsigned short __a)
{
- __v8hi __v = {(short)__a, 0, 0, 0, 0, 0, 0, 0};
- __v4sf __r = __builtin_ia32_vcvtph2ps(__v);
- return __r[0];
+ return (float)__builtin_bit_cast(__fp16, __a);
}
/// Converts a 32-bit single-precision float value to a 16-bit
@@ -109,7 +107,10 @@ _cvtsh_ss(unsigned short __a)
static __inline __m128 __DEFAULT_FN_ATTRS128
_mm_cvtph_ps(__m128i __a)
{
- return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__a);
+ typedef __fp16 __v4fp16 __attribute__((__vector_size__(8)));
+
+ __v4hi __v = __builtin_shufflevector((__v8hi)__a, (__v8hi)__a, 0, 1, 2, 3);
+ return (__m128) __builtin_convertvector((__v4fp16)__v, __v4sf);
}
/// Converts a 256-bit vector of [8 x float] into a 128-bit vector
@@ -153,7 +154,9 @@ _mm_cvtph_ps(__m128i __a)
static __inline __m256 __DEFAULT_FN_ATTRS256
_mm256_cvtph_ps(__m128i __a)
{
- return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);
+ typedef __fp16 __v8fp16 __attribute__((__vector_size__(16), __aligned__(16)));
+
+ return (__m256) __builtin_convertvector((__v8fp16)__a, __v8sf);
}
#undef __DEFAULT_FN_ATTRS128
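Rewriting _cvtsh_ss and the vector variants in terms of a plain __fp16 bit-cast and __builtin_convertvector removes the dependence on the target-specific vcvtph2ps builtins, so the constant evaluator can fold the conversions. Illustrative values, assuming IEEE-754 binary16 encodings (compile with -mf16c):

    #include <immintrin.h>
    float one     = _cvtsh_ss(0x3C00); // binary16 1.0
    float neg_two = _cvtsh_ss(0xC000); // binary16 -2.0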
diff --git a/clang/lib/Headers/fma4intrin.h b/clang/lib/Headers/fma4intrin.h
index 694801b..e0a0e4c 100644
--- a/clang/lib/Headers/fma4intrin.h
+++ b/clang/lib/Headers/fma4intrin.h
@@ -20,16 +20,24 @@
#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma4"), __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma4"), __min_vector_width__(256)))
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
+#else
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
+#endif
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C) {
+ return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C) {
+ return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
+ (__v2df)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -44,16 +52,16 @@ _mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C) {
+ return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
+ -(__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C) {
+ return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
+ -(__v2df)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -68,16 +76,16 @@ _mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C) {
+ return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C) {
+ return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
+ (__v2df)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -92,16 +100,16 @@ _mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C) {
+ return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
+ -(__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C) {
+ return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
+ -(__v2df)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -140,52 +148,52 @@ _mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
-{
- return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C) {
+ return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
-{
- return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C) {
+ return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
+ (__v4df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
-{
- return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C) {
+ return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
+ -(__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
-{
- return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C) {
+ return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
+ -(__v4df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
-{
- return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C) {
+ return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
-{
- return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C) {
+ return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
+ (__v4df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
-{
- return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C) {
+ return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
+ -(__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
-{
- return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C) {
+ return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
+ -(__v4df)__C);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
@@ -214,5 +222,7 @@ _mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
#endif /* __FMA4INTRIN_H */
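All four FMA4 sign variants now funnel into the single generic __builtin_elementwise_fma, with the signs folded into the operands. Schematically, per element (msub_demo below is a placeholder wrapper, not part of the patch; compile with -mfma4):

    // macc : fma( a, b,  c) =  a*b + c
    // msub : fma( a, b, -c) =  a*b - c
    // nmacc: fma(-a, b,  c) = -a*b + c
    // nmsub: fma(-a, b, -c) = -a*b - c
    #include <x86intrin.h>
    __m128 msub_demo(__m128 x, __m128 y, __m128 z) {
      return _mm_msub_ps(x, y, z); // per lane: x*y - z, with a single rounding
    }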
diff --git a/clang/lib/Headers/fmaintrin.h b/clang/lib/Headers/fmaintrin.h
index 22d1a78..d8ea489 100644
--- a/clang/lib/Headers/fmaintrin.h
+++ b/clang/lib/Headers/fmaintrin.h
@@ -18,6 +18,14 @@
#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(256)))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
+#else
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
+#endif
+
/// Computes a multiply-add of 128-bit vectors of [4 x float].
/// For each element, computes <c> (__A * __B) + __C </c>.
///
@@ -32,10 +40,11 @@
/// \param __C
/// A 128-bit vector of [4 x float] containing the addend.
/// \returns A 128-bit vector of [4 x float] containing the result.
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
- return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+ return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C);
}
/// Computes a multiply-add of 128-bit vectors of [2 x double].
@@ -52,10 +61,11 @@ _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
/// \param __C
/// A 128-bit vector of [2 x double] containing the addend.
/// \returns A 128-bit [2 x double] vector containing the result.
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+ return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
+ (__v2df)__C);
}
/// Computes a scalar multiply-add of the single-precision values in the
@@ -130,10 +140,11 @@ _mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
/// \param __C
/// A 128-bit vector of [4 x float] containing the subtrahend.
/// \returns A 128-bit vector of [4 x float] containing the result.
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
- return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+ return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
+ -(__v4sf)__C);
}
/// Computes a multiply-subtract of 128-bit vectors of [2 x double].
@@ -150,10 +161,11 @@ _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
/// \param __C
/// A 128-bit vector of [2 x double] containing the addend.
/// \returns A 128-bit vector of [2 x double] containing the result.
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
+ return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
+ -(__v2df)__C);
}
/// Computes a scalar multiply-subtract of the single-precision values in
@@ -228,10 +240,11 @@ _mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
/// \param __C
/// A 128-bit vector of [4 x float] containing the addend.
/// \returns A 128-bit [4 x float] vector containing the result.
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
- return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+ return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C);
}
/// Computes a negated multiply-add of 128-bit vectors of [2 x double].
@@ -248,10 +261,11 @@ _mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
/// \param __C
/// A 128-bit vector of [2 x double] containing the addend.
/// \returns A 128-bit vector of [2 x double] containing the result.
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
+ return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
+ (__v2df)__C);
}
/// Computes a scalar negated multiply-add of the single-precision values in
@@ -326,10 +340,11 @@ _mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
/// \param __C
/// A 128-bit vector of [4 x float] containing the subtrahend.
/// \returns A 128-bit vector of [4 x float] containing the result.
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
- return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+ return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
+ -(__v4sf)__C);
}
/// Computes a negated multiply-subtract of 128-bit vectors of [2 x double].
@@ -346,10 +361,11 @@ _mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
/// \param __C
/// A 128-bit vector of [2 x double] containing the subtrahend.
/// \returns A 128-bit vector of [2 x double] containing the result.
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
+ return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
+ -(__v2df)__C);
}
/// Computes a scalar negated multiply-subtract of the single-precision
@@ -528,10 +544,11 @@ _mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
/// \param __C
/// A 256-bit vector of [8 x float] containing the addend.
/// \returns A 256-bit vector of [8 x float] containing the result.
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
- return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+ return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__C);
}
/// Computes a multiply-add of 256-bit vectors of [4 x double].
@@ -548,10 +565,11 @@ _mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
/// \param __C
/// A 256-bit vector of [4 x double] containing the addend.
/// \returns A 256-bit vector of [4 x double] containing the result.
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+ return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
+ (__v4df)__C);
}
/// Computes a multiply-subtract of 256-bit vectors of [8 x float].
@@ -568,10 +586,11 @@ _mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
/// \param __C
/// A 256-bit vector of [8 x float] containing the subtrahend.
/// \returns A 256-bit vector of [8 x float] containing the result.
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
- return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
+ return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
+ -(__v8sf)__C);
}
/// Computes a multiply-subtract of 256-bit vectors of [4 x double].
@@ -588,10 +607,11 @@ _mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
/// \param __C
/// A 256-bit vector of [4 x double] containing the subtrahend.
/// \returns A 256-bit vector of [4 x double] containing the result.
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
+ return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
+ -(__v4df)__C);
}
/// Computes a negated multiply-add of 256-bit vectors of [8 x float].
@@ -608,10 +628,11 @@ _mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
/// \param __C
/// A 256-bit vector of [8 x float] containing the addend.
/// \returns A 256-bit vector of [8 x float] containing the result.
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
- return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+ return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__C);
}
/// Computes a negated multiply-add of 256-bit vectors of [4 x double].
@@ -628,10 +649,11 @@ _mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
/// \param __C
/// A 256-bit vector of [4 x double] containing the addend.
/// \returns A 256-bit vector of [4 x double] containing the result.
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);
+ return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
+ (__v4df)__C);
}
/// Computes a negated multiply-subtract of 256-bit vectors of [8 x float].
@@ -648,10 +670,11 @@ _mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
/// \param __C
/// A 256-bit vector of [8 x float] containing the subtrahend.
/// \returns A 256-bit vector of [8 x float] containing the result.
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
- return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
+ return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
+ -(__v8sf)__C);
}
/// Computes a negated multiply-subtract of 256-bit vectors of [4 x double].
@@ -668,10 +691,11 @@ _mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
/// \param __C
/// A 256-bit vector of [4 x double] containing the subtrahend.
/// \returns A 256-bit vector of [4 x double] containing the result.
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);
+ return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
+ -(__v4df)__C);
}
/// Computes a multiply with alternating add/subtract of 256-bit vectors of
@@ -792,5 +816,7 @@ _mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
#endif /* __FMAINTRIN_H */
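Because the FMA3 entry points now carry the conditional constexpr, they can appear inside user constexpr functions when building as C++11 or later. A hypothetical wrapper (fused_axpy is not part of the patch; compile with -mfma):

    #include <immintrin.h>
    constexpr __m128 fused_axpy(__m128 a, __m128 x, __m128 y) {
      return _mm_fmadd_ps(a, x, y); // per lane: a*x + y, single rounding
    }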
diff --git a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
index cbc518d..21a9c30 100644
--- a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
@@ -2190,10 +2190,10 @@ float4 sqrt(float4);
// step builtins
//===----------------------------------------------------------------------===//
-/// \fn T step(T x, T y)
+/// \fn T step(T y, T x)
/// \brief Returns 1 if the x parameter is greater than or equal to the y
-/// parameter; otherwise, 0. vector. \param x [in] The first floating-point
-/// value to compare. \param y [in] The first floating-point value to compare.
+/// parameter; otherwise, 0. \param y [in] The first floating-point
+/// value to compare. \param x [in] The second floating-point value to compare.
///
/// Step is based on the following formula: (x >= y) ? 1 : 0
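The corrected signature reads step(y, x), with the edge value in the first position; a scalar model of the per-component contract, for illustration only (step_model is a hypothetical name):

    float step_model(float y, float x) { return x >= y ? 1.0f : 0.0f; }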
diff --git a/clang/lib/Headers/mmintrin.h b/clang/lib/Headers/mmintrin.h
index dc0fa5c..4ed95c5 100644
--- a/clang/lib/Headers/mmintrin.h
+++ b/clang/lib/Headers/mmintrin.h
@@ -57,6 +57,9 @@ typedef char __v16qi __attribute__((__vector_size__(16)));
#define __trunc64(x) \
(__m64) __builtin_shufflevector((__v2di)(x), __extension__(__v2di){}, 0)
+#define __zext128(x) \
+ (__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \
+ 1, 2, 3)
#define __anyext128(x) \
(__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \
1, -1, -1)
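The distinction between these two widening macros is what makes the constexpr work below possible: a -1 shufflevector index yields an undefined lane, which is fine for codegen but rejected by the constant evaluator, whereas __zext128 draws the upper lanes from the zero vector. Per 32-bit lane, as a sketch:

    // __anyext128(x) -> { x0, x1, undef, undef } // -1 means don't-care
    // __zext128(x)   -> { x0, x1, 0,     0     } // indices 2,3 hit (__v2si){}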
@@ -85,7 +88,7 @@ _mm_empty(void) {
/// A 32-bit integer value.
/// \returns A 64-bit integer vector. The lower 32 bits contain the value of the
/// parameter. The upper 32 bits are set to 0.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtsi32_si64(int __i)
{
return __extension__ (__m64)(__v2si){__i, 0};
@@ -102,7 +105,7 @@ _mm_cvtsi32_si64(int __i)
/// A 64-bit integer vector.
/// \returns A 32-bit signed integer value containing the lower 32 bits of the
/// parameter.
-static __inline__ int __DEFAULT_FN_ATTRS_SSE2
+static __inline__ int __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtsi64_si32(__m64 __m)
{
return ((__v2si)__m)[0];
@@ -118,10 +121,10 @@ _mm_cvtsi64_si32(__m64 __m)
/// A 64-bit signed integer.
/// \returns A 64-bit integer vector containing the same bitwise pattern as the
/// parameter.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtsi64_m64(long long __i)
{
- return (__m64)__i;
+ return __extension__ (__m64)(__v1di){__i};
}
/// Casts a 64-bit integer vector into a 64-bit signed integer value.
@@ -134,10 +137,10 @@ _mm_cvtsi64_m64(long long __i)
/// A 64-bit integer vector.
/// \returns A 64-bit signed integer containing the same bitwise pattern as the
/// parameter.
-static __inline__ long long __DEFAULT_FN_ATTRS_SSE2
+static __inline__ long long __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtm64_si64(__m64 __m)
{
- return (long long)__m;
+ return ((__v1di)__m)[0];
}
/// Converts, with saturation, 16-bit signed integers from both 64-bit integer
@@ -239,11 +242,10 @@ _mm_packs_pu16(__m64 __m1, __m64 __m2)
/// Bits [63:56] are written to bits [63:56] of the result.
/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved
/// values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2,
- 4, 12, 5, 13, 6, 14, 7, 15);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_unpackhi_pi8(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 4, 12, 5,
+ 13, 6, 14, 7, 15);
}
/// Unpacks the upper 32 bits from two 64-bit integer vectors of
@@ -263,11 +265,9 @@ _mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
/// Bits [63:48] are written to bits [63:48] of the result.
/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved
/// values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2,
- 2, 6, 3, 7);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_unpackhi_pi16(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 2, 6, 3, 7);
}
/// Unpacks the upper 32 bits from two 64-bit integer vectors of
@@ -285,10 +285,9 @@ _mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
/// the upper 32 bits of the result.
/// \returns A 64-bit integer vector of [2 x i32] containing the interleaved
/// values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 1, 3);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_unpackhi_pi32(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 1, 3);
}
/// Unpacks the lower 32 bits from two 64-bit integer vectors of [8 x i8]
@@ -312,11 +311,10 @@ _mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
/// Bits [31:24] are written to bits [63:56] of the result.
/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved
/// values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2,
- 0, 8, 1, 9, 2, 10, 3, 11);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_unpacklo_pi8(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 0, 8, 1, 9,
+ 2, 10, 3, 11);
}
/// Unpacks the lower 32 bits from two 64-bit integer vectors of
@@ -336,11 +334,9 @@ _mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
/// Bits [31:16] are written to bits [63:48] of the result.
/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved
/// values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2,
- 0, 4, 1, 5);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_unpacklo_pi16(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 0, 4, 1, 5);
}
/// Unpacks the lower 32 bits from two 64-bit integer vectors of
@@ -358,10 +354,9 @@ _mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
/// the upper 32 bits of the result.
/// \returns A 64-bit integer vector of [2 x i32] containing the interleaved
/// values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 0, 2);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_unpacklo_pi32(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 0, 2);
}
/// Adds each 8-bit integer element of the first 64-bit integer vector
@@ -379,7 +374,7 @@ _mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [8 x i8].
/// \returns A 64-bit integer vector of [8 x i8] containing the sums of both
/// parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_add_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v8qu)__m1) + ((__v8qu)__m2));
@@ -400,7 +395,7 @@ _mm_add_pi8(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16].
/// \returns A 64-bit integer vector of [4 x i16] containing the sums of both
/// parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_add_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v4hu)__m1) + ((__v4hu)__m2));
@@ -421,7 +416,7 @@ _mm_add_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [2 x i32].
/// \returns A 64-bit integer vector of [2 x i32] containing the sums of both
/// parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_add_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v2su)__m1) + ((__v2su)__m2));
@@ -445,10 +440,9 @@ _mm_add_pi32(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [8 x i8].
/// \returns A 64-bit integer vector of [8 x i8] containing the saturated sums
/// of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_adds_pi8(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_elementwise_add_sat((__v8qs)__m1, (__v8qs)__m2);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_adds_pi8(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_elementwise_add_sat((__v8qs)__m1, (__v8qs)__m2);
}
/// Adds, with saturation, each 16-bit signed integer element of the first
@@ -469,10 +463,9 @@ _mm_adds_pi8(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16].
/// \returns A 64-bit integer vector of [4 x i16] containing the saturated sums
/// of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_adds_pi16(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_elementwise_add_sat((__v4hi)__m1, (__v4hi)__m2);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_adds_pi16(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_elementwise_add_sat((__v4hi)__m1, (__v4hi)__m2);
}
/// Adds, with saturation, each 8-bit unsigned integer element of the first
@@ -492,10 +485,9 @@ _mm_adds_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [8 x i8].
/// \returns A 64-bit integer vector of [8 x i8] containing the saturated
/// unsigned sums of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_adds_pu8(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_elementwise_add_sat((__v8qu)__m1, (__v8qu)__m2);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_adds_pu8(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_elementwise_add_sat((__v8qu)__m1, (__v8qu)__m2);
}
/// Adds, with saturation, each 16-bit unsigned integer element of the first
@@ -515,10 +507,9 @@ _mm_adds_pu8(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16].
/// \returns A 64-bit integer vector of [4 x i16] containing the saturated
/// unsigned sums of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_adds_pu16(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_elementwise_add_sat((__v4hu)__m1, (__v4hu)__m2);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_adds_pu16(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_elementwise_add_sat((__v4hu)__m1, (__v4hu)__m2);
}
/// Subtracts each 8-bit integer element of the second 64-bit integer
@@ -536,7 +527,7 @@ _mm_adds_pu16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [8 x i8] containing the subtrahends.
/// \returns A 64-bit integer vector of [8 x i8] containing the differences of
/// both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_sub_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v8qu)__m1) - ((__v8qu)__m2));
@@ -557,7 +548,7 @@ _mm_sub_pi8(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16] containing the subtrahends.
/// \returns A 64-bit integer vector of [4 x i16] containing the differences of
/// both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_sub_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v4hu)__m1) - ((__v4hu)__m2));
@@ -578,7 +569,7 @@ _mm_sub_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [2 x i32] containing the subtrahends.
/// \returns A 64-bit integer vector of [2 x i32] containing the differences of
/// both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_sub_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v2su)__m1) - ((__v2su)__m2));
@@ -602,10 +593,9 @@ _mm_sub_pi32(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [8 x i8] containing the subtrahends.
/// \returns A 64-bit integer vector of [8 x i8] containing the saturated
/// differences of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_subs_pi8(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_elementwise_sub_sat((__v8qs)__m1, (__v8qs)__m2);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_subs_pi8(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_elementwise_sub_sat((__v8qs)__m1, (__v8qs)__m2);
}
/// Subtracts, with saturation, each 16-bit signed integer element of the
@@ -626,10 +616,9 @@ _mm_subs_pi8(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16] containing the subtrahends.
/// \returns A 64-bit integer vector of [4 x i16] containing the saturated
/// differences of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_subs_pi16(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_elementwise_sub_sat((__v4hi)__m1, (__v4hi)__m2);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_subs_pi16(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_elementwise_sub_sat((__v4hi)__m1, (__v4hi)__m2);
}
/// Subtracts each 8-bit unsigned integer element of the second 64-bit
@@ -650,10 +639,9 @@ _mm_subs_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [8 x i8] containing the subtrahends.
/// \returns A 64-bit integer vector of [8 x i8] containing the saturated
/// differences of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_subs_pu8(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_elementwise_sub_sat((__v8qu)__m1, (__v8qu)__m2);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_subs_pu8(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_elementwise_sub_sat((__v8qu)__m1, (__v8qu)__m2);
}
/// Subtracts each 16-bit unsigned integer element of the second 64-bit
@@ -674,10 +662,9 @@ _mm_subs_pu8(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16] containing the subtrahends.
/// \returns A 64-bit integer vector of [4 x i16] containing the saturated
/// differences of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_subs_pu16(__m64 __m1, __m64 __m2)
-{
- return (__m64)__builtin_elementwise_sub_sat((__v4hu)__m1, (__v4hu)__m2);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_subs_pu16(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_elementwise_sub_sat((__v4hu)__m1, (__v4hu)__m2);
}
/// Multiplies each 16-bit signed integer element of the first 64-bit
@@ -723,11 +710,11 @@ _mm_madd_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16].
/// \returns A 64-bit integer vector of [4 x i16] containing the upper 16 bits
/// of the products of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_mulhi_pi16(__m64 __m1, __m64 __m2)
{
- return __trunc64(__builtin_ia32_pmulhw128((__v8hi)__anyext128(__m1),
- (__v8hi)__anyext128(__m2)));
+ return __trunc64(__builtin_ia32_pmulhw128((__v8hi)__zext128(__m1),
+ (__v8hi)__zext128(__m2)));
}
/// Multiplies each 16-bit signed integer element of the first 64-bit
@@ -745,7 +732,7 @@ _mm_mulhi_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16].
/// \returns A 64-bit integer vector of [4 x i16] containing the lower 16 bits
/// of the products of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_mullo_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v4hu)__m1) * ((__v4hu)__m2));
@@ -791,11 +778,9 @@ _mm_sll_pi16(__m64 __m, __m64 __count)
/// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted
/// values. If \a __count is greater or equal to 16, the result is set to all
/// 0.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_slli_pi16(__m64 __m, int __count)
-{
- return __trunc64(__builtin_ia32_psllwi128((__v8hi)__anyext128(__m),
- __count));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_slli_pi16(__m64 __m, int __count) {
+ return __trunc64(__builtin_ia32_psllwi128((__v8hi)__zext128(__m), __count));
}
/// Left-shifts each 32-bit signed integer element of the first
@@ -838,11 +823,9 @@ _mm_sll_pi32(__m64 __m, __m64 __count)
/// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted
/// values. If \a __count is greater or equal to 32, the result is set to all
/// 0.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_slli_pi32(__m64 __m, int __count)
-{
- return __trunc64(__builtin_ia32_pslldi128((__v4si)__anyext128(__m),
- __count));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_slli_pi32(__m64 __m, int __count) {
+ return __trunc64(__builtin_ia32_pslldi128((__v4si)__zext128(__m), __count));
}
/// Left-shifts the first 64-bit integer parameter by the number of bits
@@ -880,11 +863,9 @@ _mm_sll_si64(__m64 __m, __m64 __count)
/// A 32-bit integer value.
/// \returns A 64-bit integer vector containing the left-shifted value. If
/// \a __count is greater or equal to 64, the result is set to 0.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_slli_si64(__m64 __m, int __count)
-{
- return __trunc64(__builtin_ia32_psllqi128((__v2di)__anyext128(__m),
- __count));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_slli_si64(__m64 __m, int __count) {
+ return __trunc64(__builtin_ia32_psllqi128((__v2di)__zext128(__m), __count));
}
/// Right-shifts each 16-bit integer element of the first parameter,
@@ -929,11 +910,9 @@ _mm_sra_pi16(__m64 __m, __m64 __count)
/// A 32-bit integer value.
/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted
/// values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_srai_pi16(__m64 __m, int __count)
-{
- return __trunc64(__builtin_ia32_psrawi128((__v8hi)__anyext128(__m),
- __count));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_srai_pi16(__m64 __m, int __count) {
+ return __trunc64(__builtin_ia32_psrawi128((__v8hi)__zext128(__m), __count));
}
/// Right-shifts each 32-bit integer element of the first parameter,
@@ -978,11 +957,9 @@ _mm_sra_pi32(__m64 __m, __m64 __count)
/// A 32-bit integer value.
/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted
/// values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_srai_pi32(__m64 __m, int __count)
-{
- return __trunc64(__builtin_ia32_psradi128((__v4si)__anyext128(__m),
- __count));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_srai_pi32(__m64 __m, int __count) {
+ return __trunc64(__builtin_ia32_psradi128((__v4si)__zext128(__m), __count));
}
/// Right-shifts each 16-bit integer element of the first parameter,
@@ -1025,11 +1002,9 @@ _mm_srl_pi16(__m64 __m, __m64 __count)
/// A 32-bit integer value.
/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted
/// values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_srli_pi16(__m64 __m, int __count)
-{
- return __trunc64(__builtin_ia32_psrlwi128((__v8hi)__anyext128(__m),
- __count));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_srli_pi16(__m64 __m, int __count) {
+ return __trunc64(__builtin_ia32_psrlwi128((__v8hi)__zext128(__m), __count));
}
/// Right-shifts each 32-bit integer element of the first parameter,
@@ -1072,11 +1047,9 @@ _mm_srl_pi32(__m64 __m, __m64 __count)
/// A 32-bit integer value.
/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted
/// values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_srli_pi32(__m64 __m, int __count)
-{
- return __trunc64(__builtin_ia32_psrldi128((__v4si)__anyext128(__m),
- __count));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_srli_pi32(__m64 __m, int __count) {
+ return __trunc64(__builtin_ia32_psrldi128((__v4si)__zext128(__m), __count));
}
/// Right-shifts the first 64-bit integer parameter by the number of bits
@@ -1115,11 +1088,9 @@ _mm_srl_si64(__m64 __m, __m64 __count)
/// \param __count
/// A 32-bit integer value.
/// \returns A 64-bit integer vector containing the right-shifted value.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_srli_si64(__m64 __m, int __count)
-{
- return __trunc64(__builtin_ia32_psrlqi128((__v2di)__anyext128(__m),
- __count));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_srli_si64(__m64 __m, int __count) {
+ return __trunc64(__builtin_ia32_psrlqi128((__v2di)__zext128(__m), __count));
}
/// Performs a bitwise AND of two 64-bit integer vectors.
@@ -1134,7 +1105,7 @@ _mm_srli_si64(__m64 __m, int __count)
/// A 64-bit integer vector.
/// \returns A 64-bit integer vector containing the bitwise AND of both
/// parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_and_si64(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v1du)__m1) & ((__v1du)__m2));
@@ -1155,7 +1126,7 @@ _mm_and_si64(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector.
/// \returns A 64-bit integer vector containing the bitwise AND of the second
/// parameter and the one's complement of the first parameter.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_andnot_si64(__m64 __m1, __m64 __m2)
{
return (__m64)(~((__v1du)__m1) & ((__v1du)__m2));
@@ -1173,7 +1144,7 @@ _mm_andnot_si64(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector.
/// \returns A 64-bit integer vector containing the bitwise OR of both
/// parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_or_si64(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v1du)__m1) | ((__v1du)__m2));
@@ -1191,7 +1162,7 @@ _mm_or_si64(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector.
/// \returns A 64-bit integer vector containing the bitwise exclusive OR of both
/// parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_xor_si64(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v1du)__m1) ^ ((__v1du)__m2));
@@ -1213,7 +1184,7 @@ _mm_xor_si64(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [8 x i8].
/// \returns A 64-bit integer vector of [8 x i8] containing the comparison
/// results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v8qi)__m1) == ((__v8qi)__m2));
@@ -1235,7 +1206,7 @@ _mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16].
/// \returns A 64-bit integer vector of [4 x i16] containing the comparison
/// results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v4hi)__m1) == ((__v4hi)__m2));
@@ -1257,7 +1228,7 @@ _mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [2 x i32].
/// \returns A 64-bit integer vector of [2 x i32] containing the comparison
/// results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v2si)__m1) == ((__v2si)__m2));
@@ -1279,7 +1250,7 @@ _mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [8 x i8].
/// \returns A 64-bit integer vector of [8 x i8] containing the comparison
/// results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
{
/* This function always performs a signed comparison, but __v8qi is a char
@@ -1303,7 +1274,7 @@ _mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16].
/// \returns A 64-bit integer vector of [4 x i16] containing the comparison
/// results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)((__v4hi)__m1 > (__v4hi)__m2);
@@ -1325,7 +1296,7 @@ _mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [2 x i32].
/// \returns A 64-bit integer vector of [2 x i32] containing the comparison
/// results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)((__v2si)__m1 > (__v2si)__m2);
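With the shifts routed through __zext128 and marked constexpr, whole MMX expressions can now fold at compile time. A hypothetical check in C++11 or later, assuming the underlying 128-bit shift builtins are constant-evaluable in this tree (which this change presupposes); v and s are illustrative names:

    #include <mmintrin.h>
    constexpr __m64 v = _mm_cvtsi32_si64(3); // lanes {3, 0} as [2 x i32]
    constexpr __m64 s = _mm_slli_pi16(v, 2); // each 16-bit lane shifted left by 2
    static_assert(_mm_cvtsi64_si32(s) == 12, "3 << 2 == 12 in lane 0");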
diff --git a/clang/lib/Headers/ptrauth.h b/clang/lib/Headers/ptrauth.h
index 7f7d387..f902ca1 100644
--- a/clang/lib/Headers/ptrauth.h
+++ b/clang/lib/Headers/ptrauth.h
@@ -95,7 +95,7 @@ typedef __UINTPTR_TYPE__ ptrauth_generic_signature_t;
__ptrauth qualifier; the compiler will perform this check
automatically. */
-#if __has_feature(ptrauth_intrinsics)
+#if __has_feature(ptrauth_intrinsics) || defined(__PTRAUTH__)
/* Strip the signature from a value without authenticating it.
@@ -388,6 +388,6 @@ typedef __UINTPTR_TYPE__ ptrauth_generic_signature_t;
#define __ptrauth_objc_isa_uintptr
#define __ptrauth_objc_super_pointer
-#endif /* __has_feature(ptrauth_intrinsics) */
+#endif /* __has_feature(ptrauth_intrinsics) || defined(__PTRAUTH__) */
#endif /* __PTRAUTH_H */
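Consumer-side sketch of the widened guard; __PTRAUTH__ is the macro this change assumes is predefined when pointer authentication is enabled without the full feature check, and signed_ptr is a placeholder:

    #if __has_feature(ptrauth_intrinsics) || defined(__PTRAUTH__)
      void *raw = ptrauth_strip(signed_ptr, ptrauth_key_asda); // drop signature bits
    #endif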
diff --git a/clang/lib/Headers/smmintrin.h b/clang/lib/Headers/smmintrin.h
index bc6fe4c..f68dd7e 100644
--- a/clang/lib/Headers/smmintrin.h
+++ b/clang/lib/Headers/smmintrin.h
@@ -27,6 +27,12 @@
__min_vector_width__(128)))
#endif
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
/* SSE4 Rounding macros. */
#define _MM_FROUND_TO_NEAREST_INT 0x00
#define _MM_FROUND_TO_NEG_INF 0x01
@@ -561,8 +567,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32(__m128i __V1,
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [2 x i64] containing the products of both
/// operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epi32(__m128i __V1,
- __m128i __V2) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mul_epi32(__m128i __V1, __m128i __V2) {
return (__m128i)__builtin_ia32_pmuldq128((__v4si)__V1, (__v4si)__V2);
}
@@ -1205,8 +1211,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M,
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi64(__m128i __V1,
- __m128i __V2) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cmpeq_epi64(__m128i __V1, __m128i __V2) {
return (__m128i)((__v2di)__V1 == (__v2di)__V2);
}
@@ -1224,7 +1230,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi64(__m128i __V1,
/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
/// sign-extended to 16-bit values.
/// \returns A 128-bit vector of [8 x i16] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtepi8_epi16(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
return (__m128i) __builtin_convertvector(
@@ -1246,7 +1253,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V) {
/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
/// sign-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtepi8_epi32(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
return (__m128i) __builtin_convertvector(
@@ -1266,7 +1274,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V) {
/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtepi8_epi64(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
return (__m128i) __builtin_convertvector(
@@ -1286,7 +1295,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V) {
/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
/// sign-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtepi16_epi32(__m128i __V) {
return (__m128i) __builtin_convertvector(
__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
}
@@ -1304,7 +1314,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V) {
/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtepi16_epi64(__m128i __V) {
return (__m128i) __builtin_convertvector(
__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
}
@@ -1322,7 +1333,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V) {
/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtepi32_epi64(__m128i __V) {
return (__m128i) __builtin_convertvector(
__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
}
@@ -1341,7 +1353,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V) {
/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
/// zero-extended to 16-bit values.
/// \returns A 128-bit vector of [8 x i16] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtepu8_epi16(__m128i __V) {
return (__m128i) __builtin_convertvector(
__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6,
7),
@@ -1361,7 +1374,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V) {
/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
/// zero-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtepu8_epi32(__m128i __V) {
return (__m128i) __builtin_convertvector(
__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
}
@@ -1379,7 +1393,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V) {
/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtepu8_epi64(__m128i __V) {
return (__m128i) __builtin_convertvector(
__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
}
@@ -1397,7 +1412,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V) {
/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
/// zero-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtepu16_epi32(__m128i __V) {
return (__m128i) __builtin_convertvector(
__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
}
@@ -1415,7 +1431,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V) {
/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtepu16_epi64(__m128i __V) {
return (__m128i) __builtin_convertvector(
__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
}
@@ -1433,7 +1450,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V) {
/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cvtepu32_epi64(__m128i __V) {
return (__m128i) __builtin_convertvector(
__builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
}
@@ -2320,12 +2338,13 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi64(__m128i __V1,
- __m128i __V2) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_cmpgt_epi64(__m128i __V1, __m128i __V2) {
return (__m128i)((__v2di)__V1 > (__v2di)__V2);
}
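Editor's note: the attribute change lets this comparison fold at compile time. The vector `>` the body relies on produces all-ones (-1) lanes where the predicate holds and zero lanes elsewhere, matching PCMPGTQ. A minimal compile-time sketch (assumes this patched header, C++11 or later):

  constexpr __m128i A = {5, 1};
  constexpr __m128i B = {3, 9};
  constexpr __m128i R = _mm_cmpgt_epi64(A, B);
  static_assert(R[0] == -1 && R[1] == 0, "lane 0: 5 > 3; lane 1: 1 > 9 is false");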
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#include <popcntintrin.h>
diff --git a/clang/lib/Headers/tmmintrin.h b/clang/lib/Headers/tmmintrin.h
index 371cc82..f01c61a 100644
--- a/clang/lib/Headers/tmmintrin.h
+++ b/clang/lib/Headers/tmmintrin.h
@@ -33,6 +33,12 @@
(__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \
1, -1, -1)
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
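Editor's note: the guard keeps the header usable from C and pre-C++11 C++, where the macro collapses to the plain attribute set; only C++11 and later add `constexpr`. Sketch of the two expansions of one declaration from this file:

  /* C++11 and later: */
  static __inline__ __m64 __DEFAULT_FN_ATTRS constexpr _mm_abs_pi8(__m64 __a);
  /* C or C++03: */
  static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_abs_pi8(__m64 __a);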
/// Computes the absolute value of each of the packed 8-bit signed
/// integers in the source operand and stores the 8-bit unsigned integer
/// results in the destination.
@@ -45,9 +51,7 @@
/// A 64-bit vector of [8 x i8].
/// \returns A 64-bit integer vector containing the absolute values of the
/// elements in the operand.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_abs_pi8(__m64 __a)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_abs_pi8(__m64 __a) {
return (__m64)__builtin_elementwise_abs((__v8qs)__a);
}
@@ -63,10 +67,9 @@ _mm_abs_pi8(__m64 __a)
/// A 128-bit vector of [16 x i8].
/// \returns A 128-bit integer vector containing the absolute values of the
/// elements in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_abs_epi8(__m128i __a)
-{
- return (__m128i)__builtin_elementwise_abs((__v16qs)__a);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_abs_epi8(__m128i __a) {
+ return (__m128i)__builtin_elementwise_abs((__v16qs)__a);
}
/// Computes the absolute value of each of the packed 16-bit signed
@@ -81,10 +84,8 @@ _mm_abs_epi8(__m128i __a)
/// A 64-bit vector of [4 x i16].
/// \returns A 64-bit integer vector containing the absolute values of the
/// elements in the operand.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_abs_pi16(__m64 __a)
-{
- return (__m64)__builtin_elementwise_abs((__v4hi)__a);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_abs_pi16(__m64 __a) {
+ return (__m64)__builtin_elementwise_abs((__v4hi)__a);
}
/// Computes the absolute value of each of the packed 16-bit signed
@@ -99,10 +100,9 @@ _mm_abs_pi16(__m64 __a)
/// A 128-bit vector of [8 x i16].
/// \returns A 128-bit integer vector containing the absolute values of the
/// elements in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_abs_epi16(__m128i __a)
-{
- return (__m128i)__builtin_elementwise_abs((__v8hi)__a);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_abs_epi16(__m128i __a) {
+ return (__m128i)__builtin_elementwise_abs((__v8hi)__a);
}
/// Computes the absolute value of each of the packed 32-bit signed
@@ -117,10 +117,8 @@ _mm_abs_epi16(__m128i __a)
/// A 64-bit vector of [2 x i32].
/// \returns A 64-bit integer vector containing the absolute values of the
/// elements in the operand.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_abs_pi32(__m64 __a)
-{
- return (__m64)__builtin_elementwise_abs((__v2si)__a);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_abs_pi32(__m64 __a) {
+ return (__m64)__builtin_elementwise_abs((__v2si)__a);
}
/// Computes the absolute value of each of the packed 32-bit signed
@@ -135,10 +133,9 @@ _mm_abs_pi32(__m64 __a)
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit integer vector containing the absolute values of the
/// elements in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_abs_epi32(__m128i __a)
-{
- return (__m128i)__builtin_elementwise_abs((__v4si)__a);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_abs_epi32(__m128i __a) {
+ return (__m128i)__builtin_elementwise_abs((__v4si)__a);
}
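Editor's note: with the attribute applied, the elementwise-abs wrappers participate in constant evaluation. A minimal sketch (assumes this patched header, C++11 or later):

  constexpr __v4si In = {-3, 3, -7, 0};
  constexpr __v4si Out = (__v4si)_mm_abs_epi32((__m128i)In);
  static_assert(Out[0] == 3 && Out[2] == 7, "folded at compile time");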
/// Concatenates the two 128-bit integer vector operands, and
@@ -806,5 +803,6 @@ _mm_sign_pi32(__m64 __a, __m64 __b)
#undef __anyext128
#undef __trunc64
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#endif /* __TMMINTRIN_H */
diff --git a/clang/lib/Headers/xmmintrin.h b/clang/lib/Headers/xmmintrin.h
index 6a64369..6d44cff 100644
--- a/clang/lib/Headers/xmmintrin.h
+++ b/clang/lib/Headers/xmmintrin.h
@@ -16,7 +16,6 @@
#include <mmintrin.h>
-typedef int __v4si __attribute__((__vector_size__(16)));
typedef float __v4sf __attribute__((__vector_size__(16)));
typedef float __m128 __attribute__((__vector_size__(16), __aligned__(16)));
@@ -24,6 +23,7 @@ typedef float __m128_u __attribute__((__vector_size__(16), __aligned__(1)));
/* Unsigned types */
typedef unsigned int __v4su __attribute__((__vector_size__(16)));
+typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
/* This header should only be included in a hosted environment as it depends on
* a standard library to provide allocation routines. */
@@ -1688,7 +1688,7 @@ _mm_cvtsi64_ss(__m128 __a, long long __b) {
/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
/// converted value of the second operand. The upper 64 bits are copied from
/// the upper 64 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtpi32_ps(__m128 __a, __m64 __b)
{
return (__m128)__builtin_shufflevector(
@@ -1714,7 +1714,7 @@ _mm_cvtpi32_ps(__m128 __a, __m64 __b)
/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
/// converted value from the second operand. The upper 64 bits are copied
/// from the upper 64 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvt_pi2ps(__m128 __a, __m64 __b)
{
return _mm_cvtpi32_ps(__a, __b);
@@ -2447,11 +2447,11 @@ _mm_movemask_pi8(__m64 __a)
/// \param __b
/// A 64-bit integer vector containing one of the source operands.
/// \returns A 64-bit integer vector containing the products of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_mulhi_pu16(__m64 __a, __m64 __b)
{
- return __trunc64(__builtin_ia32_pmulhuw128((__v8hi)__anyext128(__a),
- (__v8hi)__anyext128(__b)));
+ return __trunc64(__builtin_ia32_pmulhuw128((__v8hu)__zext128(__a),
+ (__v8hu)__zext128(__b)));
}
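Editor's note: the operand changes matter for correctness, not just constexpr-friendliness. PMULHUW is an unsigned multiply returning the high 16 bits of each product, so the lanes must be read as unsigned (__v8hu) and the upper half of the widened 128-bit operand must be zeroed (__zext128) rather than left unspecified. Scalar model of one lane (a sketch, not the header's code):

  static inline unsigned short mulhi_u16(unsigned short a, unsigned short b) {
    return (unsigned short)(((unsigned)a * (unsigned)b) >> 16);
  }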
/// Shuffles the 4 16-bit integers from a 64-bit integer vector to the
@@ -2873,7 +2873,7 @@ _mm_movelh_ps(__m128 __a, __m128 __b) {
/// from the corresponding elements in this operand.
/// \returns A 128-bit vector of [4 x float] containing the copied and converted
/// values from the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtpi16_ps(__m64 __a)
{
return __builtin_convertvector((__v4hi)__a, __v4sf);
@@ -2891,7 +2891,7 @@ _mm_cvtpi16_ps(__m64 __a)
/// destination are copied from the corresponding elements in this operand.
/// \returns A 128-bit vector of [4 x float] containing the copied and converted
/// values from the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtpu16_ps(__m64 __a)
{
return __builtin_convertvector((__v4hu)__a, __v4sf);
@@ -2909,7 +2909,7 @@ _mm_cvtpu16_ps(__m64 __a)
/// from the corresponding lower 4 elements in this operand.
/// \returns A 128-bit vector of [4 x float] containing the copied and converted
/// values from the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtpi8_ps(__m64 __a)
{
return __builtin_convertvector(
@@ -2930,7 +2930,7 @@ _mm_cvtpi8_ps(__m64 __a)
/// operand.
/// \returns A 128-bit vector of [4 x float] containing the copied and converted
/// values from the source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtpu8_ps(__m64 __a)
{
return __builtin_convertvector(
@@ -2954,7 +2954,7 @@ _mm_cvtpu8_ps(__m64 __a)
/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
/// copied and converted values from the first operand. The upper 64 bits
/// contain the copied and converted values from the second operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
{
return __builtin_convertvector(
diff --git a/clang/lib/Headers/xopintrin.h b/clang/lib/Headers/xopintrin.h
index 976cdf4..7015719 100644
--- a/clang/lib/Headers/xopintrin.h
+++ b/clang/lib/Headers/xopintrin.h
@@ -20,6 +20,14 @@
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xop"), __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("xop"), __min_vector_width__(256)))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
+#endif
+
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
{
@@ -182,13 +190,13 @@ _mm_hsubq_epi32(__m128i __A)
return (__m128i)__builtin_ia32_vphsubdq((__v4si)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)(((__v2du)__A & (__v2du)__C) | ((__v2du)__B & ~(__v2du)__C));
}
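Editor's note: _mm_cmov_si128 (and the 256-bit variant below) is a per-bit select: each result bit comes from __A where the corresponding bit of the mask __C is 1 and from __B where it is 0, so the pure bitwise form is naturally constexpr. Scalar model of the formula (sketch):

  static inline unsigned long long cmov64(unsigned long long a,
                                          unsigned long long b,
                                          unsigned long long c) {
    return (a & c) | (b & ~c); /* c ? a : b, bit by bit */
  }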
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C)
{
return (__m256i)(((__v4du)__A & (__v4du)__C) | ((__v4du)__B & ~(__v4du)__C));
@@ -203,25 +211,25 @@ _mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_rot_epi8(__m128i __A, __m128i __B)
{
- return (__m128i)__builtin_ia32_vprotb((__v16qi)__A, (__v16qi)__B);
+ return (__m128i)__builtin_elementwise_fshl((__v16qu)__A, (__v16qu)__A, (__v16qu)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_rot_epi16(__m128i __A, __m128i __B)
{
- return (__m128i)__builtin_ia32_vprotw((__v8hi)__A, (__v8hi)__B);
+ return (__m128i)__builtin_elementwise_fshl((__v8hu)__A, (__v8hu)__A, (__v8hu)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_rot_epi32(__m128i __A, __m128i __B)
{
- return (__m128i)__builtin_ia32_vprotd((__v4si)__A, (__v4si)__B);
+ return (__m128i)__builtin_elementwise_fshl((__v4su)__A, (__v4su)__A, (__v4su)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_rot_epi64(__m128i __A, __m128i __B)
{
- return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B);
+ return (__m128i)__builtin_elementwise_fshl((__v2du)__A, (__v2du)__A, (__v2du)__B);
}
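Editor's note: a funnel shift whose two value operands are the same vector is a rotate. fshl(x, x, n) concatenates x:x, shifts left by n, and keeps the top word, which equals rotl(x, n) and is exactly what the vprot* instructions compute. Scalar model of one 32-bit lane (sketch):

  static inline unsigned rotl32(unsigned x, unsigned n) {
    n &= 31;
    return n ? (x << n) | (x >> (32 - n)) : x;
  }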
#define _mm_roti_epi8(A, N) \
@@ -766,5 +774,7 @@ _mm256_frcz_pd(__m256d __A)
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
#endif /* __XOPINTRIN_H */
diff --git a/clang/lib/Index/IndexSymbol.cpp b/clang/lib/Index/IndexSymbol.cpp
index 419ff79..c3cbc03 100644
--- a/clang/lib/Index/IndexSymbol.cpp
+++ b/clang/lib/Index/IndexSymbol.cpp
@@ -507,6 +507,9 @@ bool index::printSymbolName(const Decl *D, const LangOptions &LO,
StringRef index::getSymbolKindString(SymbolKind K) {
switch (K) {
+ // FIXME: For backwards compatibility, the include directive kind is
+ // treated the same as Unknown.
+ case SymbolKind::IncludeDirective:
case SymbolKind::Unknown: return "<unknown>";
case SymbolKind::Module: return "module";
case SymbolKind::Namespace: return "namespace";
diff --git a/clang/lib/Index/IndexTypeSourceInfo.cpp b/clang/lib/Index/IndexTypeSourceInfo.cpp
index adc33b3..74c6c11 100644
--- a/clang/lib/Index/IndexTypeSourceInfo.cpp
+++ b/clang/lib/Index/IndexTypeSourceInfo.cpp
@@ -59,9 +59,9 @@ public:
bool VisitTypedefTypeLoc(TypedefTypeLoc TL) {
SourceLocation Loc = TL.getNameLoc();
- TypedefNameDecl *ND = TL.getTypedefNameDecl();
+ TypedefNameDecl *ND = TL.getDecl();
if (ND->isTransparentTag()) {
- TagDecl *Underlying = ND->getUnderlyingType()->getAsTagDecl();
+ auto *Underlying = ND->getUnderlyingType()->castAsTagDecl();
return IndexCtx.handleReference(Underlying, Loc, Parent,
ParentDC, SymbolRoleSet(), Relations);
}
@@ -117,7 +117,7 @@ public:
}
bool VisitTagTypeLoc(TagTypeLoc TL) {
- TagDecl *D = TL.getDecl();
+ TagDecl *D = TL.getOriginalDecl();
if (!IndexCtx.shouldIndexFunctionLocalSymbols() &&
D->getParentFunctionOrMethod())
return true;
@@ -172,7 +172,8 @@ public:
return true;
}
- bool TraverseTemplateSpecializationTypeLoc(TemplateSpecializationTypeLoc TL) {
+ bool TraverseTemplateSpecializationTypeLoc(TemplateSpecializationTypeLoc TL,
+ bool TraverseQualifier) {
if (!WalkUpFromTemplateSpecializationTypeLoc(TL))
return false;
if (!TraverseTemplateName(TL.getTypePtr()->getTemplateName()))
@@ -202,11 +203,6 @@ public:
return true;
}
- bool VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
- return IndexCtx.handleReference(TL.getDecl(), TL.getNameLoc(), Parent,
- ParentDC, SymbolRoleSet(), Relations);
- }
-
bool VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
std::vector<const NamedDecl *> Symbols =
IndexCtx.getResolver()->resolveDependentNameType(TL.getTypePtr());
@@ -248,32 +244,28 @@ void IndexingContext::indexTypeLoc(TypeLoc TL,
TypeIndexer(*this, Parent, DC, isBase, isIBType).TraverseTypeLoc(TL);
}
-void IndexingContext::indexNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
- const NamedDecl *Parent,
- const DeclContext *DC) {
- if (!NNS)
- return;
-
- if (NestedNameSpecifierLoc Prefix = NNS.getPrefix())
- indexNestedNameSpecifierLoc(Prefix, Parent, DC);
-
+void IndexingContext::indexNestedNameSpecifierLoc(
+ NestedNameSpecifierLoc QualifierLoc, const NamedDecl *Parent,
+ const DeclContext *DC) {
if (!DC)
DC = Parent->getLexicalDeclContext();
- SourceLocation Loc = NNS.getLocalBeginLoc();
-
- switch (NNS.getNestedNameSpecifier()->getKind()) {
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ switch (NestedNameSpecifier Qualifier = QualifierLoc.getNestedNameSpecifier();
+ Qualifier.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
break;
- case NestedNameSpecifier::Namespace:
- handleReference(NNS.getNestedNameSpecifier()->getAsNamespace(),
- Loc, Parent, DC, SymbolRoleSet());
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = QualifierLoc.castAsNamespaceAndPrefix();
+ indexNestedNameSpecifierLoc(Prefix, Parent, DC);
+ handleReference(Namespace, QualifierLoc.getLocalBeginLoc(), Parent, DC,
+ SymbolRoleSet());
break;
+ }
- case NestedNameSpecifier::TypeSpec:
- indexTypeLoc(NNS.getTypeLoc(), Parent, DC);
+ case NestedNameSpecifier::Kind::Type:
+ indexTypeLoc(QualifierLoc.castAsTypeLoc(), Parent, DC);
break;
}
}
diff --git a/clang/lib/Index/USRGeneration.cpp b/clang/lib/Index/USRGeneration.cpp
index 6a884f7..c78d66f 100644
--- a/clang/lib/Index/USRGeneration.cpp
+++ b/clang/lib/Index/USRGeneration.cpp
@@ -653,14 +653,14 @@ bool USRGenerator::GenLoc(const Decl *D, bool IncludeOffset) {
}
static void printQualifier(llvm::raw_ostream &Out, const LangOptions &LangOpts,
- NestedNameSpecifier *NNS) {
+ NestedNameSpecifier NNS) {
// FIXME: Encode the qualifier, don't just print it.
PrintingPolicy PO(LangOpts);
PO.SuppressTagKeyword = true;
PO.SuppressUnwrittenScope = true;
PO.ConstantArraySizeAsWritten = false;
PO.AnonymousTagLocations = false;
- NNS->print(Out, PO);
+ NNS.print(Out, PO);
}
void USRGenerator::VisitType(QualType T) {
@@ -910,9 +910,14 @@ void USRGenerator::VisitType(QualType T) {
continue;
}
if (const TagType *TT = T->getAs<TagType>()) {
- Out << '$';
- VisitTagDecl(TT->getDecl());
- return;
+ if (const auto *ICNT = dyn_cast<InjectedClassNameType>(TT)) {
+ T = ICNT->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ Ctx);
+ } else {
+ Out << '$';
+ VisitTagDecl(TT->getOriginalDecl());
+ return;
+ }
}
if (const ObjCInterfaceType *OIT = T->getAs<ObjCInterfaceType>()) {
Out << '$';
@@ -926,7 +931,8 @@ void USRGenerator::VisitType(QualType T) {
VisitObjCProtocolDecl(Prot);
return;
}
- if (const TemplateTypeParmType *TTP = T->getAs<TemplateTypeParmType>()) {
+ if (const TemplateTypeParmType *TTP =
+ T->getAsCanonical<TemplateTypeParmType>()) {
Out << 't' << TTP->getDepth() << '.' << TTP->getIndex();
return;
}
@@ -945,10 +951,6 @@ void USRGenerator::VisitType(QualType T) {
Out << ':' << DNT->getIdentifier()->getName();
return;
}
- if (const InjectedClassNameType *InjT = T->getAs<InjectedClassNameType>()) {
- T = InjT->getInjectedSpecializationType();
- continue;
- }
if (const auto *VT = T->getAs<VectorType>()) {
Out << (T->isExtVectorType() ? ']' : '[');
Out << VT->getNumElements();
diff --git a/clang/lib/InstallAPI/Visitor.cpp b/clang/lib/InstallAPI/Visitor.cpp
index 487be2c..f12e040 100644
--- a/clang/lib/InstallAPI/Visitor.cpp
+++ b/clang/lib/InstallAPI/Visitor.cpp
@@ -424,7 +424,7 @@ std::string
InstallAPIVisitor::getMangledCXXRTTIName(const CXXRecordDecl *D) const {
SmallString<256> Name;
raw_svector_ostream NameStream(Name);
- MC->mangleCXXRTTIName(QualType(D->getTypeForDecl(), 0), NameStream);
+ MC->mangleCXXRTTIName(MC->getASTContext().getCanonicalTagType(D), NameStream);
return getBackendMangledName(Name);
}
@@ -432,7 +432,7 @@ InstallAPIVisitor::getMangledCXXRTTIName(const CXXRecordDecl *D) const {
std::string InstallAPIVisitor::getMangledCXXRTTI(const CXXRecordDecl *D) const {
SmallString<256> Name;
raw_svector_ostream NameStream(Name);
- MC->mangleCXXRTTI(QualType(D->getTypeForDecl(), 0), NameStream);
+ MC->mangleCXXRTTI(MC->getASTContext().getCanonicalTagType(D), NameStream);
return getBackendMangledName(Name);
}
@@ -543,8 +543,8 @@ void InstallAPIVisitor::emitVTableSymbols(const CXXRecordDecl *D,
}
for (const auto &It : D->bases()) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(It.getType()->castAs<RecordType>()->getDecl());
+ const CXXRecordDecl *Base = cast<CXXRecordDecl>(
+ It.getType()->castAs<RecordType>()->getOriginalDecl());
const auto BaseAccess = getAccessForDecl(Base);
if (!BaseAccess)
continue;
diff --git a/clang/lib/Interpreter/CMakeLists.txt b/clang/lib/Interpreter/CMakeLists.txt
index 70de4a2..d8dda45 100644
--- a/clang/lib/Interpreter/CMakeLists.txt
+++ b/clang/lib/Interpreter/CMakeLists.txt
@@ -22,6 +22,7 @@ endif()
add_clang_library(clangInterpreter
DeviceOffload.cpp
CodeCompletion.cpp
+ IncrementalAction.cpp
IncrementalExecutor.cpp
IncrementalParser.cpp
Interpreter.cpp
diff --git a/clang/lib/Interpreter/DeviceOffload.cpp b/clang/lib/Interpreter/DeviceOffload.cpp
index 9a25a26..7c3404e 100644
--- a/clang/lib/Interpreter/DeviceOffload.cpp
+++ b/clang/lib/Interpreter/DeviceOffload.cpp
@@ -26,9 +26,10 @@ namespace clang {
IncrementalCUDADeviceParser::IncrementalCUDADeviceParser(
CompilerInstance &DeviceInstance, CompilerInstance &HostInstance,
+ IncrementalAction *DeviceAct,
llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS,
- llvm::Error &Err, const std::list<PartialTranslationUnit> &PTUs)
- : IncrementalParser(DeviceInstance, Err), PTUs(PTUs), VFS(FS),
+ llvm::Error &Err, std::list<PartialTranslationUnit> &PTUs)
+ : IncrementalParser(DeviceInstance, DeviceAct, Err, PTUs), VFS(FS),
CodeGenOpts(HostInstance.getCodeGenOpts()),
TargetOpts(DeviceInstance.getTargetOpts()) {
if (Err)
diff --git a/clang/lib/Interpreter/DeviceOffload.h b/clang/lib/Interpreter/DeviceOffload.h
index 0b903e3..a31bd5a 100644
--- a/clang/lib/Interpreter/DeviceOffload.h
+++ b/clang/lib/Interpreter/DeviceOffload.h
@@ -22,15 +22,16 @@ struct PartialTranslationUnit;
class CompilerInstance;
class CodeGenOptions;
class TargetOptions;
+class IncrementalAction;
class IncrementalCUDADeviceParser : public IncrementalParser {
- const std::list<PartialTranslationUnit> &PTUs;
public:
IncrementalCUDADeviceParser(
CompilerInstance &DeviceInstance, CompilerInstance &HostInstance,
+ IncrementalAction *DeviceAct,
llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> VFS,
- llvm::Error &Err, const std::list<PartialTranslationUnit> &PTUs);
+ llvm::Error &Err, std::list<PartialTranslationUnit> &PTUs);
// Generate PTX for the last PTU.
llvm::Expected<llvm::StringRef> GeneratePTX();
diff --git a/clang/lib/Interpreter/IncrementalAction.cpp b/clang/lib/Interpreter/IncrementalAction.cpp
new file mode 100644
index 0000000..4d1bc4c
--- /dev/null
+++ b/clang/lib/Interpreter/IncrementalAction.cpp
@@ -0,0 +1,152 @@
+//===--- IncrementalAction.cpp - Incremental Frontend Action ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "IncrementalAction.h"
+
+#include "clang/AST/ASTConsumer.h"
+#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendOptions.h"
+#include "clang/FrontendTool/Utils.h"
+#include "clang/Interpreter/Interpreter.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+IncrementalAction::IncrementalAction(CompilerInstance &Instance,
+ llvm::LLVMContext &LLVMCtx,
+ llvm::Error &Err, Interpreter &I,
+ std::unique_ptr<ASTConsumer> Consumer)
+ : WrapperFrontendAction([&]() {
+ llvm::ErrorAsOutParameter EAO(&Err);
+ std::unique_ptr<FrontendAction> Act;
+ switch (Instance.getFrontendOpts().ProgramAction) {
+ default:
+ Err = llvm::createStringError(
+ std::errc::state_not_recoverable,
+ "Driver initialization failed. "
+ "Incremental mode for action %d is not supported",
+ Instance.getFrontendOpts().ProgramAction);
+ return Act;
+ case frontend::ASTDump:
+ case frontend::ASTPrint:
+ case frontend::ParseSyntaxOnly:
+ Act = CreateFrontendAction(Instance);
+ break;
+ case frontend::PluginAction:
+ case frontend::EmitAssembly:
+ case frontend::EmitBC:
+ case frontend::EmitObj:
+ case frontend::PrintPreprocessedInput:
+ case frontend::EmitLLVMOnly:
+ Act.reset(new EmitLLVMOnlyAction(&LLVMCtx));
+ break;
+ }
+ return Act;
+ }()),
+ Interp(I), CI(Instance), Consumer(std::move(Consumer)) {}
+
+std::unique_ptr<ASTConsumer>
+IncrementalAction::CreateASTConsumer(CompilerInstance & /*CI*/,
+ StringRef InFile) {
+ std::unique_ptr<ASTConsumer> C =
+ WrapperFrontendAction::CreateASTConsumer(this->CI, InFile);
+
+ if (Consumer) {
+ std::vector<std::unique_ptr<ASTConsumer>> Cs;
+ Cs.push_back(std::move(Consumer));
+ Cs.push_back(std::move(C));
+ return std::make_unique<MultiplexConsumer>(std::move(Cs));
+ }
+
+ return std::make_unique<InProcessPrintingASTConsumer>(std::move(C), Interp);
+}
+
+void IncrementalAction::ExecuteAction() {
+ WrapperFrontendAction::ExecuteAction();
+ getCompilerInstance().getSema().CurContext = nullptr;
+}
+
+void IncrementalAction::EndSourceFile() {
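+  // The WrappedAction can be nullptr if we issued an error in the ctor.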
+ if (IsTerminating && getWrapped())
+ WrapperFrontendAction::EndSourceFile();
+}
+
+void IncrementalAction::FinalizeAction() {
+ assert(!IsTerminating && "Already finalized!");
+ IsTerminating = true;
+ EndSourceFile();
+}
+
+void IncrementalAction::CacheCodeGenModule() {
+ CachedInCodeGenModule = GenModule();
+}
+
+llvm::Module *IncrementalAction::getCachedCodeGenModule() const {
+ return CachedInCodeGenModule.get();
+}
+
+std::unique_ptr<llvm::Module> IncrementalAction::GenModule() {
+ static unsigned ID = 0;
+ if (CodeGenerator *CG = getCodeGen()) {
+ // Clang's CodeGen is designed to work with a single llvm::Module. In many
+ // cases for convenience various CodeGen parts have a reference to the
+ // llvm::Module (TheModule or Module) which does not change when a new
+ // module is pushed. However, the execution engine wants to take ownership
+ // of the module, which does not map well to CodeGen's design. To work
+ // around this, we create an empty module to make CodeGen happy. We should
+ // make sure it always stays empty.
+ assert(((!CachedInCodeGenModule ||
+ !CI.getPreprocessorOpts().Includes.empty()) ||
+ (CachedInCodeGenModule->empty() &&
+ CachedInCodeGenModule->global_empty() &&
+ CachedInCodeGenModule->alias_empty() &&
+ CachedInCodeGenModule->ifunc_empty())) &&
+ "CodeGen wrote to a readonly module");
+ std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
+ CG->StartModule("incr_module_" + std::to_string(ID++), M->getContext());
+ return M;
+ }
+ return nullptr;
+}
+
+CodeGenerator *IncrementalAction::getCodeGen() const {
+ FrontendAction *WrappedAct = getWrapped();
+ if (!WrappedAct || !WrappedAct->hasIRSupport())
+ return nullptr;
+ return static_cast<CodeGenAction *>(WrappedAct)->getCodeGenerator();
+}
+
+InProcessPrintingASTConsumer::InProcessPrintingASTConsumer(
+ std::unique_ptr<ASTConsumer> C, Interpreter &I)
+ : MultiplexConsumer(std::move(C)), Interp(I) {}
+
+bool InProcessPrintingASTConsumer::HandleTopLevelDecl(DeclGroupRef DGR) {
+ if (DGR.isNull())
+ return true;
+
+ for (Decl *D : DGR)
+ if (auto *TLSD = llvm::dyn_cast<TopLevelStmtDecl>(D))
+ if (TLSD && TLSD->isSemiMissing()) {
+ auto ExprOrErr = Interp.convertExprToValue(cast<Expr>(TLSD->getStmt()));
+ if (llvm::Error E = ExprOrErr.takeError()) {
+ llvm::logAllUnhandledErrors(std::move(E), llvm::errs(),
+ "Value printing failed: ");
+ return false; // abort parsing
+ }
+ TLSD->setStmt(*ExprOrErr);
+ }
+
+ return MultiplexConsumer::HandleTopLevelDecl(DGR);
+}
+
+} // namespace clang
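Editor's note: a hedged usage sketch of the extracted class, mirroring how Interpreter wires it up later in this patch (CI, LLVMCtx, PTUs, and IncrParser stand for the interpreter's members):

  llvm::Error Err = llvm::Error::success();
  auto Act = std::make_unique<IncrementalAction>(*CI, LLVMCtx, Err, *this);
  if (Err)
    return;
  CI->ExecuteAction(*Act);
  IncrParser = std::make_unique<IncrementalParser>(*CI, Act.get(), Err, PTUs);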
diff --git a/clang/lib/Interpreter/IncrementalAction.h b/clang/lib/Interpreter/IncrementalAction.h
new file mode 100644
index 0000000..725cdd0
--- /dev/null
+++ b/clang/lib/Interpreter/IncrementalAction.h
@@ -0,0 +1,90 @@
+//===--- IncrementalAction.h - Incremental Frontend Action -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INTERPRETER_INCREMENTALACTION_H
+#define LLVM_CLANG_INTERPRETER_INCREMENTALACTION_H
+
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Frontend/MultiplexConsumer.h"
+
+namespace llvm {
+class Module;
+}
+
+namespace clang {
+
+class Interpreter;
+class CodeGenerator;
+
+/// A custom action enabling the incremental processing functionality.
+///
+/// The usual \p FrontendAction expects one call to ExecuteAction, and once it
+/// sees a call to \p EndSourceFile it deletes important objects such as the
+/// \p Preprocessor and \p Sema, assuming no further input will come.
+///
+/// \p IncrementalAction keeps its underlying action's objects alive as long
+/// as the \p IncrementalParser needs them.
+///
+class IncrementalAction : public WrapperFrontendAction {
+private:
+ bool IsTerminating = false;
+ Interpreter &Interp;
+ [[maybe_unused]] CompilerInstance &CI;
+ std::unique_ptr<ASTConsumer> Consumer;
+
+ /// When CodeGen is created the first llvm::Module gets cached in many places
+ /// and we must keep it alive.
+ std::unique_ptr<llvm::Module> CachedInCodeGenModule;
+
+public:
+ IncrementalAction(CompilerInstance &Instance, llvm::LLVMContext &LLVMCtx,
+ llvm::Error &Err, Interpreter &I,
+ std::unique_ptr<ASTConsumer> Consumer = nullptr);
+
+ FrontendAction *getWrapped() const { return WrappedAction.get(); }
+
+ TranslationUnitKind getTranslationUnitKind() override {
+ return TU_Incremental;
+ }
+
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override;
+
+ void ExecuteAction() override;
+
+ // Do not terminate after processing the input. This allows us to keep various
+ // clang objects alive and to incrementally grow the current TU.
+ void EndSourceFile() override;
+
+ void FinalizeAction();
+
+ /// Cache the current CodeGen module to preserve internal references.
+ void CacheCodeGenModule();
+
+ /// Access the cached CodeGen module.
+ llvm::Module *getCachedCodeGenModule() const;
+
+ /// Access the current code generator.
+ CodeGenerator *getCodeGen() const;
+
+ /// Generate an LLVM module for the most recent parsed input.
+ std::unique_ptr<llvm::Module> GenModule();
+};
+
+class InProcessPrintingASTConsumer final : public MultiplexConsumer {
+ Interpreter &Interp;
+
+public:
+ InProcessPrintingASTConsumer(std::unique_ptr<ASTConsumer> C, Interpreter &I);
+
+ bool HandleTopLevelDecl(DeclGroupRef DGR) override;
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_INTERPRETER_INCREMENTALACTION_H
diff --git a/clang/lib/Interpreter/IncrementalParser.cpp b/clang/lib/Interpreter/IncrementalParser.cpp
index 6343f17..32d1663 100644
--- a/clang/lib/Interpreter/IncrementalParser.cpp
+++ b/clang/lib/Interpreter/IncrementalParser.cpp
@@ -11,24 +11,29 @@
//===----------------------------------------------------------------------===//
#include "IncrementalParser.h"
+#include "IncrementalAction.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Interpreter/PartialTranslationUnit.h"
#include "clang/Parse/Parser.h"
#include "clang/Sema/Sema.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/Error.h"
#include <sstream>
+#define DEBUG_TYPE "clang-repl"
+
namespace clang {
// IncrementalParser::IncrementalParser() {}
IncrementalParser::IncrementalParser(CompilerInstance &Instance,
- llvm::Error &Err)
- : S(Instance.getSema()) {
+ IncrementalAction *Act, llvm::Error &Err,
+ std::list<PartialTranslationUnit> &PTUs)
+ : S(Instance.getSema()), Act(Act), PTUs(PTUs) {
llvm::ErrorAsOutParameter EAO(&Err);
Consumer = &S.getASTConsumer();
P.reset(new Parser(S.getPreprocessor(), S, /*SkipBodies=*/false));
@@ -185,4 +190,25 @@ void IncrementalParser::CleanUpPTU(TranslationUnitDecl *MostRecentTU) {
}
}
+PartialTranslationUnit &
+IncrementalParser::RegisterPTU(TranslationUnitDecl *TU,
+ std::unique_ptr<llvm::Module> M /*={}*/) {
+ PTUs.emplace_back(PartialTranslationUnit());
+ PartialTranslationUnit &LastPTU = PTUs.back();
+ LastPTU.TUPart = TU;
+
+ if (!M)
+ M = Act->GenModule();
+
+ assert((!Act->getCodeGen() || M) && "Must have a llvm::Module at this point");
+
+ LastPTU.TheModule = std::move(M);
+ LLVM_DEBUG(llvm::dbgs() << "compile-ptu " << PTUs.size() - 1
+ << ": [TU=" << LastPTU.TUPart);
+ if (LastPTU.TheModule)
+ LLVM_DEBUG(llvm::dbgs() << ", M=" << LastPTU.TheModule.get() << " ("
+ << LastPTU.TheModule->getName() << ")");
+ LLVM_DEBUG(llvm::dbgs() << "]\n");
+ return LastPTU;
+}
} // end namespace clang
diff --git a/clang/lib/Interpreter/IncrementalParser.h b/clang/lib/Interpreter/IncrementalParser.h
index 4fdde74..9b042bc 100644
--- a/clang/lib/Interpreter/IncrementalParser.h
+++ b/clang/lib/Interpreter/IncrementalParser.h
@@ -19,13 +19,18 @@
#include <list>
#include <memory>
+namespace llvm {
+class Module;
+}
+
namespace clang {
class ASTConsumer;
-class CodeGenerator;
class CompilerInstance;
class Parser;
class Sema;
class TranslationUnitDecl;
+class IncrementalAction;
+struct PartialTranslationUnit;
/// Provides support for incremental compilation. Keeps track of the state
/// changes between the subsequent incremental input.
@@ -44,10 +49,14 @@ protected:
/// Counts the number of direct user input lines that have been parsed.
unsigned InputCount = 0;
- // IncrementalParser();
+ /// The FrontendAction used during incremental parsing.
+ IncrementalAction *Act = nullptr;
+
+ std::list<PartialTranslationUnit> &PTUs;
public:
- IncrementalParser(CompilerInstance &Instance, llvm::Error &Err);
+ IncrementalParser(CompilerInstance &Instance, IncrementalAction *Act,
+ llvm::Error &Err, std::list<PartialTranslationUnit> &PTUs);
virtual ~IncrementalParser();
/// Parses incremental input by creating an in-memory file.
@@ -57,6 +66,10 @@ public:
void CleanUpPTU(TranslationUnitDecl *MostRecentTU);
+ /// Register a PTU produced by Parse.
+ PartialTranslationUnit &RegisterPTU(TranslationUnitDecl *TU,
+ std::unique_ptr<llvm::Module> M = {});
+
private:
llvm::Expected<TranslationUnitDecl *> ParseOrWrapTopLevelDecl();
};
diff --git a/clang/lib/Interpreter/Interpreter.cpp b/clang/lib/Interpreter/Interpreter.cpp
index 5e5ae81..4799521 100644
--- a/clang/lib/Interpreter/Interpreter.cpp
+++ b/clang/lib/Interpreter/Interpreter.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "DeviceOffload.h"
+#include "IncrementalAction.h"
#include "IncrementalExecutor.h"
#include "IncrementalParser.h"
#include "InterpreterUtils.h"
@@ -28,7 +29,6 @@
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CodeGenAction.h"
-#include "clang/CodeGen/ModuleBuilder.h"
#include "clang/CodeGen/ObjectFilePCHContainerWriter.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
@@ -248,120 +248,6 @@ IncrementalCompilerBuilder::CreateCudaHost() {
return IncrementalCompilerBuilder::createCuda(false);
}
-class InProcessPrintingASTConsumer final : public MultiplexConsumer {
- Interpreter &Interp;
-
-public:
- InProcessPrintingASTConsumer(std::unique_ptr<ASTConsumer> C, Interpreter &I)
- : MultiplexConsumer(std::move(C)), Interp(I) {}
- bool HandleTopLevelDecl(DeclGroupRef DGR) override final {
- if (DGR.isNull())
- return true;
-
- for (Decl *D : DGR)
- if (auto *TLSD = llvm::dyn_cast<TopLevelStmtDecl>(D))
- if (TLSD && TLSD->isSemiMissing()) {
- auto ExprOrErr =
- Interp.convertExprToValue(cast<Expr>(TLSD->getStmt()));
- if (llvm::Error E = ExprOrErr.takeError()) {
- llvm::logAllUnhandledErrors(std::move(E), llvm::errs(),
- "Value printing failed: ");
- return false; // abort parsing
- }
- TLSD->setStmt(*ExprOrErr);
- }
-
- return MultiplexConsumer::HandleTopLevelDecl(DGR);
- }
-};
-
-/// A custom action enabling the incremental processing functionality.
-///
-/// The usual \p FrontendAction expects one call to ExecuteAction and once it
-/// sees a call to \p EndSourceFile it deletes some of the important objects
-/// such as \p Preprocessor and \p Sema assuming no further input will come.
-///
-/// \p IncrementalAction ensures it keep its underlying action's objects alive
-/// as long as the \p IncrementalParser needs them.
-///
-class IncrementalAction : public WrapperFrontendAction {
-private:
- bool IsTerminating = false;
- Interpreter &Interp;
- std::unique_ptr<ASTConsumer> Consumer;
-
-public:
- IncrementalAction(CompilerInstance &CI, llvm::LLVMContext &LLVMCtx,
- llvm::Error &Err, Interpreter &I,
- std::unique_ptr<ASTConsumer> Consumer = nullptr)
- : WrapperFrontendAction([&]() {
- llvm::ErrorAsOutParameter EAO(&Err);
- std::unique_ptr<FrontendAction> Act;
- switch (CI.getFrontendOpts().ProgramAction) {
- default:
- Err = llvm::createStringError(
- std::errc::state_not_recoverable,
- "Driver initialization failed. "
- "Incremental mode for action %d is not supported",
- CI.getFrontendOpts().ProgramAction);
- return Act;
- case frontend::ASTDump:
- case frontend::ASTPrint:
- case frontend::ParseSyntaxOnly:
- Act = CreateFrontendAction(CI);
- break;
- case frontend::PluginAction:
- case frontend::EmitAssembly:
- case frontend::EmitBC:
- case frontend::EmitObj:
- case frontend::PrintPreprocessedInput:
- case frontend::EmitLLVMOnly:
- Act.reset(new EmitLLVMOnlyAction(&LLVMCtx));
- break;
- }
- return Act;
- }()),
- Interp(I), Consumer(std::move(Consumer)) {}
- FrontendAction *getWrapped() const { return WrappedAction.get(); }
- TranslationUnitKind getTranslationUnitKind() override {
- return TU_Incremental;
- }
-
- std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) override {
- std::unique_ptr<ASTConsumer> C =
- WrapperFrontendAction::CreateASTConsumer(CI, InFile);
-
- if (Consumer) {
- std::vector<std::unique_ptr<ASTConsumer>> Cs;
- Cs.push_back(std::move(Consumer));
- Cs.push_back(std::move(C));
- return std::make_unique<MultiplexConsumer>(std::move(Cs));
- }
-
- return std::make_unique<InProcessPrintingASTConsumer>(std::move(C), Interp);
- }
-
- void ExecuteAction() override {
- WrapperFrontendAction::ExecuteAction();
- getCompilerInstance().getSema().CurContext = nullptr;
- }
-
- // Do not terminate after processing the input. This allows us to keep various
- // clang objects alive and to incrementally grow the current TU.
- void EndSourceFile() override {
- // The WrappedAction can be nullptr if we issued an error in the ctor.
- if (IsTerminating && getWrapped())
- WrapperFrontendAction::EndSourceFile();
- }
-
- void FinalizeAction() {
- assert(!IsTerminating && "Already finalized!");
- IsTerminating = true;
- EndSourceFile();
- }
-};
-
Interpreter::Interpreter(std::unique_ptr<CompilerInstance> Instance,
llvm::Error &ErrOut,
std::unique_ptr<llvm::orc::LLJITBuilder> JITBuilder,
@@ -381,22 +267,23 @@ Interpreter::Interpreter(std::unique_ptr<CompilerInstance> Instance,
return;
CI->ExecuteAction(*Act);
- IncrParser = std::make_unique<IncrementalParser>(*CI, ErrOut);
+ IncrParser =
+ std::make_unique<IncrementalParser>(*CI, Act.get(), ErrOut, PTUs);
if (ErrOut)
return;
- if (getCodeGen()) {
- CachedInCodeGenModule = GenModule();
+ if (Act->getCodeGen()) {
+ Act->CacheCodeGenModule();
// The initial PTU is filled by `-include` or by CUDA includes
// automatically.
if (!CI->getPreprocessorOpts().Includes.empty()) {
// We can't really directly pass the CachedInCodeGenModule to the Jit
// because it will steal it, causing dangling references as explained in
// Interpreter::Execute
- auto M = llvm::CloneModule(*CachedInCodeGenModule);
+ auto M = llvm::CloneModule(*Act->getCachedCodeGenModule());
ASTContext &C = CI->getASTContext();
- RegisterPTU(C.getTranslationUnitDecl(), std::move(M));
+ IncrParser->RegisterPTU(C.getTranslationUnitDecl(), std::move(M));
}
if (llvm::Error Err = CreateExecutor()) {
ErrOut = joinErrors(std::move(ErrOut), std::move(Err));
@@ -405,7 +292,7 @@ Interpreter::Interpreter(std::unique_ptr<CompilerInstance> Instance,
}
// Not all frontends support code-generation, e.g. ast-dump actions don't
- if (getCodeGen()) {
+ if (Act->getCodeGen()) {
// Process the PTUs that came from initialization. For example -include will
// give us a header that's processed at initialization of the preprocessor.
for (PartialTranslationUnit &PTU : PTUs)
@@ -515,8 +402,8 @@ Interpreter::createWithCUDA(std::unique_ptr<CompilerInstance> CI,
Interp->DeviceCI = std::move(DCI);
auto DeviceParser = std::make_unique<IncrementalCUDADeviceParser>(
- *Interp->DeviceCI, *Interp->getCompilerInstance(), IMVFS, Err,
- Interp->PTUs);
+ *Interp->DeviceCI, *Interp->getCompilerInstance(),
+ Interp->DeviceAct.get(), IMVFS, Err, Interp->PTUs);
if (Err)
return std::move(Err);
@@ -557,30 +444,6 @@ size_t Interpreter::getEffectivePTUSize() const {
return PTUs.size() - InitPTUSize;
}
-PartialTranslationUnit &
-Interpreter::RegisterPTU(TranslationUnitDecl *TU,
- std::unique_ptr<llvm::Module> M /*={}*/,
- IncrementalAction *Action) {
- PTUs.emplace_back(PartialTranslationUnit());
- PartialTranslationUnit &LastPTU = PTUs.back();
- LastPTU.TUPart = TU;
-
- if (!M)
- M = GenModule(Action);
-
- assert((!getCodeGen(Action) || M) &&
- "Must have a llvm::Module at this point");
-
- LastPTU.TheModule = std::move(M);
- LLVM_DEBUG(llvm::dbgs() << "compile-ptu " << PTUs.size() - 1
- << ": [TU=" << LastPTU.TUPart);
- if (LastPTU.TheModule)
- LLVM_DEBUG(llvm::dbgs() << ", M=" << LastPTU.TheModule.get() << " ("
- << LastPTU.TheModule->getName() << ")");
- LLVM_DEBUG(llvm::dbgs() << "]\n");
- return LastPTU;
-}
-
llvm::Expected<PartialTranslationUnit &>
Interpreter::Parse(llvm::StringRef Code) {
// If we have a device parser, parse it first. The generated code will be
@@ -590,7 +453,7 @@ Interpreter::Parse(llvm::StringRef Code) {
if (auto E = DeviceTU.takeError())
return std::move(E);
- RegisterPTU(*DeviceTU, nullptr, DeviceAct.get());
+ DeviceParser->RegisterPTU(*DeviceTU);
llvm::Expected<llvm::StringRef> PTX = DeviceParser->GeneratePTX();
if (!PTX)
@@ -614,7 +477,7 @@ Interpreter::Parse(llvm::StringRef Code) {
PartialTranslationUnit &LastPTU = PTUs.back();
LastPTU.TUPart = *TuOrErr;
- if (std::unique_ptr<llvm::Module> M = GenModule())
+ if (std::unique_ptr<llvm::Module> M = Act->GenModule())
LastPTU.TheModule = std::move(M);
return LastPTU;
@@ -654,7 +517,7 @@ llvm::Error Interpreter::CreateExecutor() {
return llvm::make_error<llvm::StringError>("Operation failed. "
"Execution engine exists",
std::error_code());
- if (!getCodeGen())
+ if (!Act->getCodeGen())
return llvm::make_error<llvm::StringError>("Operation failed. "
"No code generator available",
std::error_code());
@@ -733,7 +596,7 @@ Interpreter::getSymbolAddress(GlobalDecl GD) const {
return llvm::make_error<llvm::StringError>("Operation failed. "
"No execution engine",
std::error_code());
- llvm::StringRef MangledName = getCodeGen()->GetMangledName(GD);
+ llvm::StringRef MangledName = Act->getCodeGen()->GetMangledName(GD);
return getSymbolAddress(MangledName);
}
@@ -809,38 +672,4 @@ llvm::Error Interpreter::LoadDynamicLibrary(const char *name) {
return llvm::Error::success();
}
-
-std::unique_ptr<llvm::Module>
-Interpreter::GenModule(IncrementalAction *Action) {
- static unsigned ID = 0;
- if (CodeGenerator *CG = getCodeGen(Action)) {
- // Clang's CodeGen is designed to work with a single llvm::Module. In many
- // cases for convenience various CodeGen parts have a reference to the
- // llvm::Module (TheModule or Module) which does not change when a new
- // module is pushed. However, the execution engine wants to take ownership
- // of the module which does not map well to CodeGen's design. To work this
- // around we created an empty module to make CodeGen happy. We should make
- // sure it always stays empty.
- assert(((!CachedInCodeGenModule ||
- !getCompilerInstance()->getPreprocessorOpts().Includes.empty()) ||
- ((CachedInCodeGenModule->empty() &&
- CachedInCodeGenModule->global_empty() &&
- CachedInCodeGenModule->alias_empty() &&
- CachedInCodeGenModule->ifunc_empty()))) &&
- "CodeGen wrote to a readonly module");
- std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
- CG->StartModule("incr_module_" + std::to_string(ID++), M->getContext());
- return M;
- }
- return nullptr;
-}
-
-CodeGenerator *Interpreter::getCodeGen(IncrementalAction *Action) const {
- if (!Action)
- Action = Act.get();
- FrontendAction *WrappedAct = Action->getWrapped();
- if (!WrappedAct->hasIRSupport())
- return nullptr;
- return static_cast<CodeGenAction *>(WrappedAct)->getCodeGenerator();
-}
} // end namespace clang
diff --git a/clang/lib/Interpreter/InterpreterValuePrinter.cpp b/clang/lib/Interpreter/InterpreterValuePrinter.cpp
index 0ea6274..54abfa6 100644
--- a/clang/lib/Interpreter/InterpreterValuePrinter.cpp
+++ b/clang/lib/Interpreter/InterpreterValuePrinter.cpp
@@ -10,7 +10,7 @@
//
//===----------------------------------------------------------------------===//
-#include "IncrementalParser.h"
+#include "IncrementalAction.h"
#include "InterpreterUtils.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/PrettyPrinter.h"
@@ -66,10 +66,10 @@ static std::string QualTypeToString(ASTContext &Ctx, QualType QT) {
const QualType NonRefTy = QT.getNonReferenceType();
if (const auto *TTy = llvm::dyn_cast<TagType>(NonRefTy))
- return DeclTypeToString(NonRefTy, TTy->getDecl());
+ return DeclTypeToString(NonRefTy, TTy->getOriginalDecl());
if (const auto *TRy = dyn_cast<RecordType>(NonRefTy))
- return DeclTypeToString(NonRefTy, TRy->getDecl());
+ return DeclTypeToString(NonRefTy, TRy->getOriginalDecl());
const QualType Canon = NonRefTy.getCanonicalType();
@@ -101,15 +101,11 @@ static std::string EnumToString(const Value &V) {
llvm::raw_string_ostream SS(Str);
ASTContext &Ctx = const_cast<ASTContext &>(V.getASTContext());
- QualType DesugaredTy = V.getType().getDesugaredType(Ctx);
- const EnumType *EnumTy = DesugaredTy.getNonReferenceType()->getAs<EnumType>();
- assert(EnumTy && "Fail to cast to enum type");
-
- EnumDecl *ED = EnumTy->getDecl();
uint64_t Data = V.convertTo<uint64_t>();
bool IsFirst = true;
- llvm::APSInt AP = Ctx.MakeIntValue(Data, DesugaredTy);
+ llvm::APSInt AP = Ctx.MakeIntValue(Data, V.getType());
+ auto *ED = V.getType()->castAsEnumDecl();
for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; ++I) {
if (I->getInitVal() == AP) {
if (!IsFirst)
@@ -366,7 +362,7 @@ Interpreter::CompileDtorCall(CXXRecordDecl *CXXRD) const {
getCompilerInstance()->getSema().LookupDestructor(CXXRD);
llvm::StringRef Name =
- getCodeGen()->GetMangledName(GlobalDecl(DtorRD, Dtor_Base));
+ Act->getCodeGen()->GetMangledName(GlobalDecl(DtorRD, Dtor_Base));
auto AddrOrErr = getSymbolAddress(Name);
if (!AddrOrErr)
return AddrOrErr.takeError();
@@ -665,8 +661,8 @@ __clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
if (VRef.getKind() == Value::K_PtrOrObj) {
VRef.setPtr(va_arg(args, void *));
} else {
- if (const auto *ET = QT->getAs<EnumType>())
- QT = ET->getDecl()->getIntegerType();
+ if (const auto *ED = QT->getAsEnumDecl())
+ QT = ED->getIntegerType();
switch (QT->castAs<BuiltinType>()->getKind()) {
default:
llvm_unreachable("unknown type kind!");
diff --git a/clang/lib/Interpreter/Value.cpp b/clang/lib/Interpreter/Value.cpp
index be2ab55..d4c9d51 100644
--- a/clang/lib/Interpreter/Value.cpp
+++ b/clang/lib/Interpreter/Value.cpp
@@ -101,8 +101,8 @@ static Value::Kind ConvertQualTypeToKind(const ASTContext &Ctx, QualType QT) {
if (Ctx.hasSameType(QT, Ctx.VoidTy))
return Value::K_Void;
- if (const auto *ET = QT->getAs<EnumType>())
- QT = ET->getDecl()->getIntegerType();
+ if (const auto *ED = QT->getAsEnumDecl())
+ QT = ED->getIntegerType();
const auto *BT = QT->getAs<BuiltinType>();
if (!BT || BT->isNullPtrType())
@@ -147,15 +147,12 @@ Value::Value(const Interpreter *In, void *Ty) : Interp(In), OpaqueType(Ty) {
} while (ArrTy);
ElementsSize = static_cast<size_t>(ArrSize.getZExtValue());
}
- if (const auto *RT = DtorTy->getAs<RecordType>()) {
- if (CXXRecordDecl *CXXRD =
- llvm::dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- if (llvm::Expected<llvm::orc::ExecutorAddr> Addr =
- Interp.CompileDtorCall(CXXRD))
- DtorF = reinterpret_cast<void *>(Addr->getValue());
- else
- llvm::logAllUnhandledErrors(Addr.takeError(), llvm::errs());
- }
+ if (auto *CXXRD = DtorTy->getAsCXXRecordDecl()) {
+ if (llvm::Expected<llvm::orc::ExecutorAddr> Addr =
+ Interp.CompileDtorCall(CXXRD))
+ DtorF = reinterpret_cast<void *>(Addr->getValue());
+ else
+ llvm::logAllUnhandledErrors(Addr.takeError(), llvm::errs());
}
size_t AllocSize =
diff --git a/clang/lib/Lex/DependencyDirectivesScanner.cpp b/clang/lib/Lex/DependencyDirectivesScanner.cpp
index 9ccff5e..eee57c7 100644
--- a/clang/lib/Lex/DependencyDirectivesScanner.cpp
+++ b/clang/lib/Lex/DependencyDirectivesScanner.cpp
@@ -83,6 +83,8 @@ struct Scanner {
/// \returns True on error.
bool scan(SmallVectorImpl<Directive> &Directives);
+ friend bool clang::scanInputForCXX20ModulesUsage(StringRef Source);
+
private:
/// Lexes next token and advances \p First and the \p Lexer.
[[nodiscard]] dependency_directives_scan::Token &
@@ -1075,3 +1077,51 @@ void clang::printDependencyDirectivesAsSource(
}
}
}
+
+static void skipUntilMaybeCXX20ModuleDirective(const char *&First,
+ const char *const End) {
+ assert(First <= End);
+ while (First != End) {
+ if (*First == '#') {
+ ++First;
+ skipToNewlineRaw(First, End);
+ }
+ skipWhitespace(First, End);
+ if (const auto Len = isEOL(First, End)) {
+ First += Len;
+ continue;
+ }
+ break;
+ }
+}
+
+bool clang::scanInputForCXX20ModulesUsage(StringRef Source) {
+ const char *First = Source.begin();
+ const char *const End = Source.end();
+ skipUntilMaybeCXX20ModuleDirective(First, End);
+ if (First == End)
+ return false;
+
+ // Check if the next token can even be a module directive before creating a
+ // full lexer.
+ if (!(*First == 'i' || *First == 'e' || *First == 'm'))
+ return false;
+
+ llvm::SmallVector<dependency_directives_scan::Token> Tokens;
+ Scanner S(StringRef(First, End - First), Tokens, nullptr, SourceLocation());
+ S.TheLexer.setParsingPreprocessorDirective(true);
+ if (S.lexModule(First, End))
+ return false;
+ auto IsCXXNamedModuleDirective = [](const DirectiveWithTokens &D) {
+ switch (D.Kind) {
+ case dependency_directives_scan::cxx_module_decl:
+ case dependency_directives_scan::cxx_import_decl:
+ case dependency_directives_scan::cxx_export_module_decl:
+ case dependency_directives_scan::cxx_export_import_decl:
+ return true;
+ default:
+ return false;
+ }
+ };
+ return llvm::any_of(S.DirsWithToks, IsCXXNamedModuleDirective);
+}
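Editor's note: a sketch of the intended results (assuming the new entry point is declared in DependencyDirectivesScanner.h):

  #include "clang/Lex/DependencyDirectivesScanner.h"
  #include <cassert>

  void demo() {
    assert(clang::scanInputForCXX20ModulesUsage("export module M;"));
    assert(clang::scanInputForCXX20ModulesUsage("import std;"));
    assert(!clang::scanInputForCXX20ModulesUsage("int main() { return 0; }"));
  }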
diff --git a/clang/lib/Lex/Lexer.cpp b/clang/lib/Lex/Lexer.cpp
index 1f695b4..b282a60 100644
--- a/clang/lib/Lex/Lexer.cpp
+++ b/clang/lib/Lex/Lexer.cpp
@@ -174,8 +174,6 @@ void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
ExtendedTokenMode = 0;
NewLinePtr = nullptr;
-
- IsFirstPPToken = true;
}
/// Lexer constructor - Create a new lexer object for the specified buffer
@@ -3225,7 +3223,6 @@ std::optional<Token> Lexer::peekNextPPToken() {
bool atStartOfLine = IsAtStartOfLine;
bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
bool leadingSpace = HasLeadingSpace;
- bool isFirstPPToken = IsFirstPPToken;
Token Tok;
Lex(Tok);
@@ -3236,7 +3233,6 @@ std::optional<Token> Lexer::peekNextPPToken() {
HasLeadingSpace = leadingSpace;
IsAtStartOfLine = atStartOfLine;
IsAtPhysicalStartOfLine = atPhysicalStartOfLine;
- IsFirstPPToken = isFirstPPToken;
// Restore the lexer back to non-skipping mode.
LexingRawMode = false;
@@ -3726,11 +3722,6 @@ bool Lexer::Lex(Token &Result) {
HasLeadingEmptyMacro = false;
}
- if (IsFirstPPToken) {
- Result.setFlag(Token::FirstPPToken);
- IsFirstPPToken = false;
- }
-
bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
IsAtPhysicalStartOfLine = false;
bool isRawLex = isLexingRawMode();
diff --git a/clang/lib/Lex/ModuleMapFile.cpp b/clang/lib/Lex/ModuleMapFile.cpp
index 183e919..f0cd9d2 100644
--- a/clang/lib/Lex/ModuleMapFile.cpp
+++ b/clang/lib/Lex/ModuleMapFile.cpp
@@ -118,7 +118,8 @@ struct ModuleMapFileParser {
std::optional<ExcludeDecl> parseExcludeDecl(clang::SourceLocation LeadingLoc);
std::optional<UmbrellaDirDecl>
parseUmbrellaDirDecl(SourceLocation UmbrellaLoc);
- std::optional<LinkDecl> parseLinkDecl();
+ std::optional<LinkDecl>
+ parseLinkDecl(llvm::StringMap<SourceLocation> &SeenLinkDecl, bool Allowed);
SourceLocation consumeToken();
void skipUntil(MMToken::TokenKind K);
@@ -325,6 +326,7 @@ std::optional<ModuleDecl> ModuleMapFileParser::parseModuleDecl(bool TopLevel) {
SourceLocation LBraceLoc = consumeToken();
bool Done = false;
+ llvm::StringMap<SourceLocation> SeenLinkDecl;
do {
std::optional<Decl> SubDecl;
switch (Tok.Kind) {
@@ -405,7 +407,9 @@ std::optional<ModuleDecl> ModuleMapFileParser::parseModuleDecl(bool TopLevel) {
break;
case MMToken::LinkKeyword:
- SubDecl = parseLinkDecl();
+ // Link decls are only allowed in top level modules or explicit
+ // submodules.
+ SubDecl = parseLinkDecl(SeenLinkDecl, TopLevel || MDecl.Explicit);
break;
default:
@@ -822,7 +826,8 @@ ModuleMapFileParser::parseUmbrellaDirDecl(clang::SourceLocation UmbrellaLoc) {
///
/// module-declaration:
/// 'link' 'framework'[opt] string-literal
-std::optional<LinkDecl> ModuleMapFileParser::parseLinkDecl() {
+std::optional<LinkDecl> ModuleMapFileParser::parseLinkDecl(
+ llvm::StringMap<SourceLocation> &SeenLinkDecl, bool Allowed) {
assert(Tok.is(MMToken::LinkKeyword));
LinkDecl LD;
LD.Location = consumeToken();
@@ -838,12 +843,33 @@ std::optional<LinkDecl> ModuleMapFileParser::parseLinkDecl() {
if (!Tok.is(MMToken::StringLiteral)) {
Diags.Report(Tok.getLocation(), diag::err_mmap_expected_library_name)
<< LD.Framework << SourceRange(LD.Location);
+ consumeToken();
HadError = true;
return std::nullopt;
}
- LD.Library = Tok.getString();
+ StringRef Library = Tok.getString();
+
+ LD.Library = Library;
consumeToken();
+
+ // Make sure we consume all the tokens before reporting the errors, so
+ // parsing can continue.
+ if (!Allowed) {
+ Diags.Report(LD.Location, diag::err_mmap_submodule_link_decl);
+ HadError = true;
+ return std::nullopt;
+ }
+
+ auto [It, Inserted] =
+ SeenLinkDecl.insert(std::make_pair(Library, LD.Location));
+ if (!Inserted) {
+ Diags.Report(LD.Location, diag::warn_mmap_link_redeclaration) << Library;
+ Diags.Report(It->second, diag::note_mmap_prev_link_declaration);
+ HadError = true;
+ return std::nullopt;
+ }
+
return std::move(LD);
}
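Editor's note: the two new checks, illustrated on a hypothetical module map (SeenLinkDecl is created per parseModuleDecl call, so duplicates are detected within one module):

  module Top {
    link "z"
    link "z"        // warn_mmap_link_redeclaration, note at the first decl
    module Sub {    // not marked 'explicit'
      link "curses" // err_mmap_submodule_link_decl
    }
  }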
diff --git a/clang/lib/Lex/PPDirectives.cpp b/clang/lib/Lex/PPDirectives.cpp
index 3fa060f..9d01b8d 100644
--- a/clang/lib/Lex/PPDirectives.cpp
+++ b/clang/lib/Lex/PPDirectives.cpp
@@ -3793,9 +3793,13 @@ Preprocessor::LexEmbedParameters(Token &CurTok, bool ForHasEmbed) {
[[fallthrough]];
case tok::r_brace:
case tok::r_square: {
+ if (BracketStack.empty()) {
+ ExpectOrDiagAndSkipToEOD(tok::r_paren);
+ return false;
+ }
tok::TokenKind Matching =
GetMatchingCloseBracket(BracketStack.back().first);
- if (BracketStack.empty() || CurTok.getKind() != Matching) {
+ if (CurTok.getKind() != Matching) {
DiagMismatchedBracesAndSkipToEOD(Matching, BracketStack.back());
return false;
}
diff --git a/clang/lib/Lex/Preprocessor.cpp b/clang/lib/Lex/Preprocessor.cpp
index e278846..e003ad3 100644
--- a/clang/lib/Lex/Preprocessor.cpp
+++ b/clang/lib/Lex/Preprocessor.cpp
@@ -43,6 +43,7 @@
#include "clang/Lex/MacroArgs.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/ModuleLoader.h"
+#include "clang/Lex/NoTrivialPPDirectiveTracer.h"
#include "clang/Lex/Pragma.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/PreprocessorLexer.h"
@@ -247,8 +248,6 @@ void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
llvm::errs() << " [LeadingSpace]";
if (Tok.isExpandDisabled())
llvm::errs() << " [ExpandDisabled]";
- if (Tok.isFirstPPToken())
- llvm::errs() << " [First pp-token]";
if (Tok.needsCleaning()) {
const char *Start = SourceMgr.getCharacterData(Tok.getLocation());
llvm::errs() << " [UnClean='" << StringRef(Start, Tok.getLength())
@@ -577,8 +576,11 @@ void Preprocessor::EnterMainSourceFile() {
// export module M; // error: module declaration must occur
// // at the start of the translation unit.
if (getLangOpts().CPlusPlusModules) {
+ auto Tracer = std::make_unique<NoTrivialPPDirectiveTracer>(*this);
+ DirTracer = Tracer.get();
+ addPPCallbacks(std::move(Tracer));
std::optional<Token> FirstPPTok = CurLexer->peekNextPPToken();
- if (FirstPPTok && FirstPPTok->isFirstPPToken())
+ if (FirstPPTok)
FirstPPTokenLoc = FirstPPTok->getLocation();
}
}
@@ -940,6 +942,8 @@ void Preprocessor::Lex(Token &Result) {
StdCXXImportSeqState.handleHeaderName();
break;
case tok::kw_export:
+ if (hasSeenNoTrivialPPDirective())
+ Result.setFlag(Token::HasSeenNoTrivialPPDirective);
TrackGMFState.handleExport();
StdCXXImportSeqState.handleExport();
ModuleDeclState.handleExport();
@@ -968,6 +972,8 @@ void Preprocessor::Lex(Token &Result) {
}
break;
} else if (Result.getIdentifierInfo() == getIdentifierInfo("module")) {
+ if (hasSeenNoTrivialPPDirective())
+ Result.setFlag(Token::HasSeenNoTrivialPPDirective);
TrackGMFState.handleModule(StdCXXImportSeqState.afterTopLevelSeq());
ModuleDeclState.handleModule();
break;
@@ -1682,3 +1688,31 @@ const char *Preprocessor::getCheckPoint(FileID FID, const char *Start) const {
return nullptr;
}
+
+bool Preprocessor::hasSeenNoTrivialPPDirective() const {
+ return DirTracer && DirTracer->hasSeenNoTrivialPPDirective();
+}
+
+bool NoTrivialPPDirectiveTracer::hasSeenNoTrivialPPDirective() const {
+ return SeenNoTrivialPPDirective;
+}
+
+void NoTrivialPPDirectiveTracer::setSeenNoTrivialPPDirective() {
+ if (InMainFile && !SeenNoTrivialPPDirective)
+ SeenNoTrivialPPDirective = true;
+}
+
+void NoTrivialPPDirectiveTracer::LexedFileChanged(
+ FileID FID, LexedFileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType, FileID PrevFID, SourceLocation Loc) {
+ InMainFile = (FID == PP.getSourceManager().getMainFileID());
+}
+
+void NoTrivialPPDirectiveTracer::MacroExpands(const Token &MacroNameTok,
+ const MacroDefinition &MD,
+ SourceRange Range,
+ const MacroArgs *Args) {
+ // FIXME: Does it make sense to exempt only builtin macro expansions?
+ if (!MD.getMacroInfo()->isBuiltinMacro())
+ setSeenNoTrivialPPDirective();
+}
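
A sketch of what the tracer enables, under my reading of the change: any
non-trivial preprocessor activity before the module declaration now flags the
introducer, even when it produces no tokens at all, which the purely
token-based isFirstPPToken check could not see.

    #define NOTHING   // a directive, observed by the tracer
    NOTHING           // non-builtin expansion: MacroExpands records it
    module;           // now diagnosed: introducer not at the start of the TU
    export module m;
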
diff --git a/clang/lib/Parse/ParseCXXInlineMethods.cpp b/clang/lib/Parse/ParseCXXInlineMethods.cpp
index 9a010fb..74e2500 100644
--- a/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -1161,6 +1161,12 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
while (true) {
switch (Tok.getKind()) {
+ case tok::ellipsis:
+ // We found an ellipsis at the end of the parameter list;
+ // it is not part of a parameter declaration.
+ if (ParenCount == 1 && NextToken().is(tok::r_paren))
+ return true;
+ goto consume_token;
case tok::comma:
// If we might be in a template, perform a tentative parse to check.
if (!AngleCount)
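
For illustration, my reading of the fix: C++ permits an ellipsis that is not
preceded by a comma, so while caching a default-argument initializer the
stored tokens must stop before a '...' that immediately precedes the closing
parenthesis.

    struct S {
      // The '...' terminates the parameter list; it must not be swallowed
      // into the cached initializer tokens for 'x'.
      void f(int x = 1 ...);
    };
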
diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp
index fd53cca..10355bb 100644
--- a/clang/lib/Parse/ParseDecl.cpp
+++ b/clang/lib/Parse/ParseDecl.cpp
@@ -6224,7 +6224,6 @@ void Parser::ParseTypeQualifierListOpt(
case tok::kw___funcref:
ParseWebAssemblyFuncrefTypeAttribute(DS.getAttributes());
continue;
- goto DoneWithTypeQuals;
case tok::kw___pascal:
if (AttrReqs & AR_VendorAttributesParsed) {
@@ -7395,7 +7394,7 @@ void Parser::ParseFunctionDeclaratorIdentifierList(
Diag(Tok, diag::ext_ident_list_in_param);
// Maintain an efficient lookup of params we have seen so far.
- llvm::SmallSet<const IdentifierInfo*, 16> ParamsSoFar;
+ llvm::SmallPtrSet<const IdentifierInfo *, 16> ParamsSoFar;
do {
// If this isn't an identifier, report the error and skip until ')'.
@@ -7879,9 +7878,9 @@ void Parser::ParseMisplacedBracketDeclarator(Declarator &D) {
D.AddTypeInfo(Chunk, TempDeclarator.getAttributePool(), SourceLocation());
}
- // The missing identifier would have been diagnosed in ParseDirectDeclarator.
+ // The missing name would have been diagnosed in ParseDirectDeclarator.
// If parentheses are required, always suggest them.
- if (!D.getIdentifier() && !NeedParens)
+ if (!D.hasName() && !NeedParens)
return;
SourceLocation EndBracketLoc = TempDeclarator.getEndLoc();
diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp
index bc8841c..8135f4f 100644
--- a/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/clang/lib/Parse/ParseDeclCXX.cpp
@@ -591,7 +591,7 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
NextToken().isRegularKeywordAttribute() ||
NextToken().is(tok::kw___attribute)) &&
D.SS.isNotEmpty() && LastII == Tok.getIdentifierInfo() &&
- D.SS.getScopeRep()->getKind() != NestedNameSpecifier::Namespace) {
+ D.SS.getScopeRep().getKind() != NestedNameSpecifier::Kind::Namespace) {
SourceLocation IdLoc = ConsumeToken();
ParsedType Type =
Actions.getInheritingConstructorName(D.SS, IdLoc, *LastII);
@@ -4512,6 +4512,27 @@ bool Parser::ParseCXX11AttributeArgs(
Form = ParsedAttr::Form::Microsoft();
}
+ if (LO.CPlusPlus) {
+ TentativeParsingAction TPA(*this);
+ bool HasInvalidArgument = false;
+ while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::eof)) {
+ if (Tok.isOneOf(tok::hash, tok::hashhash)) {
+ Diag(Tok.getLocation(), diag::ext_invalid_attribute_argument)
+ << PP.getSpelling(Tok);
+ HasInvalidArgument = true;
+ }
+ ConsumeAnyToken();
+ }
+
+ if (HasInvalidArgument) {
+ SkipUntil(tok::r_paren);
+ TPA.Commit();
+ return true;
+ }
+
+ TPA.Revert();
+ }
+
// If the attribute isn't known, we will not attempt to parse any
// arguments.
if (Form.getSyntax() != ParsedAttr::AS_Microsoft &&
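
A sketch of the inputs this pre-scan catches (the attribute name is
hypothetical): preprocessing-only tokens such as '#' and '##' cannot appear
in C++ attribute arguments, and are now diagnosed before any
attribute-specific argument parsing runs.

    [[vendor::attr(#)]] void f();       // diagnosed: '#' in attribute args
    [[vendor::attr(a ## b)]] void g();  // likewise for '##'
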
@@ -4923,33 +4944,20 @@ void Parser::ParseHLSLRootSignatureAttributeArgs(ParsedAttributes &Attrs) {
return std::nullopt;
};
- auto StrLiteral = ProcessStringLiteral();
- if (!StrLiteral.has_value()) {
+ auto Signature = ProcessStringLiteral();
+ if (!Signature.has_value()) {
Diag(Tok, diag::err_expected_string_literal)
- << /*in attributes...*/ 4 << RootSignatureIdent->getName();
- SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
- T.consumeClose();
+ << /*in attributes...*/ 4 << "RootSignature";
return;
}
// Construct our identifier
- StringLiteral *Signature = StrLiteral.value();
- auto [DeclIdent, Found] =
- Actions.HLSL().ActOnStartRootSignatureDecl(Signature->getString());
- // If we haven't found an already defined DeclIdent then parse the root
- // signature string and construct the in-memory elements
- if (!Found) {
- // Invoke the root signature parser to construct the in-memory constructs
- hlsl::RootSignatureParser Parser(getLangOpts().HLSLRootSigVer, Signature,
- PP);
- if (Parser.parse()) {
- T.consumeClose();
- return;
- }
-
- // Construct the declaration.
- Actions.HLSL().ActOnFinishRootSignatureDecl(RootSignatureLoc, DeclIdent,
- Parser.getElements());
+ IdentifierInfo *DeclIdent = hlsl::ParseHLSLRootSignature(
+ Actions, getLangOpts().HLSLRootSigVer, *Signature);
+ if (!DeclIdent) {
+ SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
+ T.consumeClose();
+ return;
}
// Create the arg for the ParsedAttr
diff --git a/clang/lib/Parse/ParseExpr.cpp b/clang/lib/Parse/ParseExpr.cpp
index bc238a9..3515343 100644
--- a/clang/lib/Parse/ParseExpr.cpp
+++ b/clang/lib/Parse/ParseExpr.cpp
@@ -3342,7 +3342,8 @@ ExprResult Parser::ParseBlockLiteralExpression() {
Actions.ActOnBlockError(CaretLoc, getCurScope());
return ExprError();
}
-
+ EnterExpressionEvaluationContextForFunction PotentiallyEvaluated(
+ Actions, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
StmtResult Stmt(ParseCompoundStatementBody());
BlockScope.Exit();
if (!Stmt.isInvalid())
diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp
index 8dce226..8605ba2 100644
--- a/clang/lib/Parse/ParseExprCXX.cpp
+++ b/clang/lib/Parse/ParseExprCXX.cpp
@@ -195,9 +195,10 @@ bool Parser::ParseOptionalCXXScopeSpecifier(
if (DS.getTypeSpecType() == DeclSpec::TST_error)
return false;
- QualType Type = Actions.ActOnPackIndexingType(
- DS.getRepAsType().get(), DS.getPackIndexingExpr(), DS.getBeginLoc(),
- DS.getEllipsisLoc());
+ QualType Pattern = Sema::GetTypeFromParser(DS.getRepAsType());
+ QualType Type =
+ Actions.ActOnPackIndexingType(Pattern, DS.getPackIndexingExpr(),
+ DS.getBeginLoc(), DS.getEllipsisLoc());
if (Type.isNull())
return false;
@@ -2355,8 +2356,10 @@ bool Parser::ParseUnqualifiedIdTemplateId(
// Constructor and destructor names.
TypeResult Type = Actions.ActOnTemplateIdType(
- getCurScope(), SS, TemplateKWLoc, Template, Name, NameLoc, LAngleLoc,
- TemplateArgsPtr, RAngleLoc, /*IsCtorOrDtorName=*/true);
+ getCurScope(), ElaboratedTypeKeyword::None,
+ /*ElaboratedKeywordLoc=*/SourceLocation(), SS, TemplateKWLoc, Template,
+ Name, NameLoc, LAngleLoc, TemplateArgsPtr, RAngleLoc,
+ /*IsCtorOrDtorName=*/true);
if (Type.isInvalid())
return true;
diff --git a/clang/lib/Parse/ParseHLSLRootSignature.cpp b/clang/lib/Parse/ParseHLSLRootSignature.cpp
index 98dc458..1af72f8 100644
--- a/clang/lib/Parse/ParseHLSLRootSignature.cpp
+++ b/clang/lib/Parse/ParseHLSLRootSignature.cpp
@@ -9,6 +9,7 @@
#include "clang/Parse/ParseHLSLRootSignature.h"
#include "clang/Lex/LiteralSupport.h"
+#include "clang/Sema/Sema.h"
using namespace llvm::hlsl::rootsig;
@@ -234,15 +235,15 @@ std::optional<RootDescriptor> RootSignatureParser::parseRootDescriptor() {
default:
llvm_unreachable("Switch for consumed token was not provided");
case TokenKind::kw_CBV:
- Descriptor.Type = DescriptorType::CBuffer;
+ Descriptor.Type = ResourceClass::CBuffer;
ExpectedReg = TokenKind::bReg;
break;
case TokenKind::kw_SRV:
- Descriptor.Type = DescriptorType::SRV;
+ Descriptor.Type = ResourceClass::SRV;
ExpectedReg = TokenKind::tReg;
break;
case TokenKind::kw_UAV:
- Descriptor.Type = DescriptorType::UAV;
+ Descriptor.Type = ResourceClass::UAV;
ExpectedReg = TokenKind::uReg;
break;
}
@@ -360,19 +361,19 @@ RootSignatureParser::parseDescriptorTableClause() {
default:
llvm_unreachable("Switch for consumed token was not provided");
case TokenKind::kw_CBV:
- Clause.Type = ClauseType::CBuffer;
+ Clause.Type = ResourceClass::CBuffer;
ExpectedReg = TokenKind::bReg;
break;
case TokenKind::kw_SRV:
- Clause.Type = ClauseType::SRV;
+ Clause.Type = ResourceClass::SRV;
ExpectedReg = TokenKind::tReg;
break;
case TokenKind::kw_UAV:
- Clause.Type = ClauseType::UAV;
+ Clause.Type = ResourceClass::UAV;
ExpectedReg = TokenKind::uReg;
break;
case TokenKind::kw_Sampler:
- Clause.Type = ClauseType::Sampler;
+ Clause.Type = ResourceClass::Sampler;
ExpectedReg = TokenKind::sReg;
break;
}
@@ -1448,5 +1449,28 @@ SourceLocation RootSignatureParser::getTokenLocation(RootSignatureToken Tok) {
PP.getLangOpts(), PP.getTargetInfo());
}
+IdentifierInfo *ParseHLSLRootSignature(Sema &Actions,
+ llvm::dxbc::RootSignatureVersion Version,
+ StringLiteral *Signature) {
+ // Construct our identifier
+ auto [DeclIdent, Found] =
+ Actions.HLSL().ActOnStartRootSignatureDecl(Signature->getString());
+ // If we haven't already found a defined DeclIdent, parse the root
+ // signature string and construct the in-memory elements.
+ if (!Found) {
+ // Invoke the root signature parser to build the in-memory representation.
+ hlsl::RootSignatureParser Parser(Version, Signature,
+ Actions.getPreprocessor());
+ if (Parser.parse())
+ return nullptr;
+
+ // Construct the declaration.
+ Actions.HLSL().ActOnFinishRootSignatureDecl(
+ Signature->getBeginLoc(), DeclIdent, Parser.getElements());
+ }
+
+ return DeclIdent;
+}
+
} // namespace hlsl
} // namespace clang
diff --git a/clang/lib/Parse/ParseTemplate.cpp b/clang/lib/Parse/ParseTemplate.cpp
index 0277dfb..74aff0b 100644
--- a/clang/lib/Parse/ParseTemplate.cpp
+++ b/clang/lib/Parse/ParseTemplate.cpp
@@ -1128,12 +1128,14 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
// Build the annotation token.
if (TNK == TNK_Type_template && AllowTypeAnnotation) {
- TypeResult Type = ArgsInvalid
- ? TypeError()
- : Actions.ActOnTemplateIdType(
- getCurScope(), SS, TemplateKWLoc, Template,
- TemplateName.Identifier, TemplateNameLoc,
- LAngleLoc, TemplateArgsPtr, RAngleLoc);
+ TypeResult Type =
+ ArgsInvalid
+ ? TypeError()
+ : Actions.ActOnTemplateIdType(
+ getCurScope(), ElaboratedTypeKeyword::None,
+ /*ElaboratedKeywordLoc=*/SourceLocation(), SS, TemplateKWLoc,
+ Template, TemplateName.Identifier, TemplateNameLoc, LAngleLoc,
+ TemplateArgsPtr, RAngleLoc);
Tok.setKind(tok::annot_typename);
setTypeAnnotation(Tok, Type);
@@ -1194,10 +1196,11 @@ void Parser::AnnotateTemplateIdTokenAsType(
TemplateId->isInvalid()
? TypeError()
: Actions.ActOnTemplateIdType(
- getCurScope(), SS, TemplateId->TemplateKWLoc,
- TemplateId->Template, TemplateId->Name,
- TemplateId->TemplateNameLoc, TemplateId->LAngleLoc,
- TemplateArgsPtr, TemplateId->RAngleLoc,
+ getCurScope(), ElaboratedTypeKeyword::None,
+ /*ElaboratedKeywordLoc=*/SourceLocation(), SS,
+ TemplateId->TemplateKWLoc, TemplateId->Template,
+ TemplateId->Name, TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc, TemplateArgsPtr, TemplateId->RAngleLoc,
/*IsCtorOrDtorName=*/false, IsClassName, AllowImplicitTypename);
// Create the new "type" annotation token.
Tok.setKind(tok::annot_typename);
@@ -1263,7 +1266,8 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
Actions.ActOnTemplateName(getCurScope(), SS, TemplateKWLoc, Name,
/*ObjectType=*/nullptr,
/*EnteringContext=*/false, Template))
- Result = ParsedTemplateArgument(SS, Template, Name.StartLocation);
+ Result = ParsedTemplateArgument(TemplateKWLoc, SS, Template,
+ Name.StartLocation);
}
} else if (Tok.is(tok::identifier) || Tok.is(tok::annot_template_id) ||
Tok.is(tok::annot_non_type)) {
@@ -1300,11 +1304,14 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
TNK == TNK_Var_template || TNK == TNK_Concept_template) {
// We have an id-expression that refers to a class template or
// (C++0x) alias template.
- Result = ParsedTemplateArgument(SS, Template, Name.StartLocation);
+ Result = ParsedTemplateArgument(/*TemplateKwLoc=*/SourceLocation(), SS,
+ Template, Name.StartLocation);
}
}
}
+ Result = Actions.ActOnTemplateTemplateArgument(Result);
+
// If this is a pack expansion, build it as such.
if (EllipsisLoc.isValid() && !Result.isInvalid())
Result = Actions.ActOnPackExpansion(Result, EllipsisLoc);
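
For reference, a minimal sketch of the construct now routed through
ActOnTemplateTemplateArgument after parsing:

    template <typename T> struct Box {};
    template <template <typename> class TT> struct Apply {};
    Apply<Box> a;  // 'Box' is the template template argument
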
diff --git a/clang/lib/Parse/ParseTentative.cpp b/clang/lib/Parse/ParseTentative.cpp
index 2a731a1..82f2294 100644
--- a/clang/lib/Parse/ParseTentative.cpp
+++ b/clang/lib/Parse/ParseTentative.cpp
@@ -1328,7 +1328,7 @@ Parser::isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
Tok.getAnnotationRange(),
SS);
- if (SS.getScopeRep() && SS.getScopeRep()->isDependent()) {
+ if (SS.getScopeRep().isDependent()) {
RevertingTentativeParsingAction PA(*this);
ConsumeAnnotationToken();
ConsumeToken();
diff --git a/clang/lib/Parse/Parser.cpp b/clang/lib/Parse/Parser.cpp
index a8cfe20..a17398b 100644
--- a/clang/lib/Parse/Parser.cpp
+++ b/clang/lib/Parse/Parser.cpp
@@ -1418,6 +1418,10 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// parameter list was specified.
CurTemplateDepthTracker.addDepth(1);
+ // Late attributes are parsed in the same scope as the function body.
+ if (LateParsedAttrs)
+ ParseLexedAttributeList(*LateParsedAttrs, Res, false, true);
+
if (SkipFunctionBodies && (!Res || Actions.canSkipFunctionBody(Res)) &&
trySkippingFunctionBody()) {
BodyScope.Exit();
@@ -1442,10 +1446,6 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
} else
Actions.ActOnDefaultCtorInitializers(Res);
- // Late attributes are parsed in the same scope as the function body.
- if (LateParsedAttrs)
- ParseLexedAttributeList(*LateParsedAttrs, Res, false, true);
-
return ParseFunctionStatementBody(Res, BodyScope);
}
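
A sketch of the motivating case, assuming the usual thread-safety setup: the
attribute argument 'mu' is late-parsed in the scope of the body, so it must
be handled even on the path where the body itself is skipped (e.g. under
-fskip-function-bodies).

    struct __attribute__((capability("mutex"))) Mutex {};
    struct Locked {
      Mutex mu;
      // 'mu' is declared after the attribute that names it, which is why
      // the attribute is parsed late, in the same scope as the body.
      void lock() __attribute__((acquire_capability(mu))) {}
    };
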
@@ -1774,9 +1774,9 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC,
/// An Objective-C object type followed by '<' is a specialization of
/// a parameterized class type or a protocol-qualified type.
ParsedType Ty = Classification.getType();
+ QualType T = Actions.GetTypeFromParser(Ty);
if (getLangOpts().ObjC && NextToken().is(tok::less) &&
- (Ty.get()->isObjCObjectType() ||
- Ty.get()->isObjCObjectPointerType())) {
+ (T->isObjCObjectType() || T->isObjCObjectPointerType())) {
// Consume the name.
SourceLocation IdentifierLoc = ConsumeToken();
SourceLocation NewEndLoc;
@@ -2032,11 +2032,12 @@ bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(
if (SS.isNotEmpty()) // it was a C++ qualified type name.
BeginLoc = SS.getBeginLoc();
+ QualType T = Actions.GetTypeFromParser(Ty);
+
/// An Objective-C object type followed by '<' is a specialization of
/// a parameterized class type or a protocol-qualified type.
if (getLangOpts().ObjC && NextToken().is(tok::less) &&
- (Ty.get()->isObjCObjectType() ||
- Ty.get()->isObjCObjectPointerType())) {
+ (T->isObjCObjectType() || T->isObjCObjectPointerType())) {
// Consume the name.
SourceLocation IdentifierLoc = ConsumeToken();
SourceLocation NewEndLoc;
@@ -2362,9 +2363,10 @@ Parser::ParseModuleDecl(Sema::ModuleImportState &ImportState) {
// Parse a global-module-fragment, if present.
if (getLangOpts().CPlusPlusModules && Tok.is(tok::semi)) {
SourceLocation SemiLoc = ConsumeToken();
- if (!Introducer.isFirstPPToken()) {
+ if (ImportState != Sema::ModuleImportState::FirstDecl ||
+ Introducer.hasSeenNoTrivialPPDirective()) {
Diag(StartLoc, diag::err_global_module_introducer_not_at_start)
- << SourceRange(StartLoc, SemiLoc);
+ << SourceRange(StartLoc, SemiLoc);
return nullptr;
}
if (MDK == Sema::ModuleDeclKind::Interface) {
@@ -2419,7 +2421,8 @@ Parser::ParseModuleDecl(Sema::ModuleImportState &ImportState) {
ExpectAndConsumeSemi(diag::err_module_expected_semi);
return Actions.ActOnModuleDecl(StartLoc, ModuleLoc, MDK, Path, Partition,
- ImportState, Introducer.isFirstPPToken());
+ ImportState,
+ Introducer.hasSeenNoTrivialPPDirective());
}
Decl *Parser::ParseModuleImport(SourceLocation AtLoc,
diff --git a/clang/lib/Sema/AnalysisBasedWarnings.cpp b/clang/lib/Sema/AnalysisBasedWarnings.cpp
index fdb6302..0b94b10 100644
--- a/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -227,14 +227,11 @@ static bool hasRecursiveCallInPath(const FunctionDecl *FD, CFGBlock &Block) {
// Skip function calls which are qualified with a templated class.
if (const DeclRefExpr *DRE =
- dyn_cast<DeclRefExpr>(CE->getCallee()->IgnoreParenImpCasts())) {
- if (NestedNameSpecifier *NNS = DRE->getQualifier()) {
- if (NNS->getKind() == NestedNameSpecifier::TypeSpec &&
- isa<TemplateSpecializationType>(NNS->getAsType())) {
+ dyn_cast<DeclRefExpr>(CE->getCallee()->IgnoreParenImpCasts()))
+ if (NestedNameSpecifier NNS = DRE->getQualifier();
+ NNS.getKind() == NestedNameSpecifier::Kind::Type)
+ if (isa_and_nonnull<TemplateSpecializationType>(NNS.getAsType()))
continue;
- }
- }
- }
const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE);
if (!MCE || isa<CXXThisExpr>(MCE->getImplicitObjectArgument()) ||
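
For context, a sketch of the calls this continues to skip: when the callee's
qualifier is a template specialization, the named function may resolve
differently per instantiation, so the -Winfinite-recursion path check
conservatively ignores it.

    template <typename T> struct Step {
      static int run(int n) {
        // Qualifier 'Step<T>' is a TemplateSpecializationType, so this
        // call is not counted as recursion on the path.
        return n ? Step<T>::run(n - 1) : 0;
      }
    };
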
@@ -2783,6 +2780,31 @@ public:
}
};
+namespace clang::lifetimes {
+namespace {
+class LifetimeSafetyReporterImpl : public LifetimeSafetyReporter {
+
+public:
+ LifetimeSafetyReporterImpl(Sema &S) : S(S) {}
+
+ void reportUseAfterFree(const Expr *IssueExpr, const Expr *UseExpr,
+ SourceLocation FreeLoc, Confidence C) override {
+ S.Diag(IssueExpr->getExprLoc(),
+ C == Confidence::Definite
+ ? diag::warn_lifetime_safety_loan_expires_permissive
+ : diag::warn_lifetime_safety_loan_expires_strict)
+ << IssueExpr->getEndLoc();
+ S.Diag(FreeLoc, diag::note_lifetime_safety_destroyed_here);
+ S.Diag(UseExpr->getExprLoc(), diag::note_lifetime_safety_used_here)
+ << UseExpr->getEndLoc();
+ }
+
+private:
+ Sema &S;
+};
+} // namespace
+} // namespace clang::lifetimes
+
void clang::sema::AnalysisBasedWarnings::IssueWarnings(
TranslationUnitDecl *TU) {
if (!TU)
@@ -3032,8 +3054,10 @@ void clang::sema::AnalysisBasedWarnings::IssueWarnings(
// TODO: Enable lifetime safety analysis for other languages once it is
// stable.
if (EnableLifetimeSafetyAnalysis && S.getLangOpts().CPlusPlus) {
- if (AC.getCFG())
- lifetimes::runLifetimeSafetyAnalysis(AC);
+ if (AC.getCFG()) {
+ lifetimes::LifetimeSafetyReporterImpl LifetimeSafetyReporter(S);
+ lifetimes::runLifetimeSafetyAnalysis(AC, &LifetimeSafetyReporter);
+ }
}
// Check for violations of "called once" parameter properties.
if (S.getLangOpts().ObjC && !S.getLangOpts().CPlusPlus &&
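
A sketch of the class of code the new reporter surfaces, assuming the
experimental lifetime-safety analysis is enabled: a loan taken on a local
outlives its scope, producing the expires/destroyed-here/used-here triplet
wired up above.

    int *escape() {
      int *p = nullptr;
      {
        int local = 0;
        p = &local;   // loan issued on 'local'
      }               // note: destroyed here (the loan expires)
      return p;       // note: used here
    }
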
diff --git a/clang/lib/Sema/DeclSpec.cpp b/clang/lib/Sema/DeclSpec.cpp
index f0f1d66..8756ce5 100644
--- a/clang/lib/Sema/DeclSpec.cpp
+++ b/clang/lib/Sema/DeclSpec.cpp
@@ -48,9 +48,9 @@ void UnqualifiedId::setConstructorTemplateId(TemplateIdAnnotation *TemplateId) {
EndLocation = TemplateId->RAngleLoc;
}
-void CXXScopeSpec::Extend(ASTContext &Context, TypeLoc TL,
- SourceLocation ColonColonLoc) {
- Builder.Extend(Context, TL, ColonColonLoc);
+void CXXScopeSpec::Make(ASTContext &Context, TypeLoc TL,
+ SourceLocation ColonColonLoc) {
+ Builder.Make(Context, TL, ColonColonLoc);
if (Range.getBegin().isInvalid())
Range.setBegin(TL.getBeginLoc());
Range.setEnd(ColonColonLoc);
@@ -59,19 +59,6 @@ void CXXScopeSpec::Extend(ASTContext &Context, TypeLoc TL,
"NestedNameSpecifierLoc range computation incorrect");
}
-void CXXScopeSpec::Extend(ASTContext &Context, IdentifierInfo *Identifier,
- SourceLocation IdentifierLoc,
- SourceLocation ColonColonLoc) {
- Builder.Extend(Context, Identifier, IdentifierLoc, ColonColonLoc);
-
- if (Range.getBegin().isInvalid())
- Range.setBegin(IdentifierLoc);
- Range.setEnd(ColonColonLoc);
-
- assert(Range == Builder.getSourceRange() &&
- "NestedNameSpecifierLoc range computation incorrect");
-}
-
void CXXScopeSpec::Extend(ASTContext &Context, NamespaceBaseDecl *Namespace,
SourceLocation NamespaceLoc,
SourceLocation ColonColonLoc) {
@@ -95,10 +82,10 @@ void CXXScopeSpec::MakeGlobal(ASTContext &Context,
"NestedNameSpecifierLoc range computation incorrect");
}
-void CXXScopeSpec::MakeSuper(ASTContext &Context, CXXRecordDecl *RD,
- SourceLocation SuperLoc,
- SourceLocation ColonColonLoc) {
- Builder.MakeSuper(Context, RD, SuperLoc, ColonColonLoc);
+void CXXScopeSpec::MakeMicrosoftSuper(ASTContext &Context, CXXRecordDecl *RD,
+ SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc) {
+ Builder.MakeMicrosoftSuper(Context, RD, SuperLoc, ColonColonLoc);
Range.setBegin(SuperLoc);
Range.setEnd(ColonColonLoc);
@@ -108,7 +95,7 @@ void CXXScopeSpec::MakeSuper(ASTContext &Context, CXXRecordDecl *RD,
}
void CXXScopeSpec::MakeTrivial(ASTContext &Context,
- NestedNameSpecifier *Qualifier, SourceRange R) {
+ NestedNameSpecifier Qualifier, SourceRange R) {
Builder.MakeTrivial(Context, Qualifier, R);
Range = R;
}
diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
index 87f9ae0..806800c 100644
--- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
+++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
@@ -313,9 +313,6 @@ TemplateParameterListBuilder::finalizeTemplateArgs(ConceptDecl *CD) {
Builder.Record->getDeclContext()->addDecl(Builder.Template);
Params.clear();
- QualType T = Builder.Template->getInjectedClassNameSpecialization();
- T = AST.getInjectedClassNameType(Builder.Record, T);
-
return Builder;
}
@@ -351,7 +348,7 @@ BuiltinTypeMethodBuilder::BuiltinTypeMethodBuilder(BuiltinTypeDeclBuilder &DB,
ASTContext &AST = DB.SemaRef.getASTContext();
if (IsCtor) {
Name = AST.DeclarationNames.getCXXConstructorName(
- DB.Record->getTypeForDecl()->getCanonicalTypeUnqualified());
+ AST.getCanonicalTagType(DB.Record));
} else {
const IdentifierInfo &II =
AST.Idents.get(NameStr, tok::TokenKind::identifier);
@@ -553,9 +550,9 @@ BuiltinTypeDeclBuilder::BuiltinTypeDeclBuilder(Sema &SemaRef,
return;
}
- Record = CXXRecordDecl::Create(AST, TagDecl::TagKind::Class, HLSLNamespace,
- SourceLocation(), SourceLocation(), &II,
- PrevDecl, true);
+ Record =
+ CXXRecordDecl::Create(AST, TagDecl::TagKind::Class, HLSLNamespace,
+ SourceLocation(), SourceLocation(), &II, PrevDecl);
Record->setImplicit(true);
Record->setLexicalDeclContext(HLSLNamespace);
Record->setHasExternalLexicalStorage();
@@ -570,18 +567,6 @@ BuiltinTypeDeclBuilder::~BuiltinTypeDeclBuilder() {
HLSLNamespace->addDecl(Record);
}
-CXXRecordDecl *BuiltinTypeDeclBuilder::finalizeForwardDeclaration() {
- // Force the QualType to be generated for the record declaration. In most
- // cases this will happen naturally when something uses the type the
- // QualType gets lazily created. Unfortunately, with our injected types if a
- // type isn't used in a translation unit the QualType may not get
- // automatically generated before a PCH is generated. To resolve this we
- // just force that the QualType is generated after we create a forward
- // declaration.
- (void)Record->getASTContext().getRecordType(Record);
- return Record;
-}
-
BuiltinTypeDeclBuilder &
BuiltinTypeDeclBuilder::addMemberVariable(StringRef Name, QualType Type,
llvm::ArrayRef<Attr *> Attrs,
diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
index 36c4add..098b726 100644
--- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
+++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
@@ -64,7 +64,7 @@ public:
BuiltinTypeDeclBuilder &addSimpleTemplateParams(ArrayRef<StringRef> Names,
ConceptDecl *CD);
- CXXRecordDecl *finalizeForwardDeclaration();
+ CXXRecordDecl *finalizeForwardDeclaration() { return Record; }
BuiltinTypeDeclBuilder &completeDefinition();
BuiltinTypeDeclBuilder &
diff --git a/clang/lib/Sema/HeuristicResolver.cpp b/clang/lib/Sema/HeuristicResolver.cpp
index 6874d30..6d79f3f 100644
--- a/clang/lib/Sema/HeuristicResolver.cpp
+++ b/clang/lib/Sema/HeuristicResolver.cpp
@@ -44,7 +44,7 @@ public:
resolveDependentNameType(const DependentNameType *DNT);
std::vector<const NamedDecl *> resolveTemplateSpecializationType(
const DependentTemplateSpecializationType *DTST);
- QualType resolveNestedNameSpecifierToType(const NestedNameSpecifier *NNS);
+ QualType resolveNestedNameSpecifierToType(NestedNameSpecifier NNS);
QualType getPointeeType(QualType T);
std::vector<const NamedDecl *>
lookupDependentName(CXXRecordDecl *RD, DeclarationName Name,
@@ -57,7 +57,7 @@ private:
ASTContext &Ctx;
// Recursion protection sets
- llvm::SmallSet<const DependentNameType *, 4> SeenDependentNameTypes;
+ llvm::SmallPtrSet<const DependentNameType *, 4> SeenDependentNameTypes;
// Given a tag-decl type and a member name, heuristically resolve the
// name to one or more declarations.
@@ -101,9 +101,8 @@ QualType resolveDeclsToType(const std::vector<const NamedDecl *> &Decls,
ASTContext &Ctx) {
if (Decls.size() != 1) // Names an overload set -- just bail.
return QualType();
- if (const auto *TD = dyn_cast<TypeDecl>(Decls[0])) {
- return Ctx.getTypeDeclType(TD);
- }
+ if (const auto *TD = dyn_cast<TypeDecl>(Decls[0]))
+ return Ctx.getCanonicalTypeDeclType(TD);
if (const auto *VD = dyn_cast<ValueDecl>(Decls[0])) {
return VD->getType();
}
@@ -139,8 +138,7 @@ TagDecl *HeuristicResolverImpl::resolveTypeToTagDecl(QualType QT) {
T = T->getCanonicalTypeInternal().getTypePtr();
}
- if (auto *TT = T->getAs<TagType>()) {
- TagDecl *TD = TT->getDecl();
+ if (auto *TD = T->getAsTagDecl()) {
// Template might not be instantiated yet, fall back to primary template
// in such cases.
if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(TD)) {
@@ -151,11 +149,6 @@ TagDecl *HeuristicResolverImpl::resolveTypeToTagDecl(QualType QT) {
return TD;
}
- if (const auto *ICNT = T->getAs<InjectedClassNameType>())
- T = ICNT->getInjectedSpecializationType().getTypePtrOrNull();
- if (!T)
- return nullptr;
-
TemplateName TN = getReferencedTemplateName(T);
if (TN.isNull())
return nullptr;
@@ -262,6 +255,21 @@ QualType HeuristicResolverImpl::simplifyType(QualType Type, const Expr *E,
}
}
}
+ // Check if the expression refers to an explicit object parameter of
+ // templated type. If so, heuristically treat it as having the type of the
+ // enclosing class.
+ if (!T.Type.isNull() &&
+ (T.Type->isUndeducedAutoType() || T.Type->isTemplateTypeParmType())) {
+ if (auto *DRE = dyn_cast_if_present<DeclRefExpr>(T.E)) {
+ auto *PrDecl = dyn_cast<ParmVarDecl>(DRE->getDecl());
+ if (PrDecl && PrDecl->isExplicitObjectParameter()) {
+ const auto *Parent =
+ dyn_cast<TagDecl>(PrDecl->getDeclContext()->getParent());
+ return {Ctx.getCanonicalTagType(Parent)};
+ }
+ }
+ }
+
return T;
};
// As an additional protection against infinite loops, bound the number of
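
The new heuristic in action, as a sketch: an explicit object parameter
usually has deduced or template-parameter type, so for member resolution it
is treated as if it had the type of the enclosing class.

    struct Widget {
      void paint();
      void refresh(this auto &&self) {
        self.paint();  // heuristically resolved against Widget
      }
    };
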
@@ -292,7 +300,7 @@ std::vector<const NamedDecl *> HeuristicResolverImpl::resolveMemberExpr(
// an instance method, it's represented as a CXXDependentScopeMemberExpr
// with `this` as the base expression and `X` as the qualifier
// (which could be valid if `X` names a base class after instantiation).
- if (NestedNameSpecifier *NNS = ME->getQualifier()) {
+ if (NestedNameSpecifier NNS = ME->getQualifier()) {
if (QualType QualifierType = resolveNestedNameSpecifierToType(NNS);
!QualifierType.isNull()) {
auto Decls =
@@ -348,7 +356,10 @@ HeuristicResolverImpl::resolveCalleeOfCallExpr(const CallExpr *CE) {
std::vector<const NamedDecl *> HeuristicResolverImpl::resolveUsingValueDecl(
const UnresolvedUsingValueDecl *UUVD) {
- return resolveDependentMember(QualType(UUVD->getQualifier()->getAsType(), 0),
+ NestedNameSpecifier Qualifier = UUVD->getQualifier();
+ if (Qualifier.getKind() != NestedNameSpecifier::Kind::Type)
+ return {};
+ return resolveDependentMember(QualType(Qualifier.getAsType(), 0),
UUVD->getNameInfo().getName(), ValueFilter);
}
@@ -399,23 +410,23 @@ QualType HeuristicResolverImpl::resolveExprToType(const Expr *E) {
}
QualType HeuristicResolverImpl::resolveNestedNameSpecifierToType(
- const NestedNameSpecifier *NNS) {
- if (!NNS)
- return QualType();
-
+ NestedNameSpecifier NNS) {
// The purpose of this function is to handle the dependent
// (DependentNameType) case, but we need to recurse on the prefix
// because that may be dependent as well, so for convenience handle
// the TypeSpec cases too.
- switch (NNS->getKind()) {
- case NestedNameSpecifier::TypeSpec:
- return QualType(NNS->getAsType(), 0);
- case NestedNameSpecifier::Identifier: {
- return resolveDeclsToType(
- resolveDependentMember(
- resolveNestedNameSpecifierToType(NNS->getPrefix()),
- NNS->getAsIdentifier(), TypeFilter),
- Ctx);
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Type: {
+ const auto *T = NNS.getAsType();
+ // FIXME: Should this handle the DependentTemplateSpecializationType as
+ // well?
+ if (const auto *DTN = dyn_cast<DependentNameType>(T))
+ return resolveDeclsToType(
+ resolveDependentMember(
+ resolveNestedNameSpecifierToType(DTN->getQualifier()),
+ DTN->getIdentifier(), TypeFilter),
+ Ctx);
+ return QualType(T, 0);
}
default:
break;
@@ -590,7 +601,7 @@ HeuristicResolver::resolveTemplateSpecializationType(
return HeuristicResolverImpl(Ctx).resolveTemplateSpecializationType(DTST);
}
QualType HeuristicResolver::resolveNestedNameSpecifierToType(
- const NestedNameSpecifier *NNS) const {
+ NestedNameSpecifier NNS) const {
return HeuristicResolverImpl(Ctx).resolveNestedNameSpecifierToType(NNS);
}
std::vector<const NamedDecl *> HeuristicResolver::lookupDependentName(
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index cfb2f60..39fa25f 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -1434,7 +1434,7 @@ void Sema::ActOnEndOfTranslationUnit() {
// translation unit contains a file scope declaration of that
// identifier, with the composite type as of the end of the
// translation unit, with an initializer equal to 0.
- llvm::SmallSet<VarDecl *, 32> Seen;
+ llvm::SmallPtrSet<VarDecl *, 32> Seen;
for (TentativeDefinitionsType::iterator
T = TentativeDefinitions.begin(ExternalSource.get()),
TEnd = TentativeDefinitions.end();
@@ -1882,21 +1882,21 @@ public:
// Visit the dtors of all members
for (const FieldDecl *FD : RD->fields()) {
QualType FT = FD->getType();
- if (const auto *RT = FT->getAs<RecordType>())
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- if (ClassDecl->hasDefinition())
- if (CXXDestructorDecl *MemberDtor = ClassDecl->getDestructor())
- asImpl().visitUsedDecl(MemberDtor->getLocation(), MemberDtor);
+ if (const auto *ClassDecl = FT->getAsCXXRecordDecl();
+ ClassDecl &&
+ (ClassDecl->isBeingDefined() || ClassDecl->isCompleteDefinition()))
+ if (CXXDestructorDecl *MemberDtor = ClassDecl->getDestructor())
+ asImpl().visitUsedDecl(MemberDtor->getLocation(), MemberDtor);
}
// Also visit base class dtors
for (const auto &Base : RD->bases()) {
QualType BaseType = Base.getType();
- if (const auto *RT = BaseType->getAs<RecordType>())
- if (const auto *BaseDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- if (BaseDecl->hasDefinition())
- if (CXXDestructorDecl *BaseDtor = BaseDecl->getDestructor())
- asImpl().visitUsedDecl(BaseDtor->getLocation(), BaseDtor);
+ if (const auto *BaseDecl = BaseType->getAsCXXRecordDecl();
+ BaseDecl &&
+ (BaseDecl->isBeingDefined() || BaseDecl->isCompleteDefinition()))
+ if (CXXDestructorDecl *BaseDtor = BaseDecl->getDestructor())
+ asImpl().visitUsedDecl(BaseDtor->getLocation(), BaseDtor);
}
}
@@ -1907,11 +1907,11 @@ public:
if (VD->isThisDeclarationADefinition() &&
VD->needsDestruction(S.Context)) {
QualType VT = VD->getType();
- if (const auto *RT = VT->getAs<RecordType>())
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- if (ClassDecl->hasDefinition())
- if (CXXDestructorDecl *Dtor = ClassDecl->getDestructor())
- asImpl().visitUsedDecl(Dtor->getLocation(), Dtor);
+ if (const auto *ClassDecl = VT->getAsCXXRecordDecl();
+ ClassDecl && (ClassDecl->isBeingDefined() ||
+ ClassDecl->isCompleteDefinition()))
+ if (CXXDestructorDecl *Dtor = ClassDecl->getDestructor())
+ asImpl().visitUsedDecl(Dtor->getLocation(), Dtor);
}
Inherited::VisitDeclStmt(DS);
diff --git a/clang/lib/Sema/SemaAPINotes.cpp b/clang/lib/Sema/SemaAPINotes.cpp
index 044abb0..4cc1b76 100644
--- a/clang/lib/Sema/SemaAPINotes.cpp
+++ b/clang/lib/Sema/SemaAPINotes.cpp
@@ -336,6 +336,10 @@ static void ProcessAPINotes(Sema &S, Decl *D,
});
}
+ if (auto ConformsTo = Info.getSwiftConformance())
+ D->addAttr(
+ SwiftAttrAttr::Create(S.Context, "conforms_to:" + ConformsTo.value()));
+
ProcessAPINotes(S, D, static_cast<const api_notes::CommonEntityInfo &>(Info),
Metadata);
}
@@ -694,14 +698,13 @@ static void ProcessAPINotes(Sema &S, TagDecl *D, const api_notes::TagInfo &Info,
if (auto ReleaseOp = Info.SwiftReleaseOp)
D->addAttr(
SwiftAttrAttr::Create(S.Context, "release:" + ReleaseOp.value()));
+ if (auto DestroyOp = Info.SwiftDestroyOp)
+ D->addAttr(
+ SwiftAttrAttr::Create(S.Context, "destroy:" + DestroyOp.value()));
if (auto DefaultOwnership = Info.SwiftDefaultOwnership)
D->addAttr(SwiftAttrAttr::Create(
S.Context, "returned_as_" + DefaultOwnership.value() + "_by_default"));
- if (auto ConformsTo = Info.SwiftConformance)
- D->addAttr(
- SwiftAttrAttr::Create(S.Context, "conforms_to:" + ConformsTo.value()));
-
if (auto Copyable = Info.isSwiftCopyable()) {
if (!*Copyable)
D->addAttr(SwiftAttrAttr::Create(S.Context, "~Copyable"));
diff --git a/clang/lib/Sema/SemaAccess.cpp b/clang/lib/Sema/SemaAccess.cpp
index 83a07a2..17415b4 100644
--- a/clang/lib/Sema/SemaAccess.cpp
+++ b/clang/lib/Sema/SemaAccess.cpp
@@ -318,11 +318,8 @@ static AccessResult IsDerivedFromInclusive(const CXXRecordDecl *Derived,
const CXXRecordDecl *RD;
QualType T = I.getType();
- if (const RecordType *RT = T->getAs<RecordType>()) {
- RD = cast<CXXRecordDecl>(RT->getDecl());
- } else if (const InjectedClassNameType *IT
- = T->getAs<InjectedClassNameType>()) {
- RD = IT->getDecl();
+ if (CXXRecordDecl *Rec = T->getAsCXXRecordDecl()) {
+ RD = Rec;
} else {
assert(T->isDependentType() && "non-dependent base wasn't a record?");
OnFailure = AR_dependent;
@@ -442,8 +439,8 @@ static AccessResult MatchesFriend(Sema &S,
static AccessResult MatchesFriend(Sema &S,
const EffectiveContext &EC,
CanQualType Friend) {
- if (const RecordType *RT = Friend->getAs<RecordType>())
- return MatchesFriend(S, EC, cast<CXXRecordDecl>(RT->getDecl()));
+ if (const auto *RD = Friend->getAsCXXRecordDecl())
+ return MatchesFriend(S, EC, RD);
// TODO: we can do better than this
if (Friend->isDependentType())
@@ -671,11 +668,8 @@ struct ProtectedFriendContext {
const CXXRecordDecl *RD;
QualType T = I.getType();
- if (const RecordType *RT = T->getAs<RecordType>()) {
- RD = cast<CXXRecordDecl>(RT->getDecl());
- } else if (const InjectedClassNameType *IT
- = T->getAs<InjectedClassNameType>()) {
- RD = IT->getDecl();
+ if (CXXRecordDecl *Rec = T->getAsCXXRecordDecl()) {
+ RD = Rec;
} else {
assert(T->isDependentType() && "non-dependent base wasn't a record?");
EverDependent = true;
@@ -1072,7 +1066,7 @@ static bool TryDiagnoseProtectedAccess(Sema &S, const EffectiveContext &EC,
// TODO: it would be great to have a fixit here, since this is
// such an obvious error.
S.Diag(D->getLocation(), diag::note_access_protected_restricted_noobject)
- << S.Context.getTypeDeclType(ECRecord);
+ << S.Context.getCanonicalTagType(ECRecord);
return true;
}
@@ -1101,7 +1095,7 @@ static bool TryDiagnoseProtectedAccess(Sema &S, const EffectiveContext &EC,
// Otherwise, use the generic diagnostic.
return S.Diag(D->getLocation(),
diag::note_access_protected_restricted_object)
- << S.Context.getTypeDeclType(ECRecord);
+ << S.Context.getCanonicalTagType(ECRecord);
}
return false;
@@ -1129,7 +1123,7 @@ static void diagnoseBadDirectAccess(Sema &S,
else if (TypedefNameDecl *TND = dyn_cast<TypedefNameDecl>(D))
PrevDecl = TND->getPreviousDecl();
else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
- if (auto *RD = dyn_cast<CXXRecordDecl>(D);
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(TD);
RD && RD->isInjectedClassName())
break;
PrevDecl = TD->getPreviousDecl();
@@ -1284,10 +1278,10 @@ static void DiagnoseBadAccess(Sema &S, SourceLocation Loc,
NamedDecl *D = (Entity.isMemberAccess() ? Entity.getTargetDecl() : nullptr);
S.Diag(Loc, Entity.getDiag())
- << (Entity.getAccess() == AS_protected)
- << (D ? D->getDeclName() : DeclarationName())
- << S.Context.getTypeDeclType(NamingClass)
- << S.Context.getTypeDeclType(DeclaringClass);
+ << (Entity.getAccess() == AS_protected)
+ << (D ? D->getDeclName() : DeclarationName())
+ << S.Context.getCanonicalTagType(NamingClass)
+ << S.Context.getCanonicalTagType(DeclaringClass);
DiagnoseAccessPath(S, EC, Entity);
}
@@ -1640,7 +1634,8 @@ Sema::AccessResult Sema::CheckDestructorAccess(SourceLocation Loc,
return AR_accessible;
CXXRecordDecl *NamingClass = Dtor->getParent();
- if (ObjectTy.isNull()) ObjectTy = Context.getTypeDeclType(NamingClass);
+ if (ObjectTy.isNull())
+ ObjectTy = Context.getCanonicalTagType(NamingClass);
AccessTarget Entity(Context, AccessTarget::Member, NamingClass,
DeclAccessPair::make(Dtor, Access),
@@ -1728,7 +1723,7 @@ Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
AccessTarget AccessEntity(
Context, AccessTarget::Member, NamingClass,
DeclAccessPair::make(Constructor, Found.getAccess()),
- Context.getTypeDeclType(ObjectClass));
+ Context.getCanonicalTagType(ObjectClass));
AccessEntity.setDiag(PD);
return CheckAccess(*this, UseLoc, AccessEntity);
@@ -1776,7 +1771,7 @@ Sema::CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
return AR_accessible;
AccessTarget Entity(Context, AccessTarget::Member, DecomposedClass, Field,
- Context.getRecordType(DecomposedClass));
+ Context.getCanonicalTagType(DecomposedClass));
Entity.setDiag(diag::err_decomp_decl_inaccessible_field);
return CheckAccess(*this, UseLoc, Entity);
@@ -1789,9 +1784,7 @@ Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
if (!getLangOpts().AccessControl || Found.getAccess() == AS_public)
return AR_accessible;
- const RecordType *RT = ObjectExpr->getType()->castAs<RecordType>();
- CXXRecordDecl *NamingClass = cast<CXXRecordDecl>(RT->getDecl());
-
+ auto *NamingClass = ObjectExpr->getType()->castAsCXXRecordDecl();
AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found,
ObjectExpr->getType());
Entity.setDiag(diag::err_access) << ObjectExpr->getSourceRange() << Range;
diff --git a/clang/lib/Sema/SemaAvailability.cpp b/clang/lib/Sema/SemaAvailability.cpp
index 68a698f..1c48b3c 100644
--- a/clang/lib/Sema/SemaAvailability.cpp
+++ b/clang/lib/Sema/SemaAvailability.cpp
@@ -102,7 +102,7 @@ Sema::ShouldDiagnoseAvailabilityOfDecl(const NamedDecl *D, std::string *Message,
break;
for (const Type *T = TD->getUnderlyingType().getTypePtr(); /**/; /**/) {
if (auto *TT = dyn_cast<TagType>(T)) {
- D = TT->getDecl();
+ D = TT->getOriginalDecl()->getDefinitionOrSelf();
} else if (isa<SubstTemplateTypeParmType>(T)) {
// A Subst* node represents a use through a template.
// Any uses of the underlying declaration happened through it's template
@@ -1017,7 +1017,7 @@ bool DiagnoseUnguardedAvailability::VisitTypeLoc(TypeLoc Ty) {
return true;
if (const auto *TT = dyn_cast<TagType>(TyPtr)) {
- TagDecl *TD = TT->getDecl();
+ TagDecl *TD = TT->getOriginalDecl()->getDefinitionOrSelf();
DiagnoseDeclAvailability(TD, Range);
} else if (const auto *TD = dyn_cast<TypedefType>(TyPtr)) {
diff --git a/clang/lib/Sema/SemaBPF.cpp b/clang/lib/Sema/SemaBPF.cpp
index 7c00084..be890ab 100644
--- a/clang/lib/Sema/SemaBPF.cpp
+++ b/clang/lib/Sema/SemaBPF.cpp
@@ -56,14 +56,9 @@ static bool isValidPreserveTypeInfoArg(Expr *Arg) {
return true;
// Record type or Enum type.
- const Type *Ty = ArgType->getUnqualifiedDesugaredType();
- if (const auto *RT = Ty->getAs<RecordType>()) {
- if (!RT->getDecl()->getDeclName().isEmpty())
+ if (const auto *RT = ArgType->getAsCanonical<TagType>())
+ if (!RT->getOriginalDecl()->getDeclName().isEmpty())
return true;
- } else if (const auto *ET = Ty->getAs<EnumType>()) {
- if (!ET->getDecl()->getDeclName().isEmpty())
- return true;
- }
return false;
}
@@ -99,13 +94,12 @@ static bool isValidPreserveEnumValueArg(Expr *Arg) {
return false;
// The type must be EnumType.
- const Type *Ty = ArgType->getUnqualifiedDesugaredType();
- const auto *ET = Ty->getAs<EnumType>();
- if (!ET)
+ const auto *ED = ArgType->getAsEnumDecl();
+ if (!ED)
return false;
// The enum value must be supported.
- return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
+ return llvm::is_contained(ED->enumerators(), Enumerator);
}
bool SemaBPF::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
diff --git a/clang/lib/Sema/SemaCUDA.cpp b/clang/lib/Sema/SemaCUDA.cpp
index 24cb8c3..2e3cbb3 100644
--- a/clang/lib/Sema/SemaCUDA.cpp
+++ b/clang/lib/Sema/SemaCUDA.cpp
@@ -421,12 +421,10 @@ bool SemaCUDA::inferTargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
}
for (const auto *B : Bases) {
- const RecordType *BaseType = B->getType()->getAs<RecordType>();
- if (!BaseType) {
+ auto *BaseClassDecl = B->getType()->getAsCXXRecordDecl();
+ if (!BaseClassDecl)
continue;
- }
- CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
Sema::SpecialMemberOverloadResult SMOR =
SemaRef.LookupSpecialMember(BaseClassDecl, CSM,
/* ConstArg */ ConstRHS,
@@ -465,13 +463,11 @@ bool SemaCUDA::inferTargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
continue;
}
- const RecordType *FieldType =
- getASTContext().getBaseElementType(F->getType())->getAs<RecordType>();
- if (!FieldType) {
+ auto *FieldRecDecl =
+ getASTContext().getBaseElementType(F->getType())->getAsCXXRecordDecl();
+ if (!FieldRecDecl)
continue;
- }
- CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
Sema::SpecialMemberOverloadResult SMOR =
SemaRef.LookupSpecialMember(FieldRecDecl, CSM,
/* ConstArg */ ConstRHS && !F->isMutable(),
diff --git a/clang/lib/Sema/SemaCXXScopeSpec.cpp b/clang/lib/Sema/SemaCXXScopeSpec.cpp
index 6ac0483..ef14dde 100644
--- a/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -28,24 +28,21 @@ static CXXRecordDecl *getCurrentInstantiationOf(QualType T,
if (T.isNull())
return nullptr;
- const Type *Ty = T->getCanonicalTypeInternal().getTypePtr();
- if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
- if (Record->isCurrentInstantiation(CurContext))
- return Record;
-
- return nullptr;
- } else if (isa<InjectedClassNameType>(Ty))
- return cast<InjectedClassNameType>(Ty)->getDecl();
- else
+ const TagType *TagTy = dyn_cast<TagType>(T->getCanonicalTypeInternal());
+ if (!isa_and_present<RecordType, InjectedClassNameType>(TagTy))
return nullptr;
+ auto *RD =
+ cast<CXXRecordDecl>(TagTy->getOriginalDecl())->getDefinitionOrSelf();
+ if (isa<InjectedClassNameType>(TagTy) ||
+ RD->isCurrentInstantiation(CurContext))
+ return RD;
+ return nullptr;
}
DeclContext *Sema::computeDeclContext(QualType T) {
if (!T->isDependentType())
- if (const TagType *Tag = T->getAs<TagType>())
- return Tag->getDecl();
-
+ if (auto *D = T->getAsTagDecl())
+ return D;
return ::getCurrentInstantiationOf(T, CurContext);
}
@@ -54,18 +51,17 @@ DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
if (!SS.isSet() || SS.isInvalid())
return nullptr;
- NestedNameSpecifier *NNS = SS.getScopeRep();
- if (NNS->isDependent()) {
+ NestedNameSpecifier NNS = SS.getScopeRep();
+ if (NNS.isDependent()) {
// If this nested-name-specifier refers to the current
// instantiation, return its DeclContext.
if (CXXRecordDecl *Record = getCurrentInstantiationOf(NNS))
return Record;
if (EnteringContext) {
- const Type *NNSType = NNS->getAsType();
- if (!NNSType) {
+ if (NNS.getKind() != NestedNameSpecifier::Kind::Type)
return nullptr;
- }
+ const Type *NNSType = NNS.getAsType();
// Look through type alias templates, per C++0x [temp.dep.type]p1.
NNSType = Context.getCanonicalType(NNSType);
@@ -118,38 +114,36 @@ DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
// If the type of the nested name specifier is the same as the
// injected class name of the named class template, we're entering
// into that class template definition.
- QualType Injected =
- ClassTemplate->getInjectedClassNameSpecialization();
+ CanQualType Injected =
+ ClassTemplate->getCanonicalInjectedSpecializationType(Context);
if (Context.hasSameType(Injected, QualType(SpecType, 0)))
return ClassTemplate->getTemplatedDecl();
}
- } else if (const RecordType *RecordT = NNSType->getAs<RecordType>()) {
+ } else if (const auto *RecordT = dyn_cast<RecordType>(NNSType)) {
// The nested name specifier refers to a member of a class template.
- return RecordT->getDecl();
+ return RecordT->getOriginalDecl()->getDefinitionOrSelf();
}
}
return nullptr;
}
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- llvm_unreachable("Dependent nested-name-specifier has no DeclContext");
-
- case NestedNameSpecifier::Namespace:
- return NNS->getAsNamespace()->getNamespace();
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Namespace:
+ return const_cast<NamespaceDecl *>(
+ NNS.getAsNamespaceAndPrefix().Namespace->getNamespace());
- case NestedNameSpecifier::TypeSpec: {
- const TagType *Tag = NNS->getAsType()->getAs<TagType>();
- assert(Tag && "Non-tag type in nested-name-specifier");
- return Tag->getDecl();
- }
+ case NestedNameSpecifier::Kind::Type:
+ return NNS.getAsType()->castAsTagDecl();
- case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Kind::Global:
return Context.getTranslationUnitDecl();
- case NestedNameSpecifier::Super:
- return NNS->getAsRecordDecl();
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ return NNS.getAsMicrosoftSuper();
+
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
@@ -159,17 +153,17 @@ bool Sema::isDependentScopeSpecifier(const CXXScopeSpec &SS) {
if (!SS.isSet() || SS.isInvalid())
return false;
- return SS.getScopeRep()->isDependent();
+ return SS.getScopeRep().isDependent();
}
-CXXRecordDecl *Sema::getCurrentInstantiationOf(NestedNameSpecifier *NNS) {
+CXXRecordDecl *Sema::getCurrentInstantiationOf(NestedNameSpecifier NNS) {
assert(getLangOpts().CPlusPlus && "Only callable in C++");
- assert(NNS->isDependent() && "Only dependent nested-name-specifier allowed");
+ assert(NNS.isDependent() && "Only dependent nested-name-specifier allowed");
- if (!NNS->getAsType())
+ if (NNS.getKind() != NestedNameSpecifier::Kind::Type)
return nullptr;
- QualType T = QualType(NNS->getAsType(), 0);
+ QualType T = QualType(NNS.getAsType(), 0);
return ::getCurrentInstantiationOf(T, CurContext);
}
@@ -195,8 +189,7 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
return false;
// Grab the tag definition, if there is one.
- QualType type = Context.getTypeDeclType(tag);
- tag = type->getAsTagDecl();
+ tag = tag->getDefinitionOrSelf();
// If we're currently defining this type, then lookup into the
// type is okay: don't complain that it isn't complete yet.
@@ -207,7 +200,8 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
if (loc.isInvalid()) loc = SS.getRange().getBegin();
// The type must be complete.
- if (RequireCompleteType(loc, type, diag::err_incomplete_nested_name_spec,
+ if (RequireCompleteType(loc, Context.getCanonicalTagType(tag),
+ diag::err_incomplete_nested_name_spec,
SS.getRange())) {
SS.SetInvalid(SS.getRange());
return true;
@@ -259,10 +253,10 @@ bool Sema::RequireCompleteEnumDecl(EnumDecl *EnumD, SourceLocation L,
if (SS) {
Diag(L, diag::err_incomplete_nested_name_spec)
- << QualType(EnumD->getTypeForDecl(), 0) << SS->getRange();
+ << Context.getCanonicalTagType(EnumD) << SS->getRange();
SS->SetInvalid(SS->getRange());
} else {
- Diag(L, diag::err_incomplete_enum) << QualType(EnumD->getTypeForDecl(), 0);
+ Diag(L, diag::err_incomplete_enum) << Context.getCanonicalTagType(EnumD);
Diag(EnumD->getLocation(), diag::note_declared_at);
}
@@ -304,7 +298,7 @@ bool Sema::ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
return true;
}
- SS.MakeSuper(Context, RD, SuperLoc, ColonColonLoc);
+ SS.MakeMicrosoftSuper(Context, RD, SuperLoc, ColonColonLoc);
return false;
}
@@ -324,9 +318,6 @@ bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD,
// Determine whether we have a class (or, in C++11, an enum) or
// a typedef thereof. If so, build the nested-name-specifier.
- QualType T = Context.getTypeDeclType(cast<TypeDecl>(SD));
- if (T->isDependentType())
- return true;
if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) {
if (TD->getUnderlyingType()->isRecordType())
return true;
@@ -344,32 +335,42 @@ bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD,
if (IsExtension)
*IsExtension = true;
}
+ if (auto *TD = dyn_cast<TagDecl>(SD)) {
+ if (TD->isDependentType())
+ return true;
+ } else if (Context.getCanonicalTypeDeclType(cast<TypeDecl>(SD))
+ ->isDependentType()) {
+ return true;
+ }
return false;
}
-NamedDecl *Sema::FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS) {
- if (!S || !NNS)
+NamedDecl *Sema::FindFirstQualifierInScope(Scope *S, NestedNameSpecifier NNS) {
+ if (!S)
return nullptr;
- while (NNS->getPrefix())
- NNS = NNS->getPrefix();
-
- if (NNS->getKind() != NestedNameSpecifier::Identifier)
- return nullptr;
+ while (NNS.getKind() == NestedNameSpecifier::Kind::Type) {
+ const Type *T = NNS.getAsType();
+ if ((NNS = T->getPrefix()))
+ continue;
- LookupResult Found(*this, NNS->getAsIdentifier(), SourceLocation(),
- LookupNestedNameSpecifierName);
- LookupName(Found, S);
- assert(!Found.isAmbiguous() && "Cannot handle ambiguities here yet");
+ const auto *DNT = dyn_cast<DependentNameType>(T);
+ if (!DNT)
+ break;
- if (!Found.isSingleResult())
- return nullptr;
+ LookupResult Found(*this, DNT->getIdentifier(), SourceLocation(),
+ LookupNestedNameSpecifierName);
+ LookupName(Found, S);
+ assert(!Found.isAmbiguous() && "Cannot handle ambiguities here yet");
- NamedDecl *Result = Found.getFoundDecl();
- if (isAcceptableNestedNameSpecifier(Result))
- return Result;
+ if (!Found.isSingleResult())
+ return nullptr;
+ NamedDecl *Result = Found.getFoundDecl();
+ if (isAcceptableNestedNameSpecifier(Result))
+ return Result;
+ }
return nullptr;
}
@@ -493,7 +494,18 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
// base object type or prior nested-name-specifier, so this
// nested-name-specifier refers to an unknown specialization. Just build
// a dependent nested-name-specifier.
- SS.Extend(Context, IdInfo.Identifier, IdInfo.IdentifierLoc, IdInfo.CCLoc);
+
+ TypeLocBuilder TLB;
+
+ QualType DTN = Context.getDependentNameType(
+ ElaboratedTypeKeyword::None, SS.getScopeRep(), IdInfo.Identifier);
+ auto DTNL = TLB.push<DependentNameTypeLoc>(DTN);
+ DTNL.setElaboratedKeywordLoc(SourceLocation());
+ DTNL.setNameLoc(IdInfo.IdentifierLoc);
+ DTNL.setQualifierLoc(SS.getWithLocInContext(Context));
+
+ SS.clear();
+ SS.Make(Context, TLB.getTypeLocInContext(Context, DTN), IdInfo.CCLoc);
return false;
}
@@ -599,8 +611,8 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
OuterDecl->getCanonicalDecl() != SD->getCanonicalDecl() &&
(!isa<TypeDecl>(OuterDecl) || !isa<TypeDecl>(SD) ||
!Context.hasSameType(
- Context.getTypeDeclType(cast<TypeDecl>(OuterDecl)),
- Context.getTypeDeclType(cast<TypeDecl>(SD))))) {
+ Context.getCanonicalTypeDeclType(cast<TypeDecl>(OuterDecl)),
+ Context.getCanonicalTypeDeclType(cast<TypeDecl>(SD))))) {
if (ErrorRecoveryLookup)
return true;
@@ -613,7 +625,7 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
// Fall through so that we'll pick the name we found in the object
// type, since that's probably what the user wanted anyway.
- }
+ }
}
if (auto *TD = dyn_cast_or_null<TypedefNameDecl>(SD))
@@ -637,50 +649,44 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
return false;
}
- QualType T =
- Context.getTypeDeclType(cast<TypeDecl>(SD->getUnderlyingDecl()));
-
- if (T->isEnumeralType())
+ const auto *TD = cast<TypeDecl>(SD->getUnderlyingDecl());
+ if (isa<EnumDecl>(TD))
Diag(IdInfo.IdentifierLoc, diag::warn_cxx98_compat_enum_nested_name_spec);
+ QualType T;
TypeLocBuilder TLB;
if (const auto *USD = dyn_cast<UsingShadowDecl>(SD)) {
- T = Context.getUsingType(USD, T);
- TLB.pushTypeSpec(T).setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<InjectedClassNameType>(T)) {
- InjectedClassNameTypeLoc InjectedTL
- = TLB.push<InjectedClassNameTypeLoc>(T);
- InjectedTL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<RecordType>(T)) {
- RecordTypeLoc RecordTL = TLB.push<RecordTypeLoc>(T);
- RecordTL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<TypedefType>(T)) {
- TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(T);
- TypedefTL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<EnumType>(T)) {
- EnumTypeLoc EnumTL = TLB.push<EnumTypeLoc>(T);
- EnumTL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<TemplateTypeParmType>(T)) {
- TemplateTypeParmTypeLoc TemplateTypeTL
- = TLB.push<TemplateTypeParmTypeLoc>(T);
- TemplateTypeTL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<UnresolvedUsingType>(T)) {
- UnresolvedUsingTypeLoc UnresolvedTL
- = TLB.push<UnresolvedUsingTypeLoc>(T);
- UnresolvedTL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<SubstTemplateTypeParmType>(T)) {
- SubstTemplateTypeParmTypeLoc TL
- = TLB.push<SubstTemplateTypeParmTypeLoc>(T);
- TL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<SubstTemplateTypeParmPackType>(T)) {
- SubstTemplateTypeParmPackTypeLoc TL
- = TLB.push<SubstTemplateTypeParmPackTypeLoc>(T);
- TL.setNameLoc(IdInfo.IdentifierLoc);
+ T = Context.getUsingType(ElaboratedTypeKeyword::None, SS.getScopeRep(),
+ USD);
+ TLB.push<UsingTypeLoc>(T).set(/*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context),
+ IdInfo.IdentifierLoc);
+ } else if (const auto *Tag = dyn_cast<TagDecl>(TD)) {
+ T = Context.getTagType(ElaboratedTypeKeyword::None, SS.getScopeRep(), Tag,
+ /*OwnsTag=*/false);
+ auto TTL = TLB.push<TagTypeLoc>(T);
+ TTL.setElaboratedKeywordLoc(SourceLocation());
+ TTL.setQualifierLoc(SS.getWithLocInContext(SemaRef.Context));
+ TTL.setNameLoc(IdInfo.IdentifierLoc);
+ } else if (auto *TN = dyn_cast<TypedefNameDecl>(TD)) {
+ T = Context.getTypedefType(ElaboratedTypeKeyword::None, SS.getScopeRep(),
+ TN);
+ TLB.push<TypedefTypeLoc>(T).set(/*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(SemaRef.Context),
+ IdInfo.IdentifierLoc);
+ } else if (auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(TD)) {
+ T = Context.getUnresolvedUsingType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), UD);
+ TLB.push<UnresolvedUsingTypeLoc>(T).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(SemaRef.Context), IdInfo.IdentifierLoc);
} else {
- llvm_unreachable("Unhandled TypeDecl node in nested-name-specifier");
+ assert(SS.isEmpty());
+ T = Context.getTypeDeclType(TD);
+ TLB.pushTypeSpec(T).setNameLoc(IdInfo.IdentifierLoc);
}
-
- SS.Extend(Context, TLB.getTypeLocInContext(Context, T), IdInfo.CCLoc);
+ SS.clear();
+ SS.Make(Context, TLB.getTypeLocInContext(Context, T), IdInfo.CCLoc);
return false;
}
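// Illustrative sketch (hypothetical names, not part of the patch): the kinds
// of TypeDecl the rewritten branches above handle when a name before '::'
// resolves to a type.
struct Outer {
  struct Tag { inline static int x = 0; }; // TagDecl -> TagType branch
  using Alias = Tag;                       // TypedefNameDecl -> TypedefType branch
};
struct User : Outer {
  using Outer::Tag;                        // UsingShadowDecl -> UsingType branch
};
template <typename T> struct Dep : T {
  using typename T::Inner;                 // UnresolvedUsingTypenameDecl branch
  void f() { Inner i{}; (void)i; }
};
int n = User::Tag::x;                      // qualifier is now part of the type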
@@ -722,16 +728,34 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
Diag(IdInfo.IdentifierLoc,
diag::ext_undeclared_unqual_id_with_dependent_base)
<< IdInfo.Identifier << ContainingClass;
+
+ TypeLocBuilder TLB;
+
// Fake up a nested-name-specifier that starts with the
// injected-class-name of the enclosing class.
- QualType T = Context.getTypeDeclType(ContainingClass);
- TypeLocBuilder TLB;
- TLB.pushTrivial(Context, T, IdInfo.IdentifierLoc);
- SS.Extend(Context, TLB.getTypeLocInContext(Context, T),
- IdInfo.IdentifierLoc);
- // Add the identifier to form a dependent name.
- SS.Extend(Context, IdInfo.Identifier, IdInfo.IdentifierLoc,
- IdInfo.CCLoc);
+ // FIXME: This should be done as part of an adjustment, so that this
+ // doesn't get confused with something written in source.
+ QualType Result =
+ Context.getTagType(ElaboratedTypeKeyword::None, SS.getScopeRep(),
+ ContainingClass, /*OwnsTag=*/false);
+ auto TTL = TLB.push<TagTypeLoc>(Result);
+ TTL.setElaboratedKeywordLoc(SourceLocation());
+ TTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TTL.setNameLoc(IdInfo.IdentifierLoc);
+ SS.Make(Context, TLB.getTypeLocInContext(Context, Result),
+ SourceLocation());
+
+ TLB.clear();
+
+ // Form a DependentNameType.
+ QualType DTN = Context.getDependentNameType(
+ ElaboratedTypeKeyword::None, SS.getScopeRep(), IdInfo.Identifier);
+ auto DTNL = TLB.push<DependentNameTypeLoc>(DTN);
+ DTNL.setElaboratedKeywordLoc(SourceLocation());
+ DTNL.setNameLoc(IdInfo.IdentifierLoc);
+ DTNL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SS.clear();
+ SS.Make(Context, TLB.getTypeLocInContext(Context, DTN), IdInfo.CCLoc);
return false;
}
}
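// Hypothetical repro of the recovery path above (a Microsoft compatibility
// extension; compile with -fms-compatibility): 'Inner' is not found by
// unqualified lookup, so Sema fakes 'Derived<T>::' followed by a dependent
// 'Inner::' component.
template <typename T> struct Base { struct Inner { using type = T; }; };
template <typename T> struct Derived : Base<T> {
  typename Inner::type member;  // found in the dependent base at instantiation
};
Derived<int> d;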
@@ -739,8 +763,19 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
if (!Found.empty()) {
if (TypeDecl *TD = Found.getAsSingle<TypeDecl>()) {
+ QualType T;
+ if (auto *TN = dyn_cast<TypedefNameDecl>(TD)) {
+ T = Context.getTypedefType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), TN);
+ } else {
+ // FIXME: Enumerate the possibilities here.
+ assert(!isa<TagDecl>(TD));
+ assert(SS.isEmpty());
+ T = Context.getTypeDeclType(TD);
+ }
+
Diag(IdInfo.IdentifierLoc, diag::err_expected_class_or_namespace)
- << Context.getTypeDeclType(TD) << getLangOpts().CPlusPlus;
+ << T << getLangOpts().CPlusPlus;
} else if (Found.getAsSingle<TemplateDecl>()) {
ParsedType SuggestedType;
DiagnoseUnknownTypeName(IdInfo.Identifier, IdInfo.IdentifierLoc, S, &SS,
@@ -786,17 +821,19 @@ bool Sema::ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
if (T.isNull())
return true;
- if (!T->isDependentType() && !T->getAs<TagType>()) {
+ if (!T->isDependentType() && !isa<TagType>(T.getCanonicalType())) {
Diag(DS.getTypeSpecTypeLoc(), diag::err_expected_class_or_namespace)
<< T << getLangOpts().CPlusPlus;
return true;
}
+ assert(SS.isEmpty());
+
TypeLocBuilder TLB;
DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
DecltypeTL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
DecltypeTL.setRParenLoc(DS.getTypeofParensRange().getEnd());
- SS.Extend(Context, TLB.getTypeLocInContext(Context, T), ColonColonLoc);
+ SS.Make(Context, TLB.getTypeLocInContext(Context, T), ColonColonLoc);
return false;
}
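// The construct this handles: a decltype-specifier as the sole leading
// component of a nested-name-specifier, hence the new assert(SS.isEmpty()).
struct S { static constexpr int value = 42; };
S s;
int v = decltype(s)::value;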
@@ -812,13 +849,15 @@ bool Sema::ActOnCXXNestedNameSpecifierIndexedPack(CXXScopeSpec &SS,
if (Type.isNull())
return true;
+ assert(SS.isEmpty());
+
TypeLocBuilder TLB;
TLB.pushTrivial(getASTContext(),
cast<PackIndexingType>(Type.getTypePtr())->getPattern(),
DS.getBeginLoc());
PackIndexingTypeLoc PIT = TLB.push<PackIndexingTypeLoc>(Type);
PIT.setEllipsisLoc(DS.getEllipsisLoc());
- SS.Extend(Context, TLB.getTypeLocInContext(Context, Type), ColonColonLoc);
+ SS.Make(Context, TLB.getTypeLocInContext(Context, Type), ColonColonLoc);
return false;
}
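// The analogous pack-indexing qualifier (C++26), also necessarily the first
// component of the nested-name-specifier:
struct A { static constexpr int value = 1; };
template <typename... Ts>
constexpr int first() { return Ts...[0]::value; }
static_assert(first<A>() == 1);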
@@ -858,7 +897,7 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
assert(DTN->getQualifier() == SS.getScopeRep());
QualType T = Context.getDependentTemplateSpecializationType(
ElaboratedTypeKeyword::None,
- {/*Qualifier=*/nullptr, DTN->getName().getIdentifier(),
+ {SS.getScopeRep(), DTN->getName().getIdentifier(),
TemplateKWLoc.isValid()},
TemplateArgs.arguments());
@@ -867,7 +906,7 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
DependentTemplateSpecializationTypeLoc SpecTL
= Builder.push<DependentTemplateSpecializationTypeLoc>(T);
SpecTL.setElaboratedKeywordLoc(SourceLocation());
- SpecTL.setQualifierLoc(NestedNameSpecifierLoc());
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
SpecTL.setTemplateNameLoc(TemplateNameLoc);
SpecTL.setLAngleLoc(LAngleLoc);
@@ -875,7 +914,8 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
- SS.Extend(Context, Builder.getTypeLocInContext(Context, T), CCLoc);
+ SS.clear();
+ SS.Make(Context, Builder.getTypeLocInContext(Context, T), CCLoc);
return false;
}
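// A dependent template-id qualifier that now keeps its prefix; previously the
// 'T::' part was dropped from the DependentTemplateSpecializationType:
template <typename T> struct HoldsDependent {
  typename T::template Inner<int>::type member;
};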
@@ -900,30 +940,28 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
// We were able to resolve the template name to an actual template.
// Build an appropriate nested-name-specifier.
- QualType T = CheckTemplateIdType(Template, TemplateNameLoc, TemplateArgs);
+ QualType T = CheckTemplateIdType(ElaboratedTypeKeyword::None, Template,
+ TemplateNameLoc, TemplateArgs);
if (T.isNull())
return true;
// Alias template specializations can produce types which are not valid
// nested name specifiers.
- if (!T->isDependentType() && !T->getAs<TagType>()) {
+ if (!T->isDependentType() && !isa<TagType>(T.getCanonicalType())) {
Diag(TemplateNameLoc, diag::err_nested_name_spec_non_tag) << T;
NoteAllFoundTemplates(Template);
return true;
}
// Provide source-location information for the template specialization type.
- TypeLocBuilder Builder;
- TemplateSpecializationTypeLoc SpecTL
- = Builder.push<TemplateSpecializationTypeLoc>(T);
- SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
- SpecTL.setTemplateNameLoc(TemplateNameLoc);
- SpecTL.setLAngleLoc(LAngleLoc);
- SpecTL.setRAngleLoc(RAngleLoc);
- for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
- SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
-
- SS.Extend(Context, Builder.getTypeLocInContext(Context, T), CCLoc);
+ TypeLocBuilder TLB;
+ TLB.push<TemplateSpecializationTypeLoc>(T).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context), TemplateKWLoc, TemplateNameLoc,
+ TemplateArgs);
+
+ SS.clear();
+ SS.Make(Context, TLB.getTypeLocInContext(Context, T), CCLoc);
return false;
}
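// A non-dependent template-id as a qualifier component; the prefix ('ns::'
// below) is now stored on the TemplateSpecializationType itself:
namespace ns { template <typename T> struct Box { using type = T; }; }
ns::Box<int>::type boxed = 0;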
@@ -931,7 +969,7 @@ namespace {
/// A structure that stores a nested-name-specifier annotation,
/// including both the nested-name-specifier
struct NestedNameSpecifierAnnotation {
- NestedNameSpecifier *NNS;
+ NestedNameSpecifier NNS = std::nullopt;
};
}
@@ -970,8 +1008,6 @@ bool Sema::ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
if (isa<ObjCContainerDecl>(CurContext) || isa<ObjCMethodDecl>(CurContext))
return false;
- NestedNameSpecifier *Qualifier = SS.getScopeRep();
-
// There are only two places a well-formed program may qualify a
// declarator: first, when defining a namespace or class member
// out-of-line, and second, when naming an explicitly-qualified
@@ -986,18 +1022,20 @@ bool Sema::ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
// granting friendship.
// i.e. we don't push a scope unless it's a class member.
- switch (Qualifier->getKind()) {
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Namespace:
+ switch (SS.getScopeRep().getKind()) {
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::Namespace:
// These are always namespace scopes. We never want to enter a
// namespace scope from anything but a file context.
return CurContext->getRedeclContext()->isFileContext();
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::TypeSpec:
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::Type:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
// These are never namespace scopes.
return true;
+
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
diff --git a/clang/lib/Sema/SemaCast.cpp b/clang/lib/Sema/SemaCast.cpp
index 01252a4..d986e3b 100644
--- a/clang/lib/Sema/SemaCast.cpp
+++ b/clang/lib/Sema/SemaCast.cpp
@@ -595,13 +595,11 @@ static void diagnoseBadCast(Sema &S, unsigned msg, CastType castType,
DifferentPtrness--;
}
if (!DifferentPtrness) {
- auto RecFrom = From->getAs<RecordType>();
- auto RecTo = To->getAs<RecordType>();
- if (RecFrom && RecTo) {
- auto DeclFrom = RecFrom->getAsCXXRecordDecl();
+ if (auto *DeclFrom = From->getAsCXXRecordDecl(),
+ *DeclTo = To->getAsCXXRecordDecl();
+ DeclFrom && DeclTo) {
if (!DeclFrom->isCompleteDefinition())
S.Diag(DeclFrom->getLocation(), diag::note_type_incomplete) << DeclFrom;
- auto DeclTo = RecTo->getAsCXXRecordDecl();
if (!DeclTo->isCompleteDefinition())
S.Diag(DeclTo->getLocation(), diag::note_type_incomplete) << DeclTo;
}
@@ -865,7 +863,7 @@ void CastOperation::CheckDynamicCast() {
return;
}
- const RecordType *DestRecord = DestPointee->getAs<RecordType>();
+ const auto *DestRecord = DestPointee->getAsCanonical<RecordType>();
if (DestPointee->isVoidType()) {
assert(DestPointer && "Reference to void is not possible");
} else if (DestRecord) {
@@ -912,7 +910,7 @@ void CastOperation::CheckDynamicCast() {
SrcPointee = SrcType;
}
- const RecordType *SrcRecord = SrcPointee->getAs<RecordType>();
+ const auto *SrcRecord = SrcPointee->getAsCanonical<RecordType>();
if (SrcRecord) {
if (Self.RequireCompleteType(OpRange.getBegin(), SrcPointee,
diag::err_bad_cast_incomplete,
@@ -964,7 +962,7 @@ void CastOperation::CheckDynamicCast() {
}
// C++ 5.2.7p6: Otherwise, v shall be [polymorphic].
- const RecordDecl *SrcDecl = SrcRecord->getDecl()->getDefinition();
+ const RecordDecl *SrcDecl = SrcRecord->getOriginalDecl()->getDefinition();
assert(SrcDecl && "Definition missing");
if (!cast<CXXRecordDecl>(SrcDecl)->isPolymorphic()) {
Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_polymorphic)
@@ -1454,8 +1452,8 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
// C++0x 5.2.9p9: A value of a scoped enumeration type can be explicitly
// converted to an integral type. [...] A value of a scoped enumeration type
// can also be explicitly converted to a floating-point type [...].
- if (const EnumType *Enum = SrcType->getAs<EnumType>()) {
- if (Enum->getDecl()->isScoped()) {
+ if (const EnumType *Enum = dyn_cast<EnumType>(SrcType)) {
+ if (Enum->getOriginalDecl()->isScoped()) {
if (DestType->isBooleanType()) {
Kind = CK_IntegralToBoolean;
return TC_Success;
@@ -1486,9 +1484,8 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
if (SrcType->isIntegralOrEnumerationType()) {
// [expr.static.cast]p10 If the enumeration type has a fixed underlying
// type, the value is first converted to that type by integral conversion
- const EnumType *Enum = DestType->castAs<EnumType>();
- Kind = Enum->getDecl()->isFixed() &&
- Enum->getDecl()->getIntegerType()->isBooleanType()
+ const auto *ED = DestType->castAsEnumDecl();
+ Kind = ED->isFixed() && ED->getIntegerType()->isBooleanType()
? CK_IntegralToBoolean
: CK_IntegralCast;
return TC_Success;
@@ -1581,11 +1578,11 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
// See if it looks like the user is trying to convert between
// related record types, and select a better diagnostic if so.
- if (auto SrcPointer = SrcType->getAs<PointerType>())
- if (auto DestPointer = DestType->getAs<PointerType>())
- if (SrcPointer->getPointeeType()->getAs<RecordType>() &&
- DestPointer->getPointeeType()->getAs<RecordType>())
- msg = diag::err_bad_cxx_cast_unrelated_class;
+ if (const auto *SrcPointer = SrcType->getAs<PointerType>())
+ if (const auto *DestPointer = DestType->getAs<PointerType>())
+ if (SrcPointer->getPointeeType()->isRecordType() &&
+ DestPointer->getPointeeType()->isRecordType())
+ msg = diag::err_bad_cxx_cast_unrelated_class;
if (SrcType->isMatrixType() && DestType->isMatrixType()) {
if (Self.CheckMatrixCast(OpRange, DestType, SrcType, Kind)) {
@@ -1856,7 +1853,7 @@ TryCastResult TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr,
FoundOverload)) {
CXXMethodDecl *M = cast<CXXMethodDecl>(Fn);
SrcType = Self.Context.getMemberPointerType(
- Fn->getType(), /*Qualifier=*/nullptr, M->getParent());
+ Fn->getType(), /*Qualifier=*/std::nullopt, M->getParent());
WasOverloadedFunction = true;
}
}
@@ -2102,9 +2099,9 @@ void Sema::CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
return;
}
// or one of the types is a tag type.
- if (SrcTy->getAs<TagType>() || DestTy->getAs<TagType>()) {
+ if (isa<TagType>(SrcTy.getCanonicalType()) ||
+ isa<TagType>(DestTy.getCanonicalType()))
return;
- }
// FIXME: Scoped enums?
if ((SrcTy->isUnsignedIntegerType() && DestTy->isSignedIntegerType()) ||
@@ -3097,27 +3094,27 @@ void CastOperation::CheckCStyleCast() {
if (!DestType->isScalarType() && !DestType->isVectorType() &&
!DestType->isMatrixType()) {
- const RecordType *DestRecordTy = DestType->getAs<RecordType>();
-
- if (DestRecordTy && Self.Context.hasSameUnqualifiedType(DestType, SrcType)){
- // GCC struct/union extension: allow cast to self.
- Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_nonscalar)
- << DestType << SrcExpr.get()->getSourceRange();
- Kind = CK_NoOp;
- return;
- }
-
- // GCC's cast to union extension.
- if (DestRecordTy && DestRecordTy->getDecl()->isUnion()) {
- RecordDecl *RD = DestRecordTy->getDecl();
- if (CastExpr::getTargetFieldForToUnionCast(RD, SrcType)) {
- Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_to_union)
- << SrcExpr.get()->getSourceRange();
- Kind = CK_ToUnion;
+ if (const RecordType *DestRecordTy =
+ DestType->getAsCanonical<RecordType>()) {
+ if (Self.Context.hasSameUnqualifiedType(DestType, SrcType)) {
+ // GCC struct/union extension: allow cast to self.
+ Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_nonscalar)
+ << DestType << SrcExpr.get()->getSourceRange();
+ Kind = CK_NoOp;
return;
- } else {
+ }
+
+ // GCC's cast to union extension.
+ if (RecordDecl *RD = DestRecordTy->getOriginalDecl(); RD->isUnion()) {
+ if (CastExpr::getTargetFieldForToUnionCast(RD->getDefinitionOrSelf(),
+ SrcType)) {
+ Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_to_union)
+ << SrcExpr.get()->getSourceRange();
+ Kind = CK_ToUnion;
+ return;
+ }
Self.Diag(OpRange.getBegin(), diag::err_typecheck_cast_to_union_no_type)
- << SrcType << SrcExpr.get()->getSourceRange();
+ << SrcType << SrcExpr.get()->getSourceRange();
SrcExpr = ExprError();
return;
}
@@ -3174,7 +3171,12 @@ void CastOperation::CheckCStyleCast() {
SrcExpr = ExprError();
return;
}
- if (!DestType->isNullPtrType()) {
+ if (DestType->isBooleanType()) {
+ SrcExpr = ImplicitCastExpr::Create(
+ Self.Context, DestType, CK_PointerToBoolean, SrcExpr.get(), nullptr,
+ VK_PRValue, Self.CurFPFeatureOverrides());
+
+ } else if (!DestType->isNullPtrType()) {
// Implicitly cast from the null pointer type to the type of the
// destination.
CastKind CK = DestType->isPointerType() ? CK_NullToPointer : CK_BitCast;
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 3c4511b..26a6b9b 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -41,6 +41,7 @@
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
@@ -285,6 +286,9 @@ static bool BuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
if (S.checkArgCount(TheCall, 1))
return true;
+ if (TheCall->getArg(0)->containsErrors())
+ return true;
+
ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
if (Arg.isInvalid())
return true;
@@ -604,7 +608,7 @@ struct BuiltinDumpStructGenerator {
bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
Expr *IndentLit = getIndentString(Depth);
- Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
+ Expr *TypeLit = getTypeString(S.Context.getCanonicalTagType(RD));
if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
: callPrintFunction("%s", {TypeLit}))
return true;
@@ -2213,7 +2217,7 @@ static bool BuiltinPopcountg(Sema &S, CallExpr *TheCall) {
QualType ArgTy = Arg->getType();
- if (!ArgTy->isUnsignedIntegerType()) {
+ if (!ArgTy->isUnsignedIntegerType() && !ArgTy->isExtVectorBoolType()) {
S.Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
<< 1 << /* scalar */ 1 << /* unsigned integer ty */ 3 << /* no fp */ 0
<< ArgTy;
@@ -2238,7 +2242,7 @@ static bool BuiltinCountZeroBitsGeneric(Sema &S, CallExpr *TheCall) {
QualType Arg0Ty = Arg0->getType();
- if (!Arg0Ty->isUnsignedIntegerType()) {
+ if (!Arg0Ty->isUnsignedIntegerType() && !Arg0Ty->isExtVectorBoolType()) {
S.Diag(Arg0->getBeginLoc(), diag::err_builtin_invalid_arg_type)
<< 1 << /* scalar */ 1 << /* unsigned integer ty */ 3 << /* no fp */ 0
<< Arg0Ty;
@@ -2265,6 +2269,98 @@ static bool BuiltinCountZeroBitsGeneric(Sema &S, CallExpr *TheCall) {
return false;
}
+static bool CheckMaskedBuiltinArgs(Sema &S, Expr *MaskArg, Expr *PtrArg,
+ unsigned Pos) {
+ QualType MaskTy = MaskArg->getType();
+ if (!MaskTy->isExtVectorBoolType())
+ return S.Diag(MaskArg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /* vector of */ 4 << /* booleans */ 6 << /* no fp */ 0
+ << MaskTy;
+
+ QualType PtrTy = PtrArg->getType();
+ if (!PtrTy->isPointerType() || !PtrTy->getPointeeType()->isVectorType())
+ return S.Diag(PtrArg->getExprLoc(), diag::err_vec_masked_load_store_ptr)
+ << Pos << "pointer to vector";
+ return false;
+}
+
+static ExprResult BuiltinMaskedLoad(Sema &S, CallExpr *TheCall) {
+ if (S.checkArgCountRange(TheCall, 2, 3))
+ return ExprError();
+
+ Expr *MaskArg = TheCall->getArg(0);
+ Expr *PtrArg = TheCall->getArg(1);
+ if (CheckMaskedBuiltinArgs(S, MaskArg, PtrArg, 2))
+ return ExprError();
+
+ QualType MaskTy = MaskArg->getType();
+ QualType PtrTy = PtrArg->getType();
+ QualType PointeeTy = PtrTy->getPointeeType();
+ const VectorType *MaskVecTy = MaskTy->getAs<VectorType>();
+ const VectorType *DataVecTy = PointeeTy->getAs<VectorType>();
+
+ if (TheCall->getNumArgs() == 3) {
+ Expr *PassThruArg = TheCall->getArg(2);
+ QualType PassThruTy = PassThruArg->getType();
+ if (!S.Context.hasSameType(PassThruTy, PointeeTy))
+ return S.Diag(PtrArg->getExprLoc(), diag::err_vec_masked_load_store_ptr)
+ << /* third argument */ 3 << PointeeTy;
+ }
+
+ if (MaskVecTy->getNumElements() != DataVecTy->getNumElements())
+ return ExprError(
+ S.Diag(TheCall->getBeginLoc(), diag::err_vec_masked_load_store_size)
+ << S.getASTContext().BuiltinInfo.getQuotedName(
+ TheCall->getBuiltinCallee())
+ << MaskTy << PointeeTy);
+
+ TheCall->setType(PointeeTy);
+ return TheCall;
+}
+
+static ExprResult BuiltinMaskedStore(Sema &S, CallExpr *TheCall) {
+ if (S.checkArgCount(TheCall, 3))
+ return ExprError();
+
+ Expr *MaskArg = TheCall->getArg(0);
+ Expr *ValArg = TheCall->getArg(1);
+ Expr *PtrArg = TheCall->getArg(2);
+
+ if (CheckMaskedBuiltinArgs(S, MaskArg, PtrArg, 3))
+ return ExprError();
+
+ QualType MaskTy = MaskArg->getType();
+ QualType PtrTy = PtrArg->getType();
+ QualType ValTy = ValArg->getType();
+ if (!ValTy->isVectorType())
+ return ExprError(
+ S.Diag(ValArg->getExprLoc(), diag::err_vec_masked_load_store_ptr)
+ << 2 << "vector");
+
+ QualType PointeeTy = PtrTy->getPointeeType();
+ const VectorType *MaskVecTy = MaskTy->getAs<VectorType>();
+ const VectorType *ValVecTy = ValTy->getAs<VectorType>();
+ const VectorType *PtrVecTy = PointeeTy->getAs<VectorType>();
+
+ if (MaskVecTy->getNumElements() != ValVecTy->getNumElements() ||
+ MaskVecTy->getNumElements() != PtrVecTy->getNumElements())
+ return ExprError(
+ S.Diag(TheCall->getBeginLoc(), diag::err_vec_masked_load_store_size)
+ << S.getASTContext().BuiltinInfo.getQuotedName(
+ TheCall->getBuiltinCallee())
+ << MaskTy << PointeeTy);
+
+ if (!S.Context.hasSameType(ValTy, PointeeTy))
+ return ExprError(S.Diag(TheCall->getBeginLoc(),
+ diag::err_vec_builtin_incompatible_vector)
+ << TheCall->getDirectCallee() << /*isMorethantwoArgs*/ 2
+ << SourceRange(TheCall->getArg(1)->getBeginLoc(),
+ TheCall->getArg(1)->getEndLoc()));
+
+ TheCall->setType(S.Context.VoidTy);
+ return TheCall;
+}
+
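// Hypothetical usage these checks accept: arg 0 is an ext_vector of bool, the
// pointer's pointee is a vector with a matching element count, and the
// optional pass-through (load) or stored value matches the pointee type.
typedef float v8f __attribute__((ext_vector_type(8)));
typedef bool  v8b __attribute__((ext_vector_type(8)));
void copy_masked(v8b m, v8f *src, v8f *dst) {
  v8f v = __builtin_masked_load(m, src, (v8f)0);  // typed as *src's vector type
  __builtin_masked_store(m, v, dst);              // result type is void
}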
static ExprResult BuiltinInvoke(Sema &S, CallExpr *TheCall) {
SourceLocation Loc = TheCall->getBeginLoc();
MutableArrayRef Args(TheCall->getArgs(), TheCall->getNumArgs());
@@ -2289,7 +2385,7 @@ static ExprResult BuiltinInvoke(Sema &S, CallExpr *TheCall) {
return ExprError();
}
- const Type *MemPtrClass = MPT->getQualifier()->getAsType();
+ const Type *MemPtrClass = MPT->getQualifier().getAsType();
QualType ObjectT = Args[1]->getType();
if (MPT->isMemberDataPointer() && S.checkArgCount(TheCall, 2))
@@ -2517,6 +2613,12 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return BuiltinShuffleVector(TheCall);
// TheCall will be freed by the smart pointer here, but that's fine, since
// BuiltinShuffleVector guts it, but then doesn't release it.
+ case Builtin::BI__builtin_masked_load:
+ case Builtin::BI__builtin_masked_expand_load:
+ return BuiltinMaskedLoad(*this, TheCall);
+ case Builtin::BI__builtin_masked_store:
+ case Builtin::BI__builtin_masked_compress_store:
+ return BuiltinMaskedStore(*this, TheCall);
case Builtin::BI__builtin_invoke:
return BuiltinInvoke(*this, TheCall);
case Builtin::BI__builtin_prefetch:
@@ -3031,6 +3133,12 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
EltwiseBuiltinArgTyRestriction::IntegerTy))
return ExprError();
break;
+ case Builtin::BI__builtin_elementwise_fshl:
+ case Builtin::BI__builtin_elementwise_fshr:
+ if (BuiltinElementwiseTernaryMath(
+ TheCall, EltwiseBuiltinArgTyRestriction::IntegerTy))
+ return ExprError();
+ break;
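// Element-wise funnel shifts as wired up above: three integer (or integer
// vector) operands, validated by BuiltinElementwiseTernaryMath.
typedef unsigned v4u __attribute__((ext_vector_type(4)));
v4u rotate_left(v4u x, v4u amt) { return __builtin_elementwise_fshl(x, x, amt); }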
case Builtin::BI__builtin_elementwise_min:
case Builtin::BI__builtin_elementwise_max:
if (BuiltinElementwiseMath(TheCall))
@@ -3073,6 +3181,19 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
TheCall->setType(Magnitude.get()->getType());
break;
}
+ case Builtin::BI__builtin_elementwise_ctlz:
+ case Builtin::BI__builtin_elementwise_cttz:
+ // These builtins can be unary or binary. Note for empty calls we call the
+ // unary checker in order to not emit an error that says the function
+ // expects 2 arguments, which would be misleading.
+ if (TheCall->getNumArgs() <= 1) {
+ if (PrepareBuiltinElementwiseMathOneArgCall(
+ TheCall, EltwiseBuiltinArgTyRestriction::IntegerTy))
+ return ExprError();
+ } else if (BuiltinElementwiseMath(
+ TheCall, EltwiseBuiltinArgTyRestriction::IntegerTy))
+ return ExprError();
+ break;
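// The unary-or-binary shape described in the comment above; the optional
// second operand supplies the result for a zero input (assumed semantics):
unsigned lead(unsigned x)  { return __builtin_elementwise_ctlz(x, 32u); }
unsigned trail(unsigned x) { return __builtin_elementwise_cttz(x); }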
case Builtin::BI__builtin_reduce_max:
case Builtin::BI__builtin_reduce_min: {
if (PrepareBuiltinReduceMathOneArgCall(TheCall))
@@ -3318,7 +3439,9 @@ static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
// As a special case, transparent unions initialized with zero are
// considered null for the purposes of the nonnull attribute.
if (const RecordType *UT = Expr->getType()->getAsUnionType();
- UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
+ UT && UT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<TransparentUnionAttr>()) {
if (const auto *CLE = dyn_cast<CompoundLiteralExpr>(Expr))
if (const auto *ILE = dyn_cast<InitListExpr>(CLE->getInitializer()))
Expr = ILE->getInit(0);
@@ -5173,11 +5296,10 @@ bool Sema::BuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
// extra checking to see what their promotable type actually is.
if (!Context.isPromotableIntegerType(Type))
return false;
- if (!Type->isEnumeralType())
+ const auto *ED = Type->getAsEnumDecl();
+ if (!ED)
return true;
- const EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
- return !(ED &&
- Context.typesAreCompatible(ED->getPromotionType(), Type));
+ return !Context.typesAreCompatible(ED->getPromotionType(), Type);
}()) {
unsigned Reason = 0;
if (Type->isReferenceType()) Reason = 1;
@@ -5494,8 +5616,10 @@ ExprResult Sema::BuiltinShuffleVector(CallExpr *TheCall) {
TheCall->getArg(1)->getEndLoc()));
} else if (numElements != numResElements) {
QualType eltType = LHSType->castAs<VectorType>()->getElementType();
- resType =
- Context.getVectorType(eltType, numResElements, VectorKind::Generic);
+ resType = resType->isExtVectorType()
+ ? Context.getExtVectorType(eltType, numResElements)
+ : Context.getVectorType(eltType, numResElements,
+ VectorKind::Generic);
}
}
@@ -7646,17 +7770,13 @@ bool EquatableFormatArgument::VerifyCompatible(
break;
case MK::NoMatchSignedness:
- if (!S.getDiagnostics().isIgnored(
- diag::warn_format_conversion_argument_type_mismatch_signedness,
- ElementLoc)) {
- EmitDiagnostic(S,
- S.PDiag(diag::warn_format_cmp_specifier_sign_mismatch)
- << buildFormatSpecifier()
- << Other.buildFormatSpecifier(),
- FmtExpr, InFunctionCall);
- HadError = S.Diag(Other.ElementLoc, diag::note_format_cmp_with)
- << 0 << Other.Range;
- }
+ EmitDiagnostic(S,
+ S.PDiag(diag::warn_format_cmp_specifier_sign_mismatch)
+ << buildFormatSpecifier()
+ << Other.buildFormatSpecifier(),
+ FmtExpr, InFunctionCall);
+ HadError = S.Diag(Other.ElementLoc, diag::note_format_cmp_with)
+ << 0 << Other.Range;
break;
}
return !HadError;
@@ -7786,13 +7906,10 @@ bool DecomposePrintfHandler::HandlePrintfSpecifier(
template<typename MemberKind>
static llvm::SmallPtrSet<MemberKind*, 1>
CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
- const RecordType *RT = Ty->getAs<RecordType>();
+ auto *RD = Ty->getAsCXXRecordDecl();
llvm::SmallPtrSet<MemberKind*, 1> Results;
- if (!RT)
- return Results;
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD || !RD->getDefinition())
+ if (!RD || !(RD->isBeingDefined() || RD->isCompleteDefinition()))
return Results;
LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
@@ -7801,7 +7918,7 @@ CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
// We just need to include all members of the right kind turned up by the
// filter, at this point.
- if (S.LookupQualifiedName(R, RT->getDecl()))
+ if (S.LookupQualifiedName(R, RD))
for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
NamedDecl *decl = (*I)->getUnderlyingDecl();
if (MemberKind *FK = dyn_cast<MemberKind>(decl))
@@ -8190,11 +8307,14 @@ static analyze_format_string::ArgType::MatchKind
handleFormatSignedness(analyze_format_string::ArgType::MatchKind Match,
DiagnosticsEngine &Diags, SourceLocation Loc) {
if (Match == analyze_format_string::ArgType::NoMatchSignedness) {
- Match =
+ if (Diags.isIgnored(
+ diag::warn_format_conversion_argument_type_mismatch_signedness,
+ Loc) ||
Diags.isIgnored(
- diag::warn_format_conversion_argument_type_mismatch_signedness, Loc)
- ? analyze_format_string::ArgType::Match
- : analyze_format_string::ArgType::NoMatch;
+ // Arbitrary -Wformat diagnostic to detect -Wno-format:
+ diag::warn_format_conversion_argument_type_mismatch, Loc)) {
+ return analyze_format_string::ArgType::Match;
+ }
}
return Match;
}
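// Net effect of the check above, sketched: the signedness mismatch decays to
// a plain match (and stays silent) when either -Wformat-signedness or
// -Wformat as a whole is disabled.
#include <cstdio>
void demo(int i) {
  std::printf("%u", i);  // flagged only under -Wformat-signedness
}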
@@ -8327,9 +8447,9 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
bool IsEnum = false;
bool IsScopedEnum = false;
QualType IntendedTy = ExprTy;
- if (auto EnumTy = ExprTy->getAs<EnumType>()) {
- IntendedTy = EnumTy->getDecl()->getIntegerType();
- if (EnumTy->isUnscopedEnumerationType()) {
+ if (const auto *ED = ExprTy->getAsEnumDecl()) {
+ IntendedTy = ED->getIntegerType();
+ if (!ED->isScoped()) {
ExprTy = IntendedTy;
// This controls whether we're talking about the underlying type or not,
// which we only want to do when it's an unscoped enum.
@@ -8364,7 +8484,9 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
NamedDecl *ND = Result.getFoundDecl();
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
if (TD->getUnderlyingType() == IntendedTy)
- IntendedTy = S.Context.getTypedefType(TD);
+ IntendedTy =
+ S.Context.getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD);
}
}
}
@@ -8408,8 +8530,10 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
case ArgType::Match:
case ArgType::MatchPromotion:
case ArgType::NoMatchPromotionTypeConfusion:
- case ArgType::NoMatchSignedness:
llvm_unreachable("expected non-matching");
+ case ArgType::NoMatchSignedness:
+ Diag = diag::warn_format_conversion_argument_type_mismatch_signedness;
+ break;
case ArgType::NoMatchPedantic:
Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
break;
@@ -8734,9 +8858,10 @@ bool CheckScanfHandler::HandleScanfSpecifier(
analyze_format_string::ArgType::MatchKind Match =
AT.matchesType(S.Context, Ex->getType());
Match = handleFormatSignedness(Match, S.getDiagnostics(), Ex->getExprLoc());
- bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
if (Match == analyze_format_string::ArgType::Match)
return true;
+ bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
+ bool Signedness = Match == analyze_format_string::ArgType::NoMatchSignedness;
ScanfSpecifier fixedFS = FS;
bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
@@ -8744,7 +8869,9 @@ bool CheckScanfHandler::HandleScanfSpecifier(
unsigned Diag =
Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
- : diag::warn_format_conversion_argument_type_mismatch;
+ : Signedness
+ ? diag::warn_format_conversion_argument_type_mismatch_signedness
+ : diag::warn_format_conversion_argument_type_mismatch;
if (Success) {
// Get the fix string from the fixed format specifier.
@@ -9622,7 +9749,7 @@ struct SearchNonTrivialToInitializeField
S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
}
void visitStruct(QualType FT, SourceLocation SL) {
- for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
+ for (const FieldDecl *FD : FT->castAsRecordDecl()->fields())
visit(FD->getType(), FD->getLocation());
}
void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
@@ -9667,7 +9794,7 @@ struct SearchNonTrivialToCopyField
S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
}
void visitStruct(QualType FT, SourceLocation SL) {
- for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
+ for (const FieldDecl *FD : FT->castAsRecordDecl()->fields())
visit(FD->getType(), FD->getLocation());
}
void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
@@ -9938,18 +10065,18 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call,
PDiag(diag::warn_arc_object_memaccess)
<< ArgIdx << FnName << PointeeTy
<< Call->getCallee()->getSourceRange());
- else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
+ else if (const auto *RD = PointeeTy->getAsRecordDecl()) {
// FIXME: Do not consider incomplete types even though they may be
// completed later. GCC does not diagnose such code, but we may want to
// consider diagnosing it in the future, perhaps under a different, but
// related, diagnostic group.
bool NonTriviallyCopyableCXXRecord =
- getLangOpts().CPlusPlus && !RT->isIncompleteType() &&
- !RT->desugar().isTriviallyCopyableType(Context);
+ getLangOpts().CPlusPlus && RD->isCompleteDefinition() &&
+ !PointeeTy.isTriviallyCopyableType(Context);
if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
- RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
+ RD->isNonTrivialToPrimitiveDefaultInitialize()) {
DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
PDiag(diag::warn_cstruct_memaccess)
<< ArgIdx << FnName << PointeeTy << 0);
@@ -9962,7 +10089,7 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call,
PDiag(diag::warn_cxxstruct_memaccess)
<< FnName << PointeeTy);
} else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
- RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
+ RD->isNonTrivialToPrimitiveCopy()) {
DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
PDiag(diag::warn_cstruct_memaccess)
<< ArgIdx << FnName << PointeeTy << 1);
@@ -10467,16 +10594,15 @@ struct IntRange {
if (!C.getLangOpts().CPlusPlus) {
// For enum types in C code, use the underlying datatype.
- if (const auto *ET = dyn_cast<EnumType>(T))
- T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
- } else if (const auto *ET = dyn_cast<EnumType>(T)) {
+ if (const auto *ED = T->getAsEnumDecl())
+ T = ED->getIntegerType().getDesugaredType(C).getTypePtr();
+ } else if (auto *Enum = T->getAsEnumDecl()) {
// For enum types in C++, use the known bit width of the enumerators.
- EnumDecl *Enum = ET->getDecl();
// In C++11, enums can have a fixed underlying type. Use this type to
// compute the range.
if (Enum->isFixed()) {
return IntRange(C.getIntWidth(QualType(T, 0)),
- !ET->isSignedIntegerOrEnumerationType());
+ !Enum->getIntegerType()->isSignedIntegerType());
}
unsigned NumPositive = Enum->getNumPositiveBits();
@@ -10512,8 +10638,8 @@ struct IntRange {
T = CT->getElementType().getTypePtr();
if (const AtomicType *AT = dyn_cast<AtomicType>(T))
T = AT->getValueType().getTypePtr();
- if (const EnumType *ET = dyn_cast<EnumType>(T))
- T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();
+ if (const auto *ED = T->getAsEnumDecl())
+ T = C.getCanonicalType(ED->getIntegerType()).getTypePtr();
if (const auto *EIT = dyn_cast<BitIntType>(T))
return IntRange(EIT->getNumBits(), EIT->isUnsigned());
@@ -11484,8 +11610,7 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
if (BitfieldType->isBooleanType())
return false;
- if (BitfieldType->isEnumeralType()) {
- EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
+ if (auto *BitfieldEnumDecl = BitfieldType->getAsEnumDecl()) {
// If the underlying enum type was not explicitly specified as an unsigned
// type and the enum contain only positive values, MSVC++ will cause an
// inconsistency by storing this as a signed type.
@@ -11514,15 +11639,14 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
// The RHS is not constant. If the RHS has an enum type, make sure the
// bitfield is wide enough to hold all the values of the enum without
// truncation.
- const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>();
+ const auto *ED = OriginalInit->getType()->getAsEnumDecl();
const PreferredTypeAttr *PTAttr = nullptr;
- if (!EnumTy) {
+ if (!ED) {
PTAttr = Bitfield->getAttr<PreferredTypeAttr>();
if (PTAttr)
- EnumTy = PTAttr->getType()->getAs<EnumType>();
+ ED = PTAttr->getType()->getAsEnumDecl();
}
- if (EnumTy) {
- EnumDecl *ED = EnumTy->getDecl();
+ if (ED) {
bool SignedBitfield = BitfieldType->isSignedIntegerOrEnumerationType();
// Enum types are implicitly signed on Windows, so check if there are any
@@ -12586,10 +12710,10 @@ void Sema::CheckImplicitConversion(Expr *E, QualType T, SourceLocation CC,
// type, to give us better diagnostics.
Source = Context.getCanonicalType(SourceType).getTypePtr();
- if (const EnumType *SourceEnum = Source->getAs<EnumType>())
- if (const EnumType *TargetEnum = Target->getAs<EnumType>())
- if (SourceEnum->getDecl()->hasNameForLinkage() &&
- TargetEnum->getDecl()->hasNameForLinkage() &&
+ if (const EnumType *SourceEnum = Source->getAsCanonical<EnumType>())
+ if (const EnumType *TargetEnum = Target->getAsCanonical<EnumType>())
+ if (SourceEnum->getOriginalDecl()->hasNameForLinkage() &&
+ TargetEnum->getOriginalDecl()->hasNameForLinkage() &&
SourceEnum != TargetEnum) {
if (SourceMgr.isInSystemMacro(CC))
return;
@@ -14089,7 +14213,6 @@ void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
CheckUnsequencedOperations(E);
if (!IsConstexpr && !E->isValueDependent())
CheckForIntOverflow(E);
- DiagnoseMisalignedMembers();
}
void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
@@ -15113,10 +15236,9 @@ static bool isLayoutCompatible(const ASTContext &C, const EnumDecl *ED1,
static bool isLayoutCompatible(const ASTContext &C, const FieldDecl *Field1,
const FieldDecl *Field2,
bool AreUnionMembers = false) {
- [[maybe_unused]] const Type *Field1Parent =
- Field1->getParent()->getTypeForDecl();
- [[maybe_unused]] const Type *Field2Parent =
- Field2->getParent()->getTypeForDecl();
+#ifndef NDEBUG
+ CanQualType Field1Parent = C.getCanonicalTagType(Field1->getParent());
+ CanQualType Field2Parent = C.getCanonicalTagType(Field2->getParent());
assert(((Field1Parent->isStructureOrClassType() &&
Field2Parent->isStructureOrClassType()) ||
(Field1Parent->isUnionType() && Field2Parent->isUnionType())) &&
@@ -15125,6 +15247,7 @@ static bool isLayoutCompatible(const ASTContext &C, const FieldDecl *Field1,
assert(((!AreUnionMembers && Field1Parent->isStructureOrClassType()) ||
(AreUnionMembers && Field1Parent->isUnionType())) &&
"AreUnionMembers should be 'true' for union fields (only).");
+#endif
if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
return false;
@@ -15228,17 +15351,14 @@ static bool isLayoutCompatible(const ASTContext &C, QualType T1, QualType T2) {
if (TC1 != TC2)
return false;
- if (TC1 == Type::Enum) {
- return isLayoutCompatible(C,
- cast<EnumType>(T1)->getDecl(),
- cast<EnumType>(T2)->getDecl());
- } else if (TC1 == Type::Record) {
+ if (TC1 == Type::Enum)
+ return isLayoutCompatible(C, T1->castAsEnumDecl(), T2->castAsEnumDecl());
+ if (TC1 == Type::Record) {
if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
return false;
- return isLayoutCompatible(C,
- cast<RecordType>(T1)->getDecl(),
- cast<RecordType>(T2)->getDecl());
+ return isLayoutCompatible(C, T1->castAsRecordDecl(),
+ T2->castAsRecordDecl());
}
return false;
@@ -15534,11 +15654,12 @@ void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment) {
- MisalignedMembers.emplace_back(E, RD, MD, Alignment);
+ currentEvaluationContext().MisalignedMembers.emplace_back(E, RD, MD,
+ Alignment);
}
void Sema::DiagnoseMisalignedMembers() {
- for (MisalignedMember &m : MisalignedMembers) {
+ for (MisalignedMember &m : currentEvaluationContext().MisalignedMembers) {
const NamedDecl *ND = m.RD;
if (ND->getName().empty()) {
if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
@@ -15547,7 +15668,7 @@ void Sema::DiagnoseMisalignedMembers() {
Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
<< m.MD << ND << m.E->getSourceRange();
}
- MisalignedMembers.clear();
+ currentEvaluationContext().MisalignedMembers.clear();
}
void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
@@ -15558,13 +15679,15 @@ void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
if (isa<MemberExpr>(Op)) {
- auto *MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
- if (MA != MisalignedMembers.end() &&
+ auto &MisalignedMembersForExpr =
+ currentEvaluationContext().MisalignedMembers;
+ auto *MA = llvm::find(MisalignedMembersForExpr, MisalignedMember(Op));
+ if (MA != MisalignedMembersForExpr.end() &&
(T->isDependentType() || T->isIntegerType() ||
(T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
Context.getTypeAlignInChars(
T->getPointeeType()) <= MA->Alignment))))
- MisalignedMembers.erase(MA);
+ MisalignedMembersForExpr.erase(MA);
}
}
}
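// For reference, the diagnostic these per-context lists feed:
struct __attribute__((packed)) P { char c; int i; };
int *take_addr(P *p) { return &p->i; }  // -Waddress-of-packed-member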
@@ -15592,7 +15715,7 @@ void Sema::RefersToMemberWithReducedAlignment(
return;
if (ME->isArrow())
BaseType = BaseType->getPointeeType();
- RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
+ auto *RD = BaseType->castAsRecordDecl();
if (RD->isInvalidDecl())
return;
@@ -15637,7 +15760,7 @@ void Sema::RefersToMemberWithReducedAlignment(
// Compute the CompleteObjectAlignment as the alignment of the whole chain.
CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
- ReverseMemberChain.back()->getParent()->getTypeForDecl());
+ Context.getCanonicalTagType(ReverseMemberChain.back()->getParent()));
// The base expression of the innermost MemberExpr may give
// stronger guarantees than the class containing the member.
@@ -15667,9 +15790,9 @@ void Sema::RefersToMemberWithReducedAlignment(
if (FDI->hasAttr<PackedAttr>() ||
FDI->getParent()->hasAttr<PackedAttr>()) {
FD = FDI;
- Alignment = std::min(
- Context.getTypeAlignInChars(FD->getType()),
- Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
+ Alignment = std::min(Context.getTypeAlignInChars(FD->getType()),
+ Context.getTypeAlignInChars(
+ Context.getCanonicalTagType(FD->getParent())));
break;
}
}
@@ -15753,6 +15876,54 @@ static bool checkBuiltinVectorMathMixedEnums(Sema &S, Expr *LHS, Expr *RHS,
return false;
}
+/// Check if all arguments have the same type. If the types don't match, emit an
+/// error message and return true. Otherwise return false.
+///
+/// For scalars we directly compare their unqualified types. But even if we
+/// compare unqualified vector types, a difference in qualifiers in the element
+/// types can make the vector types be considered not equal. For example,
+/// vector of 4 'const float' values vs vector of 4 'float' values.
+/// So we compare unqualified types of their elements and number of elements.
+static bool checkBuiltinVectorMathArgTypes(Sema &SemaRef,
+ ArrayRef<Expr *> Args) {
+ assert(!Args.empty() && "Should have at least one argument.");
+
+ Expr *Arg0 = Args.front();
+ QualType Ty0 = Arg0->getType();
+
+ auto EmitError = [&](Expr *ArgI) {
+ SemaRef.Diag(Arg0->getBeginLoc(),
+ diag::err_typecheck_call_different_arg_types)
+ << Arg0->getType() << ArgI->getType();
+ };
+
+ // Compare scalar types.
+ if (!Ty0->isVectorType()) {
+ for (Expr *ArgI : Args.drop_front())
+ if (!SemaRef.Context.hasSameUnqualifiedType(Ty0, ArgI->getType())) {
+ EmitError(ArgI);
+ return true;
+ }
+
+ return false;
+ }
+
+ // Compare vector types.
+ const auto *Vec0 = Ty0->castAs<VectorType>();
+ for (Expr *ArgI : Args.drop_front()) {
+ const auto *VecI = ArgI->getType()->getAs<VectorType>();
+ if (!VecI ||
+ !SemaRef.Context.hasSameUnqualifiedType(Vec0->getElementType(),
+ VecI->getElementType()) ||
+ Vec0->getNumElements() != VecI->getNumElements()) {
+ EmitError(ArgI);
+ return true;
+ }
+ }
+
+ return false;
+}
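// Example of the rule the helper enforces: element type and element count
// must agree across all arguments.
typedef int v4i __attribute__((ext_vector_type(4)));
typedef int v8i __attribute__((ext_vector_type(8)));
v4i ok(v4i a, v4i b) { return __builtin_elementwise_min(a, b); }
// __builtin_elementwise_min(v4i{}, v8i{}) would be rejected with
// err_typecheck_call_different_arg_types.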
+
std::optional<QualType>
Sema::BuiltinVectorMath(CallExpr *TheCall,
EltwiseBuiltinArgTyRestriction ArgTyRestr) {
@@ -15774,15 +15945,12 @@ Sema::BuiltinVectorMath(CallExpr *TheCall,
SourceLocation LocA = Args[0]->getBeginLoc();
QualType TyA = Args[0]->getType();
- QualType TyB = Args[1]->getType();
if (checkMathBuiltinElementType(*this, LocA, TyA, ArgTyRestr, 1))
return std::nullopt;
- if (!Context.hasSameUnqualifiedType(TyA, TyB)) {
- Diag(LocA, diag::err_typecheck_call_different_arg_types) << TyA << TyB;
+ if (checkBuiltinVectorMathArgTypes(*this, Args))
return std::nullopt;
- }
TheCall->setArg(0, Args[0]);
TheCall->setArg(1, Args[1]);
@@ -15817,16 +15985,11 @@ bool Sema::BuiltinElementwiseTernaryMath(
return true;
}
- for (int I = 1; I < 3; ++I) {
- if (Args[0]->getType().getCanonicalType() !=
- Args[I]->getType().getCanonicalType()) {
- return Diag(Args[0]->getBeginLoc(),
- diag::err_typecheck_call_different_arg_types)
- << Args[0]->getType() << Args[I]->getType();
- }
+ if (checkBuiltinVectorMathArgTypes(*this, Args))
+ return true;
+ for (int I = 0; I < 3; ++I)
TheCall->setArg(I, Args[I]);
- }
TheCall->setType(Args[0]->getType());
return false;
diff --git a/clang/lib/Sema/SemaCodeComplete.cpp b/clang/lib/Sema/SemaCodeComplete.cpp
index 0de5580..03bf4b3 100644
--- a/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/clang/lib/Sema/SemaCodeComplete.cpp
@@ -197,6 +197,9 @@ private:
/// Whether the \p ObjectTypeQualifiers field is active.
bool HasObjectTypeQualifiers;
+ /// Whether the member function uses an explicit object parameter.
+ bool IsExplicitObjectMemberFunction;
+
/// The selector that we prefer.
Selector PreferredSelector;
@@ -218,8 +221,8 @@ public:
LookupFilter Filter = nullptr)
: SemaRef(SemaRef), Allocator(Allocator), CCTUInfo(CCTUInfo),
Filter(Filter), AllowNestedNameSpecifiers(false),
- HasObjectTypeQualifiers(false), CompletionContext(CompletionContext),
- ObjCImplementation(nullptr) {
+ HasObjectTypeQualifiers(false), IsExplicitObjectMemberFunction(false),
+ CompletionContext(CompletionContext), ObjCImplementation(nullptr) {
// If this is an Objective-C instance method definition, dig out the
// corresponding implementation.
switch (CompletionContext.getKind()) {
@@ -275,6 +278,10 @@ public:
HasObjectTypeQualifiers = true;
}
+ void setExplicitObjectMemberFn(bool IsExplicitObjectFn) {
+ IsExplicitObjectMemberFunction = IsExplicitObjectFn;
+ }
+
/// Set the preferred selector.
///
/// When an Objective-C method declaration result is added, and that
@@ -732,7 +739,7 @@ ResultBuilder::ShadowMapEntry::end() const {
///
/// \returns a nested name specifier that refers into the target context, or
/// NULL if no qualification is needed.
-static NestedNameSpecifier *
+static NestedNameSpecifier
getRequiredQualification(ASTContext &Context, const DeclContext *CurContext,
const DeclContext *TargetContext) {
SmallVector<const DeclContext *, 4> TargetParents;
@@ -747,7 +754,7 @@ getRequiredQualification(ASTContext &Context, const DeclContext *CurContext,
TargetParents.push_back(CommonAncestor);
}
- NestedNameSpecifier *Result = nullptr;
+ NestedNameSpecifier Result = std::nullopt;
while (!TargetParents.empty()) {
const DeclContext *Parent = TargetParents.pop_back_val();
@@ -755,10 +762,12 @@ getRequiredQualification(ASTContext &Context, const DeclContext *CurContext,
if (!Namespace->getIdentifier())
continue;
- Result = NestedNameSpecifier::Create(Context, Result, Namespace);
- } else if (const auto *TD = dyn_cast<TagDecl>(Parent))
- Result = NestedNameSpecifier::Create(
- Context, Result, Context.getTypeDeclType(TD).getTypePtr());
+ Result = NestedNameSpecifier(Context, Namespace, Result);
+ } else if (const auto *TD = dyn_cast<TagDecl>(Parent)) {
+ QualType TT = Context.getTagType(ElaboratedTypeKeyword::None, Result, TD,
+ /*OwnsTag=*/false);
+ Result = NestedNameSpecifier(TT.getTypePtr());
+ }
}
return Result;
}
@@ -937,11 +946,12 @@ SimplifiedTypeClass clang::getSimplifiedTypeClass(CanQualType T) {
/// Get the type that a given expression will have if this declaration
/// is used as an expression in its "typical" code-completion form.
-QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
+QualType clang::getDeclUsageType(ASTContext &C, NestedNameSpecifier Qualifier,
+ const NamedDecl *ND) {
ND = ND->getUnderlyingDecl();
if (const auto *Type = dyn_cast<TypeDecl>(ND))
- return C.getTypeDeclType(Type);
+ return C.getTypeDeclType(ElaboratedTypeKeyword::None, Qualifier, Type);
if (const auto *Iface = dyn_cast<ObjCInterfaceDecl>(ND))
return C.getObjCInterfaceType(Iface);
@@ -951,7 +961,9 @@ QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
else if (const auto *Method = dyn_cast<ObjCMethodDecl>(ND))
T = Method->getSendResultType();
else if (const auto *Enumerator = dyn_cast<EnumConstantDecl>(ND))
- T = C.getTypeDeclType(cast<EnumDecl>(Enumerator->getDeclContext()));
+ T = C.getTagType(ElaboratedTypeKeyword::None, Qualifier,
+ cast<EnumDecl>(Enumerator->getDeclContext()),
+ /*OwnsTag=*/false);
else if (const auto *Property = dyn_cast<ObjCPropertyDecl>(ND))
T = Property->getType();
else if (const auto *Value = dyn_cast<ValueDecl>(ND))
@@ -1053,7 +1065,7 @@ void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
// If we have a preferred type, adjust the priority for results with exactly-
// matching or nearly-matching types.
if (!PreferredType.isNull()) {
- QualType T = getDeclUsageType(SemaRef.Context, R.Declaration);
+ QualType T = getDeclUsageType(SemaRef.Context, R.Qualifier, R.Declaration);
if (!T.isNull()) {
CanQualType TC = SemaRef.Context.getCanonicalType(T);
// Check for exactly-matching types (modulo qualifiers).
@@ -1070,10 +1082,9 @@ void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
static DeclContext::lookup_result getConstructors(ASTContext &Context,
const CXXRecordDecl *Record) {
- QualType RecordTy = Context.getTypeDeclType(Record);
+ CanQualType RecordTy = Context.getCanonicalTagType(Record);
DeclarationName ConstructorName =
- Context.DeclarationNames.getCXXConstructorName(
- Context.getCanonicalType(RecordTy));
+ Context.DeclarationNames.getCXXConstructorName(RecordTy);
return Record->lookup(ConstructorName);
}
@@ -1216,11 +1227,13 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
const DeclContext *Ctx = R.Declaration->getDeclContext();
if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Ctx))
R.Qualifier =
- NestedNameSpecifier::Create(SemaRef.Context, nullptr, Namespace);
+ NestedNameSpecifier(SemaRef.Context, Namespace, std::nullopt);
else if (const TagDecl *Tag = dyn_cast<TagDecl>(Ctx))
- R.Qualifier = NestedNameSpecifier::Create(
- SemaRef.Context, nullptr,
- SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
+ R.Qualifier = NestedNameSpecifier(
+ SemaRef.Context
+ .getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, Tag, /*OwnsTag=*/false)
+ .getTypePtr());
else
R.QualifierIsInformative = false;
}
@@ -1405,11 +1418,13 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
const DeclContext *Ctx = R.Declaration->getDeclContext();
if (const auto *Namespace = dyn_cast<NamespaceDecl>(Ctx))
R.Qualifier =
- NestedNameSpecifier::Create(SemaRef.Context, nullptr, Namespace);
+ NestedNameSpecifier(SemaRef.Context, Namespace, std::nullopt);
else if (const auto *Tag = dyn_cast<TagDecl>(Ctx))
- R.Qualifier = NestedNameSpecifier::Create(
- SemaRef.Context, nullptr,
- SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
+ R.Qualifier = NestedNameSpecifier(
+ SemaRef.Context
+ .getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, Tag, /*OwnsTag=*/false)
+ .getTypePtr());
else
R.QualifierIsInformative = false;
}
@@ -1420,10 +1435,27 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
AdjustResultPriorityForDecl(R);
+ // Account for explicit object parameter
+ const auto GetQualifiers = [&](const CXXMethodDecl *MethodDecl) {
+ if (MethodDecl->isExplicitObjectMemberFunction())
+ return MethodDecl->getFunctionObjectParameterType().getQualifiers();
+ else
+ return MethodDecl->getMethodQualifiers();
+ };
+
+ if (IsExplicitObjectMemberFunction &&
+ R.Kind == CodeCompletionResult::RK_Declaration &&
+ (isa<CXXMethodDecl>(R.Declaration) || isa<FieldDecl>(R.Declaration))) {
+ // If the result is a member in the context of an explicit-object member
+ // function, drop it: it must be accessed through the object parameter.
+ return;
+ }
+
if (HasObjectTypeQualifiers)
if (const auto *Method = dyn_cast<CXXMethodDecl>(R.Declaration))
if (Method->isInstance()) {
- Qualifiers MethodQuals = Method->getMethodQualifiers();
+ Qualifiers MethodQuals = GetQualifiers(Method);
if (ObjectTypeQualifiers == MethodQuals)
R.Priority += CCD_ObjectQualifierMatch;
else if (ObjectTypeQualifiers - MethodQuals) {
@@ -1664,7 +1696,8 @@ static bool isObjCReceiverType(ASTContext &C, QualType T) {
}
bool ResultBuilder::IsObjCMessageReceiver(const NamedDecl *ND) const {
- QualType T = getDeclUsageType(SemaRef.Context, ND);
+ QualType T =
+ getDeclUsageType(SemaRef.Context, /*Qualifier=*/std::nullopt, ND);
if (T.isNull())
return false;
@@ -1689,7 +1722,8 @@ bool ResultBuilder::IsObjCCollection(const NamedDecl *ND) const {
(!SemaRef.getLangOpts().CPlusPlus && !IsOrdinaryNonTypeName(ND)))
return false;
- QualType T = getDeclUsageType(SemaRef.Context, ND);
+ QualType T =
+ getDeclUsageType(SemaRef.Context, /*Qualifier=*/std::nullopt, ND);
if (T.isNull())
return false;
@@ -1745,8 +1779,10 @@ public:
void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
bool InBaseClass) override {
- ResultBuilder::Result Result(ND, Results.getBasePriority(ND), nullptr,
- false, IsAccessible(ND, Ctx), FixIts);
+ ResultBuilder::Result Result(ND, Results.getBasePriority(ND),
+ /*Qualifier=*/std::nullopt,
+ /*QualifierIsInformative=*/false,
+ IsAccessible(ND, Ctx), FixIts);
Results.AddResult(Result, InitialLookupCtx, Hiding, InBaseClass, BaseType);
}
@@ -2010,7 +2046,6 @@ static PrintingPolicy getCompletionPrintingPolicy(const ASTContext &Context,
Policy.AnonymousTagLocations = false;
Policy.SuppressStrongLifetime = true;
Policy.SuppressUnwrittenScope = true;
- Policy.SuppressScope = true;
Policy.CleanUglifiedParameters = true;
return Policy;
}
@@ -2035,7 +2070,7 @@ static const char *GetCompletionTypeString(QualType T, ASTContext &Context,
// Anonymous tag types are constant strings.
if (const TagType *TagT = dyn_cast<TagType>(T))
- if (TagDecl *Tag = TagT->getDecl())
+ if (TagDecl *Tag = TagT->getOriginalDecl())
if (!Tag->hasNameForLinkage()) {
switch (Tag->getTagKind()) {
case TagTypeKind::Struct:
@@ -2925,8 +2960,8 @@ static void AddResultTypeChunk(ASTContext &Context,
else
T = Method->getReturnType();
} else if (const auto *Enumerator = dyn_cast<EnumConstantDecl>(ND)) {
- T = Context.getTypeDeclType(cast<TypeDecl>(Enumerator->getDeclContext()));
- T = clang::TypeName::getFullyQualifiedType(T, Context);
+ T = Context.getCanonicalTagType(
+ cast<EnumDecl>(Enumerator->getDeclContext()));
} else if (isa<UnresolvedUsingValueDecl>(ND)) {
/* Do nothing: ignore unresolved using declarations*/
} else if (const auto *Ivar = dyn_cast<ObjCIvarDecl>(ND)) {
@@ -3021,7 +3056,7 @@ static void findTypeLocationForBlockDecl(const TypeSourceInfo *TSInfo,
if (!SuppressBlock) {
if (TypedefTypeLoc TypedefTL = TL.getAsAdjusted<TypedefTypeLoc>()) {
if (TypeSourceInfo *InnerTSInfo =
- TypedefTL.getTypedefNameDecl()->getTypeSourceInfo()) {
+ TypedefTL.getDecl()->getTypeSourceInfo()) {
TL = InnerTSInfo->getTypeLoc().getUnqualifiedLoc();
continue;
}
@@ -3381,7 +3416,7 @@ static void AddTemplateParameterChunks(
/// Add a qualifier to the given code-completion string, if the
/// provided nested-name-specifier is non-NULL.
static void AddQualifierToCompletionString(CodeCompletionBuilder &Result,
- NestedNameSpecifier *Qualifier,
+ NestedNameSpecifier Qualifier,
bool QualifierIsInformative,
ASTContext &Context,
const PrintingPolicy &Policy) {
@@ -3391,7 +3426,7 @@ static void AddQualifierToCompletionString(CodeCompletionBuilder &Result,
std::string PrintedNNS;
{
llvm::raw_string_ostream OS(PrintedNNS);
- Qualifier->print(OS, Policy);
+ Qualifier.print(OS, Policy);
}
if (QualifierIsInformative)
Result.AddInformativeChunk(Result.getAllocator().CopyString(PrintedNNS));
@@ -3399,43 +3434,58 @@ static void AddQualifierToCompletionString(CodeCompletionBuilder &Result,
Result.AddTextChunk(Result.getAllocator().CopyString(PrintedNNS));
}
-static void
-AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
- const FunctionDecl *Function) {
- const auto *Proto = Function->getType()->getAs<FunctionProtoType>();
- if (!Proto || !Proto->getMethodQuals())
- return;
-
+static void AddFunctionTypeQuals(CodeCompletionBuilder &Result,
+ const Qualifiers Quals) {
// FIXME: Add ref-qualifier!
// Handle single qualifiers without copying
- if (Proto->getMethodQuals().hasOnlyConst()) {
+ if (Quals.hasOnlyConst()) {
Result.AddInformativeChunk(" const");
return;
}
- if (Proto->getMethodQuals().hasOnlyVolatile()) {
+ if (Quals.hasOnlyVolatile()) {
Result.AddInformativeChunk(" volatile");
return;
}
- if (Proto->getMethodQuals().hasOnlyRestrict()) {
+ if (Quals.hasOnlyRestrict()) {
Result.AddInformativeChunk(" restrict");
return;
}
// Handle multiple qualifiers.
std::string QualsStr;
- if (Proto->isConst())
+ if (Quals.hasConst())
QualsStr += " const";
- if (Proto->isVolatile())
+ if (Quals.hasVolatile())
QualsStr += " volatile";
- if (Proto->isRestrict())
+ if (Quals.hasRestrict())
QualsStr += " restrict";
Result.AddInformativeChunk(Result.getAllocator().CopyString(QualsStr));
}
static void
+AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
+ const FunctionDecl *Function) {
+ if (auto *CxxMethodDecl = llvm::dyn_cast_if_present<CXXMethodDecl>(Function);
+ CxxMethodDecl && CxxMethodDecl->hasCXXExplicitFunctionObjectParameter()) {
+ // If this is an explicit-object method, infer quals from the object
+ // parameter.
+ const auto Quals = CxxMethodDecl->getFunctionObjectParameterType();
+ if (!Quals.hasQualifiers())
+ return;
+
+ AddFunctionTypeQuals(Result, Quals.getQualifiers());
+ } else {
+ const auto *Proto = Function->getType()->getAs<FunctionProtoType>();
+ if (!Proto || !Proto->getMethodQuals())
+ return;
+
+ AddFunctionTypeQuals(Result, Proto->getMethodQuals());
+ }
+}
+
+static void
AddFunctionExceptSpecToCompletionString(std::string &NameAndSignature,
const FunctionDecl *Function) {
const auto *Proto = Function->getType()->getAs<FunctionProtoType>();
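Context for the rework above: for a C++23 explicit object member function ("deducing this"), the cv-qualifiers sit on the explicit object parameter rather than in FunctionProtoType::getMethodQuals(), so the old path would print nothing for them. A minimal sketch of the two forms in plain C++ (illustrative only):

struct Widget {
  void draw() const;                     // quals come from getMethodQuals()
  void paint(this const Widget &self);   // quals come from the object parameter
};

Either way, the completion string should now carry the informative " const" chunk.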
@@ -3520,11 +3570,9 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
case DeclarationName::CXXConstructorName: {
CXXRecordDecl *Record = nullptr;
QualType Ty = Name.getCXXNameType();
- if (const auto *RecordTy = Ty->getAs<RecordType>())
- Record = cast<CXXRecordDecl>(RecordTy->getDecl());
- else if (const auto *InjectedTy = Ty->getAs<InjectedClassNameType>())
- Record = InjectedTy->getDecl();
- else {
+ if (auto *RD = Ty->getAsCXXRecordDecl()) {
+ Record = RD;
+ } else {
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(ND->getNameAsString()));
break;
@@ -4506,12 +4554,12 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
// If we need a nested-name-specifier, add one now.
if (!InContext) {
- NestedNameSpecifier *NNS = getRequiredQualification(
+ NestedNameSpecifier NNS = getRequiredQualification(
S.Context, CurContext, Overridden->getDeclContext());
if (NNS) {
std::string Str;
llvm::raw_string_ostream OS(Str);
- NNS->print(OS, Policy);
+ NNS.print(OS, Policy);
Builder.AddTextChunk(Results.getAllocator().CopyString(Str));
}
} else if (!InContext->Equals(Overridden->getDeclContext()))
@@ -4627,12 +4675,19 @@ void SemaCodeCompletion::CodeCompleteOrdinaryName(
break;
}
- // If we are in a C++ non-static member function, check the qualifiers on
- // the member function to filter/prioritize the results list.
auto ThisType = SemaRef.getCurrentThisType();
- if (!ThisType.isNull())
+ if (ThisType.isNull()) {
+ // Check whether the enclosing function is an explicit object member function.
+ if (auto *MethodDecl = llvm::dyn_cast_if_present<CXXMethodDecl>(
+ SemaRef.getCurFunctionDecl()))
+ Results.setExplicitObjectMemberFn(
+ MethodDecl->isExplicitObjectMemberFunction());
+ } else {
+ // If we are in a C++ non-static member function, check the qualifiers on
+ // the member function to filter/prioritize the results list.
Results.setObjectTypeQualifiers(ThisType->getPointeeType().getQualifiers(),
VK_LValue);
+ }
CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext);
SemaRef.LookupVisibleDecls(S, SemaRef.LookupOrdinaryName, Consumer,
@@ -4920,14 +4975,14 @@ namespace {
/// Information that allows us to avoid completing redundant enumerators.
struct CoveredEnumerators {
llvm::SmallPtrSet<EnumConstantDecl *, 8> Seen;
- NestedNameSpecifier *SuggestedQualifier = nullptr;
+ NestedNameSpecifier SuggestedQualifier = std::nullopt;
};
} // namespace
static void AddEnumerators(ResultBuilder &Results, ASTContext &Context,
EnumDecl *Enum, DeclContext *CurContext,
const CoveredEnumerators &Enumerators) {
- NestedNameSpecifier *Qualifier = Enumerators.SuggestedQualifier;
+ NestedNameSpecifier Qualifier = Enumerators.SuggestedQualifier;
if (Context.getLangOpts().CPlusPlus && !Qualifier && Enumerators.Seen.empty()) {
// If there are no prior enumerators in C++, check whether we have to
// qualify the names of the enumerators that we suggest, because they
@@ -5058,10 +5113,7 @@ void SemaCodeCompletion::CodeCompleteExpression(
PreferredTypeIsPointer = Data.PreferredType->isAnyPointerType() ||
Data.PreferredType->isMemberPointerType() ||
Data.PreferredType->isBlockPointerType();
- if (Data.PreferredType->isEnumeralType()) {
- EnumDecl *Enum = Data.PreferredType->castAs<EnumType>()->getDecl();
- if (auto *Def = Enum->getDefinition())
- Enum = Def;
+ if (auto *Enum = Data.PreferredType->getAsEnumDecl()) {
// FIXME: collect covered enumerators in cases like:
// if (x == my_enum::one) { ... } else if (x == ^) {}
AddEnumerators(Results, getASTContext(), Enum, SemaRef.CurContext,
@@ -5185,7 +5237,8 @@ AddObjCProperties(const CodeCompletionContext &CCContext,
// expressions.
if (!P->getType().getTypePtr()->isBlockPointerType() ||
!IsBaseExprStatement) {
- Result R = Result(P, Results.getBasePriority(P), nullptr);
+ Result R =
+ Result(P, Results.getBasePriority(P), /*Qualifier=*/std::nullopt);
if (!InOriginalClass)
setInBaseClass(R);
Results.MaybeAddResult(R, CurContext);
@@ -5199,7 +5252,8 @@ AddObjCProperties(const CodeCompletionContext &CCContext,
findTypeLocationForBlockDecl(P->getTypeSourceInfo(), BlockLoc,
BlockProtoLoc);
if (!BlockLoc) {
- Result R = Result(P, Results.getBasePriority(P), nullptr);
+ Result R =
+ Result(P, Results.getBasePriority(P), /*Qualifier=*/std::nullopt);
if (!InOriginalClass)
setInBaseClass(R);
Results.MaybeAddResult(R, CurContext);
@@ -5611,15 +5665,18 @@ private:
// In T::foo, `foo` is a static member function/variable.
bool VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) override {
- if (E->getQualifier() && isApprox(E->getQualifier()->getAsType(), T))
+ NestedNameSpecifier Qualifier = E->getQualifier();
+ if (Qualifier.getKind() == NestedNameSpecifier::Kind::Type &&
+ isApprox(Qualifier.getAsType(), T))
addValue(E, E->getDeclName(), Member::Colons);
return true;
}
// In T::typename foo, `foo` is a type.
bool VisitDependentNameType(DependentNameType *DNT) override {
- const auto *Q = DNT->getQualifier();
- if (Q && isApprox(Q->getAsType(), T))
+ NestedNameSpecifier Q = DNT->getQualifier();
+ if (Q.getKind() == NestedNameSpecifier::Kind::Type &&
+ isApprox(Q.getAsType(), T))
addType(DNT->getIdentifier());
return true;
}
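The hunks above lean on NestedNameSpecifier having become a small value type rather than a pointer. A hedged sketch of the API shape as used in this patch (names taken from the surrounding hunks, not checked against the header):

NestedNameSpecifier Q = DNT->getQualifier();   // a value, not a pointer
if (Q.getKind() == NestedNameSpecifier::Kind::Type) {
  const Type *T = Q.getAsType();               // only meaningful for Kind::Type
  // ... inspect T, e.g. isApprox(T, Target) ...
}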
@@ -5628,10 +5685,15 @@ private:
// VisitNNS() doesn't exist, and TraverseNNS isn't always called :-(
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSL) override {
if (NNSL) {
- NestedNameSpecifier *NNS = NNSL.getNestedNameSpecifier();
- const auto *Q = NNS->getPrefix();
- if (Q && isApprox(Q->getAsType(), T))
- addType(NNS->getAsIdentifier());
+ NestedNameSpecifier NNS = NNSL.getNestedNameSpecifier();
+ if (NNS.getKind() == NestedNameSpecifier::Kind::Type) {
+ const Type *NNST = NNS.getAsType();
+ if (NestedNameSpecifier Q = NNST->getPrefix();
+ Q.getKind() == NestedNameSpecifier::Kind::Type &&
+ isApprox(Q.getAsType(), T))
+ if (const auto *DNT = dyn_cast_or_null<DependentNameType>(NNST))
+ addType(DNT->getIdentifier());
+ }
}
// FIXME: also handle T::foo<X>::bar
return DynamicRecursiveASTVisitor::TraverseNestedNameSpecifierLoc(NNSL);
@@ -5777,7 +5839,7 @@ QualType getApproximateType(const Expr *E, HeuristicResolver &Resolver) {
if (auto Decls = Resolver.resolveDependentNameType(DNT);
Decls.size() == 1) {
if (const auto *TD = dyn_cast<TypeDecl>(Decls[0]))
- return QualType(TD->getTypeForDecl(), 0);
+ return TD->getASTContext().getTypeDeclType(TD);
}
}
// We only resolve DependentTy, or undeduced autos (including auto* etc).
@@ -6175,19 +6237,14 @@ void SemaCodeCompletion::CodeCompleteCase(Scope *S) {
if (!Switch->getCond())
return;
QualType type = Switch->getCond()->IgnoreImplicit()->getType();
- if (!type->isEnumeralType()) {
+ EnumDecl *Enum = type->getAsEnumDecl();
+ if (!Enum) {
CodeCompleteExpressionData Data(type);
Data.IntegralConstantExpression = true;
CodeCompleteExpression(S, Data);
return;
}
- // Code-complete the cases of a switch statement over an enumeration type
- // by providing the list of
- EnumDecl *Enum = type->castAs<EnumType>()->getDecl();
- if (EnumDecl *Def = Enum->getDefinition())
- Enum = Def;
-
// Determine which enumerators we have already seen in the switch statement.
// FIXME: Ideally, we would also be able to look *past* the code-completion
// token, in case we are code-completing in the middle of the switch and not
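The assumed Type::getAsEnumDecl() helper folds the old castAs<EnumType>()->getDecl() plus getDefinition() steps into a single call that already prefers the definition. The completion behavior it feeds, in plain C++:

enum class Color { Red, Green, Blue };
void handle(Color c) {
  switch (c) {
  case Color::Red:
    break;
  // Completing a new 'case ' here should suggest only Green and Blue,
  // since Red is already covered by the switch.
  }
}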
@@ -6887,8 +6944,8 @@ void SemaCodeCompletion::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// Try to instantiate any non-dependent declaration contexts before
// we look in them. Bail out if we fail.
- NestedNameSpecifier *NNS = SS.getScopeRep();
- if (NNS != nullptr && SS.isValid() && !NNS->isDependent()) {
+ NestedNameSpecifier NNS = SS.getScopeRep();
+ if (NNS && !NNS.isDependent()) {
if (Ctx == nullptr || SemaRef.RequireCompleteDeclContext(SS, Ctx))
return;
}
@@ -6902,14 +6959,13 @@ void SemaCodeCompletion::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// The "template" keyword can follow "::" in the grammar, but only
// put it into the grammar if the nested-name-specifier is dependent.
// FIXME: results is always empty; this appears to be dead.
- if (!Results.empty() && NNS && NNS->isDependent())
+ if (!Results.empty() && NNS.isDependent())
Results.AddResult("template");
// If the scope is a concept-constrained type parameter, infer nested
// members based on the constraints.
- if (NNS) {
- if (const auto *TTPT =
- dyn_cast_or_null<TemplateTypeParmType>(NNS->getAsType())) {
+ if (NNS.getKind() == NestedNameSpecifier::Kind::Type) {
+ if (const auto *TTPT = dyn_cast<TemplateTypeParmType>(NNS.getAsType())) {
for (const auto &R : ConceptInfo(*TTPT, S).members()) {
if (R.Operator != ConceptInfo::Member::Colons)
continue;
@@ -7034,7 +7090,7 @@ void SemaCodeCompletion::CodeCompleteNamespaceDecl(Scope *S) {
NS != NSEnd; ++NS)
Results.AddResult(
CodeCompletionResult(NS->second, Results.getBasePriority(NS->second),
- nullptr),
+ /*Qualifier=*/std::nullopt),
SemaRef.CurContext, nullptr, false);
Results.ExitScope();
}
@@ -7821,7 +7877,8 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
if (!Selectors.insert(M->getSelector()).second)
continue;
- Result R = Result(M, Results.getBasePriority(M), nullptr);
+ Result R =
+ Result(M, Results.getBasePriority(M), /*Qualifier=*/std::nullopt);
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = (WantKind != MK_Any);
if (!InOriginalClass)
@@ -8412,7 +8469,8 @@ AddClassMessageCompletions(Sema &SemaRef, Scope *S, ParsedType Receiver,
continue;
Result R(MethList->getMethod(),
- Results.getBasePriority(MethList->getMethod()), nullptr);
+ Results.getBasePriority(MethList->getMethod()),
+ /*Qualifier=*/std::nullopt);
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = false;
Results.MaybeAddResult(R, SemaRef.CurContext);
@@ -8588,7 +8646,8 @@ void SemaCodeCompletion::CodeCompleteObjCInstanceMessage(
continue;
Result R(MethList->getMethod(),
- Results.getBasePriority(MethList->getMethod()), nullptr);
+ Results.getBasePriority(MethList->getMethod()),
+ /*Qualifier=*/std::nullopt);
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = false;
Results.MaybeAddResult(R, SemaRef.CurContext);
@@ -8704,9 +8763,9 @@ static void AddProtocolResults(DeclContext *Ctx, DeclContext *CurContext,
// Record any protocols we find.
if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(D))
if (!OnlyForwardDeclarations || !Proto->hasDefinition())
- Results.AddResult(
- Result(Proto, Results.getBasePriority(Proto), nullptr), CurContext,
- nullptr, false);
+ Results.AddResult(Result(Proto, Results.getBasePriority(Proto),
+ /*Qualifier=*/std::nullopt),
+ CurContext, nullptr, false);
}
}
@@ -8772,9 +8831,9 @@ static void AddInterfaceResults(DeclContext *Ctx, DeclContext *CurContext,
if (const auto *Class = dyn_cast<ObjCInterfaceDecl>(D))
if ((!OnlyForwardDeclarations || !Class->hasDefinition()) &&
(!OnlyUnimplemented || !Class->getImplementation()))
- Results.AddResult(
- Result(Class, Results.getBasePriority(Class), nullptr), CurContext,
- nullptr, false);
+ Results.AddResult(Result(Class, Results.getBasePriority(Class),
+ /*Qualifier=*/std::nullopt),
+ CurContext, nullptr, false);
}
}
@@ -8886,9 +8945,9 @@ void SemaCodeCompletion::CodeCompleteObjCInterfaceCategory(
for (const auto *D : TU->decls())
if (const auto *Category = dyn_cast<ObjCCategoryDecl>(D))
if (CategoryNames.insert(Category->getIdentifier()).second)
- Results.AddResult(
- Result(Category, Results.getBasePriority(Category), nullptr),
- SemaRef.CurContext, nullptr, false);
+ Results.AddResult(Result(Category, Results.getBasePriority(Category),
+ /*Qualifier=*/std::nullopt),
+ SemaRef.CurContext, nullptr, false);
Results.ExitScope();
HandleCodeCompleteResults(&SemaRef, CodeCompleter,
@@ -8923,7 +8982,8 @@ void SemaCodeCompletion::CodeCompleteObjCImplementationCategory(
for (const auto *Cat : Class->visible_categories()) {
if ((!IgnoreImplemented || !Cat->getImplementation()) &&
CategoryNames.insert(Cat->getIdentifier()).second)
- Results.AddResult(Result(Cat, Results.getBasePriority(Cat), nullptr),
+ Results.AddResult(Result(Cat, Results.getBasePriority(Cat),
+ /*Qualifier=*/std::nullopt),
SemaRef.CurContext, nullptr, false);
}
@@ -9023,7 +9083,8 @@ void SemaCodeCompletion::CodeCompleteObjCPropertySynthesizeIvar(
for (; Class; Class = Class->getSuperClass()) {
for (ObjCIvarDecl *Ivar = Class->all_declared_ivar_begin(); Ivar;
Ivar = Ivar->getNextIvar()) {
- Results.AddResult(Result(Ivar, Results.getBasePriority(Ivar), nullptr),
+ Results.AddResult(Result(Ivar, Results.getBasePriority(Ivar),
+ /*Qualifier=*/std::nullopt),
SemaRef.CurContext, nullptr, false);
// Determine whether we've seen an ivar with a name similar to the
@@ -10039,7 +10100,8 @@ void SemaCodeCompletion::CodeCompleteObjCMethodDeclSelector(
}
Result R(MethList->getMethod(),
- Results.getBasePriority(MethList->getMethod()), nullptr);
+ Results.getBasePriority(MethList->getMethod()),
+ /*Qualifier=*/std::nullopt);
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = false;
R.DeclaringEntity = true;
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index da85959..f114173a 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -307,7 +307,8 @@ static UnsignedOrNone EvaluateFoldExpandedConstraintSize(
UnsignedOrNone NumExpansions = FE->getNumExpansions();
if (S.CheckParameterPacksForExpansion(
FE->getEllipsisLoc(), Pattern->getSourceRange(), Unexpanded, MLTAL,
- Expand, RetainExpansion, NumExpansions) ||
+ /*FailOnPackProducingTemplates=*/true, Expand, RetainExpansion,
+ NumExpansions) ||
!Expand || RetainExpansion)
return std::nullopt;
@@ -1696,11 +1697,13 @@ bool FoldExpandedConstraint::AreCompatibleForSubsumption(
Sema::collectUnexpandedParameterPacks(const_cast<Expr *>(B.Pattern), BPacks);
for (const UnexpandedParameterPack &APack : APacks) {
- std::pair<unsigned, unsigned> DepthAndIndex = getDepthAndIndex(APack);
- auto it = llvm::find_if(BPacks, [&](const UnexpandedParameterPack &BPack) {
- return getDepthAndIndex(BPack) == DepthAndIndex;
+ auto ADI = getDepthAndIndex(APack);
+ if (!ADI)
+ continue;
+ auto It = llvm::find_if(BPacks, [&](const UnexpandedParameterPack &BPack) {
+ return getDepthAndIndex(BPack) == ADI;
});
- if (it != BPacks.end())
+ if (It != BPacks.end())
return true;
}
return false;
diff --git a/clang/lib/Sema/SemaCoroutine.cpp b/clang/lib/Sema/SemaCoroutine.cpp
index d193a33..cc03616 100644
--- a/clang/lib/Sema/SemaCoroutine.cpp
+++ b/clang/lib/Sema/SemaCoroutine.cpp
@@ -89,8 +89,8 @@ static QualType lookupPromiseType(Sema &S, const FunctionDecl *FD,
AddArg(T);
// Build the template-id.
- QualType CoroTrait =
- S.CheckTemplateIdType(TemplateName(CoroTraits), KwLoc, Args);
+ QualType CoroTrait = S.CheckTemplateIdType(
+ ElaboratedTypeKeyword::None, TemplateName(CoroTraits), KwLoc, Args);
if (CoroTrait.isNull())
return QualType();
if (S.RequireCompleteType(KwLoc, CoroTrait,
@@ -111,23 +111,18 @@ static QualType lookupPromiseType(Sema &S, const FunctionDecl *FD,
<< RD;
return QualType();
}
- // The promise type is required to be a class type.
- QualType PromiseType = S.Context.getTypeDeclType(Promise);
-
- auto buildElaboratedType = [&]() {
- auto *NNS = NestedNameSpecifier::Create(S.Context, nullptr, S.getStdNamespace());
- NNS = NestedNameSpecifier::Create(S.Context, NNS, CoroTrait.getTypePtr());
- return S.Context.getElaboratedType(ElaboratedTypeKeyword::None, NNS,
- PromiseType);
- };
+ NestedNameSpecifier Qualifier(CoroTrait.getTypePtr());
+ QualType PromiseType = S.Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ Qualifier, Promise);
+ // The promise type is required to be a class type.
if (!PromiseType->getAsCXXRecordDecl()) {
S.Diag(FuncLoc,
diag::err_implied_std_coroutine_traits_promise_type_not_class)
- << buildElaboratedType();
+ << PromiseType;
return QualType();
}
- if (S.RequireCompleteType(FuncLoc, buildElaboratedType(),
+ if (S.RequireCompleteType(FuncLoc, PromiseType,
diag::err_coroutine_promise_type_incomplete))
return QualType();
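Rather than wrapping the promise type in an ElaboratedType after the fact, the patch folds the coroutine_traits specialization in as the qualifier of the tag type itself, so diagnostics print the qualified name directly. An illustrative shape of the type being formed (hedged; the traits layout follows the standard library):

namespace std {
template <class R, class... Args>
struct coroutine_traits {
  using promise_type = typename R::promise_type;
};
}
// lookupPromiseType now yields the promise's tag type with the
// 'coroutine_traits<R, Args...>' specialization as its qualifier, in one step.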
@@ -167,8 +162,8 @@ static QualType lookupCoroutineHandleType(Sema &S, QualType PromiseType,
S.Context.getTrivialTypeSourceInfo(PromiseType, Loc)));
// Build the template-id.
- QualType CoroHandleType =
- S.CheckTemplateIdType(TemplateName(CoroHandle), Loc, Args);
+ QualType CoroHandleType = S.CheckTemplateIdType(
+ ElaboratedTypeKeyword::None, TemplateName(CoroHandle), Loc, Args);
if (CoroHandleType.isNull())
return QualType();
if (S.RequireCompleteType(Loc, CoroHandleType,
@@ -643,7 +638,9 @@ static void checkNoThrow(Sema &S, const Stmt *E,
QualType::DestructionKind::DK_cxx_destructor) {
const auto *T =
cast<RecordType>(ReturnType.getCanonicalType().getTypePtr());
- checkDeclNoexcept(cast<CXXRecordDecl>(T->getDecl())->getDestructor(),
+ checkDeclNoexcept(cast<CXXRecordDecl>(T->getOriginalDecl())
+ ->getDefinition()
+ ->getDestructor(),
/*IsDtor=*/true);
}
} else
@@ -1083,9 +1080,9 @@ static Expr *buildStdNoThrowDeclRef(Sema &S, SourceLocation Loc) {
static TypeSourceInfo *getTypeSourceInfoForStdAlignValT(Sema &S,
SourceLocation Loc) {
- EnumDecl *StdAlignValT = S.getStdAlignValT();
- QualType StdAlignValDecl = S.Context.getTypeDeclType(StdAlignValT);
- return S.Context.getTrivialTypeSourceInfo(StdAlignValDecl);
+ EnumDecl *StdAlignValDecl = S.getStdAlignValT();
+ CanQualType StdAlignValT = S.Context.getCanonicalTagType(StdAlignValDecl);
+ return S.Context.getTrivialTypeSourceInfo(StdAlignValT);
}
// When searching for custom allocators on the PromiseType we want to
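Several hunks here and in SemaDecl.cpp below swap ASTContext::getTypeDeclType(TagDecl) for the new getCanonicalTagType(), which hands back a CanQualType directly. A sketch of the before/after pattern (assumed API, per this diff):

// Before: build the type, then canonicalize it.
//   QualType T = Context.getTypeDeclType(TD);
//   CanQualType CT = Context.getCanonicalType(T);
// After: one call, already canonical.
CanQualType CT = Context.getCanonicalTagType(TD);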
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index b5eb825..12bedae 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -140,8 +140,8 @@ class TypeNameValidatorCCC final : public CorrectionCandidateCallback {
} // end anonymous namespace
-QualType Sema::getTypeDeclType(DeclContext *LookupCtx, DiagCtorKind DCK,
- TypeDecl *TD, SourceLocation NameLoc) {
+void Sema::checkTypeDeclType(DeclContext *LookupCtx, DiagCtorKind DCK,
+ TypeDecl *TD, SourceLocation NameLoc) {
auto *LookupRD = dyn_cast_or_null<CXXRecordDecl>(LookupCtx);
auto *FoundRD = dyn_cast<CXXRecordDecl>(TD);
if (DCK != DiagCtorKind::None && LookupRD && FoundRD &&
@@ -157,7 +157,6 @@ QualType Sema::getTypeDeclType(DeclContext *LookupCtx, DiagCtorKind DCK,
DiagnoseUseOfDecl(TD, NameLoc);
MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
- return Context.getTypeDeclType(TD);
}
namespace {
@@ -182,13 +181,13 @@ lookupUnqualifiedTypeNameInBase(Sema &S, const IdentifierInfo &II,
UnqualifiedTypeNameLookupResult FoundTypeDecl =
UnqualifiedTypeNameLookupResult::NotFound;
for (const auto &Base : RD->bases()) {
- const CXXRecordDecl *BaseRD = nullptr;
- if (auto *BaseTT = Base.getType()->getAs<TagType>())
- BaseRD = BaseTT->getAsCXXRecordDecl();
- else if (auto *TST = Base.getType()->getAs<TemplateSpecializationType>()) {
+ const CXXRecordDecl *BaseRD = Base.getType()->getAsCXXRecordDecl();
+ if (BaseRD) {
+ } else if (auto *TST = dyn_cast<TemplateSpecializationType>(
+ Base.getType().getCanonicalType())) {
// Look for type decls in dependent base classes that have known primary
// templates.
- if (!TST || !TST->isDependentType())
+ if (!TST->isDependentType())
continue;
auto *TD = TST->getTemplateName().getAsTemplateDecl();
if (!TD)
@@ -253,8 +252,7 @@ static ParsedType recoverFromTypeInKnownDependentBase(Sema &S,
S.Diag(NameLoc, diag::ext_found_in_dependent_base) << &II;
ASTContext &Context = S.Context;
- auto *NNS = NestedNameSpecifier::Create(
- Context, nullptr, cast<Type>(Context.getRecordType(RD)));
+ NestedNameSpecifier NNS(Context.getCanonicalTagType(RD).getTypePtr());
QualType T =
Context.getDependentNameType(ElaboratedTypeKeyword::None, NNS, &II);
@@ -269,45 +267,6 @@ static ParsedType recoverFromTypeInKnownDependentBase(Sema &S,
return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
-/// Build a ParsedType for a simple-type-specifier with a nested-name-specifier.
-static ParsedType buildNamedType(Sema &S, const CXXScopeSpec *SS, QualType T,
- SourceLocation NameLoc,
- bool WantNontrivialTypeSourceInfo = true) {
- switch (T->getTypeClass()) {
- case Type::DeducedTemplateSpecialization:
- case Type::Enum:
- case Type::InjectedClassName:
- case Type::Record:
- case Type::Typedef:
- case Type::UnresolvedUsing:
- case Type::Using:
- break;
- // These can never be qualified so an ElaboratedType node
- // would carry no additional meaning.
- case Type::ObjCInterface:
- case Type::ObjCTypeParam:
- case Type::TemplateTypeParm:
- return ParsedType::make(T);
- default:
- llvm_unreachable("Unexpected Type Class");
- }
-
- if (!SS || SS->isEmpty())
- return ParsedType::make(S.Context.getElaboratedType(
- ElaboratedTypeKeyword::None, nullptr, T, nullptr));
-
- QualType ElTy = S.getElaboratedType(ElaboratedTypeKeyword::None, *SS, T);
- if (!WantNontrivialTypeSourceInfo)
- return ParsedType::make(ElTy);
-
- TypeLocBuilder Builder;
- Builder.pushTypeSpec(T).setNameLoc(NameLoc);
- ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(ElTy);
- ElabTL.setElaboratedKeywordLoc(SourceLocation());
- ElabTL.setQualifierLoc(SS->getWithLocInContext(S.Context));
- return S.CreateParsedType(ElTy, Builder.getTypeSourceInfo(S.Context, ElTy));
-}
-
ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS, bool isClassName,
bool HasTrailingDot, ParsedType ObjectTypePtr,
@@ -348,9 +307,10 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
if (AllowImplicitTypename == ImplicitTypenameContext::No)
return nullptr;
SourceLocation QualifiedLoc = SS->getRange().getBegin();
- auto DB =
- DiagCompat(QualifiedLoc, diag_compat::implicit_typename)
- << NestedNameSpecifier::Create(Context, SS->getScopeRep(), &II);
+ // FIXME: Defer the diagnostic until after we build the type and use it.
+ auto DB = DiagCompat(QualifiedLoc, diag_compat::implicit_typename)
+ << Context.getDependentNameType(ElaboratedTypeKeyword::None,
+ SS->getScopeRep(), &II);
if (!getLangOpts().CPlusPlus20)
DB << FixItHint::CreateInsertion(QualifiedLoc, "typename ");
}
@@ -430,7 +390,7 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
bool MemberOfUnknownSpecialization;
UnqualifiedId TemplateName;
TemplateName.setIdentifier(NewII, NameLoc);
- NestedNameSpecifier *NNS = Correction.getCorrectionSpecifier();
+ NestedNameSpecifier NNS = Correction.getCorrectionSpecifier();
CXXScopeSpec NewSS, *NewSSPtr = SS;
if (SS && NNS) {
NewSS.MakeTrivial(Context, NNS, SourceRange(NameLoc));
@@ -530,20 +490,78 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
assert(IIDecl && "Didn't find decl");
- QualType T;
+ TypeLocBuilder TLB;
if (TypeDecl *TD = dyn_cast<TypeDecl>(IIDecl)) {
- // C++ [class.qual]p2: A lookup that would find the injected-class-name
- // instead names the constructors of the class, except when naming a class.
- // This is ill-formed when we're not actually forming a ctor or dtor name.
- T = getTypeDeclType(LookupCtx,
- IsImplicitTypename ? DiagCtorKind::Implicit
- : DiagCtorKind::None,
- TD, NameLoc);
- } else if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(IIDecl)) {
+ checkTypeDeclType(LookupCtx,
+ IsImplicitTypename ? DiagCtorKind::Implicit
+ : DiagCtorKind::None,
+ TD, NameLoc);
+ QualType T;
+ if (FoundUsingShadow) {
+ T = Context.getUsingType(ElaboratedTypeKeyword::None,
+ SS ? SS->getScopeRep() : std::nullopt,
+ FoundUsingShadow);
+ if (!WantNontrivialTypeSourceInfo)
+ return ParsedType::make(T);
+ TLB.push<UsingTypeLoc>(T).set(/*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS ? SS->getWithLocInContext(Context)
+ : NestedNameSpecifierLoc(),
+ NameLoc);
+ } else if (auto *Tag = dyn_cast<TagDecl>(TD)) {
+ T = Context.getTagType(ElaboratedTypeKeyword::None,
+ SS ? SS->getScopeRep() : std::nullopt, Tag,
+ /*OwnsTag=*/false);
+ if (!WantNontrivialTypeSourceInfo)
+ return ParsedType::make(T);
+ auto TL = TLB.push<TagTypeLoc>(T);
+ TL.setElaboratedKeywordLoc(SourceLocation());
+ TL.setQualifierLoc(SS ? SS->getWithLocInContext(Context)
+ : NestedNameSpecifierLoc());
+ TL.setNameLoc(NameLoc);
+ } else if (auto *TN = dyn_cast<TypedefNameDecl>(TD);
+ TN && !isa<ObjCTypeParamDecl>(TN)) {
+ T = Context.getTypedefType(ElaboratedTypeKeyword::None,
+ SS ? SS->getScopeRep() : std::nullopt, TN);
+ if (!WantNontrivialTypeSourceInfo)
+ return ParsedType::make(T);
+ TLB.push<TypedefTypeLoc>(T).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS ? SS->getWithLocInContext(Context) : NestedNameSpecifierLoc(),
+ NameLoc);
+ } else if (auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(TD)) {
+ T = Context.getUnresolvedUsingType(ElaboratedTypeKeyword::None,
+ SS ? SS->getScopeRep() : std::nullopt,
+ UD);
+ if (!WantNontrivialTypeSourceInfo)
+ return ParsedType::make(T);
+ TLB.push<UnresolvedUsingTypeLoc>(T).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS ? SS->getWithLocInContext(Context) : NestedNameSpecifierLoc(),
+ NameLoc);
+ } else {
+ T = Context.getTypeDeclType(TD);
+ if (!WantNontrivialTypeSourceInfo)
+ return ParsedType::make(T);
+ if (isa<ObjCTypeParamType>(T))
+ TLB.push<ObjCTypeParamTypeLoc>(T).setNameLoc(NameLoc);
+ else
+ TLB.pushTypeSpec(T).setNameLoc(NameLoc);
+ }
+ return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
+ }
+ if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(IIDecl)) {
(void)DiagnoseUseOfDecl(IDecl, NameLoc);
- if (!HasTrailingDot)
- T = Context.getObjCInterfaceType(IDecl);
- FoundUsingShadow = nullptr; // FIXME: Target must be a TypeDecl.
+ if (!HasTrailingDot) {
+ // FIXME: Support UsingType for this case.
+ QualType T = Context.getObjCInterfaceType(IDecl);
+ if (!WantNontrivialTypeSourceInfo)
+ return ParsedType::make(T);
+ auto TL = TLB.push<ObjCInterfaceTypeLoc>(T);
+ TL.setNameLoc(NameLoc);
+ // FIXME: Pass in this source location.
+ TL.setNameEndLoc(NameLoc);
+ return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
+ }
} else if (auto *UD = dyn_cast<UnresolvedUsingIfExistsDecl>(IIDecl)) {
(void)DiagnoseUseOfDecl(UD, NameLoc);
// Recover with 'int'
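getTypeName() now materializes the TypeSourceInfo inline for each declaration kind instead of routing through the removed buildNamedType() helper. The recurring pattern, sketched with placeholder Qualifier/QualifierLoc/TN/NameLoc values and hedged against the set() helpers used above:

TypeLocBuilder TLB;
QualType T = Context.getTypedefType(ElaboratedTypeKeyword::None, Qualifier, TN);
TLB.push<TypedefTypeLoc>(T).set(/*ElaboratedKeywordLoc=*/SourceLocation(),
                                QualifierLoc, NameLoc);
TypeSourceInfo *TSI = TLB.getTypeSourceInfo(Context, T);
// CreateParsedType(T, TSI) then produces the ParsedType as before.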
@@ -551,41 +569,38 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
} else if (AllowDeducedTemplate) {
if (auto *TD = getAsTypeTemplateDecl(IIDecl)) {
assert(!FoundUsingShadow || FoundUsingShadow->getTargetDecl() == TD);
+ // FIXME: Support UsingType here.
TemplateName Template = Context.getQualifiedTemplateName(
- SS ? SS->getScopeRep() : nullptr, /*TemplateKeyword=*/false,
+ SS ? SS->getScopeRep() : std::nullopt, /*TemplateKeyword=*/false,
FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD));
- T = Context.getDeducedTemplateSpecializationType(Template, QualType(),
- false);
- // Don't wrap in a further UsingType.
- FoundUsingShadow = nullptr;
+ QualType T = Context.getDeducedTemplateSpecializationType(
+ ElaboratedTypeKeyword::None, Template, QualType(), false);
+ auto TL = TLB.push<DeducedTemplateSpecializationTypeLoc>(T);
+ TL.setElaboratedKeywordLoc(SourceLocation());
+ TL.setNameLoc(NameLoc);
+ TL.setQualifierLoc(SS ? SS->getWithLocInContext(Context)
+ : NestedNameSpecifierLoc());
+ return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
}
}
- if (T.isNull()) {
- // If it's not plausibly a type, suppress diagnostics.
- Result.suppressDiagnostics();
- return nullptr;
- }
-
- if (FoundUsingShadow)
- T = Context.getUsingType(FoundUsingShadow, T);
-
- return buildNamedType(*this, SS, T, NameLoc, WantNontrivialTypeSourceInfo);
+ // As it's not plausibly a type, suppress diagnostics.
+ Result.suppressDiagnostics();
+ return nullptr;
}
// Builds a fake NNS for the given decl context.
-static NestedNameSpecifier *
+static NestedNameSpecifier
synthesizeCurrentNestedNameSpecifier(ASTContext &Context, DeclContext *DC) {
for (;; DC = DC->getLookupParent()) {
DC = DC->getPrimaryContext();
auto *ND = dyn_cast<NamespaceDecl>(DC);
if (ND && !ND->isInline() && !ND->isAnonymousNamespace())
- return NestedNameSpecifier::Create(Context, nullptr, ND);
+ return NestedNameSpecifier(Context, ND, std::nullopt);
if (auto *RD = dyn_cast<CXXRecordDecl>(DC))
- return NestedNameSpecifier::Create(Context, nullptr,
- RD->getTypeForDecl());
+ return NestedNameSpecifier(Context.getCanonicalTagType(RD)->getTypePtr());
if (isa<TranslationUnitDecl>(DC))
- return NestedNameSpecifier::GlobalSpecifier(Context);
+ return NestedNameSpecifier::getGlobal();
}
llvm_unreachable("something isn't in TU scope?");
}
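The rewritten helper exercises the three construction forms the migrated NestedNameSpecifier appears to offer (constructor and member names taken from the hunk; treat them as assumptions):

NestedNameSpecifier ForNS(Context, ND, /*Prefix=*/std::nullopt);          // namespace
NestedNameSpecifier ForRD(Context.getCanonicalTagType(RD)->getTypePtr()); // type
NestedNameSpecifier Global = NestedNameSpecifier::getGlobal();            // '::'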
@@ -610,7 +625,7 @@ ParsedType Sema::ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
bool IsTemplateTypeArg) {
assert(getLangOpts().MSVCCompat && "shouldn't be called in non-MSVC mode");
- NestedNameSpecifier *NNS = nullptr;
+ NestedNameSpecifier NNS = std::nullopt;
if (IsTemplateTypeArg && getCurScope()->isTemplateParamScope()) {
// If we weren't able to parse a default template argument, delay lookup
// until instantiation time by making a non-dependent DependentTypeName. We
@@ -625,7 +640,7 @@ ParsedType Sema::ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
findRecordWithDependentBasesOfEnclosingMethod(CurContext)) {
// Build a DependentNameType that will perform lookup into RD at
// instantiation time.
- NNS = NestedNameSpecifier::Create(Context, nullptr, RD->getTypeForDecl());
+ NNS = NestedNameSpecifier(Context.getCanonicalTagType(RD)->getTypePtr());
// Diagnose that this identifier was undeclared, and retry the lookup during
// template instantiation.
@@ -678,19 +693,22 @@ DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
}
bool Sema::isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S) {
- if (CurContext->isRecord()) {
- if (SS->getScopeRep()->getKind() == NestedNameSpecifier::Super)
- return true;
+ if (!CurContext->isRecord())
+ return CurContext->isFunctionOrMethod() || S->isFunctionPrototypeScope();
- const Type *Ty = SS->getScopeRep()->getAsType();
-
- CXXRecordDecl *RD = cast<CXXRecordDecl>(CurContext);
- for (const auto &Base : RD->bases())
- if (Ty && Context.hasSameUnqualifiedType(QualType(Ty, 1), Base.getType()))
+ switch (SS->getScopeRep().getKind()) {
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ return true;
+ case NestedNameSpecifier::Kind::Type: {
+ QualType T(SS->getScopeRep().getAsType(), 0);
+ for (const auto &Base : cast<CXXRecordDecl>(CurContext)->bases())
+ if (Context.hasSameUnqualifiedType(T, Base.getType()))
return true;
+ [[fallthrough]];
+ }
+ default:
return S->isFunctionPrototypeScope();
}
- return CurContext->isFunctionOrMethod() || S->isFunctionPrototypeScope();
}
void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
@@ -786,7 +804,7 @@ void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
Diag(IILoc, IsTemplateName ? diag::err_no_member_template
: diag::err_typename_nested_not_found)
<< II << DC << SS->getRange();
- else if (SS->isValid() && SS->getScopeRep()->containsErrors()) {
+ else if (SS->isValid() && SS->getScopeRep().containsErrors()) {
SuggestedType =
ActOnTypenameType(S, SourceLocation(), *SS, *II, IILoc).get();
} else if (isDependentScopeSpecifier(*SS)) {
@@ -794,12 +812,13 @@ void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
if (getLangOpts().MSVCCompat && isMicrosoftMissingTypename(SS, S))
DiagID = diag::ext_typename_missing;
+ SuggestedType =
+ ActOnTypenameType(S, SourceLocation(), *SS, *II, IILoc).get();
+
Diag(SS->getRange().getBegin(), DiagID)
- << NestedNameSpecifier::Create(Context, SS->getScopeRep(), II)
+ << GetTypeFromParser(SuggestedType)
<< SourceRange(SS->getRange().getBegin(), IILoc)
<< FixItHint::CreateInsertion(SS->getRange().getBegin(), "typename ");
- SuggestedType = ActOnTypenameType(S, SourceLocation(),
- *SS, *II, IILoc).get();
} else {
assert(SS && SS->isInvalid() &&
"Invalid scope specifier has already been diagnosed");
@@ -1156,10 +1175,34 @@ Corrected:
}
auto BuildTypeFor = [&](TypeDecl *Type, NamedDecl *Found) {
- QualType T = Context.getTypeDeclType(Type);
- if (const auto *USD = dyn_cast<UsingShadowDecl>(Found))
- T = Context.getUsingType(USD, T);
- return buildNamedType(*this, &SS, T, NameLoc);
+ QualType T;
+ TypeLocBuilder TLB;
+ if (const auto *USD = dyn_cast<UsingShadowDecl>(Found)) {
+ T = Context.getUsingType(ElaboratedTypeKeyword::None, SS.getScopeRep(),
+ USD);
+ TLB.push<UsingTypeLoc>(T).set(/*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context), NameLoc);
+ } else {
+ T = Context.getTypeDeclType(ElaboratedTypeKeyword::None, SS.getScopeRep(),
+ Type);
+ if (isa<TagType>(T)) {
+ auto TTL = TLB.push<TagTypeLoc>(T);
+ TTL.setElaboratedKeywordLoc(SourceLocation());
+ TTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TTL.setNameLoc(NameLoc);
+ } else if (isa<TypedefType>(T)) {
+ TLB.push<TypedefTypeLoc>(T).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context), NameLoc);
+ } else if (isa<UnresolvedUsingType>(T)) {
+ TLB.push<UnresolvedUsingTypeLoc>(T).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context), NameLoc);
+ } else {
+ TLB.pushTypeSpec(T).setNameLoc(NameLoc);
+ }
+ }
+ return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
};
NamedDecl *FirstDecl = (*Result.begin())->getUnderlyingDecl();
@@ -2009,8 +2052,7 @@ static bool ShouldDiagnoseUnusedDecl(const LangOptions &LangOpts,
// consistent for both scalars and arrays.
Ty = Ty->getBaseElementTypeUnsafe();
- if (const TagType *TT = Ty->getAs<TagType>()) {
- const TagDecl *Tag = TT->getDecl();
+ if (const TagDecl *Tag = Ty->getAsTagDecl()) {
if (Tag->hasAttr<UnusedAttr>())
return false;
@@ -2070,7 +2112,7 @@ void Sema::DiagnoseUnusedNestedTypedefs(const RecordDecl *D) {
void Sema::DiagnoseUnusedNestedTypedefs(const RecordDecl *D,
DiagReceiverTy DiagReceiver) {
- if (D->getTypeForDecl()->isDependentType())
+ if (D->isDependentType())
return;
for (auto *TmpD : D->decls()) {
@@ -2128,8 +2170,7 @@ void Sema::DiagnoseUnusedButSetDecl(const VarDecl *VD,
if (Ty->isReferenceType() || Ty->isDependentType())
return;
- if (const TagType *TT = Ty->getAs<TagType>()) {
- const TagDecl *Tag = TT->getDecl();
+ if (const TagDecl *Tag = Ty->getAsTagDecl()) {
if (Tag->hasAttr<UnusedAttr>())
return;
// In C++, don't warn for record types that don't have WarnUnusedAttr, to
@@ -2508,7 +2549,8 @@ void Sema::MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
}
Context.setObjCIdRedefinitionType(T);
// Install the built-in type for 'id', ignoring the current definition.
- New->setTypeForDecl(Context.getObjCIdType().getTypePtr());
+ New->setModedTypeSourceInfo(New->getTypeSourceInfo(),
+ Context.getObjCIdType());
return;
}
case 5:
@@ -2516,14 +2558,16 @@ void Sema::MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
break;
Context.setObjCClassRedefinitionType(New->getUnderlyingType());
// Install the built-in type for 'Class', ignoring the current definition.
- New->setTypeForDecl(Context.getObjCClassType().getTypePtr());
+ New->setModedTypeSourceInfo(New->getTypeSourceInfo(),
+ Context.getObjCClassType());
return;
case 3:
if (!TypeID->isStr("SEL"))
break;
Context.setObjCSelRedefinitionType(New->getUnderlyingType());
// Install the built-in type for 'SEL', ignoring the current definition.
- New->setTypeForDecl(Context.getObjCSelType().getTypePtr());
+ New->setModedTypeSourceInfo(New->getTypeSourceInfo(),
+ Context.getObjCSelType());
return;
}
// Fall through - the typedef name was not a builtin type.
@@ -2555,7 +2599,6 @@ void Sema::MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
!hasVisibleDefinition(OldTag, &Hidden)) {
// There is a definition of this tag, but it is not visible. Use it
// instead of our tag.
- New->setTypeForDecl(OldTD->getTypeForDecl());
if (OldTD->isModed())
New->setModedTypeSourceInfo(OldTD->getTypeSourceInfo(),
OldTD->getUnderlyingType());
@@ -2742,7 +2785,7 @@ static bool mergeAlignedAttrs(Sema &S, NamedDecl *New, Decl *Old) {
if (ValueDecl *VD = dyn_cast<ValueDecl>(New))
Ty = VD->getType();
else
- Ty = S.Context.getTagDeclType(cast<TagDecl>(New));
+ Ty = S.Context.getCanonicalTagType(cast<TagDecl>(New));
if (OldAlign == 0)
OldAlign = S.Context.getTypeAlign(Ty);
@@ -2913,8 +2956,11 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
}
static const NamedDecl *getDefinition(const Decl *D) {
- if (const TagDecl *TD = dyn_cast<TagDecl>(D))
- return TD->getDefinition();
+ if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ if (const auto *Def = TD->getDefinition(); Def && !Def->isBeingDefined())
+ return Def;
+ return nullptr;
+ }
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
const VarDecl *Def = VD->getDefinition();
if (Def)
@@ -3069,6 +3115,10 @@ static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) {
cast<SYCLKernelEntryPointAttr>(NewAttribute)->setInvalidAttr();
++I;
continue;
+ } else if (isa<SYCLExternalAttr>(NewAttribute)) {
+ // SYCLExternalAttr may be added after a definition.
+ ++I;
+ continue;
}
S.Diag(NewAttribute->getLocation(),
@@ -3607,7 +3657,9 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, Scope *S,
FunctionDecl *Old = OldD->getAsFunction();
if (!Old) {
if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(OldD)) {
- if (New->getFriendObjectKind()) {
+ // We don't need to check the using-friend pattern for declarations from
+ // another module unit; such cases should already be diagnosed in that unit.
+ if (New->getFriendObjectKind() && !OldD->isInAnotherModuleUnit()) {
Diag(New->getLocation(), diag::err_using_decl_friend);
Diag(Shadow->getTargetDecl()->getLocation(),
diag::note_using_decl_target);
@@ -4092,6 +4144,18 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, Scope *S,
diag::note_carries_dependency_missing_first_decl) << 0/*Function*/;
}
+ // SYCL 2020 section 5.10.1, "SYCL functions and member functions linkage":
+ // When a function is declared with SYCL_EXTERNAL, that macro must be
+ // used on the first declaration of that function in the translation unit.
+ // Redeclarations of the function in the same translation unit may
+ // optionally use SYCL_EXTERNAL, but this is not required.
+ const SYCLExternalAttr *SEA = New->getAttr<SYCLExternalAttr>();
+ if (SEA && !Old->hasAttr<SYCLExternalAttr>()) {
+ Diag(SEA->getLocation(), diag::warn_sycl_external_missing_on_first_decl)
+ << SEA;
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ }
+
// (C++98 8.3.5p3):
// All declarations for a function shall agree exactly in both the
// return type and the parameter-type-list.
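A hedged illustration of the SYCL 2020 rule the new warning enforces, assuming SYCL_EXTERNAL expands to this attribute as in typical SYCL headers:

void f();                // first declaration lacks SYCL_EXTERNAL
SYCL_EXTERNAL void f();  // warns: the macro must appear on the first declaration

SYCL_EXTERNAL void g();  // OK: present on the first declaration
void g();                // OK: redeclarations may omit it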
@@ -5024,7 +5088,7 @@ void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
// The type must match the tag exactly; no qualifiers allowed.
if (!Context.hasSameType(NewTD->getUnderlyingType(),
- Context.getTagDeclType(TagFromDeclSpec))) {
+ Context.getCanonicalTagType(TagFromDeclSpec))) {
if (getLangOpts().CPlusPlus)
Context.addTypedefNameForUnnamedTagDecl(TagFromDeclSpec, NewTD);
return;
@@ -5227,15 +5291,8 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
// UNION_TYPE; <- where UNION_TYPE is a typedef union.
if ((Tag && Tag->getDeclName()) ||
DS.getTypeSpecType() == DeclSpec::TST_typename) {
- RecordDecl *Record = nullptr;
- if (Tag)
- Record = dyn_cast<RecordDecl>(Tag);
- else if (const RecordType *RT =
- DS.getRepAsType().get()->getAsStructureType())
- Record = RT->getDecl();
- else if (const RecordType *UT = DS.getRepAsType().get()->getAsUnionType())
- Record = UT->getDecl();
-
+ RecordDecl *Record = Tag ? dyn_cast<RecordDecl>(Tag)
+ : DS.getRepAsType().get()->getAsRecordDecl();
if (Record && getLangOpts().MicrosoftExt) {
Diag(DS.getBeginLoc(), diag::ext_ms_anonymous_record)
<< Record->isUnion() << DS.getSourceRange();
@@ -5455,48 +5512,54 @@ InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S, DeclContext *Owner,
if ((isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D)) &&
cast<NamedDecl>(D)->getDeclName()) {
ValueDecl *VD = cast<ValueDecl>(D);
- if (CheckAnonMemberRedeclaration(SemaRef, S, Owner, VD->getDeclName(),
- VD->getLocation(), AnonRecord->isUnion(),
- SC)) {
- // C++ [class.union]p2:
- // The names of the members of an anonymous union shall be
- // distinct from the names of any other entity in the
- // scope in which the anonymous union is declared.
+ // C++ [class.union]p2:
+ // The names of the members of an anonymous union shall be
+ // distinct from the names of any other entity in the
+ // scope in which the anonymous union is declared.
+
+ bool FieldInvalid = CheckAnonMemberRedeclaration(
+ SemaRef, S, Owner, VD->getDeclName(), VD->getLocation(),
+ AnonRecord->isUnion(), SC);
+ if (FieldInvalid)
Invalid = true;
- } else {
- // C++ [class.union]p2:
- // For the purpose of name lookup, after the anonymous union
- // definition, the members of the anonymous union are
- // considered to have been defined in the scope in which the
- // anonymous union is declared.
- unsigned OldChainingSize = Chaining.size();
- if (IndirectFieldDecl *IF = dyn_cast<IndirectFieldDecl>(VD))
- Chaining.append(IF->chain_begin(), IF->chain_end());
- else
- Chaining.push_back(VD);
- assert(Chaining.size() >= 2);
- NamedDecl **NamedChain =
- new (SemaRef.Context)NamedDecl*[Chaining.size()];
- for (unsigned i = 0; i < Chaining.size(); i++)
- NamedChain[i] = Chaining[i];
+ // Inject the IndirectFieldDecl even if invalid, because later
+ // diagnostics may depend on it being present, see findDefaultInitializer.
+
+ // C++ [class.union]p2:
+ // For the purpose of name lookup, after the anonymous union
+ // definition, the members of the anonymous union are
+ // considered to have been defined in the scope in which the
+ // anonymous union is declared.
+ unsigned OldChainingSize = Chaining.size();
+ if (IndirectFieldDecl *IF = dyn_cast<IndirectFieldDecl>(VD))
+ Chaining.append(IF->chain_begin(), IF->chain_end());
+ else
+ Chaining.push_back(VD);
- IndirectFieldDecl *IndirectField = IndirectFieldDecl::Create(
- SemaRef.Context, Owner, VD->getLocation(), VD->getIdentifier(),
- VD->getType(), {NamedChain, Chaining.size()});
+ assert(Chaining.size() >= 2);
+ NamedDecl **NamedChain =
+ new (SemaRef.Context) NamedDecl *[Chaining.size()];
+ for (unsigned i = 0; i < Chaining.size(); i++)
+ NamedChain[i] = Chaining[i];
- for (const auto *Attr : VD->attrs())
- IndirectField->addAttr(Attr->clone(SemaRef.Context));
+ IndirectFieldDecl *IndirectField = IndirectFieldDecl::Create(
+ SemaRef.Context, Owner, VD->getLocation(), VD->getIdentifier(),
+ VD->getType(), {NamedChain, Chaining.size()});
- IndirectField->setAccess(AS);
- IndirectField->setImplicit();
- SemaRef.PushOnScopeChains(IndirectField, S);
+ for (const auto *Attr : VD->attrs())
+ IndirectField->addAttr(Attr->clone(SemaRef.Context));
- // That includes picking up the appropriate access specifier.
- if (AS != AS_none) IndirectField->setAccess(AS);
+ IndirectField->setAccess(AS);
+ IndirectField->setImplicit();
+ IndirectField->setInvalidDecl(FieldInvalid);
+ SemaRef.PushOnScopeChains(IndirectField, S);
- Chaining.resize(OldChainingSize);
- }
+ // That includes picking up the appropriate access specifier.
+ if (AS != AS_none)
+ IndirectField->setAccess(AS);
+
+ Chaining.resize(OldChainingSize);
}
}
@@ -5762,7 +5825,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
if (RecordDecl *OwningClass = dyn_cast<RecordDecl>(Owner)) {
Anon = FieldDecl::Create(
Context, OwningClass, DS.getBeginLoc(), Record->getLocation(),
- /*IdentifierInfo=*/nullptr, Context.getTypeDeclType(Record), TInfo,
+ /*IdentifierInfo=*/nullptr, Context.getCanonicalTagType(Record), TInfo,
/*BitWidth=*/nullptr, /*Mutable=*/false,
/*InitStyle=*/ICIS_NoInit);
Anon->setAccess(AS);
@@ -5782,7 +5845,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
Anon = VarDecl::Create(Context, Owner, DS.getBeginLoc(),
Record->getLocation(), /*IdentifierInfo=*/nullptr,
- Context.getTypeDeclType(Record), TInfo, SC);
+ Context.getCanonicalTagType(Record), TInfo, SC);
if (Invalid)
Anon->setInvalidDecl();
@@ -5845,7 +5908,7 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
assert(TInfo && "couldn't build declarator info for anonymous struct");
auto *ParentDecl = cast<RecordDecl>(CurContext);
- QualType RecTy = Context.getTypeDeclType(Record);
+ CanQualType RecTy = Context.getCanonicalTagType(Record);
// Create a declaration for this anonymous struct.
NamedDecl *Anon =
@@ -5964,14 +6027,14 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
return DeclarationNameInfo();
// Determine the type of the class being constructed.
- QualType CurClassType = Context.getTypeDeclType(CurClass);
+ CanQualType CurClassType = Context.getCanonicalTagType(CurClass);
// FIXME: Check two things: that the template-id names the same type as
// CurClassType, and that the template-id does not occur when the name
// was qualified.
- NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
- Context.getCanonicalType(CurClassType)));
+ NameInfo.setName(
+ Context.DeclarationNames.getCXXConstructorName(CurClassType));
// FIXME: should we retrieve TypeSourceInfo?
NameInfo.setNamedTypeInfo(nullptr);
return NameInfo;
@@ -6259,8 +6322,9 @@ bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
// that's the case, then drop this declaration entirely.
if ((Name.getNameKind() == DeclarationName::CXXConstructorName ||
Name.getNameKind() == DeclarationName::CXXDestructorName) &&
- !Context.hasSameType(Name.getCXXNameType(),
- Context.getTypeDeclType(cast<CXXRecordDecl>(Cur))))
+ !Context.hasSameType(
+ Name.getCXXNameType(),
+ Context.getCanonicalTagType(cast<CXXRecordDecl>(Cur))))
return true;
return false;
@@ -6280,36 +6344,48 @@ bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
<< FixItHint::CreateRemoval(TemplateId->TemplateKWLoc);
NestedNameSpecifierLoc SpecLoc(SS.getScopeRep(), SS.location_data());
- do {
- if (TypeLoc TL = SpecLoc.getTypeLoc()) {
- if (SourceLocation TemplateKeywordLoc = TL.getTemplateKeywordLoc();
- TemplateKeywordLoc.isValid())
- Diag(Loc, diag::ext_template_after_declarative_nns)
- << FixItHint::CreateRemoval(TemplateKeywordLoc);
- }
-
- if (const Type *T = SpecLoc.getNestedNameSpecifier()->getAsType()) {
- if (const auto *TST = T->getAsAdjusted<TemplateSpecializationType>()) {
- // C++23 [expr.prim.id.qual]p3:
- // [...] If a nested-name-specifier N is declarative and has a
- // simple-template-id with a template argument list A that involves a
- // template parameter, let T be the template nominated by N without A.
- // T shall be a class template.
- if (TST->isDependentType() && TST->isTypeAlias())
- Diag(Loc, diag::ext_alias_template_in_declarative_nns)
- << SpecLoc.getLocalSourceRange();
- } else if (T->isDecltypeType() || T->getAsAdjusted<PackIndexingType>()) {
- // C++23 [expr.prim.id.qual]p2:
- // [...] A declarative nested-name-specifier shall not have a
- // computed-type-specifier.
- //
- // CWG2858 changed this from 'decltype-specifier' to
- // 'computed-type-specifier'.
- Diag(Loc, diag::err_computed_type_in_declarative_nns)
- << T->isDecltypeType() << SpecLoc.getTypeLoc().getSourceRange();
- }
+ for (TypeLoc TL = SpecLoc.getAsTypeLoc(), NextTL; TL;
+ TL = std::exchange(NextTL, TypeLoc())) {
+ SourceLocation TemplateKeywordLoc;
+ switch (TL.getTypeLocClass()) {
+ case TypeLoc::TemplateSpecialization: {
+ auto TST = TL.castAs<TemplateSpecializationTypeLoc>();
+ TemplateKeywordLoc = TST.getTemplateKeywordLoc();
+ if (auto *T = TST.getTypePtr(); T->isDependentType() && T->isTypeAlias())
+ Diag(Loc, diag::ext_alias_template_in_declarative_nns)
+ << TST.getLocalSourceRange();
+ break;
}
- } while ((SpecLoc = SpecLoc.getPrefix()));
+ case TypeLoc::Decltype:
+ case TypeLoc::PackIndexing: {
+ const Type *T = TL.getTypePtr();
+ // C++23 [expr.prim.id.qual]p2:
+ // [...] A declarative nested-name-specifier shall not have a
+ // computed-type-specifier.
+ //
+ // CWG2858 changed this from 'decltype-specifier' to
+ // 'computed-type-specifier'.
+ Diag(Loc, diag::err_computed_type_in_declarative_nns)
+ << T->isDecltypeType() << TL.getSourceRange();
+ break;
+ }
+ case TypeLoc::DependentName:
+ NextTL =
+ TL.castAs<DependentNameTypeLoc>().getQualifierLoc().getAsTypeLoc();
+ break;
+ case TypeLoc::DependentTemplateSpecialization: {
+ auto TST = TL.castAs<DependentTemplateSpecializationTypeLoc>();
+ TemplateKeywordLoc = TST.getTemplateKeywordLoc();
+ NextTL = TST.getQualifierLoc().getAsTypeLoc();
+ break;
+ }
+ default:
+ break;
+ }
+ if (TemplateKeywordLoc.isValid())
+ Diag(Loc, diag::ext_template_after_declarative_nns)
+ << FixItHint::CreateRemoval(TemplateKeywordLoc);
+ }
return false;
}
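Plain-C++ examples of the two rules the rewritten TypeLoc walk diagnoses (illustrative only):

template <class T> struct A { void f(); };
template <class T> using B = A<T>;
template <class T> void B<T>::f() {}  // extension: alias template in a
                                      // declarative nested-name-specifier

struct C { void g(); };
void decltype(C())::g() {}            // error: computed-type-specifier in a
                                      // declarative nested-name-specifier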
@@ -9046,9 +9122,8 @@ bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
// We really want to find the base class destructor here.
- QualType T = Context.getTypeDeclType(BaseRecord);
- CanQualType CT = Context.getCanonicalType(T);
- Name = Context.DeclarationNames.getCXXDestructorName(CT);
+ Name = Context.DeclarationNames.getCXXDestructorName(
+ Context.getCanonicalTagType(BaseRecord));
}
for (NamedDecl *BaseND : BaseRecord->lookup(Name)) {
@@ -9734,7 +9809,7 @@ static void checkIsValidOpenCLKernelParameter(
// At this point we already handled everything except of a RecordType.
assert(PT->isRecordType() && "Unexpected type.");
- const RecordDecl *PD = PT->castAs<RecordType>()->getDecl();
+ const auto *PD = PT->castAsRecordDecl();
VisitStack.push_back(PD);
assert(VisitStack.back() && "First decl null?");
@@ -9762,7 +9837,7 @@ static void checkIsValidOpenCLKernelParameter(
"Unexpected type.");
const Type *FieldRecTy = FieldTy->getPointeeOrArrayElementType();
- RD = FieldRecTy->castAs<RecordType>()->getDecl();
+ RD = FieldRecTy->castAsRecordDecl();
} else {
RD = cast<RecordDecl>(Next);
}
@@ -10768,7 +10843,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// redeclaration lookup found nothing at all. Diagnose that now;
// nothing will diagnose that error later.
if (isFriend &&
- (D.getCXXScopeSpec().getScopeRep()->isDependent() ||
+ (D.getCXXScopeSpec().getScopeRep().isDependent() ||
(!Previous.empty() && CurContext->isDependentContext()))) {
// ignore these
} else if (NewFD->isCPUDispatchMultiVersion() ||
@@ -12256,6 +12331,9 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
if (NewFD->hasAttr<SYCLKernelEntryPointAttr>())
SYCL().CheckSYCLEntryPointFunctionDecl(NewFD);
+ if (NewFD->hasAttr<SYCLExternalAttr>())
+ SYCL().CheckSYCLExternalFunctionDecl(NewFD);
+
// Semantic checking for this function declaration (in isolation).
if (getLangOpts().CPlusPlus) {
@@ -12272,11 +12350,11 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// template struct A<B>;
if (NewFD->getFriendObjectKind() == Decl::FriendObjectKind::FOK_None ||
!Destructor->getFunctionObjectParameterType()->isDependentType()) {
- CXXRecordDecl *Record = Destructor->getParent();
- QualType ClassType = Context.getTypeDeclType(Record);
+ CanQualType ClassType =
+ Context.getCanonicalTagType(Destructor->getParent());
- DeclarationName Name = Context.DeclarationNames.getCXXDestructorName(
- Context.getCanonicalType(ClassType));
+ DeclarationName Name =
+ Context.DeclarationNames.getCXXDestructorName(ClassType);
if (NewFD->getDeclName() != Name) {
Diag(NewFD->getLocation(), diag::err_destructor_name);
NewFD->setInvalidDecl();
@@ -12444,6 +12522,13 @@ void Sema::CheckMain(FunctionDecl *FD, const DeclSpec &DS) {
return;
}
+ if (FD->hasAttr<SYCLExternalAttr>()) {
+ Diag(FD->getLocation(), diag::err_sycl_external_invalid_main)
+ << FD->getAttr<SYCLExternalAttr>();
+ FD->setInvalidDecl();
+ return;
+ }
+
// Functions named main in hlsl are default entries, but don't have specific
// signatures they are required to conform to.
if (getLangOpts().HLSL)
@@ -13280,7 +13365,7 @@ struct DiagNonTrivalCUnionDefaultInitializeVisitor
}
void visitStruct(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
- const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
+ const auto *RD = QT->castAsRecordDecl();
if (RD->isUnion()) {
if (OrigLoc.isValid()) {
bool IsUnion = false;
@@ -13346,7 +13431,7 @@ struct DiagNonTrivalCUnionDestructedTypeVisitor
}
void visitStruct(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
- const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
+ const auto *RD = QT->castAsRecordDecl();
if (RD->isUnion()) {
if (OrigLoc.isValid()) {
bool IsUnion = false;
@@ -13411,7 +13496,7 @@ struct DiagNonTrivalCUnionCopyVisitor
}
void visitStruct(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
- const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
+ const auto *RD = QT->castAsRecordDecl();
if (RD->isUnion()) {
if (OrigLoc.isValid()) {
bool IsUnion = false;
@@ -14382,9 +14467,8 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
// version of one of these types, or an array of one of the preceding
// types and is declared without an initializer.
if (getLangOpts().CPlusPlus && Var->hasLocalStorage()) {
- if (const RecordType *Record
- = Context.getBaseElementType(Type)->getAs<RecordType>()) {
- CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record->getDecl());
+ if (const auto *CXXRecord =
+ Context.getBaseElementType(Type)->getAsCXXRecordDecl()) {
// Mark the function (if we're in one) for further checking even if the
// looser rules of C++11 do not require such checks, so that we can
// diagnose incompatibilities with C++98.
@@ -14847,8 +14931,8 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
// Require the destructor.
if (!type->isDependentType())
- if (const RecordType *recordType = baseType->getAs<RecordType>())
- FinalizeVarWithDestructor(var, recordType);
+ if (auto *RD = baseType->getAsCXXRecordDecl())
+ FinalizeVarWithDestructor(var, RD);
// If this variable must be emitted, add it as an initializer for the current
// module.
@@ -15763,17 +15847,18 @@ Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
if (TypoCorrectedFunctionDefinitions.count(Definition))
return;
- // If we don't have a visible definition of the function, and it's inline or
- // a template, skip the new definition.
- if (SkipBody && !hasVisibleDefinition(Definition) &&
+ bool DefinitionVisible = false;
+ if (SkipBody && isRedefinitionAllowedFor(Definition, DefinitionVisible) &&
(Definition->getFormalLinkage() == Linkage::Internal ||
Definition->isInlined() || Definition->getDescribedFunctionTemplate() ||
Definition->getNumTemplateParameterLists())) {
SkipBody->ShouldSkip = true;
SkipBody->Previous = const_cast<FunctionDecl*>(Definition);
- if (auto *TD = Definition->getDescribedFunctionTemplate())
- makeMergedDefinitionVisible(TD);
- makeMergedDefinitionVisible(const_cast<FunctionDecl*>(Definition));
+ if (!DefinitionVisible) {
+ if (auto *TD = Definition->getDescribedFunctionTemplate())
+ makeMergedDefinitionVisible(TD);
+ makeMergedDefinitionVisible(const_cast<FunctionDecl *>(Definition));
+ }
return;
}
@@ -16149,10 +16234,6 @@ Decl *Sema::ActOnSkippedFunctionBody(Decl *Decl) {
return Decl;
}
-Decl *Sema::ActOnFinishFunctionBody(Decl *D, Stmt *BodyArg) {
- return ActOnFinishFunctionBody(D, BodyArg, /*IsInstantiation=*/false);
-}
-
/// RAII object that pops an ExpressionEvaluationContext when exiting a function
/// body.
class ExitFunctionBodyRAII {
@@ -16223,8 +16304,8 @@ void Sema::CheckCoroutineWrapper(FunctionDecl *FD) {
Diag(FD->getLocation(), diag::err_coroutine_return_type) << RD;
}
-Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
- bool IsInstantiation) {
+Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body, bool IsInstantiation,
+ bool RetainFunctionScopeInfo) {
FunctionScopeInfo *FSI = getCurFunction();
FunctionDecl *FD = dcl ? dcl->getAsFunction() : nullptr;
@@ -16280,6 +16361,14 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
}
}
+ if (FD && !FD->isInvalidDecl() && FD->hasAttr<SYCLExternalAttr>()) {
+ SYCLExternalAttr *SEAttr = FD->getAttr<SYCLExternalAttr>();
+ if (FD->isDeletedAsWritten())
+ Diag(SEAttr->getLocation(),
+ diag::err_sycl_external_invalid_deleted_function)
+ << SEAttr;
+ }
+
{
// Do not call PopExpressionEvaluationContext() if it is a lambda because
// one is already popped when finishing the lambda in BuildLambdaExpr().
@@ -16681,7 +16770,8 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (!IsInstantiation)
PopDeclContext();
- PopFunctionScopeInfo(ActivePolicy, dcl);
+ if (!RetainFunctionScopeInfo)
+ PopFunctionScopeInfo(ActivePolicy, dcl);
// If any errors have occurred, clear out any temporaries that may have
// been leftover. This ensures that these temporaries won't be picked up for
// deletion in some later function.
@@ -17483,6 +17573,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// FIXME: Check member specializations more carefully.
bool isMemberSpecialization = false;
+ bool IsInjectedClassName = false;
bool Invalid = false;
// We only need to do this matching if we have template parameters
@@ -17948,8 +18039,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// see http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#407
if (getLangOpts().CPlusPlus) {
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(PrevDecl)) {
- if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) {
- TagDecl *Tag = TT->getDecl();
+ if (TagDecl *Tag = TD->getUnderlyingType()->getAsTagDecl()) {
if (Tag->getDeclName() == Name &&
Tag->getDeclContext()->getRedeclContext()
->Equals(TD->getDeclContext()->getRedeclContext())) {
@@ -17959,6 +18049,16 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
Previous.resolveKind();
}
}
+ } else if (auto *RD = dyn_cast<CXXRecordDecl>(PrevDecl);
+ TUK == TagUseKind::Reference && RD &&
+ RD->isInjectedClassName()) {
+ // If lookup found the injected class name, the previous declaration is
+ // the class being injected into.
+ PrevDecl = cast<TagDecl>(RD->getDeclContext());
+ Previous.clear();
+ Previous.addDecl(PrevDecl);
+ Previous.resolveKind();
+ IsInjectedClassName = true;
}
}
@@ -18082,78 +18182,83 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// Diagnose attempts to redefine a tag.
if (TUK == TagUseKind::Definition) {
- if (NamedDecl *Def = PrevTagDecl->getDefinition()) {
- // If we're defining a specialization and the previous definition
- // is from an implicit instantiation, don't emit an error
- // here; we'll catch this in the general case below.
- bool IsExplicitSpecializationAfterInstantiation = false;
- if (isMemberSpecialization) {
- if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Def))
- IsExplicitSpecializationAfterInstantiation =
- RD->getTemplateSpecializationKind() !=
- TSK_ExplicitSpecialization;
- else if (EnumDecl *ED = dyn_cast<EnumDecl>(Def))
- IsExplicitSpecializationAfterInstantiation =
- ED->getTemplateSpecializationKind() !=
- TSK_ExplicitSpecialization;
- }
-
- // Note that clang allows ODR-like semantics for ObjC/C, i.e., do
- // not keep more that one definition around (merge them). However,
- // ensure the decl passes the structural compatibility check in
- // C11 6.2.7/1 (or 6.1.2.6/1 in C89).
- NamedDecl *Hidden = nullptr;
- if (SkipBody &&
- (!hasVisibleDefinition(Def, &Hidden) || getLangOpts().C23)) {
- // There is a definition of this tag, but it is not visible. We
- // explicitly make use of C++'s one definition rule here, and
- // assume that this definition is identical to the hidden one
- // we already have. Make the existing definition visible and
- // use it in place of this one.
- if (!getLangOpts().CPlusPlus) {
- // Postpone making the old definition visible until after we
- // complete parsing the new one and do the structural
- // comparison.
- SkipBody->CheckSameAsPrevious = true;
- SkipBody->New = createTagFromNewDecl();
- SkipBody->Previous = Def;
-
- ProcessDeclAttributeList(S, SkipBody->New, Attrs);
- return Def;
- } else {
- SkipBody->ShouldSkip = true;
- SkipBody->Previous = Def;
- makeMergedDefinitionVisible(Hidden);
- // Carry on and handle it like a normal definition. We'll
- // skip starting the definition later.
- }
- } else if (!IsExplicitSpecializationAfterInstantiation) {
- // A redeclaration in function prototype scope in C isn't
- // visible elsewhere, so merely issue a warning.
- if (!getLangOpts().CPlusPlus && S->containedInPrototypeScope())
- Diag(NameLoc, diag::warn_redefinition_in_param_list) << Name;
- else
- Diag(NameLoc, diag::err_redefinition) << Name;
- notePreviousDefinition(Def,
- NameLoc.isValid() ? NameLoc : KWLoc);
- // If this is a redefinition, recover by making this
- // struct be anonymous, which will make any later
- // references get the previous definition.
- Name = nullptr;
- Previous.clear();
- Invalid = true;
- }
- } else {
+ if (TagDecl *Def = PrevTagDecl->getDefinition()) {
// If the type is currently being defined, complain
// about a nested redefinition.
- auto *TD = Context.getTagDeclType(PrevTagDecl)->getAsTagDecl();
- if (TD->isBeingDefined()) {
+ if (Def->isBeingDefined()) {
Diag(NameLoc, diag::err_nested_redefinition) << Name;
Diag(PrevTagDecl->getLocation(),
diag::note_previous_definition);
Name = nullptr;
Previous.clear();
Invalid = true;
+ } else {
+ // If we're defining a specialization and the previous
+ // definition is from an implicit instantiation, don't emit an
+ // error here; we'll catch this in the general case below.
+ bool IsExplicitSpecializationAfterInstantiation = false;
+ if (isMemberSpecialization) {
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Def))
+ IsExplicitSpecializationAfterInstantiation =
+ RD->getTemplateSpecializationKind() !=
+ TSK_ExplicitSpecialization;
+ else if (EnumDecl *ED = dyn_cast<EnumDecl>(Def))
+ IsExplicitSpecializationAfterInstantiation =
+ ED->getTemplateSpecializationKind() !=
+ TSK_ExplicitSpecialization;
+ }
+
+ // Note that clang allows ODR-like semantics for ObjC/C, i.e.,
+        // do not keep more than one definition around (merge them).
+ // However, ensure the decl passes the structural compatibility
+ // check in C11 6.2.7/1 (or 6.1.2.6/1 in C89).
+ NamedDecl *Hidden = nullptr;
+ bool HiddenDefVisible = false;
+ if (SkipBody &&
+ (isRedefinitionAllowedFor(Def, &Hidden, HiddenDefVisible) ||
+ getLangOpts().C23)) {
+ // There is a definition of this tag, but it is not visible.
+ // We explicitly make use of C++'s one definition rule here,
+ // and assume that this definition is identical to the hidden
+ // one we already have. Make the existing definition visible
+ // and use it in place of this one.
+ if (!getLangOpts().CPlusPlus) {
+ // Postpone making the old definition visible until after we
+ // complete parsing the new one and do the structural
+ // comparison.
+ SkipBody->CheckSameAsPrevious = true;
+ SkipBody->New = createTagFromNewDecl();
+ SkipBody->Previous = Def;
+
+ ProcessDeclAttributeList(S, SkipBody->New, Attrs);
+ return Def;
+ }
+
+ SkipBody->ShouldSkip = true;
+ SkipBody->Previous = Def;
+ if (!HiddenDefVisible && Hidden)
+ makeMergedDefinitionVisible(Hidden);
+ // Carry on and handle it like a normal definition. We'll
+ // skip starting the definition later.
+
+ } else if (!IsExplicitSpecializationAfterInstantiation) {
+ // A redeclaration in function prototype scope in C isn't
+ // visible elsewhere, so merely issue a warning.
+ if (!getLangOpts().CPlusPlus &&
+ S->containedInPrototypeScope())
+ Diag(NameLoc, diag::warn_redefinition_in_param_list)
+ << Name;
+ else
+ Diag(NameLoc, diag::err_redefinition) << Name;
+ notePreviousDefinition(Def,
+ NameLoc.isValid() ? NameLoc : KWLoc);
+ // If this is a redefinition, recover by making this
+ // struct be anonymous, which will make any later
+ // references get the previous definition.
+ Name = nullptr;
+ Previous.clear();
+ Invalid = true;
+ }
}
}
@@ -18324,14 +18429,14 @@ CreateNewDecl:
(IsTypeSpecifier || IsTemplateParamOrArg) &&
TUK == TagUseKind::Definition) {
Diag(New->getLocation(), diag::err_type_defined_in_type_specifier)
- << Context.getTagDeclType(New);
+ << Context.getCanonicalTagType(New);
Invalid = true;
}
if (!Invalid && getLangOpts().CPlusPlus && TUK == TagUseKind::Definition &&
DC->getDeclKind() == Decl::Enum) {
Diag(New->getLocation(), diag::err_type_defined_in_enum)
- << Context.getTagDeclType(New);
+ << Context.getCanonicalTagType(New);
Invalid = true;
}
@@ -18412,7 +18517,8 @@ CreateNewDecl:
// In C23 mode, if the declaration is complete, we do not want to
// diagnose.
if (!getLangOpts().C23 || TUK != TagUseKind::Definition)
- Diag(Loc, diag::warn_decl_in_param_list) << Context.getTagDeclType(New);
+ Diag(Loc, diag::warn_decl_in_param_list)
+ << Context.getCanonicalTagType(New);
}
}
@@ -18437,14 +18543,20 @@ CreateNewDecl:
if (PrevDecl)
CheckRedeclarationInModule(New, PrevDecl);
- if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip))
- New->startDefinition();
+ if (TUK == TagUseKind::Definition) {
+ if (!SkipBody || !SkipBody->ShouldSkip) {
+ New->startDefinition();
+ } else {
+ New->setCompleteDefinition();
+ New->demoteThisDefinitionToDeclaration();
+ }
+ }
ProcessDeclAttributeList(S, New, Attrs);
AddPragmaAttributes(S, New);
// If this has an identifier, add it to the scope stack.
- if (TUK == TagUseKind::Friend) {
+ if (TUK == TagUseKind::Friend || IsInjectedClassName) {
// We might be replacing an existing declaration in the lookup tables;
// if so, borrow its access specifier.
if (PrevDecl)
@@ -18564,14 +18676,12 @@ void Sema::ActOnStartCXXMemberDeclarations(
// as if it were a public member name.
CXXRecordDecl *InjectedClassName = CXXRecordDecl::Create(
Context, Record->getTagKind(), CurContext, Record->getBeginLoc(),
- Record->getLocation(), Record->getIdentifier(),
- /*PrevDecl=*/nullptr,
- /*DelayTypeCreation=*/true);
- Context.getTypeDeclType(InjectedClassName, Record);
+ Record->getLocation(), Record->getIdentifier());
InjectedClassName->setImplicit();
InjectedClassName->setAccess(AS_public);
if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate())
InjectedClassName->setDescribedClassTemplate(Template);
+
PushOnScopeChains(InjectedClassName, S);
assert(InjectedClassName->isInjectedClassName() &&
"Broken injected-class-name");
@@ -19000,17 +19110,16 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
if (!InvalidDecl && getLangOpts().CPlusPlus) {
if (Record->isUnion()) {
- if (const RecordType *RT = EltTy->getAs<RecordType>()) {
- CXXRecordDecl* RDecl = cast<CXXRecordDecl>(RT->getDecl());
- if (RDecl->getDefinition()) {
- // C++ [class.union]p1: An object of a class with a non-trivial
- // constructor, a non-trivial copy constructor, a non-trivial
- // destructor, or a non-trivial copy assignment operator
- // cannot be a member of a union, nor can an array of such
- // objects.
- if (CheckNontrivialField(NewFD))
- NewFD->setInvalidDecl();
- }
+ if (const auto *RD = EltTy->getAsCXXRecordDecl();
+ RD && (RD->isBeingDefined() || RD->isCompleteDefinition())) {
+
+ // C++ [class.union]p1: An object of a class with a non-trivial
+ // constructor, a non-trivial copy constructor, a non-trivial
+ // destructor, or a non-trivial copy assignment operator
+ // cannot be a member of a union, nor can an array of such
+ // objects.
+ if (CheckNontrivialField(NewFD))
+ NewFD->setInvalidDecl();
}
// C++ [class.union]p1: If a union contains a member of reference type,
@@ -19066,54 +19175,51 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) {
return false;
QualType EltTy = Context.getBaseElementType(FD->getType());
- if (const RecordType *RT = EltTy->getAs<RecordType>()) {
- CXXRecordDecl *RDecl = cast<CXXRecordDecl>(RT->getDecl());
- if (RDecl->getDefinition()) {
- // We check for copy constructors before constructors
- // because otherwise we'll never get complaints about
- // copy constructors.
-
- CXXSpecialMemberKind member = CXXSpecialMemberKind::Invalid;
- // We're required to check for any non-trivial constructors. Since the
- // implicit default constructor is suppressed if there are any
- // user-declared constructors, we just need to check that there is a
- // trivial default constructor and a trivial copy constructor. (We don't
- // worry about move constructors here, since this is a C++98 check.)
- if (RDecl->hasNonTrivialCopyConstructor())
- member = CXXSpecialMemberKind::CopyConstructor;
- else if (!RDecl->hasTrivialDefaultConstructor())
- member = CXXSpecialMemberKind::DefaultConstructor;
- else if (RDecl->hasNonTrivialCopyAssignment())
- member = CXXSpecialMemberKind::CopyAssignment;
- else if (RDecl->hasNonTrivialDestructor())
- member = CXXSpecialMemberKind::Destructor;
-
- if (member != CXXSpecialMemberKind::Invalid) {
- if (!getLangOpts().CPlusPlus11 &&
- getLangOpts().ObjCAutoRefCount && RDecl->hasObjectMember()) {
- // Objective-C++ ARC: it is an error to have a non-trivial field of
- // a union. However, system headers in Objective-C programs
- // occasionally have Objective-C lifetime objects within unions,
- // and rather than cause the program to fail, we make those
- // members unavailable.
- SourceLocation Loc = FD->getLocation();
- if (getSourceManager().isInSystemHeader(Loc)) {
- if (!FD->hasAttr<UnavailableAttr>())
- FD->addAttr(UnavailableAttr::CreateImplicit(Context, "",
- UnavailableAttr::IR_ARCFieldWithOwnership, Loc));
- return false;
- }
+ if (const auto *RDecl = EltTy->getAsCXXRecordDecl();
+ RDecl && (RDecl->isBeingDefined() || RDecl->isCompleteDefinition())) {
+ // We check for copy constructors before constructors
+ // because otherwise we'll never get complaints about
+ // copy constructors.
+
+ CXXSpecialMemberKind member = CXXSpecialMemberKind::Invalid;
+ // We're required to check for any non-trivial constructors. Since the
+ // implicit default constructor is suppressed if there are any
+ // user-declared constructors, we just need to check that there is a
+ // trivial default constructor and a trivial copy constructor. (We don't
+ // worry about move constructors here, since this is a C++98 check.)
+ if (RDecl->hasNonTrivialCopyConstructor())
+ member = CXXSpecialMemberKind::CopyConstructor;
+ else if (!RDecl->hasTrivialDefaultConstructor())
+ member = CXXSpecialMemberKind::DefaultConstructor;
+ else if (RDecl->hasNonTrivialCopyAssignment())
+ member = CXXSpecialMemberKind::CopyAssignment;
+ else if (RDecl->hasNonTrivialDestructor())
+ member = CXXSpecialMemberKind::Destructor;
+
+ if (member != CXXSpecialMemberKind::Invalid) {
+ if (!getLangOpts().CPlusPlus11 && getLangOpts().ObjCAutoRefCount &&
+ RDecl->hasObjectMember()) {
+ // Objective-C++ ARC: it is an error to have a non-trivial field of
+ // a union. However, system headers in Objective-C programs
+ // occasionally have Objective-C lifetime objects within unions,
+ // and rather than cause the program to fail, we make those
+ // members unavailable.
+ SourceLocation Loc = FD->getLocation();
+ if (getSourceManager().isInSystemHeader(Loc)) {
+ if (!FD->hasAttr<UnavailableAttr>())
+ FD->addAttr(UnavailableAttr::CreateImplicit(
+ Context, "", UnavailableAttr::IR_ARCFieldWithOwnership, Loc));
+ return false;
}
-
- Diag(
- FD->getLocation(),
- getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_nontrivial_union_or_anon_struct_member
- : diag::err_illegal_union_or_anon_struct_member)
- << FD->getParent()->isUnion() << FD->getDeclName() << member;
- DiagnoseNontrivial(RDecl, member);
- return !getLangOpts().CPlusPlus11;
}
+
+ Diag(FD->getLocation(),
+ getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_nontrivial_union_or_anon_struct_member
+ : diag::err_illegal_union_or_anon_struct_member)
+ << FD->getParent()->isUnion() << FD->getDeclName() << member;
+ DiagnoseNontrivial(RDecl, member);
+ return !getLangOpts().CPlusPlus11;
}
}
@@ -19389,11 +19495,9 @@ bool Sema::EntirelyFunctionPointers(const RecordDecl *Record) {
return PointeeType.getDesugaredType(Context)->isFunctionType();
}
// If a member is a struct entirely of function pointers, that counts too.
- if (const RecordType *RT = FieldType->getAs<RecordType>()) {
- const RecordDecl *Record = RT->getDecl();
- if (Record->isStruct() && EntirelyFunctionPointers(Record))
- return true;
- }
+ if (const auto *Record = FieldType->getAsRecordDecl();
+ Record && Record->isStruct() && EntirelyFunctionPointers(Record))
+ return true;
return false;
};
@@ -19547,8 +19651,8 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
continue;
- } else if (const RecordType *FDTTy = FDTy->getAs<RecordType>()) {
- if (Record && FDTTy->getDecl()->hasFlexibleArrayMember()) {
+ } else if (const auto *RD = FDTy->getAsRecordDecl()) {
+ if (Record && RD->hasFlexibleArrayMember()) {
// A type which contains a flexible array member is considered to be a
// flexible array member.
Record->setHasFlexibleArrayMember(true);
@@ -19574,9 +19678,9 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
      // Ivars cannot have abstract class types
FD->setInvalidDecl();
}
- if (Record && FDTTy->getDecl()->hasObjectMember())
+ if (Record && RD->hasObjectMember())
Record->setHasObjectMember(true);
- if (Record && FDTTy->getDecl()->hasVolatileMember())
+ if (Record && RD->hasVolatileMember())
Record->setHasVolatileMember(true);
} else if (FDTy->isObjCObjectType()) {
    /// A field cannot be an Objective-C object
@@ -19607,8 +19711,8 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
Record->setHasObjectMember(true);
else if (Context.getAsArrayType(FD->getType())) {
QualType BaseType = Context.getBaseElementType(FD->getType());
- if (BaseType->isRecordType() &&
- BaseType->castAs<RecordType>()->getDecl()->hasObjectMember())
+ if (const auto *RD = BaseType->getAsRecordDecl();
+ RD && RD->hasObjectMember())
Record->setHasObjectMember(true);
else if (BaseType->isObjCObjectPointerType() ||
BaseType.isObjCGCStrong())
@@ -19640,8 +19744,8 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
Record->setHasNonTrivialToPrimitiveDestructCUnion(true);
}
- if (const auto *RT = FT->getAs<RecordType>()) {
- if (RT->getDecl()->getArgPassingRestrictions() ==
+ if (const auto *RD = FT->getAsRecordDecl()) {
+ if (RD->getArgPassingRestrictions() ==
RecordArgPassingKind::CanNeverPassInRegs)
Record->setArgPassingRestrictions(
RecordArgPassingKind::CanNeverPassInRegs);
@@ -19652,6 +19756,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
Q && Q.isAddressDiscriminated()) {
Record->setArgPassingRestrictions(
RecordArgPassingKind::CanNeverPassInRegs);
+ Record->setNonTrivialToPrimitiveCopy(true);
}
}
@@ -20179,9 +20284,10 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
// different from T:
// - every enumerator of every member of class T that is an unscoped
// enumerated type
- if (getLangOpts().CPlusPlus && !TheEnumDecl->isScoped())
- DiagnoseClassNameShadow(TheEnumDecl->getDeclContext(),
- DeclarationNameInfo(Id, IdLoc));
+ if (getLangOpts().CPlusPlus && !TheEnumDecl->isScoped() &&
+ DiagnoseClassNameShadow(TheEnumDecl->getDeclContext(),
+ DeclarationNameInfo(Id, IdLoc)))
+ return nullptr;
EnumConstantDecl *New =
CheckEnumConstant(TheEnumDecl, LastEnumConst, IdLoc, Id, Val);
@@ -20407,7 +20513,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDeclX, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attrs) {
EnumDecl *Enum = cast<EnumDecl>(EnumDeclX);
- QualType EnumType = Context.getTypeDeclType(Enum);
+ CanQualType EnumType = Context.getCanonicalTagType(Enum);
ProcessDeclAttributeList(S, Enum, Attrs);
ProcessAPINotes(Enum);
@@ -20750,3 +20856,13 @@ bool Sema::shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee) {
return LangOpts.CUDA && !LangOpts.CUDAIsDevice &&
CUDA().IdentifyTarget(Callee) == CUDAFunctionTarget::Global;
}
+
+bool Sema::isRedefinitionAllowedFor(NamedDecl *D, NamedDecl **Suggested,
+ bool &Visible) {
+ Visible = hasVisibleDefinition(D, Suggested);
+  // Redefining D in the **current** TU is allowed if D is invisible or if D
+  // is defined in the global module of another module unit. We don't check
+  // here whether D is in the global module, as redefinitions in named modules
+  // are checked later, with a better diagnostic message.
+ return D->isInAnotherModuleUnit() || !Visible;
+}
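
A minimal sketch of the case the new Sema::isRedefinitionAllowedFor helper is
meant to tolerate (the file and function names below are hypothetical, not
part of this patch): a definition repeated through a header attached to the
global module fragment of another module unit is merged rather than
diagnosed, while a definition visible in the current unit still conflicts.

    // answer.h -- textually included by several translation units
    inline int answer() { return 42; }

    // m.cppm
    module;
    #include "answer.h"        // attached to m's global module fragment
    export module m;

    // user.cpp
    import m;
    #include "answer.h"        // repeats answer(); allowed and merged, since
                               // the prior definition lives in another
                               // module unit
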
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index f5f18b0..3ded60c 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -169,7 +169,7 @@ static bool isIntOrBool(Expr *Exp) {
// Check to see if the type is a smart pointer of some kind. We assume
// it's a smart pointer if it defines both operator-> and operator*.
-static bool threadSafetyCheckIsSmartPointer(Sema &S, const RecordType* RT) {
+static bool threadSafetyCheckIsSmartPointer(Sema &S, const RecordDecl *Record) {
auto IsOverloadedOperatorPresent = [&S](const RecordDecl *Record,
OverloadedOperatorKind Op) {
DeclContextLookupResult Result =
@@ -177,7 +177,6 @@ static bool threadSafetyCheckIsSmartPointer(Sema &S, const RecordType* RT) {
return !Result.empty();
};
- const RecordDecl *Record = RT->getDecl();
bool foundStarOperator = IsOverloadedOperatorPresent(Record, OO_Star);
bool foundArrowOperator = IsOverloadedOperatorPresent(Record, OO_Arrow);
if (foundStarOperator && foundArrowOperator)
@@ -212,14 +211,14 @@ static bool threadSafetyCheckIsPointer(Sema &S, const Decl *D,
if (QT->isAnyPointerType())
return true;
- if (const auto *RT = QT->getAs<RecordType>()) {
+ if (const auto *RD = QT->getAsRecordDecl()) {
// If it's an incomplete type, it could be a smart pointer; skip it.
// (We don't want to force template instantiation if we can avoid it,
// since that would alter the order in which templates are instantiated.)
- if (RT->isIncompleteType())
+ if (!RD->isCompleteDefinition())
return true;
- if (threadSafetyCheckIsSmartPointer(S, RT))
+ if (threadSafetyCheckIsSmartPointer(S, RD))
return true;
}
@@ -229,13 +228,13 @@ static bool threadSafetyCheckIsPointer(Sema &S, const Decl *D,
/// Checks that the passed in QualType either is of RecordType or points
/// to RecordType. Returns the relevant RecordType, null if it does not exist.
-static const RecordType *getRecordType(QualType QT) {
- if (const auto *RT = QT->getAs<RecordType>())
- return RT;
+static const RecordDecl *getRecordDecl(QualType QT) {
+ if (const auto *RD = QT->getAsRecordDecl())
+ return RD;
- // Now check if we point to record type.
- if (const auto *PT = QT->getAs<PointerType>())
- return PT->getPointeeType()->getAs<RecordType>();
+ // Now check if we point to a record.
+ if (const auto *PT = QT->getAsCanonical<PointerType>())
+ return PT->getPointeeType()->getAsRecordDecl();
return nullptr;
}
@@ -257,34 +256,34 @@ static bool checkRecordDeclForAttr(const RecordDecl *RD) {
}
static bool checkRecordTypeForCapability(Sema &S, QualType Ty) {
- const RecordType *RT = getRecordType(Ty);
+ const auto *RD = getRecordDecl(Ty);
- if (!RT)
+ if (!RD)
return false;
// Don't check for the capability if the class hasn't been defined yet.
- if (RT->isIncompleteType())
+ if (!RD->isCompleteDefinition())
return true;
// Allow smart pointers to be used as capability objects.
// FIXME -- Check the type that the smart pointer points to.
- if (threadSafetyCheckIsSmartPointer(S, RT))
+ if (threadSafetyCheckIsSmartPointer(S, RD))
return true;
- return checkRecordDeclForAttr<CapabilityAttr>(RT->getDecl());
+ return checkRecordDeclForAttr<CapabilityAttr>(RD);
}
static bool checkRecordTypeForScopedCapability(Sema &S, QualType Ty) {
- const RecordType *RT = getRecordType(Ty);
+ const auto *RD = getRecordDecl(Ty);
- if (!RT)
+ if (!RD)
return false;
// Don't check for the capability if the class hasn't been defined yet.
- if (RT->isIncompleteType())
+ if (!RD->isCompleteDefinition())
return true;
- return checkRecordDeclForAttr<ScopedLockableAttr>(RT->getDecl());
+ return checkRecordDeclForAttr<ScopedLockableAttr>(RD);
}
static bool checkTypedefTypeForCapability(QualType Ty) {
@@ -399,10 +398,10 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
ArgTy = DRE->getDecl()->getType();
// First see if we can just cast to record type, or pointer to record type.
- const RecordType *RT = getRecordType(ArgTy);
+ const auto *RD = getRecordDecl(ArgTy);
// Now check if we index into a record type function param.
- if(!RT && ParamIdxOk) {
+ if (!RD && ParamIdxOk) {
const auto *FD = dyn_cast<FunctionDecl>(D);
const auto *IL = dyn_cast<IntegerLiteral>(ArgExp);
if(FD && IL) {
@@ -1254,8 +1253,8 @@ bool Sema::isValidPointerAttrType(QualType T, bool RefOkay) {
// The nonnull attribute, and other similar attributes, can be applied to a
// transparent union that contains a pointer type.
if (const RecordType *UT = T->getAsUnionType()) {
- if (UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
- RecordDecl *UD = UT->getDecl();
+ RecordDecl *UD = UT->getOriginalDecl()->getDefinitionOrSelf();
+ if (UD->hasAttr<TransparentUnionAttr>()) {
for (const auto *I : UD->fields()) {
QualType QT = I->getType();
if (QT->isAnyPointerType() || QT->isBlockPointerType())
@@ -1987,6 +1986,11 @@ void clang::inferNoReturnAttr(Sema &S, const Decl *D) {
isKnownToAlwaysThrow(FD)) {
NonConstFD->addAttr(InferredNoReturnAttr::CreateImplicit(S.Context));
+    // [[noreturn]] can only be applied to lambdas since C++23
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD);
+ MD && !S.getLangOpts().CPlusPlus23 && isLambdaCallOperator(MD))
+ return;
+
// Emit a diagnostic suggesting the function being marked [[noreturn]].
S.Diag(FD->getLocation(), diag::warn_suggest_noreturn_function)
<< /*isFunction=*/0 << FD;
@@ -3560,7 +3564,9 @@ static void handleFormatArgAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
Ty = getFunctionOrMethodResultType(D);
// replace instancetype with the class type
- auto Instancetype = S.Context.getObjCInstanceTypeDecl()->getTypeForDecl();
+ auto *Instancetype = cast<TypedefType>(S.Context.getTypedefType(
+ ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
+ S.Context.getObjCInstanceTypeDecl()));
if (Ty->getAs<TypedefType>() == Instancetype)
if (auto *OMD = dyn_cast<ObjCMethodDecl>(D))
if (auto *Interface = OMD->getClassInterface())
@@ -3630,7 +3636,7 @@ static void handleInitPriorityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
QualType T = cast<VarDecl>(D)->getType();
if (S.Context.getAsArrayType(T))
T = S.Context.getBaseElementType(T);
- if (!T->getAs<RecordType>()) {
+ if (!T->isRecordType()) {
S.Diag(AL.getLoc(), diag::err_init_priority_object_attr);
AL.setInvalid();
return;
@@ -4154,7 +4160,7 @@ static void handleTransparentUnionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
RecordDecl *RD = nullptr;
const auto *TD = dyn_cast<TypedefNameDecl>(D);
if (TD && TD->getUnderlyingType()->isUnionType())
- RD = TD->getUnderlyingType()->getAsUnionType()->getDecl();
+ RD = TD->getUnderlyingType()->getAsRecordDecl();
else
RD = dyn_cast<RecordDecl>(D);
@@ -4507,7 +4513,7 @@ void Sema::CheckAlignasUnderalignment(Decl *D) {
if (const auto *VD = dyn_cast<ValueDecl>(D)) {
UnderlyingTy = DiagTy = VD->getType();
} else {
- UnderlyingTy = DiagTy = Context.getTagDeclType(cast<TagDecl>(D));
+ UnderlyingTy = DiagTy = Context.getCanonicalTagType(cast<TagDecl>(D));
if (const auto *ED = dyn_cast<EnumDecl>(D))
UnderlyingTy = ED->getIntegerType();
}
@@ -4727,14 +4733,14 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
// GCC allows 'mode' attribute on enumeration types (even incomplete), except
// for vector modes. So, 'enum X __attribute__((mode(QI)));' forms a complete
// type, 'enum { A } __attribute__((mode(V4SI)))' is rejected.
- if ((isa<EnumDecl>(D) || OldElemTy->getAs<EnumType>()) &&
+ if ((isa<EnumDecl>(D) || OldElemTy->isEnumeralType()) &&
VectorSize.getBoolValue()) {
Diag(AttrLoc, diag::err_enum_mode_vector_type) << Name << CI.getRange();
return;
}
bool IntegralOrAnyEnumType = (OldElemTy->isIntegralOrEnumerationType() &&
!OldElemTy->isBitIntType()) ||
- OldElemTy->getAs<EnumType>();
+ OldElemTy->isEnumeralType();
if (!OldElemTy->getAs<BuiltinType>() && !OldElemTy->isComplexType() &&
!IntegralOrAnyEnumType)
@@ -7054,6 +7060,9 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
case ParsedAttr::AT_EnumExtensibility:
handleEnumExtensibilityAttr(S, D, AL);
break;
+ case ParsedAttr::AT_SYCLExternal:
+ handleSimpleAttribute<SYCLExternalAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_SYCLKernelEntryPoint:
S.SYCL().handleKernelEntryPointAttr(D, AL);
break;
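
A minimal sketch of what the inferNoReturnAttr change above suppresses (the
lambda is hypothetical): before C++23 there is no way to spell [[noreturn]]
on a lambda, so suggesting it for an always-throwing call operator was not
actionable.

    #include <stdexcept>

    auto fail = [](const char *msg) -> int {
      throw std::runtime_error(msg);   // known to always throw
    };
    // C++20: with this patch, no warn_suggest_noreturn_function diagnostic
    // is emitted for the lambda's call operator.
    // C++23: '[] [[noreturn]] (const char *msg) -> int { ... }' is valid
    // syntax, so the suggestion can still fire.
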
diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp
index f5b4614..4b0dead 100644
--- a/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/clang/lib/Sema/SemaDeclCXX.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "TypeLocBuilder.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
@@ -1101,15 +1102,13 @@ static std::string printTemplateArgs(const PrintingPolicy &PrintingPolicy,
return std::string(OS.str());
}
-static bool lookupStdTypeTraitMember(Sema &S, LookupResult &TraitMemberLookup,
- SourceLocation Loc, StringRef Trait,
- TemplateArgumentListInfo &Args,
- unsigned DiagID) {
+static QualType getStdTrait(Sema &S, SourceLocation Loc, StringRef Trait,
+ TemplateArgumentListInfo &Args, unsigned DiagID) {
auto DiagnoseMissing = [&] {
if (DiagID)
S.Diag(Loc, DiagID) << printTemplateArgs(S.Context.getPrintingPolicy(),
Args, /*Params*/ nullptr);
- return true;
+ return QualType();
};
// FIXME: Factor out duplication with lookupPromiseType in SemaCoroutine.
@@ -1122,12 +1121,12 @@ static bool lookupStdTypeTraitMember(Sema &S, LookupResult &TraitMemberLookup,
// missing specialization, because this can only fail if the user has been
// declaring their own names in namespace std or we don't support the
// standard library implementation in use.
- LookupResult Result(S, &S.PP.getIdentifierTable().get(Trait),
- Loc, Sema::LookupOrdinaryName);
+ LookupResult Result(S, &S.PP.getIdentifierTable().get(Trait), Loc,
+ Sema::LookupOrdinaryName);
if (!S.LookupQualifiedName(Result, Std))
return DiagnoseMissing();
if (Result.isAmbiguous())
- return true;
+ return QualType();
ClassTemplateDecl *TraitTD = Result.getAsSingle<ClassTemplateDecl>();
if (!TraitTD) {
@@ -1135,28 +1134,31 @@ static bool lookupStdTypeTraitMember(Sema &S, LookupResult &TraitMemberLookup,
NamedDecl *Found = *Result.begin();
S.Diag(Loc, diag::err_std_type_trait_not_class_template) << Trait;
S.Diag(Found->getLocation(), diag::note_declared_at);
- return true;
+ return QualType();
}
// Build the template-id.
- QualType TraitTy = S.CheckTemplateIdType(TemplateName(TraitTD), Loc, Args);
+ QualType TraitTy = S.CheckTemplateIdType(ElaboratedTypeKeyword::None,
+ TemplateName(TraitTD), Loc, Args);
if (TraitTy.isNull())
- return true;
+ return QualType();
+
if (!S.isCompleteType(Loc, TraitTy)) {
if (DiagID)
S.RequireCompleteType(
Loc, TraitTy, DiagID,
printTemplateArgs(S.Context.getPrintingPolicy(), Args,
TraitTD->getTemplateParameters()));
- return true;
+ return QualType();
}
+ return TraitTy;
+}
- CXXRecordDecl *RD = TraitTy->getAsCXXRecordDecl();
+static bool lookupMember(Sema &S, CXXRecordDecl *RD,
+ LookupResult &MemberLookup) {
assert(RD && "specialization of class template is not a class?");
-
- // Look up the member of the trait type.
- S.LookupQualifiedName(TraitMemberLookup, RD);
- return TraitMemberLookup.isAmbiguous();
+ S.LookupQualifiedName(MemberLookup, RD);
+ return MemberLookup.isAmbiguous();
}
static TemplateArgumentLoc
@@ -1178,17 +1180,20 @@ static IsTupleLike isTupleLike(Sema &S, SourceLocation Loc, QualType T,
EnterExpressionEvaluationContext ContextRAII(
S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
- DeclarationName Value = S.PP.getIdentifierInfo("value");
- LookupResult R(S, Value, Loc, Sema::LookupOrdinaryName);
-
// Form template argument list for tuple_size<T>.
TemplateArgumentListInfo Args(Loc, Loc);
Args.addArgument(getTrivialTypeTemplateArgument(S, Loc, T));
+ QualType TraitTy = getStdTrait(S, Loc, "tuple_size", Args, /*DiagID=*/0);
+ if (TraitTy.isNull())
+ return IsTupleLike::NotTupleLike;
+
+ DeclarationName Value = S.PP.getIdentifierInfo("value");
+ LookupResult R(S, Value, Loc, Sema::LookupOrdinaryName);
+
// If there's no tuple_size specialization or the lookup of 'value' is empty,
// it's not tuple-like.
- if (lookupStdTypeTraitMember(S, R, Loc, "tuple_size", Args, /*DiagID*/ 0) ||
- R.empty())
+ if (lookupMember(S, TraitTy->getAsCXXRecordDecl(), R) || R.empty())
return IsTupleLike::NotTupleLike;
// If we get this far, we've committed to the tuple interpretation, but
@@ -1228,11 +1233,15 @@ static QualType getTupleLikeElementType(Sema &S, SourceLocation Loc,
getTrivialIntegralTemplateArgument(S, Loc, S.Context.getSizeType(), I));
Args.addArgument(getTrivialTypeTemplateArgument(S, Loc, T));
+ QualType TraitTy =
+ getStdTrait(S, Loc, "tuple_element", Args,
+ diag::err_decomp_decl_std_tuple_element_not_specialized);
+ if (TraitTy.isNull())
+ return QualType();
+
DeclarationName TypeDN = S.PP.getIdentifierInfo("type");
LookupResult R(S, TypeDN, Loc, Sema::LookupOrdinaryName);
- if (lookupStdTypeTraitMember(
- S, R, Loc, "tuple_element", Args,
- diag::err_decomp_decl_std_tuple_element_not_specialized))
+ if (lookupMember(S, TraitTy->getAsCXXRecordDecl(), R))
return QualType();
auto *TD = R.getAsSingle<TypeDecl>();
@@ -1246,7 +1255,8 @@ static QualType getTupleLikeElementType(Sema &S, SourceLocation Loc,
return QualType();
}
- return S.Context.getTypeDeclType(TD);
+ NestedNameSpecifier Qualifier(TraitTy.getTypePtr());
+ return S.Context.getTypeDeclType(ElaboratedTypeKeyword::None, Qualifier, TD);
}
namespace {
@@ -1363,10 +1373,13 @@ static bool checkTupleLikeDecomposition(Sema &S,
S.BuildReferenceType(T, E.get()->isLValue(), Loc, B->getDeclName());
if (RefType.isNull())
return true;
- auto *RefVD = VarDecl::Create(
- S.Context, Src->getDeclContext(), Loc, Loc,
- B->getDeclName().getAsIdentifierInfo(), RefType,
- S.Context.getTrivialTypeSourceInfo(T, Loc), Src->getStorageClass());
+
+ // Don't give this VarDecl a TypeSourceInfo, since this is a synthesized
+ // entity and this type was never written in source code.
+ auto *RefVD =
+ VarDecl::Create(S.Context, Src->getDeclContext(), Loc, Loc,
+ B->getDeclName().getAsIdentifierInfo(), RefType,
+ /*TInfo=*/nullptr, Src->getStorageClass());
RefVD->setLexicalDeclContext(Src->getLexicalDeclContext());
RefVD->setTSCSpec(Src->getTSCSpec());
RefVD->setImplicit();
@@ -1452,7 +1465,7 @@ static DeclAccessPair findDecomposableBaseClass(Sema &S, SourceLocation Loc,
}
// ... [accessible, implied by other rules] base class of E.
- S.CheckBaseClassAccess(Loc, BaseType, S.Context.getRecordType(RD),
+ S.CheckBaseClassAccess(Loc, BaseType, S.Context.getCanonicalTagType(RD),
*BestPath, diag::err_decomp_decl_inaccessible_base);
AS = BestPath->Access;
@@ -1528,8 +1541,8 @@ static bool checkMemberDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
const auto *RD = cast_or_null<CXXRecordDecl>(BasePair.getDecl());
if (!RD)
return true;
- QualType BaseType = S.Context.getQualifiedType(S.Context.getRecordType(RD),
- DecompType.getQualifiers());
+ QualType BaseType = S.Context.getQualifiedType(
+ S.Context.getCanonicalTagType(RD), DecompType.getQualifiers());
auto *DD = cast<DecompositionDecl>(Src);
unsigned NumFields = llvm::count_if(
@@ -2138,7 +2151,7 @@ static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
static bool CheckConstexprCtorInitializer(Sema &SemaRef,
const FunctionDecl *Dcl,
FieldDecl *Field,
- llvm::SmallSet<Decl*, 16> &Inits,
+ llvm::SmallPtrSet<Decl *, 16> &Inits,
bool &Diagnosed,
Sema::CheckConstexprKind Kind) {
// In C++20 onwards, there's nothing to check for validity.
@@ -2174,7 +2187,7 @@ static bool CheckConstexprCtorInitializer(Sema &SemaRef,
return false;
}
} else if (Field->isAnonymousStructOrUnion()) {
- const RecordDecl *RD = Field->getType()->castAs<RecordType>()->getDecl();
+ const auto *RD = Field->getType()->castAsRecordDecl();
for (auto *I : RD->fields())
// If an anonymous union contains an anonymous struct of which any member
// is initialized, all members must be initialized.
@@ -2457,7 +2470,7 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
// Check initialization of non-static data members. Base classes are
// always initialized so do not need to be checked. Dependent bases
// might not have initializers in the member initializer list.
- llvm::SmallSet<Decl*, 16> Inits;
+ llvm::SmallPtrSet<Decl *, 16> Inits;
for (const auto *I: Constructor->inits()) {
if (FieldDecl *FD = I->getMember())
Inits.insert(FD);
@@ -2666,7 +2679,9 @@ void Sema::DiagnoseImmediateEscalatingReason(FunctionDecl *FD) {
return DynamicRecursiveASTVisitor::TraverseCXXConstructorDecl(Ctr);
}
- bool TraverseType(QualType T) override { return true; }
+ bool TraverseType(QualType T, bool TraverseQualifier) override {
+ return true;
+ }
bool VisitBlockExpr(BlockExpr *T) override { return true; }
} Visitor(*this, FD);
@@ -2907,9 +2922,7 @@ NoteIndirectBases(ASTContext &Context, IndirectBaseSet &Set,
{
// Even though the incoming type is a base, it might not be
// a class -- it could be a template parm, for instance.
- if (auto Rec = Type->getAs<RecordType>()) {
- auto Decl = Rec->getAsCXXRecordDecl();
-
+ if (const auto *Decl = Type->getAsCXXRecordDecl()) {
// Iterate over its bases.
for (const auto &BaseSpec : Decl->bases()) {
QualType Base = Context.getCanonicalType(BaseSpec.getType())
@@ -2968,8 +2981,7 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class,
if (Bases.size() > 1)
NoteIndirectBases(Context, IndirectBaseTypes, NewBaseType);
- if (const RecordType *Record = NewBaseType->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ if (const auto *RD = NewBaseType->getAsCXXRecordDecl()) {
if (Class->isInterface() &&
(!RD->isInterfaceLike() ||
KnownBase->getAccessSpecifier() != AS_public)) {
@@ -3050,7 +3062,7 @@ bool Sema::IsDerivedFrom(SourceLocation Loc, CXXRecordDecl *Derived,
// FIXME: In a modules build, do we need the entire path to be visible for us
// to be able to use the inheritance relationship?
- if (!isCompleteType(Loc, Context.getTypeDeclType(Derived)) &&
+ if (!isCompleteType(Loc, Context.getCanonicalTagType(Derived)) &&
!Derived->isBeingDefined())
return false;
@@ -3205,7 +3217,8 @@ std::string Sema::getAmbiguousPathsDisplayString(CXXBasePaths &Paths) {
// We haven't displayed a path to this particular base
// class subobject yet.
PathDisplayStr += "\n ";
- PathDisplayStr += Context.getTypeDeclType(Paths.getOrigin()).getAsString();
+ PathDisplayStr += QualType(Context.getCanonicalTagType(Paths.getOrigin()))
+ .getAsString();
for (CXXBasePath::const_iterator Element = Path->begin();
Element != Path->end(); ++Element)
PathDisplayStr += " -> " + Element->Base->getType().getAsString();
@@ -4245,7 +4258,7 @@ static bool FindBaseInitializer(Sema &SemaRef,
CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
/*DetectVirtual=*/false);
if (SemaRef.IsDerivedFrom(ClassDecl->getLocation(),
- SemaRef.Context.getTypeDeclType(ClassDecl),
+ SemaRef.Context.getCanonicalTagType(ClassDecl),
BaseType, Paths)) {
for (CXXBasePaths::paths_iterator Path = Paths.begin();
Path != Paths.end(); ++Path) {
@@ -4481,7 +4494,7 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
if (getLangOpts().MSVCCompat && !getLangOpts().CPlusPlus20) {
if (auto UnqualifiedBase = R.getAsSingle<ClassTemplateDecl>()) {
auto *TempSpec = cast<TemplateSpecializationType>(
- UnqualifiedBase->getInjectedClassNameSpecialization());
+ UnqualifiedBase->getCanonicalInjectedSpecializationType(Context));
TemplateName TN = TempSpec->getTemplateName();
for (auto const &Base : ClassDecl->bases()) {
auto BaseTemplate =
@@ -4545,14 +4558,36 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
}
if (BaseType.isNull()) {
- BaseType = getElaboratedType(ElaboratedTypeKeyword::None, SS,
- Context.getTypeDeclType(TyD));
MarkAnyDeclReferenced(TyD->getLocation(), TyD, /*OdrUse=*/false);
- TInfo = Context.CreateTypeSourceInfo(BaseType);
- ElaboratedTypeLoc TL = TInfo->getTypeLoc().castAs<ElaboratedTypeLoc>();
- TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(IdLoc);
- TL.setElaboratedKeywordLoc(SourceLocation());
- TL.setQualifierLoc(SS.getWithLocInContext(Context));
+
+ TypeLocBuilder TLB;
+ // FIXME: This is missing building the UsingType for TyD, if any.
+ if (const auto *TD = dyn_cast<TagDecl>(TyD)) {
+ BaseType = Context.getTagType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), TD, /*OwnsTag=*/false);
+ auto TL = TLB.push<TagTypeLoc>(BaseType);
+ TL.setElaboratedKeywordLoc(SourceLocation());
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TL.setNameLoc(IdLoc);
+ } else if (auto *TN = dyn_cast<TypedefNameDecl>(TyD)) {
+ BaseType = Context.getTypedefType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), TN);
+ TLB.push<TypedefTypeLoc>(BaseType).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context), IdLoc);
+ } else if (auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(TyD)) {
+ BaseType = Context.getUnresolvedUsingType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), UD);
+ TLB.push<UnresolvedUsingTypeLoc>(BaseType).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context), IdLoc);
+ } else {
+ // FIXME: What else can appear here?
+ assert(SS.isEmpty());
+ BaseType = Context.getTypeDeclType(TyD);
+ TLB.pushTypeSpec(BaseType).setNameLoc(IdLoc);
+ }
+ TInfo = TLB.getTypeSourceInfo(Context, BaseType);
}
}
@@ -4661,10 +4696,12 @@ Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
Args = MultiExprArg(ParenList->getExprs(), ParenList->getNumExprs());
}
+ CanQualType ClassType = Context.getCanonicalTagType(ClassDecl);
+
SourceRange InitRange = Init->getSourceRange();
// Initialize the object.
- InitializedEntity DelegationEntity = InitializedEntity::InitializeDelegation(
- QualType(ClassDecl->getTypeForDecl(), 0));
+ InitializedEntity DelegationEntity =
+ InitializedEntity::InitializeDelegation(ClassType);
InitializationKind Kind =
InitList ? InitializationKind::CreateDirectList(
NameLoc, Init->getBeginLoc(), Init->getEndLoc())
@@ -4686,9 +4723,8 @@ Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
}
if (DelegationInit.isInvalid()) {
- DelegationInit =
- CreateRecoveryExpr(InitRange.getBegin(), InitRange.getEnd(), Args,
- QualType(ClassDecl->getTypeForDecl(), 0));
+ DelegationInit = CreateRecoveryExpr(InitRange.getBegin(),
+ InitRange.getEnd(), Args, ClassType);
if (DelegationInit.isInvalid())
return true;
} else {
@@ -4753,8 +4789,7 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
const CXXBaseSpecifier *DirectBaseSpec = nullptr;
const CXXBaseSpecifier *VirtualBaseSpec = nullptr;
if (!Dependent) {
- if (Context.hasSameUnqualifiedType(QualType(ClassDecl->getTypeForDecl(),0),
- BaseType))
+ if (declaresSameEntity(ClassDecl, BaseType->getAsCXXRecordDecl()))
return BuildDelegatingInitializer(BaseTInfo, Init, ClassDecl);
FindBaseInitializer(*this, ClassDecl, BaseType, DirectBaseSpec,
@@ -4774,7 +4809,7 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
Dependent = true;
else
return Diag(BaseLoc, diag::err_not_direct_base_or_virtual)
- << BaseType << Context.getTypeDeclType(ClassDecl)
+ << BaseType << Context.getCanonicalTagType(ClassDecl)
<< BaseTInfo->getTypeLoc().getSourceRange();
}
}
@@ -5078,9 +5113,9 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
if (FieldBaseElementType->isReferenceType()) {
SemaRef.Diag(Constructor->getLocation(),
diag::err_uninitialized_member_in_ctor)
- << (int)Constructor->isImplicit()
- << SemaRef.Context.getTagDeclType(Constructor->getParent())
- << 0 << Field->getDeclName();
+ << (int)Constructor->isImplicit()
+ << SemaRef.Context.getCanonicalTagType(Constructor->getParent()) << 0
+ << Field->getDeclName();
SemaRef.Diag(Field->getLocation(), diag::note_declared_at);
return true;
}
@@ -5088,9 +5123,9 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
if (FieldBaseElementType.isConstQualified()) {
SemaRef.Diag(Constructor->getLocation(),
diag::err_uninitialized_member_in_ctor)
- << (int)Constructor->isImplicit()
- << SemaRef.Context.getTagDeclType(Constructor->getParent())
- << 1 << Field->getDeclName();
+ << (int)Constructor->isImplicit()
+ << SemaRef.Context.getCanonicalTagType(Constructor->getParent()) << 1
+ << Field->getDeclName();
SemaRef.Diag(Field->getLocation(), diag::note_declared_at);
return true;
}
@@ -5391,7 +5426,7 @@ static void MarkBaseDestructorsReferenced(Sema &S, SourceLocation Location,
S.CheckDestructorAccess(Base.getBeginLoc(), Dtor,
S.PDiag(diag::err_access_dtor_base)
<< Base.getType() << Base.getSourceRange(),
- S.Context.getTypeDeclType(ClassDecl));
+ S.Context.getCanonicalTagType(ClassDecl));
S.MarkFunctionReferenced(Location, Dtor);
S.DiagnoseUseOfDecl(Dtor, Location);
@@ -5437,7 +5472,8 @@ bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
CXXCtorInitializer *Member = Initializers[i];
if (Member->isBaseInitializer())
- Info.AllBaseFields[Member->getBaseClass()->getAs<RecordType>()] = Member;
+ Info.AllBaseFields[Member->getBaseClass()->getAsCanonical<RecordType>()] =
+ Member;
else {
Info.AllBaseFields[Member->getAnyMember()->getCanonicalDecl()] = Member;
@@ -5465,8 +5501,8 @@ bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
// Push virtual bases before others.
for (auto &VBase : ClassDecl->vbases()) {
- if (CXXCtorInitializer *Value
- = Info.AllBaseFields.lookup(VBase.getType()->getAs<RecordType>())) {
+ if (CXXCtorInitializer *Value = Info.AllBaseFields.lookup(
+ VBase.getType()->getAsCanonical<RecordType>())) {
// [class.base.init]p7, per DR257:
// A mem-initializer where the mem-initializer-id names a virtual base
// class is ignored during execution of a constructor of any class that
@@ -5504,8 +5540,8 @@ bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
if (Base.isVirtual())
continue;
- if (CXXCtorInitializer *Value
- = Info.AllBaseFields.lookup(Base.getType()->getAs<RecordType>())) {
+ if (CXXCtorInitializer *Value = Info.AllBaseFields.lookup(
+ Base.getType()->getAsCanonical<RecordType>())) {
Info.AllToInit.push_back(Value);
} else if (!AnyErrors) {
CXXCtorInitializer *CXXBaseInit;
@@ -5593,10 +5629,10 @@ bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
}
static void PopulateKeysForFields(FieldDecl *Field, SmallVectorImpl<const void*> &IdealInits) {
- if (const RecordType *RT = Field->getType()->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ if (const RecordType *RT = Field->getType()->getAsCanonical<RecordType>()) {
+ const RecordDecl *RD = RT->getOriginalDecl();
if (RD->isAnonymousStructOrUnion()) {
- for (auto *Field : RD->fields())
+ for (auto *Field : RD->getDefinitionOrSelf()->fields())
PopulateKeysForFields(Field, IdealInits);
return;
}
@@ -5928,16 +5964,14 @@ void Sema::MarkVirtualBaseDestructorsReferenced(
if (!Dtor)
continue;
- if (CheckDestructorAccess(
- ClassDecl->getLocation(), Dtor,
- PDiag(diag::err_access_dtor_vbase)
- << Context.getTypeDeclType(ClassDecl) << VBase.getType(),
- Context.getTypeDeclType(ClassDecl)) ==
- AR_accessible) {
+ CanQualType CT = Context.getCanonicalTagType(ClassDecl);
+ if (CheckDestructorAccess(ClassDecl->getLocation(), Dtor,
+ PDiag(diag::err_access_dtor_vbase)
+ << CT << VBase.getType(),
+ CT) == AR_accessible) {
CheckDerivedToBaseConversion(
- Context.getTypeDeclType(ClassDecl), VBase.getType(),
- diag::err_access_dtor_vbase, 0, ClassDecl->getLocation(),
- SourceRange(), DeclarationName(), nullptr);
+ CT, VBase.getType(), diag::err_access_dtor_vbase, 0,
+ ClassDecl->getLocation(), SourceRange(), DeclarationName(), nullptr);
}
MarkFunctionReferenced(Location, Dtor);
@@ -6052,10 +6086,8 @@ struct AbstractUsageInfo {
bool Invalid;
AbstractUsageInfo(Sema &S, CXXRecordDecl *Record)
- : S(S), Record(Record),
- AbstractType(S.Context.getCanonicalType(
- S.Context.getTypeDeclType(Record))),
- Invalid(false) {}
+ : S(S), Record(Record),
+ AbstractType(S.Context.getCanonicalTagType(Record)), Invalid(false) {}
void DiagnoseAbstractType() {
if (Invalid) return;
@@ -6868,8 +6900,8 @@ static bool canPassInRegisters(Sema &S, CXXRecordDecl *D,
bool isAArch64 = S.Context.getTargetInfo().getTriple().isAArch64();
uint64_t TypeSize = isAArch64 ? 128 : 64;
- if (CopyCtorIsTrivial &&
- S.getASTContext().getTypeSize(D->getTypeForDecl()) <= TypeSize)
+ if (CopyCtorIsTrivial && S.getASTContext().getTypeSize(
+ S.Context.getCanonicalTagType(D)) <= TypeSize)
return true;
return false;
}
@@ -6984,9 +7016,12 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E;
++I) {
NamedDecl *D = (*I)->getUnderlyingDecl();
+ // Invalid IndirectFieldDecls have already been diagnosed with
+ // err_anonymous_record_member_redecl in
+ // SemaDecl.cpp:CheckAnonMemberRedeclaration.
if (((isa<FieldDecl>(D) || isa<UnresolvedUsingValueDecl>(D)) &&
Record->hasUserDeclaredConstructor()) ||
- isa<IndirectFieldDecl>(D)) {
+ (isa<IndirectFieldDecl>(D) && !D->isInvalidDecl())) {
Diag((*I)->getLocation(), diag::err_member_name_of_class)
<< D->getDeclName();
break;
@@ -7000,7 +7035,8 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
if ((!dtor || (!dtor->isVirtual() && dtor->getAccess() == AS_public)) &&
!Record->hasAttr<FinalAttr>())
Diag(dtor ? dtor->getLocation() : Record->getLocation(),
- diag::warn_non_virtual_dtor) << Context.getRecordType(Record);
+ diag::warn_non_virtual_dtor)
+ << Context.getCanonicalTagType(Record);
}
if (Record->isAbstract()) {
@@ -7022,7 +7058,7 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
(FA->isSpelledAsSealed() ? " sealed" : " final"));
Diag(Record->getLocation(),
diag::note_final_dtor_non_final_class_silence)
- << Context.getRecordType(Record) << FA->isSpelledAsSealed();
+ << Context.getCanonicalTagType(Record) << FA->isSpelledAsSealed();
}
}
}
@@ -7177,7 +7213,7 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
return true;
};
EffectivelyConstexprDestructor =
- Check(QualType(Record->getTypeForDecl(), 0), Check);
+ Check(Context.getCanonicalTagType(Record), Check);
}
// Define defaulted constexpr virtual functions that override a base class
@@ -7333,7 +7369,7 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
NewDecls.empty() ? NewKind : DeleteKind);
Diag(Record->getLocation(),
diag::err_type_aware_allocator_missing_matching_operator)
- << FoundOperator << Context.getRecordType(Record)
+ << FoundOperator << Context.getCanonicalTagType(Record)
<< MissingOperator;
for (auto MD : NewDecls)
Diag(MD->getLocation(),
@@ -7569,10 +7605,9 @@ static bool defaultedSpecialMemberIsConstexpr(
// class is a constexpr function, and
if (!S.getLangOpts().CPlusPlus23) {
for (const auto &B : ClassDecl->bases()) {
- const RecordType *BaseType = B.getType()->getAs<RecordType>();
- if (!BaseType)
+ auto *BaseClassDecl = B.getType()->getAsCXXRecordDecl();
+ if (!BaseClassDecl)
continue;
- CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
if (!specialMemberIsConstexpr(S, BaseClassDecl, CSM, 0, ConstArg,
InheritedCtor, Inherited))
return false;
@@ -7594,8 +7629,10 @@ static bool defaultedSpecialMemberIsConstexpr(
F->hasInClassInitializer())
continue;
QualType BaseType = S.Context.getBaseElementType(F->getType());
- if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
- CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (const RecordType *RecordTy = BaseType->getAsCanonical<RecordType>()) {
+ CXXRecordDecl *FieldRecDecl =
+ cast<CXXRecordDecl>(RecordTy->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!specialMemberIsConstexpr(S, FieldRecDecl, CSM,
BaseType.getCVRQualifiers(),
ConstArg && !F->isMutable()))
@@ -7782,9 +7819,9 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
ReturnType = Type->getReturnType();
QualType ThisType = MD->getFunctionObjectParameterType();
- QualType DeclType = Context.getTypeDeclType(RD);
- DeclType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
- DeclType, nullptr);
+ QualType DeclType =
+ Context.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, RD, /*OwnsTag=*/false);
DeclType = Context.getAddrSpaceQualType(
DeclType, ThisType.getQualifiers().getAddressSpace());
QualType ExpectedReturnType = Context.getLValueReferenceType(DeclType);
@@ -7819,7 +7856,7 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
if (!ExplicitObjectParameter.isNull() &&
(!ExplicitObjectParameter->isReferenceType() ||
!Context.hasSameType(ExplicitObjectParameter.getNonReferenceType(),
- Context.getRecordType(RD)))) {
+ Context.getCanonicalTagType(RD)))) {
if (DeleteOnTypeMismatch)
ShouldDeleteForTypeMismatch = true;
else {
@@ -8278,7 +8315,7 @@ private:
Best->FoundDecl.getDecl()->isCXXClassMember()) {
QualType ObjectType = Subobj.Kind == Subobject::Member
? Args[0]->getType()
- : S.Context.getRecordType(RD);
+ : S.Context.getCanonicalTagType(RD);
if (!S.isMemberAccessibleForDeletion(
ArgClass, Best->FoundDecl, ObjectType, Subobj.Loc,
Diagnose == ExplainDeleted
@@ -8979,7 +9016,7 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
bool Ok = !IsMethod || FD->hasCXXExplicitFunctionObjectParameter();
QualType ExpectedTy;
if (RD)
- ExpectedTy = Context.getRecordType(RD);
+ ExpectedTy = Context.getCanonicalTagType(RD);
if (auto *Ref = CTy->getAs<LValueReferenceType>()) {
CTy = Ref->getPointeeType();
if (RD)
@@ -9002,7 +9039,7 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
// corresponding defaulted 'operator<=>' already.
if (!FD->isImplicit()) {
if (RD) {
- QualType PlainTy = Context.getRecordType(RD);
+ CanQualType PlainTy = Context.getCanonicalTagType(RD);
QualType RefTy =
Context.getLValueReferenceType(PlainTy.withConst());
Diag(FD->getLocation(), diag::err_defaulted_comparison_param)
@@ -9033,7 +9070,7 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
} else {
// Out of class, require the defaulted comparison to be a friend (of a
// complete type, per CWG2547).
- if (RequireCompleteType(FD->getLocation(), Context.getRecordType(RD),
+ if (RequireCompleteType(FD->getLocation(), Context.getCanonicalTagType(RD),
diag::err_defaulted_comparison_not_friend, int(DCK),
int(1)))
return true;
@@ -9486,15 +9523,15 @@ bool SpecialMemberDeletionInfo::isAccessible(Subobject Subobj,
CXXMethodDecl *target) {
/// If we're operating on a base class, the object type is the
/// type of this special member.
- QualType objectTy;
+ CanQualType objectTy;
AccessSpecifier access = target->getAccess();
if (CXXBaseSpecifier *base = Subobj.dyn_cast<CXXBaseSpecifier*>()) {
- objectTy = S.Context.getTypeDeclType(MD->getParent());
+ objectTy = S.Context.getCanonicalTagType(MD->getParent());
access = CXXRecordDecl::MergeAccess(base->getAccessSpecifier(), access);
// If we're operating on a field, the object type is the type of the field.
} else {
- objectTy = S.Context.getTypeDeclType(target->getParent());
+ objectTy = S.Context.getCanonicalTagType(target->getParent());
}
return S.isMemberAccessibleForDeletion(
@@ -9912,7 +9949,7 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD,
// results in an ambiguity or in a function that is deleted or inaccessible
if (CSM == CXXSpecialMemberKind::Destructor && MD->isVirtual()) {
FunctionDecl *OperatorDelete = nullptr;
- QualType DeallocType = Context.getRecordType(RD);
+ CanQualType DeallocType = Context.getCanonicalTagType(RD);
DeclarationName Name =
Context.DeclarationNames.getCXXOperatorName(OO_Delete);
ImplicitDeallocationParameters IDP = {
@@ -10245,7 +10282,7 @@ static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD,
void Sema::DiagnoseNontrivial(const CXXRecordDecl *RD,
CXXSpecialMemberKind CSM) {
- QualType Ty = Context.getRecordType(RD);
+ CanQualType Ty = Context.getCanonicalTagType(RD);
bool ConstArg = (CSM == CXXSpecialMemberKind::CopyConstructor ||
CSM == CXXSpecialMemberKind::CopyAssignment);
@@ -10293,9 +10330,9 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMemberKind CSM,
ClangABICompat14)) {
if (Diagnose)
Diag(Param0->getLocation(), diag::note_nontrivial_param_type)
- << Param0->getSourceRange() << Param0->getType()
- << Context.getLValueReferenceType(
- Context.getRecordType(RD).withConst());
+ << Param0->getSourceRange() << Param0->getType()
+ << Context.getLValueReferenceType(
+ Context.getCanonicalTagType(RD).withConst());
return false;
}
@@ -10312,8 +10349,8 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMemberKind CSM,
if (!RT || RT->getPointeeType().getCVRQualifiers()) {
if (Diagnose)
Diag(Param0->getLocation(), diag::note_nontrivial_param_type)
- << Param0->getSourceRange() << Param0->getType()
- << Context.getRValueReferenceType(Context.getRecordType(RD));
+ << Param0->getSourceRange() << Param0->getType()
+ << Context.getRValueReferenceType(Context.getCanonicalTagType(RD));
return false;
}
break;
@@ -10432,9 +10469,7 @@ public:
/// method overloads virtual methods in a base class without overriding any,
/// to be used with CXXRecordDecl::lookupInBases().
bool operator()(const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
- RecordDecl *BaseRecord =
- Specifier->getType()->castAs<RecordType>()->getDecl();
-
+ auto *BaseRecord = Specifier->getType()->castAsRecordDecl();
DeclarationName Name = Method->getDeclName();
assert(Name.getNameKind() == DeclarationName::Identifier);
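The castAsRecordDecl() helper used here collapses the old castAs<RecordType>() + getDecl() pair into one call. A sketch under the same post-patch assumption (the helper is introduced by this patch series; baseRecordOf is an illustrative name):

#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"

using namespace clang;

// Sketch: asserting record-decl access in one step.
static RecordDecl *baseRecordOf(QualType T) {
  // Old form: T->castAs<RecordType>()->getDecl();
  // New form, with the same assertion semantics as castAs:
  return T->castAsRecordDecl();
}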
@@ -10589,9 +10624,11 @@ void Sema::checkIllFormedTrivialABIStruct(CXXRecordDecl &RD) {
return;
}
- if (const auto *RT = FT->getBaseElementTypeUnsafe()->getAs<RecordType>())
+ if (const auto *RT =
+ FT->getBaseElementTypeUnsafe()->getAsCanonical<RecordType>())
if (!RT->isDependentType() &&
- !cast<CXXRecordDecl>(RT->getDecl())->canPassInRegisters()) {
+ !cast<CXXRecordDecl>(RT->getOriginalDecl()->getDefinitionOrSelf())
+ ->canPassInRegisters()) {
PrintDiagAndRemoveAttr(5);
return;
}
@@ -10626,7 +10663,7 @@ void Sema::checkIllFormedTrivialABIStruct(CXXRecordDecl &RD) {
void Sema::checkIncorrectVTablePointerAuthenticationAttribute(
CXXRecordDecl &RD) {
- if (RequireCompleteType(RD.getLocation(), Context.getRecordType(&RD),
+ if (RequireCompleteType(RD.getLocation(), Context.getCanonicalTagType(&RD),
diag::err_incomplete_type_vtable_pointer_auth))
return;
@@ -11056,9 +11093,10 @@ void Sema::CheckConstructor(CXXConstructorDecl *Constructor) {
if (!Constructor->isInvalidDecl() &&
Constructor->hasOneParamOrDefaultArgs() &&
!Constructor->isFunctionTemplateSpecialization()) {
- QualType ParamType = Constructor->getParamDecl(0)->getType();
- QualType ClassTy = Context.getTagDeclType(ClassDecl);
- if (Context.getCanonicalType(ParamType).getUnqualifiedType() == ClassTy) {
+ CanQualType ParamType =
+ Constructor->getParamDecl(0)->getType()->getCanonicalTypeUnqualified();
+ CanQualType ClassTy = Context.getCanonicalTagType(ClassDecl);
+ if (ParamType == ClassTy) {
SourceLocation ParamLoc = Constructor->getParamDecl(0)->getLocation();
const char *ConstRef
= Constructor->getParamDecl(0)->getIdentifier() ? "const &"
@@ -11429,8 +11467,7 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
// same object type (or a reference to it), to a (possibly
// cv-qualified) base class of that type (or a reference to it),
// or to (possibly cv-qualified) void.
- QualType ClassType
- = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ CanQualType ClassType = Context.getCanonicalTagType(ClassDecl);
if (const ReferenceType *ConvTypeRef = ConvType->getAs<ReferenceType>())
ConvType = ConvTypeRef->getPointeeType();
if (Conversion->getTemplateSpecializationKind() != TSK_Undeclared &&
@@ -11698,8 +11735,8 @@ bool Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
const QualifiedTemplateName *Qualifiers =
SpecifiedName.getAsQualifiedTemplateName();
assert(Qualifiers && "expected QualifiedTemplate");
- bool SimplyWritten = !Qualifiers->hasTemplateKeyword() &&
- Qualifiers->getQualifier() == nullptr;
+ bool SimplyWritten =
+ !Qualifiers->hasTemplateKeyword() && !Qualifiers->getQualifier();
if (SimplyWritten && TemplateMatches)
AcceptableReturnType = true;
else {
@@ -12004,10 +12041,11 @@ QualType Sema::CheckComparisonCategoryType(ComparisonCategoryType Kind,
// Use an elaborated type for diagnostics which has a name containing the
// prepended 'std' namespace but not any inline namespace names.
auto TyForDiags = [&](ComparisonCategoryInfo *Info) {
- auto *NNS =
- NestedNameSpecifier::Create(Context, nullptr, getStdNamespace());
- return Context.getElaboratedType(ElaboratedTypeKeyword::None, NNS,
- Info->getType());
+ NestedNameSpecifier Qualifier(Context, getStdNamespace(),
+ /*Prefix=*/std::nullopt);
+ return Context.getTagType(ElaboratedTypeKeyword::None, Qualifier,
+ Info->Record,
+ /*OwnsTag=*/false);
};
// Check if we've already successfully checked the comparison category type
@@ -12143,26 +12181,14 @@ static bool isStdClassTemplate(Sema &S, QualType SugaredType, QualType *TypeArg,
ClassTemplateDecl *Template = nullptr;
ArrayRef<TemplateArgument> Arguments;
- {
- const TemplateSpecializationType *TST =
- SugaredType->getAsNonAliasTemplateSpecializationType();
- if (!TST)
- if (const auto *ICN = SugaredType->getAs<InjectedClassNameType>())
- TST = ICN->getInjectedTST();
- if (TST) {
- Template = dyn_cast_or_null<ClassTemplateDecl>(
- TST->getTemplateName().getAsTemplateDecl());
- Arguments = TST->template_arguments();
- } else if (const RecordType *RT = SugaredType->getAs<RecordType>()) {
- ClassTemplateSpecializationDecl *Specialization =
- dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
- if (!Specialization) {
- ReportMatchingNameAsMalformed(RT->getDecl());
- return false;
- }
- Template = Specialization->getSpecializedTemplate();
- Arguments = Specialization->getTemplateArgs().asArray();
- }
+ if (const TemplateSpecializationType *TST =
+ SugaredType->getAsNonAliasTemplateSpecializationType()) {
+ Template = dyn_cast_or_null<ClassTemplateDecl>(
+ TST->getTemplateName().getAsTemplateDecl());
+ Arguments = TST->template_arguments();
+ } else if (const auto *TT = SugaredType->getAs<TagType>()) {
+ Template = TT->getTemplateDecl();
+ Arguments = TT->getTemplateArgs(S.Context);
}
if (!Template) {
@@ -12288,13 +12314,8 @@ static QualType BuildStdClassTemplate(Sema &S, ClassTemplateDecl *CTD,
auto TSI = S.Context.getTrivialTypeSourceInfo(TypeParam, Loc);
Args.addArgument(TemplateArgumentLoc(TemplateArgument(TypeParam), TSI));
- QualType T = S.CheckTemplateIdType(TemplateName(CTD), Loc, Args);
- if (T.isNull())
- return QualType();
-
- return S.Context.getElaboratedType(
- ElaboratedTypeKeyword::None,
- NestedNameSpecifier::Create(S.Context, nullptr, S.getStdNamespace()), T);
+ return S.CheckTemplateIdType(ElaboratedTypeKeyword::None, TemplateName(CTD),
+ Loc, Args);
}
QualType Sema::BuildStdInitializerList(QualType Element, SourceLocation Loc) {
@@ -12436,9 +12457,7 @@ Decl *Sema::ActOnUsingDirective(Scope *S, SourceLocation UsingLoc,
S = S->getDeclParent();
UsingDirectiveDecl *UDir = nullptr;
- NestedNameSpecifier *Qualifier = nullptr;
- if (SS.isSet())
- Qualifier = SS.getScopeRep();
+ NestedNameSpecifier Qualifier = SS.getScopeRep();
// Lookup namespace name.
LookupResult R(*this, NamespcName, IdentLoc, LookupNamespaceName);
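NestedNameSpecifier becomes a value type in this patch, so the isSet()/nullptr dance around CXXScopeSpec::getScopeRep() disappears and emptiness is a plain boolean test. A sketch, assuming the post-patch headers (isGlobalQualifier is an illustrative name):

#include "clang/AST/NestedNameSpecifier.h"
#include "clang/Sema/DeclSpec.h"

using namespace clang;

// Sketch: value-semantic qualifier test, mirroring the hunk above.
static bool isGlobalQualifier(const CXXScopeSpec &SS) {
  // Old form:
  //   NestedNameSpecifier *Q = SS.isSet() ? SS.getScopeRep() : nullptr;
  //   return Q && Q->getKind() == NestedNameSpecifier::Global;
  NestedNameSpecifier Q = SS.getScopeRep(); // empty if SS is not set
  return Q && Q.getKind() == NestedNameSpecifier::Kind::Global;
}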
@@ -12450,14 +12469,16 @@ Decl *Sema::ActOnUsingDirective(Scope *S, SourceLocation UsingLoc,
R.clear();
// Allow "using namespace std;" or "using namespace ::std;" even if
// "std" hasn't been defined yet, for GCC compatibility.
- if ((!Qualifier || Qualifier->getKind() == NestedNameSpecifier::Global) &&
+ if ((!Qualifier ||
+ Qualifier.getKind() == NestedNameSpecifier::Kind::Global) &&
NamespcName->isStr("std")) {
Diag(IdentLoc, diag::ext_using_undefined_std);
R.addDecl(getOrCreateStdNamespace());
R.resolveKind();
}
// Otherwise, attempt typo correction.
- else TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, NamespcName);
+ else
+ TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, NamespcName);
}
if (!R.empty()) {
@@ -12584,7 +12605,7 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S, AccessSpecifier AS,
DiagnoseUnexpandedParameterPack(TargetNameInfo, UPPC_UsingDeclaration))
return nullptr;
} else {
- if (!SS.getScopeRep()->containsUnexpandedParameterPack() &&
+ if (!SS.getScopeRep().containsUnexpandedParameterPack() &&
!TargetNameInfo.containsUnexpandedParameterPack()) {
Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
<< SourceRange(SS.getBeginLoc(), TargetNameInfo.getEndLoc());
@@ -12625,15 +12646,12 @@ Decl *Sema::ActOnUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
return nullptr;
}
- auto *Enum = dyn_cast_if_present<EnumDecl>(EnumTy->getAsTagDecl());
+ auto *Enum = EnumTy->getAsEnumDecl();
if (!Enum) {
Diag(IdentLoc, diag::err_using_enum_not_enum) << EnumTy;
return nullptr;
}
- if (auto *Def = Enum->getDefinition())
- Enum = Def;
-
if (TSI == nullptr)
TSI = Context.getTrivialTypeSourceInfo(EnumTy, IdentLoc);
@@ -12869,7 +12887,7 @@ UsingShadowDecl *Sema::BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
UsingDecl *Using = cast<UsingDecl>(BUD);
bool IsVirtualBase =
isVirtualDirectBase(cast<CXXRecordDecl>(CurContext),
- Using->getQualifier()->getAsRecordDecl());
+ Using->getQualifier().getAsRecordDecl());
Shadow = ConstructorUsingShadowDecl::Create(
Context, CurContext, Using->getLocation(), Using, Orig, IsVirtualBase);
} else {
@@ -12935,7 +12953,7 @@ namespace {
class UsingValidatorCCC final : public CorrectionCandidateCallback {
public:
UsingValidatorCCC(bool HasTypenameKeyword, bool IsInstantiation,
- NestedNameSpecifier *NNS, CXXRecordDecl *RequireMemberOf)
+ NestedNameSpecifier NNS, CXXRecordDecl *RequireMemberOf)
: HasTypenameKeyword(HasTypenameKeyword),
IsInstantiation(IsInstantiation), OldNNS(NNS),
RequireMemberOf(RequireMemberOf) {}
@@ -12962,24 +12980,23 @@ public:
ASTContext &Ctx = ND->getASTContext();
if (!Ctx.getLangOpts().CPlusPlus11)
return false;
- QualType FoundType = Ctx.getRecordType(FoundRecord);
+ CanQualType FoundType = Ctx.getCanonicalTagType(FoundRecord);
// Check that the injected-class-name is named as a member of its own
// type; we don't want to suggest 'using Derived::Base;', since that
// means something else.
- NestedNameSpecifier *Specifier =
- Candidate.WillReplaceSpecifier()
- ? Candidate.getCorrectionSpecifier()
- : OldNNS;
- if (!Specifier->getAsType() ||
- !Ctx.hasSameType(QualType(Specifier->getAsType(), 0), FoundType))
+ NestedNameSpecifier Specifier = Candidate.WillReplaceSpecifier()
+ ? Candidate.getCorrectionSpecifier()
+ : OldNNS;
+ if (Specifier.getKind() != NestedNameSpecifier::Kind::Type ||
+ !Ctx.hasSameType(QualType(Specifier.getAsType(), 0), FoundType))
return false;
// Check that this inheriting constructor declaration actually names a
// direct base class of the current class.
bool AnyDependentBases = false;
if (!findDirectBaseWithType(RequireMemberOf,
- Ctx.getRecordType(FoundRecord),
+ Ctx.getCanonicalTagType(FoundRecord),
AnyDependentBases) &&
!AnyDependentBases)
return false;
@@ -13009,7 +13026,7 @@ public:
private:
bool HasTypenameKeyword;
bool IsInstantiation;
- NestedNameSpecifier *OldNNS;
+ NestedNameSpecifier OldNNS;
CXXRecordDecl *RequireMemberOf;
};
} // end anonymous namespace
@@ -13051,7 +13068,7 @@ NamedDecl *Sema::BuildUsingDeclaration(
if (UsingName.getName().getNameKind() == DeclarationName::CXXConstructorName)
if (auto *RD = dyn_cast<CXXRecordDecl>(CurContext))
UsingName.setName(Context.DeclarationNames.getCXXConstructorName(
- Context.getCanonicalType(Context.getRecordType(RD))));
+ Context.getCanonicalTagType(RD)));
// Do the redeclaration lookup in the current scope.
LookupResult Previous(*this, UsingName, LookupUsingDeclName,
@@ -13154,7 +13171,7 @@ NamedDecl *Sema::BuildUsingDeclaration(
// equal to that of the current context.
if (CurContext->isRecord()) {
R.setBaseObjectType(
- Context.getTypeDeclType(cast<CXXRecordDecl>(CurContext)));
+ Context.getCanonicalTagType(cast<CXXRecordDecl>(CurContext)));
}
LookupQualifiedName(R, LookupContext);
@@ -13220,7 +13237,7 @@ NamedDecl *Sema::BuildUsingDeclaration(
// constructor.
auto *CurClass = cast<CXXRecordDecl>(CurContext);
UsingName.setName(Context.DeclarationNames.getCXXConstructorName(
- Context.getCanonicalType(Context.getRecordType(CurClass))));
+ Context.getCanonicalTagType(CurClass)));
UsingName.setNamedTypeInfo(nullptr);
for (auto *Ctor : LookupConstructors(RD))
R.addDecl(Ctor);
@@ -13366,20 +13383,16 @@ NamedDecl *Sema::BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
bool Sema::CheckInheritingConstructorUsingDecl(UsingDecl *UD) {
assert(!UD->hasTypename() && "expecting a constructor name");
- const Type *SourceType = UD->getQualifier()->getAsType();
- assert(SourceType &&
- "Using decl naming constructor doesn't have type in scope spec.");
+ QualType SourceType(UD->getQualifier().getAsType(), 0);
CXXRecordDecl *TargetClass = cast<CXXRecordDecl>(CurContext);
// Check whether the named type is a direct base class.
bool AnyDependentBases = false;
- auto *Base = findDirectBaseWithType(TargetClass, QualType(SourceType, 0),
- AnyDependentBases);
+ auto *Base =
+ findDirectBaseWithType(TargetClass, SourceType, AnyDependentBases);
if (!Base && !AnyDependentBases) {
- Diag(UD->getUsingLoc(),
- diag::err_using_decl_constructor_not_in_direct_base)
- << UD->getNameInfo().getSourceRange()
- << QualType(SourceType, 0) << TargetClass;
+ Diag(UD->getUsingLoc(), diag::err_using_decl_constructor_not_in_direct_base)
+ << UD->getNameInfo().getSourceRange() << SourceType << TargetClass;
UD->setInvalidDecl();
return true;
}
@@ -13395,7 +13408,7 @@ bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Prev) {
- NestedNameSpecifier *Qual = SS.getScopeRep();
+ NestedNameSpecifier Qual = SS.getScopeRep();
// C++03 [namespace.udecl]p8:
// C++0x [namespace.udecl]p10:
@@ -13410,7 +13423,7 @@ bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
// declaration in the same scope.
// FIXME: How should we check for dependent type-type conflicts at block
// scope?
- if (Qual->isDependent() && !HasTypenameKeyword) {
+ if (Qual.isDependent() && !HasTypenameKeyword) {
for (auto *D : Prev) {
if (!isa<TypeDecl>(D) && !isa<UsingDecl>(D) && !isa<UsingPackDecl>(D)) {
bool OldCouldBeEnumerator =
@@ -13427,13 +13440,12 @@ bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
return false;
}
- const NestedNameSpecifier *CNNS =
- Context.getCanonicalNestedNameSpecifier(Qual);
+ NestedNameSpecifier CNNS = Qual.getCanonical();
for (LookupResult::iterator I = Prev.begin(), E = Prev.end(); I != E; ++I) {
NamedDecl *D = *I;
bool DTypename;
- NestedNameSpecifier *DQual;
+ NestedNameSpecifier DQual = std::nullopt;
if (UsingDecl *UD = dyn_cast<UsingDecl>(D)) {
DTypename = UD->hasTypename();
DQual = UD->getQualifier();
@@ -13454,7 +13466,7 @@ bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
// using decls differ if they name different scopes (but note that
// template instantiation can cause this check to trigger when it
// didn't before instantiation).
- if (CNNS != Context.getCanonicalNestedNameSpecifier(DQual))
+ if (CNNS != DQual.getCanonical())
continue;
Diag(NameLoc, diag::err_using_decl_redeclaration) << SS.getRange();
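Canonicalization likewise moves off ASTContext and onto the specifier itself, and canonical specifiers compare directly. A sketch under the same post-patch assumption (sameScope is an illustrative name):

#include "clang/AST/NestedNameSpecifier.h"

using namespace clang;

// Sketch: comparing two qualifiers up to canonicalization.
static bool sameScope(NestedNameSpecifier A, NestedNameSpecifier B) {
  // Old form: Ctx.getCanonicalNestedNameSpecifier(A) ==
  //           Ctx.getCanonicalNestedNameSpecifier(B)
  return A.getCanonical() == B.getCanonical();
}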
@@ -13910,11 +13922,10 @@ struct SpecialMemberExceptionSpecInfo
}
bool SpecialMemberExceptionSpecInfo::visitBase(CXXBaseSpecifier *Base) {
- auto *RT = Base->getType()->getAs<RecordType>();
- if (!RT)
+ auto *BaseClass = Base->getType()->getAsCXXRecordDecl();
+ if (!BaseClass)
return false;
- auto *BaseClass = cast<CXXRecordDecl>(RT->getDecl());
Sema::SpecialMemberOverloadResult SMOR = lookupInheritedCtor(BaseClass);
if (auto *BaseCtor = SMOR.getMethod()) {
visitSubobjectCall(Base, BaseCtor);
@@ -13938,10 +13949,9 @@ bool SpecialMemberExceptionSpecInfo::visitField(FieldDecl *FD) {
E = S.BuildCXXDefaultInitExpr(Loc, FD).get();
if (E)
ExceptSpec.CalledExpr(E);
- } else if (auto *RT = S.Context.getBaseElementType(FD->getType())
- ->getAs<RecordType>()) {
- visitClassSubobject(cast<CXXRecordDecl>(RT->getDecl()), FD,
- FD->getType().getCVRQualifiers());
+ } else if (auto *RD = S.Context.getBaseElementType(FD->getType())
+ ->getAsCXXRecordDecl()) {
+ visitClassSubobject(RD, FD, FD->getType().getCVRQualifiers());
}
return false;
}
@@ -14003,7 +14013,7 @@ ComputeDefaultedSpecialMemberExceptionSpec(
// attempting to resolve an exception specification before it's known
// at a higher level.
if (S.RequireCompleteType(MD->getLocation(),
- S.Context.getRecordType(ClassDecl),
+ S.Context.getCanonicalTagType(ClassDecl),
diag::err_exception_spec_incomplete_type))
return Info.ExceptSpec;
@@ -14134,8 +14144,7 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
*this, ClassDecl, CXXSpecialMemberKind::DefaultConstructor, false);
// Create the actual constructor declaration.
- CanQualType ClassType
- = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ CanQualType ClassType = Context.getCanonicalTagType(ClassDecl);
SourceLocation ClassLoc = ClassDecl->getLocation();
DeclarationName Name
= Context.DeclarationNames.getCXXConstructorName(ClassType);
@@ -14420,8 +14429,7 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
*this, ClassDecl, CXXSpecialMemberKind::Destructor, false);
// Create the actual destructor declaration.
- CanQualType ClassType
- = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ CanQualType ClassType = Context.getCanonicalTagType(ClassDecl);
SourceLocation ClassLoc = ClassDecl->getLocation();
DeclarationName Name
= Context.DeclarationNames.getCXXDestructorName(ClassType);
@@ -14758,10 +14766,9 @@ buildMemcpyForAssignmentOp(Sema &S, SourceLocation Loc, QualType T,
S.Context, To, UO_AddrOf, S.Context.getPointerType(To->getType()),
VK_PRValue, OK_Ordinary, Loc, false, S.CurFPFeatureOverrides());
- const Type *E = T->getBaseElementTypeUnsafe();
- bool NeedsCollectableMemCpy =
- E->isRecordType() &&
- E->castAs<RecordType>()->getDecl()->hasObjectMember();
+ bool NeedsCollectableMemCpy = false;
+ if (auto *RD = T->getBaseElementTypeUnsafe()->getAsRecordDecl())
+ NeedsCollectableMemCpy = RD->hasObjectMember();
// Create a reference to the __builtin_objc_memmove_collectable function
StringRef MemCpyName = NeedsCollectableMemCpy ?
@@ -14837,9 +14844,7 @@ buildSingleCopyAssignRecursively(Sema &S, SourceLocation Loc, QualType T,
// the class is used (as if by explicit qualification; that is,
// ignoring any possible virtual overriding functions in more derived
// classes);
- if (const RecordType *RecordTy = T->getAs<RecordType>()) {
- CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
-
+ if (auto *ClassDecl = T->getAsCXXRecordDecl()) {
// Look for operator=.
DeclarationName Name
= S.Context.DeclarationNames.getCXXOperatorName(OO_Equal);
@@ -14885,10 +14890,9 @@ buildSingleCopyAssignRecursively(Sema &S, SourceLocation Loc, QualType T,
// reference to operator=; this is required to suppress the virtual
// call mechanism.
CXXScopeSpec SS;
+ // FIXME: Don't canonicalize this.
const Type *CanonicalT = S.Context.getCanonicalType(T.getTypePtr());
- SS.MakeTrivial(S.Context,
- NestedNameSpecifier::Create(S.Context, nullptr, CanonicalT),
- Loc);
+ SS.MakeTrivial(S.Context, NestedNameSpecifier(CanonicalT), Loc);
// Create the reference to operator=.
ExprResult OpEqualRef
@@ -15042,9 +15046,9 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
if (DSM.isAlreadyBeingDeclared())
return nullptr;
- QualType ArgType = Context.getTypeDeclType(ClassDecl);
- ArgType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
- ArgType, nullptr);
+ QualType ArgType = Context.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, ClassDecl,
+ /*OwnsTag=*/false);
LangAS AS = getDefaultCXXMethodAddrSpace();
if (AS != LangAS::Default)
ArgType = Context.getAddrSpaceQualType(ArgType, AS);
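With ElaboratedType folded away, the keyword and qualifier now travel on the tag type node itself, and /*OwnsTag=*/false marks a mere reference to an existing tag rather than a defining occurrence. A sketch of the replacement shape, assuming the post-patch headers (classArgType is an illustrative name):

#include <optional>

#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"

using namespace clang;

// Sketch: building a non-elaborated reference to a class type.
static QualType classArgType(ASTContext &Ctx, CXXRecordDecl *RD) {
  // Old form:
  //   QualType T = Ctx.getTypeDeclType(RD);
  //   return Ctx.getElaboratedType(ElaboratedTypeKeyword::None, nullptr, T);
  return Ctx.getTagType(ElaboratedTypeKeyword::None,
                        /*Qualifier=*/std::nullopt, RD, /*OwnsTag=*/false);
}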
@@ -15303,7 +15307,8 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
// Check for members of reference type; we can't copy those.
if (Field->getType()->isReferenceType()) {
Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
- << Context.getTagDeclType(ClassDecl) << 0 << Field->getDeclName();
+ << Context.getCanonicalTagType(ClassDecl) << 0
+ << Field->getDeclName();
Diag(Field->getLocation(), diag::note_declared_at);
Invalid = true;
continue;
@@ -15311,9 +15316,10 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
// Check for members of const-qualified, non-class type.
QualType BaseType = Context.getBaseElementType(Field->getType());
- if (!BaseType->getAs<RecordType>() && BaseType.isConstQualified()) {
+ if (!BaseType->isRecordType() && BaseType.isConstQualified()) {
Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
- << Context.getTagDeclType(ClassDecl) << 1 << Field->getDeclName();
+ << Context.getCanonicalTagType(ClassDecl) << 1
+ << Field->getDeclName();
Diag(Field->getLocation(), diag::note_declared_at);
Invalid = true;
continue;
@@ -15398,9 +15404,9 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
// Note: The following rules are largely analogous to the move
// constructor rules.
- QualType ArgType = Context.getTypeDeclType(ClassDecl);
- ArgType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
- ArgType, nullptr);
+ QualType ArgType = Context.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, ClassDecl,
+ /*OwnsTag=*/false);
LangAS AS = getDefaultCXXMethodAddrSpace();
if (AS != LangAS::Default)
ArgType = Context.getAddrSpaceQualType(ArgType, AS);
@@ -15690,7 +15696,8 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
// Check for members of reference type; we can't move those.
if (Field->getType()->isReferenceType()) {
Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
- << Context.getTagDeclType(ClassDecl) << 0 << Field->getDeclName();
+ << Context.getCanonicalTagType(ClassDecl) << 0
+ << Field->getDeclName();
Diag(Field->getLocation(), diag::note_declared_at);
Invalid = true;
continue;
@@ -15698,9 +15705,10 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
// Check for members of const-qualified, non-class type.
QualType BaseType = Context.getBaseElementType(Field->getType());
- if (!BaseType->getAs<RecordType>() && BaseType.isConstQualified()) {
+ if (!BaseType->isRecordType() && BaseType.isConstQualified()) {
Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
- << Context.getTagDeclType(ClassDecl) << 1 << Field->getDeclName();
+ << Context.getCanonicalTagType(ClassDecl) << 1
+ << Field->getDeclName();
Diag(Field->getLocation(), diag::note_declared_at);
Invalid = true;
continue;
@@ -15791,10 +15799,10 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
if (DSM.isAlreadyBeingDeclared())
return nullptr;
- QualType ClassType = Context.getTypeDeclType(ClassDecl);
+ QualType ClassType = Context.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, ClassDecl,
+ /*OwnsTag=*/false);
QualType ArgType = ClassType;
- ArgType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
- ArgType, nullptr);
bool Const = ClassDecl->implicitCopyConstructorHasConstParam();
if (Const)
ArgType = ArgType.withConst();
@@ -15938,11 +15946,11 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
if (DSM.isAlreadyBeingDeclared())
return nullptr;
- QualType ClassType = Context.getTypeDeclType(ClassDecl);
+ QualType ClassType = Context.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, ClassDecl,
+ /*OwnsTag=*/false);
QualType ArgType = ClassType;
- ArgType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
- ArgType, nullptr);
LangAS AS = getDefaultCXXMethodAddrSpace();
if (AS != LangAS::Default)
ArgType = Context.getAddrSpaceQualType(ClassType, AS);
@@ -16290,14 +16298,14 @@ ExprResult Sema::BuildCXXConstructExpr(
Constructor);
}
-void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
+void Sema::FinalizeVarWithDestructor(VarDecl *VD, CXXRecordDecl *ClassDecl) {
if (VD->isInvalidDecl()) return;
// If initializing the variable failed, don't also diagnose problems with
// the destructor, they're likely related.
if (VD->getInit() && VD->getInit()->containsErrors())
return;
- CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Record->getDecl());
+ ClassDecl = ClassDecl->getDefinitionOrSelf();
if (ClassDecl->isInvalidDecl()) return;
if (ClassDecl->hasIrrelevantDestructor()) return;
if (ClassDecl->isDependentContext()) return;
@@ -16639,10 +16647,10 @@ static inline bool CheckOperatorNewDeleteTypes(
if (CheckType(SizeParameterIndex, SemaRef.Context.getSizeType(), "size_t"))
return true;
- TypeDecl *StdAlignValTDecl = SemaRef.getStdAlignValT();
- QualType StdAlignValT =
- StdAlignValTDecl ? SemaRef.Context.getTypeDeclType(StdAlignValTDecl)
- : QualType();
+ TagDecl *StdAlignValTDecl = SemaRef.getStdAlignValT();
+ CanQualType StdAlignValT =
+ StdAlignValTDecl ? SemaRef.Context.getCanonicalTagType(StdAlignValTDecl)
+ : CanQualType();
if (CheckType(SizeParameterIndex + 1, StdAlignValT, "std::align_val_t"))
return true;
@@ -16682,8 +16690,8 @@ CheckOperatorDeleteDeclaration(Sema &SemaRef, FunctionDecl *FnDecl) {
auto *MD = dyn_cast<CXXMethodDecl>(FnDecl);
auto ConstructDestroyingDeleteAddressType = [&]() {
assert(MD);
- return SemaRef.Context.getCanonicalType(SemaRef.Context.getPointerType(
- SemaRef.Context.getRecordType(MD->getParent())));
+ return SemaRef.Context.getPointerType(
+ SemaRef.Context.getCanonicalTagType(MD->getParent()));
};
// C++ P2719: A destroying operator delete cannot be type aware
@@ -16722,8 +16730,8 @@ CheckOperatorDeleteDeclaration(Sema &SemaRef, FunctionDecl *FnDecl) {
// function shall be of type void *.
CanQualType ExpectedAddressParamType =
MD && IsPotentiallyDestroyingOperatorDelete(SemaRef, MD)
- ? SemaRef.Context.getCanonicalType(SemaRef.Context.getPointerType(
- SemaRef.Context.getRecordType(MD->getParent())))
+ ? SemaRef.Context.getPointerType(
+ SemaRef.Context.getCanonicalTagType(MD->getParent()))
: SemaRef.Context.VoidPtrTy;
// C++ [basic.stc.dynamic.deallocation]p2:
@@ -16955,9 +16963,9 @@ checkLiteralOperatorTemplateParameterList(Sema &SemaRef,
// first template parameter as its type.
if (PmType && PmArgs && !PmType->isTemplateParameterPack() &&
PmArgs->isTemplateParameterPack()) {
- const TemplateTypeParmType *TArgs =
- PmArgs->getType()->getAs<TemplateTypeParmType>();
- if (TArgs && TArgs->getDepth() == PmType->getDepth() &&
+ if (const auto *TArgs =
+ PmArgs->getType()->getAsCanonical<TemplateTypeParmType>();
+ TArgs && TArgs->getDepth() == PmType->getDepth() &&
TArgs->getIndex() == PmType->getIndex()) {
if (!SemaRef.inTemplateInstantiation())
SemaRef.Diag(TpDecl->getLocation(),
@@ -17304,7 +17312,7 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
Invalid = true;
if (!Invalid && !ExDeclType->isDependentType()) {
- if (const RecordType *recordType = ExDeclType->getAs<RecordType>()) {
+ if (auto *ClassDecl = ExDeclType->getAsCXXRecordDecl()) {
// Insulate this from anything else we might currently be parsing.
EnterExpressionEvaluationContext scope(
*this, ExpressionEvaluationContext::PotentiallyEvaluated);
@@ -17341,7 +17349,7 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
}
// And make sure it's destructible.
- FinalizeVarWithDestructor(ExDecl, recordType);
+ FinalizeVarWithDestructor(ExDecl, ClassDecl);
}
}
}
@@ -17954,27 +17962,14 @@ DeclResult Sema::ActOnTemplatedFriendTag(
/*OOK=*/OffsetOfKind::Outside);
}
+ TypeSourceInfo *TSI = nullptr;
ElaboratedTypeKeyword Keyword
= TypeWithKeyword::getKeywordForTagTypeKind(Kind);
- QualType T = CheckTypenameType(Keyword, TagLoc, QualifierLoc,
- *Name, NameLoc);
+ QualType T = CheckTypenameType(Keyword, TagLoc, QualifierLoc, *Name,
+ NameLoc, &TSI, /*DeducedTSTContext=*/true);
if (T.isNull())
return true;
- TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
- if (isa<DependentNameType>(T)) {
- DependentNameTypeLoc TL =
- TSI->getTypeLoc().castAs<DependentNameTypeLoc>();
- TL.setElaboratedKeywordLoc(TagLoc);
- TL.setQualifierLoc(QualifierLoc);
- TL.setNameLoc(NameLoc);
- } else {
- ElaboratedTypeLoc TL = TSI->getTypeLoc().castAs<ElaboratedTypeLoc>();
- TL.setElaboratedKeywordLoc(TagLoc);
- TL.setQualifierLoc(QualifierLoc);
- TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(NameLoc);
- }
-
FriendDecl *Friend =
FriendDecl::Create(Context, CurContext, NameLoc, TSI, FriendLoc,
EllipsisLoc, TempParamLists);
@@ -17992,7 +17987,8 @@ DeclResult Sema::ActOnTemplatedFriendTag(
collectUnexpandedParameterPacks(QualifierLoc, Unexpanded);
unsigned FriendDeclDepth = TempParamLists.front()->getDepth();
for (UnexpandedParameterPack &U : Unexpanded) {
- if (getDepthAndIndex(U).first >= FriendDeclDepth) {
+ if (std::optional<std::pair<unsigned, unsigned>> DI = getDepthAndIndex(U);
+ DI && DI->first >= FriendDeclDepth) {
auto *ND = dyn_cast<NamedDecl *>(U.first);
if (!ND)
ND = cast<const TemplateTypeParmType *>(U.first)->getDecl();
@@ -18286,7 +18282,7 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
// - There's a non-dependent scope specifier, in which case we
// compute it and do a previous lookup there for a function
// or function template.
- } else if (!SS.getScopeRep()->isDependent()) {
+ } else if (!SS.getScopeRep().isDependent()) {
DC = computeDeclContext(SS);
if (!DC) return nullptr;
@@ -18792,8 +18788,8 @@ bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
// that of B::f, the class type in the return type of D::f shall be
// complete at the point of declaration of D::f or shall be the class
// type D.
- if (const RecordType *RT = NewClassTy->getAs<RecordType>()) {
- if (!RT->isBeingDefined() &&
+ if (const auto *RD = NewClassTy->getAsCXXRecordDecl()) {
+ if (!RD->isBeingDefined() &&
RequireCompleteType(New->getLocation(), NewClassTy,
diag::err_covariant_return_incomplete,
New->getDeclName()))
@@ -19148,8 +19144,7 @@ void Sema::MarkVirtualMembersReferenced(SourceLocation Loc,
return;
for (const auto &I : RD->bases()) {
- const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *Base = I.getType()->castAsCXXRecordDecl();
if (Base->getNumVBases() == 0)
continue;
MarkVirtualMembersReferenced(Loc, Base);
diff --git a/clang/lib/Sema/SemaDeclObjC.cpp b/clang/lib/Sema/SemaDeclObjC.cpp
index bbd1049..98eb5af 100644
--- a/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/clang/lib/Sema/SemaDeclObjC.cpp
@@ -591,12 +591,13 @@ void SemaObjC::ActOnSuperClassOfClassInterface(
// The previous declaration was not a class decl. Check if we have a
// typedef. If we do, get the underlying class type.
if (const TypedefNameDecl *TDecl =
- dyn_cast_or_null<TypedefNameDecl>(PrevDecl)) {
+ dyn_cast_or_null<TypedefNameDecl>(PrevDecl)) {
QualType T = TDecl->getUnderlyingType();
if (T->isObjCObjectType()) {
if (NamedDecl *IDecl = T->castAs<ObjCObjectType>()->getInterface()) {
SuperClassDecl = dyn_cast<ObjCInterfaceDecl>(IDecl);
- SuperClassType = Context.getTypeDeclType(TDecl);
+ SuperClassType = Context.getTypeDeclType(
+ ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt, TDecl);
// This handles the following case:
// @interface NewI @end
@@ -1389,11 +1390,9 @@ class ObjCTypeArgOrProtocolValidatorCCC final
// Make sure the type is something we would accept as a type
// argument.
- auto type = Context.getTypeDeclType(typeDecl);
- if (type->isObjCObjectPointerType() ||
- type->isBlockPointerType() ||
+ if (CanQualType type = Context.getCanonicalTypeDeclType(typeDecl);
type->isDependentType() ||
- type->isObjCObjectType())
+ isa<ObjCObjectPointerType, BlockPointerType, ObjCObjectType>(type))
return true;
return false;
@@ -1589,7 +1588,9 @@ void SemaObjC::actOnObjCTypeArgsOrProtocolQualifiers(
unsigned diagID; // unused
QualType type;
if (auto *actualTypeDecl = dyn_cast<TypeDecl *>(typeDecl))
- type = Context.getTypeDeclType(actualTypeDecl);
+ type =
+ Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, actualTypeDecl);
else
type = Context.getObjCInterfaceType(cast<ObjCInterfaceDecl *>(typeDecl));
TypeSourceInfo *parsedTSInfo = Context.getTrivialTypeSourceInfo(type, loc);
@@ -3231,8 +3232,10 @@ static bool tryMatchRecordTypes(ASTContext &Context,
assert(lt && rt && lt != rt);
if (!isa<RecordType>(lt) || !isa<RecordType>(rt)) return false;
- RecordDecl *left = cast<RecordType>(lt)->getDecl();
- RecordDecl *right = cast<RecordType>(rt)->getDecl();
+ RecordDecl *left =
+ cast<RecordType>(lt)->getOriginalDecl()->getDefinitionOrSelf();
+ RecordDecl *right =
+ cast<RecordType>(rt)->getOriginalDecl()->getDefinitionOrSelf();
// Require union-hood to match.
if (left->isUnion() != right->isUnion()) return false;
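TagType::getDecl() is split in two here: the type node keeps the declaration it was written against, and callers that need the complete entity must ask for it explicitly, as the hunk above does. A sketch (definitionFor is an illustrative name):

#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"

using namespace clang;

// Sketch: recovering the definition from a RecordType after this patch.
static RecordDecl *definitionFor(const RecordType *RT) {
  // Old form: RT->getDecl();
  // New form: the written-against decl, upgraded to its definition if any.
  return RT->getOriginalDecl()->getDefinitionOrSelf();
}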
@@ -3845,8 +3848,8 @@ SemaObjC::ObjCContainerKind SemaObjC::getObjCContainerKind() const {
static bool IsVariableSizedType(QualType T) {
if (T->isIncompleteArrayType())
return true;
- const auto *RecordTy = T->getAs<RecordType>();
- return (RecordTy && RecordTy->getDecl()->hasFlexibleArrayMember());
+ const auto *RD = T->getAsRecordDecl();
+ return RD && RD->hasFlexibleArrayMember();
}
static void DiagnoseVariableSizedIvars(Sema &S, ObjCContainerDecl *OCD) {
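Unlike castAsRecordDecl(), the getAsRecordDecl() used above is the null-returning variant, folding getAs<RecordType>() + getDecl() into one null-safe query. A sketch mirroring IsVariableSizedType:

#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"

using namespace clang;

// Sketch: null-safe record query; returns false for non-record types.
static bool hasFlexibleArray(QualType T) {
  // Old form: const auto *RT = T->getAs<RecordType>();
  //           return RT && RT->getDecl()->hasFlexibleArrayMember();
  const RecordDecl *RD = T->getAsRecordDecl();
  return RD && RD->hasFlexibleArrayMember();
}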
@@ -3891,13 +3894,11 @@ static void DiagnoseVariableSizedIvars(Sema &S, ObjCContainerDecl *OCD) {
<< ivar->getDeclName() << IvarTy
<< TagTypeKind::Class; // Use "class" for Obj-C.
IsInvalidIvar = true;
- } else if (const RecordType *RecordTy = IvarTy->getAs<RecordType>()) {
- if (RecordTy->getDecl()->hasFlexibleArrayMember()) {
- S.Diag(ivar->getLocation(),
- diag::err_objc_variable_sized_type_not_at_end)
- << ivar->getDeclName() << IvarTy;
- IsInvalidIvar = true;
- }
+ } else if (const auto *RD = IvarTy->getAsRecordDecl();
+ RD && RD->hasFlexibleArrayMember()) {
+ S.Diag(ivar->getLocation(), diag::err_objc_variable_sized_type_not_at_end)
+ << ivar->getDeclName() << IvarTy;
+ IsInvalidIvar = true;
}
if (IsInvalidIvar) {
S.Diag(ivar->getNextIvar()->getLocation(),
@@ -5534,10 +5535,8 @@ void SemaObjC::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
AllToInit.push_back(Member);
// Be sure that the destructor is accessible and is marked as referenced.
- if (const RecordType *RecordTy =
- Context.getBaseElementType(Field->getType())
- ->getAs<RecordType>()) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (auto *RD = Context.getBaseElementType(Field->getType())
+ ->getAsCXXRecordDecl()) {
if (CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(RD)) {
SemaRef.MarkFunctionReferenced(Field->getLocation(), Destructor);
SemaRef.CheckDestructorAccess(
diff --git a/clang/lib/Sema/SemaExceptionSpec.cpp b/clang/lib/Sema/SemaExceptionSpec.cpp
index 0a6cea8..552c929 100644
--- a/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -163,8 +163,8 @@ bool Sema::CheckSpecifiedExceptionType(QualType &T, SourceRange Range) {
DiagID = diag::ext_incomplete_in_exception_spec;
ReturnValueOnError = false;
}
- if (!(PointeeT->isRecordType() &&
- PointeeT->castAs<RecordType>()->isBeingDefined()) &&
+ if (auto *RD = PointeeT->getAsRecordDecl();
+ !(RD && RD->isBeingDefined()) &&
RequireCompleteType(Range.getBegin(), PointeeT, DiagID, Kind, Range))
return ReturnValueOnError;
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 6793d6d..8565b18 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/Attrs.inc"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
@@ -1528,8 +1529,12 @@ void Sema::checkEnumArithmeticConversions(Expr *LHS, Expr *RHS,
// are ill-formed.
if (getLangOpts().CPlusPlus26)
DiagID = diag::warn_conv_mixed_enum_types_cxx26;
- else if (!L->castAs<EnumType>()->getDecl()->hasNameForLinkage() ||
- !R->castAs<EnumType>()->getDecl()->hasNameForLinkage()) {
+ else if (!L->castAsCanonical<EnumType>()
+ ->getOriginalDecl()
+ ->hasNameForLinkage() ||
+ !R->castAsCanonical<EnumType>()
+ ->getOriginalDecl()
+ ->hasNameForLinkage()) {
// If either enumeration type is unnamed, it's less likely that the
// user cares about this, but this situation is still deprecated in
// C++2a. Use a different warning group.
@@ -2531,7 +2536,7 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
if (isa<CXXRecordDecl>(DC)) {
if (ExplicitTemplateArgs) {
if (LookupTemplateName(
- R, S, SS, Context.getRecordType(cast<CXXRecordDecl>(DC)),
+ R, S, SS, Context.getCanonicalTagType(cast<CXXRecordDecl>(DC)),
/*EnteringContext*/ false, TemplateNameIsRequired,
/*RequiredTemplateKind*/ nullptr, /*AllowTypoCorrection*/ true))
return true;
@@ -2607,11 +2612,8 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
}
R.addDecl(ND);
if (getLangOpts().CPlusPlus && ND->isCXXClassMember()) {
- CXXRecordDecl *Record = nullptr;
- if (Corrected.getCorrectionSpecifier()) {
- const Type *Ty = Corrected.getCorrectionSpecifier()->getAsType();
- Record = Ty->getAsCXXRecordDecl();
- }
+ CXXRecordDecl *Record =
+ Corrected.getCorrectionSpecifier().getAsRecordDecl();
if (!Record)
Record = cast<CXXRecordDecl>(
ND->getDeclContext()->getRedeclContext());
@@ -2707,8 +2709,7 @@ recoverFromMSUnqualifiedLookup(Sema &S, ASTContext &Context,
// Synthesize a fake NNS that points to the derived class. This will
// perform name lookup during template instantiation.
CXXScopeSpec SS;
- auto *NNS =
- NestedNameSpecifier::Create(Context, nullptr, RD->getTypeForDecl());
+ NestedNameSpecifier NNS(Context.getCanonicalTagType(RD)->getTypePtr());
SS.MakeTrivial(Context, NNS, SourceRange(Loc, Loc));
return DependentScopeDeclRefExpr::Create(
Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo,
@@ -2831,8 +2832,7 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
// If this name wasn't predeclared and if this is not a function
// call, diagnose the problem.
- DefaultFilterCCC DefaultValidator(II, SS.isValid() ? SS.getScopeRep()
- : nullptr);
+ DefaultFilterCCC DefaultValidator(II, SS.getScopeRep());
DefaultValidator.IsAddressOfOperand = IsAddressOfOperand;
assert((!CCC || CCC->IsAddressOfOperand == IsAddressOfOperand) &&
"Typo correction callback misconfigured");
@@ -2943,8 +2943,28 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
}
if (const TypeDecl *TD = R.getAsSingle<TypeDecl>()) {
- QualType Ty = Context.getTypeDeclType(TD);
- QualType ET = getElaboratedType(ElaboratedTypeKeyword::None, SS, Ty);
+ QualType ET;
+ TypeLocBuilder TLB;
+ if (auto *TagD = dyn_cast<TagDecl>(TD)) {
+ ET = SemaRef.Context.getTagType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), TagD,
+ /*OwnsTag=*/false);
+ auto TL = TLB.push<TagTypeLoc>(ET);
+ TL.setElaboratedKeywordLoc(SourceLocation());
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TL.setNameLoc(NameInfo.getLoc());
+ } else if (auto *TypedefD = dyn_cast<TypedefNameDecl>(TD)) {
+ ET = SemaRef.Context.getTypedefType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), TypedefD);
+ TLB.push<TypedefTypeLoc>(ET).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context), NameInfo.getLoc());
+ } else {
+ // FIXME: What else can appear here?
+ ET = SemaRef.Context.getTypeDeclType(TD);
+ TLB.pushTypeSpec(ET).setNameLoc(NameInfo.getLoc());
+ assert(SS.isEmpty());
+ }
// Diagnose a missing typename if this resolved unambiguously to a type in
// a dependent context. If we can recover with a type, downgrade this to
@@ -2965,13 +2985,6 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
D << FixItHint::CreateInsertion(Loc, "typename ");
// Recover by pretending this was an elaborated type.
- TypeLocBuilder TLB;
- TLB.pushTypeSpec(Ty).setNameLoc(NameInfo.getLoc());
-
- ElaboratedTypeLoc QTL = TLB.push<ElaboratedTypeLoc>(ET);
- QTL.setElaboratedKeywordLoc(SourceLocation());
- QTL.setQualifierLoc(SS.getWithLocInContext(Context));
-
*RecoveryTSI = TLB.getTypeSourceInfo(Context, ET);
return ExprEmpty();
@@ -2987,11 +3000,10 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
return BuildDeclarationNameExpr(SS, R, /*ADL=*/false);
}
-ExprResult
-Sema::PerformObjectMemberConversion(Expr *From,
- NestedNameSpecifier *Qualifier,
- NamedDecl *FoundDecl,
- NamedDecl *Member) {
+ExprResult Sema::PerformObjectMemberConversion(Expr *From,
+ NestedNameSpecifier Qualifier,
+ NamedDecl *FoundDecl,
+ NamedDecl *Member) {
const auto *RD = dyn_cast<CXXRecordDecl>(Member->getDeclContext());
if (!RD)
return From;
@@ -3002,7 +3014,7 @@ Sema::PerformObjectMemberConversion(Expr *From,
QualType FromType = From->getType();
bool PointerConversions = false;
if (isa<FieldDecl>(Member)) {
- DestRecordType = Context.getCanonicalType(Context.getTypeDeclType(RD));
+ DestRecordType = Context.getCanonicalTagType(RD);
auto FromPtrType = FromType->getAs<PointerType>();
DestRecordType = Context.getAddrSpaceQualType(
DestRecordType, FromPtrType
@@ -3080,8 +3092,8 @@ Sema::PerformObjectMemberConversion(Expr *From,
// x = 17; // error: ambiguous base subobjects
// Derived1::x = 17; // okay, pick the Base subobject of Derived1
// }
- if (Qualifier && Qualifier->getAsType()) {
- QualType QType = QualType(Qualifier->getAsType(), 0);
+ if (Qualifier.getKind() == NestedNameSpecifier::Kind::Type) {
+ QualType QType = QualType(Qualifier.getAsType(), 0);
assert(QType->isRecordType() && "lookup done with non-record type");
QualType QRecordType = QualType(QType->castAs<RecordType>(), 0);
@@ -3248,10 +3260,6 @@ ExprResult Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
return ULE;
}
-static void diagnoseUncapturableValueReferenceOrBinding(Sema &S,
- SourceLocation loc,
- ValueDecl *var);
-
ExprResult Sema::BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD, const TemplateArgumentListInfo *TemplateArgs,
@@ -4483,9 +4491,6 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::BitInt:
case Type::HLSLInlineSpirv:
llvm_unreachable("type class is never variably-modified!");
- case Type::Elaborated:
- T = cast<ElaboratedType>(Ty)->getNamedType();
- break;
case Type::Adjusted:
T = cast<AdjustedType>(Ty)->getOriginalType();
break;
@@ -5877,7 +5882,7 @@ static bool isParenthetizedAndQualifiedAddressOfExpr(Expr *Fn) {
return DRE->hasQualifier();
}
if (auto *OVL = dyn_cast<OverloadExpr>(UO->getSubExpr()->IgnoreParens()))
- return OVL->getQualifier();
+ return bool(OVL->getQualifier());
return false;
}
@@ -7076,7 +7081,7 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
for (unsigned i = 0, e = Args.size(); i != e; i++) {
if (const auto *RT =
dyn_cast<RecordType>(Args[i]->getType().getCanonicalType())) {
- if (RT->getDecl()->isOrContainsUnion())
+ if (RT->getOriginalDecl()->isOrContainsUnion())
Diag(Args[i]->getBeginLoc(), diag::warn_cmse_nonsecure_union)
<< 0 << i;
}
@@ -7818,6 +7823,39 @@ ExprResult Sema::CheckExtVectorCast(SourceRange R, QualType DestTy,
return prepareVectorSplat(DestTy, CastExpr);
}
+/// Check that a call to an alloc_size function specifies sufficient space
+/// for the destination type.
+static void CheckSufficientAllocSize(Sema &S, QualType DestType,
+ const Expr *E) {
+ QualType SourceType = E->getType();
+ if (!DestType->isPointerType() || !SourceType->isPointerType() ||
+ DestType == SourceType)
+ return;
+
+ const auto *CE = dyn_cast<CallExpr>(E->IgnoreParenCasts());
+ if (!CE)
+ return;
+
+ // Find the total size allocated by the function call.
+ if (!CE->getCalleeAllocSizeAttr())
+ return;
+ std::optional<llvm::APInt> AllocSize =
+ CE->evaluateBytesReturnedByAllocSizeCall(S.Context);
+ if (!AllocSize)
+ return;
+ auto Size = CharUnits::fromQuantity(AllocSize->getZExtValue());
+
+ QualType TargetType = DestType->getPointeeType();
+  // Find the destination size. As a special case, function types have a size
+  // of one byte, matching the behavior of the sizeof operator.
+ auto LhsSize = TargetType->isFunctionType()
+ ? CharUnits::One()
+ : S.Context.getTypeSizeInCharsIfKnown(TargetType);
+ if (LhsSize && Size < LhsSize)
+ S.Diag(E->getExprLoc(), diag::warn_alloc_size)
+ << Size.getQuantity() << TargetType << LhsSize->getQuantity();
+}
+
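For the new check, the kind of code it is meant to flag looks like the sketch below; my_malloc and Big are hypothetical, and only the usual alloc_size attribute spelling is assumed:

// Sketch: a cast from an undersized allocation triggers warn_alloc_size.
struct Big { char bytes[64]; };

extern "C" void *my_malloc(unsigned n) __attribute__((alloc_size(1)));

void use() {
  // 16 bytes requested but Big needs 64: the cast path added to
  // ActOnCastExpr diagnoses this. (In C, the void*-to-other-pointer
  // assignment path added later in this file fires as well.)
  Big *p = (Big *)my_malloc(16);
  (void)p;
}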
ExprResult
Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
@@ -7883,6 +7921,8 @@ Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
DiscardMisalignedMemberAddress(castType.getTypePtr(), CastExpr);
+ CheckSufficientAllocSize(*this, castType, CastExpr);
+
return BuildCStyleCastExpr(LParenLoc, castTInfo, RParenLoc, CastExpr);
}
@@ -8430,9 +8470,11 @@ static bool checkVectorResult(Sema &S, QualType CondTy, QualType VecResTy,
QualType CVE = CV->getElementType();
QualType RVE = RV->getElementType();
- if (S.Context.getTypeSize(CVE) != S.Context.getTypeSize(RVE)) {
+ // Boolean vectors are permitted outside of OpenCL mode.
+ if (S.Context.getTypeSize(CVE) != S.Context.getTypeSize(RVE) &&
+ (!CVE->isBooleanType() || S.LangOpts.OpenCL)) {
S.Diag(QuestionLoc, diag::err_conditional_vector_element_size)
- << CondTy << VecResTy;
+ << CondTy << VecResTy;
return true;
}
@@ -8603,15 +8645,11 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// If both operands are the same structure or union type, the result is that
// type.
- if (const RecordType *LHSRT = LHSTy->getAs<RecordType>()) { // C99 6.5.15p3
- if (const RecordType *RHSRT = RHSTy->getAs<RecordType>())
- if (LHSRT->getDecl() == RHSRT->getDecl())
- // "If both the operands have structure or union type, the result has
- // that type." This implies that CV qualifiers are dropped.
- return Context.getCommonSugaredType(LHSTy.getUnqualifiedType(),
- RHSTy.getUnqualifiedType());
- // FIXME: Type of conditional expression must be complete in C mode.
- }
+ // FIXME: Type of conditional expression must be complete in C mode.
+ if (LHSTy->isRecordType() &&
+ Context.hasSameUnqualifiedType(LHSTy, RHSTy)) // C99 6.5.15p3
+ return Context.getCommonSugaredType(LHSTy.getUnqualifiedType(),
+ RHSTy.getUnqualifiedType());
// C99 6.5.15p5: "If both operands have void type, the result has void type."
// The following || allows only one side to be void (a GCC-ism).
@@ -9312,14 +9350,14 @@ AssignConvertType Sema::CheckAssignmentConstraints(QualType LHSType,
// If we have an atomic type, try a non-atomic assignment, then just add an
// atomic qualification step.
if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(LHSType)) {
- AssignConvertType result =
+ AssignConvertType Result =
CheckAssignmentConstraints(AtomicTy->getValueType(), RHS, Kind);
- if (result != AssignConvertType::Compatible)
- return result;
+ if (!IsAssignConvertCompatible(Result))
+ return Result;
if (Kind != CK_NoOp && ConvertRHS)
RHS = ImpCastExprToType(RHS.get(), AtomicTy->getValueType(), Kind);
Kind = CK_NonAtomicToAtomic;
- return AssignConvertType::Compatible;
+ return Result;
}
// If the left-hand side is a reference type, then we are in a
@@ -9674,11 +9712,14 @@ Sema::CheckTransparentUnionArgumentConstraints(QualType ArgType,
// If the ArgType is a Union type, we want to handle a potential
// transparent_union GCC extension.
const RecordType *UT = ArgType->getAsUnionType();
- if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ if (!UT)
+ return AssignConvertType::Incompatible;
+
+ RecordDecl *UD = UT->getOriginalDecl()->getDefinitionOrSelf();
+ if (!UD->hasAttr<TransparentUnionAttr>())
return AssignConvertType::Incompatible;
// The field to initialize within the transparent union.
- RecordDecl *UD = UT->getDecl();
FieldDecl *InitField = nullptr;
// It's compatible if the expression matches any of the fields.
for (auto *it : UD->fields()) {
@@ -9887,6 +9928,12 @@ AssignConvertType Sema::CheckSingleAssignmentConstraints(QualType LHSType,
AssignConvertType result =
CheckAssignmentConstraints(LHSType, RHS, Kind, ConvertRHS);
+ // If assigning a void * created by an allocation function call to some other
+ // type, check that the allocated size is sufficient for that type.
+ if (result != AssignConvertType::Incompatible &&
+ RHS.get()->getType()->isVoidPointerType())
+ CheckSufficientAllocSize(*this, LHSType, RHS.get());
+
// C99 6.5.16.1p2: The value of the right operand is converted to the
// type of the assignment expression.
// CheckAssignmentConstraints allows the left-hand side to be a reference,
@@ -10742,9 +10789,50 @@ static void DiagnoseBadDivideOrRemainderValues(Sema& S, ExprResult &LHS,
<< IsDiv << RHS.get()->getSourceRange());
}
+static void diagnoseScopedEnums(Sema &S, const SourceLocation Loc,
+ const ExprResult &LHS, const ExprResult &RHS,
+ BinaryOperatorKind Opc) {
+ if (!LHS.isUsable() || !RHS.isUsable())
+ return;
+ const Expr *LHSExpr = LHS.get();
+ const Expr *RHSExpr = RHS.get();
+ const QualType LHSType = LHSExpr->getType();
+ const QualType RHSType = RHSExpr->getType();
+ const bool LHSIsScoped = LHSType->isScopedEnumeralType();
+ const bool RHSIsScoped = RHSType->isScopedEnumeralType();
+ if (!LHSIsScoped && !RHSIsScoped)
+ return;
+ if (BinaryOperator::isAssignmentOp(Opc) && LHSIsScoped)
+ return;
+ if (!LHSIsScoped && !LHSType->isIntegralOrUnscopedEnumerationType())
+ return;
+ if (!RHSIsScoped && !RHSType->isIntegralOrUnscopedEnumerationType())
+ return;
+ auto DiagnosticHelper = [&S](const Expr *expr, const QualType type) {
+ SourceLocation BeginLoc = expr->getBeginLoc();
+ QualType IntType = type->castAs<EnumType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->getIntegerType();
+ std::string InsertionString = "static_cast<" + IntType.getAsString() + ">(";
+ S.Diag(BeginLoc, diag::note_no_implicit_conversion_for_scoped_enum)
+ << FixItHint::CreateInsertion(BeginLoc, InsertionString)
+ << FixItHint::CreateInsertion(expr->getEndLoc(), ")");
+ };
+ if (LHSIsScoped) {
+ DiagnosticHelper(LHSExpr, LHSType);
+ }
+ if (RHSIsScoped) {
+ DiagnosticHelper(RHSExpr, RHSType);
+ }
+}
+
QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
- bool IsCompAssign, bool IsDiv) {
+ BinaryOperatorKind Opc) {
+ bool IsCompAssign = Opc == BO_MulAssign || Opc == BO_DivAssign;
+ bool IsDiv = Opc == BO_Div || Opc == BO_DivAssign;
+
checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false);
QualType LHSTy = LHS.get()->getType();
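The note emitted by diagnoseScopedEnums carries fix-it hints that wrap the scoped-enum operand in a cast to its underlying type. A sketch of the user-facing effect (Level and twice are hypothetical):

// Sketch: the fix-it suggested for an invalid scoped-enum operand.
enum class Level { Low = 1, High = 2 };

int twice(Level L) {
  // '2 * L' is ill-formed; the new note suggests inserting the cast:
  return 2 * static_cast<int>(L);
}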
@@ -10772,9 +10860,11 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
-
- if (compType.isNull() || !compType->isArithmeticType())
- return InvalidOperands(Loc, LHS, RHS);
+ if (compType.isNull() || !compType->isArithmeticType()) {
+ QualType ResultTy = InvalidOperands(Loc, LHS, RHS);
+ diagnoseScopedEnums(*this, Loc, LHS, RHS, Opc);
+ return ResultTy;
+ }
if (IsDiv) {
DetectPrecisionLossInComplexDivision(*this, RHS.get()->getType(), Loc);
DiagnoseBadDivideOrRemainderValues(*this, LHS, RHS, Loc, IsDiv);
@@ -10837,8 +10927,12 @@ QualType Sema::CheckRemainderOperands(
if (compType.isNull() ||
(!compType->isIntegerType() &&
- !(getLangOpts().HLSL && compType->isFloatingType())))
- return InvalidOperands(Loc, LHS, RHS);
+ !(getLangOpts().HLSL && compType->isFloatingType()))) {
+ QualType ResultTy = InvalidOperands(Loc, LHS, RHS);
+ diagnoseScopedEnums(*this, Loc, LHS, RHS,
+ IsCompAssign ? BO_RemAssign : BO_Rem);
+ return ResultTy;
+ }
DiagnoseBadDivideOrRemainderValues(*this, LHS, RHS, Loc, false /* IsDiv */);
return compType;
}
@@ -11194,7 +11288,9 @@ QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
} else if (PExp->getType()->isObjCObjectPointerType()) {
isObjCPointer = true;
} else {
- return InvalidOperands(Loc, LHS, RHS);
+ QualType ResultTy = InvalidOperands(Loc, LHS, RHS);
+ diagnoseScopedEnums(*this, Loc, LHS, RHS, Opc);
+ return ResultTy;
}
}
assert(PExp->getType()->isAnyPointerType());
@@ -11251,7 +11347,8 @@ QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
// C99 6.5.6
QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
- QualType* CompLHSTy) {
+ BinaryOperatorKind Opc,
+ QualType *CompLHSTy) {
checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false);
if (LHS.get()->getType()->isVectorType() ||
@@ -11396,12 +11493,14 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
}
}
- return InvalidOperands(Loc, LHS, RHS);
+ QualType ResultTy = InvalidOperands(Loc, LHS, RHS);
+ diagnoseScopedEnums(*this, Loc, LHS, RHS, Opc);
+ return ResultTy;
}
static bool isScopedEnumerationType(QualType T) {
- if (const EnumType *ET = T->getAs<EnumType>())
- return ET->getDecl()->isScoped();
+ if (const EnumType *ET = T->getAsCanonical<EnumType>())
+ return ET->getOriginalDecl()->isScoped();
return false;
}
@@ -11744,15 +11843,12 @@ QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
// Embedded-C 4.1.6.2.2: The LHS may also be fixed-point.
if ((!LHSType->isFixedPointOrIntegerType() &&
!LHSType->hasIntegerRepresentation()) ||
- !RHSType->hasIntegerRepresentation())
- return InvalidOperands(Loc, LHS, RHS);
-
- // C++0x: Don't allow scoped enums. FIXME: Use something better than
- // hasIntegerRepresentation() above instead of this.
- if (isScopedEnumerationType(LHSType) ||
- isScopedEnumerationType(RHSType)) {
- return InvalidOperands(Loc, LHS, RHS);
+ !RHSType->hasIntegerRepresentation()) {
+ QualType ResultTy = InvalidOperands(Loc, LHS, RHS);
+ diagnoseScopedEnums(*this, Loc, LHS, RHS, Opc);
+ return ResultTy;
}
+
DiagnoseBadShiftValues(*this, LHS, RHS, Loc, Opc, LHSType);
// "The type of the result is that of the promoted left operand."
@@ -12290,8 +12386,7 @@ static QualType checkArithmeticOrEnumeralThreeWayCompare(Sema &S,
S.InvalidOperands(Loc, LHS, RHS);
return QualType();
}
- QualType IntType =
- LHSStrippedType->castAs<EnumType>()->getDecl()->getIntegerType();
+ QualType IntType = LHSStrippedType->castAsEnumDecl()->getIntegerType();
assert(IntType->isArithmeticType());
// We can't use `CK_IntegralCast` when the underlying type is 'bool', so we
@@ -12311,8 +12406,11 @@ static QualType checkArithmeticOrEnumeralThreeWayCompare(Sema &S,
S.UsualArithmeticConversions(LHS, RHS, Loc, ArithConvKind::Comparison);
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
- if (Type.isNull())
- return S.InvalidOperands(Loc, LHS, RHS);
+ if (Type.isNull()) {
+ QualType ResultTy = S.InvalidOperands(Loc, LHS, RHS);
+ diagnoseScopedEnums(S, Loc, LHS, RHS, BO_Cmp);
+ return ResultTy;
+ }
std::optional<ComparisonCategoryType> CCT =
getComparisonCategoryForBuiltinCmp(Type);
@@ -12344,8 +12442,11 @@ static QualType checkArithmeticOrEnumeralCompare(Sema &S, ExprResult &LHS,
S.UsualArithmeticConversions(LHS, RHS, Loc, ArithConvKind::Comparison);
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
- if (Type.isNull())
- return S.InvalidOperands(Loc, LHS, RHS);
+ if (Type.isNull()) {
+ QualType ResultTy = S.InvalidOperands(Loc, LHS, RHS);
+ diagnoseScopedEnums(S, Loc, LHS, RHS, Opc);
+ return ResultTy;
+ }
assert(Type->isArithmeticType() || Type->isEnumeralType());
if (Type->isAnyComplexType() && BinaryOperator::isRelationalOp(Opc))
@@ -12457,7 +12558,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
auto computeResultTy = [&]() {
if (Opc != BO_Cmp)
- return Context.getLogicalOperationType();
+ return QualType(Context.getLogicalOperationType());
assert(getLangOpts().CPlusPlus);
assert(Context.hasSameType(LHS.get()->getType(), RHS.get()->getType()));
@@ -13355,7 +13456,9 @@ inline QualType Sema::CheckBitwiseOperands(ExprResult &LHS, ExprResult &RHS,
if (!compType.isNull() && compType->isIntegralOrUnscopedEnumerationType())
return compType;
- return InvalidOperands(Loc, LHS, RHS);
+ QualType ResultTy = InvalidOperands(Loc, LHS, RHS);
+ diagnoseScopedEnums(*this, Loc, LHS, RHS, Opc);
+ return ResultTy;
}
// C99 6.5.[13,14]
@@ -13457,13 +13560,19 @@ inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
// C++ [expr.log.or]p1
// The operands are both contextually converted to type bool.
ExprResult LHSRes = PerformContextuallyConvertToBool(LHS.get());
- if (LHSRes.isInvalid())
- return InvalidOperands(Loc, LHS, RHS);
+ if (LHSRes.isInvalid()) {
+ QualType ResultTy = InvalidOperands(Loc, LHS, RHS);
+ diagnoseScopedEnums(*this, Loc, LHS, RHS, Opc);
+ return ResultTy;
+ }
LHS = LHSRes;
ExprResult RHSRes = PerformContextuallyConvertToBool(RHS.get());
- if (RHSRes.isInvalid())
- return InvalidOperands(Loc, LHS, RHS);
+ if (RHSRes.isInvalid()) {
+ QualType ResultTy = InvalidOperands(Loc, LHS, RHS);
+ diagnoseScopedEnums(*this, Loc, LHS, RHS, Opc);
+ return ResultTy;
+ }
RHS = RHSRes;
// C++ [expr.log.and]p2
@@ -13511,6 +13620,8 @@ static NonConstCaptureKind isReferenceToNonConstCapture(Sema &S, Expr *E) {
VarDecl *Var = dyn_cast<VarDecl>(Value);
if (!Var)
return NCCK_None;
+ if (Var->getType()->isReferenceType())
+ return NCCK_None;
assert(Var->hasLocalStorage() && "capture added 'const' to non-local?");
@@ -13694,8 +13805,10 @@ static void DiagnoseRecursiveConstFields(Sema &S, const ValueDecl *VD,
// diagnostics in field nesting order.
while (RecordTypeList.size() > NextToCheckIndex) {
bool IsNested = NextToCheckIndex > 0;
- for (const FieldDecl *Field :
- RecordTypeList[NextToCheckIndex]->getDecl()->fields()) {
+ for (const FieldDecl *Field : RecordTypeList[NextToCheckIndex]
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->fields()) {
// First, check every field for constness.
QualType FieldTy = Field->getType();
if (FieldTy.isConstQualified()) {
@@ -13712,7 +13825,7 @@ static void DiagnoseRecursiveConstFields(Sema &S, const ValueDecl *VD,
// Then we append it to the list to check next in order.
FieldTy = FieldTy.getCanonicalType();
- if (const auto *FieldRecTy = FieldTy->getAs<RecordType>()) {
+ if (const auto *FieldRecTy = FieldTy->getAsCanonical<RecordType>()) {
if (!llvm::is_contained(RecordTypeList, FieldRecTy))
RecordTypeList.push_back(FieldRecTy);
}
@@ -13728,7 +13841,7 @@ static void DiagnoseRecursiveConstFields(Sema &S, const Expr *E,
QualType Ty = E->getType();
assert(Ty->isRecordType() && "lvalue was not record?");
SourceRange Range = E->getSourceRange();
- const RecordType *RTy = Ty.getCanonicalType()->getAs<RecordType>();
+ const auto *RTy = Ty->getAsCanonical<RecordType>();
bool DiagEmitted = false;
if (const MemberExpr *ME = dyn_cast<MemberExpr>(E))
@@ -15057,8 +15170,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
case BO_Mul:
case BO_Div:
ConvertHalfVec = true;
- ResultTy = CheckMultiplyDivideOperands(LHS, RHS, OpLoc, false,
- Opc == BO_Div);
+ ResultTy = CheckMultiplyDivideOperands(LHS, RHS, OpLoc, Opc);
break;
case BO_Rem:
ResultTy = CheckRemainderOperands(LHS, RHS, OpLoc);
@@ -15069,7 +15181,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
break;
case BO_Sub:
ConvertHalfVec = true;
- ResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc);
+ ResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc, Opc);
break;
case BO_Shl:
case BO_Shr:
@@ -15113,8 +15225,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
case BO_MulAssign:
case BO_DivAssign:
ConvertHalfVec = true;
- CompResultTy = CheckMultiplyDivideOperands(LHS, RHS, OpLoc, true,
- Opc == BO_DivAssign);
+ CompResultTy = CheckMultiplyDivideOperands(LHS, RHS, OpLoc, Opc);
CompLHSTy = CompResultTy;
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
ResultTy =
@@ -15136,7 +15247,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
break;
case BO_SubAssign:
ConvertHalfVec = true;
- CompResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc, &CompLHSTy);
+ CompResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc, Opc, &CompLHSTy);
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
ResultTy =
CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy, Opc);
@@ -16146,11 +16257,10 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
return ExprError();
// Look for the designated field.
- const RecordType *RC = CurrentType->getAs<RecordType>();
- if (!RC)
+ auto *RD = CurrentType->getAsRecordDecl();
+ if (!RD)
return ExprError(Diag(OC.LocEnd, diag::err_offsetof_record_type)
<< CurrentType);
- RecordDecl *RD = RC->getDecl();
// C++ [lib.support.types]p5:
// The macro offsetof accepts a restricted set of type arguments in this
@@ -16211,8 +16321,8 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
// If the member was found in a base class, introduce OffsetOfNodes for
// the base class indirections.
CXXBasePaths Paths;
- if (IsDerivedFrom(OC.LocStart, CurrentType, Context.getTypeDeclType(Parent),
- Paths)) {
+ if (IsDerivedFrom(OC.LocStart, CurrentType,
+ Context.getCanonicalTagType(Parent), Paths)) {
if (Paths.getDetectedVirtual()) {
Diag(OC.LocEnd, diag::err_offsetof_field_of_virtual_base)
<< MemberDecl->getDeclName()
@@ -16547,8 +16657,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
auto *Var = cast<VarDecl>(Cap.getVariable());
Expr *CopyExpr = nullptr;
if (getLangOpts().CPlusPlus && Cap.isCopyCapture()) {
- if (const RecordType *Record =
- Cap.getCaptureType()->getAs<RecordType>()) {
+ if (auto *Record = Cap.getCaptureType()->getAsCXXRecordDecl()) {
// The capture logic needs the destructor, so make sure we mark it.
// Usually this is unnecessary because most local variables have
// their destructors marked at declaration time, but parameters are
@@ -16776,8 +16885,8 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
// va_arg. Instead, get the underlying type of the enumeration and pass
// that.
QualType UnderlyingType = TInfo->getType();
- if (const auto *ET = UnderlyingType->getAs<EnumType>())
- UnderlyingType = ET->getDecl()->getIntegerType();
+ if (const auto *ED = UnderlyingType->getAsEnumDecl())
+ UnderlyingType = ED->getIntegerType();
if (Context.typesAreCompatible(PromoteType, UnderlyingType,
/*CompareUnqualified*/ true))
PromoteType = QualType();
@@ -16923,7 +17032,7 @@ ExprResult Sema::ActOnSourceLocExpr(SourceLocIdentKind Kind,
return ExprError();
}
ResultTy = Context.getPointerType(
- Context.getRecordType(StdSourceLocationImplDecl).withConst());
+ Context.getCanonicalTagType(StdSourceLocationImplDecl).withConst());
break;
}
@@ -18108,6 +18217,8 @@ void Sema::PopExpressionEvaluationContext() {
MaybeODRUseExprs.insert_range(Rec.SavedMaybeODRUseExprs);
}
+ DiagnoseMisalignedMembers();
+
// Pop the current expression evaluation context off the stack.
ExprEvalContexts.pop_back();
}
@@ -18642,8 +18753,9 @@ void Sema::MarkCaptureUsedInEnclosingContext(ValueDecl *Capture,
MarkVarDeclODRUsed(Capture, Loc, *this, &CapturingScopeIndex);
}
-void diagnoseUncapturableValueReferenceOrBinding(Sema &S, SourceLocation loc,
- ValueDecl *var) {
+static void diagnoseUncapturableValueReferenceOrBinding(Sema &S,
+ SourceLocation loc,
+ ValueDecl *var) {
DeclContext *VarDC = var->getDeclContext();
// If the parameter still belongs to the translation unit, then
@@ -18771,17 +18883,16 @@ static bool isVariableCapturable(CapturingScopeInfo *CSI, ValueDecl *Var,
}
// Prohibit structs with flexible array members too.
// We cannot capture what is in the tail end of the struct.
- if (const RecordType *VTTy = Var->getType()->getAs<RecordType>()) {
- if (VTTy->getDecl()->hasFlexibleArrayMember()) {
- if (Diagnose) {
- if (IsBlock)
- S.Diag(Loc, diag::err_ref_flexarray_type);
- else
- S.Diag(Loc, diag::err_lambda_capture_flexarray_type) << Var;
- S.Diag(Var->getLocation(), diag::note_previous_decl) << Var;
- }
- return false;
+ if (const auto *VTD = Var->getType()->getAsRecordDecl();
+ VTD && VTD->hasFlexibleArrayMember()) {
+ if (Diagnose) {
+ if (IsBlock)
+ S.Diag(Loc, diag::err_ref_flexarray_type);
+ else
+ S.Diag(Loc, diag::err_lambda_capture_flexarray_type) << Var;
+ S.Diag(Var->getLocation(), diag::note_previous_decl) << Var;
}
+ return false;
}
const bool HasBlocksAttr = Var->hasAttr<BlocksAttr>();
// Lambdas and captured statements are not allowed to capture __block
@@ -19861,7 +19972,7 @@ ExprResult Sema::CheckLValueToRValueConversionOperand(Expr *E) {
// C++2a [basic.def.odr]p4:
// [...] an expression of non-volatile-qualified non-class type to which
// the lvalue-to-rvalue conversion is applied [...]
- if (E->getType().isVolatileQualified() || E->getType()->getAs<RecordType>())
+ if (E->getType().isVolatileQualified() || E->getType()->isRecordType())
return E;
ExprResult Result =
@@ -21213,7 +21324,7 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
NamedDecl *Temp = *ULE->decls_begin();
const bool IsTypeAliasTemplateDecl = isa<TypeAliasTemplateDecl>(Temp);
- NestedNameSpecifier *NNS = ULE->getQualifierLoc().getNestedNameSpecifier();
+ NestedNameSpecifier NNS = ULE->getQualifierLoc().getNestedNameSpecifier();
// FIXME: AssumedTemplate is not very appropriate for error recovery here,
// as it models only the unqualified-id case, where this case can clearly be
// qualified. Thus we can't just qualify an assumed template.
@@ -21239,16 +21350,16 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
QualType TST;
{
SFINAETrap Trap(*this);
- TST = CheckTemplateIdType(TN, NameInfo.getBeginLoc(), TAL);
+ TST = CheckTemplateIdType(ElaboratedTypeKeyword::None, TN,
+ NameInfo.getBeginLoc(), TAL);
}
if (TST.isNull())
TST = Context.getTemplateSpecializationType(
- TN, ULE->template_arguments(), /*CanonicalArgs=*/{},
+ ElaboratedTypeKeyword::None, TN, ULE->template_arguments(),
+ /*CanonicalArgs=*/{},
HasAnyDependentTA ? Context.DependentTy : Context.IntTy);
- QualType ET =
- Context.getElaboratedType(ElaboratedTypeKeyword::None, NNS, TST);
return CreateRecoveryExpr(NameInfo.getBeginLoc(), NameInfo.getEndLoc(), {},
- ET);
+ TST);
}
// Overloaded expressions.
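
To make the diagnoseScopedEnums additions above concrete, here is a minimal
sketch of C++ source that reaches these InvalidOperands paths (the exact
wording of the new note is not shown in this diff, so treat the cast-based
fix-up as an assumption):

    // Scoped enumerators do not convert implicitly, so the comparison,
    // bitwise, and logical operator paths patched above all reach
    // InvalidOperands for operands like these.
    enum class Color { Red, Green };
    bool bad(Color c) { return c == 0; }                  // invalid operands
    int  ok(Color a, Color b) { return int(a) | int(b); } // explicit casts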
diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp
index 0edfd60..5a9279d 100644
--- a/clang/lib/Sema/SemaExprCXX.cpp
+++ b/clang/lib/Sema/SemaExprCXX.cpp
@@ -57,11 +57,11 @@ using namespace sema;
ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
const IdentifierInfo &Name) {
- NestedNameSpecifier *NNS = SS.getScopeRep();
- if ([[maybe_unused]] const IdentifierInfo *II = NNS->getAsIdentifier())
- assert(II == &Name && "not a constructor name");
+ NestedNameSpecifier NNS = SS.getScopeRep();
+ QualType Type(NNS.getAsType(), 0);
+ if ([[maybe_unused]] const auto *DNT = dyn_cast<DependentNameType>(Type))
+ assert(DNT->getIdentifier() == &Name && "not a constructor name");
- QualType Type(NNS->translateToType(Context), 0);
// This reference to the type is located entirely at the location of the
// final identifier in the qualified-id.
return CreateParsedType(Type,
@@ -111,10 +111,8 @@ ParsedType Sema::getConstructorName(const IdentifierInfo &II,
return ParsedType();
}
- QualType T = Context.getTypeDeclType(InjectedClassName);
- DiagnoseUseOfDecl(InjectedClassName, NameLoc);
- MarkAnyDeclReferenced(NameLoc, InjectedClassName, /*OdrUse=*/false);
-
+ QualType T = Context.getTagType(ElaboratedTypeKeyword::None, SS.getScopeRep(),
+ InjectedClassName, /*OwnsTag=*/false);
return ParsedType::make(T);
}
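
A hedged illustration of the constructor-name case handled by
getConstructorName above (hypothetical source, not taken from this patch's
tests): the identifier after '::' resolves to the injected-class-name, and
the new getTagType call keeps the scope specifier attached to the resulting
type rather than marking the declaration referenced.

    template <typename T> struct S { S(); };
    template <typename T> S<T>::S() {} // 'S' after '::' names the constructor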
@@ -175,7 +173,7 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
if (SearchType.isNull() || SearchType->isDependentType())
return true;
- QualType T = Context.getTypeDeclType(Type);
+ CanQualType T = Context.getCanonicalTypeDeclType(Type);
return Context.hasSameUnqualifiedType(T, SearchType);
};
@@ -207,7 +205,8 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
NamedDecl *D = F.next();
if (auto *TD = dyn_cast<TypeDecl>(D->getUnderlyingDecl()))
Diag(D->getLocation(), diag::note_destructor_type_here)
- << Context.getTypeDeclType(TD);
+ << Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD);
else
Diag(D->getLocation(), diag::note_destructor_nontype_here);
@@ -222,11 +221,11 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
if (IsAcceptableResult(Type)) {
- QualType T = Context.getTypeDeclType(Type);
+ QualType T = Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, Type);
MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
- return CreateParsedType(
- Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr, T),
- Context.getTrivialTypeSourceInfo(T, NameLoc));
+ return CreateParsedType(T,
+ Context.getTrivialTypeSourceInfo(T, NameLoc));
}
}
@@ -311,15 +310,23 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
// If both lookups succeed and find a dependent result, which result should
// we retain? (Same question for p->~type-name().)
- if (NestedNameSpecifier *Prefix =
- SS.isSet() ? SS.getScopeRep()->getPrefix() : nullptr) {
+ auto Prefix = [&]() -> NestedNameSpecifierLoc {
+ NestedNameSpecifierLoc NNS = SS.getWithLocInContext(Context);
+ if (!NNS)
+ return NestedNameSpecifierLoc();
+ if (auto TL = NNS.getAsTypeLoc())
+ return TL.getPrefix();
+ return NNS.getAsNamespaceAndPrefix().Prefix;
+ }();
+
+ if (Prefix) {
// This is
//
// nested-name-specifier type-name :: ~ type-name
//
// Look for the second type-name in the nested-name-specifier.
CXXScopeSpec PrefixSS;
- PrefixSS.Adopt(NestedNameSpecifierLoc(Prefix, SS.location_data()));
+ PrefixSS.Adopt(Prefix);
if (ParsedType T = LookupInNestedNameSpec(PrefixSS))
return T;
} else {
@@ -374,7 +381,7 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
//
// also looks for type-name in the scope. Unfortunately, we can't
// reasonably apply this fallback for dependent nested-name-specifiers.
- if (SS.isValid() && SS.getScopeRep()->getPrefix()) {
+ if (Prefix) {
if (ParsedType T = LookupInScope()) {
Diag(SS.getEndLoc(), diag::ext_qualified_dtor_named_in_lexical_scope)
<< FixItHint::CreateRemoval(SS.getRange());
@@ -419,9 +426,10 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
if (auto *TD = dyn_cast<TypeDecl>(FoundDecls[0]->getUnderlyingDecl())) {
assert(!SearchType.isNull() &&
"should only reject a type result if we have a search type");
- QualType T = Context.getTypeDeclType(TD);
Diag(NameLoc, diag::err_destructor_expr_type_mismatch)
- << T << SearchType << MakeFixItHint();
+ << Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD)
+ << SearchType << MakeFixItHint();
} else {
Diag(NameLoc, diag::err_destructor_expr_nontype)
<< &II << MakeFixItHint();
@@ -435,7 +443,8 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
for (NamedDecl *FoundD : FoundDecls) {
if (auto *TD = dyn_cast<TypeDecl>(FoundD->getUnderlyingDecl()))
Diag(FoundD->getLocation(), diag::note_destructor_type_here)
- << Context.getTypeDeclType(TD);
+ << Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD);
else
Diag(FoundD->getLocation(), diag::note_destructor_nontype_here)
<< FoundD;
@@ -501,12 +510,8 @@ bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
<< II << static_cast<int>(Status) << Hint;
}
- if (!SS.isValid())
- return false;
-
- switch (SS.getScopeRep()->getKind()) {
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::TypeSpec:
+ switch (SS.getScopeRep().getKind()) {
+ case NestedNameSpecifier::Kind::Type:
// Per C++11 [over.literal]p2, literal operators can only be declared at
// namespace scope. Therefore, this unqualified-id cannot name anything.
// Reject it early, because we have no AST representation for this in the
@@ -515,9 +520,10 @@ bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
<< SS.getScopeRep();
return true;
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
- case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ case NestedNameSpecifier::Kind::Namespace:
return false;
}
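
For readers following the NestedNameSpecifier migration threaded through this
patch, a minimal sketch of the new value-type API shape, mirroring the switch
above (the include of NestedNameSpecifier.h and llvm_unreachable are assumed):

    // NestedNameSpecifier is now passed by value and classified with scoped
    // Kind enumerators; per C++11 [over.literal]p2 only namespace-scope
    // qualifiers can precede a literal-operator-id.
    bool canQualifyLiteralOperator(clang::NestedNameSpecifier NNS) {
      switch (NNS.getKind()) {
      case clang::NestedNameSpecifier::Kind::Type:
        return false;
      case clang::NestedNameSpecifier::Kind::Null:
      case clang::NestedNameSpecifier::Kind::Global:
      case clang::NestedNameSpecifier::Kind::MicrosoftSuper:
      case clang::NestedNameSpecifier::Kind::Namespace:
        return true;
      }
      llvm_unreachable("unknown NestedNameSpecifier kind");
    }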
@@ -537,7 +543,7 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
QualType T
= Context.getUnqualifiedArrayType(Operand->getType().getNonReferenceType(),
Quals);
- if (T->getAs<RecordType>() &&
+ if (T->isRecordType() &&
RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
return ExprError();
@@ -564,8 +570,7 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
}
QualType T = E->getType();
- if (const RecordType *RecordT = T->getAs<RecordType>()) {
- CXXRecordDecl *RecordD = cast<CXXRecordDecl>(RecordT->getDecl());
+ if (auto *RecordD = T->getAsCXXRecordDecl()) {
// C++ [expr.typeid]p3:
// [...] If the type of the expression is a class type, the class
// shall be completely-defined.
@@ -659,7 +664,7 @@ Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
return ExprError(Diag(OpLoc, diag::err_no_typeid_with_fno_rtti));
}
- QualType TypeInfoType = Context.getTypeDeclType(CXXTypeInfoDecl);
+ CanQualType TypeInfoType = Context.getCanonicalTagType(CXXTypeInfoDecl);
if (isType) {
// The operand is a type; handle it as such.
@@ -939,7 +944,7 @@ collectPublicBases(CXXRecordDecl *RD,
static void getUnambiguousPublicSubobjects(
CXXRecordDecl *RD, llvm::SmallVectorImpl<CXXRecordDecl *> &Objects) {
llvm::DenseMap<CXXRecordDecl *, unsigned> SubobjectsSeen;
- llvm::SmallSet<CXXRecordDecl *, 2> VBases;
+ llvm::SmallPtrSet<CXXRecordDecl *, 2> VBases;
llvm::SetVector<CXXRecordDecl *> PublicSubobjectsSeen;
SubobjectsSeen[RD] = 1;
PublicSubobjectsSeen.insert(RD);
@@ -1214,7 +1219,7 @@ QualType Sema::getCurrentThisType() {
// This is a lambda call operator that is being instantiated as a default
// initializer. DC must point to the enclosing class type, so we can recover
// the 'this' type from it.
- QualType ClassTy = Context.getTypeDeclType(cast<CXXRecordDecl>(DC));
+ CanQualType ClassTy = Context.getCanonicalTagType(cast<CXXRecordDecl>(DC));
// There are no cv-qualifiers for 'this' within default initializers,
// per [expr.prim.general]p4.
ThisTy = Context.getPointerType(ClassTy);
@@ -1244,7 +1249,7 @@ Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
else
Record = cast<CXXRecordDecl>(ContextDecl);
- QualType T = S.Context.getRecordType(Record);
+ QualType T = S.Context.getCanonicalTagType(Record);
T = S.getASTContext().getQualifiedType(T, CXXThisTypeQuals);
S.CXXThisTypeOverride =
@@ -1732,7 +1737,7 @@ static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
if (S.getLangOpts().AlignedAllocation && UsualParams < FD->getNumParams() &&
S.Context.hasSameUnqualifiedType(
FD->getParamDecl(UsualParams)->getType(),
- S.Context.getTypeDeclType(S.getStdAlignValT())))
+ S.Context.getCanonicalTagType(S.getStdAlignValT())))
++UsualParams;
return UsualParams == FD->getNumParams();
@@ -1860,8 +1865,8 @@ namespace {
if (FunctionTemplateDecl *Best = S.getMoreSpecializedTemplate(
PrimaryTemplate, OtherPrimaryTemplate, SourceLocation(),
TPOC_Call, ImplicitArgCount,
- DC ? QualType(DC->getTypeForDecl(), 0) : QualType{},
- OtherDC ? QualType(OtherDC->getTypeForDecl(), 0) : QualType{},
+ DC ? S.Context.getCanonicalTagType(DC) : QualType{},
+ OtherDC ? S.Context.getCanonicalTagType(OtherDC) : QualType{},
false)) {
return Best == PrimaryTemplate ? 1 : -1;
}
@@ -1968,8 +1973,8 @@ static UsualDeallocFnInfo resolveDeallocationOverload(
static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
TypeAwareAllocationMode PassType,
QualType allocType) {
- const RecordType *record =
- allocType->getBaseElementTypeUnsafe()->getAs<RecordType>();
+ const auto *record =
+ allocType->getBaseElementTypeUnsafe()->getAsCanonical<RecordType>();
if (!record) return false;
// Try to find an operator delete[] in class scope.
@@ -1977,7 +1982,7 @@ static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
DeclarationName deleteName =
S.Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete);
LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName);
- S.LookupQualifiedName(ops, record->getDecl());
+ S.LookupQualifiedName(ops, record->getOriginalDecl()->getDefinitionOrSelf());
// We're just doing this for information.
ops.suppressDiagnostics();
@@ -2290,8 +2295,7 @@ ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
ConvertedSize = PerformImplicitConversion(
*ArraySize, Context.getSizeType(), AssignmentAction::Converting);
- if (!ConvertedSize.isInvalid() &&
- (*ArraySize)->getType()->getAs<RecordType>())
+ if (!ConvertedSize.isInvalid() && (*ArraySize)->getType()->isRecordType())
// Diagnose the compatibility of this conversion.
Diag(StartLoc, diag::warn_cxx98_compat_array_size_conversion)
<< (*ArraySize)->getType() << 0 << "'size_t'";
@@ -2505,7 +2509,7 @@ ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// because there might not be a `std::align_val_t` type.
EnumDecl *StdAlignValT = getStdAlignValT();
QualType AlignValT =
- StdAlignValT ? Context.getTypeDeclType(StdAlignValT) : SizeTy;
+ StdAlignValT ? Context.getCanonicalTagType(StdAlignValT) : SizeTy;
IntegerLiteral AlignmentLiteral(
Context,
llvm::APInt(Context.getTypeSize(SizeTy),
@@ -2966,7 +2970,7 @@ bool Sema::FindAllocationFunctions(
isTypeAwareAllocation(IAP.PassTypeIdentity);
if (IncludeAlignParam) {
DeclareGlobalNewDelete();
- AlignValT = Context.getTypeDeclType(getStdAlignValT());
+ AlignValT = Context.getCanonicalTagType(getStdAlignValT());
}
CXXScalarValueInitExpr Align(AlignValT, nullptr, SourceLocation());
if (IncludeAlignParam)
@@ -3048,8 +3052,7 @@ bool Sema::FindAllocationFunctions(
LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName);
if (AllocElemType->isRecordType() &&
DeleteScope != AllocationFunctionScope::Global) {
- auto *RD =
- cast<CXXRecordDecl>(AllocElemType->castAs<RecordType>()->getDecl());
+ auto *RD = AllocElemType->castAsCXXRecordDecl();
LookupQualifiedName(FoundDelete, RD);
}
if (FoundDelete.isAmbiguous())
@@ -3426,7 +3429,7 @@ void Sema::DeclareGlobalNewDelete() {
for (int Aligned = 0; Aligned < NumAlignVariants; ++Aligned) {
if (Aligned)
- Params.push_back(Context.getTypeDeclType(getStdAlignValT()));
+ Params.push_back(Context.getCanonicalTagType(getStdAlignValT()));
DeclareGlobalAllocationFunction(
Context.DeclarationNames.getCXXOperatorName(Kind), Return, Params);
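
Once aligned allocation is enabled, the loop above declares each usual
allocation function in both forms; in source terms the implicit global
declarations correspond to the standard C++17 signatures:

    void *operator new(std::size_t);
    void *operator new(std::size_t, std::align_val_t);
    void operator delete(void *) noexcept;
    void operator delete(void *, std::align_val_t) noexcept;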
@@ -3483,7 +3486,7 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
bool HasBadAllocExceptionSpec = Name.isAnyOperatorNew();
if (HasBadAllocExceptionSpec) {
if (!getLangOpts().CPlusPlus11) {
- BadAllocType = Context.getTypeDeclType(getStdBadAlloc());
+ BadAllocType = Context.getCanonicalTagType(getStdBadAlloc());
assert(StdBadAlloc && "Must have std::bad_alloc declared");
EPI.ExceptionSpec.Type = EST_Dynamic;
EPI.ExceptionSpec.Exceptions = llvm::ArrayRef(BadAllocType);
@@ -3497,6 +3500,19 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
}
auto CreateAllocationFunctionDecl = [&](Attr *ExtraAttr) {
+ // The MSVC STL has explicit cdecl on its (host-side) allocation
+ // functions, so to prevent a calling-convention clash we use the host's
+ // CC, if available, or CC_C as a fallback, for the host-side implicit
+ // decls, knowing these do not get emitted when compiling for device.
+ if (getLangOpts().CUDAIsDevice && ExtraAttr &&
+ isa<CUDAHostAttr>(ExtraAttr) &&
+ Context.getTargetInfo().getTriple().isSPIRV()) {
+ if (auto *ATI = Context.getAuxTargetInfo())
+ EPI.ExtInfo = EPI.ExtInfo.withCallingConv(ATI->getDefaultCallingConv());
+ else
+ EPI.ExtInfo = EPI.ExtInfo.withCallingConv(CallingConv::CC_C);
+ }
QualType FnType = Context.getFunctionType(Return, Params, EPI);
FunctionDecl *Alloc = FunctionDecl::Create(
Context, GlobalCtx, SourceLocation(), SourceLocation(), Name, FnType,
@@ -3593,7 +3609,7 @@ FunctionDecl *Sema::FindDeallocationFunctionForDestructor(SourceLocation Loc,
DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Delete);
FunctionDecl *OperatorDelete = nullptr;
- QualType DeallocType = Context.getRecordType(RD);
+ CanQualType DeallocType = Context.getCanonicalTagType(RD);
ImplicitDeallocationParameters IDP = {
DeallocType, ShouldUseTypeAwareOperatorNewOrDelete(),
AlignedAllocationMode::No, SizedDeallocationMode::No};
@@ -3627,7 +3643,7 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
Found.suppressDiagnostics();
if (!isAlignedAllocation(IDP.PassAlignment) &&
- hasNewExtendedAlignment(*this, Context.getRecordType(RD)))
+ hasNewExtendedAlignment(*this, Context.getCanonicalTagType(RD)))
IDP.PassAlignment = AlignedAllocationMode::Yes;
// C++17 [expr.delete]p10:
@@ -4049,8 +4065,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
? diag::err_delete_incomplete
: diag::warn_delete_incomplete,
Ex.get())) {
- if (const RecordType *RT = PointeeElem->getAs<RecordType>())
- PointeeRD = cast<CXXRecordDecl>(RT->getDecl());
+ PointeeRD = PointeeElem->getAsCXXRecordDecl();
}
}
@@ -4574,7 +4589,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// If the user-defined conversion is specified by a conversion function,
// the initial standard conversion sequence converts the source type to
// the implicit object parameter of the conversion function.
- BeforeToType = Context.getTagDeclType(Conv->getParent());
+ BeforeToType = Context.getCanonicalTagType(Conv->getParent());
} else {
const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(FD);
CastKind = CK_ConstructorConversion;
@@ -4818,7 +4833,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (FromType->isVectorType() || ToType->isVectorType())
StepTy = adjustVectorType(Context, FromType, ToType, &ElTy);
if (ElTy->isBooleanType()) {
- assert(FromType->castAs<EnumType>()->getDecl()->isFixed() &&
+ assert(FromType->castAsEnumDecl()->isFixed() &&
SCS.Second == ICK_Integral_Promotion &&
"only enums with fixed underlying type can promote to bool");
From = ImpCastExprToType(From, StepTy, CK_IntegralToBoolean, VK_PRValue,
@@ -5370,16 +5385,18 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
return QualType();
}
+ // FIXME: use sugared type from member pointer.
+ CanQualType RHSClassType = Context.getCanonicalTagType(RHSClass);
CXXCastPath BasePath;
if (CheckDerivedToBaseConversion(
- LHSType, QualType(RHSClass->getTypeForDecl(), 0), Loc,
+ LHSType, RHSClassType, Loc,
SourceRange(LHS.get()->getBeginLoc(), RHS.get()->getEndLoc()),
&BasePath))
return QualType();
// Cast LHS to type of use.
- QualType UseType = Context.getQualifiedType(RHSClass->getTypeForDecl(),
- LHSType.getQualifiers());
+ QualType UseType =
+ Context.getQualifiedType(RHSClassType, LHSType.getQualifiers());
if (isIndirect)
UseType = Context.getPointerType(UseType);
ExprValueKind VK = isIndirect ? VK_PRValue : LHS.get()->getValueKind();
@@ -5502,8 +5519,8 @@ static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
// the same or one is a base class of the other:
QualType FTy = From->getType();
QualType TTy = To->getType();
- const RecordType *FRec = FTy->getAs<RecordType>();
- const RecordType *TRec = TTy->getAs<RecordType>();
+ const RecordType *FRec = FTy->getAsCanonical<RecordType>();
+ const RecordType *TRec = TTy->getAsCanonical<RecordType>();
bool FDerivedFromT = FRec && TRec && FRec != TRec &&
Self.IsDerivedFrom(QuestionLoc, FTy, TTy);
if (FRec && TRec && (FRec == TRec || FDerivedFromT ||
@@ -5726,10 +5743,12 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
return {};
}
+ // Boolean vectors are permitted outside of OpenCL mode.
if (Context.getTypeSize(ResultElementTy) !=
- Context.getTypeSize(CondElementTy)) {
- Diag(QuestionLoc, diag::err_conditional_vector_element_size) << CondType
- << ResultType;
+ Context.getTypeSize(CondElementTy) &&
+ (!CondElementTy->isBooleanType() || LangOpts.OpenCL)) {
+ Diag(QuestionLoc, diag::err_conditional_vector_element_size)
+ << CondType << ResultType;
return {};
}
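
A sketch of source admitted by the relaxed element-size check above
(ext_vector_type spelling per Clang's extension; the point of the change is
that this is now accepted outside OpenCL mode):

    // bool elements are narrower than the 32-bit result elements, so the
    // old size check rejected this; the isBooleanType() carve-out now
    // permits it outside OpenCL.
    typedef int  v4i __attribute__((ext_vector_type(4)));
    typedef bool v4b __attribute__((ext_vector_type(4)));
    v4i select(v4b c, v4i a, v4i b) { return c ? a : b; }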
@@ -6184,7 +6203,7 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
case Pointer:
return Ctx.getPointerType(T);
case MemberPointer:
- return Ctx.getMemberPointerType(T, /*Qualifier=*/nullptr,
+ return Ctx.getMemberPointerType(T, /*Qualifier=*/std::nullopt,
ClassOrBound->getAsCXXRecordDecl());
case ObjCPointer:
return Ctx.getObjCObjectPointerType(T);
@@ -6357,7 +6376,7 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
return QualType();
Steps.emplace_back(Step::MemberPointer,
- Context.getTypeDeclType(Cls).getTypePtr());
+ Context.getCanonicalTagType(Cls).getTypePtr());
continue;
}
@@ -6636,7 +6655,8 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
// That should be enough to guarantee that this type is complete, if we're
// not processing a decltype expression.
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
if (RD->isInvalidDecl() || RD->isDependentContext())
return E;
@@ -7243,16 +7263,13 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
TemplateIdAnnotation *TemplateId = SecondTypeName.TemplateId;
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- TypeResult T = ActOnTemplateIdType(S,
- SS,
- TemplateId->TemplateKWLoc,
- TemplateId->Template,
- TemplateId->Name,
- TemplateId->TemplateNameLoc,
- TemplateId->LAngleLoc,
- TemplateArgsPtr,
- TemplateId->RAngleLoc,
- /*IsCtorOrDtorName*/true);
+ TypeResult T = ActOnTemplateIdType(
+ S, ElaboratedTypeKeyword::None,
+ /*ElaboratedKeywordLoc=*/SourceLocation(), SS,
+ TemplateId->TemplateKWLoc, TemplateId->Template, TemplateId->Name,
+ TemplateId->TemplateNameLoc, TemplateId->LAngleLoc, TemplateArgsPtr,
+ TemplateId->RAngleLoc,
+ /*IsCtorOrDtorName*/ true);
if (T.isInvalid() || !T.get()) {
// Recover by assuming we had the right type all along.
DestructedType = ObjectType;
@@ -7296,16 +7313,13 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
TemplateIdAnnotation *TemplateId = FirstTypeName.TemplateId;
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- TypeResult T = ActOnTemplateIdType(S,
- SS,
- TemplateId->TemplateKWLoc,
- TemplateId->Template,
- TemplateId->Name,
- TemplateId->TemplateNameLoc,
- TemplateId->LAngleLoc,
- TemplateArgsPtr,
- TemplateId->RAngleLoc,
- /*IsCtorOrDtorName*/true);
+ TypeResult T = ActOnTemplateIdType(
+ S, ElaboratedTypeKeyword::None,
+ /*ElaboratedKeywordLoc=*/SourceLocation(), SS,
+ TemplateId->TemplateKWLoc, TemplateId->Template, TemplateId->Name,
+ TemplateId->TemplateNameLoc, TemplateId->LAngleLoc, TemplateArgsPtr,
+ TemplateId->RAngleLoc,
+ /*IsCtorOrDtorName*/ true);
if (T.isInvalid() || !T.get()) {
// Recover by dropping this type.
ScopeType = QualType();
@@ -7504,12 +7518,10 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) {
}
// GCC seems to also exclude expressions of incomplete enum type.
- if (const EnumType *T = E->getType()->getAs<EnumType>()) {
- if (!T->getDecl()->isComplete()) {
- // FIXME: stupid workaround for a codegen bug!
- E = ImpCastExprToType(E, Context.VoidTy, CK_ToVoid).get();
- return E;
- }
+ if (const auto *ED = E->getType()->getAsEnumDecl(); ED && !ED->isComplete()) {
+ // FIXME: stupid workaround for a codegen bug!
+ E = ImpCastExprToType(E, Context.VoidTy, CK_ToVoid).get();
+ return E;
}
ExprResult Res = DefaultFunctionArrayLvalueConversion(E);
diff --git a/clang/lib/Sema/SemaExprMember.cpp b/clang/lib/Sema/SemaExprMember.cpp
index 5dca509..4a31a13 100644
--- a/clang/lib/Sema/SemaExprMember.cpp
+++ b/clang/lib/Sema/SemaExprMember.cpp
@@ -492,13 +492,14 @@ CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK,
QualType VT = S.Context.getExtVectorType(vecType->getElementType(), CompSize);
// Now look up the TypeDefDecl from the vector type. Without this,
- // diagostics look bad. We want extended vector types to appear built-in.
+ // diagnostics look bad. We want extended vector types to appear built-in.
for (Sema::ExtVectorDeclsType::iterator
I = S.ExtVectorDecls.begin(S.getExternalSource()),
E = S.ExtVectorDecls.end();
I != E; ++I) {
if ((*I)->getUnderlyingType() == VT)
- return S.Context.getTypedefType(*I);
+ return S.Context.getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, *I);
}
return VT; // should never get here (a typedef type should always be found).
@@ -881,7 +882,7 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
// build a CXXDependentScopeMemberExpr.
if (R.wasNotFoundInCurrentInstantiation() ||
(R.getLookupName().getCXXOverloadedOperator() == OO_Equal &&
- (SS.isSet() ? SS.getScopeRep()->isDependent()
+ (SS.isSet() ? SS.getScopeRep().isDependent()
: BaseExprType->isDependentType())))
return ActOnDependentMemberExpr(BaseExpr, BaseExprType, IsArrow, OpLoc, SS,
TemplateKWLoc, FirstQualifierInScope,
diff --git a/clang/lib/Sema/SemaExprObjC.cpp b/clang/lib/Sema/SemaExprObjC.cpp
index e0662d8..331f6e5 100644
--- a/clang/lib/Sema/SemaExprObjC.cpp
+++ b/clang/lib/Sema/SemaExprObjC.cpp
@@ -638,15 +638,14 @@ ExprResult SemaObjC::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
// Look for the appropriate method within NSNumber.
BoxingMethod = getNSNumberFactoryMethod(*this, Loc, ValueType);
BoxedType = NSNumberPointer;
- } else if (const EnumType *ET = ValueType->getAs<EnumType>()) {
- if (!ET->getDecl()->isComplete()) {
+ } else if (const auto *ED = ValueType->getAsEnumDecl()) {
+ if (!ED->isComplete()) {
Diag(Loc, diag::err_objc_incomplete_boxed_expression_type)
<< ValueType << ValueExpr->getSourceRange();
return ExprError();
}
- BoxingMethod = getNSNumberFactoryMethod(*this, Loc,
- ET->getDecl()->getIntegerType());
+ BoxingMethod = getNSNumberFactoryMethod(*this, Loc, ED->getIntegerType());
BoxedType = NSNumberPointer;
} else if (ValueType->isObjCBoxableRecordType()) {
// Support for structure types, that marked as objc_boxable
@@ -2337,10 +2336,10 @@ SemaObjC::getObjCMessageKind(Scope *S, IdentifierInfo *Name,
if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(ND))
T = Context.getObjCInterfaceType(Class);
else if (TypeDecl *Type = dyn_cast<TypeDecl>(ND)) {
- T = Context.getTypeDeclType(Type);
SemaRef.DiagnoseUseOfDecl(Type, NameLoc);
- }
- else
+ T = Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, Type);
+ } else
return ObjCInstanceMessage;
// We have a class message, and T is the type we're
@@ -3846,8 +3845,9 @@ static inline T *getObjCBridgeAttr(const TypedefType *TD) {
QualType QT = TDNDecl->getUnderlyingType();
if (QT->isPointerType()) {
QT = QT->getPointeeType();
- if (const RecordType *RT = QT->getAs<RecordType>()) {
- for (auto *Redecl : RT->getDecl()->getMostRecentDecl()->redecls()) {
+ if (const RecordType *RT = QT->getAsCanonical<RecordType>()) {
+ for (auto *Redecl :
+ RT->getOriginalDecl()->getMostRecentDecl()->redecls()) {
if (auto *attr = Redecl->getAttr<T>())
return attr;
}
diff --git a/clang/lib/Sema/SemaFunctionEffects.cpp b/clang/lib/Sema/SemaFunctionEffects.cpp
index 1592862..8590ee8 100644
--- a/clang/lib/Sema/SemaFunctionEffects.cpp
+++ b/clang/lib/Sema/SemaFunctionEffects.cpp
@@ -1352,11 +1352,15 @@ private:
return true;
}
- bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc Node) override {
+ bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc Node,
+ bool TraverseQualifier) override {
return true;
}
- bool TraverseDecltypeTypeLoc(DecltypeTypeLoc Node) override { return true; }
+ bool TraverseDecltypeTypeLoc(DecltypeTypeLoc Node,
+ bool TraverseQualifier) override {
+ return true;
+ }
bool TraverseCXXNoexceptExpr(CXXNoexceptExpr *Node) override {
return true;
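
The two overrides above pick up the new TraverseQualifier parameter; a hedged
sketch of what such an override now looks like, assuming Clang's
DynamicRecursiveASTVisitor as the virtual base these signatures match:

    struct SkipDecltype : clang::DynamicRecursiveASTVisitor {
      // The added flag indicates whether the type location's
      // nested-name qualifier should be traversed as well.
      bool TraverseDecltypeTypeLoc(clang::DecltypeTypeLoc TL,
                                   bool TraverseQualifier) override {
        return true; // prune traversal into the decltype operand
      }
    };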
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 8536e04..1e5ec95 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -71,6 +71,10 @@ static RegisterType getRegisterType(ResourceClass RC) {
llvm_unreachable("unexpected ResourceClass value");
}
+static RegisterType getRegisterType(const HLSLAttributedResourceType *ResTy) {
+ return getRegisterType(ResTy->getAttrs().ResourceClass);
+}
+
// Converts the first letter of string Slot to RegisterType.
// Returns false if the letter does not correspond to a valid register type.
static bool convertToRegisterType(StringRef Slot, RegisterType *RT) {
@@ -232,9 +236,8 @@ static unsigned calculateLegacyCbufferFieldAlign(const ASTContext &Context,
static unsigned calculateLegacyCbufferSize(const ASTContext &Context,
QualType T) {
constexpr unsigned CBufferAlign = 16;
- if (const RecordType *RT = T->getAs<RecordType>()) {
+ if (const auto *RD = T->getAsRecordDecl()) {
unsigned Size = 0;
- const RecordDecl *RD = RT->getDecl();
for (const FieldDecl *Field : RD->fields()) {
QualType Ty = Field->getType();
unsigned FieldSize = calculateLegacyCbufferSize(Context, Ty);
@@ -337,16 +340,19 @@ static bool isZeroSizedArray(const ConstantArrayType *CAT) {
return CAT != nullptr;
}
-// Returns true if the record type is an HLSL resource class or an array of
-// resource classes
-static bool isResourceRecordTypeOrArrayOf(const Type *Ty) {
- while (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
- Ty = CAT->getArrayElementTypeNoTypeQual();
- return HLSLAttributedResourceType::findHandleTypeOnResource(Ty) != nullptr;
+static bool isResourceRecordTypeOrArrayOf(VarDecl *VD) {
+ const Type *Ty = VD->getType().getTypePtr();
+ return Ty->isHLSLResourceRecord() || Ty->isHLSLResourceRecordArray();
}
-static bool isResourceRecordTypeOrArrayOf(VarDecl *VD) {
- return isResourceRecordTypeOrArrayOf(VD->getType().getTypePtr());
+static const HLSLAttributedResourceType *
+getResourceArrayHandleType(VarDecl *VD) {
+ assert(VD->getType()->isHLSLResourceRecordArray() &&
+ "expected array of resource records");
+ const Type *Ty = VD->getType()->getUnqualifiedDesugaredType();
+ while (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
+ Ty = CAT->getArrayElementTypeNoTypeQual()->getUnqualifiedDesugaredType();
+ return HLSLAttributedResourceType::findHandleTypeOnResource(Ty);
}
// Returns true if the type is a leaf element type that is not valid to be
@@ -355,10 +361,10 @@ static bool isResourceRecordTypeOrArrayOf(VarDecl *VD) {
// type or if it is a record type that needs to be inspected further.
static bool isInvalidConstantBufferLeafElementType(const Type *Ty) {
Ty = Ty->getUnqualifiedDesugaredType();
- if (isResourceRecordTypeOrArrayOf(Ty))
+ if (Ty->isHLSLResourceRecord() || Ty->isHLSLResourceRecordArray())
return true;
- if (Ty->isRecordType())
- return Ty->getAsCXXRecordDecl()->isEmpty();
+ if (const auto *RD = Ty->getAsCXXRecordDecl())
+ return RD->isEmpty();
if (Ty->isConstantArrayType() &&
isZeroSizedArray(cast<ConstantArrayType>(Ty)))
return true;
@@ -373,21 +379,21 @@ static bool isInvalidConstantBufferLeafElementType(const Type *Ty) {
// needs to be created for HLSL Buffer use that will exclude these unwanted
// declarations (see createHostLayoutStruct function).
static bool requiresImplicitBufferLayoutStructure(const CXXRecordDecl *RD) {
- if (RD->getTypeForDecl()->isHLSLIntangibleType() || RD->isEmpty())
+ if (RD->isHLSLIntangible() || RD->isEmpty())
return true;
// check fields
for (const FieldDecl *Field : RD->fields()) {
QualType Ty = Field->getType();
if (isInvalidConstantBufferLeafElementType(Ty.getTypePtr()))
return true;
- if (Ty->isRecordType() &&
- requiresImplicitBufferLayoutStructure(Ty->getAsCXXRecordDecl()))
+ if (const auto *RD = Ty->getAsCXXRecordDecl();
+ RD && requiresImplicitBufferLayoutStructure(RD))
return true;
}
// check bases
for (const CXXBaseSpecifier &Base : RD->bases())
if (requiresImplicitBufferLayoutStructure(
- Base.getType()->getAsCXXRecordDecl()))
+ Base.getType()->castAsCXXRecordDecl()))
return true;
return false;
}
@@ -452,13 +458,12 @@ static FieldDecl *createFieldForHostLayoutStruct(Sema &S, const Type *Ty,
if (isInvalidConstantBufferLeafElementType(Ty))
return nullptr;
- if (Ty->isRecordType()) {
- CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ if (auto *RD = Ty->getAsCXXRecordDecl()) {
if (requiresImplicitBufferLayoutStructure(RD)) {
RD = createHostLayoutStruct(S, RD);
if (!RD)
return nullptr;
- Ty = RD->getTypeForDecl();
+ Ty = S.Context.getCanonicalTagType(RD)->getTypePtr();
}
}
@@ -504,12 +509,12 @@ static CXXRecordDecl *createHostLayoutStruct(Sema &S,
assert(NumBases == 1 && "HLSL supports only one base type");
(void)NumBases;
CXXBaseSpecifier Base = *StructDecl->bases_begin();
- CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ CXXRecordDecl *BaseDecl = Base.getType()->castAsCXXRecordDecl();
if (requiresImplicitBufferLayoutStructure(BaseDecl)) {
BaseDecl = createHostLayoutStruct(S, BaseDecl);
if (BaseDecl) {
- TypeSourceInfo *TSI = AST.getTrivialTypeSourceInfo(
- QualType(BaseDecl->getTypeForDecl(), 0));
+ TypeSourceInfo *TSI =
+ AST.getTrivialTypeSourceInfo(AST.getCanonicalTagType(BaseDecl));
Base = CXXBaseSpecifier(SourceRange(), false, StructDecl->isClass(),
AS_none, TSI, SourceLocation());
}
@@ -575,16 +580,13 @@ void createHostLayoutStructForBuffer(Sema &S, HLSLBufferDecl *BufDecl) {
BufDecl->addLayoutStruct(LS);
}
-static void addImplicitBindingAttrToBuffer(Sema &S, HLSLBufferDecl *BufDecl,
- uint32_t ImplicitBindingOrderID) {
- RegisterType RT =
- BufDecl->isCBuffer() ? RegisterType::CBuffer : RegisterType::SRV;
+static void addImplicitBindingAttrToDecl(Sema &S, Decl *D, RegisterType RT,
+ uint32_t ImplicitBindingOrderID) {
auto *Attr =
HLSLResourceBindingAttr::CreateImplicit(S.getASTContext(), "", "0", {});
- std::optional<unsigned> RegSlot;
- Attr->setBinding(RT, RegSlot, 0);
+ Attr->setBinding(RT, std::nullopt, 0);
Attr->setImplicitBindingOrderID(ImplicitBindingOrderID);
- BufDecl->addAttr(Attr);
+ D->addAttr(Attr);
}
// Handle end of cbuffer/tbuffer declaration
@@ -607,7 +609,10 @@ void SemaHLSL::ActOnFinishBuffer(Decl *Dcl, SourceLocation RBrace) {
if (RBA)
RBA->setImplicitBindingOrderID(OrderID);
else
- addImplicitBindingAttrToBuffer(SemaRef, BufDecl, OrderID);
+ addImplicitBindingAttrToDecl(SemaRef, BufDecl,
+ BufDecl->isCBuffer() ? RegisterType::CBuffer
+ : RegisterType::SRV,
+ OrderID);
}
SemaRef.PopDeclContext();
@@ -722,6 +727,23 @@ void SemaHLSL::ActOnTopLevelFunction(FunctionDecl *FD) {
if (FD->getName() != TargetInfo.getTargetOpts().HLSLEntry)
return;
+ // If a root signature was specified to override the entry function's,
+ // attach it now.
+ if (RootSigOverrideIdent) {
+ LookupResult R(SemaRef, RootSigOverrideIdent, SourceLocation(),
+ Sema::LookupOrdinaryName);
+ if (SemaRef.LookupQualifiedName(R, FD->getDeclContext()))
+ if (auto *SignatureDecl =
+ dyn_cast<HLSLRootSignatureDecl>(R.getFoundDecl())) {
+ FD->dropAttr<RootSignatureAttr>();
+ // We could look up the SourceRange of the macro here as well
+ AttributeCommonInfo AL(RootSigOverrideIdent, AttributeScopeInfo(),
+ SourceRange(), ParsedAttr::Form::Microsoft());
+ FD->addAttr(::new (getASTContext()) RootSignatureAttr(
+ getASTContext(), AL, RootSigOverrideIdent, SignatureDecl));
+ }
+ }
+
llvm::Triple::EnvironmentType Env = TargetInfo.getTriple().getEnvironment();
if (HLSLShaderAttr::isValidShaderType(Env) && Env != llvm::Triple::Library) {
if (const auto *Shader = FD->getAttr<HLSLShaderAttr>()) {
@@ -1137,15 +1159,14 @@ struct PerVisibilityBindingChecker {
bool HadOverlap = false;
using llvm::hlsl::BindingInfoBuilder;
- auto ReportOverlap = [this, &HadOverlap](
- const BindingInfoBuilder &Builder,
- const BindingInfoBuilder::Binding &Reported) {
+ auto ReportOverlap = [this,
+ &HadOverlap](const BindingInfoBuilder &Builder,
+ const llvm::hlsl::Binding &Reported) {
HadOverlap = true;
const auto *Elem =
static_cast<const hlsl::RootSignatureElement *>(Reported.Cookie);
- const BindingInfoBuilder::Binding &Previous =
- Builder.findOverlapping(Reported);
+ const llvm::hlsl::Binding &Previous = Builder.findOverlapping(Reported);
const auto *PrevElem =
static_cast<const hlsl::RootSignatureElement *>(Previous.Cookie);
@@ -1843,7 +1864,7 @@ SemaHLSL::TakeLocForHLSLAttribute(const HLSLAttributedResourceType *RT) {
// requirements and adds them to Bindings
void SemaHLSL::collectResourceBindingsOnUserRecordDecl(const VarDecl *VD,
const RecordType *RT) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
for (FieldDecl *FD : RD->fields()) {
const Type *Ty = FD->getType()->getUnqualifiedDesugaredType();
@@ -1913,7 +1934,7 @@ static bool DiagnoseLocalRegisterBinding(Sema &S, SourceLocation &ArgLoc,
if (const HLSLAttributedResourceType *AttrResType =
HLSLAttributedResourceType::findHandleTypeOnResource(
VD->getType().getTypePtr())) {
- if (RegType == getRegisterType(AttrResType->getAttrs().ResourceClass))
+ if (RegType == getRegisterType(AttrResType))
return true;
S.Diag(D->getLocation(), diag::err_hlsl_binding_type_mismatch)
@@ -2446,8 +2467,8 @@ void SemaHLSL::ActOnEndOfTranslationUnit(TranslationUnitDecl *TU) {
HLSLBufferDecl *DefaultCBuffer = HLSLBufferDecl::CreateDefaultCBuffer(
SemaRef.getASTContext(), SemaRef.getCurLexicalContext(),
DefaultCBufferDecls);
- addImplicitBindingAttrToBuffer(SemaRef, DefaultCBuffer,
- getNextImplicitBindingOrderID());
+ addImplicitBindingAttrToDecl(SemaRef, DefaultCBuffer, RegisterType::CBuffer,
+ getNextImplicitBindingOrderID());
SemaRef.getCurLexicalContext()->addDecl(DefaultCBuffer);
createHostLayoutStructForBuffer(SemaRef, DefaultCBuffer);
@@ -3187,10 +3208,7 @@ static void BuildFlattenedTypeList(QualType BaseTy,
List.insert(List.end(), VT->getNumElements(), VT->getElementType());
continue;
}
- if (const auto *RT = dyn_cast<RecordType>(T)) {
- const CXXRecordDecl *RD = RT->getAsCXXRecordDecl();
- assert(RD && "HLSL record types should all be CXXRecordDecls!");
-
+ if (const auto *RD = T->getAsCXXRecordDecl()) {
if (RD->isStandardLayout())
RD = RD->getStandardLayoutBaseWithFields();
@@ -3396,7 +3414,7 @@ bool SemaHLSL::ContainsBitField(QualType BaseTy) {
continue;
}
if (const auto *RT = dyn_cast<RecordType>(T)) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isUnion())
continue;
@@ -3597,7 +3615,7 @@ void SemaHLSL::deduceAddressSpace(VarDecl *Decl) {
return;
// Resource handles.
- if (isResourceRecordTypeOrArrayOf(Type->getUnqualifiedDesugaredType()))
+ if (Type->isHLSLResourceRecord() || Type->isHLSLResourceRecordArray())
return;
// Only static globals belong to the Private address space.
@@ -3637,10 +3655,7 @@ void SemaHLSL::ActOnVariableDeclarator(VarDecl *VD) {
if (VD->getType()->isHLSLIntangibleType())
collectResourceBindingsOnVarDecl(VD);
- const Type *VarType = VD->getType().getTypePtr();
- while (VarType->isArrayType())
- VarType = VarType->getArrayElementTypeNoTypeQual();
- if (VarType->isHLSLResourceRecord() ||
+ if (isResourceRecordTypeOrArrayOf(VD) ||
VD->hasAttr<HLSLVkConstantIdAttr>()) {
// Make the variable for resources static. The global externally visible
// storage is accessed through the handle, which is a member. The variable
@@ -3650,6 +3665,24 @@ void SemaHLSL::ActOnVariableDeclarator(VarDecl *VD) {
// process explicit bindings
processExplicitBindingsOnDecl(VD);
+
+ if (VD->getType()->isHLSLResourceRecordArray()) {
+ // If the resource array does not have an explicit binding attribute,
+ // create an implicit one. It will be used to transfer the implicit
+ // binding order ID to codegen.
+ if (!VD->hasAttr<HLSLVkBindingAttr>()) {
+ HLSLResourceBindingAttr *RBA = VD->getAttr<HLSLResourceBindingAttr>();
+ if (!RBA || !RBA->hasRegisterSlot()) {
+ uint32_t OrderID = getNextImplicitBindingOrderID();
+ if (RBA)
+ RBA->setImplicitBindingOrderID(OrderID);
+ else
+ addImplicitBindingAttrToDecl(
+ SemaRef, VD, getRegisterType(getResourceArrayHandleType(VD)),
+ OrderID);
+ }
+ }
+ }
}
deduceAddressSpace(VD);
@@ -3675,11 +3708,12 @@ static bool initVarDeclWithCtor(Sema &S, VarDecl *VD,
return true;
}
-bool SemaHLSL::initGlobalResourceDecl(VarDecl *VD) {
+void SemaHLSL::createResourceRecordCtorArgs(
+ const Type *ResourceTy, StringRef VarName, HLSLResourceBindingAttr *RBA,
+ HLSLVkBindingAttr *VkBinding, uint32_t ArrayIndex,
+ llvm::SmallVectorImpl<Expr *> &Args) {
std::optional<uint32_t> RegisterSlot;
uint32_t SpaceNo = 0;
- HLSLVkBindingAttr *VkBinding = VD->getAttr<HLSLVkBindingAttr>();
- HLSLResourceBindingAttr *RBA = VD->getAttr<HLSLResourceBindingAttr>();
if (VkBinding) {
RegisterSlot = VkBinding->getBinding();
SpaceNo = VkBinding->getSet();
@@ -3694,12 +3728,12 @@ bool SemaHLSL::initGlobalResourceDecl(VarDecl *VD) {
uint64_t IntTySize = AST.getTypeSize(AST.IntTy);
IntegerLiteral *RangeSize = IntegerLiteral::Create(
AST, llvm::APInt(IntTySize, 1), AST.IntTy, SourceLocation());
- IntegerLiteral *Index = IntegerLiteral::Create(
- AST, llvm::APInt(UIntTySize, 0), AST.UnsignedIntTy, SourceLocation());
+ IntegerLiteral *Index =
+ IntegerLiteral::Create(AST, llvm::APInt(UIntTySize, ArrayIndex),
+ AST.UnsignedIntTy, SourceLocation());
IntegerLiteral *Space =
IntegerLiteral::Create(AST, llvm::APInt(UIntTySize, SpaceNo),
AST.UnsignedIntTy, SourceLocation());
- StringRef VarName = VD->getName();
StringLiteral *Name = StringLiteral::Create(
AST, VarName, StringLiteralKind::Ordinary, false,
AST.getStringLiteralArrayType(AST.CharTy.withConst(), VarName.size()),
@@ -3710,18 +3744,57 @@ bool SemaHLSL::initGlobalResourceDecl(VarDecl *VD) {
IntegerLiteral *RegSlot = IntegerLiteral::Create(
AST, llvm::APInt(UIntTySize, RegisterSlot.value()), AST.UnsignedIntTy,
SourceLocation());
- Expr *Args[] = {RegSlot, Space, RangeSize, Index, Name};
- return initVarDeclWithCtor(SemaRef, VD, Args);
+ Args.append({RegSlot, Space, RangeSize, Index, Name});
+ } else {
+ // resource with implicit binding
+ uint32_t OrderID = (RBA && RBA->hasImplicitBindingOrderID())
+ ? RBA->getImplicitBindingOrderID()
+ : getNextImplicitBindingOrderID();
+ IntegerLiteral *OrderId =
+ IntegerLiteral::Create(AST, llvm::APInt(UIntTySize, OrderID),
+ AST.UnsignedIntTy, SourceLocation());
+ Args.append({Space, RangeSize, Index, OrderId, Name});
}
+}
- // resource with implicit binding
- IntegerLiteral *OrderId = IntegerLiteral::Create(
- AST, llvm::APInt(UIntTySize, getNextImplicitBindingOrderID()),
- AST.UnsignedIntTy, SourceLocation());
- Expr *Args[] = {Space, RangeSize, Index, OrderId, Name};
+bool SemaHLSL::initGlobalResourceDecl(VarDecl *VD) {
+ SmallVector<Expr *> Args;
+ createResourceRecordCtorArgs(VD->getType().getTypePtr(), VD->getName(),
+ VD->getAttr<HLSLResourceBindingAttr>(),
+ VD->getAttr<HLSLVkBindingAttr>(), 0, Args);
return initVarDeclWithCtor(SemaRef, VD, Args);
}
+bool SemaHLSL::initGlobalResourceArrayDecl(VarDecl *VD) {
+ assert(VD->getType()->isHLSLResourceRecordArray() &&
+ "expected array of resource records");
+
+ // Individual resources in a resource array are not initialized here. They
+ // are initialized later on during codegen when the individual resources are
+ // accessed. Codegen will emit a call to the resource constructor with the
+ // specified array index. We do need to make sure, though, that the
+ // constructor for the specific resource type is instantiated, so codegen
+ // can emit a call to it when an array element is accessed.
+ SmallVector<Expr *> Args;
+ QualType ResElementTy = VD->getASTContext().getBaseElementType(VD->getType());
+ createResourceRecordCtorArgs(ResElementTy.getTypePtr(), VD->getName(),
+ VD->getAttr<HLSLResourceBindingAttr>(),
+ VD->getAttr<HLSLVkBindingAttr>(), 0, Args);
+
+ SourceLocation Loc = VD->getLocation();
+ InitializedEntity Entity =
+ InitializedEntity::InitializeTemporary(ResElementTy);
+ InitializationKind Kind = InitializationKind::CreateDirect(Loc, Loc, Loc);
+ InitializationSequence InitSeq(SemaRef, Entity, Kind, Args);
+ if (InitSeq.Failed())
+ return false;
+
+ // This takes care of instantiating and emitting the constructor that
+ // will be called from codegen when the array is accessed.
+ ExprResult OneResInit = InitSeq.Perform(SemaRef, Entity, Kind, Args);
+ return !OneResInit.isInvalid();
+}
+
// Returns true if the initialization has been handled.
// Returns false to use default initialization.
bool SemaHLSL::ActOnUninitializedVarDecl(VarDecl *VD) {
@@ -3730,17 +3803,14 @@ bool SemaHLSL::ActOnUninitializedVarDecl(VarDecl *VD) {
if (VD->getType().getAddressSpace() == LangAS::hlsl_constant)
return true;
- // Initialize resources
- if (!isResourceRecordTypeOrArrayOf(VD))
- return false;
-
- // FIXME: We currectly support only simple resources - no arrays of resources
- // or resources in user defined structs.
- // (llvm/llvm-project#133835, llvm/llvm-project#133837)
// Initialize resources at the global scope
- if (VD->hasGlobalStorage() && VD->getType()->isHLSLResourceRecord())
- return initGlobalResourceDecl(VD);
-
+ if (VD->hasGlobalStorage()) {
+ const Type *Ty = VD->getType().getTypePtr();
+ if (Ty->isHLSLResourceRecord())
+ return initGlobalResourceDecl(VD);
+ if (Ty->isHLSLResourceRecordArray())
+ return initGlobalResourceArrayDecl(VD);
+ }
return false;
}
@@ -3908,18 +3978,19 @@ class InitListTransformer {
return true;
}
- if (auto *RTy = Ty->getAs<RecordType>()) {
- llvm::SmallVector<const RecordType *> RecordTypes;
- RecordTypes.push_back(RTy);
- while (RecordTypes.back()->getAsCXXRecordDecl()->getNumBases()) {
- CXXRecordDecl *D = RecordTypes.back()->getAsCXXRecordDecl();
+ if (auto *RD = Ty->getAsCXXRecordDecl()) {
+ llvm::SmallVector<CXXRecordDecl *> RecordDecls;
+ RecordDecls.push_back(RD);
+ while (RecordDecls.back()->getNumBases()) {
+ CXXRecordDecl *D = RecordDecls.back();
assert(D->getNumBases() == 1 &&
"HLSL doesn't support multiple inheritance");
- RecordTypes.push_back(D->bases_begin()->getType()->getAs<RecordType>());
+ RecordDecls.push_back(
+ D->bases_begin()->getType()->castAsCXXRecordDecl());
}
- while (!RecordTypes.empty()) {
- const RecordType *RT = RecordTypes.pop_back_val();
- for (auto *FD : RT->getDecl()->fields()) {
+ while (!RecordDecls.empty()) {
+ CXXRecordDecl *RD = RecordDecls.pop_back_val();
+ for (auto *FD : RD->fields()) {
DeclAccessPair Found = DeclAccessPair::make(FD, FD->getAccess());
DeclarationNameInfo NameInfo(FD->getDeclName(), E->getBeginLoc());
ExprResult Res = S.BuildFieldReferenceExpr(
@@ -3956,20 +4027,20 @@ class InitListTransformer {
for (uint64_t I = 0; I < Size; ++I)
Inits.push_back(generateInitListsImpl(ElTy));
}
- if (auto *RTy = Ty->getAs<RecordType>()) {
- llvm::SmallVector<const RecordType *> RecordTypes;
- RecordTypes.push_back(RTy);
- while (RecordTypes.back()->getAsCXXRecordDecl()->getNumBases()) {
- CXXRecordDecl *D = RecordTypes.back()->getAsCXXRecordDecl();
+ if (auto *RD = Ty->getAsCXXRecordDecl()) {
+ llvm::SmallVector<CXXRecordDecl *> RecordDecls;
+ RecordDecls.push_back(RD);
+ while (RecordDecls.back()->getNumBases()) {
+ CXXRecordDecl *D = RecordDecls.back();
assert(D->getNumBases() == 1 &&
"HLSL doesn't support multiple inheritance");
- RecordTypes.push_back(D->bases_begin()->getType()->getAs<RecordType>());
+ RecordDecls.push_back(
+ D->bases_begin()->getType()->castAsCXXRecordDecl());
}
- while (!RecordTypes.empty()) {
- const RecordType *RT = RecordTypes.pop_back_val();
- for (auto *FD : RT->getDecl()->fields()) {
+ while (!RecordDecls.empty()) {
+ CXXRecordDecl *RD = RecordDecls.pop_back_val();
+ for (auto *FD : RD->fields())
Inits.push_back(generateInitListsImpl(FD->getType()));
- }
}
}
auto *NewInit = new (Ctx) InitListExpr(Ctx, Inits.front()->getBeginLoc(),
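
At the HLSL source level, the net effect of the SemaHLSL changes above can be
sketched as follows (hypothetical declarations; shown in HLSL rather than C++
since the inputs to this component are shader source):

    RWBuffer<float> Bufs[4];            // unbound resource array: receives an
                                        // implicit binding order ID, and the
                                        // element constructor is instantiated
                                        // via initGlobalResourceArrayDecl
    RWBuffer<float> Buf : register(u0); // explicit binding: scalar path via
                                        // initGlobalResourceDecl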
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index 1dd38c0..c971293 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -446,7 +446,6 @@ class InitListChecker {
unsigned ExpectedNumInits);
int numArrayElements(QualType DeclType);
int numStructUnionElements(QualType DeclType);
- static RecordDecl *getRecordDecl(QualType DeclType);
ExprResult PerformEmptyInit(SourceLocation Loc,
const InitializedEntity &Entity);
@@ -775,8 +774,8 @@ void InitListChecker::FillInEmptyInitForField(unsigned Init, FieldDecl *Field,
= InitializedEntity::InitializeMember(Field, &ParentEntity);
if (Init >= NumInits || !ILE->getInit(Init)) {
- if (const RecordType *RType = ILE->getType()->getAs<RecordType>())
- if (!RType->getDecl()->isUnion())
+ if (const RecordType *RType = ILE->getType()->getAsCanonical<RecordType>())
+ if (!RType->getOriginalDecl()->isUnion())
assert((Init < NumInits || VerifyOnly) &&
"This ILE should have been expanded");
@@ -922,8 +921,7 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
if (ILE->isTransparent())
return;
- if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
- const RecordDecl *RDecl = RType->getDecl();
+ if (const auto *RDecl = ILE->getType()->getAsRecordDecl()) {
if (RDecl->isUnion() && ILE->getInitializedFieldInUnion()) {
FillInEmptyInitForField(0, ILE->getInitializedFieldInUnion(), Entity, ILE,
RequiresSecondPass, FillWithNoInit);
@@ -1127,7 +1125,7 @@ int InitListChecker::numArrayElements(QualType DeclType) {
}
int InitListChecker::numStructUnionElements(QualType DeclType) {
- RecordDecl *structDecl = DeclType->castAs<RecordType>()->getDecl();
+ auto *structDecl = DeclType->castAsRecordDecl();
int InitializableMembers = 0;
if (auto *CXXRD = dyn_cast<CXXRecordDecl>(structDecl))
InitializableMembers += CXXRD->getNumBases();
@@ -1140,14 +1138,6 @@ int InitListChecker::numStructUnionElements(QualType DeclType) {
return InitializableMembers - structDecl->hasFlexibleArrayMember();
}
-RecordDecl *InitListChecker::getRecordDecl(QualType DeclType) {
- if (const auto *RT = DeclType->getAs<RecordType>())
- return RT->getDecl();
- if (const auto *Inject = DeclType->getAs<InjectedClassNameType>())
- return Inject->getDecl();
- return nullptr;
-}
-
/// Determine whether Entity is an entity for which it is idiomatic to elide
/// the braces in aggregate initialization.
static bool isIdiomaticBraceElisionEntity(const InitializedEntity &Entity) {
@@ -1164,16 +1154,14 @@ static bool isIdiomaticBraceElisionEntity(const InitializedEntity &Entity) {
// Allows elide brace initialization for aggregates with empty base.
if (Entity.getKind() == InitializedEntity::EK_Base) {
- auto *ParentRD =
- Entity.getParent()->getType()->castAs<RecordType>()->getDecl();
+ auto *ParentRD = Entity.getParent()->getType()->castAsRecordDecl();
CXXRecordDecl *CXXRD = cast<CXXRecordDecl>(ParentRD);
return CXXRD->getNumBases() == 1 && CXXRD->field_empty();
}
// Allow brace elision if the only subobject is a field.
if (Entity.getKind() == InitializedEntity::EK_Member) {
- auto *ParentRD =
- Entity.getParent()->getType()->castAs<RecordType>()->getDecl();
+ auto *ParentRD = Entity.getParent()->getType()->castAsRecordDecl();
if (CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(ParentRD)) {
if (CXXRD->getNumBases()) {
return false;
@@ -1442,7 +1430,7 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
} else if (DeclType->isVectorType()) {
CheckVectorType(Entity, IList, DeclType, Index,
StructuredList, StructuredIndex);
- } else if (const RecordDecl *RD = getRecordDecl(DeclType)) {
+ } else if (const RecordDecl *RD = DeclType->getAsRecordDecl()) {
auto Bases =
CXXRecordDecl::base_class_const_range(CXXRecordDecl::base_class_const_iterator(),
CXXRecordDecl::base_class_const_iterator());
@@ -2320,7 +2308,7 @@ void InitListChecker::CheckStructUnionTypes(
bool SubobjectIsDesignatorContext, unsigned &Index,
InitListExpr *StructuredList, unsigned &StructuredIndex,
bool TopLevelObject) {
- const RecordDecl *RD = getRecordDecl(DeclType);
+ const RecordDecl *RD = DeclType->getAsRecordDecl();
// If the record is invalid, some of its members are invalid. To avoid
// confusion, we forgo checking the initializer for the entire record.
@@ -2350,7 +2338,9 @@ void InitListChecker::CheckStructUnionTypes(
Field != FieldEnd; ++Field) {
if (Field->hasInClassInitializer() ||
(Field->isAnonymousStructOrUnion() &&
- Field->getType()->getAsCXXRecordDecl()->hasInClassInitializer())) {
+ Field->getType()
+ ->castAsCXXRecordDecl()
+ ->hasInClassInitializer())) {
StructuredList->setInitializedFieldInUnion(*Field);
// FIXME: Actually build a CXXDefaultInitExpr?
return;
@@ -2889,7 +2879,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// then the current object (defined below) shall have
// structure or union type and the identifier shall be the
// name of a member of that type.
- RecordDecl *RD = getRecordDecl(CurrentObjectType);
+ RecordDecl *RD = CurrentObjectType->getAsRecordDecl();
if (!RD) {
SourceLocation Loc = D->getDotLoc();
if (Loc.isInvalid())
@@ -3296,8 +3286,9 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
if (StringLiteral *SL = dyn_cast<StringLiteral>(SubExpr)) {
// Get the length of the string.
uint64_t StrLen = SL->getLength();
- if (cast<ConstantArrayType>(AT)->getSize().ult(StrLen))
- StrLen = cast<ConstantArrayType>(AT)->getZExtSize();
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(AT);
+ CAT && CAT->getSize().ult(StrLen))
+ StrLen = CAT->getZExtSize();
StructuredList->resizeInits(Context, StrLen);
// Build a literal for each character in the string, and put them into
@@ -3319,8 +3310,9 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Get the length of the string.
uint64_t StrLen = Str.size();
- if (cast<ConstantArrayType>(AT)->getSize().ult(StrLen))
- StrLen = cast<ConstantArrayType>(AT)->getZExtSize();
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(AT);
+ CAT && CAT->getSize().ult(StrLen))
+ StrLen = CAT->getZExtSize();
StructuredList->resizeInits(Context, StrLen);
// Build a literal for each character in the string, and put them into
@@ -4337,8 +4329,8 @@ static bool hasCopyOrMoveCtorParam(ASTContext &Ctx,
QualType ParmT =
Info.Constructor->getParamDecl(0)->getType().getNonReferenceType();
- QualType ClassT =
- Ctx.getRecordType(cast<CXXRecordDecl>(Info.FoundDecl->getDeclContext()));
+ CanQualType ClassT = Ctx.getCanonicalTagType(
+ cast<CXXRecordDecl>(Info.FoundDecl->getDeclContext()));
return Ctx.hasSameUnqualifiedType(ParmT, ClassT);
}
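
The check above asks whether a candidate constructor's first parameter, with references stripped, is the enclosing class itself, i.e. whether the candidate is shaped like a copy or move constructor. A minimal illustration (hypothetical type):

    struct S {
      S(const S &);  // ParmT is 'const S' after getNonReferenceType(): match
      S(S &&);       // likewise a match: a move constructor
      S(int);        // ParmT is 'int': not a copy/move shape
    };
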
@@ -4535,11 +4527,7 @@ static void TryConstructorInitialization(Sema &S,
}
}
- const RecordType *DestRecordType = DestType->getAs<RecordType>();
- assert(DestRecordType && "Constructor initialization requires record type");
- CXXRecordDecl *DestRecordDecl
- = cast<CXXRecordDecl>(DestRecordType->getDecl());
-
+ auto *DestRecordDecl = DestType->castAsCXXRecordDecl();
// Build the candidate set directly in the initialization sequence
// structure, so that it will persist if we fail.
OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
@@ -5026,7 +5014,7 @@ static void TryListInitialization(Sema &S,
// class type with a default constructor, the object is
// value-initialized.
if (InitList->getNumInits() == 0) {
- CXXRecordDecl *RD = DestType->getAsCXXRecordDecl();
+ CXXRecordDecl *RD = DestType->castAsCXXRecordDecl();
if (S.LookupDefaultConstructor(RD)) {
TryValueInitialization(S, Entity, Kind, Sequence, InitList);
return;
@@ -5057,10 +5045,9 @@ static void TryListInitialization(Sema &S,
// is direct-list-initialization, the object is initialized with the
// value T(v); if a narrowing conversion is required to convert v to
// the underlying type of T, the program is ill-formed.
- auto *ET = DestType->getAs<EnumType>();
if (S.getLangOpts().CPlusPlus17 &&
Kind.getKind() == InitializationKind::IK_DirectList &&
- ET && ET->getDecl()->isFixed() &&
+ DestType->isEnumeralType() && DestType->castAsEnumDecl()->isFixed() &&
!S.Context.hasSameUnqualifiedType(E->getType(), DestType) &&
(E->getType()->isIntegralOrUnscopedEnumerationType() ||
E->getType()->isFloatingType())) {
@@ -5165,13 +5152,13 @@ static OverloadingResult TryRefInitWithConversionFunction(
bool AllowExplicitCtors = false;
bool AllowExplicitConvs = Kind.allowExplicitConversionFunctionsInRefBinding();
- const RecordType *T1RecordType = nullptr;
- if (AllowRValues && (T1RecordType = T1->getAs<RecordType>()) &&
+ if (AllowRValues && T1->isRecordType() &&
S.isCompleteType(Kind.getLocation(), T1)) {
+ auto *T1RecordDecl = T1->castAsCXXRecordDecl();
+ if (T1RecordDecl->isInvalidDecl())
+ return OR_No_Viable_Function;
// The type we're converting to is a class type. Enumerate its constructors
// to see if there is a suitable conversion.
- CXXRecordDecl *T1RecordDecl = cast<CXXRecordDecl>(T1RecordType->getDecl());
-
for (NamedDecl *D : S.LookupConstructors(T1RecordDecl)) {
auto Info = getConstructorInfo(D);
if (!Info.Constructor)
@@ -5193,16 +5180,13 @@ static OverloadingResult TryRefInitWithConversionFunction(
}
}
}
- if (T1RecordType && T1RecordType->getDecl()->isInvalidDecl())
- return OR_No_Viable_Function;
- const RecordType *T2RecordType = nullptr;
- if ((T2RecordType = T2->getAs<RecordType>()) &&
- S.isCompleteType(Kind.getLocation(), T2)) {
+ if (T2->isRecordType() && S.isCompleteType(Kind.getLocation(), T2)) {
+ const auto *T2RecordDecl = T2->castAsCXXRecordDecl();
+ if (T2RecordDecl->isInvalidDecl())
+ return OR_No_Viable_Function;
// The type we're converting from is a class type; enumerate its conversion
// functions.
- CXXRecordDecl *T2RecordDecl = cast<CXXRecordDecl>(T2RecordType->getDecl());
-
const auto &Conversions = T2RecordDecl->getVisibleConversionFunctions();
for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
NamedDecl *D = *I;
@@ -5237,8 +5221,6 @@ static OverloadingResult TryRefInitWithConversionFunction(
}
}
}
- if (T2RecordType && T2RecordType->getDecl()->isInvalidDecl())
- return OR_No_Viable_Function;
SourceLocation DeclLoc = Initializer->getBeginLoc();
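
When AllowRValues is set, the two branches above enumerate converting constructors of the destination class (T1) and conversion functions of the source class (T2), e.g. (illustrative):

    struct A {};
    struct B { operator A() const; };
    const A &r = B();  // T2 == B: its conversion function yields an A temporary
    struct C { C(int); };
    const C &c = 42;   // T1 == C: its converting constructor is enumerated
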
@@ -5714,62 +5696,60 @@ static void TryValueInitialization(Sema &S,
// -- if T is an array type, then each element is value-initialized;
T = S.Context.getBaseElementType(T);
- if (const RecordType *RT = T->getAs<RecordType>()) {
- if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- bool NeedZeroInitialization = true;
- // C++98:
- // -- if T is a class type (clause 9) with a user-declared constructor
- // (12.1), then the default constructor for T is called (and the
- // initialization is ill-formed if T has no accessible default
- // constructor);
- // C++11:
- // -- if T is a class type (clause 9) with either no default constructor
- // (12.1 [class.ctor]) or a default constructor that is user-provided
- // or deleted, then the object is default-initialized;
- //
- // Note that the C++11 rule is the same as the C++98 rule if there are no
- // defaulted or deleted constructors, so we just use it unconditionally.
- CXXConstructorDecl *CD = S.LookupDefaultConstructor(ClassDecl);
- if (!CD || !CD->getCanonicalDecl()->isDefaulted() || CD->isDeleted())
- NeedZeroInitialization = false;
-
- // -- if T is a (possibly cv-qualified) non-union class type without a
- // user-provided or deleted default constructor, then the object is
- // zero-initialized and, if T has a non-trivial default constructor,
- // default-initialized;
- // The 'non-union' here was removed by DR1502. The 'non-trivial default
- // constructor' part was removed by DR1507.
- if (NeedZeroInitialization)
- Sequence.AddZeroInitializationStep(Entity.getType());
-
- // C++03:
- // -- if T is a non-union class type without a user-declared constructor,
- // then every non-static data member and base class component of T is
- // value-initialized;
- // [...] A program that calls for [...] value-initialization of an
- // entity of reference type is ill-formed.
- //
- // C++11 doesn't need this handling, because value-initialization does not
- // occur recursively there, and the implicit default constructor is
- // defined as deleted in the problematic cases.
- if (!S.getLangOpts().CPlusPlus11 &&
- ClassDecl->hasUninitializedReferenceMember()) {
- Sequence.SetFailed(InitializationSequence::FK_TooManyInitsForReference);
- return;
- }
-
- // If this is list-value-initialization, pass the empty init list on when
- // building the constructor call. This affects the semantics of a few
- // things (such as whether an explicit default constructor can be called).
- Expr *InitListAsExpr = InitList;
- MultiExprArg Args(&InitListAsExpr, InitList ? 1 : 0);
- bool InitListSyntax = InitList;
+ if (auto *ClassDecl = T->getAsCXXRecordDecl()) {
+ bool NeedZeroInitialization = true;
+ // C++98:
+ // -- if T is a class type (clause 9) with a user-declared constructor
+ // (12.1), then the default constructor for T is called (and the
+ // initialization is ill-formed if T has no accessible default
+ // constructor);
+ // C++11:
+ // -- if T is a class type (clause 9) with either no default constructor
+ // (12.1 [class.ctor]) or a default constructor that is user-provided
+ // or deleted, then the object is default-initialized;
+ //
+ // Note that the C++11 rule is the same as the C++98 rule if there are no
+ // defaulted or deleted constructors, so we just use it unconditionally.
+ CXXConstructorDecl *CD = S.LookupDefaultConstructor(ClassDecl);
+ if (!CD || !CD->getCanonicalDecl()->isDefaulted() || CD->isDeleted())
+ NeedZeroInitialization = false;
+
+ // -- if T is a (possibly cv-qualified) non-union class type without a
+ // user-provided or deleted default constructor, then the object is
+ // zero-initialized and, if T has a non-trivial default constructor,
+ // default-initialized;
+ // The 'non-union' here was removed by DR1502. The 'non-trivial default
+ // constructor' part was removed by DR1507.
+ if (NeedZeroInitialization)
+ Sequence.AddZeroInitializationStep(Entity.getType());
- // FIXME: Instead of creating a CXXConstructExpr of array type here,
- // wrap a class-typed CXXConstructExpr in an ArrayInitLoopExpr.
- return TryConstructorInitialization(
- S, Entity, Kind, Args, T, Entity.getType(), Sequence, InitListSyntax);
+ // C++03:
+ // -- if T is a non-union class type without a user-declared constructor,
+ // then every non-static data member and base class component of T is
+ // value-initialized;
+ // [...] A program that calls for [...] value-initialization of an
+ // entity of reference type is ill-formed.
+ //
+ // C++11 doesn't need this handling, because value-initialization does not
+ // occur recursively there, and the implicit default constructor is
+ // defined as deleted in the problematic cases.
+ if (!S.getLangOpts().CPlusPlus11 &&
+ ClassDecl->hasUninitializedReferenceMember()) {
+ Sequence.SetFailed(InitializationSequence::FK_TooManyInitsForReference);
+ return;
}
+
+ // If this is list-value-initialization, pass the empty init list on when
+ // building the constructor call. This affects the semantics of a few
+ // things (such as whether an explicit default constructor can be called).
+ Expr *InitListAsExpr = InitList;
+ MultiExprArg Args(&InitListAsExpr, InitList ? 1 : 0);
+ bool InitListSyntax = InitList;
+
+ // FIXME: Instead of creating a CXXConstructExpr of array type here,
+ // wrap a class-typed CXXConstructExpr in an ArrayInitLoopExpr.
+ return TryConstructorInitialization(
+ S, Entity, Kind, Args, T, Entity.getType(), Sequence, InitListSyntax);
}
Sequence.AddZeroInitializationStep(Entity.getType());
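
The standard rules quoted in the comments above reduce to cases like these (hypothetical classes, shown only to illustrate the zero-initialization decision):

    struct Trivial { int x; };                       // defaulted default ctor
    struct UserProvided { UserProvided(); int x; };  // user-provided ctor
    Trivial t{};       // zero-initialized first: t.x == 0
    UserProvided u{};  // default-initialized only: u.x is left indeterminate
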
@@ -5911,9 +5891,8 @@ static void TryOrBuildParenListInitialization(
AT->getElementType(), llvm::APInt(/*numBits=*/32, ArrayLength),
/*SizeExpr=*/nullptr, ArraySizeModifier::Normal, 0);
}
- } else if (auto *RT = Entity.getType()->getAs<RecordType>()) {
- bool IsUnion = RT->isUnionType();
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ } else if (auto *RD = Entity.getType()->getAsCXXRecordDecl()) {
+ bool IsUnion = RD->isUnion();
if (RD->isInvalidDecl()) {
// Exit early to avoid confusion when processing members.
// We do the same for braced list initialization in
@@ -6099,14 +6078,12 @@ static void TryUserDefinedConversion(Sema &S,
// explicit conversion operators.
bool AllowExplicit = Kind.AllowExplicit();
- if (const RecordType *DestRecordType = DestType->getAs<RecordType>()) {
+ if (DestType->isRecordType()) {
// The type we're converting to is a class type. Enumerate its constructors
// to see if there is a suitable conversion.
- CXXRecordDecl *DestRecordDecl
- = cast<CXXRecordDecl>(DestRecordType->getDecl());
-
// Try to complete the type we're converting to.
if (S.isCompleteType(Kind.getLocation(), DestType)) {
+ auto *DestRecordDecl = DestType->castAsCXXRecordDecl();
for (NamedDecl *D : S.LookupConstructors(DestRecordDecl)) {
auto Info = getConstructorInfo(D);
if (!Info.Constructor)
@@ -6132,16 +6109,14 @@ static void TryUserDefinedConversion(Sema &S,
SourceLocation DeclLoc = Initializer->getBeginLoc();
- if (const RecordType *SourceRecordType = SourceType->getAs<RecordType>()) {
+ if (SourceType->isRecordType()) {
// The type we're converting from is a class type; enumerate its conversion
// functions.
// We can only enumerate the conversion functions for a complete type; if
// the type isn't complete, simply skip this step.
if (S.isCompleteType(DeclLoc, SourceType)) {
- CXXRecordDecl *SourceRecordDecl
- = cast<CXXRecordDecl>(SourceRecordType->getDecl());
-
+ auto *SourceRecordDecl = SourceType->castAsCXXRecordDecl();
const auto &Conversions =
SourceRecordDecl->getVisibleConversionFunctions();
for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
@@ -6228,7 +6203,7 @@ static void TryUserDefinedConversion(Sema &S,
Sequence.AddUserConversionStep(Function, Best->FoundDecl, ConvType,
HadMultipleCandidates);
- if (ConvType->getAs<RecordType>()) {
+ if (ConvType->isRecordType()) {
// The call is used to direct-initialize [...] the object that is the
// destination of the copy-initialization.
//
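
TryUserDefinedConversion mirrors the usual split: converting constructors are enumerated on the destination class, conversion functions on the source class (illustrative):

    struct To { To(int); };
    To t = 42;          // constructors of 'To' are the candidates
    struct From { operator double() const; };
    double d = From();  // conversion functions of 'From' are the candidates
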
@@ -7171,9 +7146,7 @@ static ExprResult CopyObject(Sema &S,
return CurInit;
// Determine which class type we're copying to.
Expr *CurInitExpr = (Expr *)CurInit.get();
- CXXRecordDecl *Class = nullptr;
- if (const RecordType *Record = T->getAs<RecordType>())
- Class = cast<CXXRecordDecl>(Record->getDecl());
+ auto *Class = T->getAsCXXRecordDecl();
if (!Class)
return CurInit;
@@ -7318,7 +7291,7 @@ static void CheckCXX98CompatAccessibleCopy(Sema &S,
Expr *CurInitExpr) {
assert(S.getLangOpts().CPlusPlus11);
- const RecordType *Record = CurInitExpr->getType()->getAs<RecordType>();
+ auto *Record = CurInitExpr->getType()->getAsCXXRecordDecl();
if (!Record)
return;
@@ -7328,8 +7301,7 @@ static void CheckCXX98CompatAccessibleCopy(Sema &S,
// Find constructors which would have been considered.
OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
- DeclContext::lookup_result Ctors =
- S.LookupConstructors(cast<CXXRecordDecl>(Record->getDecl()));
+ DeclContext::lookup_result Ctors = S.LookupConstructors(Record);
// Perform overload resolution.
OverloadCandidateSet::iterator Best;
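
This -Wc++98-compat path flags copies that C++11 no longer performs but that C++98 required to be accessible, e.g. (illustrative):

    struct S {
      S();
      S(S &&);       // C++11 moves from the temporary
    private:
      S(const S &);  // C++98 needed this copy constructor to be accessible
    };
    S s = S();       // OK in C++11; ill-formed in C++98
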
@@ -7793,7 +7765,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
DiagID = diag::ext_default_init_const;
S.Diag(Kind.getLocation(), DiagID)
- << DestType << (bool)DestType->getAs<RecordType>()
+ << DestType << DestType->isRecordType()
<< FixItHint::CreateInsertion(ZeroInitializationFixitLoc,
ZeroInitializationFixit);
}
@@ -8159,9 +8131,8 @@ ExprResult InitializationSequence::Perform(Sema &S,
// FIXME: It makes no sense to do this here. This should happen
// regardless of how we initialized the entity.
QualType T = CurInit.get()->getType();
- if (const RecordType *Record = T->getAs<RecordType>()) {
- CXXDestructorDecl *Destructor
- = S.LookupDestructor(cast<CXXRecordDecl>(Record->getDecl()));
+ if (auto *Record = T->castAsCXXRecordDecl()) {
+ CXXDestructorDecl *Destructor = S.LookupDestructor(Record);
S.CheckDestructorAccess(CurInit.get()->getBeginLoc(), Destructor,
S.PDiag(diag::err_access_dtor_temp) << T);
S.MarkFunctionReferenced(CurInit.get()->getBeginLoc(), Destructor);
@@ -8549,9 +8520,8 @@ ExprResult InitializationSequence::Perform(Sema &S,
S.isStdInitializerList(Step->Type, &ElementType);
assert(IsStdInitializerList &&
"StdInitializerList step to non-std::initializer_list");
- const CXXRecordDecl *Record =
- Step->Type->getAsCXXRecordDecl()->getDefinition();
- assert(Record && Record->isCompleteDefinition() &&
+ const auto *Record = Step->Type->castAsCXXRecordDecl();
+ assert(Record->isCompleteDefinition() &&
"std::initializer_list should have already be "
"complete/instantiated by this point");
@@ -8808,8 +8778,8 @@ static void emitBadConversionNotes(Sema &S, const InitializedEntity &entity,
destPointeeType.getQualifiers().compatiblyIncludes(
fromPointeeType.getQualifiers(), S.getASTContext()))
S.Diag(fromDecl->getLocation(), diag::note_forward_class_conversion)
- << S.getASTContext().getTagDeclType(fromDecl)
- << S.getASTContext().getTagDeclType(destDecl);
+ << S.getASTContext().getCanonicalTagType(fromDecl)
+ << S.getASTContext().getCanonicalTagType(destDecl);
}
static void diagnoseListInit(Sema &S, const InitializedEntity &Entity,
@@ -9208,32 +9178,30 @@ bool InitializationSequence::Diagnose(Sema &S,
InheritedFrom = Inherited.getShadowDecl()->getNominatedBaseClass();
if (Entity.getKind() == InitializedEntity::EK_Base) {
S.Diag(Kind.getLocation(), diag::err_missing_default_ctor)
- << (InheritedFrom ? 2 : Constructor->isImplicit() ? 1 : 0)
- << S.Context.getTypeDeclType(Constructor->getParent())
- << /*base=*/0
- << Entity.getType()
- << InheritedFrom;
-
- RecordDecl *BaseDecl
- = Entity.getBaseSpecifier()->getType()->castAs<RecordType>()
- ->getDecl();
+ << (InheritedFrom ? 2
+ : Constructor->isImplicit() ? 1
+ : 0)
+ << S.Context.getCanonicalTagType(Constructor->getParent())
+ << /*base=*/0 << Entity.getType() << InheritedFrom;
+
+ auto *BaseDecl =
+ Entity.getBaseSpecifier()->getType()->castAsRecordDecl();
S.Diag(BaseDecl->getLocation(), diag::note_previous_decl)
- << S.Context.getTagDeclType(BaseDecl);
+ << S.Context.getCanonicalTagType(BaseDecl);
} else {
S.Diag(Kind.getLocation(), diag::err_missing_default_ctor)
- << (InheritedFrom ? 2 : Constructor->isImplicit() ? 1 : 0)
- << S.Context.getTypeDeclType(Constructor->getParent())
- << /*member=*/1
- << Entity.getName()
- << InheritedFrom;
+ << (InheritedFrom ? 2
+ : Constructor->isImplicit() ? 1
+ : 0)
+ << S.Context.getCanonicalTagType(Constructor->getParent())
+ << /*member=*/1 << Entity.getName() << InheritedFrom;
S.Diag(Entity.getDecl()->getLocation(),
diag::note_member_declared_at);
- if (const RecordType *Record
- = Entity.getType()->getAs<RecordType>())
- S.Diag(Record->getDecl()->getLocation(),
+ if (const auto *Record = Entity.getType()->getAs<RecordType>())
+ S.Diag(Record->getOriginalDecl()->getLocation(),
diag::note_previous_decl)
- << S.Context.getTagDeclType(Record->getDecl());
+ << S.Context.getCanonicalTagType(Record->getOriginalDecl());
}
break;
}
@@ -9300,11 +9268,11 @@ bool InitializationSequence::Diagnose(Sema &S,
// initialized.
CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(S.CurContext);
S.Diag(Kind.getLocation(), diag::err_uninitialized_member_in_ctor)
- << (Constructor->getInheritedConstructor() ? 2 :
- Constructor->isImplicit() ? 1 : 0)
- << S.Context.getTypeDeclType(Constructor->getParent())
- << /*const=*/1
- << Entity.getName();
+ << (Constructor->getInheritedConstructor() ? 2
+ : Constructor->isImplicit() ? 1
+ : 0)
+ << S.Context.getCanonicalTagType(Constructor->getParent())
+ << /*const=*/1 << Entity.getName();
S.Diag(Entity.getDecl()->getLocation(), diag::note_previous_decl)
<< Entity.getName();
} else if (const auto *VD = dyn_cast_if_present<VarDecl>(Entity.getDecl());
@@ -9313,7 +9281,7 @@ bool InitializationSequence::Diagnose(Sema &S,
<< VD;
} else {
S.Diag(Kind.getLocation(), diag::err_default_init_const)
- << DestType << (bool)DestType->getAs<RecordType>();
+ << DestType << DestType->isRecordType();
}
break;
@@ -10008,7 +9976,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
// dependent. e.g.
// using AliasFoo = Foo<bool>;
if (const auto *CTSD = llvm::dyn_cast<ClassTemplateSpecializationDecl>(
- RT->getAsCXXRecordDecl()))
+ RT->getOriginalDecl()))
Template = CTSD->getSpecializedTemplate();
}
}
@@ -10157,7 +10125,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
auto *RD = cast<CXXRecordDecl>(Pattern->getTemplatedDecl());
if (!(RD->getDefinition() && RD->isAggregate()))
return;
- QualType Ty = Context.getRecordType(RD);
+ QualType Ty = Context.getCanonicalTagType(RD);
SmallVector<QualType, 8> ElementTypes;
InitListChecker CheckInitList(*this, Entity, ListInit, Ty, ElementTypes);
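
This is the C++20 aggregate deduction candidate: if the primary template's pattern is a defined aggregate, the InitListChecker run computes the element types that drive deduction, as in (illustrative):

    template <class T> struct Pair { T a; T b; };
    Pair p{1, 2};  // aggregate deduction infers Pair<int>
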
@@ -10297,8 +10265,8 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
case OR_No_Viable_Function: {
CXXRecordDecl *Primary =
cast<ClassTemplateDecl>(Template)->getTemplatedDecl();
- bool Complete =
- isCompleteType(Kind.getLocation(), Context.getTypeDeclType(Primary));
+ bool Complete = isCompleteType(Kind.getLocation(),
+ Context.getCanonicalTagType(Primary));
Candidates.NoteCandidates(
PartialDiagnosticAt(
Kind.getLocation(),
diff --git a/clang/lib/Sema/SemaLambda.cpp b/clang/lib/Sema/SemaLambda.cpp
index bc3c4b0..fbc2e7e 100644
--- a/clang/lib/Sema/SemaLambda.cpp
+++ b/clang/lib/Sema/SemaLambda.cpp
@@ -425,7 +425,7 @@ bool Sema::DiagnoseInvalidExplicitObjectParameterInLambda(
.getNonReferenceType()
.getUnqualifiedType()
.getDesugaredType(getASTContext());
- QualType LambdaType = getASTContext().getRecordType(RD);
+ CanQualType LambdaType = getASTContext().getCanonicalTagType(RD);
if (LambdaType == ExplicitObjectParameterType)
return false;
@@ -457,7 +457,7 @@ bool Sema::DiagnoseInvalidExplicitObjectParameterInLambda(
return true;
}
- if (Paths.isAmbiguous(LambdaType->getCanonicalTypeUnqualified())) {
+ if (Paths.isAmbiguous(LambdaType)) {
std::string PathsDisplay = getAmbiguousPathsDisplayString(Paths);
Diag(CallLoc, diag::err_explicit_object_lambda_ambiguous_base)
<< LambdaType << PathsDisplay;
@@ -641,9 +641,8 @@ static EnumDecl *findEnumForBlockReturn(Expr *E) {
}
// - it is an expression of that formal enum type.
- if (const EnumType *ET = E->getType()->getAs<EnumType>()) {
- return ET->getDecl();
- }
+ if (auto *ED = E->getType()->getAsEnumDecl())
+ return ED;
// Otherwise, nope.
return nullptr;
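
The surrounding deduction logic lets a block whose returns are bare enumerators deduce the enum type rather than 'int' under the blocks extension, e.g. (illustrative):

    enum Color { Red, Green };
    // Both returns have formal type 'Color', so the block's return type is
    // deduced as 'Color':
    Color (^pick)(int) = ^(int i) { return i ? Green : Red; };
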
@@ -759,7 +758,7 @@ void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
assert(isa<BlockScopeInfo>(CSI));
const EnumDecl *ED = findCommonEnumForBlockReturns(CSI.Returns);
if (ED) {
- CSI.ReturnType = Context.getTypeDeclType(ED);
+ CSI.ReturnType = Context.getCanonicalTagType(ED);
adjustBlockReturnsToEnum(*this, CSI.Returns, CSI.ReturnType);
return;
}
@@ -1968,14 +1967,15 @@ ExprResult Sema::BuildCaptureInit(const Capture &Cap,
}
ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body) {
- LambdaScopeInfo LSI = *cast<LambdaScopeInfo>(FunctionScopes.back());
+ LambdaScopeInfo &LSI = *cast<LambdaScopeInfo>(FunctionScopes.back());
if (LSI.CallOperator->hasAttr<SYCLKernelEntryPointAttr>())
SYCL().CheckSYCLEntryPointFunctionDecl(LSI.CallOperator);
- ActOnFinishFunctionBody(LSI.CallOperator, Body);
+ ActOnFinishFunctionBody(LSI.CallOperator, Body, /*IsInstantiation=*/false,
+ /*RetainFunctionScopeInfo=*/true);
- return BuildLambdaExpr(StartLoc, Body->getEndLoc(), &LSI);
+ return BuildLambdaExpr(StartLoc, Body->getEndLoc());
}
static LambdaCaptureDefault
@@ -2132,156 +2132,149 @@ ConstructFixItRangeForUnusedCapture(Sema &S, SourceRange CaptureRange,
return SourceRange(FixItStart, FixItEnd);
}
-ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
- LambdaScopeInfo *LSI) {
+ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(FunctionScopes.back());
// Collect information from the lambda scope.
SmallVector<LambdaCapture, 4> Captures;
SmallVector<Expr *, 4> CaptureInits;
SourceLocation CaptureDefaultLoc = LSI->CaptureDefaultLoc;
LambdaCaptureDefault CaptureDefault =
mapImplicitCaptureStyle(LSI->ImpCaptureStyle);
- CXXRecordDecl *Class;
- CXXMethodDecl *CallOperator;
- SourceRange IntroducerRange;
- bool ExplicitParams;
- bool ExplicitResultType;
- CleanupInfo LambdaCleanup;
- bool ContainsUnexpandedParameterPack;
- bool IsGenericLambda;
- {
- CallOperator = LSI->CallOperator;
- Class = LSI->Lambda;
- IntroducerRange = LSI->IntroducerRange;
- ExplicitParams = LSI->ExplicitParams;
- ExplicitResultType = !LSI->HasImplicitReturnType;
- LambdaCleanup = LSI->Cleanup;
- ContainsUnexpandedParameterPack = LSI->ContainsUnexpandedParameterPack;
- IsGenericLambda = Class->isGenericLambda();
-
- CallOperator->setLexicalDeclContext(Class);
- Decl *TemplateOrNonTemplateCallOperatorDecl =
- CallOperator->getDescribedFunctionTemplate()
- ? CallOperator->getDescribedFunctionTemplate()
- : cast<Decl>(CallOperator);
-
- // FIXME: Is this really the best choice? Keeping the lexical decl context
- // set as CurContext seems more faithful to the source.
- TemplateOrNonTemplateCallOperatorDecl->setLexicalDeclContext(Class);
-
- PopExpressionEvaluationContext();
-
- // True if the current capture has a used capture or default before it.
- bool CurHasPreviousCapture = CaptureDefault != LCD_None;
- SourceLocation PrevCaptureLoc = CurHasPreviousCapture ?
- CaptureDefaultLoc : IntroducerRange.getBegin();
-
- for (unsigned I = 0, N = LSI->Captures.size(); I != N; ++I) {
- const Capture &From = LSI->Captures[I];
-
- if (From.isInvalid())
- return ExprError();
-
- assert(!From.isBlockCapture() && "Cannot capture __block variables");
- bool IsImplicit = I >= LSI->NumExplicitCaptures;
- SourceLocation ImplicitCaptureLoc =
- IsImplicit ? CaptureDefaultLoc : SourceLocation();
-
- // Use source ranges of explicit captures for fixits where available.
- SourceRange CaptureRange = LSI->ExplicitCaptureRanges[I];
-
- // Warn about unused explicit captures.
- bool IsCaptureUsed = true;
- if (!CurContext->isDependentContext() && !IsImplicit &&
- !From.isODRUsed()) {
- // Initialized captures that are non-ODR used may not be eliminated.
- // FIXME: Where did the IsGenericLambda here come from?
- bool NonODRUsedInitCapture =
- IsGenericLambda && From.isNonODRUsed() && From.isInitCapture();
- if (!NonODRUsedInitCapture) {
- bool IsLast = (I + 1) == LSI->NumExplicitCaptures;
- SourceRange FixItRange = ConstructFixItRangeForUnusedCapture(
- *this, CaptureRange, PrevCaptureLoc, CurHasPreviousCapture,
- IsLast);
- IsCaptureUsed =
- !DiagnoseUnusedLambdaCapture(CaptureRange, FixItRange, From);
- }
- }
+ CXXRecordDecl *Class = LSI->Lambda;
+ CXXMethodDecl *CallOperator = LSI->CallOperator;
+ SourceRange IntroducerRange = LSI->IntroducerRange;
+ bool ExplicitParams = LSI->ExplicitParams;
+ bool ExplicitResultType = !LSI->HasImplicitReturnType;
+ CleanupInfo LambdaCleanup = LSI->Cleanup;
+ bool ContainsUnexpandedParameterPack = LSI->ContainsUnexpandedParameterPack;
+ bool IsGenericLambda = Class->isGenericLambda();
+
+ CallOperator->setLexicalDeclContext(Class);
+ Decl *TemplateOrNonTemplateCallOperatorDecl =
+ CallOperator->getDescribedFunctionTemplate()
+ ? CallOperator->getDescribedFunctionTemplate()
+ : cast<Decl>(CallOperator);
+
+ // FIXME: Is this really the best choice? Keeping the lexical decl context
+ // set as CurContext seems more faithful to the source.
+ TemplateOrNonTemplateCallOperatorDecl->setLexicalDeclContext(Class);
- if (CaptureRange.isValid()) {
- CurHasPreviousCapture |= IsCaptureUsed;
- PrevCaptureLoc = CaptureRange.getEnd();
+ PopExpressionEvaluationContext();
+
+ sema::AnalysisBasedWarnings::Policy WP =
+ AnalysisWarnings.getPolicyInEffectAt(EndLoc);
+ // We cannot release LSI until we finish computing captures, which
+ // requires the scope to be popped.
+ Sema::PoppedFunctionScopePtr _ = PopFunctionScopeInfo(&WP, LSI->CallOperator);
+
+ // True if the current capture has a used capture or default before it.
+ bool CurHasPreviousCapture = CaptureDefault != LCD_None;
+ SourceLocation PrevCaptureLoc =
+ CurHasPreviousCapture ? CaptureDefaultLoc : IntroducerRange.getBegin();
+
+ for (unsigned I = 0, N = LSI->Captures.size(); I != N; ++I) {
+ const Capture &From = LSI->Captures[I];
+
+ if (From.isInvalid())
+ return ExprError();
+
+ assert(!From.isBlockCapture() && "Cannot capture __block variables");
+ bool IsImplicit = I >= LSI->NumExplicitCaptures;
+ SourceLocation ImplicitCaptureLoc =
+ IsImplicit ? CaptureDefaultLoc : SourceLocation();
+
+ // Use source ranges of explicit captures for fixits where available.
+ SourceRange CaptureRange = LSI->ExplicitCaptureRanges[I];
+
+ // Warn about unused explicit captures.
+ bool IsCaptureUsed = true;
+ if (!CurContext->isDependentContext() && !IsImplicit && !From.isODRUsed()) {
+ // Initialized captures that are non-ODR used may not be eliminated.
+ // FIXME: Where did the IsGenericLambda here come from?
+ bool NonODRUsedInitCapture =
+ IsGenericLambda && From.isNonODRUsed() && From.isInitCapture();
+ if (!NonODRUsedInitCapture) {
+ bool IsLast = (I + 1) == LSI->NumExplicitCaptures;
+ SourceRange FixItRange = ConstructFixItRangeForUnusedCapture(
+ *this, CaptureRange, PrevCaptureLoc, CurHasPreviousCapture, IsLast);
+ IsCaptureUsed =
+ !DiagnoseUnusedLambdaCapture(CaptureRange, FixItRange, From);
}
+ }
- // Map the capture to our AST representation.
- LambdaCapture Capture = [&] {
- if (From.isThisCapture()) {
- // Capturing 'this' implicitly with a default of '[=]' is deprecated,
- // because it results in a reference capture. Don't warn prior to
- // C++2a; there's nothing that can be done about it before then.
- if (getLangOpts().CPlusPlus20 && IsImplicit &&
- CaptureDefault == LCD_ByCopy) {
- Diag(From.getLocation(), diag::warn_deprecated_this_capture);
- Diag(CaptureDefaultLoc, diag::note_deprecated_this_capture)
- << FixItHint::CreateInsertion(
- getLocForEndOfToken(CaptureDefaultLoc), ", this");
- }
- return LambdaCapture(From.getLocation(), IsImplicit,
- From.isCopyCapture() ? LCK_StarThis : LCK_This);
- } else if (From.isVLATypeCapture()) {
- return LambdaCapture(From.getLocation(), IsImplicit, LCK_VLAType);
- } else {
- assert(From.isVariableCapture() && "unknown kind of capture");
- ValueDecl *Var = From.getVariable();
- LambdaCaptureKind Kind =
- From.isCopyCapture() ? LCK_ByCopy : LCK_ByRef;
- return LambdaCapture(From.getLocation(), IsImplicit, Kind, Var,
- From.getEllipsisLoc());
- }
- }();
+ if (CaptureRange.isValid()) {
+ CurHasPreviousCapture |= IsCaptureUsed;
+ PrevCaptureLoc = CaptureRange.getEnd();
+ }
- // Form the initializer for the capture field.
- ExprResult Init = BuildCaptureInit(From, ImplicitCaptureLoc);
+ // Map the capture to our AST representation.
+ LambdaCapture Capture = [&] {
+ if (From.isThisCapture()) {
+ // Capturing 'this' implicitly with a default of '[=]' is deprecated,
+ // because it results in a reference capture. Don't warn prior to
+ // C++2a; there's nothing that can be done about it before then.
+ if (getLangOpts().CPlusPlus20 && IsImplicit &&
+ CaptureDefault == LCD_ByCopy) {
+ Diag(From.getLocation(), diag::warn_deprecated_this_capture);
+ Diag(CaptureDefaultLoc, diag::note_deprecated_this_capture)
+ << FixItHint::CreateInsertion(
+ getLocForEndOfToken(CaptureDefaultLoc), ", this");
+ }
+ return LambdaCapture(From.getLocation(), IsImplicit,
+ From.isCopyCapture() ? LCK_StarThis : LCK_This);
+ } else if (From.isVLATypeCapture()) {
+ return LambdaCapture(From.getLocation(), IsImplicit, LCK_VLAType);
+ } else {
+ assert(From.isVariableCapture() && "unknown kind of capture");
+ ValueDecl *Var = From.getVariable();
+ LambdaCaptureKind Kind = From.isCopyCapture() ? LCK_ByCopy : LCK_ByRef;
+ return LambdaCapture(From.getLocation(), IsImplicit, Kind, Var,
+ From.getEllipsisLoc());
+ }
+ }();
- // FIXME: Skip this capture if the capture is not used, the initializer
- // has no side-effects, the type of the capture is trivial, and the
- // lambda is not externally visible.
+ // Form the initializer for the capture field.
+ ExprResult Init = BuildCaptureInit(From, ImplicitCaptureLoc);
- // Add a FieldDecl for the capture and form its initializer.
- BuildCaptureField(Class, From);
- Captures.push_back(Capture);
- CaptureInits.push_back(Init.get());
+ // FIXME: Skip this capture if the capture is not used, the initializer
+ // has no side-effects, the type of the capture is trivial, and the
+ // lambda is not externally visible.
- if (LangOpts.CUDA)
- CUDA().CheckLambdaCapture(CallOperator, From);
- }
+ // Add a FieldDecl for the capture and form its initializer.
+ BuildCaptureField(Class, From);
+ Captures.push_back(Capture);
+ CaptureInits.push_back(Init.get());
- Class->setCaptures(Context, Captures);
-
- // C++11 [expr.prim.lambda]p6:
- // The closure type for a lambda-expression with no lambda-capture
- // has a public non-virtual non-explicit const conversion function
- // to pointer to function having the same parameter and return
- // types as the closure type's function call operator.
- if (Captures.empty() && CaptureDefault == LCD_None)
- addFunctionPointerConversions(*this, IntroducerRange, Class,
- CallOperator);
-
- // Objective-C++:
- // The closure type for a lambda-expression has a public non-virtual
- // non-explicit const conversion function to a block pointer having the
- // same parameter and return types as the closure type's function call
- // operator.
- // FIXME: Fix generic lambda to block conversions.
- if (getLangOpts().Blocks && getLangOpts().ObjC && !IsGenericLambda)
- addBlockPointerConversion(*this, IntroducerRange, Class, CallOperator);
-
- // Finalize the lambda class.
- SmallVector<Decl*, 4> Fields(Class->fields());
- ActOnFields(nullptr, Class->getLocation(), Class, Fields, SourceLocation(),
- SourceLocation(), ParsedAttributesView());
- CheckCompletedCXXClass(nullptr, Class);
+ if (LangOpts.CUDA)
+ CUDA().CheckLambdaCapture(CallOperator, From);
}
+ Class->setCaptures(Context, Captures);
+
+ // C++11 [expr.prim.lambda]p6:
+ // The closure type for a lambda-expression with no lambda-capture
+ // has a public non-virtual non-explicit const conversion function
+ // to pointer to function having the same parameter and return
+ // types as the closure type's function call operator.
+ if (Captures.empty() && CaptureDefault == LCD_None)
+ addFunctionPointerConversions(*this, IntroducerRange, Class, CallOperator);
+
+ // Objective-C++:
+ // The closure type for a lambda-expression has a public non-virtual
+ // non-explicit const conversion function to a block pointer having the
+ // same parameter and return types as the closure type's function call
+ // operator.
+ // FIXME: Fix generic lambda to block conversions.
+ if (getLangOpts().Blocks && getLangOpts().ObjC && !IsGenericLambda)
+ addBlockPointerConversion(*this, IntroducerRange, Class, CallOperator);
+
+ // Finalize the lambda class.
+ SmallVector<Decl *, 4> Fields(Class->fields());
+ ActOnFields(nullptr, Class->getLocation(), Class, Fields, SourceLocation(),
+ SourceLocation(), ParsedAttributesView());
+ CheckCompletedCXXClass(nullptr, Class);
+
Cleanup.mergeFrom(LambdaCleanup);
LambdaExpr *Lambda =
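
The deprecation logic preserved in the restructured loop fires on implicit 'this' captures under a '[=]' default, matching the FixItHint above (illustrative):

    struct W {
      int n = 0;
      auto f() {
        return [=] { return n; };  // C++20: implicit 'this' capture deprecated
        // fix-it suggestion: [=, this] { return n; };
      }
    };
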
diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp
index dc73ded..86ffae9 100644
--- a/clang/lib/Sema/SemaLookup.cpp
+++ b/clang/lib/Sema/SemaLookup.cpp
@@ -563,9 +563,8 @@ void LookupResult::resolveKind() {
// no ambiguity if they all refer to the same type, so unique based on the
// canonical type.
if (const auto *TD = dyn_cast<TypeDecl>(D)) {
- QualType T = getSema().Context.getTypeDeclType(TD);
auto UniqueResult = UniqueTypes.insert(
- std::make_pair(getSema().Context.getCanonicalType(T), I));
+ std::make_pair(getSema().Context.getCanonicalTypeDeclType(TD), I));
if (!UniqueResult.second) {
// The type is not unique.
ExistingI = UniqueResult.first->second;
@@ -717,7 +716,7 @@ static QualType getOpenCLEnumType(Sema &S, llvm::StringRef Name) {
EnumDecl *Decl = Result.getAsSingle<EnumDecl>();
if (!Decl)
return diagOpenCLBuiltinTypeError(S, "enum", Name);
- return S.Context.getEnumType(Decl);
+ return S.Context.getCanonicalTagType(Decl);
}
/// Lookup an OpenCL typedef type.
@@ -730,7 +729,8 @@ static QualType getOpenCLTypedefType(Sema &S, llvm::StringRef Name) {
TypedefNameDecl *Decl = Result.getAsSingle<TypedefNameDecl>();
if (!Decl)
return diagOpenCLBuiltinTypeError(S, "typedef", Name);
- return S.Context.getTypedefType(Decl);
+ return S.Context.getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, Decl);
}
/// Get the QualType instances of the return type and arguments for an OpenCL
@@ -1001,7 +1001,7 @@ static void LookupPredefedObjCSuperType(Sema &Sema, Scope *S) {
Sema.LookupName(Result, S);
if (Result.getResultKind() == LookupResultKind::Found)
if (const TagDecl *TD = Result.getAsSingle<TagDecl>())
- Context.setObjCSuperType(Context.getTagDeclType(TD));
+ Context.setObjCSuperType(Context.getCanonicalTagType(TD));
}
void Sema::LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID) {
@@ -2435,12 +2435,12 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
if (!R.getLookupName())
return false;
+#ifndef NDEBUG
// Make sure that the declaration context is complete.
- assert((!isa<TagDecl>(LookupCtx) ||
- LookupCtx->isDependentContext() ||
- cast<TagDecl>(LookupCtx)->isCompleteDefinition() ||
- cast<TagDecl>(LookupCtx)->isBeingDefined()) &&
- "Declaration context must already be complete!");
+ if (const auto *TD = dyn_cast<TagDecl>(LookupCtx);
+ TD && !TD->isDependentType() && TD->getDefinition() == nullptr)
+ llvm_unreachable("Declaration context must already be complete!");
+#endif
struct QualifiedLookupInScope {
bool oldVal;
@@ -2596,10 +2596,8 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
// C++ [class.member.lookup]p3:
// type declarations (including injected-class-names) are replaced by
// the types they designate
- if (const TypeDecl *TD = dyn_cast<TypeDecl>(ND->getUnderlyingDecl())) {
- QualType T = Context.getTypeDeclType(TD);
- return T.getCanonicalType().getAsOpaquePtr();
- }
+ if (const TypeDecl *TD = dyn_cast<TypeDecl>(ND->getUnderlyingDecl()))
+ return Context.getCanonicalTypeDeclType(TD).getAsOpaquePtr();
return ND->getUnderlyingDecl()->getCanonicalDecl();
}
@@ -2704,12 +2702,10 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS) {
- auto *NNS = SS.getScopeRep();
- if (NNS && NNS->getKind() == NestedNameSpecifier::Super)
- return LookupInSuper(R, NNS->getAsRecordDecl());
- else
-
- return LookupQualifiedName(R, LookupCtx);
+ NestedNameSpecifier Qualifier = SS.getScopeRep();
+ if (Qualifier.getKind() == NestedNameSpecifier::Kind::MicrosoftSuper)
+ return LookupInSuper(R, Qualifier.getAsMicrosoftSuper());
+ return LookupQualifiedName(R, LookupCtx);
}
bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
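
For reference, '__super' is the Microsoft extension (-fms-extensions) that names the base classes of the current class:

    struct Base { void f(); };
    struct Derived : Base {
      void f() { __super::f(); }  // lookup of 'f' proceeds in Derived's bases
    };
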
@@ -2731,7 +2727,9 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
IsDependent = !DC && ObjectType->isDependentType();
assert(((!DC && ObjectType->isDependentType()) ||
!ObjectType->isIncompleteType() || !ObjectType->getAs<TagType>() ||
- ObjectType->castAs<TagType>()->isBeingDefined()) &&
+ ObjectType->castAs<TagType>()
+ ->getOriginalDecl()
+ ->isEntityBeingDefined()) &&
"Caller should have completed object type");
} else if (SS && SS->isNotEmpty()) {
// This nested-name-specifier occurs after another nested-name-specifier,
@@ -2744,9 +2742,9 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
// FIXME: '__super' lookup semantics could be implemented by a
// LookupResult::isSuperLookup flag which skips the initial search of
// the lookup context in LookupQualified.
- if (NestedNameSpecifier *NNS = SS->getScopeRep();
- NNS->getKind() == NestedNameSpecifier::Super)
- return LookupInSuper(R, NNS->getAsRecordDecl());
+ if (NestedNameSpecifier Qualifier = SS->getScopeRep();
+ Qualifier.getKind() == NestedNameSpecifier::Kind::MicrosoftSuper)
+ return LookupInSuper(R, Qualifier.getAsMicrosoftSuper());
}
IsDependent = !DC && isDependentScopeSpecifier(*SS);
} else {
@@ -2772,10 +2770,9 @@ bool Sema::LookupInSuper(LookupResult &R, CXXRecordDecl *Class) {
// members of Class itself. That is, the naming class is Class, and the
// access includes the access of the base.
for (const auto &BaseSpec : Class->bases()) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(
- BaseSpec.getType()->castAs<RecordType>()->getDecl());
+ auto *RD = BaseSpec.getType()->castAsCXXRecordDecl();
LookupResult Result(*this, R.getLookupNameInfo(), R.getLookupKind());
- Result.setBaseObjectType(Context.getRecordType(Class));
+ Result.setBaseObjectType(Context.getCanonicalTagType(Class));
LookupQualifiedName(Result, RD);
// Copy the lookup results into the target, merging the base's access into
@@ -3101,7 +3098,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
// Only recurse into base classes for complete types.
if (!Result.S.isCompleteType(Result.InstantiationLoc,
- Result.S.Context.getRecordType(Class)))
+ Result.S.Context.getCanonicalTagType(Class)))
return;
// Add direct and indirect base classes along with their associated
@@ -3114,16 +3111,15 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
// Visit the base classes.
for (const auto &Base : Class->bases()) {
- const RecordType *BaseType = Base.getType()->getAs<RecordType>();
+ CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
// In dependent contexts, we do ADL twice, and the first time around,
// the base type might be a dependent TemplateSpecializationType, or a
// TemplateTypeParmType. If that happens, simply ignore it.
// FIXME: If we want to support export, we probably need to add the
// namespace of the template in a TemplateSpecializationType, or even
// the classes and namespaces of known non-dependent arguments.
- if (!BaseType)
+ if (!BaseDecl)
continue;
- CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(BaseType->getDecl());
if (Result.addClassTransitive(BaseDecl)) {
// Find the associated namespace for this base class.
DeclContext *BaseCtx = BaseDecl->getDeclContext();
@@ -3194,8 +3190,10 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
// Its associated namespaces are the innermost enclosing
// namespaces of its associated classes.
case Type::Record: {
+ // FIXME: This should use the original decl.
CXXRecordDecl *Class =
- cast<CXXRecordDecl>(cast<RecordType>(T)->getDecl());
+ cast<CXXRecordDecl>(cast<RecordType>(T)->getOriginalDecl())
+ ->getDefinitionOrSelf();
addAssociatedClassesAndNamespaces(Result, Class);
break;
}
@@ -3205,7 +3203,8 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
// If it is a class member, its associated class is the
// member’s class; else it has no associated class.
case Type::Enum: {
- EnumDecl *Enum = cast<EnumType>(T)->getDecl();
+ // FIXME: This should use the original decl.
+ auto *Enum = T->castAsEnumDecl();
DeclContext *Ctx = Enum->getDeclContext();
if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
@@ -3438,7 +3437,7 @@ Sema::LookupSpecialMember(CXXRecordDecl *RD, CXXSpecialMemberKind SM,
// Prepare for overload resolution. Here we construct a synthetic argument
// if necessary and make sure that implicit functions are declared.
- CanQualType CanTy = Context.getCanonicalType(Context.getTagDeclType(RD));
+ CanQualType CanTy = Context.getCanonicalTagType(RD);
DeclarationName Name;
Expr *Arg = nullptr;
unsigned NumArgs;
@@ -3645,7 +3644,7 @@ DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
});
}
- CanQualType T = Context.getCanonicalType(Context.getTypeDeclType(Class));
+ CanQualType T = Context.getCanonicalTagType(Class);
DeclarationName Name = Context.DeclarationNames.getCXXConstructorName(T);
return Class->lookup(Name);
}
@@ -4257,10 +4256,9 @@ private:
continue;
RD = TD->getTemplatedDecl();
} else {
- const auto *Record = BaseType->getAs<RecordType>();
- if (!Record)
+ RD = BaseType->getAsCXXRecordDecl();
+ if (!RD)
continue;
- RD = Record->getDecl();
}
// FIXME: It would be nice to be able to determine whether referencing
@@ -4546,40 +4544,101 @@ static void checkCorrectionVisibility(Sema &SemaRef, TypoCorrection &TC) {
// the given NestedNameSpecifier (i.e. given a NestedNameSpecifier "foo::bar::",
// fill the vector with the IdentifierInfo pointers for "foo" and "bar").
static void getNestedNameSpecifierIdentifiers(
- NestedNameSpecifier *NNS,
- SmallVectorImpl<const IdentifierInfo*> &Identifiers) {
- if (NestedNameSpecifier *Prefix = NNS->getPrefix())
- getNestedNameSpecifierIdentifiers(Prefix, Identifiers);
- else
+ NestedNameSpecifier NNS,
+ SmallVectorImpl<const IdentifierInfo *> &Identifiers) {
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
Identifiers.clear();
+ return;
- const IdentifierInfo *II = nullptr;
-
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- II = NNS->getAsIdentifier();
- break;
-
- case NestedNameSpecifier::Namespace: {
- const NamespaceBaseDecl *Namespace = NNS->getAsNamespace();
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = NNS.getAsNamespaceAndPrefix();
+ getNestedNameSpecifierIdentifiers(Prefix, Identifiers);
if (const auto *NS = dyn_cast<NamespaceDecl>(Namespace);
NS && NS->isAnonymousNamespace())
return;
- II = Namespace->getIdentifier();
- break;
+ Identifiers.push_back(Namespace->getIdentifier());
+ return;
}
- case NestedNameSpecifier::TypeSpec:
- II = QualType(NNS->getAsType(), 0).getBaseTypeIdentifier();
+ case NestedNameSpecifier::Kind::Type: {
+ for (const Type *T = NNS.getAsType(); /**/; /**/) {
+ switch (T->getTypeClass()) {
+ case Type::DependentName: {
+ auto *DT = cast<DependentNameType>(T);
+ getNestedNameSpecifierIdentifiers(DT->getQualifier(), Identifiers);
+ Identifiers.push_back(DT->getIdentifier());
+ return;
+ }
+ case Type::TemplateSpecialization: {
+ TemplateName Name =
+ cast<TemplateSpecializationType>(T)->getTemplateName();
+ if (const QualifiedTemplateName *QTN =
+ Name.getAsQualifiedTemplateName()) {
+ getNestedNameSpecifierIdentifiers(QTN->getQualifier(), Identifiers);
+ Name = QTN->getUnderlyingTemplate();
+ }
+ if (const auto *TD = Name.getAsTemplateDecl(/*IgnoreDeduced=*/true))
+ Identifiers.push_back(TD->getIdentifier());
+ return;
+ }
+ case Type::DependentTemplateSpecialization: {
+ const DependentTemplateStorage &S =
+ cast<DependentTemplateSpecializationType>(T)
+ ->getDependentTemplateName();
+ getNestedNameSpecifierIdentifiers(S.getQualifier(), Identifiers);
+ // FIXME: Should this dig into the Name as well?
+ // Identifiers.push_back(S.getName().getIdentifier());
+ return;
+ }
+ case Type::SubstTemplateTypeParm:
+ T = cast<SubstTemplateTypeParmType>(T)
+ ->getReplacementType()
+ .getTypePtr();
+ continue;
+ case Type::TemplateTypeParm:
+ Identifiers.push_back(cast<TemplateTypeParmType>(T)->getIdentifier());
+ return;
+ case Type::Decltype:
+ return;
+ case Type::Enum:
+ case Type::Record:
+ case Type::InjectedClassName: {
+ auto *TT = cast<TagType>(T);
+ getNestedNameSpecifierIdentifiers(TT->getQualifier(), Identifiers);
+ Identifiers.push_back(TT->getOriginalDecl()->getIdentifier());
+ return;
+ }
+ case Type::Typedef: {
+ auto *TT = cast<TypedefType>(T);
+ getNestedNameSpecifierIdentifiers(TT->getQualifier(), Identifiers);
+ Identifiers.push_back(TT->getDecl()->getIdentifier());
+ return;
+ }
+ case Type::Using: {
+ auto *TT = cast<UsingType>(T);
+ getNestedNameSpecifierIdentifiers(TT->getQualifier(), Identifiers);
+ Identifiers.push_back(TT->getDecl()->getIdentifier());
+ return;
+ }
+ case Type::UnresolvedUsing: {
+ auto *TT = cast<UnresolvedUsingType>(T);
+ getNestedNameSpecifierIdentifiers(TT->getQualifier(), Identifiers);
+ Identifiers.push_back(TT->getDecl()->getIdentifier());
+ return;
+ }
+ default:
+ Identifiers.push_back(QualType(T, 0).getBaseTypeIdentifier());
+ return;
+ }
+ }
break;
+ }
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
return;
}
-
- if (II)
- Identifiers.push_back(II);
}
void TypoCorrectionConsumer::FoundDecl(NamedDecl *ND, NamedDecl *Hiding,
@@ -4612,11 +4671,11 @@ void TypoCorrectionConsumer::FoundName(StringRef Name) {
void TypoCorrectionConsumer::addKeywordResult(StringRef Keyword) {
// Compute the edit distance between the typo and this keyword,
// and add the keyword to the list of results.
- addName(Keyword, nullptr, nullptr, true);
+ addName(Keyword, /*ND=*/nullptr, /*NNS=*/std::nullopt, /*isKeyword=*/true);
}
void TypoCorrectionConsumer::addName(StringRef Name, NamedDecl *ND,
- NestedNameSpecifier *NNS, bool isKeyword) {
+ NestedNameSpecifier NNS, bool isKeyword) {
// Use a simple length-based heuristic to determine the minimum possible
// edit distance. If the minimum isn't good enough, bail out early.
StringRef TypoStr = Typo->getName();
@@ -4708,10 +4767,10 @@ void TypoCorrectionConsumer::addNamespaces(
Namespaces.addNameSpecifier(KNPair.first);
bool SSIsTemplate = false;
- if (NestedNameSpecifier *NNS =
- (SS && SS->isValid()) ? SS->getScopeRep() : nullptr) {
- if (const Type *T = NNS->getAsType())
- SSIsTemplate = T->getTypeClass() == Type::TemplateSpecialization;
+ if (NestedNameSpecifier NNS = (SS ? SS->getScopeRep() : std::nullopt)) {
+ if (NNS.getKind() == NestedNameSpecifier::Kind::Type)
+ SSIsTemplate =
+ NNS.getAsType()->getTypeClass() == Type::TemplateSpecialization;
}
// Do not transform this into an iterator-based loop. The loop body can
// trigger the creation of further types (through lazy deserialization) and
@@ -4813,17 +4872,15 @@ void TypoCorrectionConsumer::performQualifiedLookups() {
for (const TypoCorrection &QR : QualifiedResults) {
for (const auto &NSI : Namespaces) {
DeclContext *Ctx = NSI.DeclCtx;
- const Type *NSType = NSI.NameSpecifier->getAsType();
+ CXXRecordDecl *NamingClass = NSI.NameSpecifier.getAsRecordDecl();
// If the current NestedNameSpecifier refers to a class and the
// current correction candidate is the name of that class, then skip
// it as it is unlikely a qualified version of the class' constructor
// is an appropriate correction.
- if (CXXRecordDecl *NSDecl = NSType ? NSType->getAsCXXRecordDecl() :
- nullptr) {
- if (NSDecl->getIdentifier() == QR.getCorrectionAsIdentifierInfo())
- continue;
- }
+ if (NamingClass &&
+ NamingClass->getIdentifier() == QR.getCorrectionAsIdentifierInfo())
+ continue;
TypoCorrection TC(QR);
TC.ClearCorrectionDecls();
@@ -4853,7 +4910,7 @@ void TypoCorrectionConsumer::performQualifiedLookups() {
std::string NewQualified = TC.getAsString(SemaRef.getLangOpts());
std::string OldQualified;
llvm::raw_string_ostream OldOStream(OldQualified);
- SS->getScopeRep()->print(OldOStream, SemaRef.getPrintingPolicy());
+ SS->getScopeRep().print(OldOStream, SemaRef.getPrintingPolicy());
OldOStream << Typo->getName();
// If the correction candidate would be an identically written qualified
// identifier, then the existing CXXScopeSpec probably included a
@@ -4864,8 +4921,7 @@ void TypoCorrectionConsumer::performQualifiedLookups() {
for (LookupResult::iterator TRD = Result.begin(), TRDEnd = Result.end();
TRD != TRDEnd; ++TRD) {
if (SemaRef.CheckMemberAccess(TC.getCorrectionRange().getBegin(),
- NSType ? NSType->getAsCXXRecordDecl()
- : nullptr,
+ NamingClass,
TRD.getPair()) == Sema::AR_accessible)
TC.addCorrectionDecl(*TRD);
}
@@ -4889,10 +4945,10 @@ void TypoCorrectionConsumer::performQualifiedLookups() {
TypoCorrectionConsumer::NamespaceSpecifierSet::NamespaceSpecifierSet(
ASTContext &Context, DeclContext *CurContext, CXXScopeSpec *CurScopeSpec)
: Context(Context), CurContextChain(buildContextChain(CurContext)) {
- if (NestedNameSpecifier *NNS =
- CurScopeSpec ? CurScopeSpec->getScopeRep() : nullptr) {
+ if (NestedNameSpecifier NNS =
+ CurScopeSpec ? CurScopeSpec->getScopeRep() : std::nullopt) {
llvm::raw_string_ostream SpecifierOStream(CurNameSpecifier);
- NNS->print(SpecifierOStream, Context.getPrintingPolicy());
+ NNS.print(SpecifierOStream, Context.getPrintingPolicy());
getNestedNameSpecifierIdentifiers(NNS, CurNameSpecifierIdentifiers);
}
@@ -4906,7 +4962,7 @@ TypoCorrectionConsumer::NamespaceSpecifierSet::NamespaceSpecifierSet(
// Add the global context as a NestedNameSpecifier
SpecifierInfo SI = {cast<DeclContext>(Context.getTranslationUnitDecl()),
- NestedNameSpecifier::GlobalSpecifier(Context), 1};
+ NestedNameSpecifier::getGlobal(), 1};
DistanceMap[1].push_back(SI);
}
@@ -4926,14 +4982,16 @@ auto TypoCorrectionConsumer::NamespaceSpecifierSet::buildContextChain(
unsigned
TypoCorrectionConsumer::NamespaceSpecifierSet::buildNestedNameSpecifier(
- DeclContextList &DeclChain, NestedNameSpecifier *&NNS) {
+ DeclContextList &DeclChain, NestedNameSpecifier &NNS) {
unsigned NumSpecifiers = 0;
for (DeclContext *C : llvm::reverse(DeclChain)) {
if (auto *ND = dyn_cast_or_null<NamespaceDecl>(C)) {
- NNS = NestedNameSpecifier::Create(Context, NNS, ND);
+ NNS = NestedNameSpecifier(Context, ND, NNS);
++NumSpecifiers;
} else if (auto *RD = dyn_cast_or_null<RecordDecl>(C)) {
- NNS = NestedNameSpecifier::Create(Context, NNS, RD->getTypeForDecl());
+ QualType T = Context.getTagType(ElaboratedTypeKeyword::None, NNS, RD,
+ /*OwnsTag=*/false);
+ NNS = NestedNameSpecifier(T.getTypePtr());
++NumSpecifiers;
}
}
@@ -4942,7 +5000,7 @@ TypoCorrectionConsumer::NamespaceSpecifierSet::buildNestedNameSpecifier(
void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier(
DeclContext *Ctx) {
- NestedNameSpecifier *NNS = nullptr;
+ NestedNameSpecifier NNS = std::nullopt;
unsigned NumSpecifiers = 0;
DeclContextList NamespaceDeclChain(buildContextChain(Ctx));
DeclContextList FullNamespaceDeclChain(NamespaceDeclChain);
@@ -4960,7 +5018,7 @@ void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier(
// Add an explicit leading '::' specifier if needed.
if (NamespaceDeclChain.empty()) {
// Rebuild the NestedNameSpecifier as a globally-qualified specifier.
- NNS = NestedNameSpecifier::GlobalSpecifier(Context);
+ NNS = NestedNameSpecifier::getGlobal();
NumSpecifiers =
buildNestedNameSpecifier(FullNamespaceDeclChain, NNS);
} else if (NamedDecl *ND =
@@ -4972,12 +5030,12 @@ void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier(
llvm::raw_string_ostream SpecifierOStream(NewNameSpecifier);
SmallVector<const IdentifierInfo *, 4> NewNameSpecifierIdentifiers;
getNestedNameSpecifierIdentifiers(NNS, NewNameSpecifierIdentifiers);
- NNS->print(SpecifierOStream, Context.getPrintingPolicy());
+ NNS.print(SpecifierOStream, Context.getPrintingPolicy());
SameNameSpecifier = NewNameSpecifier == CurNameSpecifier;
}
if (SameNameSpecifier || llvm::is_contained(CurContextIdentifiers, Name)) {
// Rebuild the NestedNameSpecifier as a globally-qualified specifier.
- NNS = NestedNameSpecifier::GlobalSpecifier(Context);
+ NNS = NestedNameSpecifier::getGlobal();
NumSpecifiers =
buildNestedNameSpecifier(FullNamespaceDeclChain, NNS);
}
@@ -5463,7 +5521,7 @@ std::string TypoCorrection::getAsString(const LangOptions &LO) const {
if (CorrectionNameSpec) {
std::string tmpBuffer;
llvm::raw_string_ostream PrefixOStream(tmpBuffer);
- CorrectionNameSpec->print(PrefixOStream, PrintingPolicy(LO));
+ CorrectionNameSpec.print(PrefixOStream, PrintingPolicy(LO));
PrefixOStream << CorrectionName;
return PrefixOStream.str();
}
diff --git a/clang/lib/Sema/SemaModule.cpp b/clang/lib/Sema/SemaModule.cpp
index ff9f85f..773bcb2 100644
--- a/clang/lib/Sema/SemaModule.cpp
+++ b/clang/lib/Sema/SemaModule.cpp
@@ -137,7 +137,7 @@ makeTransitiveImportsVisible(ASTContext &Ctx, VisibleModuleSet &VisibleModules,
"modules only.");
llvm::SmallVector<Module *, 4> Worklist;
- llvm::SmallSet<Module *, 16> Visited;
+ llvm::SmallPtrSet<Module *, 16> Visited;
Worklist.push_back(Imported);
Module *FoundPrimaryModuleInterface =
@@ -265,10 +265,11 @@ Sema::DeclGroupPtrTy
Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
ModuleDeclKind MDK, ModuleIdPath Path,
ModuleIdPath Partition, ModuleImportState &ImportState,
- bool IntroducerIsFirstPPToken) {
+ bool SeenNoTrivialPPDirective) {
assert(getLangOpts().CPlusPlusModules &&
"should only have module decl in standard C++ modules");
+ bool IsFirstDecl = ImportState == ModuleImportState::FirstDecl;
bool SeenGMF = ImportState == ModuleImportState::GlobalFragment;
// If any of the steps here fail, we count that as invalidating C++20
// module state;
@@ -336,7 +337,8 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
  // In C++20, a module directive may only appear as the first preprocessing
  // tokens in a file (excluding the global module fragment).
- if (getLangOpts().CPlusPlusModules && !IntroducerIsFirstPPToken && !SeenGMF) {
+ if (getLangOpts().CPlusPlusModules &&
+ (!IsFirstDecl || SeenNoTrivialPPDirective) && !SeenGMF) {
Diag(ModuleLoc, diag::err_module_decl_not_at_start);
SourceLocation BeginLoc = PP.getMainFileFirstPPTokenLoc();
Diag(BeginLoc, diag::note_global_module_introducer_missing)
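
A minimal sketch of the rule this hunk enforces, assuming a file compiled with -std=c++20:

  module;             // optional global module fragment
  #include <cstddef>  // directives like this belong in the global fragment

  export module demo; // OK: the first declaration of the translation unit

  export int answer() { return 42; }

With the renamed SeenNoTrivialPPDirective flag, any earlier declaration, or a non-trivial preprocessor directive outside the global module fragment, before 'export module demo;' now triggers err_module_decl_not_at_start.
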
diff --git a/clang/lib/Sema/SemaObjC.cpp b/clang/lib/Sema/SemaObjC.cpp
index 0f39a98..4f9470a 100644
--- a/clang/lib/Sema/SemaObjC.cpp
+++ b/clang/lib/Sema/SemaObjC.cpp
@@ -691,7 +691,7 @@ static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
if (!anyPackExpansions && finalTypeArgs.size() != numTypeParams) {
S.Diag(loc, diag::err_objc_type_args_wrong_arity)
<< (typeArgs.size() < typeParams->size()) << objcClass->getDeclName()
- << (unsigned)finalTypeArgs.size() << (unsigned)numTypeParams;
+ << (unsigned)finalTypeArgs.size() << numTypeParams;
S.Diag(objcClass->getLocation(), diag::note_previous_decl) << objcClass;
if (failOnError)
@@ -1380,7 +1380,7 @@ SemaObjC::ObjCSubscriptKind SemaObjC::CheckSubscriptingKind(Expr *FromE) {
// If we don't have a class type in C++, there's no way we can get an
// expression of integral or enumeration type.
- const RecordType *RecordTy = T->getAs<RecordType>();
+ const RecordType *RecordTy = T->getAsCanonical<RecordType>();
if (!RecordTy && (T->isObjCObjectPointerType() || T->isVoidPointerType()))
// All other scalar cases are assumed to be dictionary indexing which
// caller handles, with diagnostics if needed.
@@ -1407,7 +1407,8 @@ SemaObjC::ObjCSubscriptKind SemaObjC::CheckSubscriptingKind(Expr *FromE) {
int NoIntegrals = 0, NoObjCIdPointers = 0;
SmallVector<CXXConversionDecl *, 4> ConversionDecls;
- for (NamedDecl *D : cast<CXXRecordDecl>(RecordTy->getDecl())
+ for (NamedDecl *D : cast<CXXRecordDecl>(RecordTy->getOriginalDecl())
+ ->getDefinitionOrSelf()
->getVisibleConversionFunctions()) {
if (CXXConversionDecl *Conversion =
dyn_cast<CXXConversionDecl>(D->getUnderlyingDecl())) {
@@ -1506,11 +1507,11 @@ bool SemaObjC::isCFStringType(QualType T) {
if (!PT)
return false;
- const auto *RT = PT->getPointeeType()->getAs<RecordType>();
+ const auto *RT = PT->getPointeeType()->getAsCanonical<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl();
if (RD->getTagKind() != TagTypeKind::Struct)
return false;
diff --git a/clang/lib/Sema/SemaObjCProperty.cpp b/clang/lib/Sema/SemaObjCProperty.cpp
index 9dbb1d2..1880cec 100644
--- a/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/clang/lib/Sema/SemaObjCProperty.cpp
@@ -1320,8 +1320,8 @@ Decl *SemaObjC::ActOnPropertyImplDecl(
CompleteTypeErr = true;
}
if (!CompleteTypeErr) {
- const RecordType *RecordTy = PropertyIvarType->getAs<RecordType>();
- if (RecordTy && RecordTy->getDecl()->hasFlexibleArrayMember()) {
+ if (const auto *RD = PropertyIvarType->getAsRecordDecl();
+ RD && RD->hasFlexibleArrayMember()) {
Diag(PropertyIvarLoc, diag::err_synthesize_variable_sized_ivar)
<< PropertyIvarType;
CompleteTypeErr = true; // suppress later diagnostics about the ivar
diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp
index 4d58b4a..8e87cae 100644
--- a/clang/lib/Sema/SemaOpenACC.cpp
+++ b/clang/lib/Sema/SemaOpenACC.cpp
@@ -690,9 +690,9 @@ ExprResult CheckVarType(SemaOpenACC &S, OpenACCClauseKind CK, Expr *VarExpr,
}
}
} else if (CK == OpenACCClauseKind::Reduction) {
- // TODO: OpenACC:
- // Reduction must have copyctor + dtor + operation in InnerTy I think?
- // Need to confirm when implementing this part.
+    // TODO: Reduction needs to be an aggregate, which gets checked later, so
+    // construction here isn't a problem. However, we need to make sure that we
+    // can still compare it correctly.
}
// All 3 things need to make sure they have a dtor.
@@ -1921,8 +1921,13 @@ void SemaOpenACC::ActOnVariableDeclarator(VarDecl *VD) {
return;
// This cast should be safe, since a static-local can only happen in a
- // function declaration.
- auto *ContextDecl = cast<FunctionDecl>(getCurContext());
+ // function declaration. However, in error cases (or perhaps ObjC/C++?), this
+ // could possibly be something like a 'block' decl, so if this is NOT a
+ // function decl, just give up.
+ auto *ContextDecl = dyn_cast<FunctionDecl>(getCurContext());
+
+ if (!ContextDecl)
+ return;
// OpenACC 3.3 2.15:
// In C and C++, function static variables are not supported in functions to
@@ -2585,7 +2590,9 @@ SemaOpenACC::ActOnOpenACCAsteriskSizeExpr(SourceLocation AsteriskLoc) {
}
std::pair<VarDecl *, VarDecl *>
-SemaOpenACC::CreateInitRecipe(OpenACCClauseKind CK, const Expr *VarExpr) {
+SemaOpenACC::CreateInitRecipe(OpenACCClauseKind CK,
+ OpenACCReductionOperator ReductionOperator,
+ const Expr *VarExpr) {
// Strip off any array subscripts/array section exprs to get to the type of
// the variable.
while (isa_and_present<ArraySectionExpr, ArraySubscriptExpr>(VarExpr)) {
@@ -2604,31 +2611,161 @@ SemaOpenACC::CreateInitRecipe(OpenACCClauseKind CK, const Expr *VarExpr) {
QualType VarTy =
VarExpr->getType().getNonReferenceType().getUnqualifiedType();
+ IdentifierInfo *VarName = [&]() {
+ switch (CK) {
+ case OpenACCClauseKind::Private:
+ return &getASTContext().Idents.get("openacc.private.init");
+ case OpenACCClauseKind::FirstPrivate:
+ return &getASTContext().Idents.get("openacc.firstprivate.init");
+ case OpenACCClauseKind::Reduction:
+ return &getASTContext().Idents.get("openacc.reduction.init");
+ default:
+ llvm_unreachable("Unknown clause kind?");
+ }
+ }();
+
VarDecl *Recipe = VarDecl::Create(
getASTContext(), SemaRef.getCurContext(), VarExpr->getBeginLoc(),
- VarExpr->getBeginLoc(),
- &getASTContext().Idents.get("openacc.private.init"), VarTy,
+ VarExpr->getBeginLoc(), VarName, VarTy,
getASTContext().getTrivialTypeSourceInfo(VarTy), SC_Auto);
ExprResult Init;
VarDecl *Temporary = nullptr;
-
- if (CK == OpenACCClauseKind::Private) {
+ {
// Trap errors so we don't get weird ones here. If we can't init, we'll just
// swallow the errors.
Sema::TentativeAnalysisScope Trap{SemaRef};
InitializedEntity Entity = InitializedEntity::InitializeVariable(Recipe);
- InitializationKind Kind =
- InitializationKind::CreateDefault(Recipe->getLocation());
- InitializationSequence InitSeq(SemaRef.SemaRef, Entity, Kind, {});
- Init = InitSeq.Perform(SemaRef.SemaRef, Entity, Kind, {});
- } else if (CK == OpenACCClauseKind::FirstPrivate) {
- // TODO: OpenACC: Implement this to do a 'copy' operation.
- } else if (CK == OpenACCClauseKind::Reduction) {
- // TODO: OpenACC: Implement this for whatever reduction needs.
- } else {
- llvm_unreachable("Unknown clause kind in CreateInitRecipe");
+ if (CK == OpenACCClauseKind::Private) {
+ InitializationKind Kind =
+ InitializationKind::CreateDefault(Recipe->getLocation());
+
+ InitializationSequence InitSeq(SemaRef.SemaRef, Entity, Kind, {});
+ Init = InitSeq.Perform(SemaRef.SemaRef, Entity, Kind, {});
+
+ } else if (CK == OpenACCClauseKind::FirstPrivate) {
+      // Create a VarDecl to be the 'copied-from' side of the copy section of
+      // the recipe. This association lets us reuse the standard initialization
+      // machinery to generate the copy.
+ Temporary = VarDecl::Create(
+ getASTContext(), SemaRef.getCurContext(), VarExpr->getBeginLoc(),
+ VarExpr->getBeginLoc(), &getASTContext().Idents.get("openacc.temp"),
+ VarTy, getASTContext().getTrivialTypeSourceInfo(VarTy), SC_Auto);
+ auto *TemporaryDRE = DeclRefExpr::Create(
+ getASTContext(), NestedNameSpecifierLoc{}, SourceLocation{},
+ Temporary,
+          /*RefersToEnclosingVariableOrCapture=*/false,
+ DeclarationNameInfo{DeclarationName{Temporary->getDeclName()},
+ VarExpr->getBeginLoc()},
+ VarTy, clang::VK_LValue, Temporary, nullptr, NOUR_None);
+
+ Expr *InitExpr = nullptr;
+
+ if (const auto *ArrTy = getASTContext().getAsConstantArrayType(VarTy)) {
+        // Arrays need each element initialized individually, since C/C++ has
+        // no whole-array assignment. This section builds an init list out of
+        // per-element copy initializations.
+ llvm::SmallVector<Expr *> Args;
+
+ // Decay to pointer for the array subscript expression.
+ auto *CastToPtr = ImplicitCastExpr::Create(
+ getASTContext(),
+ getASTContext().getPointerType(ArrTy->getElementType()),
+ CK_ArrayToPointerDecay, TemporaryDRE, /*BasePath=*/nullptr,
+ clang::VK_LValue, FPOptionsOverride{});
+
+ for (std::size_t I = 0; I < ArrTy->getLimitedSize(); ++I) {
+ // Each element needs to be some sort of copy initialization from an
+ // array-index of the original temporary (referenced via a
+ // DeclRefExpr).
+
+ auto *Idx = IntegerLiteral::Create(
+ getASTContext(),
+ llvm::APInt(
+ getASTContext().getTypeSize(getASTContext().getSizeType()),
+ I),
+ getASTContext().getSizeType(), VarExpr->getBeginLoc());
+
+ Expr *Subscript = new (getASTContext()) ArraySubscriptExpr(
+ CastToPtr, Idx, ArrTy->getElementType(), clang::VK_LValue,
+ OK_Ordinary, VarExpr->getBeginLoc());
+
+ // Generate a simple copy from the result of the subscript. This will
+ // do a bitwise copy or a copy-constructor, as necessary.
+ InitializedEntity CopyEntity =
+ InitializedEntity::InitializeElement(getASTContext(), I, Entity);
+ InitializationKind CopyKind =
+ InitializationKind::CreateCopy(VarExpr->getBeginLoc(), {});
+ InitializationSequence CopySeq(SemaRef.SemaRef, CopyEntity, CopyKind,
+ Subscript,
+ /*TopLevelOfInitList=*/true);
+
+ ExprResult ElemRes =
+ CopySeq.Perform(SemaRef.SemaRef, CopyEntity, CopyKind, Subscript);
+ Args.push_back(ElemRes.get());
+ }
+
+ InitExpr = new (getASTContext())
+ InitListExpr(getASTContext(), VarExpr->getBeginLoc(), Args,
+ VarExpr->getEndLoc());
+ InitExpr->setType(VarTy);
+
+ } else {
+ // If this isn't an array, we can just do normal copy init from a simple
+ // variable reference, so set that up.
+ InitExpr = TemporaryDRE;
+ }
+
+ InitializationKind Kind = InitializationKind::CreateForInit(
+ Recipe->getLocation(), /*DirectInit=*/true, InitExpr);
+ InitializationSequence InitSeq(SemaRef.SemaRef, Entity, Kind, InitExpr,
+ /*TopLevelOfInitList=*/false,
+ /*TreatUnavailableAsInvalid=*/false);
+ Init = InitSeq.Perform(SemaRef.SemaRef, Entity, Kind, InitExpr, &VarTy);
+ } else if (CK == OpenACCClauseKind::Reduction) {
+ // How we initialize the reduction variable depends on the operator used,
+ // according to the chart in OpenACC 3.3 section 2.6.15.
+
+ switch (ReductionOperator) {
+ case OpenACCReductionOperator::Invalid:
+      // This can only happen when there is an error, and since these inits
+      // are used for code generation, we can simply skip initialization here.
+ break;
+ case OpenACCReductionOperator::Multiplication:
+ case OpenACCReductionOperator::Max:
+ case OpenACCReductionOperator::Min:
+ case OpenACCReductionOperator::BitwiseAnd:
+ case OpenACCReductionOperator::And:
+ // TODO: OpenACC: figure out init for these.
+ break;
+
+ case OpenACCReductionOperator::Addition:
+ case OpenACCReductionOperator::BitwiseOr:
+ case OpenACCReductionOperator::BitwiseXOr:
+ case OpenACCReductionOperator::Or: {
+      // +, |, ^, and || all use 0 as their initializer, so we can use
+      // 'zero init' here and not bother spelling out the array/compound-type
+      // contents.
+ Expr *InitExpr = new (getASTContext()) InitListExpr(
+ getASTContext(), VarExpr->getBeginLoc(), {}, VarExpr->getEndLoc());
+      // We give the init list 'void' type so that initialization sequence
+      // generation treats it as untyped and computes the correct type.
+ InitExpr->setType(getASTContext().VoidTy);
+
+ InitializationKind Kind = InitializationKind::CreateForInit(
+ Recipe->getLocation(), /*DirectInit=*/true, InitExpr);
+ InitializationSequence InitSeq(SemaRef.SemaRef, Entity, Kind, InitExpr,
+ /*TopLevelOfInitList=*/false,
+ /*TreatUnavailableAsInvalid=*/false);
+ Init = InitSeq.Perform(SemaRef.SemaRef, Entity, Kind, InitExpr, &VarTy);
+ break;
+ }
+ }
+ } else {
+ llvm_unreachable("Unknown clause kind in CreateInitRecipe");
+ }
}
if (Init.get()) {
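
As a usage sketch of the initializers this switch encodes (per the OpenACC 3.3 2.6.15 table cited in the comment), the '+' reduction below starts each private copy at 0 via the empty init list built here; *, &&, max, min, and bitwise & remain TODOs in this patch:

  // 'sum' is zero-initialized in each private copy before combining
  #pragma acc parallel loop reduction(+:sum)
  for (int i = 0; i < n; ++i)
    sum += a[i];
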
diff --git a/clang/lib/Sema/SemaOpenACCClause.cpp b/clang/lib/Sema/SemaOpenACCClause.cpp
index e8a18243..bb61ced 100644
--- a/clang/lib/Sema/SemaOpenACCClause.cpp
+++ b/clang/lib/Sema/SemaOpenACCClause.cpp
@@ -1054,13 +1054,17 @@ OpenACCClause *SemaOpenACCClauseVisitor::VisitWaitClause(
OpenACCClause *SemaOpenACCClauseVisitor::VisitDeviceTypeClause(
SemaOpenACC::OpenACCParsedClause &Clause) {
- // Based on discussions, having more than 1 'architecture' on a 'set' is
- // nonsensical, so we're going to fix the standard to reflect this. Implement
- // the limitation, since the Dialect requires this.
- if (Clause.getDirectiveKind() == OpenACCDirectiveKind::Set &&
+ // OpenACC Pull #550 (https://github.com/OpenACC/openacc-spec/pull/550)
+ // clarified that Init, Shutdown, and Set only support a single architecture.
+ // Though the dialect only requires it for 'set' as far as we know, we'll just
+ // implement all 3 here.
+ if ((Clause.getDirectiveKind() == OpenACCDirectiveKind::Init ||
+ Clause.getDirectiveKind() == OpenACCDirectiveKind::Shutdown ||
+ Clause.getDirectiveKind() == OpenACCDirectiveKind::Set) &&
Clause.getDeviceTypeArchitectures().size() > 1) {
SemaRef.Diag(Clause.getDeviceTypeArchitectures()[1].getLoc(),
- diag::err_acc_device_type_multiple_archs);
+ diag::err_acc_device_type_multiple_archs)
+ << Clause.getDirectiveKind();
return nullptr;
}
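
An illustration of the widened restriction (the architecture names are placeholders, not real device types):

  #pragma acc set device_type(arch_a)          // OK: a single architecture
  #pragma acc init device_type(arch_a, arch_b) // error: now rejected on
                                               // init, shutdown, and set
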
@@ -1770,18 +1774,28 @@ OpenACCClause *SemaOpenACCClauseVisitor::VisitReductionClause(
}
SmallVector<Expr *> ValidVars;
+ SmallVector<OpenACCReductionRecipe> Recipes;
for (Expr *Var : Clause.getVarList()) {
ExprResult Res = SemaRef.CheckReductionVar(Clause.getDirectiveKind(),
Clause.getReductionOp(), Var);
- if (Res.isUsable())
+ if (Res.isUsable()) {
ValidVars.push_back(Res.get());
+
+ VarDecl *InitRecipe =
+ SemaRef
+ .CreateInitRecipe(OpenACCClauseKind::Reduction,
+ Clause.getReductionOp(), Res.get())
+ .first;
+ Recipes.push_back({InitRecipe});
+ }
}
return SemaRef.CheckReductionClause(
ExistingClauses, Clause.getDirectiveKind(), Clause.getBeginLoc(),
Clause.getLParenLoc(), Clause.getReductionOp(), ValidVars,
+ Recipes,
Clause.getEndLoc());
}
@@ -1917,61 +1931,96 @@ SemaOpenACC::ActOnClause(ArrayRef<const OpenACCClause *> ExistingClauses,
ExprResult SemaOpenACC::CheckReductionVar(OpenACCDirectiveKind DirectiveKind,
OpenACCReductionOperator ReductionOp,
Expr *VarExpr) {
+ // For now, we only support 'scalar' types, or composites/arrays of scalar
+ // types.
VarExpr = VarExpr->IgnoreParenCasts();
+ SourceLocation VarLoc = VarExpr->getBeginLoc();
+
+ SmallVector<PartialDiagnosticAt> Notes;
+ QualType CurType = VarExpr->getType();
+
+  // For array-like things, the expression can be an array element (subscript
+  // expr), an array section, or an array type. Peel those off, and add notes
+  // in case we find an illegal kind. Scalars, or composites of scalars, are
+  // allowed inside.
+ if (auto *ASE = dyn_cast<ArraySectionExpr>(VarExpr)) {
+ QualType BaseType = ArraySectionExpr::getBaseOriginalType(ASE);
+
+ PartialDiagnostic PD = PDiag(diag::note_acc_reduction_array)
+ << diag::OACCReductionArray::Section << BaseType;
+ Notes.push_back({ASE->getBeginLoc(), PD});
+
+ CurType = getASTContext().getBaseElementType(BaseType);
+ } else if (auto *SubExpr = dyn_cast<ArraySubscriptExpr>(VarExpr)) {
+    // An array subscript already has the element type as its type, so there
+    // is no type to adjust here.
+ PartialDiagnostic PD =
+ PDiag(diag::note_acc_reduction_array)
+ << diag::OACCReductionArray::Subscript
+ << SubExpr->getBase()->IgnoreParenImpCasts()->getType();
+ Notes.push_back({SubExpr->getBeginLoc(), PD});
+ } else if (auto *AT = getASTContext().getAsArrayType(CurType)) {
+    // If this is already an array type, peel off the array and keep the
+    // element type.
+ CurType = getASTContext().getBaseElementType(AT);
+ PartialDiagnostic PD = PDiag(diag::note_acc_reduction_array)
+ << diag::OACCReductionArray::ArrayTy << CurType;
+ Notes.push_back({VarLoc, PD});
+ }
- auto TypeIsValid = [](QualType Ty) {
- return Ty->isDependentType() || Ty->isScalarType();
+ auto IsValidMemberOfComposite = [](QualType Ty) {
+ return Ty->isDependentType() ||
+ (Ty->isScalarType() && !Ty->isPointerType());
};
- if (isa<ArraySectionExpr>(VarExpr)) {
- Expr *ASExpr = VarExpr;
- QualType BaseTy = ArraySectionExpr::getBaseOriginalType(ASExpr);
- QualType EltTy = getASTContext().getBaseElementType(BaseTy);
+ auto EmitDiags = [&](SourceLocation Loc, PartialDiagnostic PD) {
+ Diag(Loc, PD);
- if (!TypeIsValid(EltTy)) {
- Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_type)
- << EltTy << /*Sub array base type*/ 1;
- return ExprError();
- }
- } else if (VarExpr->getType()->isArrayType()) {
- // Arrays are considered an 'aggregate variable' explicitly, so are OK, no
- // additional checking required.
- //
- // Glossary: Aggregate variables – a variable of any non-scalar datatype,
- // including array or composite variables.
- //
- // The next branch (record decl) checks for composite variables.
- } else if (auto *RD = VarExpr->getType()->getAsRecordDecl()) {
+ for (auto [Loc, PD] : Notes)
+ Diag(Loc, PD);
+
+ Diag(VarLoc, diag::note_acc_reduction_type_summary);
+ };
+
+  // If the type is already scalar, or is dependent, there is nothing more to
+  // check.
+  if (IsValidMemberOfComposite(CurType)) {
+    // Nothing to do here; the type is valid.
+ } else if (auto *RD = CurType->getAsRecordDecl()) {
if (!RD->isStruct() && !RD->isClass()) {
- Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_composite_type)
- << /*not class or struct*/ 0 << VarExpr->getType();
+ EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type)
+ << RD << diag::OACCReductionTy::NotClassStruct);
return ExprError();
}
if (!RD->isCompleteDefinition()) {
- Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_composite_type)
- << /*incomplete*/ 1 << VarExpr->getType();
+ EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type)
+ << RD << diag::OACCReductionTy::NotComplete);
return ExprError();
}
+
if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
CXXRD && !CXXRD->isAggregate()) {
- Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_composite_type)
- << /*aggregate*/ 2 << VarExpr->getType();
+ EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type)
+ << CXXRD << diag::OACCReductionTy::NotAgg);
return ExprError();
}
for (FieldDecl *FD : RD->fields()) {
- if (!TypeIsValid(FD->getType())) {
- Diag(VarExpr->getExprLoc(),
- diag::err_acc_reduction_composite_member_type);
- Diag(FD->getLocation(), diag::note_acc_reduction_composite_member_loc);
+ if (!IsValidMemberOfComposite(FD->getType())) {
+ PartialDiagnostic PD =
+ PDiag(diag::note_acc_reduction_member_of_composite)
+ << FD->getName() << RD->getName();
+ Notes.push_back({FD->getBeginLoc(), PD});
+        // TODO: mention the offending member here
+        // (note_acc_reduction_member_of_composite).
+ EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type)
+ << FD->getType()
+ << diag::OACCReductionTy::MemberNotScalar);
return ExprError();
}
}
- } else if (!TypeIsValid(VarExpr->getType())) {
- Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_type)
- << VarExpr->getType() << /*Sub array base type*/ 0;
- return ExprError();
+ } else {
+ EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type)
+ << CurType << diag::OACCReductionTy::NotScalar);
}
// OpenACC3.3: 2.9.11: Reduction clauses on nested constructs for the same
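
A hedged sketch of what the rewritten check accepts and rejects (the types are illustrative):

  struct Point  { int x, y; }; // aggregate of scalars: accepted
  struct Holder { int *p; };   // pointer member: rejected, with notes

  void f(Point pt, Holder h) {
    #pragma acc parallel reduction(+:pt) // OK
    ;
    #pragma acc parallel reduction(+:h)  // error: member 'p' is not scalar
    ;
  }
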
@@ -2154,7 +2203,8 @@ OpenACCClause *SemaOpenACC::CheckReductionClause(
ArrayRef<const OpenACCClause *> ExistingClauses,
OpenACCDirectiveKind DirectiveKind, SourceLocation BeginLoc,
SourceLocation LParenLoc, OpenACCReductionOperator ReductionOp,
- ArrayRef<Expr *> Vars, SourceLocation EndLoc) {
+ ArrayRef<Expr *> Vars, ArrayRef<OpenACCReductionRecipe> Recipes,
+ SourceLocation EndLoc) {
if (DirectiveKind == OpenACCDirectiveKind::Loop ||
isOpenACCCombinedDirectiveKind(DirectiveKind)) {
// OpenACC 3.3 2.9.11: A reduction clause may not appear on a loop directive
@@ -2183,7 +2233,7 @@ OpenACCClause *SemaOpenACC::CheckReductionClause(
}
auto *Ret = OpenACCReductionClause::Create(
- getASTContext(), BeginLoc, LParenLoc, ReductionOp, Vars, EndLoc);
+ getASTContext(), BeginLoc, LParenLoc, ReductionOp, Vars, Recipes, EndLoc);
return Ret;
}
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 2c5d97c..4b030a2 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -7358,7 +7358,9 @@ SemaOpenMP::checkOpenMPDeclareVariantFunction(SemaOpenMP::DeclGroupPtrTy DG,
Diag(SR.getBegin(), diag::err_omp_interop_type_not_found) << SR;
return std::nullopt;
}
- QualType InteropType = Context.getTypeDeclType(TD);
+ QualType InteropType =
+ Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD);
if (PTy->isVariadic()) {
Diag(FD->getLocation(), diag::err_omp_append_args_with_varargs) << SR;
return std::nullopt;
@@ -7378,7 +7380,7 @@ SemaOpenMP::checkOpenMPDeclareVariantFunction(SemaOpenMP::DeclGroupPtrTy DG,
auto *Method = dyn_cast<CXXMethodDecl>(FD);
if (Method && !Method->isStatic()) {
FnPtrType = Context.getMemberPointerType(
- AdjustedFnType, /*Qualifier=*/nullptr, Method->getParent());
+ AdjustedFnType, /*Qualifier=*/std::nullopt, Method->getParent());
ExprResult ER;
{
// Build addr_of unary op to correctly handle type checks for member
@@ -11095,22 +11097,27 @@ StmtResult SemaOpenMP::ActOnOpenMPErrorDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
}
- const OMPSeverityClause *SeverityC =
- OMPExecutableDirective::getSingleClause<OMPSeverityClause>(Clauses);
- const OMPMessageClause *MessageC =
- OMPExecutableDirective::getSingleClause<OMPMessageClause>(Clauses);
- Expr *ME = MessageC ? MessageC->getMessageString() : nullptr;
-
if (!AtC || AtC->getAtKind() == OMPC_AT_compilation) {
+ const OMPSeverityClause *SeverityC =
+ OMPExecutableDirective::getSingleClause<OMPSeverityClause>(Clauses);
+ const OMPMessageClause *MessageC =
+ OMPExecutableDirective::getSingleClause<OMPMessageClause>(Clauses);
+ std::optional<std::string> SL =
+ MessageC ? MessageC->tryEvaluateString(getASTContext()) : std::nullopt;
+
+ if (MessageC && !SL)
+ Diag(MessageC->getMessageString()->getBeginLoc(),
+ diag::warn_clause_expected_string)
+ << getOpenMPClauseNameForDiag(OMPC_message) << 1;
if (SeverityC && SeverityC->getSeverityKind() == OMPC_SEVERITY_warning)
Diag(SeverityC->getSeverityKindKwLoc(), diag::warn_diagnose_if_succeeded)
- << (ME ? cast<StringLiteral>(ME)->getString() : "WARNING");
+ << SL.value_or("WARNING");
else
- Diag(StartLoc, diag::err_diagnose_if_succeeded)
- << (ME ? cast<StringLiteral>(ME)->getString() : "ERROR");
+ Diag(StartLoc, diag::err_diagnose_if_succeeded) << SL.value_or("ERROR");
if (!SeverityC || SeverityC->getSeverityKind() != OMPC_SEVERITY_warning)
return StmtError();
}
+
return OMPErrorDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses);
}
@@ -16462,13 +16469,32 @@ OMPClause *SemaOpenMP::ActOnOpenMPMessageClause(Expr *ME,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
assert(ME && "NULL expr in Message clause");
- if (!isa<StringLiteral>(ME)) {
+ QualType Type = ME->getType();
+ if ((!Type->isPointerType() && !Type->isArrayType()) ||
+ !Type->getPointeeOrArrayElementType()->isAnyCharacterType()) {
Diag(ME->getBeginLoc(), diag::warn_clause_expected_string)
- << getOpenMPClauseNameForDiag(OMPC_message);
+ << getOpenMPClauseNameForDiag(OMPC_message) << 0;
return nullptr;
}
- return new (getASTContext())
- OMPMessageClause(ME, StartLoc, LParenLoc, EndLoc);
+
+ Stmt *HelperValStmt = nullptr;
+
+ OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
+ OpenMPDirectiveKind CaptureRegion = getOpenMPCaptureRegionForClause(
+ DKind, OMPC_message, getLangOpts().OpenMP);
+ if (CaptureRegion != OMPD_unknown &&
+ !SemaRef.CurContext->isDependentContext()) {
+ ME = SemaRef.MakeFullExpr(ME).get();
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
+ ME = tryBuildCapture(SemaRef, ME, Captures).get();
+ HelperValStmt = buildPreInits(getASTContext(), Captures);
+ }
+
+ // Convert array type to pointer type if needed.
+ ME = SemaRef.DefaultFunctionArrayLvalueConversion(ME).get();
+
+ return new (getASTContext()) OMPMessageClause(
+ ME, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
}
OMPClause *SemaOpenMP::ActOnOpenMPOrderClause(
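
With this change, the message clause accepts any character-pointer or character-array expression rather than only a string literal; non-constant messages may be captured for execution-time reporting. A sketch:

  const char *why = "offload target unavailable";
  #pragma omp error at(execution) severity(warning) message(why)
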
@@ -18626,13 +18652,14 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
// the set of member candidates is empty.
LookupResult Lookup(SemaRef, ReductionId, Sema::LookupOMPReductionName);
Lookup.suppressDiagnostics();
- if (const auto *TyRec = Ty->getAs<RecordType>()) {
+ if (Ty->isRecordType()) {
// Complete the type if it can be completed.
// If the type is neither complete nor being defined, bail out now.
- if (SemaRef.isCompleteType(Loc, Ty) || TyRec->isBeingDefined() ||
- TyRec->getDecl()->getDefinition()) {
+ bool IsComplete = SemaRef.isCompleteType(Loc, Ty);
+ auto *RD = Ty->castAsRecordDecl();
+ if (IsComplete || RD->isBeingDefined()) {
Lookup.clear();
- SemaRef.LookupQualifiedName(Lookup, TyRec->getDecl());
+ SemaRef.LookupQualifiedName(Lookup, RD);
if (Lookup.empty()) {
Lookups.emplace_back();
Lookups.back().append(Lookup.begin(), Lookup.end());
@@ -24807,12 +24834,12 @@ ExprResult SemaOpenMP::ActOnOMPIteratorExpr(Scope *S,
/// Check if \p AssumptionStr is a known assumption and warn if not.
static void checkOMPAssumeAttr(Sema &S, SourceLocation Loc,
StringRef AssumptionStr) {
- if (llvm::KnownAssumptionStrings.count(AssumptionStr))
+ if (llvm::getKnownAssumptionStrings().count(AssumptionStr))
return;
unsigned BestEditDistance = 3;
StringRef Suggestion;
- for (const auto &KnownAssumptionIt : llvm::KnownAssumptionStrings) {
+ for (const auto &KnownAssumptionIt : llvm::getKnownAssumptionStrings()) {
unsigned EditDistance =
AssumptionStr.edit_distance(KnownAssumptionIt.getKey());
if (EditDistance < BestEditDistance) {
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 76e189d..14fa847 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -369,8 +369,8 @@ NarrowingKind StandardConversionSequence::getNarrowingKind(
// A conversion to an enumeration type is narrowing if the conversion to
// the underlying type is narrowing. This only arises for expressions of
// the form 'Enum{init}'.
- if (auto *ET = ToType->getAs<EnumType>())
- ToType = ET->getDecl()->getIntegerType();
+ if (const auto *ED = ToType->getAsEnumDecl())
+ ToType = ED->getIntegerType();
switch (Second) {
// 'bool' is an integral type; dispatch to the right place to handle it.
@@ -1058,12 +1058,12 @@ static bool shouldAddReversedEqEq(Sema &S, SourceLocation OpLoc,
if (isa<CXXMethodDecl>(EqFD)) {
// If F is a class member, search scope is class type of first operand.
QualType RHS = FirstOperand->getType();
- auto *RHSRec = RHS->getAs<RecordType>();
+ auto *RHSRec = RHS->getAsCXXRecordDecl();
if (!RHSRec)
return true;
LookupResult Members(S, NotEqOp, OpLoc,
Sema::LookupNameKind::LookupMemberName);
- S.LookupQualifiedName(Members, RHSRec->getDecl());
+ S.LookupQualifiedName(Members, RHSRec);
Members.suppressAccessDiagnostics();
for (NamedDecl *Op : Members)
if (FunctionsCorrespond(S.Context, EqFD, Op->getAsFunction()))
@@ -1268,7 +1268,7 @@ OverloadKind Sema::CheckOverload(Scope *S, FunctionDecl *New,
//
// Exception: if the scope is dependent and this is not a class
// member, the using declaration can only introduce an enumerator.
- if (UUD->getQualifier()->isDependent() && !UUD->isCXXClassMember()) {
+ if (UUD->getQualifier().isDependent() && !UUD->isCXXClassMember()) {
Match = *I;
return OverloadKind::NonFunction;
}
@@ -1471,9 +1471,8 @@ static bool IsOverloadOrOverrideImpl(Sema &SemaRef, FunctionDecl *New,
if (OldMethod->isImplicitObjectMemberFunction() &&
OldMethod->getParent() != NewMethod->getParent()) {
- QualType ParentType =
- SemaRef.Context.getTypeDeclType(OldMethod->getParent())
- .getCanonicalType();
+ CanQualType ParentType =
+ SemaRef.Context.getCanonicalTagType(OldMethod->getParent());
if (ParentType.getTypePtr() != BS.Ty)
return false;
BS.Ty = DS.Ty;
@@ -1802,7 +1801,7 @@ TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
// constructor (i.e., a user-defined conversion function) is
// called for those cases.
QualType FromType = From->getType();
- if (ToType->getAs<RecordType>() && FromType->getAs<RecordType>() &&
+ if (ToType->isRecordType() &&
(S.Context.hasSameUnqualifiedType(FromType, ToType) ||
S.IsDerivedFrom(From->getBeginLoc(), FromType, ToType))) {
ICS.setStandard();
@@ -1979,7 +1978,7 @@ bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
}
// Drop 'noexcept' if not present in target type.
- if (FromFPT) {
+ if (FromFPT && ToFPT) {
if (FromFPT->isNothrow() && !ToFPT->isNothrow()) {
FromFn = cast<FunctionType>(
Context.getFunctionTypeWithExceptionSpec(QualType(FromFPT, 0),
@@ -2293,7 +2292,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
== UO_AddrOf &&
"Non-address-of operator on non-static member address");
FromType = S.Context.getMemberPointerType(
- FromType, /*Qualifier=*/nullptr, Method->getParent());
+ FromType, /*Qualifier=*/std::nullopt, Method->getParent());
} else if (isa<UnaryOperator>(From->IgnoreParens())) {
assert(cast<UnaryOperator>(From->IgnoreParens())->getOpcode() ==
UO_AddrOf &&
@@ -2602,10 +2601,12 @@ IsTransparentUnionStandardConversion(Sema &S, Expr* From,
bool CStyle) {
const RecordType *UT = ToType->getAsUnionType();
- if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ if (!UT)
return false;
// The field to initialize within the transparent union.
- RecordDecl *UD = UT->getDecl();
+ const RecordDecl *UD = UT->getOriginalDecl()->getDefinitionOrSelf();
+ if (!UD->hasAttr<TransparentUnionAttr>())
+ return false;
// It's compatible if the expression matches any of the fields.
for (const auto *it : UD->fields()) {
if (IsStandardConversion(S, From, it->getType(), InOverloadResolution, SCS,
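
For reference, the GNU transparent-union conversion this code implements looks like this (a minimal sketch):

  typedef union {
    int *ip;
    float *fp;
  } any_ptr __attribute__((transparent_union));

  void take(any_ptr);
  void call(int *i) { take(i); } // OK: 'i' initializes the 'ip' field
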
@@ -2660,18 +2661,18 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
// integral promotion can be applied to its underlying type, a prvalue of an
// unscoped enumeration type whose underlying type is fixed can also be
// converted to a prvalue of the promoted underlying type.
- if (const EnumType *FromEnumType = FromType->getAs<EnumType>()) {
+ if (const auto *FromED = FromType->getAsEnumDecl()) {
// C++0x 7.2p9: Note that this implicit enum to int conversion is not
// provided for a scoped enumeration.
- if (FromEnumType->getDecl()->isScoped())
+ if (FromED->isScoped())
return false;
// We can perform an integral promotion to the underlying type of the enum,
// even if that's not the promoted type. Note that the check for promoting
// the underlying type is based on the type alone, and does not consider
// the bitfield-ness of the actual source expression.
- if (FromEnumType->getDecl()->isFixed()) {
- QualType Underlying = FromEnumType->getDecl()->getIntegerType();
+ if (FromED->isFixed()) {
+ QualType Underlying = FromED->getIntegerType();
return Context.hasSameUnqualifiedType(Underlying, ToType) ||
IsIntegralPromotion(nullptr, Underlying, ToType);
}
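
A short example of the promotions this code handles (unscoped enums only):

  enum Fixed : short { A };        // unscoped, fixed underlying type: promotes
  int f(Fixed e) { return e + 1; } // to 'short''s promoted type, i.e. 'int'

  enum class Scoped { B };         // scoped: no implicit promotion at all
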
@@ -2679,8 +2680,7 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
// We have already pre-calculated the promotion type, so this is trivial.
if (ToType->isIntegerType() &&
isCompleteType(From->getBeginLoc(), FromType))
- return Context.hasSameUnqualifiedType(
- ToType, FromEnumType->getDecl()->getPromotionType());
+ return Context.hasSameUnqualifiedType(ToType, FromED->getPromotionType());
// C++ [conv.prom]p5:
// If the bit-field has an enumerated type, it is treated as any other
@@ -3347,12 +3347,12 @@ void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
ToMember->getMostRecentCXXRecordDecl())) {
PDiag << ft_different_class;
if (ToMember->isSugared())
- PDiag << Context.getTypeDeclType(
+ PDiag << Context.getCanonicalTagType(
ToMember->getMostRecentCXXRecordDecl());
else
PDiag << ToMember->getQualifier();
if (FromMember->isSugared())
- PDiag << Context.getTypeDeclType(
+ PDiag << Context.getCanonicalTagType(
FromMember->getMostRecentCXXRecordDecl());
else
PDiag << FromMember->getQualifier();
@@ -3636,12 +3636,12 @@ Sema::MemberPointerConversionResult Sema::CheckMemberPointerConversion(
CXXRecordDecl *FromClass = FromPtrType->getMostRecentCXXRecordDecl(),
*ToClass = ToPtrType->getMostRecentCXXRecordDecl();
- auto DiagCls = [](PartialDiagnostic &PD, NestedNameSpecifier *Qual,
- const CXXRecordDecl *Cls) {
- if (declaresSameEntity(Qual->getAsRecordDecl(), Cls))
+ auto DiagCls = [&](PartialDiagnostic &PD, NestedNameSpecifier Qual,
+ const CXXRecordDecl *Cls) {
+ if (declaresSameEntity(Qual.getAsRecordDecl(), Cls))
PD << Qual;
else
- PD << QualType(Cls->getTypeForDecl(), 0);
+ PD << Context.getCanonicalTagType(Cls);
};
auto DiagFromTo = [&](PartialDiagnostic &PD) -> PartialDiagnostic & {
DiagCls(PD, FromPtrType->getQualifier(), FromClass);
@@ -3658,8 +3658,7 @@ Sema::MemberPointerConversionResult Sema::CheckMemberPointerConversion(
if (!IsDerivedFrom(OpRange.getBegin(), Derived, Base, Paths))
return MemberPointerConversionResult::NotDerived;
- if (Paths.isAmbiguous(
- Base->getTypeForDecl()->getCanonicalTypeUnqualified())) {
+ if (Paths.isAmbiguous(Context.getCanonicalTagType(Base))) {
PartialDiagnostic PD = PDiag(diag::err_ambiguous_memptr_conv);
PD << int(Direction);
DiagFromTo(PD) << getAmbiguousPathsDisplayString(Paths) << OpRange;
@@ -3687,8 +3686,8 @@ Sema::MemberPointerConversionResult Sema::CheckMemberPointerConversion(
? diag::err_upcast_to_inaccessible_base
: diag::err_downcast_from_inaccessible_base,
[&](PartialDiagnostic &PD) {
- NestedNameSpecifier *BaseQual = FromPtrType->getQualifier(),
- *DerivedQual = ToPtrType->getQualifier();
+ NestedNameSpecifier BaseQual = FromPtrType->getQualifier(),
+ DerivedQual = ToPtrType->getQualifier();
if (Direction == MemberPointerConversionDirection::Upcast)
std::swap(BaseQual, DerivedQual);
DiagCls(PD, DerivedQual, Derived);
@@ -3967,7 +3966,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// If the type we are conversion to is a class type, enumerate its
// constructors.
- if (const RecordType *ToRecordType = ToType->getAs<RecordType>()) {
+ if (const RecordType *ToRecordType = ToType->getAsCanonical<RecordType>()) {
// C++ [over.match.ctor]p1:
// When objects of class type are direct-initialized (8.5), or
// copy-initialized from an expression of the same or a
@@ -3977,14 +3976,15 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// that class. The argument list is the expression-list within
// the parentheses of the initializer.
if (S.Context.hasSameUnqualifiedType(ToType, From->getType()) ||
- (From->getType()->getAs<RecordType>() &&
+ (From->getType()->isRecordType() &&
S.IsDerivedFrom(From->getBeginLoc(), From->getType(), ToType)))
ConstructorsOnly = true;
if (!S.isCompleteType(From->getExprLoc(), ToType)) {
// We're not going to find any constructors.
- } else if (CXXRecordDecl *ToRecordDecl
- = dyn_cast<CXXRecordDecl>(ToRecordType->getDecl())) {
+ } else if (auto *ToRecordDecl =
+ dyn_cast<CXXRecordDecl>(ToRecordType->getOriginalDecl())) {
+ ToRecordDecl = ToRecordDecl->getDefinitionOrSelf();
Expr **Args = &From;
unsigned NumArgs = 1;
@@ -4056,9 +4056,10 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
} else if (!S.isCompleteType(From->getBeginLoc(), From->getType())) {
// No conversion functions from incomplete types.
} else if (const RecordType *FromRecordType =
- From->getType()->getAs<RecordType>()) {
- if (CXXRecordDecl *FromRecordDecl
- = dyn_cast<CXXRecordDecl>(FromRecordType->getDecl())) {
+ From->getType()->getAsCanonical<RecordType>()) {
+ if (auto *FromRecordDecl =
+ dyn_cast<CXXRecordDecl>(FromRecordType->getOriginalDecl())) {
+ FromRecordDecl = FromRecordDecl->getDefinitionOrSelf();
// Add all of the conversion functions as candidates.
const auto &Conversions = FromRecordDecl->getVisibleConversionFunctions();
for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
@@ -4505,11 +4506,10 @@ getFixedEnumPromtion(Sema &S, const StandardConversionSequence &SCS) {
if (SCS.Second != ICK_Integral_Promotion)
return FixedEnumPromotion::None;
- QualType FromType = SCS.getFromType();
- if (!FromType->isEnumeralType())
+ const auto *Enum = SCS.getFromType()->getAsEnumDecl();
+ if (!Enum)
return FixedEnumPromotion::None;
- EnumDecl *Enum = FromType->castAs<EnumType>()->getDecl();
if (!Enum->isFixed())
return FixedEnumPromotion::None;
@@ -5145,8 +5145,7 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
Expr *Init, QualType T2, bool AllowRvalues,
bool AllowExplicit) {
assert(T2->isRecordType() && "Can only find conversions of record types.");
- auto *T2RecordDecl = cast<CXXRecordDecl>(T2->castAs<RecordType>()->getDecl());
-
+ auto *T2RecordDecl = T2->castAsCXXRecordDecl();
OverloadCandidateSet CandidateSet(
DeclLoc, OverloadCandidateSet::CSK_InitByUserDefinedConversion);
const auto &Conversions = T2RecordDecl->getVisibleConversionFunctions();
@@ -5934,7 +5933,7 @@ static ImplicitConversionSequence TryObjectArgumentInitialization(
assert(FromType->isRecordType());
- QualType ClassType = S.Context.getTypeDeclType(ActingContext);
+ CanQualType ClassType = S.Context.getCanonicalTagType(ActingContext);
// C++98 [class.dtor]p2:
// A destructor can be invoked for a const, volatile or const volatile
// object.
@@ -6055,7 +6054,7 @@ static ImplicitConversionSequence TryObjectArgumentInitialization(
/// the implicit object parameter for the given Method with the given
/// expression.
ExprResult Sema::PerformImplicitObjectArgumentInitialization(
- Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl,
+ Expr *From, NestedNameSpecifier Qualifier, NamedDecl *FoundDecl,
CXXMethodDecl *Method) {
QualType FromRecordType, DestType;
QualType ImplicitParamRecordType = Method->getFunctionObjectParameterType();
@@ -6268,7 +6267,9 @@ static ExprResult BuildConvertedConstantExpression(Sema &S, Expr *From,
QualType T, CCEKind CCE,
NamedDecl *Dest,
APValue &PreNarrowingValue) {
- assert((S.getLangOpts().CPlusPlus11 || CCE == CCEKind::TempArgStrict) &&
+ [[maybe_unused]] bool isCCEAllowedPreCXX11 =
+ (CCE == CCEKind::TempArgStrict || CCE == CCEKind::ExplicitBool);
+ assert((S.getLangOpts().CPlusPlus11 || isCCEAllowedPreCXX11) &&
"converted constant expression outside C++11 or TTP matching");
if (checkPlaceholderForOverload(S, From))
@@ -6812,7 +6813,7 @@ ExprResult Sema::PerformContextualImplicitConversion(
// We can only perform contextual implicit conversions on objects of class
// type.
- const RecordType *RecordTy = T->getAs<RecordType>();
+ const RecordType *RecordTy = T->getAsCanonical<RecordType>();
if (!RecordTy || !getLangOpts().CPlusPlus) {
if (!Converter.Suppress)
Converter.diagnoseNoMatch(*this, Loc, T) << From->getSourceRange();
@@ -6840,8 +6841,9 @@ ExprResult Sema::PerformContextualImplicitConversion(
UnresolvedSet<4>
ViableConversions; // These are *potentially* viable in C++1y.
UnresolvedSet<4> ExplicitConversions;
- const auto &Conversions =
- cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions();
+ const auto &Conversions = cast<CXXRecordDecl>(RecordTy->getOriginalDecl())
+ ->getDefinitionOrSelf()
+ ->getVisibleConversionFunctions();
bool HadMultipleCandidates =
(std::distance(Conversions.begin(), Conversions.end()) > 1);
@@ -7160,7 +7162,8 @@ void Sema::AddOverloadCandidate(
// C++ [class.copy]p3:
// A member function template is never instantiated to perform the copy
// of a class object to an object of its class type.
- QualType ClassType = Context.getTypeDeclType(Constructor->getParent());
+ CanQualType ClassType =
+ Context.getCanonicalTagType(Constructor->getParent());
if (Args.size() == 1 && Constructor->isSpecializationCopyingObject() &&
(Context.hasSameUnqualifiedType(ClassType, Args[0]->getType()) ||
IsDerivedFrom(Args[0]->getBeginLoc(), Args[0]->getType(),
@@ -7181,8 +7184,8 @@ void Sema::AddOverloadCandidate(
if (Shadow && Args.size() == 1 && Constructor->getNumParams() >= 1 &&
Constructor->getParamDecl(0)->getType()->isReferenceType()) {
QualType P = Constructor->getParamDecl(0)->getType()->getPointeeType();
- QualType C = Context.getRecordType(Constructor->getParent());
- QualType D = Context.getRecordType(Shadow->getParent());
+ CanQualType C = Context.getCanonicalTagType(Constructor->getParent());
+ CanQualType D = Context.getCanonicalTagType(Shadow->getParent());
SourceLocation Loc = Args.front()->getExprLoc();
if ((Context.hasSameUnqualifiedType(P, C) || IsDerivedFrom(Loc, P, C)) &&
(Context.hasSameUnqualifiedType(D, P) || IsDerivedFrom(Loc, D, P))) {
@@ -7403,7 +7406,7 @@ static bool convertArgsForAvailabilityChecks(
"Shouldn't have `this` for ctors!");
assert(!Method->isStatic() && "Shouldn't have `this` for static methods!");
ExprResult R = S.PerformImplicitObjectArgumentInitialization(
- ThisArg, /*Qualifier=*/nullptr, Method, Method);
+ ThisArg, /*Qualifier=*/std::nullopt, Method, Method);
if (R.isInvalid())
return false;
ConvertedThis = R.get();
@@ -8159,29 +8162,24 @@ bool Sema::CheckNonDependentConversions(
ArgType = ArgType->getPointeeType();
}
- if (auto *RT = ParamType->getAs<RecordType>())
- if (auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- RD && RD->hasDefinition()) {
- if (llvm::any_of(LookupConstructors(RD), [](NamedDecl *ND) {
- auto Info = getConstructorInfo(ND);
- if (!Info)
- return false;
- CXXConstructorDecl *Ctor = Info.Constructor;
- /// isConvertingConstructor takes copy/move constructors into
- /// account!
- return !Ctor->isCopyOrMoveConstructor() &&
- Ctor->isConvertingConstructor(
- /*AllowExplicit=*/true);
- }))
- return true;
- }
-
- if (auto *RT = ArgType->getAs<RecordType>())
- if (auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- RD && RD->hasDefinition() &&
- !RD->getVisibleConversionFunctions().empty()) {
- return true;
- }
+ if (auto *RD = ParamType->getAsCXXRecordDecl();
+ RD && RD->hasDefinition() &&
+ llvm::any_of(LookupConstructors(RD), [](NamedDecl *ND) {
+ auto Info = getConstructorInfo(ND);
+ if (!Info)
+ return false;
+ CXXConstructorDecl *Ctor = Info.Constructor;
+            // isConvertingConstructor takes copy/move constructors into
+            // account!
+ return !Ctor->isCopyOrMoveConstructor() &&
+ Ctor->isConvertingConstructor(
+ /*AllowExplicit=*/true);
+ }))
+ return true;
+ if (auto *RD = ArgType->getAsCXXRecordDecl();
+ RD && RD->hasDefinition() &&
+ !RD->getVisibleConversionFunctions().empty())
+ return true;
return false;
};
@@ -8335,9 +8333,7 @@ void Sema::AddConversionCandidate(
QualType ObjectType = From->getType();
if (const auto *FromPtrType = ObjectType->getAs<PointerType>())
ObjectType = FromPtrType->getPointeeType();
- const auto *ConversionContext =
- cast<CXXRecordDecl>(ObjectType->castAs<RecordType>()->getDecl());
-
+ const auto *ConversionContext = ObjectType->castAsCXXRecordDecl();
// C++23 [over.best.ics.general]
// However, if the target is [...]
// - the object parameter of a user-defined conversion function
@@ -8735,16 +8731,16 @@ void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op,
// defined, the set of member candidates is the result of the
// qualified lookup of T1::operator@ (13.3.1.1.1); otherwise,
// the set of member candidates is empty.
- if (const RecordType *T1Rec = T1->getAs<RecordType>()) {
+ if (T1->isRecordType()) {
+ bool IsComplete = isCompleteType(OpLoc, T1);
+ auto *T1RD = T1->getAsCXXRecordDecl();
// Complete the type if it can be completed.
- if (!isCompleteType(OpLoc, T1) && !T1Rec->isBeingDefined())
- return;
// If the type is neither complete nor being defined, bail out now.
- if (!T1Rec->getDecl()->getDefinition())
+ if (!T1RD || (!IsComplete && !T1RD->isBeingDefined()))
return;
LookupResult Operators(*this, OpName, OpLoc, LookupOrdinaryName);
- LookupQualifiedName(Operators, T1Rec->getDecl());
+ LookupQualifiedName(Operators, T1RD);
Operators.suppressAccessDiagnostics();
for (LookupResult::iterator Oper = Operators.begin(),
@@ -9009,8 +9005,8 @@ BuiltinCandidateTypeSet::AddMemberPointerWithMoreQualifiedTypeVariants(
if ((CVR | BaseCVR) != CVR) continue;
QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR);
- MemberPointerTypes.insert(
- Context.getMemberPointerType(QPointeeTy, /*Qualifier=*/nullptr, Cls));
+ MemberPointerTypes.insert(Context.getMemberPointerType(
+ QPointeeTy, /*Qualifier=*/std::nullopt, Cls));
}
return true;
@@ -9046,8 +9042,8 @@ BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
Ty = Ty.getLocalUnqualifiedType();
// Flag if we ever add a non-record type.
- const RecordType *TyRec = Ty->getAs<RecordType>();
- HasNonRecordTypes = HasNonRecordTypes || !TyRec;
+ bool TyIsRec = Ty->isRecordType();
+ HasNonRecordTypes = HasNonRecordTypes || !TyIsRec;
// Flag if we encounter an arithmetic type.
HasArithmeticOrEnumeralTypes =
@@ -9082,12 +9078,12 @@ BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
MatrixTypes.insert(Ty);
} else if (Ty->isNullPtrType()) {
HasNullPtrType = true;
- } else if (AllowUserConversions && TyRec) {
+ } else if (AllowUserConversions && TyIsRec) {
// No conversion functions in incomplete types.
if (!SemaRef.isCompleteType(Loc, Ty))
return;
- CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl());
+ auto *ClassDecl = Ty->castAsCXXRecordDecl();
for (NamedDecl *D : ClassDecl->getVisibleConversionFunctions()) {
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
@@ -10146,7 +10142,7 @@ public:
continue;
for (QualType MemPtrTy : CandidateTypes[1].member_pointer_types()) {
const MemberPointerType *mptr = cast<MemberPointerType>(MemPtrTy);
- CXXRecordDecl *D1 = C1->getAsCXXRecordDecl(),
+ CXXRecordDecl *D1 = C1->castAsCXXRecordDecl(),
*D2 = mptr->getMostRecentCXXRecordDecl();
if (!declaresSameEntity(D1, D2) &&
!S.IsDerivedFrom(CandidateSet.getLocation(), D1, D2))
@@ -10199,7 +10195,9 @@ public:
if (S.getLangOpts().CPlusPlus11) {
for (QualType EnumTy : CandidateTypes[ArgIdx].enumeration_types()) {
- if (!EnumTy->castAs<EnumType>()->getDecl()->isScoped())
+ if (!EnumTy->castAsCanonical<EnumType>()
+ ->getOriginalDecl()
+ ->isScoped())
continue;
if (!AddedTypes.insert(S.Context.getCanonicalType(EnumTy)).second)
@@ -10955,9 +10953,9 @@ bool clang::isBetterOverloadCandidate(
isa<CXXConversionDecl>(Cand1.Function) ? TPOC_Conversion
: TPOC_Call,
Cand1.ExplicitCallArguments,
- Obj1Context ? QualType(Obj1Context->getTypeForDecl(), 0)
+ Obj1Context ? S.Context.getCanonicalTagType(Obj1Context)
: QualType{},
- Obj2Context ? QualType(Obj2Context->getTypeForDecl(), 0)
+ Obj2Context ? S.Context.getCanonicalTagType(Obj2Context)
: QualType{},
Cand1.isReversed() ^ Cand2.isReversed(), PartialOverloading)) {
return BetterTemplate == Cand1.Function->getPrimaryTemplate();
@@ -14863,8 +14861,8 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
if (Method->isExplicitObjectMemberFunction())
Exp = InitializeExplicitObjectArgument(*this, E, Method);
else
- Exp = PerformImplicitObjectArgumentInitialization(E, /*Qualifier=*/nullptr,
- FoundDecl, Method);
+ Exp = PerformImplicitObjectArgumentInitialization(
+ E, /*Qualifier=*/std::nullopt, FoundDecl, Method);
if (Exp.isInvalid())
return true;
@@ -15025,7 +15023,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
InputInit = InitializeExplicitObjectArgument(*this, Input, Method);
else
InputInit = PerformImplicitObjectArgumentInitialization(
- Input, /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ Input, /*Qualifier=*/std::nullopt, Best->FoundDecl, Method);
if (InputInit.isInvalid())
return ExprError();
Base = Input = InputInit.get();
@@ -15408,7 +15406,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
ParamIdx = 1;
} else {
Arg0 = PerformImplicitObjectArgumentInitialization(
- Args[0], /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ Args[0], /*Qualifier=*/std::nullopt, Best->FoundDecl, Method);
}
Arg1 = PerformCopyInitialization(
InitializedEntity::InitializeParameter(
@@ -15878,7 +15876,7 @@ ExprResult Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
ArgExpr = Args;
} else {
ExprResult Arg0 = PerformImplicitObjectArgumentInitialization(
- Args[0], /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ Args[0], /*Qualifier=*/std::nullopt, Best->FoundDecl, Method);
if (Arg0.isInvalid())
return ExprError();
@@ -16074,7 +16072,7 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
CXXMethodDecl *Method = nullptr;
bool HadMultipleCandidates = false;
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_public);
- NestedNameSpecifier *Qualifier = nullptr;
+ NestedNameSpecifier Qualifier = std::nullopt;
if (isa<MemberExpr>(NakedMemExpr)) {
MemExpr = cast<MemberExpr>(NakedMemExpr);
Method = cast<CXXMethodDecl>(MemExpr->getMemberDecl());
@@ -16344,9 +16342,9 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
diag::err_incomplete_object_call, Object.get()))
return true;
- const auto *Record = Object.get()->getType()->castAs<RecordType>();
+ auto *Record = Object.get()->getType()->castAsCXXRecordDecl();
LookupResult R(*this, OpName, LParenLoc, LookupOrdinaryName);
- LookupQualifiedName(R, Record->getDecl());
+ LookupQualifiedName(R, Record);
R.suppressAccessDiagnostics();
for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
@@ -16365,8 +16363,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
// we filter them out to produce better error diagnostics, ie to avoid
// showing 2 failed overloads instead of one.
bool IgnoreSurrogateFunctions = false;
- if (CandidateSet.nonDeferredCandidatesCount() == 1 &&
- Record->getAsCXXRecordDecl()->isLambda()) {
+ if (CandidateSet.nonDeferredCandidatesCount() == 1 && Record->isLambda()) {
const OverloadCandidate &Candidate = *CandidateSet.begin();
if (!Candidate.Viable &&
Candidate.FailureKind == ovl_fail_constraints_not_satisfied)
@@ -16390,8 +16387,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
// functions for each conversion function declared in an
// accessible base class provided the function is not hidden
// within T by another intervening declaration.
- const auto &Conversions =
- cast<CXXRecordDecl>(Record->getDecl())->getVisibleConversionFunctions();
+ const auto &Conversions = Record->getVisibleConversionFunctions();
for (auto I = Conversions.begin(), E = Conversions.end();
!IgnoreSurrogateFunctions && I != E; ++I) {
NamedDecl *D = *I;
@@ -16541,7 +16537,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
IsError |= PrepareExplicitObjectArgument(*this, Method, Obj, Args, NewArgs);
} else {
ExprResult ObjRes = PerformImplicitObjectArgumentInitialization(
- Object.get(), /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ Object.get(), /*Qualifier=*/std::nullopt, Best->FoundDecl, Method);
if (ObjRes.isInvalid())
IsError = true;
else
@@ -16612,7 +16608,7 @@ ExprResult Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base,
return ExprError();
LookupResult R(*this, OpName, OpLoc, LookupOrdinaryName);
- LookupQualifiedName(R, Base->getType()->castAs<RecordType>()->getDecl());
+ LookupQualifiedName(R, Base->getType()->castAsRecordDecl());
R.suppressAccessDiagnostics();
for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
@@ -16686,7 +16682,7 @@ ExprResult Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base,
Base = R.get();
} else {
ExprResult BaseResult = PerformImplicitObjectArgumentInitialization(
- Base, /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ Base, /*Qualifier=*/std::nullopt, Best->FoundDecl, Method);
if (BaseResult.isInvalid())
return ExprError();
Base = BaseResult.get();
@@ -16940,7 +16936,7 @@ ExprResult Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
assert(isa<DeclRefExpr>(SubExpr.get()) &&
"fixed to something other than a decl ref");
- NestedNameSpecifier *Qualifier =
+ NestedNameSpecifier Qualifier =
cast<DeclRefExpr>(SubExpr.get())->getQualifier();
assert(Qualifier &&
"fixed to a member ref with no nested name qualifier");
diff --git a/clang/lib/Sema/SemaPPC.cpp b/clang/lib/Sema/SemaPPC.cpp
index d5c83ae..bfa458d 100644
--- a/clang/lib/Sema/SemaPPC.cpp
+++ b/clang/lib/Sema/SemaPPC.cpp
@@ -41,8 +41,7 @@ void SemaPPC::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
return;
QualType ArgType = Arg->getType();
- for (const FieldDecl *FD :
- ArgType->castAs<RecordType>()->getDecl()->fields()) {
+ for (const FieldDecl *FD : ArgType->castAsRecordDecl()->fields()) {
if (const auto *AA = FD->getAttr<AlignedAttr>()) {
CharUnits Alignment = getASTContext().toCharUnitsFromBits(
AA->getAlignment(getASTContext()));
@@ -106,6 +105,7 @@ bool SemaPPC::CheckPPCBuiltinFunctionCall(const TargetInfo &TI,
switch (BuiltinID) {
default:
return false;
+ case PPC::BI__builtin_ppc_bcdsetsign:
case PPC::BI__builtin_ppc_national2packed:
case PPC::BI__builtin_ppc_packed2zoned:
case PPC::BI__builtin_ppc_zoned2packed:
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index 7b16d08..3ba93ff9 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -1000,6 +1000,7 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm:
case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm:
case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvtbf16_f_f_w_rm:
return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 4);
case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
@@ -1038,6 +1039,7 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu:
case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu:
case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvtbf16_f_f_w_rm_tu:
case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m:
case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m:
case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m:
@@ -1051,6 +1053,7 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m:
case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m:
case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvtbf16_f_f_w_rm_m:
return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 4);
case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
@@ -1100,6 +1103,8 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm:
case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm:
case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmaccbf16_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmaccbf16_vf_rm:
case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu:
case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu:
case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu:
@@ -1124,6 +1129,8 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu:
case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu:
case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmaccbf16_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmaccbf16_vf_rm_tu:
case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m:
case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m:
case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m:
@@ -1161,6 +1168,7 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum:
case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum:
case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvtbf16_f_f_w_rm_tum:
case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu:
@@ -1174,6 +1182,7 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu:
case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu:
case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvtbf16_f_f_w_rm_tumu:
case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu:
@@ -1187,6 +1196,7 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu:
case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu:
case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvtbf16_f_f_w_rm_mu:
return SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 4);
case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m:
case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m:
@@ -1212,6 +1222,8 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m:
case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m:
case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmaccbf16_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmaccbf16_vf_rm_m:
case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum:
@@ -1256,6 +1268,8 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum:
case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum:
case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmaccbf16_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmaccbf16_vf_rm_tum:
case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum:
case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum:
case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum:
@@ -1304,6 +1318,8 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu:
case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu:
case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmaccbf16_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmaccbf16_vf_rm_tumu:
case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu:
@@ -1348,6 +1364,8 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu:
case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu:
case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmaccbf16_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmaccbf16_vf_rm_mu:
return SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 4);
case RISCV::BI__builtin_riscv_ntl_load:
case RISCV::BI__builtin_riscv_ntl_store:
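
The SemaRISCV hunks only extend existing case lists: each new bf16 builtin (vfncvtbf16_f_f_w and vfwmaccbf16 with their _tu/_m/_tum/_tumu/_mu variants) joins the group whose floating-point rounding-mode operand is validated as an immediate in [0, 4], at an argument index that shifts as merge and mask operands are prepended. A minimal standalone sketch of that validation's shape; the names are hypothetical stand-ins, not the Clang API:

#include <cassert>
#include <optional>

// Hypothetical stand-in for a call argument after constant folding.
using FoldedArg = std::optional<long long>;

// Mirrors the shape of SemaRef.BuiltinConstantArgRange(TheCall, Idx, 0, 4):
// returns true on error, following the Sema convention.
bool builtinConstantArgRange(FoldedArg Arg, long long Lo, long long Hi) {
  if (!Arg)
    return true; // not an integer constant expression -> diagnose
  return *Arg < Lo || *Arg > Hi; // outside [Lo, Hi] -> diagnose
}

int main() {
  assert(!builtinConstantArgRange(2, 0, 4));           // frm = RDN, valid
  assert(builtinConstantArgRange(7, 0, 4));            // out of range
  assert(builtinConstantArgRange(std::nullopt, 0, 4)); // non-constant
}
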
diff --git a/clang/lib/Sema/SemaSYCL.cpp b/clang/lib/Sema/SemaSYCL.cpp
index 4683c81..2f97f62 100644
--- a/clang/lib/Sema/SemaSYCL.cpp
+++ b/clang/lib/Sema/SemaSYCL.cpp
@@ -221,7 +221,7 @@ static SourceLocation SourceLocationForUserDeclaredType(QualType QT) {
SourceLocation Loc;
const Type *T = QT->getUnqualifiedDesugaredType();
if (const TagType *TT = dyn_cast<TagType>(T))
- Loc = TT->getDecl()->getLocation();
+ Loc = TT->getOriginalDecl()->getLocation();
else if (const ObjCInterfaceType *ObjCIT = dyn_cast<ObjCInterfaceType>(T))
Loc = ObjCIT->getDecl()->getLocation();
return Loc;
@@ -250,6 +250,23 @@ static bool CheckSYCLKernelName(Sema &S, SourceLocation Loc,
return false;
}
+void SemaSYCL::CheckSYCLExternalFunctionDecl(FunctionDecl *FD) {
+ const auto *SEAttr = FD->getAttr<SYCLExternalAttr>();
+ assert(SEAttr && "Missing sycl_external attribute");
+ if (!FD->isInvalidDecl() && !FD->isTemplated()) {
+ if (!FD->isExternallyVisible())
+ if (!FD->isFunctionTemplateSpecialization() ||
+ FD->getTemplateSpecializationInfo()->isExplicitSpecialization())
+ Diag(SEAttr->getLocation(), diag::err_sycl_external_invalid_linkage)
+ << SEAttr;
+ }
+ if (FD->isDeletedAsWritten()) {
+ Diag(SEAttr->getLocation(),
+ diag::err_sycl_external_invalid_deleted_function)
+ << SEAttr;
+ }
+}
+
void SemaSYCL::CheckSYCLEntryPointFunctionDecl(FunctionDecl *FD) {
// Ensure that all attributes present on the declaration are consistent
// and warn about any redundant ones.
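
For context, the new CheckSYCLExternalFunctionDecl rejects a sycl_external function that is not externally visible, and one that is explicitly deleted. A reduced sketch of the decision logic, with hypothetical field names standing in for the FunctionDecl queries (the real code also exempts invalid declarations and implicit template specializations):

#include <cassert>

struct FnInfo {
  bool Templated;         // FD->isTemplated()
  bool ExternallyVisible; // FD->isExternallyVisible()
  bool DeletedAsWritten;  // FD->isDeletedAsWritten()
};

// Returns true if the sycl_external attribute must be diagnosed.
bool syclExternalIsInvalid(const FnInfo &FD) {
  if (!FD.Templated && !FD.ExternallyVisible)
    return true; // err_sycl_external_invalid_linkage
  if (FD.DeletedAsWritten)
    return true; // err_sycl_external_invalid_deleted_function
  return false;
}

int main() {
  assert(syclExternalIsInvalid({false, false, false})); // e.g. 'static'
  assert(syclExternalIsInvalid({false, true, true}));   // '= delete'
  assert(!syclExternalIsInvalid({false, true, false})); // well-formed
}
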
diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp
index a5f9202..5625fb3 100644
--- a/clang/lib/Sema/SemaStmt.cpp
+++ b/clang/lib/Sema/SemaStmt.cpp
@@ -316,17 +316,10 @@ void DiagnoseUnused(Sema &S, const Expr *E, std::optional<unsigned> DiagID) {
}
}
} else if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) {
- if (const CXXConstructorDecl *Ctor = CE->getConstructor()) {
- const NamedDecl *OffendingDecl = nullptr;
- const auto *A = Ctor->getAttr<WarnUnusedResultAttr>();
- if (!A) {
- OffendingDecl = Ctor->getParent();
- A = OffendingDecl->getAttr<WarnUnusedResultAttr>();
- }
- if (DiagnoseNoDiscard(S, OffendingDecl, A, Loc, R1, R2,
- /*isCtor=*/true))
- return;
- }
+ auto [OffendingDecl, A] = CE->getUnusedResultAttr(S.Context);
+ if (DiagnoseNoDiscard(S, OffendingDecl, A, Loc, R1, R2,
+ /*isCtor=*/true))
+ return;
} else if (const auto *ILE = dyn_cast<InitListExpr>(E)) {
if (const TagDecl *TD = ILE->getType()->getAsTagDecl()) {
@@ -798,7 +791,7 @@ bool Sema::checkMustTailAttr(const Stmt *St, const Attr &MTA) {
const auto *MPT =
CalleeBinOp->getRHS()->getType()->castAs<MemberPointerType>();
CalleeType.This =
- Context.getTypeDeclType(MPT->getMostRecentCXXRecordDecl());
+ Context.getCanonicalTagType(MPT->getMostRecentCXXRecordDecl());
CalleeType.Func = MPT->getPointeeType()->castAs<FunctionProtoType>();
CalleeType.MemberType = FuncType::ft_pointer_to_member;
} else if (isa<CXXPseudoDestructorExpr>(CalleeExpr)) {
@@ -1254,7 +1247,7 @@ static bool ShouldDiagnoseSwitchCaseNotInEnum(const Sema &S,
dyn_cast<DeclRefExpr>(CaseExpr->IgnoreParenImpCasts())) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
QualType VarType = VD->getType();
- QualType EnumType = S.Context.getTypeDeclType(ED);
+ CanQualType EnumType = S.Context.getCanonicalTagType(ED);
if (VD->hasGlobalStorage() && VarType.isConstQualified() &&
S.Context.hasSameUnqualifiedType(EnumType, VarType))
return false;
@@ -1278,17 +1271,17 @@ static void checkEnumTypesInSwitchStmt(Sema &S, const Expr *Cond,
QualType CondType = Cond->getType();
QualType CaseType = Case->getType();
- const EnumType *CondEnumType = CondType->getAs<EnumType>();
- const EnumType *CaseEnumType = CaseType->getAs<EnumType>();
+ const EnumType *CondEnumType = CondType->getAsCanonical<EnumType>();
+ const EnumType *CaseEnumType = CaseType->getAsCanonical<EnumType>();
if (!CondEnumType || !CaseEnumType)
return;
// Ignore anonymous enums.
- if (!CondEnumType->getDecl()->getIdentifier() &&
- !CondEnumType->getDecl()->getTypedefNameForAnonDecl())
+ if (!CondEnumType->getOriginalDecl()->getIdentifier() &&
+ !CondEnumType->getOriginalDecl()->getTypedefNameForAnonDecl())
return;
- if (!CaseEnumType->getDecl()->getIdentifier() &&
- !CaseEnumType->getDecl()->getTypedefNameForAnonDecl())
+ if (!CaseEnumType->getOriginalDecl()->getIdentifier() &&
+ !CaseEnumType->getOriginalDecl()->getTypedefNameForAnonDecl())
return;
if (S.Context.hasSameUnqualifiedType(CondType, CaseType))
@@ -1597,13 +1590,13 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// we still do the analysis to preserve this information in the AST
// (which can be used by flow-based analyes).
//
- const EnumType *ET = CondTypeBeforePromotion->getAs<EnumType>();
-
// If switch has default case, then ignore it.
if (!CaseListIsErroneous && !CaseListIsIncomplete && !HasConstantCond &&
- ET && ET->getDecl()->isCompleteDefinition() &&
- !ET->getDecl()->enumerators().empty()) {
- const EnumDecl *ED = ET->getDecl();
+ CondTypeBeforePromotion->isEnumeralType()) {
+ const auto *ED = CondTypeBeforePromotion->castAsEnumDecl();
+ if (!ED->isCompleteDefinition() || ED->enumerators().empty())
+ goto enum_out;
+
EnumValsTy EnumVals;
// Gather all enum values, set their type and sort them,
@@ -1716,6 +1709,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
if (!hasCasesNotInSwitch)
SS->setAllEnumCasesCovered();
}
+ enum_out:;
}
if (BodyStmt)
@@ -1734,8 +1728,7 @@ void
Sema::DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr) {
- const auto *ET = DstType->getAs<EnumType>();
- if (!ET)
+ if (!DstType->isEnumeralType())
return;
if (!SrcType->isIntegerType() ||
@@ -1745,7 +1738,7 @@ Sema::DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
if (SrcExpr->isTypeDependent() || SrcExpr->isValueDependent())
return;
- const EnumDecl *ED = ET->getDecl();
+ const auto *ED = DstType->castAsEnumDecl();
if (!ED->isClosed())
return;
@@ -3710,7 +3703,7 @@ private:
Sema &S;
};
bool LocalTypedefNameReferencer::VisitRecordType(RecordType *RT) {
- auto *R = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ auto *R = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl());
if (!R || !R->isLocalClass() || !R->isLocalClass()->isExternallyVisible() ||
R->isDependentType())
return true;
@@ -3929,7 +3922,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
<< RetValExp->getSourceRange();
if (FD->hasAttr<CmseNSEntryAttr>() && RetValExp) {
if (const auto *RT = dyn_cast<RecordType>(FnRetType.getCanonicalType())) {
- if (RT->getDecl()->isOrContainsUnion())
+ if (RT->getOriginalDecl()->isOrContainsUnion())
Diag(RetValExp->getBeginLoc(), diag::warn_cmse_nonsecure_union) << 1;
}
}
@@ -4620,7 +4613,8 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
// Build the context parameter
DeclContext *DC = CapturedDecl::castToDeclContext(CD);
IdentifierInfo *ParamName = &Context.Idents.get("__context");
- QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD));
+ CanQualType ParamType =
+ Context.getPointerType(Context.getCanonicalTagType(RD));
auto *Param =
ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType,
ImplicitParamKind::CapturedContext);
@@ -4662,9 +4656,10 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
assert(!ContextIsFound &&
"null type has been found already for '__context' parameter");
IdentifierInfo *ParamName = &Context.Idents.get("__context");
- QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD))
- .withConst()
- .withRestrict();
+ QualType ParamType =
+ Context.getPointerType(Context.getCanonicalTagType(RD))
+ .withConst()
+ .withRestrict();
auto *Param =
ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType,
ImplicitParamKind::CapturedContext);
@@ -4684,7 +4679,8 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
if (!ContextIsFound) {
// Add __context implicitly if it is not specified.
IdentifierInfo *ParamName = &Context.Idents.get("__context");
- QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD));
+ CanQualType ParamType =
+ Context.getPointerType(Context.getCanonicalTagType(RD));
auto *Param =
ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType,
ImplicitParamKind::CapturedContext);
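
The SemaStmt changes are mechanical API migrations (getOriginalDecl, getCanonicalTagType, getAsCanonical) plus a control-flow restructuring of the switch-coverage analysis around the new enum_out label; the user-visible -Wswitch behavior should be unchanged. For reference, the kind of code that analysis reasons about:

// A switch over a complete enum with no default: -Wswitch warns that
// 'Blue' is not handled. The patch only changes how Sema reaches the
// canonical EnumDecl, not this diagnostic.
enum Color { Red, Green, Blue };

int describe(Color C) {
  switch (C) { // warning: enumeration value 'Blue' not handled in switch
  case Red:
    return 0;
  case Green:
    return 1;
  }
  return -1;
}

int main() { return describe(Red); }
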
diff --git a/clang/lib/Sema/SemaStmtAsm.cpp b/clang/lib/Sema/SemaStmtAsm.cpp
index 4507a21..0438af7 100644
--- a/clang/lib/Sema/SemaStmtAsm.cpp
+++ b/clang/lib/Sema/SemaStmtAsm.cpp
@@ -885,18 +885,19 @@ bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
for (StringRef NextMember : Members) {
const RecordType *RT = nullptr;
if (VarDecl *VD = dyn_cast<VarDecl>(FoundDecl))
- RT = VD->getType()->getAs<RecordType>();
+ RT = VD->getType()->getAsCanonical<RecordType>();
else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(FoundDecl)) {
MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
// MS InlineAsm often uses struct pointer aliases as a base
QualType QT = TD->getUnderlyingType();
if (const auto *PT = QT->getAs<PointerType>())
QT = PT->getPointeeType();
- RT = QT->getAs<RecordType>();
+ RT = QT->getAsCanonical<RecordType>();
} else if (TypeDecl *TD = dyn_cast<TypeDecl>(FoundDecl))
- RT = TD->getTypeForDecl()->getAs<RecordType>();
+ RT = QualType(Context.getCanonicalTypeDeclType(TD))
+ ->getAsCanonical<RecordType>();
else if (FieldDecl *TD = dyn_cast<FieldDecl>(FoundDecl))
- RT = TD->getType()->getAs<RecordType>();
+ RT = TD->getType()->getAsCanonical<RecordType>();
if (!RT)
return true;
@@ -907,7 +908,8 @@ bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
LookupResult FieldResult(*this, &Context.Idents.get(NextMember),
SourceLocation(), LookupMemberName);
- if (!LookupQualifiedName(FieldResult, RT->getDecl()))
+ RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
+ if (!LookupQualifiedName(FieldResult, RD))
return true;
if (!FieldResult.isSingleResult())
@@ -919,7 +921,7 @@ bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
if (!FD)
return true;
- const ASTRecordLayout &RL = Context.getASTRecordLayout(RT->getDecl());
+ const ASTRecordLayout &RL = Context.getASTRecordLayout(RD);
unsigned i = FD->getFieldIndex();
CharUnits Result = Context.toCharUnitsFromBits(RL.getFieldOffset(i));
Offset += (unsigned)Result.getQuantity();
@@ -943,15 +945,15 @@ Sema::LookupInlineAsmVarDeclField(Expr *E, StringRef Member,
/*FirstQualifierFoundInScope=*/nullptr, NameInfo, /*TemplateArgs=*/nullptr);
}
- const RecordType *RT = T->getAs<RecordType>();
+ auto *RD = T->getAsRecordDecl();
// FIXME: Diagnose this as field access into a scalar type.
- if (!RT)
+ if (!RD)
return ExprResult();
LookupResult FieldResult(*this, &Context.Idents.get(Member), AsmLoc,
LookupMemberName);
- if (!LookupQualifiedName(FieldResult, RT->getDecl()))
+ if (!LookupQualifiedName(FieldResult, RD))
return ExprResult();
// Only normal and indirect field results will work.
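
LookupInlineAsmField resolves a dotted MS inline-asm member reference one record at a time, adding each field's layout offset to a running byte offset. A standalone analogue of that accumulation (the extended offsetof form used only to cross-check the sum is a common GCC/Clang extension, not standard C++):

#include <cassert>
#include <cstddef>

struct Inner { int a; int b; };
struct Outer { char pad; Inner in; };

int main() {
  // Walking "out.in.b" accumulates per-record field offsets, exactly the
  // running "Offset +=" in the loop above.
  std::size_t sum = offsetof(Outer, in) + offsetof(Inner, b);
  assert(sum == offsetof(Outer, in.b));
}
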
diff --git a/clang/lib/Sema/SemaSwift.cpp b/clang/lib/Sema/SemaSwift.cpp
index 4000bef..d21d793 100644
--- a/clang/lib/Sema/SemaSwift.cpp
+++ b/clang/lib/Sema/SemaSwift.cpp
@@ -129,9 +129,9 @@ static bool isErrorParameter(Sema &S, QualType QT) {
// Check for CFError**.
if (const auto *PT = Pointee->getAs<PointerType>())
- if (const auto *RT = PT->getPointeeType()->getAs<RecordType>())
- if (S.ObjC().isCFError(RT->getDecl()))
- return true;
+ if (auto *RD = PT->getPointeeType()->getAsRecordDecl();
+ RD && S.ObjC().isCFError(RD))
+ return true;
return false;
}
@@ -271,11 +271,10 @@ static void checkSwiftAsyncErrorBlock(Sema &S, Decl *D,
}
// Check for CFError *.
if (const auto *PtrTy = Param->getAs<PointerType>()) {
- if (const auto *RT = PtrTy->getPointeeType()->getAs<RecordType>()) {
- if (S.ObjC().isCFError(RT->getDecl())) {
- AnyErrorParams = true;
- break;
- }
+ if (auto *RD = PtrTy->getPointeeType()->getAsRecordDecl();
+ RD && S.ObjC().isCFError(RD)) {
+ AnyErrorParams = true;
+ break;
}
}
}
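
The SemaSwift hunks fold the RecordType detour into getAsRecordDecl and adopt the C++17 if-with-initializer idiom so the declaration is scoped to its check. The same shape in isolation, with hypothetical names:

#include <cassert>

struct Record { bool IsCFError; };

const Record *getAsRecordDecl(const Record *Pointee) { return Pointee; }
bool isCFError(const Record *RD) { return RD && RD->IsCFError; }

bool isErrorPointee(const Record *Pointee) {
  // Declare RD, test it, and use it in one scope, as the patch now does.
  if (const Record *RD = getAsRecordDecl(Pointee); RD && isCFError(RD))
    return true;
  return false;
}

int main() {
  Record CFErr{true}, Other{false};
  assert(isErrorPointee(&CFErr));
  assert(!isErrorPointee(&Other));
  assert(!isErrorPointee(nullptr));
}
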
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index b6b8932..3d8416a 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
+#include "clang/AST/TypeOrdering.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
@@ -293,7 +294,7 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD);
assert(!FoundUsingShadow || FoundUsingShadow->getTargetDecl() == TD);
if (!SS.isInvalid()) {
- NestedNameSpecifier *Qualifier = SS.getScopeRep();
+ NestedNameSpecifier Qualifier = SS.getScopeRep();
Template = Context.getQualifiedTemplateName(Qualifier, hasTemplateKeyword,
Template);
}
@@ -316,6 +317,12 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
}
}
+ if (isPackProducingBuiltinTemplateName(Template) &&
+ S->getTemplateParamParent() == nullptr)
+ Diag(Name.getBeginLoc(), diag::err_builtin_pack_outside_template) << TName;
+ // Recover by returning the template, even though we would never be able to
+ // substitute it.
+
TemplateResult = TemplateTy::make(Template);
return TemplateKind;
}
@@ -367,9 +374,8 @@ bool Sema::DiagnoseUnknownTemplateName(const IdentifierInfo &II,
// The code is missing a 'template' keyword prior to the dependent template
// name.
- NestedNameSpecifier *Qualifier = (NestedNameSpecifier *)SS->getScopeRep();
SuggestedTemplate = TemplateTy::make(Context.getDependentTemplateName(
- {Qualifier, &II, /*HasTemplateKeyword=*/false}));
+ {SS->getScopeRep(), &II, /*HasTemplateKeyword=*/false}));
Diag(IILoc, diag::err_template_kw_missing)
<< SuggestedTemplate.get()
<< FixItHint::CreateInsertion(IILoc, "template ");
@@ -401,7 +407,9 @@ bool Sema::LookupTemplateName(LookupResult &Found, Scope *S, CXXScopeSpec &SS,
IsDependent = !LookupCtx && ObjectType->isDependentType();
assert((IsDependent || !ObjectType->isIncompleteType() ||
!ObjectType->getAs<TagType>() ||
- ObjectType->castAs<TagType>()->isBeingDefined()) &&
+ ObjectType->castAs<TagType>()
+ ->getOriginalDecl()
+ ->isEntityBeingDefined()) &&
"Caller should have completed object type");
// Template names cannot appear inside an Objective-C class or object type
@@ -801,9 +809,9 @@ bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
if (!Complain || (PatternDef && PatternDef->isInvalidDecl()))
return true;
- QualType InstantiationTy;
+ CanQualType InstantiationTy;
if (TagDecl *TD = dyn_cast<TagDecl>(Instantiation))
- InstantiationTy = Context.getTypeDeclType(TD);
+ InstantiationTy = Context.getCanonicalTagType(TD);
if (PatternDef) {
Diag(PointOfInstantiation,
diag::err_template_instantiate_within_definition)
@@ -911,12 +919,12 @@ static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef,
TypeSourceInfo *DI;
QualType T = SemaRef.GetTypeFromParser(Arg.getAsType(), &DI);
if (!DI)
- DI = SemaRef.Context.getTrivialTypeSourceInfo(T, Arg.getLocation());
+ DI = SemaRef.Context.getTrivialTypeSourceInfo(T, Arg.getNameLoc());
return TemplateArgumentLoc(TemplateArgument(T), DI);
}
case ParsedTemplateArgument::NonType: {
- Expr *E = static_cast<Expr *>(Arg.getAsExpr());
+ Expr *E = Arg.getAsExpr();
return TemplateArgumentLoc(TemplateArgument(E, /*IsCanonical=*/false), E);
}
@@ -928,9 +936,9 @@ static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef,
else
TArg = Template;
return TemplateArgumentLoc(
- SemaRef.Context, TArg,
+ SemaRef.Context, TArg, Arg.getTemplateKwLoc(),
Arg.getScopeSpec().getWithLocInContext(SemaRef.Context),
- Arg.getLocation(), Arg.getEllipsisLoc());
+ Arg.getNameLoc(), Arg.getEllipsisLoc());
}
}
@@ -971,15 +979,12 @@ ParsedTemplateArgument Sema::ActOnTemplateTypeArgument(TypeResult ParsedType) {
TL = PET.getPatternLoc();
}
- CXXScopeSpec SS;
- if (auto ET = TL.getAs<ElaboratedTypeLoc>()) {
- SS.Adopt(ET.getQualifierLoc());
- TL = ET.getNamedTypeLoc();
- }
-
if (auto DTST = TL.getAs<DeducedTemplateSpecializationTypeLoc>()) {
TemplateName Name = DTST.getTypePtr()->getTemplateName();
- ParsedTemplateArgument Result(SS, TemplateTy::make(Name),
+ CXXScopeSpec SS;
+ SS.Adopt(DTST.getQualifierLoc());
+ ParsedTemplateArgument Result(/*TemplateKwLoc=*/SourceLocation(), SS,
+ TemplateTy::make(Name),
DTST.getTemplateNameLoc());
if (EllipsisLoc.isValid())
Result = Result.getTemplatePackExpansion(EllipsisLoc);
@@ -2080,14 +2085,19 @@ DeclResult Sema::CheckClassTemplate(
// If we have a prior definition that is not visible, treat this as
// simply making that previous definition visible.
NamedDecl *Hidden = nullptr;
- if (SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
+ bool HiddenDefVisible = false;
+ if (SkipBody &&
+ isRedefinitionAllowedFor(Def, &Hidden, HiddenDefVisible)) {
SkipBody->ShouldSkip = true;
SkipBody->Previous = Def;
- auto *Tmpl = cast<CXXRecordDecl>(Hidden)->getDescribedClassTemplate();
- assert(Tmpl && "original definition of a class template is not a "
- "class template?");
- makeMergedDefinitionVisible(Hidden);
- makeMergedDefinitionVisible(Tmpl);
+ if (!HiddenDefVisible && Hidden) {
+ auto *Tmpl =
+ cast<CXXRecordDecl>(Hidden)->getDescribedClassTemplate();
+ assert(Tmpl && "original definition of a class template is not a "
+ "class template?");
+ makeMergedDefinitionVisible(Hidden);
+ makeMergedDefinitionVisible(Tmpl);
+ }
} else {
Diag(NameLoc, diag::err_redefinition) << Name;
Diag(Def->getLocation(), diag::note_previous_definition);
@@ -2145,11 +2155,11 @@ DeclResult Sema::CheckClassTemplate(
bool ShouldAddRedecl =
!(TUK == TagUseKind::Friend && CurContext->isDependentContext());
- CXXRecordDecl *NewClass =
- CXXRecordDecl::Create(Context, Kind, SemanticContext, KWLoc, NameLoc, Name,
- PrevClassTemplate && ShouldAddRedecl ?
- PrevClassTemplate->getTemplatedDecl() : nullptr,
- /*DelayTypeCreation=*/true);
+ CXXRecordDecl *NewClass = CXXRecordDecl::Create(
+ Context, Kind, SemanticContext, KWLoc, NameLoc, Name,
+ PrevClassTemplate && ShouldAddRedecl
+ ? PrevClassTemplate->getTemplatedDecl()
+ : nullptr);
SetNestedNameSpecifier(*this, NewClass, SS);
if (NumOuterTemplateParamLists > 0)
NewClass->setTemplateParameterListsInfo(
@@ -2178,12 +2188,6 @@ DeclResult Sema::CheckClassTemplate(
if (ModulePrivateLoc.isValid())
NewTemplate->setModulePrivate();
- // Build the type for the class template declaration now.
- QualType T = NewTemplate->getInjectedClassNameSpecialization();
- T = Context.getInjectedClassNameType(NewClass, T);
- assert(T->isDependentType() && "Class template type is not dependent?");
- (void)T;
-
// If we are providing an explicit specialization of a member that is a
// class template, make a note of that.
if (PrevClassTemplate &&
@@ -2674,11 +2678,11 @@ struct DependencyChecker : DynamicRecursiveASTVisitor {
return DynamicRecursiveASTVisitor::TraverseStmt(S);
}
- bool TraverseTypeLoc(TypeLoc TL) override {
+ bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true) override {
if (IgnoreNonTypeDependent && !TL.isNull() &&
!TL.getType()->isDependentType())
return true;
- return DynamicRecursiveASTVisitor::TraverseTypeLoc(TL);
+ return DynamicRecursiveASTVisitor::TraverseTypeLoc(TL, TraverseQualifier);
}
bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) override {
@@ -2727,8 +2731,12 @@ struct DependencyChecker : DynamicRecursiveASTVisitor {
return TraverseTemplateArgument(T->getArgumentPack());
}
- bool TraverseInjectedClassNameType(InjectedClassNameType *T) override {
- return TraverseType(T->getInjectedSpecializationType());
+ bool TraverseInjectedClassNameType(InjectedClassNameType *T,
+ bool TraverseQualifier) override {
+ // An InjectedClassNameType will never have a dependent template name,
+ // so no need to traverse it.
+ return TraverseTemplateArguments(
+ T->getTemplateArgs(T->getOriginalDecl()->getASTContext()));
}
};
} // end anonymous namespace
@@ -2751,14 +2759,14 @@ static SourceRange getRangeOfTypeInNestedNameSpecifier(ASTContext &Context,
QualType T,
const CXXScopeSpec &SS) {
NestedNameSpecifierLoc NNSLoc(SS.getScopeRep(), SS.location_data());
- while (NestedNameSpecifier *NNS = NNSLoc.getNestedNameSpecifier()) {
- if (const Type *CurType = NNS->getAsType()) {
- if (Context.hasSameUnqualifiedType(T, QualType(CurType, 0)))
- return NNSLoc.getTypeLoc().getSourceRange();
- } else
+ for (;;) {
+ NestedNameSpecifier NNS = NNSLoc.getNestedNameSpecifier();
+ if (NNS.getKind() != NestedNameSpecifier::Kind::Type)
break;
-
- NNSLoc = NNSLoc.getPrefix();
+ if (Context.hasSameUnqualifiedType(T, QualType(NNS.getAsType(), 0)))
+ return NNSLoc.castAsTypeLoc().getSourceRange();
+ // FIXME: This will always be empty.
+ NNSLoc = NNSLoc.getAsNamespaceAndPrefix().Prefix;
}
return SourceRange();
@@ -2777,12 +2785,13 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
// by the nested-name-specifier and walking out until we run out of types.
SmallVector<QualType, 4> NestedTypes;
QualType T;
- if (SS.getScopeRep()) {
- if (CXXRecordDecl *Record
- = dyn_cast_or_null<CXXRecordDecl>(computeDeclContext(SS, true)))
- T = Context.getTypeDeclType(Record);
+ if (NestedNameSpecifier Qualifier = SS.getScopeRep();
+ Qualifier.getKind() == NestedNameSpecifier::Kind::Type) {
+ if (CXXRecordDecl *Record =
+ dyn_cast_or_null<CXXRecordDecl>(computeDeclContext(SS, true)))
+ T = Context.getCanonicalTagType(Record);
else
- T = QualType(SS.getScopeRep()->getAsType(), 0);
+ T = QualType(Qualifier.getAsType(), 0);
}
// If we found an explicit specialization that prevents us from needing
@@ -2830,9 +2839,10 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
// Look one step prior in a dependent template specialization type.
if (const DependentTemplateSpecializationType *DependentTST
= T->getAs<DependentTemplateSpecializationType>()) {
- if (NestedNameSpecifier *NNS =
- DependentTST->getDependentTemplateName().getQualifier())
- T = QualType(NNS->getAsType(), 0);
+ if (NestedNameSpecifier NNS =
+ DependentTST->getDependentTemplateName().getQualifier();
+ NNS.getKind() == NestedNameSpecifier::Kind::Type)
+ T = QualType(NNS.getAsType(), 0);
else
T = QualType();
continue;
@@ -2840,22 +2850,23 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
// Look one step prior in a dependent name type.
if (const DependentNameType *DependentName = T->getAs<DependentNameType>()){
- if (NestedNameSpecifier *NNS = DependentName->getQualifier())
- T = QualType(NNS->getAsType(), 0);
+ if (NestedNameSpecifier NNS = DependentName->getQualifier();
+ NNS.getKind() == NestedNameSpecifier::Kind::Type)
+ T = QualType(NNS.getAsType(), 0);
else
T = QualType();
continue;
}
// Retrieve the parent of an enumeration type.
- if (const EnumType *EnumT = T->getAs<EnumType>()) {
+ if (const EnumType *EnumT = T->getAsCanonical<EnumType>()) {
// FIXME: Forward-declared enums require a TSK_ExplicitSpecialization
// check here.
- EnumDecl *Enum = EnumT->getDecl();
+ EnumDecl *Enum = EnumT->getOriginalDecl();
// Get to the parent type.
if (TypeDecl *Parent = dyn_cast<TypeDecl>(Enum->getParent()))
- T = Context.getTypeDeclType(Parent);
+ T = Context.getCanonicalTypeDeclType(Parent);
else
T = QualType();
continue;
@@ -3151,7 +3162,8 @@ void Sema::NoteAllFoundTemplates(TemplateName Name) {
}
}
-static QualType builtinCommonTypeImpl(Sema &S, TemplateName BaseTemplate,
+static QualType builtinCommonTypeImpl(Sema &S, ElaboratedTypeKeyword Keyword,
+ TemplateName BaseTemplate,
SourceLocation TemplateLoc,
ArrayRef<TemplateArgument> Ts) {
auto lookUpCommonType = [&](TemplateArgument T1,
@@ -3159,7 +3171,8 @@ static QualType builtinCommonTypeImpl(Sema &S, TemplateName BaseTemplate,
// Don't bother looking for other specializations if both types are
// builtins - users aren't allowed to specialize for them
if (T1.getAsType()->isBuiltinType() && T2.getAsType()->isBuiltinType())
- return builtinCommonTypeImpl(S, BaseTemplate, TemplateLoc, {T1, T2});
+ return builtinCommonTypeImpl(S, Keyword, BaseTemplate, TemplateLoc,
+ {T1, T2});
TemplateArgumentListInfo Args;
Args.addArgument(TemplateArgumentLoc(
@@ -3173,7 +3186,7 @@ static QualType builtinCommonTypeImpl(Sema &S, TemplateName BaseTemplate,
Sema::ContextRAII TUContext(S, S.Context.getTranslationUnitDecl());
QualType BaseTemplateInst =
- S.CheckTemplateIdType(BaseTemplate, TemplateLoc, Args);
+ S.CheckTemplateIdType(Keyword, BaseTemplate, TemplateLoc, Args);
if (SFINAE.hasErrorOccurred())
return QualType();
@@ -3286,7 +3299,7 @@ static QualType builtinCommonTypeImpl(Sema &S, TemplateName BaseTemplate,
}
static bool isInVkNamespace(const RecordType *RT) {
- DeclContext *DC = RT->getDecl()->getDeclContext();
+ DeclContext *DC = RT->getOriginalDecl()->getDeclContext();
if (!DC)
return false;
@@ -3300,23 +3313,25 @@ static bool isInVkNamespace(const RecordType *RT) {
static SpirvOperand checkHLSLSpirvTypeOperand(Sema &SemaRef,
QualType OperandArg,
SourceLocation Loc) {
- if (auto *RT = OperandArg->getAs<RecordType>()) {
+ if (auto *RT = OperandArg->getAsCanonical<RecordType>()) {
bool Literal = false;
SourceLocation LiteralLoc;
- if (isInVkNamespace(RT) && RT->getDecl()->getName() == "Literal") {
- auto SpecDecl = dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ if (isInVkNamespace(RT) && RT->getOriginalDecl()->getName() == "Literal") {
+ auto SpecDecl =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getOriginalDecl());
assert(SpecDecl);
const TemplateArgumentList &LiteralArgs = SpecDecl->getTemplateArgs();
QualType ConstantType = LiteralArgs[0].getAsType();
- RT = ConstantType->getAs<RecordType>();
+ RT = ConstantType->getAsCanonical<RecordType>();
Literal = true;
LiteralLoc = SpecDecl->getSourceRange().getBegin();
}
if (RT && isInVkNamespace(RT) &&
- RT->getDecl()->getName() == "integral_constant") {
- auto SpecDecl = dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ RT->getOriginalDecl()->getName() == "integral_constant") {
+ auto SpecDecl =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getOriginalDecl());
assert(SpecDecl);
const TemplateArgumentList &ConstantArgs = SpecDecl->getTemplateArgs();
@@ -3338,11 +3353,10 @@ static SpirvOperand checkHLSLSpirvTypeOperand(Sema &SemaRef,
return SpirvOperand::createType(OperandArg);
}
-static QualType
-checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
- ArrayRef<TemplateArgument> Converted,
- SourceLocation TemplateLoc,
- TemplateArgumentListInfo &TemplateArgs) {
+static QualType checkBuiltinTemplateIdType(
+ Sema &SemaRef, ElaboratedTypeKeyword Keyword, BuiltinTemplateDecl *BTD,
+ ArrayRef<TemplateArgument> Converted, SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs) {
ASTContext &Context = SemaRef.getASTContext();
switch (BTD->getBuiltinTemplateKind()) {
@@ -3389,7 +3403,7 @@ checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
// The first template argument will be reused as the template decl that
// our synthetic template arguments will be applied to.
- return SemaRef.CheckTemplateIdType(Converted[0].getAsTemplate(),
+ return SemaRef.CheckTemplateIdType(Keyword, Converted[0].getAsTemplate(),
TemplateLoc, SyntheticTemplateArgs);
}
@@ -3426,14 +3440,16 @@ checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
TemplateName BaseTemplate = Converted[0].getAsTemplate();
ArrayRef<TemplateArgument> Ts = Converted[3].getPackAsArray();
- if (auto CT = builtinCommonTypeImpl(SemaRef, BaseTemplate, TemplateLoc, Ts);
+ if (auto CT = builtinCommonTypeImpl(SemaRef, Keyword, BaseTemplate,
+ TemplateLoc, Ts);
!CT.isNull()) {
TemplateArgumentListInfo TAs;
TAs.addArgument(TemplateArgumentLoc(
TemplateArgument(CT), SemaRef.Context.getTrivialTypeSourceInfo(
CT, TemplateArgs[1].getLocation())));
TemplateName HasTypeMember = Converted[1].getAsTemplate();
- return SemaRef.CheckTemplateIdType(HasTypeMember, TemplateLoc, TAs);
+ return SemaRef.CheckTemplateIdType(Keyword, HasTypeMember, TemplateLoc,
+ TAs);
}
QualType HasNoTypeMember = Converted[2].getAsType();
return HasNoTypeMember;
@@ -3468,6 +3484,28 @@ checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
return Context.getHLSLInlineSpirvType(Opcode, Size, Alignment, Operands);
}
+ case BTK__builtin_dedup_pack: {
+ assert(Converted.size() == 1 && "__builtin_dedup_pack should be given "
+ "a parameter pack");
+ TemplateArgument Ts = Converted[0];
+ // Delay the computation until we can compute the final result. We choose
+ // not to remove the duplicates upfront before substitution to keep the code
+ // simple.
+ if (Ts.isDependent())
+ return QualType();
+ assert(Ts.getKind() == clang::TemplateArgument::Pack);
+ llvm::SmallVector<TemplateArgument> OutArgs;
+ llvm::SmallDenseSet<QualType> Seen;
+ // Synthesize a new template argument list, removing duplicates.
+ for (auto T : Ts.getPackAsArray()) {
+ assert(T.getKind() == clang::TemplateArgument::Type);
+ if (!Seen.insert(T.getAsType().getCanonicalType()).second)
+ continue;
+ OutArgs.push_back(T);
+ }
+ return Context.getSubstBuiltinTemplatePack(
+ TemplateArgument::CreatePackCopy(Context, OutArgs));
+ }
}
llvm_unreachable("unexpected BuiltinTemplateDecl!");
}
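
The new BTK__builtin_dedup_pack case removes duplicates from a fully-substituted pack, comparing canonical types and keeping first occurrences in order. A library-level analogue of that semantics in plain C++17 (no builtin involved), using exact-type equality in place of canonical-type equality:

#include <type_traits>

template <class...> struct TypeList {};

// Append T to the list unless an identical type is already present.
template <class List, class T> struct Append;
template <class... Ts, class T> struct Append<TypeList<Ts...>, T> {
  using type = std::conditional_t<(std::is_same_v<T, Ts> || ...),
                                  TypeList<Ts...>,     // seen: skip
                                  TypeList<Ts..., T>>; // new: keep
};

template <class List, class... Ts> struct Dedup { using type = List; };
template <class List, class T, class... Rest>
struct Dedup<List, T, Rest...>
    : Dedup<typename Append<List, T>::type, Rest...> {};

static_assert(
    std::is_same_v<Dedup<TypeList<>, int, float, int, char, float>::type,
                   TypeList<int, float, char>>);

int main() {}
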
@@ -3545,7 +3583,7 @@ public:
if (DR && DR->getQualifier()) {
// If this is a qualified name, expand the template arguments in nested
// qualifiers.
- DR->getQualifier()->print(OS, Policy, true);
+ DR->getQualifier().print(OS, Policy, true);
// Then print the decl itself.
const ValueDecl *VD = DR->getDecl();
OS << VD->getName();
@@ -3610,7 +3648,8 @@ Sema::findFailedBooleanCondition(Expr *Cond) {
return { FailedCond, Description };
}
-QualType Sema::CheckTemplateIdType(TemplateName Name,
+QualType Sema::CheckTemplateIdType(ElaboratedTypeKeyword Keyword,
+ TemplateName Name,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs) {
// FIXME: 'getUnderlying' loses SubstTemplateTemplateParm nodes from alias
@@ -3661,6 +3700,18 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
} else if (TypeAliasTemplateDecl *AliasTemplate =
dyn_cast<TypeAliasTemplateDecl>(Template)) {
+ // C++0x [dcl.type.elab]p2:
+ // If the identifier resolves to a typedef-name or the simple-template-id
+ // resolves to an alias template specialization, the
+ // elaborated-type-specifier is ill-formed.
+ if (Keyword != ElaboratedTypeKeyword::None &&
+ Keyword != ElaboratedTypeKeyword::Typename) {
+ SemaRef.Diag(TemplateLoc, diag::err_tag_reference_non_tag)
+ << AliasTemplate << NonTagKind::TypeAliasTemplate
+ << KeywordHelpers::getTagTypeKindForKeyword(Keyword);
+ SemaRef.Diag(AliasTemplate->getLocation(), diag::note_declared_at);
+ }
+
// Find the canonical type for this type alias template specialization.
TypeAliasDecl *Pattern = AliasTemplate->getTemplatedDecl();
if (Pattern->isInvalidDecl())
@@ -3728,8 +3779,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
return QualType();
}
} else if (auto *BTD = dyn_cast<BuiltinTemplateDecl>(Template)) {
- CanonType = checkBuiltinTemplateIdType(*this, BTD, CTAI.SugaredConverted,
- TemplateLoc, TemplateArgs);
+ CanonType = checkBuiltinTemplateIdType(
+ *this, Keyword, BTD, CTAI.SugaredConverted, TemplateLoc, TemplateArgs);
} else if (Name.isDependent() ||
TemplateSpecializationType::anyDependentTemplateArguments(
TemplateArgs, CTAI.CanonicalConverted)) {
@@ -3768,16 +3819,15 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// Fetch the injected class name type and check whether its
// injected type is equal to the type we just built.
- QualType ICNT = Context.getTypeDeclType(Record);
- QualType Injected = cast<InjectedClassNameType>(ICNT)
- ->getInjectedSpecializationType();
+ CanQualType ICNT = Context.getCanonicalTagType(Record);
+ CanQualType Injected =
+ Record->getCanonicalTemplateSpecializationType(Context);
- if (CanonType != Injected->getCanonicalTypeInternal())
+ if (CanonType != Injected)
continue;
// If so, the canonical type of this TST is the injected
// class name type of the record we just found.
- assert(ICNT.isCanonical());
CanonType = ICNT;
break;
}
@@ -3819,7 +3869,7 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// Diagnose uses of this specialization.
(void)DiagnoseUseOfDecl(Decl, TemplateLoc);
- CanonType = Context.getTypeDeclType(Decl);
+ CanonType = Context.getCanonicalTagType(Decl);
assert(isa<RecordType>(CanonType) &&
"type of non-dependent specialization is not a RecordType");
} else {
@@ -3830,7 +3880,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// specialization, which refers back to the class template
// specialization we created or found.
return Context.getTemplateSpecializationType(
- Name, TemplateArgs.arguments(), CTAI.CanonicalConverted, CanonType);
+ Keyword, Name, TemplateArgs.arguments(), CTAI.CanonicalConverted,
+ CanonType);
}
void Sema::ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &ParsedName,
@@ -3878,7 +3929,7 @@ bool Sema::resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
diagnoseTypo(Corrected, PDiag(diag::err_no_template_suggest)
<< ATN->getDeclName());
Name = Context.getQualifiedTemplateName(
- /*NNS=*/nullptr, /*TemplateKeyword=*/false,
+ /*Qualifier=*/std::nullopt, /*TemplateKeyword=*/false,
TemplateName(Corrected.getCorrectionDeclAs<TemplateDecl>()));
return false;
}
@@ -3889,11 +3940,12 @@ bool Sema::resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
}
TypeResult Sema::ActOnTemplateIdType(
- Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
- TemplateTy TemplateD, const IdentifierInfo *TemplateII,
- SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
- ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc,
- bool IsCtorOrDtorName, bool IsClassName,
+ Scope *S, ElaboratedTypeKeyword ElaboratedKeyword,
+ SourceLocation ElaboratedKeywordLoc, CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc, TemplateTy TemplateD,
+ const IdentifierInfo *TemplateII, SourceLocation TemplateIILoc,
+ SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc, bool IsCtorOrDtorName, bool IsClassName,
ImplicitTypenameContext AllowImplicitTypename) {
if (SS.isInvalid())
return true;
@@ -3909,8 +3961,9 @@ TypeResult Sema::ActOnTemplateIdType(
// elaborated-type-specifier (7.1.5.3).
if (!LookupCtx && isDependentScopeSpecifier(SS)) {
// C++2a relaxes some of those restrictions in [temp.res]p5.
- NestedNameSpecifier *NNS =
- NestedNameSpecifier::Create(Context, SS.getScopeRep(), TemplateII);
+ QualType DNT = Context.getDependentNameType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), TemplateII);
+ NestedNameSpecifier NNS(DNT.getTypePtr());
if (AllowImplicitTypename == ImplicitTypenameContext::Yes) {
auto DB = DiagCompat(SS.getBeginLoc(), diag_compat::implicit_typename)
<< NNS;
@@ -3954,12 +4007,12 @@ TypeResult Sema::ActOnTemplateIdType(
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
assert(SS.getScopeRep() == DTN->getQualifier());
QualType T = Context.getDependentTemplateSpecializationType(
- ElaboratedTypeKeyword::None, *DTN, TemplateArgs.arguments());
+ ElaboratedKeyword, *DTN, TemplateArgs.arguments());
// Build type-source information.
TypeLocBuilder TLB;
DependentTemplateSpecializationTypeLoc SpecTL
= TLB.push<DependentTemplateSpecializationTypeLoc>(T);
- SpecTL.setElaboratedKeywordLoc(SourceLocation());
+ SpecTL.setElaboratedKeywordLoc(ElaboratedKeywordLoc);
SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
SpecTL.setTemplateNameLoc(TemplateIILoc);
@@ -3970,30 +4023,17 @@ TypeResult Sema::ActOnTemplateIdType(
return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
}
- QualType SpecTy = CheckTemplateIdType(Template, TemplateIILoc, TemplateArgs);
+ QualType SpecTy = CheckTemplateIdType(ElaboratedKeyword, Template,
+ TemplateIILoc, TemplateArgs);
if (SpecTy.isNull())
return true;
// Build type-source information.
TypeLocBuilder TLB;
- TemplateSpecializationTypeLoc SpecTL =
- TLB.push<TemplateSpecializationTypeLoc>(SpecTy);
- SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
- SpecTL.setTemplateNameLoc(TemplateIILoc);
- SpecTL.setLAngleLoc(LAngleLoc);
- SpecTL.setRAngleLoc(RAngleLoc);
- for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i)
- SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo());
-
- // Create an elaborated-type-specifier containing the nested-name-specifier.
- QualType ElTy =
- getElaboratedType(ElaboratedTypeKeyword::None,
- !IsCtorOrDtorName ? SS : CXXScopeSpec(), SpecTy);
- ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(ElTy);
- ElabTL.setElaboratedKeywordLoc(SourceLocation());
- if (!ElabTL.isEmpty())
- ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
- return CreateParsedType(ElTy, TLB.getTypeSourceInfo(Context, ElTy));
+ TLB.push<TemplateSpecializationTypeLoc>(SpecTy).set(
+ ElaboratedKeywordLoc, SS.getWithLocInContext(Context), TemplateKWLoc,
+ TemplateIILoc, TemplateArgs);
+ return CreateParsedType(SpecTy, TLB.getTypeSourceInfo(Context, SpecTy));
}
TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
@@ -4040,24 +4080,14 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
}
- if (TypeAliasTemplateDecl *TAT =
- dyn_cast_or_null<TypeAliasTemplateDecl>(Template.getAsTemplateDecl())) {
- // C++0x [dcl.type.elab]p2:
- // If the identifier resolves to a typedef-name or the simple-template-id
- // resolves to an alias template specialization, the
- // elaborated-type-specifier is ill-formed.
- Diag(TemplateLoc, diag::err_tag_reference_non_tag)
- << TAT << NonTagKind::TypeAliasTemplate << TagKind;
- Diag(TAT->getLocation(), diag::note_declared_at);
- }
-
- QualType Result = CheckTemplateIdType(Template, TemplateLoc, TemplateArgs);
+ QualType Result =
+ CheckTemplateIdType(Keyword, Template, TemplateLoc, TemplateArgs);
if (Result.isNull())
return TypeResult(true);
// Check the tag kind
if (const RecordType *RT = Result->getAs<RecordType>()) {
- RecordDecl *D = RT->getDecl();
+ RecordDecl *D = RT->getOriginalDecl();
IdentifierInfo *Id = D->getIdentifier();
assert(Id && "templated class must have an identifier");
@@ -4073,21 +4103,9 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
// Provide source-location information for the template specialization.
TypeLocBuilder TLB;
- TemplateSpecializationTypeLoc SpecTL
- = TLB.push<TemplateSpecializationTypeLoc>(Result);
- SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
- SpecTL.setTemplateNameLoc(TemplateLoc);
- SpecTL.setLAngleLoc(LAngleLoc);
- SpecTL.setRAngleLoc(RAngleLoc);
- for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i)
- SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo());
-
- // Construct an elaborated type containing the nested-name-specifier (if any)
- // and tag keyword.
- Result = Context.getElaboratedType(Keyword, SS.getScopeRep(), Result);
- ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(Result);
- ElabTL.setElaboratedKeywordLoc(TagLoc);
- ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TLB.push<TemplateSpecializationTypeLoc>(Result).set(
+ TagLoc, SS.getWithLocInContext(Context), TemplateKWLoc, TemplateLoc,
+ TemplateArgs);
return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
}
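
Threading ElaboratedTypeKeyword through CheckTemplateIdType lets the [dcl.type.elab]p2 check fire on every path rather than only in ActOnTagTemplateIdType; what it rejects is unchanged. An illustrative translation unit (not taken from the patch):

template <class T> struct S {};
template <class T> using A = S<T>;

S<int> s1;         // OK
struct S<int> s2;  // OK: tag keyword elaborating a class specialization
// struct A<int> s3; // ill-formed: 'A<int>' is an alias template
                     // specialization, so the elaboration is rejected

int main() {}
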
@@ -4099,7 +4117,6 @@ static bool CheckTemplateSpecializationScope(Sema &S, NamedDecl *Specialized,
static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D);
static bool isTemplateArgumentTemplateParameter(const TemplateArgument &Arg,
- NamedDecl *Param,
unsigned Depth,
unsigned Index) {
switch (Arg.getKind()) {
@@ -4115,7 +4132,7 @@ static bool isTemplateArgumentTemplateParameter(const TemplateArgument &Arg,
case TemplateArgument::Type: {
QualType Type = Arg.getAsType();
const TemplateTypeParmType *TPT =
- Arg.getAsType()->getAs<TemplateTypeParmType>();
+ Arg.getAsType()->getAsCanonical<TemplateTypeParmType>();
return TPT && !Type.hasQualifiers() &&
TPT->getDepth() == Depth && TPT->getIndex() == Index;
}
@@ -4139,8 +4156,9 @@ static bool isTemplateArgumentTemplateParameter(const TemplateArgument &Arg,
}
static bool isSameAsPrimaryTemplate(TemplateParameterList *Params,
+ TemplateParameterList *SpecParams,
ArrayRef<TemplateArgument> Args) {
- if (Params->size() != Args.size())
+ if (Params->size() != Args.size() || Params->size() != SpecParams->size())
return false;
unsigned Depth = Params->getDepth();
@@ -4157,9 +4175,19 @@ static bool isSameAsPrimaryTemplate(TemplateParameterList *Params,
Arg = Arg.pack_begin()->getPackExpansionPattern();
}
- if (!isTemplateArgumentTemplateParameter(Arg, Params->getParam(I), Depth,
- I))
+ if (!isTemplateArgumentTemplateParameter(Arg, Depth, I))
return false;
+
+ // For NTTPs further specialization is allowed via deduced types, so
+ // we need to make sure to only reject here if primary template and
+ // specialization use the same type for the NTTP.
+ if (auto *SpecNTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(SpecParams->getParam(I))) {
+ auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Params->getParam(I));
+ if (!NTTP || NTTP->getType().getCanonicalType() !=
+ SpecNTTP->getType().getCanonicalType())
+ return false;
+ }
}
return true;
@@ -4357,7 +4385,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
}
if (isSameAsPrimaryTemplate(VarTemplate->getTemplateParameters(),
- CTAI.CanonicalConverted) &&
+ TemplateParams, CTAI.CanonicalConverted) &&
(!Context.getLangOpts().CPlusPlus20 ||
!TemplateParams->hasAssociatedConstraints())) {
// C++ [temp.class.spec]p9b3:
@@ -5055,7 +5083,7 @@ TemplateNameKind Sema::ActOnTemplateName(Scope *S,
return TNK_Non_template;
}
- NestedNameSpecifier *Qualifier = SS.getScopeRep();
+ NestedNameSpecifier Qualifier = SS.getScopeRep();
switch (Name.getKind()) {
case UnqualifiedIdKind::IK_Identifier:
@@ -5343,8 +5371,9 @@ static bool SubstDefaultTemplateArgument(
///
/// \returns the substituted template argument, or NULL if an error occurred.
static TemplateName SubstDefaultTemplateArgument(
- Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateLoc,
- SourceLocation RAngleLoc, TemplateTemplateParmDecl *Param,
+ Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateKWLoc,
+ SourceLocation TemplateLoc, SourceLocation RAngleLoc,
+ TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> SugaredConverted,
ArrayRef<TemplateArgument> CanonicalConverted,
NestedNameSpecifierLoc &QualifierLoc) {
@@ -5361,25 +5390,17 @@ static TemplateName SubstDefaultTemplateArgument(
TemplateArgLists.addOuterTemplateArguments(std::nullopt);
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
- // Substitute into the nested-name-specifier first,
- QualifierLoc = Param->getDefaultArgument().getTemplateQualifierLoc();
- if (QualifierLoc) {
- QualifierLoc =
- SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc, TemplateArgLists);
- if (!QualifierLoc)
- return TemplateName();
- }
- return SemaRef.SubstTemplateName(
- QualifierLoc,
- Param->getDefaultArgument().getArgument().getAsTemplate(),
- Param->getDefaultArgument().getTemplateNameLoc(),
- TemplateArgLists);
+ const TemplateArgumentLoc &A = Param->getDefaultArgument();
+ QualifierLoc = A.getTemplateQualifierLoc();
+ return SemaRef.SubstTemplateName(TemplateKWLoc, QualifierLoc,
+ A.getArgument().getAsTemplate(),
+ A.getTemplateNameLoc(), TemplateArgLists);
}
TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
- TemplateDecl *Template, SourceLocation TemplateLoc,
- SourceLocation RAngleLoc, Decl *Param,
+ TemplateDecl *Template, SourceLocation TemplateKWLoc,
+ SourceLocation TemplateNameLoc, SourceLocation RAngleLoc, Decl *Param,
ArrayRef<TemplateArgument> SugaredConverted,
ArrayRef<TemplateArgument> CanonicalConverted, bool &HasDefaultArg) {
HasDefaultArg = false;
@@ -5390,8 +5411,8 @@ TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
HasDefaultArg = true;
TemplateArgumentLoc Output;
- if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
- TypeParm, SugaredConverted,
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateNameLoc,
+ RAngleLoc, TypeParm, SugaredConverted,
CanonicalConverted, Output))
return TemplateArgumentLoc();
return Output;
@@ -5404,8 +5425,8 @@ TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
HasDefaultArg = true;
TemplateArgumentLoc Output;
- if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
- NonTypeParm, SugaredConverted,
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateNameLoc,
+ RAngleLoc, NonTypeParm, SugaredConverted,
CanonicalConverted, Output))
return TemplateArgumentLoc();
return Output;
@@ -5417,17 +5438,16 @@ TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
return TemplateArgumentLoc();
HasDefaultArg = true;
+ const TemplateArgumentLoc &A = TempTempParm->getDefaultArgument();
NestedNameSpecifierLoc QualifierLoc;
TemplateName TName = SubstDefaultTemplateArgument(
- *this, Template, TemplateLoc, RAngleLoc, TempTempParm, SugaredConverted,
- CanonicalConverted, QualifierLoc);
+ *this, Template, TemplateKWLoc, TemplateNameLoc, RAngleLoc, TempTempParm,
+ SugaredConverted, CanonicalConverted, QualifierLoc);
if (TName.isNull())
return TemplateArgumentLoc();
- return TemplateArgumentLoc(
- Context, TemplateArgument(TName),
- TempTempParm->getDefaultArgument().getTemplateQualifierLoc(),
- TempTempParm->getDefaultArgument().getTemplateNameLoc());
+ return TemplateArgumentLoc(Context, TemplateArgument(TName), TemplateKWLoc,
+ QualifierLoc, A.getTemplateNameLoc());
}
/// Convert a template-argument that we parsed as a type into a template, if
@@ -5435,33 +5455,24 @@ TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
/// template template arguments and as template type arguments.
static TemplateArgumentLoc
convertTypeTemplateArgumentToTemplate(ASTContext &Context, TypeLoc TLoc) {
- // Extract and step over any surrounding nested-name-specifier.
- NestedNameSpecifierLoc QualLoc;
- if (auto ETLoc = TLoc.getAs<ElaboratedTypeLoc>()) {
- if (ETLoc.getTypePtr()->getKeyword() != ElaboratedTypeKeyword::None)
- return TemplateArgumentLoc();
+ auto TagLoc = TLoc.getAs<TagTypeLoc>();
+ if (!TagLoc)
+ return TemplateArgumentLoc();
- QualLoc = ETLoc.getQualifierLoc();
- TLoc = ETLoc.getNamedTypeLoc();
- }
// If this type was written as an injected-class-name, it can be used as a
// template template argument.
- if (auto InjLoc = TLoc.getAs<InjectedClassNameTypeLoc>())
- return TemplateArgumentLoc(Context, InjLoc.getTypePtr()->getTemplateName(),
- QualLoc, InjLoc.getNameLoc());
-
// If this type was written as an injected-class-name, it may have been
// converted to a RecordType during instantiation. If the RecordType is
// *not* wrapped in a TemplateSpecializationType and denotes a class
// template specialization, it must have come from an injected-class-name.
- if (auto RecLoc = TLoc.getAs<RecordTypeLoc>())
- if (auto *CTSD =
- dyn_cast<ClassTemplateSpecializationDecl>(RecLoc.getDecl()))
- return TemplateArgumentLoc(Context,
- TemplateName(CTSD->getSpecializedTemplate()),
- QualLoc, RecLoc.getNameLoc());
- return TemplateArgumentLoc();
+ TemplateName Name = TagLoc.getTypePtr()->getTemplateName(Context);
+ if (Name.isNull())
+ return TemplateArgumentLoc();
+
+ return TemplateArgumentLoc(Context, Name,
+ /*TemplateKWLoc=*/SourceLocation(),
+ TagLoc.getQualifierLoc(), TagLoc.getNameLoc());
}
bool Sema::CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &ArgLoc,
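
convertTypeTemplateArgumentToTemplate recovers a template-template argument from a type written as an injected-class-name (or from the RecordType it may have become during instantiation). The user-level feature it serves:

template <template <class> class TT> struct TakesTemplate {};

template <class T> struct Self {
  // 'Self' names the injected-class-name here; used where a template
  // template argument is expected, it converts back to the template.
  using U = TakesTemplate<Self>;
};

int main() {
  Self<int>::U u;
  (void)u;
}
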
@@ -5857,6 +5868,29 @@ bool Sema::CheckTemplateArgumentList(
}
}
+  // Check for builtins producing template packs in this context; we do not
+  // support them yet.
+ if (const NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(*Param);
+ NTTP && NTTP->isPackExpansion()) {
+ auto TL = NTTP->getTypeSourceInfo()
+ ->getTypeLoc()
+ .castAs<PackExpansionTypeLoc>();
+ llvm::SmallVector<UnexpandedParameterPack> Unexpanded;
+ collectUnexpandedParameterPacks(TL.getPatternLoc(), Unexpanded);
+ for (const auto &UPP : Unexpanded) {
+ auto *TST = UPP.first.dyn_cast<const TemplateSpecializationType *>();
+ if (!TST)
+ continue;
+ assert(isPackProducingBuiltinTemplateName(TST->getTemplateName()));
+ // Expanding a built-in pack in this context is not yet supported.
+ Diag(TL.getEllipsisLoc(),
+ diag::err_unsupported_builtin_template_pack_expansion)
+ << TST->getTemplateName();
+ return true;
+ }
+ }
+
if (ArgIdx < NumArgs) {
TemplateArgumentLoc &ArgLoc = NewArgs[ArgIdx];
bool NonPackParameter =
@@ -6007,8 +6041,8 @@ bool Sema::CheckTemplateArgumentList(
// (when the template parameter was part of a nested template) into
// the default argument.
TemplateArgumentLoc Arg = SubstDefaultTemplateArgumentIfAvailable(
- Template, TemplateLoc, RAngleLoc, *Param, CTAI.SugaredConverted,
- CTAI.CanonicalConverted, HasDefaultArg);
+ Template, /*TemplateKWLoc=*/SourceLocation(), TemplateLoc, RAngleLoc,
+ *Param, CTAI.SugaredConverted, CTAI.CanonicalConverted, HasDefaultArg);
if (Arg.getArgument().isNull()) {
if (!HasDefaultArg) {
@@ -6152,7 +6186,7 @@ namespace {
#include "clang/AST/TypeNodes.inc"
bool VisitTagDecl(const TagDecl *Tag);
- bool VisitNestedNameSpecifier(NestedNameSpecifier *NNS);
+ bool VisitNestedNameSpecifier(NestedNameSpecifier NNS);
};
} // end anonymous namespace
@@ -6297,11 +6331,11 @@ bool UnnamedLocalNoLinkageFinder::VisitDeducedTemplateSpecializationType(
}
bool UnnamedLocalNoLinkageFinder::VisitRecordType(const RecordType* T) {
- return VisitTagDecl(T->getDecl());
+ return VisitTagDecl(T->getOriginalDecl()->getDefinitionOrSelf());
}
bool UnnamedLocalNoLinkageFinder::VisitEnumType(const EnumType* T) {
- return VisitTagDecl(T->getDecl());
+ return VisitTagDecl(T->getOriginalDecl()->getDefinitionOrSelf());
}
bool UnnamedLocalNoLinkageFinder::VisitTemplateTypeParmType(
@@ -6314,6 +6348,11 @@ bool UnnamedLocalNoLinkageFinder::VisitSubstTemplateTypeParmPackType(
return false;
}
+bool UnnamedLocalNoLinkageFinder::VisitSubstBuiltinTemplatePackType(
+ const SubstBuiltinTemplatePackType *) {
+ return false;
+}
+
bool UnnamedLocalNoLinkageFinder::VisitTemplateSpecializationType(
const TemplateSpecializationType*) {
return false;
@@ -6321,7 +6360,7 @@ bool UnnamedLocalNoLinkageFinder::VisitTemplateSpecializationType(
bool UnnamedLocalNoLinkageFinder::VisitInjectedClassNameType(
const InjectedClassNameType* T) {
- return VisitTagDecl(T->getDecl());
+ return VisitTagDecl(T->getOriginalDecl()->getDefinitionOrSelf());
}
bool UnnamedLocalNoLinkageFinder::VisitDependentNameType(
@@ -6331,10 +6370,7 @@ bool UnnamedLocalNoLinkageFinder::VisitDependentNameType(
bool UnnamedLocalNoLinkageFinder::VisitDependentTemplateSpecializationType(
const DependentTemplateSpecializationType* T) {
- if (auto *Q = T->getDependentTemplateName().getQualifier())
- return VisitNestedNameSpecifier(Q);
-
- return false;
+ return VisitNestedNameSpecifier(T->getDependentTemplateName().getQualifier());
}
bool UnnamedLocalNoLinkageFinder::VisitPackExpansionType(
@@ -6380,11 +6416,10 @@ bool UnnamedLocalNoLinkageFinder::VisitDependentBitIntType(
bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
if (Tag->getDeclContext()->isFunctionOrMethod()) {
- S.Diag(SR.getBegin(),
- S.getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_template_arg_local_type :
- diag::ext_template_arg_local_type)
- << S.Context.getTypeDeclType(Tag) << SR;
+ S.Diag(SR.getBegin(), S.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_template_arg_local_type
+ : diag::ext_template_arg_local_type)
+ << S.Context.getCanonicalTagType(Tag) << SR;
return true;
}
@@ -6401,20 +6436,15 @@ bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
}
bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
- NestedNameSpecifier *NNS) {
- assert(NNS);
- if (NNS->getPrefix() && VisitNestedNameSpecifier(NNS->getPrefix()))
- return true;
-
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ NestedNameSpecifier NNS) {
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Namespace:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
return false;
-
- case NestedNameSpecifier::TypeSpec:
- return Visit(QualType(NNS->getAsType(), 0));
+ case NestedNameSpecifier::Kind::Type:
+ return Visit(QualType(NNS.getAsType(), 0));
}
llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
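
With NestedNameSpecifier now a value type, the visitor switches over a Kind that includes Null instead of null-checking a pointer and walking getPrefix() by hand. A standalone sketch of that shape (all names hypothetical):

#include <cassert>

struct Qualifier {
  enum class Kind { Null, Namespace, Global, MicrosoftSuper, Type } K;
  bool TypeIsLocal = false; // stand-in for the real type inspection
};

// Mirrors the rewritten visitor: "absent" is just Kind::Null, only
// Kind::Type carries a type to inspect, and any prefix now hangs off
// that type rather than off the specifier itself.
bool hasLocalType(const Qualifier &NNS) {
  switch (NNS.K) {
  case Qualifier::Kind::Null:
  case Qualifier::Kind::Namespace:
  case Qualifier::Kind::Global:
  case Qualifier::Kind::MicrosoftSuper:
    return false;
  case Qualifier::Kind::Type:
    return NNS.TypeIsLocal;
  }
  return false;
}

int main() {
  assert(!hasLocalType({Qualifier::Kind::Global}));
  assert(hasLocalType({Qualifier::Kind::Type, true}));
}
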
@@ -7381,8 +7411,8 @@ ExprResult Sema::CheckTemplateArgument(NamedDecl *Param, QualType ParamType,
// always a no-op, except when the parameter type is bool. In
// that case, this may extend the argument from 1 bit to 8 bits.
QualType IntegerType = ParamType;
- if (const EnumType *Enum = IntegerType->getAs<EnumType>())
- IntegerType = Enum->getDecl()->getIntegerType();
+ if (const auto *ED = IntegerType->getAsEnumDecl())
+ IntegerType = ED->getIntegerType();
Value = Value.extOrTrunc(IntegerType->isBitIntType()
? Context.getIntWidth(IntegerType)
: Context.getTypeSize(IntegerType));
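
Both hunks above swap getAs<EnumType>()->getDecl() for the new getAsEnumDecl() helper when an enum NTTP's value must be widened or truncated to its underlying type. What "underlying type" means for the stored value, as a runnable example:

#include <type_traits>

enum class E : unsigned char { A = 1 };

template <E V> struct Holder { static_assert(V == E::A); };

// The argument value is extended or truncated to 8 bits here, because
// unsigned char is E's underlying type.
static_assert(std::is_same_v<std::underlying_type_t<E>, unsigned char>);
Holder<E::A> h;

int main() {}
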
@@ -7479,8 +7509,8 @@ ExprResult Sema::CheckTemplateArgument(NamedDecl *Param, QualType ParamType,
}
QualType IntegerType = ParamType;
- if (const EnumType *Enum = IntegerType->getAs<EnumType>()) {
- IntegerType = Enum->getDecl()->getIntegerType();
+ if (const auto *ED = IntegerType->getAsEnumDecl()) {
+ IntegerType = ED->getIntegerType();
}
if (ParamType->isBooleanType()) {
@@ -7903,10 +7933,9 @@ ExprResult Sema::BuildExpressionFromDeclTemplateArgument(
assert(VD->getDeclContext()->isRecord() &&
(isa<CXXMethodDecl>(VD) || isa<FieldDecl>(VD) ||
isa<IndirectFieldDecl>(VD)));
- QualType ClassType
- = Context.getTypeDeclType(cast<RecordDecl>(VD->getDeclContext()));
- NestedNameSpecifier *Qualifier =
- NestedNameSpecifier::Create(Context, nullptr, ClassType.getTypePtr());
+ CanQualType ClassType =
+ Context.getCanonicalTagType(cast<RecordDecl>(VD->getDeclContext()));
+ NestedNameSpecifier Qualifier(ClassType.getTypePtr());
SS.MakeTrivial(Context, Qualifier, Loc);
}
@@ -7997,8 +8026,8 @@ static Expr *BuildExpressionFromIntegralTemplateArgumentValue(
// any integral type with C++11 enum classes, make sure we create the right
// type of literal for it.
QualType T = OrigT;
- if (const EnumType *ET = OrigT->getAs<EnumType>())
- T = ET->getDecl()->getIntegerType();
+ if (const auto *ED = OrigT->getAsEnumDecl())
+ T = ED->getIntegerType();
Expr *E;
if (T->isAnyCharacterType()) {
@@ -8881,7 +8910,6 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
isPartialSpecialization))
return true;
- QualType CanonType;
if (!isPartialSpecialization) {
// Create a new class template specialization declaration node for
// this explicit specialization or friend declaration.
@@ -8897,18 +8925,14 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
if (!PrevDecl)
ClassTemplate->AddSpecialization(Specialization, InsertPos);
-
- if (!CurContext->isDependentContext())
- CanonType = Context.getTypeDeclType(Specialization);
- }
-
- TypeSourceInfo *WrittenTy = Context.getTemplateSpecializationTypeInfo(
- Name, TemplateNameLoc, TemplateArgs, CTAI.CanonicalConverted, CanonType);
-
- if (isPartialSpecialization) {
+ } else {
+ CanQualType CanonType = CanQualType::CreateUnsafe(
+ Context.getCanonicalTemplateSpecializationType(
+ TemplateName(ClassTemplate->getCanonicalDecl()),
+ CTAI.CanonicalConverted));
if (Context.hasSameType(
- WrittenTy->getType(),
- ClassTemplate->getInjectedClassNameSpecialization()) &&
+ CanonType,
+ ClassTemplate->getCanonicalInjectedSpecializationType(Context)) &&
(!Context.getLangOpts().CPlusPlus20 ||
!TemplateParams->hasAssociatedConstraints())) {
// C++ [temp.class.spec]p9b3:
@@ -8930,13 +8954,12 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
}
// Create a new class template partial specialization declaration node.
- ClassTemplatePartialSpecializationDecl *PrevPartial
- = cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl);
+ ClassTemplatePartialSpecializationDecl *PrevPartial =
+ cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl);
ClassTemplatePartialSpecializationDecl *Partial =
ClassTemplatePartialSpecializationDecl::Create(
Context, Kind, DC, KWLoc, TemplateNameLoc, TemplateParams,
- ClassTemplate, CTAI.CanonicalConverted, WrittenTy->getType(),
- PrevPartial);
+ ClassTemplate, CTAI.CanonicalConverted, CanonType, PrevPartial);
Partial->setTemplateArgsAsWritten(TemplateArgs);
SetNestedNameSpecifier(*this, Partial, SS);
if (TemplateParameterLists.size() > 1 && SS.isSet()) {
@@ -8975,7 +8998,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
if (!Okay) {
SourceRange Range(TemplateNameLoc, RAngleLoc);
Diag(TemplateNameLoc, diag::err_specialization_after_instantiation)
- << Context.getTypeDeclType(Specialization) << Range;
+ << Context.getCanonicalTagType(Specialization) << Range;
Diag(PrevDecl->getPointOfInstantiation(),
diag::note_instantiation_required_here)
@@ -8993,10 +9016,13 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
if (TUK == TagUseKind::Definition) {
RecordDecl *Def = Specialization->getDefinition();
NamedDecl *Hidden = nullptr;
- if (Def && SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
+ bool HiddenDefVisible = false;
+ if (Def && SkipBody &&
+ isRedefinitionAllowedFor(Def, &Hidden, HiddenDefVisible)) {
SkipBody->ShouldSkip = true;
SkipBody->Previous = Def;
- makeMergedDefinitionVisible(Hidden);
+ if (!HiddenDefVisible && Hidden)
+ makeMergedDefinitionVisible(Hidden);
} else if (Def) {
SourceRange Range(TemplateNameLoc, RAngleLoc);
Diag(TemplateNameLoc, diag::err_redefinition) << Specialization << Range;
@@ -9038,6 +9064,13 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
Specialization->startDefinition();
if (TUK == TagUseKind::Friend) {
+ CanQualType CanonType = Context.getCanonicalTagType(Specialization);
+ TypeSourceInfo *WrittenTy = Context.getTemplateSpecializationTypeInfo(
+ ElaboratedTypeKeyword::None, /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context),
+ /*TemplateKeywordLoc=*/SourceLocation(), Name, TemplateNameLoc,
+ TemplateArgs, CTAI.CanonicalConverted, CanonType);
+
// Build the fully-sugared type for this class template
// specialization as the user wrote in the specialization
// itself. This means that we'll pretty-print the type retrieved
@@ -10078,9 +10111,6 @@ static bool CheckExplicitInstantiation(Sema &S, NamedDecl *D,
/// Determine whether the given scope specifier has a template-id in it.
static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) {
- if (!SS.isSet())
- return false;
-
// C++11 [temp.explicit]p3:
// If the explicit instantiation is for a member function, a member class
// or a static data member of a class template specialization, the name of
@@ -10088,12 +10118,14 @@ static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) {
// name shall be a simple-template-id.
//
// C++98 has the same restriction, just worded differently.
- for (NestedNameSpecifier *NNS = SS.getScopeRep(); NNS;
- NNS = NNS->getPrefix())
- if (const Type *T = NNS->getAsType())
- if (isa<TemplateSpecializationType>(T))
- return true;
-
+ for (NestedNameSpecifier NNS = SS.getScopeRep();
+ NNS.getKind() == NestedNameSpecifier::Kind::Type;
+ /**/) {
+ const Type *T = NNS.getAsType();
+ if (isa<TemplateSpecializationType>(T))
+ return true;
+ NNS = T->getPrefix();
+ }
return false;
}
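ScopeSpecifierHasTemplateId now walks the qualifier through type prefixes instead of NestedNameSpecifier::getPrefix(). The directive it validates looks like this (illustrative only):

```cpp
template <typename T> struct S {
  void f() {}
};

// The scope specifier 'S<int>::' contains a simple-template-id, which is
// exactly what the prefix walk above is looking for.
template void S<int>::f();
```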
@@ -10448,7 +10480,7 @@ Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
CXXRecordDecl *Pattern = Record->getInstantiatedFromMemberClass();
if (!Pattern) {
Diag(TemplateLoc, diag::err_explicit_instantiation_nontemplate_type)
- << Context.getTypeDeclType(Record);
+ << Context.getCanonicalTagType(Record);
Diag(Record->getLocation(), diag::note_nontemplate_decl_here);
return true;
}
@@ -10995,7 +11027,7 @@ TypeResult Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// This has to hold, because SS is expected to be defined.
assert(Name && "Expected a name in a dependent tag");
- NestedNameSpecifier *NNS = SS.getScopeRep();
+ NestedNameSpecifier NNS = SS.getScopeRep();
if (!NNS)
return true;
@@ -11107,7 +11139,10 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
- QualType T = CheckTemplateIdType(Template, TemplateIILoc, TemplateArgs);
+ QualType T = CheckTemplateIdType(TypenameLoc.isValid()
+ ? ElaboratedTypeKeyword::Typename
+ : ElaboratedTypeKeyword::None,
+ Template, TemplateIILoc, TemplateArgs);
if (T.isNull())
return true;
@@ -11115,18 +11150,8 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
TypeLocBuilder Builder;
TemplateSpecializationTypeLoc SpecTL
= Builder.push<TemplateSpecializationTypeLoc>(T);
- SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
- SpecTL.setTemplateNameLoc(TemplateIILoc);
- SpecTL.setLAngleLoc(LAngleLoc);
- SpecTL.setRAngleLoc(RAngleLoc);
- for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
- SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
-
- T = Context.getElaboratedType(Keyword, SS.getScopeRep(), T);
- ElaboratedTypeLoc TL = Builder.push<ElaboratedTypeLoc>(T);
- TL.setElaboratedKeywordLoc(TypenameLoc);
- TL.setQualifierLoc(SS.getWithLocInContext(Context));
-
+ SpecTL.set(TypenameLoc, SS.getWithLocInContext(Context), TemplateKWLoc,
+ TemplateIILoc, TemplateArgs);
TypeSourceInfo *TSI = Builder.getTypeSourceInfo(Context, T);
return CreateParsedType(T, TSI);
}
@@ -11140,11 +11165,12 @@ static bool isEnableIf(NestedNameSpecifierLoc NNS, const IdentifierInfo &II,
return false;
// ... within an explicitly-written template specialization...
- if (!NNS || !NNS.getNestedNameSpecifier()->getAsType())
+ if (NNS.getNestedNameSpecifier().getKind() != NestedNameSpecifier::Kind::Type)
return false;
- TypeLoc EnableIfTy = NNS.getTypeLoc();
- TemplateSpecializationTypeLoc EnableIfTSTLoc =
- EnableIfTy.getAs<TemplateSpecializationTypeLoc>();
+
+ // FIXME: Look through sugar.
+ auto EnableIfTSTLoc =
+ NNS.castAsTypeLoc().getAs<TemplateSpecializationTypeLoc>();
if (!EnableIfTSTLoc || EnableIfTSTLoc.getNumArgs() == 0)
return false;
const TemplateSpecializationType *EnableIfTST = EnableIfTSTLoc.getTypePtr();
@@ -11192,19 +11218,33 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
if (T.isNull())
return QualType();
- *TSI = Context.CreateTypeSourceInfo(T);
+ TypeLocBuilder TLB;
if (isa<DependentNameType>(T)) {
- DependentNameTypeLoc TL =
- (*TSI)->getTypeLoc().castAs<DependentNameTypeLoc>();
+ auto TL = TLB.push<DependentNameTypeLoc>(T);
TL.setElaboratedKeywordLoc(KeywordLoc);
TL.setQualifierLoc(QualifierLoc);
TL.setNameLoc(IILoc);
- } else {
- ElaboratedTypeLoc TL = (*TSI)->getTypeLoc().castAs<ElaboratedTypeLoc>();
+ } else if (isa<DeducedTemplateSpecializationType>(T)) {
+ auto TL = TLB.push<DeducedTemplateSpecializationTypeLoc>(T);
+ TL.setElaboratedKeywordLoc(KeywordLoc);
+ TL.setQualifierLoc(QualifierLoc);
+ TL.setNameLoc(IILoc);
+ } else if (isa<TemplateTypeParmType>(T)) {
+ // FIXME: There might be a 'typename' keyword here, but we just drop it
+ // as it can't be represented.
+ assert(!QualifierLoc);
+ TLB.pushTypeSpec(T).setNameLoc(IILoc);
+ } else if (isa<TagType>(T)) {
+ auto TL = TLB.push<TagTypeLoc>(T);
TL.setElaboratedKeywordLoc(KeywordLoc);
TL.setQualifierLoc(QualifierLoc);
- TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(IILoc);
+ TL.setNameLoc(IILoc);
+ } else if (isa<TypedefType>(T)) {
+ TLB.push<TypedefTypeLoc>(T).set(KeywordLoc, QualifierLoc, IILoc);
+ } else {
+ TLB.push<UnresolvedUsingTypeLoc>(T).set(KeywordLoc, QualifierLoc, IILoc);
}
+ *TSI = TLB.getTypeSourceInfo(Context, T);
return T;
}
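The new TypeLocBuilder cases correspond to the different declarations a dependent name can resolve to (tag, typedef, using declaration, template parameter). A typename-specifier reaching this code might look like the following sketch (assumed example, not from the patch):

```cpp
struct B { using type = int; };

template <typename T> struct A {
  typename T::type member;  // a DependentNameType until T is known
};

A<B> a;  // on instantiation, T::type resolves through the TypedefType path
```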
@@ -11227,7 +11267,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
if (!Ctx) {
// If the nested-name-specifier is dependent and couldn't be
// resolved to a type, build a typename type.
- assert(QualifierLoc.getNestedNameSpecifier()->isDependent());
+ assert(QualifierLoc.getNestedNameSpecifier().isDependent());
return Context.getDependentNameType(Keyword,
QualifierLoc.getNestedNameSpecifier(),
&II);
@@ -11309,6 +11349,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
&II);
case LookupResultKind::Found:
+ // FIXME: Missing support for UsingShadowDecl on this path?
if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getFoundDecl())) {
// C++ [class.qual]p2:
// In a lookup in which function names are not ignored and the
@@ -11324,15 +11365,20 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
//
// FIXME: That's not strictly true: mem-initializer-id lookup does not
// ignore functions, but that appears to be an oversight.
- QualType T = getTypeDeclType(Ctx,
- Keyword == ElaboratedTypeKeyword::Typename
- ? DiagCtorKind::Typename
- : DiagCtorKind::None,
- Type, IILoc);
- // We found a type. Build an ElaboratedType, since the
- // typename-specifier was just sugar.
- return Context.getElaboratedType(
- Keyword, QualifierLoc.getNestedNameSpecifier(), T);
+ checkTypeDeclType(Ctx,
+ Keyword == ElaboratedTypeKeyword::Typename
+ ? DiagCtorKind::Typename
+ : DiagCtorKind::None,
+ Type, IILoc);
+ // FIXME: This appears to be the only case where a template type parameter
+ // can have an elaborated keyword. We should preserve it somehow.
+ if (isa<TemplateTypeParmDecl>(Type)) {
+ assert(Keyword == ElaboratedTypeKeyword::Typename);
+ assert(!QualifierLoc);
+ Keyword = ElaboratedTypeKeyword::None;
+ }
+ return Context.getTypeDeclType(
+ Keyword, QualifierLoc.getNestedNameSpecifier(), Type);
}
// C++ [dcl.type.simple]p2:
@@ -11342,22 +11388,22 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
if (getLangOpts().CPlusPlus17) {
if (auto *TD = getAsTypeTemplateDecl(Result.getFoundDecl())) {
if (!DeducedTSTContext) {
- QualType T(QualifierLoc
- ? QualifierLoc.getNestedNameSpecifier()->getAsType()
- : nullptr, 0);
- if (!T.isNull())
+ NestedNameSpecifier Qualifier = QualifierLoc.getNestedNameSpecifier();
+ if (Qualifier.getKind() == NestedNameSpecifier::Kind::Type)
Diag(IILoc, diag::err_dependent_deduced_tst)
- << (int)getTemplateNameKindForDiagnostics(TemplateName(TD)) << T;
+ << (int)getTemplateNameKindForDiagnostics(TemplateName(TD))
+ << QualType(Qualifier.getAsType(), 0);
else
Diag(IILoc, diag::err_deduced_tst)
<< (int)getTemplateNameKindForDiagnostics(TemplateName(TD));
NoteTemplateLocation(*TD);
return QualType();
}
- return Context.getElaboratedType(
- Keyword, QualifierLoc.getNestedNameSpecifier(),
- Context.getDeducedTemplateSpecializationType(TemplateName(TD),
- QualType(), false));
+ TemplateName Name = Context.getQualifiedTemplateName(
+ QualifierLoc.getNestedNameSpecifier(), /*TemplateKeyword=*/false,
+ TemplateName(TD));
+ return Context.getDeducedTemplateSpecializationType(
+ Keyword, Name, /*DeducedType=*/QualType(), /*IsDependent=*/false);
}
}
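The rewritten branch builds a qualified template name before forming the deduced template specialization type. For reference, class template argument deduction via such a placeholder works roughly like this (hedged sketch; the error branch above concerns dependent qualifiers, which this example avoids):

```cpp
template <typename T> struct Pair { Pair(T, T); };

Pair p(1, 2);  // OK: placeholder for a deduced class type, Pair<int>

// Behind a dependent qualifier such as 'typename T::Tmpl', deduction is
// instead rejected via err_dependent_deduced_tst in the hunk above.
```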
diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp
index 0d70321..cce40c0 100644
--- a/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -691,38 +691,31 @@ DeduceTemplateSpecArguments(Sema &S, TemplateParameterList *TemplateParams,
TemplateDeductionInfo &Info, bool PartialOrdering,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
bool *HasDeducedAnyParam) {
- QualType UP = P;
- if (const auto *IP = P->getAs<InjectedClassNameType>())
- UP = IP->getInjectedSpecializationType();
-
- assert(isa<TemplateSpecializationType>(UP.getCanonicalType()));
- const TemplateSpecializationType *TP = ::getLastTemplateSpecType(UP);
- TemplateName TNP = TP->getTemplateName();
+ TemplateName TNP;
+ ArrayRef<TemplateArgument> PResolved;
+ if (isa<TemplateSpecializationType>(P.getCanonicalType())) {
+ const TemplateSpecializationType *TP = ::getLastTemplateSpecType(P);
+ TNP = TP->getTemplateName();
+ // FIXME: To preserve sugar, the TST needs to carry sugared resolved
+ // arguments.
+ PResolved =
+ TP->castAsCanonical<TemplateSpecializationType>()->template_arguments();
+ } else {
+ const auto *TT = P->castAs<InjectedClassNameType>();
+ TNP = TT->getTemplateName(S.Context);
+ PResolved = TT->getTemplateArgs(S.Context);
+ }
// If the parameter is an alias template, there is nothing to deduce.
if (const auto *TD = TNP.getAsTemplateDecl(); TD && TD->isTypeAlias())
return TemplateDeductionResult::Success;
-
- // FIXME: To preserve sugar, the TST needs to carry sugared resolved
- // arguments.
- ArrayRef<TemplateArgument> PResolved =
- TP->getCanonicalTypeInternal()
- ->castAs<TemplateSpecializationType>()
- ->template_arguments();
-
- QualType UA = A;
- std::optional<NestedNameSpecifier *> NNS;
- // Treat an injected-class-name as its underlying template-id.
- if (const auto *Elaborated = A->getAs<ElaboratedType>()) {
- NNS = Elaborated->getQualifier();
- } else if (const auto *Injected = A->getAs<InjectedClassNameType>()) {
- UA = Injected->getInjectedSpecializationType();
- NNS = nullptr;
- }
+ // Pack-producing templates can only be matched after substitution.
+ if (isPackProducingBuiltinTemplateName(TNP))
+ return TemplateDeductionResult::Success;
// Check whether the template argument is a dependent template-id.
- if (isa<TemplateSpecializationType>(UA.getCanonicalType())) {
- const TemplateSpecializationType *SA = ::getLastTemplateSpecType(UA);
+ if (isa<TemplateSpecializationType>(A.getCanonicalType())) {
+ const TemplateSpecializationType *SA = ::getLastTemplateSpecType(A);
TemplateName TNA = SA->getTemplateName();
// If the argument is an alias template, there is nothing to deduce.
@@ -756,34 +749,36 @@ DeduceTemplateSpecArguments(Sema &S, TemplateParameterList *TemplateParams,
// If the argument type is a class template specialization, we
// perform template argument deduction using its template
// arguments.
- const auto *RA = UA->getAs<RecordType>();
- const auto *SA =
- RA ? dyn_cast<ClassTemplateSpecializationDecl>(RA->getDecl()) : nullptr;
- if (!SA) {
+ const auto *TA = A->getAs<TagType>();
+ TemplateName TNA;
+ if (TA) {
+ // FIXME: Can't use the template arguments from this TST, as they are not
+ // resolved.
+ if (const auto *TST = A->getAsNonAliasTemplateSpecializationType())
+ TNA = TST->getTemplateName();
+ else
+ TNA = TA->getTemplateName(S.Context);
+ }
+ if (TNA.isNull()) {
Info.FirstArg = TemplateArgument(P);
Info.SecondArg = TemplateArgument(A);
return TemplateDeductionResult::NonDeducedMismatch;
}
- TemplateName TNA = TemplateName(SA->getSpecializedTemplate());
- if (NNS)
- TNA = S.Context.getQualifiedTemplateName(
- *NNS, false, TemplateName(SA->getSpecializedTemplate()));
-
+ ArrayRef<TemplateArgument> AResolved = TA->getTemplateArgs(S.Context);
// Perform template argument deduction for the template name.
- if (auto Result = DeduceTemplateArguments(
- S, TemplateParams, TNP, TNA, Info,
- /*DefaultArguments=*/SA->getTemplateArgs().asArray(), PartialOrdering,
- Deduced, HasDeducedAnyParam);
+ if (auto Result =
+ DeduceTemplateArguments(S, TemplateParams, TNP, TNA, Info,
+ /*DefaultArguments=*/AResolved,
+ PartialOrdering, Deduced, HasDeducedAnyParam);
Result != TemplateDeductionResult::Success)
return Result;
// Perform template argument deduction for the template arguments.
- return DeduceTemplateArguments(S, TemplateParams, PResolved,
- SA->getTemplateArgs().asArray(), Info, Deduced,
- /*NumberOfArgumentsMustMatch=*/true,
- PartialOrdering, PackFold::ParameterToArgument,
- HasDeducedAnyParam);
+ return DeduceTemplateArguments(
+ S, TemplateParams, PResolved, AResolved, Info, Deduced,
+ /*NumberOfArgumentsMustMatch=*/true, PartialOrdering,
+ PackFold::ParameterToArgument, HasDeducedAnyParam);
}
static bool IsPossiblyOpaquelyQualifiedTypeInternal(const Type *T) {
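DeduceTemplateSpecArguments now obtains the resolved arguments from either a TemplateSpecializationType or an InjectedClassNameType. The basic deduction it performs is the familiar one (illustrative):

```cpp
template <typename T> struct S {};

template <typename T> void f(S<T>);

int main() {
  f(S<int>{});  // matches parameter S<T> against argument S<int>: T = int
}
```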
@@ -935,7 +930,11 @@ private:
S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
unsigned Depth, Index;
- std::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
+ if (auto DI = getDepthAndIndex(Unexpanded[I]))
+ std::tie(Depth, Index) = *DI;
+ else
+ continue;
+
if (Depth == Info.getDeducedDepth())
AddPack(Index);
}
@@ -943,7 +942,6 @@ private:
// Look for unexpanded packs in the pattern.
Collect(Pattern);
- assert(!Packs.empty() && "Pack expansion without unexpanded packs?");
unsigned NumNamedPacks = Packs.size();
@@ -1438,7 +1436,8 @@ static bool isForwardingReference(QualType Param, unsigned FirstInnerIndex) {
if (auto *ParamRef = Param->getAs<RValueReferenceType>()) {
if (ParamRef->getPointeeType().getQualifiers())
return false;
- auto *TypeParm = ParamRef->getPointeeType()->getAs<TemplateTypeParmType>();
+ auto *TypeParm =
+ ParamRef->getPointeeType()->getAsCanonical<TemplateTypeParmType>();
return TypeParm && TypeParm->getIndex() >= FirstInnerIndex;
}
return false;
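isForwardingReference now checks the canonical TemplateTypeParmType. The pattern it detects is the standard one, not specific to this patch:

```cpp
template <typename T> void fwd(T&& x);         // forwarding reference
template <typename T> void notFwd(const T&&);  // not one: pointee qualified

int main() {
  int i = 0;
  fwd(i);  // T deduced as int&, x collapses to an lvalue reference
  fwd(0);  // T deduced as int, x is an rvalue reference
}
```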
@@ -1703,7 +1702,7 @@ static TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
//
// T
// cv-list T
- if (const auto *TTP = P->getAs<TemplateTypeParmType>()) {
+ if (const auto *TTP = P->getAsCanonical<TemplateTypeParmType>()) {
// Just skip any attempts to deduce from a placeholder type or a parameter
// at a different depth.
if (A->isPlaceholderType() || Info.getDeducedDepth() != TTP->getDepth())
@@ -1865,6 +1864,7 @@ static TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
case Type::TemplateTypeParm:
case Type::SubstTemplateTypeParmPack:
+ case Type::SubstBuiltinTemplatePack:
llvm_unreachable("Type nodes handled above");
case Type::Auto:
@@ -2188,26 +2188,19 @@ static TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
Result != TemplateDeductionResult::Success)
return Result;
- QualType TP;
- if (MPP->isSugared()) {
- TP = S.Context.getTypeDeclType(MPP->getMostRecentCXXRecordDecl());
- } else {
- NestedNameSpecifier *QP = MPP->getQualifier();
- if (QP->getKind() == NestedNameSpecifier::Identifier)
- // Skip translation if it's a non-deduced context anyway.
- return TemplateDeductionResult::Success;
- TP = QualType(QP->translateToType(S.Context), 0);
- }
+ QualType TP =
+ MPP->isSugared()
+ ? S.Context.getCanonicalTagType(MPP->getMostRecentCXXRecordDecl())
+ : QualType(MPP->getQualifier().getAsType(), 0);
assert(!TP.isNull() && "member pointer with non-type class");
- QualType TA;
- if (MPA->isSugared()) {
- TA = S.Context.getTypeDeclType(MPA->getMostRecentCXXRecordDecl());
- } else {
- NestedNameSpecifier *QA = MPA->getQualifier();
- TA = QualType(QA->translateToType(S.Context), 0).getUnqualifiedType();
- }
+ QualType TA =
+ MPA->isSugared()
+ ? S.Context.getCanonicalTagType(MPA->getMostRecentCXXRecordDecl())
+ : QualType(MPA->getQualifier().getAsType(), 0)
+ .getUnqualifiedType();
assert(!TA.isNull() && "member pointer with non-type class");
+
return DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, TP, TA, Info, Deduced, SubTDF,
degradeCallPartialOrderingKind(POK),
@@ -2925,18 +2918,12 @@ Sema::getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
case TemplateArgument::TemplateExpansion: {
NestedNameSpecifierLocBuilder Builder;
TemplateName Template = Arg.getAsTemplateOrTemplatePattern();
- if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
- Builder.MakeTrivial(Context, DTN->getQualifier(), Loc);
- else if (QualifiedTemplateName *QTN =
- Template.getAsQualifiedTemplateName())
- Builder.MakeTrivial(Context, QTN->getQualifier(), Loc);
-
- if (Arg.getKind() == TemplateArgument::Template)
- return TemplateArgumentLoc(Context, Arg,
- Builder.getWithLocInContext(Context), Loc);
-
+ Builder.MakeTrivial(Context, Template.getQualifier(), Loc);
return TemplateArgumentLoc(
- Context, Arg, Builder.getWithLocInContext(Context), Loc, Loc);
+ Context, Arg, Loc, Builder.getWithLocInContext(Context), Loc,
+ /*EllipsisLoc=*/Arg.getKind() == TemplateArgument::TemplateExpansion
+ ? Loc
+ : SourceLocation());
}
case TemplateArgument::Expression:
@@ -3154,8 +3141,9 @@ static TemplateDeductionResult ConvertDeducedTemplateArguments(
S.getLangOpts().CPlusPlus17);
DefArg = S.SubstDefaultTemplateArgumentIfAvailable(
- TD, TD->getLocation(), TD->getSourceRange().getEnd(), Param,
- CTAI.SugaredConverted, CTAI.CanonicalConverted, HasDefaultArg);
+ TD, /*TemplateKWLoc=*/SourceLocation(), TD->getLocation(),
+ TD->getSourceRange().getEnd(), Param, CTAI.SugaredConverted,
+ CTAI.CanonicalConverted, HasDefaultArg);
}
// If there was no default argument, deduction is incomplete.
@@ -3512,7 +3500,7 @@ Sema::DeduceTemplateArgumentsFromType(TemplateDecl *TD, QualType FromType,
QualType PType;
if (const auto *CTD = dyn_cast<ClassTemplateDecl>(TD)) {
// Use the InjectedClassNameType.
- PType = Context.getTypeDeclType(CTD->getTemplatedDecl());
+ PType = Context.getCanonicalTagType(CTD->getTemplatedDecl());
} else if (const auto *AliasTemplate = dyn_cast<TypeAliasTemplateDecl>(TD)) {
PType = AliasTemplate->getTemplatedDecl()->getUnderlyingType();
} else {
@@ -3571,7 +3559,7 @@ static bool isSimpleTemplateIdType(QualType T) {
//
// This only arises during class template argument deduction for a copy
// deduction candidate, where it permits slicing.
- if (T->getAs<InjectedClassNameType>())
+ if (isa<InjectedClassNameType>(T.getCanonicalType()))
return true;
return false;
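The injected-class-name case mentioned in the comment arises for the copy deduction candidate. A minimal illustration:

```cpp
template <typename T> struct S {
  S(T);
  S(const S&);  // parameter written with the injected-class-name 'S'
};

S s1{0};   // S(T) deduces S<int>
S s2{s1};  // copy deduction candidate deduces S<int>, not S<S<int>>
```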
@@ -4182,7 +4170,7 @@ static QualType GetTypeOfFunction(Sema &S, const OverloadExpr::FindResult &R,
return {};
return S.Context.getMemberPointerType(
- Fn->getType(), /*Qualifier=*/nullptr, Method->getParent());
+ Fn->getType(), /*Qualifier=*/std::nullopt, Method->getParent());
}
if (!R.IsAddressOfOperand) return Fn->getType();
@@ -5138,10 +5126,12 @@ namespace {
return TransformDesugared(TLB, TL);
QualType Result = SemaRef.Context.getDeducedTemplateSpecializationType(
- TL.getTypePtr()->getTemplateName(),
+ TL.getTypePtr()->getKeyword(), TL.getTypePtr()->getTemplateName(),
Replacement, Replacement.isNull());
auto NewTL = TLB.push<DeducedTemplateSpecializationTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
NewTL.setNameLoc(TL.getNameLoc());
+ NewTL.setQualifierLoc(TL.getQualifierLoc());
return Result;
}
@@ -5606,8 +5596,9 @@ static TemplateDeductionResult CheckDeductionConsistency(
// so let it transform their specializations instead.
bool IsDeductionGuide = isa<CXXDeductionGuideDecl>(FTD->getTemplatedDecl());
if (IsDeductionGuide) {
- if (auto *Injected = P->getAs<InjectedClassNameType>())
- P = Injected->getInjectedSpecializationType();
+ if (auto *Injected = P->getAsCanonical<InjectedClassNameType>())
+ P = Injected->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ S.Context);
}
QualType InstP = S.SubstType(P.getCanonicalType(), MLTAL, FTD->getLocation(),
FTD->getDeclName(), &IsIncompleteSubstitution);
@@ -5626,10 +5617,12 @@ static TemplateDeductionResult CheckDeductionConsistency(
auto T1 = S.Context.getUnqualifiedArrayType(InstP.getNonReferenceType());
auto T2 = S.Context.getUnqualifiedArrayType(A.getNonReferenceType());
if (IsDeductionGuide) {
- if (auto *Injected = T1->getAs<InjectedClassNameType>())
- T1 = Injected->getInjectedSpecializationType();
- if (auto *Injected = T2->getAs<InjectedClassNameType>())
- T2 = Injected->getInjectedSpecializationType();
+ if (auto *Injected = T1->getAsCanonical<InjectedClassNameType>())
+ T1 = Injected->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ S.Context);
+ if (auto *Injected = T2->getAsCanonical<InjectedClassNameType>())
+ T2 = Injected->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ S.Context);
}
if (!S.Context.hasSameType(T1, T2))
return TemplateDeductionResult::NonDeducedMismatch;
@@ -6471,8 +6464,8 @@ Sema::getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc) {
- QualType PT1 = PS1->getInjectedSpecializationType().getCanonicalType();
- QualType PT2 = PS2->getInjectedSpecializationType().getCanonicalType();
+ QualType PT1 = PS1->getCanonicalInjectedSpecializationType(Context);
+ QualType PT2 = PS2->getCanonicalInjectedSpecializationType(Context);
TemplateDeductionInfo Info(Loc);
return getMoreSpecialized(*this, PT1, PT2, PS1, PS2, Info);
@@ -6481,9 +6474,8 @@ Sema::getMoreSpecializedPartialSpecialization(
bool Sema::isMoreSpecializedThanPrimary(
ClassTemplatePartialSpecializationDecl *Spec, TemplateDeductionInfo &Info) {
ClassTemplateDecl *Primary = Spec->getSpecializedTemplate();
- QualType PrimaryT =
- Primary->getInjectedClassNameSpecialization().getCanonicalType();
- QualType PartialT = Spec->getInjectedSpecializationType().getCanonicalType();
+ QualType PrimaryT = Primary->getCanonicalInjectedSpecializationType(Context);
+ QualType PartialT = Spec->getCanonicalInjectedSpecializationType(Context);
ClassTemplatePartialSpecializationDecl *MaybeSpec =
getMoreSpecialized(*this, PartialT, PrimaryT, Spec, Primary, Info);
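Both partial-ordering entry points now compare the canonical injected specialization types. The ordering they implement is the usual one (illustrative):

```cpp
template <typename T> struct S {};      // primary template
template <typename T> struct S<T*> {};  // partial specialization

S<int*> s;  // the partial specialization is more specialized than the
            // primary template, so it is selected here
```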
@@ -6790,19 +6782,13 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
/// Mark the template parameters that are used by the given
/// nested name specifier.
-static void
-MarkUsedTemplateParameters(ASTContext &Ctx,
- NestedNameSpecifier *NNS,
- bool OnlyDeduced,
- unsigned Depth,
- llvm::SmallBitVector &Used) {
- if (!NNS)
+static void MarkUsedTemplateParameters(ASTContext &Ctx, NestedNameSpecifier NNS,
+ bool OnlyDeduced, unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ if (NNS.getKind() != NestedNameSpecifier::Kind::Type)
return;
-
- MarkUsedTemplateParameters(Ctx, NNS->getPrefix(), OnlyDeduced, Depth,
- Used);
- MarkUsedTemplateParameters(Ctx, QualType(NNS->getAsType(), 0),
- OnlyDeduced, Depth, Used);
+ MarkUsedTemplateParameters(Ctx, QualType(NNS.getAsType(), 0), OnlyDeduced,
+ Depth, Used);
}
/// Mark the template parameters that are used by the given
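The qualifier form this overload now handles corresponds to template parameters appearing inside a nested-name-specifier, as in this sketch (hypothetical names):

```cpp
struct B { using type = int; };

// 'T' occurs only inside the nested-name-specifier 'T::', so it is marked
// as used but sits in a non-deduced context.
template <typename T> void f(typename T::type*);

void g(int* p) { f<B>(p); }  // T must be supplied explicitly
```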
@@ -6876,7 +6862,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
MarkUsedTemplateParameters(Ctx, MemPtr->getPointeeType(), OnlyDeduced,
Depth, Used);
MarkUsedTemplateParameters(Ctx,
- QualType(MemPtr->getQualifier()->getAsType(), 0),
+ QualType(MemPtr->getQualifier().getAsType(), 0),
OnlyDeduced, Depth, Used);
break;
}
@@ -6988,13 +6974,20 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
= cast<SubstTemplateTypeParmPackType>(T);
if (Subst->getReplacedParameter()->getDepth() == Depth)
Used[Subst->getIndex()] = true;
- MarkUsedTemplateParameters(Ctx, Subst->getArgumentPack(),
+ MarkUsedTemplateParameters(Ctx, Subst->getArgumentPack(), OnlyDeduced,
+ Depth, Used);
+ break;
+ }
+ case Type::SubstBuiltinTemplatePack: {
+ MarkUsedTemplateParameters(Ctx, cast<SubstPackType>(T)->getArgumentPack(),
OnlyDeduced, Depth, Used);
break;
}
case Type::InjectedClassName:
- T = cast<InjectedClassNameType>(T)->getInjectedSpecializationType();
+ T = cast<InjectedClassNameType>(T)
+ ->getOriginalDecl()
+ ->getCanonicalTemplateSpecializationType(Ctx);
[[fallthrough]];
case Type::TemplateSpecialization: {
diff --git a/clang/lib/Sema/SemaTemplateDeductionGuide.cpp b/clang/lib/Sema/SemaTemplateDeductionGuide.cpp
index 9be1c9c..3d54d1e 100644
--- a/clang/lib/Sema/SemaTemplateDeductionGuide.cpp
+++ b/clang/lib/Sema/SemaTemplateDeductionGuide.cpp
@@ -105,22 +105,21 @@ public:
return false;
}
- QualType
- RebuildTemplateSpecializationType(TemplateName Template,
- SourceLocation TemplateNameLoc,
- TemplateArgumentListInfo &TemplateArgs) {
+ QualType RebuildTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword, TemplateName Template,
+ SourceLocation TemplateNameLoc, TemplateArgumentListInfo &TemplateArgs) {
if (!OuterInstantiationArgs ||
!isa_and_present<TypeAliasTemplateDecl>(Template.getAsTemplateDecl()))
- return Base::RebuildTemplateSpecializationType(Template, TemplateNameLoc,
- TemplateArgs);
+ return Base::RebuildTemplateSpecializationType(
+ Keyword, Template, TemplateNameLoc, TemplateArgs);
auto *TATD = cast<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
auto *Pattern = TATD;
while (Pattern->getInstantiatedFromMemberTemplate())
Pattern = Pattern->getInstantiatedFromMemberTemplate();
if (!mightReferToOuterTemplateParameters(Pattern->getTemplatedDecl()))
- return Base::RebuildTemplateSpecializationType(Template, TemplateNameLoc,
- TemplateArgs);
+ return Base::RebuildTemplateSpecializationType(
+ Keyword, Template, TemplateNameLoc, TemplateArgs);
Decl *NewD =
TypedefNameInstantiator->InstantiateTypeAliasTemplateDecl(TATD);
@@ -131,13 +130,14 @@ public:
MaterializedTypedefs.push_back(NewTATD->getTemplatedDecl());
return Base::RebuildTemplateSpecializationType(
- TemplateName(NewTATD), TemplateNameLoc, TemplateArgs);
+ Keyword, TemplateName(NewTATD), TemplateNameLoc, TemplateArgs);
}
QualType TransformTypedefType(TypeLocBuilder &TLB, TypedefTypeLoc TL) {
ASTContext &Context = SemaRef.getASTContext();
- TypedefNameDecl *OrigDecl = TL.getTypedefNameDecl();
+ TypedefNameDecl *OrigDecl = TL.getDecl();
TypedefNameDecl *Decl = OrigDecl;
+ const TypedefType *T = TL.getTypePtr();
// Transform the underlying type of the typedef and clone the Decl only if
// the typedef has a dependent context.
bool InDependentContext = OrigDecl->getDeclContext()->isDependentContext();
@@ -155,7 +155,7 @@ public:
// };
// };
if (OuterInstantiationArgs && InDependentContext &&
- TL.getTypePtr()->isInstantiationDependentType()) {
+ T->isInstantiationDependentType()) {
Decl = cast_if_present<TypedefNameDecl>(
TypedefNameInstantiator->InstantiateTypedefNameDecl(
OrigDecl, /*IsTypeAlias=*/isa<TypeAliasDecl>(OrigDecl)));
@@ -180,10 +180,17 @@ public:
MaterializedTypedefs.push_back(Decl);
}
- QualType TDTy = Context.getTypedefType(Decl);
- TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(TDTy);
- TypedefTL.setNameLoc(TL.getNameLoc());
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!QualifierLoc)
+ return QualType();
+ }
+ QualType TDTy = Context.getTypedefType(
+ T->getKeyword(), QualifierLoc.getNestedNameSpecifier(), Decl);
+ TLB.push<TypedefTypeLoc>(TDTy).set(TL.getElaboratedKeywordLoc(),
+ QualifierLoc, TL.getNameLoc());
return TDTy;
}
};
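This transform supports building deduction guides for alias templates. The user-facing feature, for reference, is C++20 alias template CTAD (example assumed, not from the patch):

```cpp
#include <vector>

template <typename T> using Vec = std::vector<T>;

Vec v{1, 2, 3};  // C++20: guides synthesized for the alias deduce Vec<int>
```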
@@ -327,7 +334,7 @@ struct ConvertConstructorToDeductionGuideTransform {
DeclarationName DeductionGuideName =
SemaRef.Context.DeclarationNames.getCXXDeductionGuideName(Template);
- QualType DeducedType = SemaRef.Context.getTypeDeclType(Primary);
+ QualType DeducedType = SemaRef.Context.getCanonicalTagType(Primary);
// Index adjustment to apply to convert depth-1 template parameters into
// depth-0 template parameters.
@@ -593,7 +600,10 @@ private:
// context of the template), so implicit deduction guides can never collide
// with explicit ones.
QualType ReturnType = DeducedType;
- TLB.pushTypeSpec(ReturnType).setNameLoc(Primary->getLocation());
+ auto TTL = TLB.push<TagTypeLoc>(ReturnType);
+ TTL.setElaboratedKeywordLoc(SourceLocation());
+ TTL.setQualifierLoc(NestedNameSpecifierLoc());
+ TTL.setNameLoc(Primary->getLocation());
// Resolving a wording defect, we also inherit the variadicness of the
// constructor.
@@ -954,7 +964,8 @@ Expr *buildIsDeducibleConstraint(Sema &SemaRef,
SmallVector<TypeSourceInfo *> IsDeducibleTypeTraitArgs = {
Context.getTrivialTypeSourceInfo(
Context.getDeducedTemplateSpecializationType(
- TemplateName(AliasTemplate), /*DeducedType=*/QualType(),
+ ElaboratedTypeKeyword::None, TemplateName(AliasTemplate),
+ /*DeducedType=*/QualType(),
/*IsDependent=*/true),
AliasTemplate->getLocation()), // template specialization type whose
// arguments will be deduced.
@@ -970,10 +981,7 @@ Expr *buildIsDeducibleConstraint(Sema &SemaRef,
std::pair<TemplateDecl *, llvm::ArrayRef<TemplateArgument>>
getRHSTemplateDeclAndArgs(Sema &SemaRef, TypeAliasTemplateDecl *AliasTemplate) {
- // Unwrap the sugared ElaboratedType.
- auto RhsType = AliasTemplate->getTemplatedDecl()
- ->getUnderlyingType()
- .getSingleStepDesugaredType(SemaRef.Context);
+ auto RhsType = AliasTemplate->getTemplatedDecl()->getUnderlyingType();
TemplateDecl *Template = nullptr;
llvm::ArrayRef<TemplateArgument> AliasRhsTemplateArgs;
if (const auto *TST = RhsType->getAs<TemplateSpecializationType>()) {
@@ -987,8 +995,8 @@ getRHSTemplateDeclAndArgs(Sema &SemaRef, TypeAliasTemplateDecl *AliasTemplate) {
// Cases where template arguments in the RHS of the alias are not
// dependent. e.g.
// using AliasFoo = Foo<bool>;
- if (const auto *CTSD = llvm::dyn_cast<ClassTemplateSpecializationDecl>(
- RT->getAsCXXRecordDecl())) {
+ if (const auto *CTSD =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getOriginalDecl())) {
Template = CTSD->getSpecializedTemplate();
AliasRhsTemplateArgs = CTSD->getTemplateArgs().asArray();
}
@@ -1048,12 +1056,11 @@ BuildDeductionGuideForTypeAlias(Sema &SemaRef,
// The (trailing) return type of the deduction guide.
const TemplateSpecializationType *FReturnType =
RType->getAs<TemplateSpecializationType>();
- if (const auto *InjectedCNT = RType->getAs<InjectedClassNameType>())
+ if (const auto *ICNT = RType->getAsCanonical<InjectedClassNameType>())
// implicitly-generated deduction guide.
- FReturnType = InjectedCNT->getInjectedTST();
- else if (const auto *ET = RType->getAs<ElaboratedType>())
- // explicit deduction guide.
- FReturnType = ET->getNamedType()->getAsNonAliasTemplateSpecializationType();
+ FReturnType = cast<TemplateSpecializationType>(
+ ICNT->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ SemaRef.Context));
assert(FReturnType && "expected to see a return type");
// Deduce template arguments of the deduction guide f from the RHS of
// the alias.
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 0d96d18..a72c95d 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -353,45 +353,50 @@ Response HandleFunctionTemplateDecl(Sema &SemaRef,
SemaRef.Context),
/*Final=*/false);
- NestedNameSpecifier *NNS = FTD->getTemplatedDecl()->getQualifier();
-
- while (const Type *Ty = NNS ? NNS->getAsType() : nullptr) {
- if (NNS->isInstantiationDependent()) {
- if (const auto *TSTy = Ty->getAs<TemplateSpecializationType>()) {
- ArrayRef<TemplateArgument> Arguments = TSTy->template_arguments();
- // Prefer template arguments from the injected-class-type if possible.
- // For example,
- // ```cpp
- // template <class... Pack> struct S {
- // template <class T> void foo();
- // };
- // template <class... Pack> template <class T>
- // ^^^^^^^^^^^^^ InjectedTemplateArgs
- // They're of kind TemplateArgument::Pack, not of
- // TemplateArgument::Type.
- // void S<Pack...>::foo() {}
- // ^^^^^^^
- // TSTy->template_arguments() (which are of PackExpansionType)
- // ```
- // This meets the contract in
- // TreeTransform::TryExpandParameterPacks that the template arguments
- // for unexpanded parameters should be of a Pack kind.
- if (TSTy->isCurrentInstantiation()) {
- auto *RD = TSTy->getCanonicalTypeInternal()->getAsCXXRecordDecl();
- if (ClassTemplateDecl *CTD = RD->getDescribedClassTemplate())
- Arguments = CTD->getInjectedTemplateArgs(SemaRef.Context);
- else if (auto *Specialization =
- dyn_cast<ClassTemplateSpecializationDecl>(RD))
- Arguments =
- Specialization->getTemplateInstantiationArgs().asArray();
- }
- Result.addOuterTemplateArguments(
- TSTy->getTemplateName().getAsTemplateDecl(), Arguments,
- /*Final=*/false);
- }
- }
+ NestedNameSpecifier NNS = FTD->getTemplatedDecl()->getQualifier();
+
+ for (const Type *Ty = NNS.getKind() == NestedNameSpecifier::Kind::Type
+ ? NNS.getAsType()
+ : nullptr,
+ *NextTy = nullptr;
+ Ty && Ty->isInstantiationDependentType();
+ Ty = std::exchange(NextTy, nullptr)) {
+ if (NestedNameSpecifier P = Ty->getPrefix();
+ P.getKind() == NestedNameSpecifier::Kind::Type)
+ NextTy = P.getAsType();
+ const auto *TSTy = dyn_cast<TemplateSpecializationType>(Ty);
+ if (!TSTy)
+ continue;
- NNS = NNS->getPrefix();
+ ArrayRef<TemplateArgument> Arguments = TSTy->template_arguments();
+ // Prefer template arguments from the injected-class-name if possible.
+ // For example,
+ // ```cpp
+ // template <class... Pack> struct S {
+ // template <class T> void foo();
+ // };
+ // template <class... Pack> template <class T>
+ // ^^^^^^^^^^^^^ InjectedTemplateArgs
+ // They're of kind TemplateArgument::Pack, not of
+ // TemplateArgument::Type.
+ // void S<Pack...>::foo() {}
+ // ^^^^^^^
+ // TSTy->template_arguments() (which are of PackExpansionType)
+ // ```
+ // This meets the contract in
+ // TreeTransform::TryExpandParameterPacks that the template arguments
+ // for unexpanded parameters should be of a Pack kind.
+ if (TSTy->isCurrentInstantiation()) {
+ auto *RD = TSTy->getCanonicalTypeInternal()->getAsCXXRecordDecl();
+ if (ClassTemplateDecl *CTD = RD->getDescribedClassTemplate())
+ Arguments = CTD->getInjectedTemplateArgs(SemaRef.Context);
+ else if (auto *Specialization =
+ dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ Arguments = Specialization->getTemplateInstantiationArgs().asArray();
+ }
+ Result.addOuterTemplateArguments(
+ TSTy->getTemplateName().getAsTemplateDecl(), Arguments,
+ /*Final=*/false);
}
}
@@ -1165,7 +1170,7 @@ void Sema::PrintInstantiationStack(InstantiationContextDiagFuncRef DiagFunc) {
DiagFunc(Active->PointOfInstantiation,
PDiag(diag::note_member_synthesized_at)
<< MD->isExplicitlyDefaulted() << DFK.asSpecialMember()
- << Context.getTagDeclType(MD->getParent()));
+ << Context.getCanonicalTagType(MD->getParent()));
} else if (DFK.isComparison()) {
QualType RecordType = FD->getParamDecl(0)
->getType()
@@ -1454,6 +1459,7 @@ namespace {
bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
+ bool FailOnPackProducingTemplates,
bool &ShouldExpand, bool &RetainExpansion,
UnsignedOrNone &NumExpansions) {
if (SemaRef.CurrentInstantiationScope &&
@@ -1467,8 +1473,9 @@ namespace {
}
return getSema().CheckParameterPacksForExpansion(
- EllipsisLoc, PatternRange, Unexpanded, TemplateArgs, ShouldExpand,
- RetainExpansion, NumExpansions);
+ EllipsisLoc, PatternRange, Unexpanded, TemplateArgs,
+ FailOnPackProducingTemplates, ShouldExpand, RetainExpansion,
+ NumExpansions);
}
void ExpandingFunctionParameterPack(ParmVarDecl *Pack) {
@@ -1510,6 +1517,21 @@ namespace {
}
}
+ MultiLevelTemplateArgumentList ForgetSubstitution() {
+ MultiLevelTemplateArgumentList New;
+ New.addOuterRetainedLevels(this->TemplateArgs.getNumLevels());
+
+ MultiLevelTemplateArgumentList Old =
+ const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
+ const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs) =
+ std::move(New);
+ return Old;
+ }
+ void RememberSubstitution(MultiLevelTemplateArgumentList Old) {
+ const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs) =
+ std::move(Old);
+ }
+
TemplateArgument
getTemplateArgumentPackPatternForRewrite(const TemplateArgument &TA) {
if (TA.getKind() != TemplateArgument::Pack)
@@ -1595,15 +1617,9 @@ namespace {
VarDecl *RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
TypeSourceInfo *TSInfo, QualType T);
- /// Check for tag mismatches when instantiating an
- /// elaborated type.
- QualType RebuildElaboratedType(SourceLocation KeywordLoc,
- ElaboratedTypeKeyword Keyword,
- NestedNameSpecifierLoc QualifierLoc,
- QualType T);
-
TemplateName
- TransformTemplateName(CXXScopeSpec &SS, TemplateName Name,
+ TransformTemplateName(NestedNameSpecifierLoc &QualifierLoc,
+ SourceLocation TemplateKWLoc, TemplateName Name,
SourceLocation NameLoc,
QualType ObjectType = QualType(),
NamedDecl *FirstQualifierInScope = nullptr,
@@ -1649,22 +1665,20 @@ namespace {
return inherited::TransformFunctionProtoType(TLB, TL);
}
- QualType TransformInjectedClassNameType(TypeLocBuilder &TLB,
- InjectedClassNameTypeLoc TL) {
- auto Type = inherited::TransformInjectedClassNameType(TLB, TL);
+ QualType TransformTagType(TypeLocBuilder &TLB, TagTypeLoc TL) {
+ auto Type = inherited::TransformTagType(TLB, TL);
+ if (!Type.isNull())
+ return Type;
// Special case for transforming a deduction guide, we return a
// transformed TemplateSpecializationType.
- if (Type.isNull() &&
- SemaRef.CodeSynthesisContexts.back().Kind ==
- Sema::CodeSynthesisContext::BuildingDeductionGuides) {
- // Return a TemplateSpecializationType for transforming a deduction
- // guide.
- if (auto *ICT = TL.getType()->getAs<InjectedClassNameType>()) {
- auto Type =
- inherited::TransformType(ICT->getInjectedSpecializationType());
- TLB.pushTrivial(SemaRef.Context, Type, TL.getNameLoc());
- return Type;
- }
+ // FIXME: Why is this hack necessary?
+ if (const auto *ICNT = dyn_cast<InjectedClassNameType>(TL.getTypePtr());
+ ICNT && SemaRef.CodeSynthesisContexts.back().Kind ==
+ Sema::CodeSynthesisContext::BuildingDeductionGuides) {
+ Type = inherited::TransformType(
+ ICNT->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ SemaRef.Context));
+ TLB.pushTrivial(SemaRef.Context, Type, TL.getNameLoc());
}
return Type;
}
@@ -1700,6 +1714,26 @@ namespace {
return inherited::TransformTemplateArgument(Input, Output, Uneval);
}
+ using TreeTransform::TransformTemplateSpecializationType;
+ QualType
+ TransformTemplateSpecializationType(TypeLocBuilder &TLB,
+ TemplateSpecializationTypeLoc TL) {
+ auto *T = TL.getTypePtr();
+ if (!getSema().ArgPackSubstIndex || !T->isSugared() ||
+ !isPackProducingBuiltinTemplateName(T->getTemplateName()))
+ return TreeTransform::TransformTemplateSpecializationType(TLB, TL);
+ // Look through sugar to get to the SubstBuiltinTemplatePackType that we
+ // need to substitute into.
+
+ // `TransformType` code below will handle picking the element from a pack
+ // with the index `ArgPackSubstIndex`.
+ // FIXME: add ability to represent the sugared type for the N-th element
+ // of a builtin pack and produce the sugar here.
+ QualType R = TransformType(T->desugar());
+ TLB.pushTrivial(getSema().getASTContext(), R, TL.getBeginLoc());
+ return R;
+ }
+
UnsignedOrNone ComputeSizeOfPackExprWithoutSubstitution(
ArrayRef<TemplateArgument> PackArgs) {
// Don't do this when rewriting template parameters for CTAD:
@@ -1749,6 +1783,9 @@ namespace {
TransformSubstTemplateTypeParmPackType(TypeLocBuilder &TLB,
SubstTemplateTypeParmPackTypeLoc TL,
bool SuppressObjCLifetime);
+ QualType
+ TransformSubstBuiltinTemplatePackType(TypeLocBuilder &TLB,
+ SubstBuiltinTemplatePackTypeLoc TL);
CXXRecordDecl::LambdaDependencyKind
ComputeLambdaDependency(LambdaScopeInfo *LSI) {
@@ -1986,6 +2023,7 @@ bool TemplateInstantiator::maybeInstantiateFunctionParameterToScope(
UnsignedOrNone NumExpansions = OrigNumExpansions;
if (TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(),
Pattern.getSourceRange(), Unexpanded,
+ /*FailOnPackProducingTemplates=*/true,
ShouldExpand, RetainExpansion, NumExpansions))
return true;
@@ -2049,7 +2087,7 @@ TemplateInstantiator::TransformFirstQualifierInScope(NamedDecl *D,
return cast_or_null<NamedDecl>(TransformDecl(Loc, D));
if (const TagType *Tag = T->getAs<TagType>())
- return Tag->getDecl();
+ return Tag->getOriginalDecl();
// The resulting type is not a tag; complain.
getSema().Diag(Loc, diag::err_nested_name_spec_non_tag) << T;
@@ -2082,44 +2120,15 @@ VarDecl *TemplateInstantiator::RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
return Var;
}
-QualType
-TemplateInstantiator::RebuildElaboratedType(SourceLocation KeywordLoc,
- ElaboratedTypeKeyword Keyword,
- NestedNameSpecifierLoc QualifierLoc,
- QualType T) {
- if (const TagType *TT = T->getAs<TagType>()) {
- TagDecl* TD = TT->getDecl();
-
- SourceLocation TagLocation = KeywordLoc;
-
- IdentifierInfo *Id = TD->getIdentifier();
-
- // TODO: should we even warn on struct/class mismatches for this? Seems
- // like it's likely to produce a lot of spurious errors.
- if (Id && Keyword != ElaboratedTypeKeyword::None &&
- Keyword != ElaboratedTypeKeyword::Typename) {
- TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForKeyword(Keyword);
- if (!SemaRef.isAcceptableTagRedeclaration(TD, Kind, /*isDefinition*/false,
- TagLocation, Id)) {
- SemaRef.Diag(TagLocation, diag::err_use_with_wrong_tag)
- << Id
- << FixItHint::CreateReplacement(SourceRange(TagLocation),
- TD->getKindName());
- SemaRef.Diag(TD->getLocation(), diag::note_previous_use);
- }
- }
- }
-
- return inherited::RebuildElaboratedType(KeywordLoc, Keyword, QualifierLoc, T);
-}
-
TemplateName TemplateInstantiator::TransformTemplateName(
- CXXScopeSpec &SS, TemplateName Name, SourceLocation NameLoc,
- QualType ObjectType, NamedDecl *FirstQualifierInScope,
- bool AllowInjectedClassName) {
- if (TemplateTemplateParmDecl *TTP
- = dyn_cast_or_null<TemplateTemplateParmDecl>(Name.getAsTemplateDecl())) {
- if (TTP->getDepth() < TemplateArgs.getNumLevels()) {
+ NestedNameSpecifierLoc &QualifierLoc, SourceLocation TemplateKWLoc,
+ TemplateName Name, SourceLocation NameLoc, QualType ObjectType,
+ NamedDecl *FirstQualifierInScope, bool AllowInjectedClassName) {
+ if (Name.getKind() == TemplateName::Template) {
+ assert(!QualifierLoc && "Unexpected qualifier");
+ if (auto *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(Name.getAsTemplateDecl());
+ TTP && TTP->getDepth() < TemplateArgs.getNumLevels()) {
// If the corresponding template argument is NULL or non-existent, it's
// because we are performing instantiation from explicitly-specified
// template arguments in a function template, but there were some
@@ -2162,7 +2171,6 @@ TemplateName TemplateInstantiator::TransformTemplateName(
TemplateName Template = Arg.getAsTemplate();
assert(!Template.isNull() && "Null template template argument");
-
return getSema().Context.getSubstTemplateTemplateParm(
Template, AssociatedDecl, TTP->getIndex(), PackIndex, Final);
}
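The template template parameter branch above substitutes the corresponding argument for names like 'TT' below (illustrative sketch):

```cpp
template <typename T> struct Box { T v; };

template <template <typename> class TT> struct Apply {
  TT<int> value;  // the TemplateName 'TT' is what gets substituted
};

Apply<Box> a;  // yields Box<int> as the type of 'value'
```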
@@ -2181,9 +2189,9 @@ TemplateName TemplateInstantiator::TransformTemplateName(
getPackIndex(Pack), SubstPack->getFinal());
}
- return inherited::TransformTemplateName(SS, Name, NameLoc, ObjectType,
- FirstQualifierInScope,
- AllowInjectedClassName);
+ return inherited::TransformTemplateName(
+ QualifierLoc, TemplateKWLoc, Name, NameLoc, ObjectType,
+ FirstQualifierInScope, AllowInjectedClassName);
}
ExprResult
@@ -2747,6 +2755,17 @@ QualType TemplateInstantiator::TransformSubstTemplateTypeParmPackType(
getPackIndex(Pack), Arg, TL.getNameLoc());
}
+QualType TemplateInstantiator::TransformSubstBuiltinTemplatePackType(
+ TypeLocBuilder &TLB, SubstBuiltinTemplatePackTypeLoc TL) {
+ if (!getSema().ArgPackSubstIndex)
+ return TreeTransform::TransformSubstBuiltinTemplatePackType(TLB, TL);
+ auto &Sema = getSema();
+ TemplateArgument Result = getPackSubstitutedTemplateArgument(
+ Sema, TL.getTypePtr()->getArgumentPack());
+ TLB.pushTrivial(Sema.getASTContext(), Result.getAsType(), TL.getBeginLoc());
+ return Result.getAsType();
+}
+
static concepts::Requirement::SubstitutionDiagnostic *
createSubstDiag(Sema &S, TemplateDeductionInfo &Info,
Sema::EntityPrinter Printer) {
@@ -3159,10 +3178,6 @@ namespace {
// Only these types can contain 'auto' types, and subsequently be replaced
// by references to invented parameters.
- TemplateTypeParmDecl *VisitElaboratedType(const ElaboratedType *T) {
- return Visit(T->getNamedType());
- }
-
TemplateTypeParmDecl *VisitPointerType(const PointerType *T) {
return Visit(T->getPointeeType());
}
@@ -3478,6 +3493,72 @@ bool Sema::SubstDefaultArgument(
return false;
}
+// See TreeTransform::PreparePackForExpansion for the relevant comment.
+// This function implements the same concept for base specifiers.
+static bool
+PreparePackForExpansion(Sema &S, const CXXBaseSpecifier &Base,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TypeSourceInfo *&Out, UnexpandedInfo &Info) {
+ SourceRange BaseSourceRange = Base.getSourceRange();
+ SourceLocation BaseEllipsisLoc = Base.getEllipsisLoc();
+ Info.Ellipsis = Base.getEllipsisLoc();
+ auto ComputeInfo = [&S, &TemplateArgs, BaseSourceRange, BaseEllipsisLoc](
+ TypeSourceInfo *BaseTypeInfo,
+ bool IsLateExpansionAttempt, UnexpandedInfo &Info) {
+ // This is a pack expansion. See whether we should expand it now, or
+ // wait until later.
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ S.collectUnexpandedParameterPacks(BaseTypeInfo->getTypeLoc(), Unexpanded);
+ if (IsLateExpansionAttempt) {
+ // Request expansion only when there is an opportunity to expand a pack
+ // that required a substitution first.
+ bool SawPackTypes =
+ llvm::any_of(Unexpanded, [](UnexpandedParameterPack P) {
+ return P.first.dyn_cast<const SubstBuiltinTemplatePackType *>();
+ });
+ if (!SawPackTypes) {
+ Info.Expand = false;
+ return false;
+ }
+ }
+
+ // Determine whether the set of unexpanded parameter packs can and should be
+ // expanded.
+ Info.Expand = false;
+ Info.RetainExpansion = false;
+ Info.NumExpansions = std::nullopt;
+ return S.CheckParameterPacksForExpansion(
+ BaseEllipsisLoc, BaseSourceRange, Unexpanded, TemplateArgs,
+ /*FailOnPackProducingTemplates=*/false, Info.Expand,
+ Info.RetainExpansion, Info.NumExpansions);
+ };
+
+ if (ComputeInfo(Base.getTypeSourceInfo(), false, Info))
+ return true;
+
+ if (Info.Expand) {
+ Out = Base.getTypeSourceInfo();
+ return false;
+ }
+
+ // The resulting base specifier will (still) be a pack expansion.
+ {
+ Sema::ArgPackSubstIndexRAII SubstIndex(S, std::nullopt);
+ Out = S.SubstType(Base.getTypeSourceInfo(), TemplateArgs,
+ BaseSourceRange.getBegin(), DeclarationName());
+ }
+ if (!Out->getType()->containsUnexpandedParameterPack())
+ return false;
+
+ // Some packs will learn their length after substitution.
+ // We may need to request their expansion.
+ if (ComputeInfo(Out, /*IsLateExpansionAttempt=*/true, Info))
+ return true;
+ if (Info.Expand)
+ Info.ExpandUnderForgetSubstitions = true;
+ return false;
+}
+
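PreparePackForExpansion mirrors the TreeTransform logic for base specifiers that are pack expansions, as in this minimal example:

```cpp
struct A {};
struct B {};

template <typename... Bases>
struct D : Bases... {};  // 'Bases...' is a pack-expanded base specifier

D<A, B> d;  // instantiation expands the pack: each base is substituted and
            // checked individually, as in SubstBaseSpecifiers below
```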
bool
Sema::SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
@@ -3495,47 +3576,37 @@ Sema::SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
}
SourceLocation EllipsisLoc;
- TypeSourceInfo *BaseTypeLoc;
+ TypeSourceInfo *BaseTypeLoc = nullptr;
if (Base.isPackExpansion()) {
- // This is a pack expansion. See whether we should expand it now, or
- // wait until later.
- SmallVector<UnexpandedParameterPack, 2> Unexpanded;
- collectUnexpandedParameterPacks(Base.getTypeSourceInfo()->getTypeLoc(),
- Unexpanded);
- bool ShouldExpand = false;
- bool RetainExpansion = false;
- UnsignedOrNone NumExpansions = std::nullopt;
- if (CheckParameterPacksForExpansion(Base.getEllipsisLoc(),
- Base.getSourceRange(),
- Unexpanded,
- TemplateArgs, ShouldExpand,
- RetainExpansion,
- NumExpansions)) {
+ UnexpandedInfo Info;
+ if (PreparePackForExpansion(*this, Base, TemplateArgs, BaseTypeLoc,
+ Info)) {
Invalid = true;
continue;
}
// If we should expand this pack expansion now, do so.
- if (ShouldExpand) {
- for (unsigned I = 0; I != *NumExpansions; ++I) {
+ MultiLevelTemplateArgumentList EmptyList;
+ const MultiLevelTemplateArgumentList *ArgsForSubst = &TemplateArgs;
+ if (Info.ExpandUnderForgetSubstitions)
+ ArgsForSubst = &EmptyList;
+
+ if (Info.Expand) {
+ for (unsigned I = 0; I != *Info.NumExpansions; ++I) {
Sema::ArgPackSubstIndexRAII SubstIndex(*this, I);
- TypeSourceInfo *BaseTypeLoc = SubstType(Base.getTypeSourceInfo(),
- TemplateArgs,
- Base.getSourceRange().getBegin(),
- DeclarationName());
- if (!BaseTypeLoc) {
+ TypeSourceInfo *Expanded =
+ SubstType(BaseTypeLoc, *ArgsForSubst,
+ Base.getSourceRange().getBegin(), DeclarationName());
+ if (!Expanded) {
Invalid = true;
continue;
}
- if (CXXBaseSpecifier *InstantiatedBase
- = CheckBaseSpecifier(Instantiation,
- Base.getSourceRange(),
- Base.isVirtual(),
- Base.getAccessSpecifierAsWritten(),
- BaseTypeLoc,
- SourceLocation()))
+ if (CXXBaseSpecifier *InstantiatedBase = CheckBaseSpecifier(
+ Instantiation, Base.getSourceRange(), Base.isVirtual(),
+ Base.getAccessSpecifierAsWritten(), Expanded,
+ SourceLocation()))
InstantiatedBases.push_back(InstantiatedBase);
else
Invalid = true;
@@ -3547,10 +3618,9 @@ Sema::SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
// The resulting base specifier will (still) be a pack expansion.
EllipsisLoc = Base.getEllipsisLoc();
Sema::ArgPackSubstIndexRAII SubstIndex(*this, std::nullopt);
- BaseTypeLoc = SubstType(Base.getTypeSourceInfo(),
- TemplateArgs,
- Base.getSourceRange().getBegin(),
- DeclarationName());
+ BaseTypeLoc =
+ SubstType(BaseTypeLoc, *ArgsForSubst,
+ Base.getSourceRange().getBegin(), DeclarationName());
} else {
BaseTypeLoc = SubstType(Base.getTypeSourceInfo(),
TemplateArgs,
@@ -4557,14 +4627,14 @@ Sema::SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
}
TemplateName
-Sema::SubstTemplateName(NestedNameSpecifierLoc QualifierLoc,
- TemplateName Name, SourceLocation Loc,
+Sema::SubstTemplateName(SourceLocation TemplateKWLoc,
+ NestedNameSpecifierLoc &QualifierLoc, TemplateName Name,
+ SourceLocation NameLoc,
const MultiLevelTemplateArgumentList &TemplateArgs) {
- TemplateInstantiator Instantiator(*this, TemplateArgs, Loc,
+ TemplateInstantiator Instantiator(*this, TemplateArgs, NameLoc,
DeclarationName());
- CXXScopeSpec SS;
- SS.Adopt(QualifierLoc);
- return Instantiator.TransformTemplateName(SS, Name, Loc);
+ return Instantiator.TransformTemplateName(QualifierLoc, TemplateKWLoc, Name,
+ NameLoc);
}
static const Decl *getCanonicalParmVarDecl(const Decl *D) {
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 87ec4f7..ee1b520 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -133,8 +133,9 @@ static void instantiateDependentAlignedAttr(
// FIXME: Use the actual location of the ellipsis.
SourceLocation EllipsisLoc = Aligned->getLocation();
if (S.CheckParameterPacksForExpansion(EllipsisLoc, Aligned->getRange(),
- Unexpanded, TemplateArgs, Expand,
- RetainExpansion, NumExpansions))
+ Unexpanded, TemplateArgs,
+ /*FailOnPackProducingTemplates=*/true,
+ Expand, RetainExpansion, NumExpansions))
return;
if (!Expand) {
@@ -1483,9 +1484,9 @@ Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
// If the old typedef was the name for linkage purposes of an anonymous
// tag decl, re-establish that relationship for the new typedef.
if (const TagType *oldTagType = D->getUnderlyingType()->getAs<TagType>()) {
- TagDecl *oldTag = oldTagType->getDecl();
+ TagDecl *oldTag = oldTagType->getOriginalDecl();
if (oldTag->getTypedefNameForAnonDecl() == D && !Invalid) {
- TagDecl *newTag = DI->getType()->castAs<TagType>()->getDecl();
+ TagDecl *newTag = DI->getType()->castAs<TagType>()->getOriginalDecl();
assert(!newTag->hasNameForLinkage());
newTag->setTypedefNameForAnonDecl(Typedef);
}
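The typedef-for-linkage relationship re-established here comes from anonymous tags named by a typedef, e.g. (hypothetical sketch):

```cpp
template <typename T> struct Holder {
  typedef struct { T value; } Item;  // 'Item' names the anonymous struct
                                     // for linkage purposes
};

Holder<int>::Item it{42};  // instantiation re-links the new typedef to the
                           // instantiated anonymous tag
```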
@@ -1914,7 +1915,8 @@ Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) {
UnsignedOrNone NumExpansions = std::nullopt;
if (SemaRef.CheckParameterPacksForExpansion(
D->getEllipsisLoc(), D->getSourceRange(), Unexpanded,
- TemplateArgs, ShouldExpand, RetainExpansion, NumExpansions))
+ TemplateArgs, /*FailOnPackProducingTemplates=*/true,
+ ShouldExpand, RetainExpansion, NumExpansions))
return nullptr;
assert(!RetainExpansion &&
@@ -2250,8 +2252,7 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
CXXRecordDecl *RecordInst = CXXRecordDecl::Create(
SemaRef.Context, Pattern->getTagKind(), DC, Pattern->getBeginLoc(),
- Pattern->getLocation(), Pattern->getIdentifier(), PrevDecl,
- /*DelayTypeCreation=*/true);
+ Pattern->getLocation(), Pattern->getIdentifier(), PrevDecl);
if (QualifierLoc)
RecordInst->setQualifierInfo(QualifierLoc);
@@ -2271,8 +2272,6 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
if (PrevClassTemplate) {
Inst->setCommonPtr(PrevClassTemplate->getCommonPtr());
- RecordInst->setTypeForDecl(
- PrevClassTemplate->getTemplatedDecl()->getTypeForDecl());
const ClassTemplateDecl *MostRecentPrevCT =
PrevClassTemplate->getMostRecentDecl();
TemplateParameterList *PrevParams =
@@ -2306,10 +2305,6 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
Inst->setPreviousDecl(PrevClassTemplate);
- // Trigger creation of the type for the instantiation.
- SemaRef.Context.getInjectedClassNameType(
- RecordInst, Inst->getInjectedClassNameSpecialization());
-
// Finish handling of friends.
if (isFriend) {
DC->makeDeclVisibleInContext(Inst);
@@ -2515,11 +2510,9 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
else
Record = CXXRecordDecl::Create(SemaRef.Context, D->getTagKind(), Owner,
D->getBeginLoc(), D->getLocation(),
- D->getIdentifier(), PrevDecl,
- /*DelayTypeCreation=*/IsInjectedClassName);
- // Link the type of the injected-class-name to that of the outer class.
- if (IsInjectedClassName)
- (void)SemaRef.Context.getTypeDeclType(Record, cast<CXXRecordDecl>(Owner));
+ D->getIdentifier(), PrevDecl);
+
+ Record->setImplicit(D->isImplicit());
// Substitute the nested name specifier, if any.
if (SubstQualifier(D, Record))
@@ -2528,7 +2521,6 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
SemaRef.InstantiateAttrsForDecl(TemplateArgs, D, Record, LateAttrs,
StartingScope);
- Record->setImplicit(D->isImplicit());
// FIXME: Check against AS_none is an ugly hack to work around the issue that
// the tag decls introduced by friend class declarations don't have an access
// specifier. Remove once this area of the code gets sorted out.
@@ -3164,8 +3156,8 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
Method->setIneligibleOrNotSelected(true);
Method->setRangeEnd(Destructor->getEndLoc());
Method->setDeclName(SemaRef.Context.DeclarationNames.getCXXDestructorName(
- SemaRef.Context.getCanonicalType(
- SemaRef.Context.getTypeDeclType(Record))));
+        SemaRef.Context.getCanonicalTagType(Record)));
} else if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
Method = CXXConversionDecl::Create(
SemaRef.Context, Record, StartLoc, NameInfo, T, TInfo,
@@ -3474,10 +3466,11 @@ Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
cast<CXXFoldExpr>(TC->getImmediatelyDeclaredConstraint())
->getEllipsisLoc(),
SourceRange(TC->getConceptNameLoc(),
- TC->hasExplicitTemplateArgs() ?
- TC->getTemplateArgsAsWritten()->getRAngleLoc() :
- TC->getConceptNameInfo().getEndLoc()),
- Unexpanded, TemplateArgs, Expand, RetainExpansion, NumExpanded))
+ TC->hasExplicitTemplateArgs()
+ ? TC->getTemplateArgsAsWritten()->getRAngleLoc()
+ : TC->getConceptNameInfo().getEndLoc()),
+ Unexpanded, TemplateArgs, /*FailOnPackProducingTemplates=*/true,
+ Expand, RetainExpansion, NumExpanded))
return nullptr;
}
}
@@ -3565,12 +3558,10 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
UnsignedOrNone OrigNumExpansions =
Expansion.getTypePtr()->getNumExpansions();
UnsignedOrNone NumExpansions = OrigNumExpansions;
- if (SemaRef.CheckParameterPacksForExpansion(Expansion.getEllipsisLoc(),
- Pattern.getSourceRange(),
- Unexpanded,
- TemplateArgs,
- Expand, RetainExpansion,
- NumExpansions))
+ if (SemaRef.CheckParameterPacksForExpansion(
+ Expansion.getEllipsisLoc(), Pattern.getSourceRange(), Unexpanded,
+ TemplateArgs, /*FailOnPackProducingTemplates=*/true, Expand,
+ RetainExpansion, NumExpansions))
return nullptr;
if (Expand) {
@@ -3736,12 +3727,10 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl(
bool Expand = true;
bool RetainExpansion = false;
UnsignedOrNone NumExpansions = std::nullopt;
- if (SemaRef.CheckParameterPacksForExpansion(D->getLocation(),
- TempParams->getSourceRange(),
- Unexpanded,
- TemplateArgs,
- Expand, RetainExpansion,
- NumExpansions))
+ if (SemaRef.CheckParameterPacksForExpansion(
+ D->getLocation(), TempParams->getSourceRange(), Unexpanded,
+ TemplateArgs, /*FailOnPackProducingTemplates=*/true, Expand,
+ RetainExpansion, NumExpansions))
return nullptr;
if (Expand) {
@@ -3793,19 +3782,18 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl(
D->getPosition(), D->isParameterPack(), D->getIdentifier(),
D->templateParameterKind(), D->wasDeclaredWithTypename(), InstParams);
if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
- NestedNameSpecifierLoc QualifierLoc =
- D->getDefaultArgument().getTemplateQualifierLoc();
- QualifierLoc =
- SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc, TemplateArgs);
+ const TemplateArgumentLoc &A = D->getDefaultArgument();
+ NestedNameSpecifierLoc QualifierLoc = A.getTemplateQualifierLoc();
+ // FIXME: Pass in the template keyword location.
TemplateName TName = SemaRef.SubstTemplateName(
- QualifierLoc, D->getDefaultArgument().getArgument().getAsTemplate(),
- D->getDefaultArgument().getTemplateNameLoc(), TemplateArgs);
+ A.getTemplateKWLoc(), QualifierLoc, A.getArgument().getAsTemplate(),
+ A.getTemplateNameLoc(), TemplateArgs);
if (!TName.isNull())
Param->setDefaultArgument(
SemaRef.Context,
TemplateArgumentLoc(SemaRef.Context, TemplateArgument(TName),
- D->getDefaultArgument().getTemplateQualifierLoc(),
- D->getDefaultArgument().getTemplateNameLoc()));
+ A.getTemplateKWLoc(), QualifierLoc,
+ A.getTemplateNameLoc()));
}
Param->setAccess(AS_public);
Param->setImplicit(D->isImplicit());
@@ -3907,7 +3895,7 @@ Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName)
if (auto *RD = dyn_cast<CXXRecordDecl>(SemaRef.CurContext))
NameInfo.setName(SemaRef.Context.DeclarationNames.getCXXConstructorName(
- SemaRef.Context.getCanonicalType(SemaRef.Context.getRecordType(RD))));
+ SemaRef.Context.getCanonicalTagType(RD)));
// We only need to do redeclaration lookups if we're in a class scope (in
// fact, it's not really even possible in non-class scopes).
@@ -4014,8 +4002,9 @@ Decl *TemplateDeclInstantiator::instantiateUnresolvedUsingDecl(
bool RetainExpansion = false;
UnsignedOrNone NumExpansions = std::nullopt;
if (SemaRef.CheckParameterPacksForExpansion(
- D->getEllipsisLoc(), D->getSourceRange(), Unexpanded, TemplateArgs,
- Expand, RetainExpansion, NumExpansions))
+ D->getEllipsisLoc(), D->getSourceRange(), Unexpanded, TemplateArgs,
+ /*FailOnPackProducingTemplates=*/true, Expand, RetainExpansion,
+ NumExpansions))
return nullptr;
// This declaration cannot appear within a function template signature,
@@ -4819,18 +4808,13 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
ClassTemplate->findPartialSpecialization(CTAI.CanonicalConverted,
InstParams, InsertPos);
- // Build the type that describes the converted template arguments of the class
- // template partial specialization.
- TypeSourceInfo *WrittenTy = SemaRef.Context.getTemplateSpecializationTypeInfo(
- TemplateName(ClassTemplate), TemplArgInfo->getLAngleLoc(),
- InstTemplateArgs, CTAI.CanonicalConverted);
-
// Create the class template partial specialization declaration.
ClassTemplatePartialSpecializationDecl *InstPartialSpec =
ClassTemplatePartialSpecializationDecl::Create(
SemaRef.Context, PartialSpec->getTagKind(), Owner,
PartialSpec->getBeginLoc(), PartialSpec->getLocation(), InstParams,
- ClassTemplate, CTAI.CanonicalConverted, WrittenTy->getType(),
+ ClassTemplate, CTAI.CanonicalConverted,
+ /*CanonInjectedTST=*/CanQualType(),
/*PrevDecl=*/nullptr);
InstPartialSpec->setTemplateArgsAsWritten(InstTemplateArgs);
@@ -4861,7 +4845,7 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
diag::err_partial_spec_redeclared)
<< InstPartialSpec;
SemaRef.Diag(PrevDecl->getLocation(), diag::note_prev_partial_spec_here)
- << SemaRef.Context.getTypeDeclType(PrevDecl);
+ << SemaRef.Context.getCanonicalTagType(PrevDecl);
return nullptr;
}
@@ -5685,7 +5669,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
};
Function->setDeclarationNameLoc(NameLocPointsToPattern());
- EnterExpressionEvaluationContext EvalContext(
+ EnterExpressionEvaluationContextForFunction EvalContext(
*this, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
Qualifiers ThisTypeQuals;
@@ -5750,15 +5734,19 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
QualType TransformRecordType(TypeLocBuilder &TLB, RecordTypeLoc TL) {
const RecordType *T = TL.getTypePtr();
RecordDecl *Record = cast_or_null<RecordDecl>(
- getDerived().TransformDecl(TL.getNameLoc(), T->getDecl()));
+ getDerived().TransformDecl(TL.getNameLoc(), T->getOriginalDecl()));
if (Record != OldDecl)
return Base::TransformRecordType(TLB, TL);
- QualType Result = getDerived().RebuildRecordType(NewDecl);
+ // FIXME: transform the rest of the record type.
+ QualType Result = getDerived().RebuildTagType(
+ ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt, NewDecl);
if (Result.isNull())
return QualType();
- RecordTypeLoc NewTL = TLB.push<RecordTypeLoc>(Result);
+ TagTypeLoc NewTL = TLB.push<RecordTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(SourceLocation());
+ NewTL.setQualifierLoc(NestedNameSpecifierLoc());
NewTL.setNameLoc(TL.getNameLoc());
return Result;
}
@@ -6413,12 +6401,10 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
bool ShouldExpand = false;
bool RetainExpansion = false;
UnsignedOrNone NumExpansions = std::nullopt;
- if (CheckParameterPacksForExpansion(Init->getEllipsisLoc(),
- BaseTL.getSourceRange(),
- Unexpanded,
- TemplateArgs, ShouldExpand,
- RetainExpansion,
- NumExpansions)) {
+ if (CheckParameterPacksForExpansion(
+ Init->getEllipsisLoc(), BaseTL.getSourceRange(), Unexpanded,
+ TemplateArgs, /*FailOnPackProducingTemplates=*/true, ShouldExpand,
+ RetainExpansion, NumExpansions)) {
AnyErrors = true;
New->setInvalidDecl();
continue;
@@ -6922,7 +6908,8 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
Args.addArgument(
getTrivialTemplateArgumentLoc(UnpackedArg, QualType(), Loc));
}
- QualType T = CheckTemplateIdType(TemplateName(TD), Loc, Args);
+ QualType T = CheckTemplateIdType(ElaboratedTypeKeyword::None,
+ TemplateName(TD), Loc, Args);
// We may get a non-null type with errors, in which case
// `getAsCXXRecordDecl` will return `nullptr`. For instance, this
// happens when one of the template arguments is an invalid
@@ -6986,18 +6973,15 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
// If our context used to be dependent, we may need to instantiate
// it before performing lookup into that context.
bool IsBeingInstantiated = false;
- if (CXXRecordDecl *Spec = dyn_cast<CXXRecordDecl>(ParentDC)) {
+ if (auto *Spec = dyn_cast<CXXRecordDecl>(ParentDC)) {
if (!Spec->isDependentContext()) {
- QualType T = Context.getTypeDeclType(Spec);
- const RecordType *Tag = T->getAs<RecordType>();
- assert(Tag && "type of non-dependent record is not a RecordType");
- if (Tag->isBeingDefined())
+ if (Spec->isEntityBeingDefined())
IsBeingInstantiated = true;
- if (!Tag->isBeingDefined() &&
- RequireCompleteType(Loc, T, diag::err_incomplete_type))
+ else if (RequireCompleteType(Loc, Context.getCanonicalTagType(Spec),
+ diag::err_incomplete_type))
return nullptr;
- ParentDC = Tag->getDecl();
+ ParentDC = Spec->getDefinitionOrSelf();
}
}
@@ -7043,8 +7027,8 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
// of member classes, and introduces ordering dependencies via
// template instantiation.
Diag(Loc, diag::err_member_not_yet_instantiated)
- << D->getDeclName()
- << Context.getTypeDeclType(cast<CXXRecordDecl>(ParentDC));
+ << D->getDeclName()
+ << Context.getCanonicalTagType(cast<CXXRecordDecl>(ParentDC));
Diag(D->getLocation(), diag::note_non_instantiated_member_here);
} else if (EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D)) {
// This enumeration constant was found when the template was defined,
@@ -7059,7 +7043,7 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
<< D->getDeclName()
<< Context.getTypeDeclType(cast<TypeDecl>(Spec->getDeclContext()));
Diag(Spec->getLocation(), diag::note_enum_specialized_here)
- << Context.getTypeDeclType(Spec);
+ << Context.getCanonicalTagType(Spec);
} else {
// We should have found something, but didn't.
llvm_unreachable("Unable to find instantiation of declaration!");
diff --git a/clang/lib/Sema/SemaTemplateVariadic.cpp b/clang/lib/Sema/SemaTemplateVariadic.cpp
index d2baa2e..0f72d6a 100644
--- a/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -65,6 +65,41 @@ class CollectUnexpandedParameterPacksVisitor
Unexpanded.push_back({T, Loc});
}
+ bool addUnexpanded(const SubstBuiltinTemplatePackType *T,
+ SourceLocation Loc = SourceLocation()) {
+ Unexpanded.push_back({T, Loc});
+ return true;
+ }
+
+ bool addUnexpanded(const TemplateSpecializationType *T,
+ SourceLocation Loc = SourceLocation()) {
+ assert(T->isCanonicalUnqualified() &&
+ isPackProducingBuiltinTemplateName(T->getTemplateName()));
+ Unexpanded.push_back({T, Loc});
+ return true;
+ }
+
+ /// Returns true iff it handled the traversal. If it returns false, the
+ /// caller must perform the traversal itself.
+ bool
+ TryTraverseSpecializationProducingPacks(const TemplateSpecializationType *T,
+ SourceLocation Loc) {
+ if (!isPackProducingBuiltinTemplateName(T->getTemplateName()))
+ return false;
+ // Canonical types are inputs to the initial substitution. Report them and
+ // do not recurse any further.
+ if (T->isCanonicalUnqualified()) {
+ addUnexpanded(T, Loc);
+ return true;
+ }
+ // For sugared types, do not use the default traversal, as it would be
+ // looking at (now irrelevant) template arguments. Instead, look at the
+ // result of the substitution; it usually contains a SubstPackType that
+ // needs to be expanded further.
+ DynamicRecursiveASTVisitor::TraverseType(T->desugar());
+ return true;
+ }
+
public:
explicit CollectUnexpandedParameterPacksVisitor(
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded)
@@ -123,6 +158,23 @@ class CollectUnexpandedParameterPacksVisitor
return DynamicRecursiveASTVisitor::TraverseTemplateName(Template);
}
+ bool
+ TraverseTemplateSpecializationTypeLoc(TemplateSpecializationTypeLoc T,
+ bool TraverseQualifier) override {
+ if (TryTraverseSpecializationProducingPacks(T.getTypePtr(),
+ T.getBeginLoc()))
+ return true;
+ return DynamicRecursiveASTVisitor::TraverseTemplateSpecializationTypeLoc(
+ T, TraverseQualifier);
+ }
+
+ bool TraverseTemplateSpecializationType(TemplateSpecializationType *T,
+ bool TraverseQualifier) override {
+ if (TryTraverseSpecializationProducingPacks(T, SourceLocation()))
+ return true;
+ return DynamicRecursiveASTVisitor::TraverseTemplateSpecializationType(T);
+ }
+
/// Suppress traversal into Objective-C container literal
/// elements that are pack expansions.
bool TraverseObjCDictionaryLiteral(ObjCDictionaryLiteral *E) override {
@@ -155,21 +207,22 @@ class CollectUnexpandedParameterPacksVisitor
/// Suppress traversal into types that do not contain
/// unexpanded parameter packs.
- bool TraverseType(QualType T) override {
+ bool TraverseType(QualType T, bool TraverseQualifier = true) override {
if ((!T.isNull() && T->containsUnexpandedParameterPack()) ||
InLambdaOrBlock)
- return DynamicRecursiveASTVisitor::TraverseType(T);
+ return DynamicRecursiveASTVisitor::TraverseType(T, TraverseQualifier);
return true;
}
/// Suppress traversal into types with location information
/// that do not contain unexpanded parameter packs.
- bool TraverseTypeLoc(TypeLoc TL) override {
+ bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true) override {
if ((!TL.getType().isNull() &&
TL.getType()->containsUnexpandedParameterPack()) ||
InLambdaOrBlock)
- return DynamicRecursiveASTVisitor::TraverseTypeLoc(TL);
+ return DynamicRecursiveASTVisitor::TraverseTypeLoc(TL,
+ TraverseQualifier);
return true;
}
@@ -195,10 +248,12 @@ class CollectUnexpandedParameterPacksVisitor
/// Suppress traversal of pack expansion expressions and types.
///@{
- bool TraversePackExpansionType(PackExpansionType *T) override {
+ bool TraversePackExpansionType(PackExpansionType *T,
+ bool TraverseQualifier) override {
return true;
}
- bool TraversePackExpansionTypeLoc(PackExpansionTypeLoc TL) override {
+ bool TraversePackExpansionTypeLoc(PackExpansionTypeLoc TL,
+ bool TraverseQualifier) override {
return true;
}
bool TraversePackExpansionExpr(PackExpansionExpr *E) override {
@@ -208,10 +263,12 @@ class CollectUnexpandedParameterPacksVisitor
bool TraversePackIndexingExpr(PackIndexingExpr *E) override {
return DynamicRecursiveASTVisitor::TraverseStmt(E->getIndexExpr());
}
- bool TraversePackIndexingType(PackIndexingType *E) override {
+ bool TraversePackIndexingType(PackIndexingType *E,
+ bool TraverseQualifier) override {
return DynamicRecursiveASTVisitor::TraverseStmt(E->getIndexExpr());
}
- bool TraversePackIndexingTypeLoc(PackIndexingTypeLoc TL) override {
+ bool TraversePackIndexingTypeLoc(PackIndexingTypeLoc TL,
+ bool TraverseQualifier) override {
return DynamicRecursiveASTVisitor::TraverseStmt(TL.getIndexExpr());
}
@@ -320,6 +377,14 @@ class CollectUnexpandedParameterPacksVisitor
return DynamicRecursiveASTVisitor::TraverseUnresolvedLookupExpr(E);
}
+ bool TraverseSubstBuiltinTemplatePackType(SubstBuiltinTemplatePackType *T,
+ bool TraverseQualifier) override {
+ addUnexpanded(T);
+ // Do not call into the base implementation, to suppress traversal of the
+ // substituted types.
+ return true;
+ }
+
#ifndef NDEBUG
bool TraverseFunctionParmPackExpr(FunctionParmPackExpr *) override {
ContainsIntermediatePacks = true;
@@ -525,8 +590,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
// C++0x [temp.variadic]p5:
// An appearance of a name of a parameter pack that is not expanded is
// ill-formed.
- if (!SS.getScopeRep() ||
- !SS.getScopeRep()->containsUnexpandedParameterPack())
+ if (!SS.getScopeRep().containsUnexpandedParameterPack())
return false;
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
@@ -642,6 +706,23 @@ void Sema::collectUnexpandedParameterPacks(
}
ParsedTemplateArgument
+Sema::ActOnTemplateTemplateArgument(const ParsedTemplateArgument &Arg) {
+ if (Arg.isInvalid())
+ return Arg;
+
+ // We do not allow referencing builtin templates that produce multiple
+ // values; they would not have well-defined semantics outside template
+ // arguments.
+ auto *T = dyn_cast_or_null<BuiltinTemplateDecl>(
+ Arg.getAsTemplate().get().getAsTemplateDecl());
+ if (T && T->isPackProducingBuiltinTemplate())
+ diagnoseMissingTemplateArguments(Arg.getAsTemplate().get(),
+ Arg.getNameLoc());
+
+ return Arg;
+}
+
+ParsedTemplateArgument
Sema::ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc) {
if (Arg.isInvalid())
@@ -654,7 +735,7 @@ Sema::ActOnPackExpansion(const ParsedTemplateArgument &Arg,
return ParsedTemplateArgument();
return ParsedTemplateArgument(Arg.getKind(), Result.get().getAsOpaquePtr(),
- Arg.getLocation());
+ Arg.getNameLoc());
}
case ParsedTemplateArgument::NonType: {
@@ -663,12 +744,12 @@ Sema::ActOnPackExpansion(const ParsedTemplateArgument &Arg,
return ParsedTemplateArgument();
return ParsedTemplateArgument(Arg.getKind(), Result.get(),
- Arg.getLocation());
+ Arg.getNameLoc());
}
case ParsedTemplateArgument::Template:
if (!Arg.getAsTemplate().get().containsUnexpandedParameterPack()) {
- SourceRange R(Arg.getLocation());
+ SourceRange R(Arg.getNameLoc());
if (Arg.getScopeSpec().isValid())
R.setBegin(Arg.getScopeSpec().getBeginLoc());
Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
@@ -727,7 +808,7 @@ QualType Sema::CheckPackExpansion(QualType Pattern, SourceRange PatternRange,
if (!Pattern->containsUnexpandedParameterPack() &&
!Pattern->getContainedDeducedType()) {
Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
- << PatternRange;
+ << PatternRange;
return QualType();
}
@@ -761,7 +842,8 @@ ExprResult Sema::CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
bool Sema::CheckParameterPacksForExpansion(
SourceLocation EllipsisLoc, SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
- const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool FailOnPackProducingTemplates, bool &ShouldExpand,
bool &RetainExpansion, UnsignedOrNone &NumExpansions) {
ShouldExpand = true;
RetainExpansion = false;
@@ -777,12 +859,31 @@ bool Sema::CheckParameterPacksForExpansion(
IdentifierInfo *Name;
bool IsVarDeclPack = false;
FunctionParmPackExpr *BindingPack = nullptr;
+ std::optional<unsigned> NumPrecomputedArguments;
- if (const TemplateTypeParmType *TTP =
- ParmPack.first.dyn_cast<const TemplateTypeParmType *>()) {
+ if (auto *TTP = ParmPack.first.dyn_cast<const TemplateTypeParmType *>()) {
Depth = TTP->getDepth();
Index = TTP->getIndex();
Name = TTP->getIdentifier();
+ } else if (auto *TST =
+ ParmPack.first
+ .dyn_cast<const TemplateSpecializationType *>()) {
+ assert(isPackProducingBuiltinTemplateName(TST->getTemplateName()));
+ // Delay expansion, substitution is required to know the size.
+ ShouldExpand = false;
+ if (!FailOnPackProducingTemplates)
+ continue;
+
+ // Expanding such packs is not yet supported in this context.
+ return Diag(PatternRange.getBegin().isValid() ? PatternRange.getBegin()
+ : EllipsisLoc,
+ diag::err_unsupported_builtin_template_pack_expansion)
+ << TST->getTemplateName();
+ } else if (auto *S =
+ ParmPack.first
+ .dyn_cast<const SubstBuiltinTemplatePackType *>()) {
+ Name = nullptr;
+ NumPrecomputedArguments = S->getNumArgs();
} else {
NamedDecl *ND = cast<NamedDecl *>(ParmPack.first);
if (isa<VarDecl>(ND))
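
To make the new FailOnPackProducingTemplates flag concrete: a pack produced by a builtin template has no size until substitution, so callers that cannot delay expansion must fail up front. A minimal sketch, assuming a hypothetical pack-producing builtin spelled __builtin_dedup_pack (the spelling is illustrative, not taken from this patch):

    #include <tuple>

    // The builtin's result pack has an unknown size until Ts is substituted,
    // so CheckParameterPacksForExpansion sets ShouldExpand = false here.
    template <class... Ts>
    struct Uniqued {
      using type = std::tuple<__builtin_dedup_pack<Ts...>...>;
    };

    // Contexts that must know the pack size immediately pass
    // FailOnPackProducingTemplates = true and get
    // err_unsupported_builtin_template_pack_expansion instead.
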
@@ -822,6 +923,8 @@ bool Sema::CheckParameterPacksForExpansion(
}
} else if (BindingPack) {
NewPackSize = BindingPack->getNumExpansions();
+ } else if (NumPrecomputedArguments) {
+ NewPackSize = *NumPrecomputedArguments;
} else {
// If we don't have a template argument at this depth/index, then we
// cannot expand the pack expansion. Make a note of this, but we still
@@ -963,6 +1066,21 @@ UnsignedOrNone Sema::getNumArgumentsInExpansionFromUnexpanded(
Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>()) {
Depth = TTP->getDepth();
Index = TTP->getIndex();
+ } else if (auto *TST =
+ Unexpanded[I]
+ .first.dyn_cast<const TemplateSpecializationType *>()) {
+ // This is a dependent pack; we are not ready to expand it yet.
+ assert(isPackProducingBuiltinTemplateName(TST->getTemplateName()));
+ (void)TST;
+ return std::nullopt;
+ } else if (auto *PST =
+ Unexpanded[I]
+ .first
+ .dyn_cast<const SubstBuiltinTemplatePackType *>()) {
+ assert((!Result || *Result == PST->getNumArgs()) &&
+ "inconsistent pack sizes");
+ Result = PST->getNumArgs();
+ continue;
} else {
NamedDecl *ND = cast<NamedDecl *>(Unexpanded[I].first);
if (isa<VarDecl>(ND)) {
@@ -1115,8 +1233,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
break;
case DeclaratorChunk::MemberPointer:
- if (Chunk.Mem.Scope().getScopeRep() &&
- Chunk.Mem.Scope().getScopeRep()->containsUnexpandedParameterPack())
+ if (Chunk.Mem.Scope().getScopeRep().containsUnexpandedParameterPack())
return true;
break;
}
@@ -1300,9 +1417,9 @@ TemplateArgumentLoc Sema::getTemplateArgumentPackExpansionPattern(
case TemplateArgument::TemplateExpansion:
Ellipsis = OrigLoc.getTemplateEllipsisLoc();
NumExpansions = Argument.getNumTemplateExpansions();
- return TemplateArgumentLoc(Context, Argument.getPackExpansionPattern(),
- OrigLoc.getTemplateQualifierLoc(),
- OrigLoc.getTemplateNameLoc());
+ return TemplateArgumentLoc(
+ Context, Argument.getPackExpansionPattern(), OrigLoc.getTemplateKWLoc(),
+ OrigLoc.getTemplateQualifierLoc(), OrigLoc.getTemplateNameLoc());
case TemplateArgument::Declaration:
case TemplateArgument::NullPtr:
diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp
index 1289bed..0f655d7 100644
--- a/clang/lib/Sema/SemaType.cpp
+++ b/clang/lib/Sema/SemaType.cpp
@@ -156,6 +156,7 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
case ParsedAttr::AT_Allocating: \
case ParsedAttr::AT_Regparm: \
case ParsedAttr::AT_CFIUncheckedCallee: \
+ case ParsedAttr::AT_CFISalt: \
case ParsedAttr::AT_CmseNSCall: \
case ParsedAttr::AT_ArmStreaming: \
case ParsedAttr::AT_ArmStreamingCompatible: \
@@ -1211,14 +1212,11 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
DS.getTypeSpecSign() == TypeSpecifierSign::Unspecified &&
"No qualifiers on tag names!");
+ ElaboratedTypeKeyword Keyword =
+ KeywordHelpers::getKeywordForTypeSpec(DS.getTypeSpecType());
// TypeQuals handled by caller.
- Result = Context.getTypeDeclType(D);
-
- // In both C and C++, make an ElaboratedType.
- ElaboratedTypeKeyword Keyword
- = ElaboratedType::getKeywordForTypeSpec(DS.getTypeSpecType());
- Result = S.getElaboratedType(Keyword, DS.getTypeSpecScope(), Result,
- DS.isTypeSpecOwned() ? D : nullptr);
+ Result = Context.getTagType(Keyword, DS.getTypeSpecScope().getScopeRep(), D,
+ DS.isTypeSpecOwned());
break;
}
case DeclSpec::TST_typename: {
@@ -1241,7 +1239,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
assert(!Result.isNull() && "Didn't get a type for typeof?");
if (!Result->isDependentType())
if (const TagType *TT = Result->getAs<TagType>())
- S.DiagnoseUseOfDecl(TT->getDecl(), DS.getTypeSpecTypeLoc());
+ S.DiagnoseUseOfDecl(TT->getOriginalDecl(), DS.getTypeSpecTypeLoc());
// TypeQuals handled by caller.
Result = Context.getTypeOfType(
Result, DS.getTypeSpecType() == DeclSpec::TST_typeof_unqualType
@@ -2085,7 +2083,7 @@ QualType Sema::BuildArrayType(QualType T, ArraySizeModifier ASM,
// an inheritance model, even if it's inside an unused typedef.
if (Context.getTargetInfo().getCXXABI().isMicrosoft())
if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>())
- if (!MPTy->getQualifier()->isDependent())
+ if (!MPTy->getQualifier().isDependent())
(void)isCompleteType(Loc, T);
} else {
@@ -2117,10 +2115,10 @@ QualType Sema::BuildArrayType(QualType T, ArraySizeModifier ASM,
return QualType();
}
- if (const RecordType *EltTy = T->getAs<RecordType>()) {
+ if (const auto *RD = T->getAsRecordDecl()) {
// If the element type is a struct or union that contains a variadic
// array, accept it as a GNU extension: C99 6.7.2.1p2.
- if (EltTy->getDecl()->hasFlexibleArrayMember())
+ if (RD->hasFlexibleArrayMember())
Diag(Loc, diag::ext_flexible_array_in_array) << T;
} else if (T->isObjCObjectType()) {
Diag(Loc, diag::err_objc_array_of_interfaces) << T;
@@ -3460,7 +3458,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
if (DiagID != 0) {
SemaRef.Diag(OwnedTagDecl->getLocation(), DiagID)
- << SemaRef.Context.getTypeDeclType(OwnedTagDecl);
+ << SemaRef.Context.getCanonicalTagType(OwnedTagDecl);
D.setInvalidType(true);
}
}
@@ -3653,11 +3651,22 @@ static void warnAboutRedundantParens(Sema &S, Declarator &D, QualType T) {
// here: even (e.g.) "int ::x" is visually ambiguous even though it's
// formally unambiguous.
if (StartsWithDeclaratorId && D.getCXXScopeSpec().isValid()) {
- for (NestedNameSpecifier *NNS = D.getCXXScopeSpec().getScopeRep(); NNS;
- NNS = NNS->getPrefix()) {
- if (NNS->getKind() == NestedNameSpecifier::Global)
+ NestedNameSpecifier NNS = D.getCXXScopeSpec().getScopeRep();
+ for (;;) {
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Global:
return;
+ case NestedNameSpecifier::Kind::Type:
+ NNS = NNS.getAsType()->getPrefix();
+ continue;
+ case NestedNameSpecifier::Kind::Namespace:
+ NNS = NNS.getAsNamespaceAndPrefix().Prefix;
+ continue;
+ default:
+ goto out;
+ }
}
+ out:;
}
S.Diag(Paren.Loc, diag::warn_redundant_parens_around_declarator)
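
The rewritten loop walks the qualifier's prefix chain by kind instead of chasing getPrefix() on a pointer; the warning it guards behaves like this sketch:

    void f();
    void (f)();    // warning: redundant parentheses around declarator
    void (::f)();  // no warning: the walk reaches Kind::Global and returns,
                   // since "void ::f()" would be visually ambiguous
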
@@ -3964,9 +3973,7 @@ classifyPointerDeclarator(Sema &S, QualType type, Declarator &declarator,
if (numNormalPointers == 0)
return PointerDeclaratorKind::NonPointer;
- if (auto recordType = type->getAs<RecordType>()) {
- RecordDecl *recordDecl = recordType->getDecl();
-
+ if (auto *recordDecl = type->getAsRecordDecl()) {
// If this is CFErrorRef*, report it as such.
if (numNormalPointers == 2 && numTypeSpecifierPointers < 2 &&
S.ObjC().isCFError(recordDecl)) {
@@ -5101,7 +5108,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Types shall not be defined in return or parameter types.
TagDecl *Tag = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
S.Diag(Tag->getLocation(), diag::err_type_defined_in_result_type)
- << Context.getTypeDeclType(Tag);
+ << Context.getCanonicalTagType(Tag);
}
// Exception specs are not allowed in typedefs. Complain, but add it
@@ -5319,7 +5326,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
state.getDeclarator()
.getCXXScopeSpec()
.getScopeRep()
- ->getKind() == NestedNameSpecifier::TypeSpec) ||
+ .getKind() == NestedNameSpecifier::Kind::Type) ||
state.getDeclarator().getContext() ==
DeclaratorContext::Member ||
state.getDeclarator().getContext() ==
@@ -5898,7 +5905,49 @@ namespace {
// int __attr * __attr * __attr *p;
void VisitPointerTypeLoc(PointerTypeLoc TL) { Visit(TL.getNextTypeLoc()); }
void VisitTypedefTypeLoc(TypedefTypeLoc TL) {
- TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ if (DS.getTypeSpecType() == TST_typename) {
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ if (TInfo) {
+ TL.copy(TInfo->getTypeLoc().castAs<TypedefTypeLoc>());
+ return;
+ }
+ }
+ TL.set(TL.getTypePtr()->getKeyword() != ElaboratedTypeKeyword::None
+ ? DS.getTypeSpecTypeLoc()
+ : SourceLocation(),
+ DS.getTypeSpecScope().getWithLocInContext(Context),
+ DS.getTypeSpecTypeNameLoc());
+ }
+ void VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
+ if (DS.getTypeSpecType() == TST_typename) {
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ if (TInfo) {
+ TL.copy(TInfo->getTypeLoc().castAs<UnresolvedUsingTypeLoc>());
+ return;
+ }
+ }
+ TL.set(TL.getTypePtr()->getKeyword() != ElaboratedTypeKeyword::None
+ ? DS.getTypeSpecTypeLoc()
+ : SourceLocation(),
+ DS.getTypeSpecScope().getWithLocInContext(Context),
+ DS.getTypeSpecTypeNameLoc());
+ }
+ void VisitUsingTypeLoc(UsingTypeLoc TL) {
+ if (DS.getTypeSpecType() == TST_typename) {
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ if (TInfo) {
+ TL.copy(TInfo->getTypeLoc().castAs<UsingTypeLoc>());
+ return;
+ }
+ }
+ TL.set(TL.getTypePtr()->getKeyword() != ElaboratedTypeKeyword::None
+ ? DS.getTypeSpecTypeLoc()
+ : SourceLocation(),
+ DS.getTypeSpecScope().getWithLocInContext(Context),
+ DS.getTypeSpecTypeNameLoc());
}
void VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeLoc());
@@ -5929,16 +5978,9 @@ namespace {
}
TypeLoc OldTL = TInfo->getTypeLoc();
- if (TInfo->getType()->getAs<ElaboratedType>()) {
- ElaboratedTypeLoc ElabTL = OldTL.castAs<ElaboratedTypeLoc>();
- TemplateSpecializationTypeLoc NamedTL = ElabTL.getNamedTypeLoc()
- .castAs<TemplateSpecializationTypeLoc>();
- TL.copy(NamedTL);
- } else {
- TL.copy(OldTL.castAs<TemplateSpecializationTypeLoc>());
- assert(TL.getRAngleLoc() == OldTL.castAs<TemplateSpecializationTypeLoc>().getRAngleLoc());
- }
-
+ TL.copy(OldTL.castAs<TemplateSpecializationTypeLoc>());
+ assert(TL.getRAngleLoc() ==
+ OldTL.castAs<TemplateSpecializationTypeLoc>().getRAngleLoc());
}
void VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
assert(DS.getTypeSpecType() == DeclSpec::TST_typeofExpr ||
@@ -5987,24 +6029,6 @@ namespace {
TL.expandBuiltinRange(DS.getTypeSpecWidthRange());
}
}
- void VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- if (DS.getTypeSpecType() == TST_typename) {
- TypeSourceInfo *TInfo = nullptr;
- Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
- if (TInfo)
- if (auto ETL = TInfo->getTypeLoc().getAs<ElaboratedTypeLoc>()) {
- TL.copy(ETL);
- return;
- }
- }
- const ElaboratedType *T = TL.getTypePtr();
- TL.setElaboratedKeywordLoc(T->getKeyword() != ElaboratedTypeKeyword::None
- ? DS.getTypeSpecTypeLoc()
- : SourceLocation());
- const CXXScopeSpec& SS = DS.getTypeSpecScope();
- TL.setQualifierLoc(SS.getWithLocInContext(Context));
- Visit(TL.getNextTypeLoc().getUnqualifiedLoc());
- }
void VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
assert(DS.getTypeSpecType() == TST_typename);
TypeSourceInfo *TInfo = nullptr;
@@ -6063,7 +6087,29 @@ namespace {
ASTTemplateArgumentListInfo::Create(Context, TemplateArgsInfo));
TL.setConceptReference(CR);
}
+ void VisitDeducedTemplateSpecializationTypeLoc(
+ DeducedTemplateSpecializationTypeLoc TL) {
+ assert(DS.getTypeSpecType() == TST_typename);
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ assert(TInfo);
+ TL.copy(
+ TInfo->getTypeLoc().castAs<DeducedTemplateSpecializationTypeLoc>());
+ }
void VisitTagTypeLoc(TagTypeLoc TL) {
+ if (DS.getTypeSpecType() == TST_typename) {
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ if (TInfo) {
+ TL.copy(TInfo->getTypeLoc().castAs<TagTypeLoc>());
+ return;
+ }
+ }
+ TL.setElaboratedKeywordLoc(TL.getTypePtr()->getKeyword() !=
+ ElaboratedTypeKeyword::None
+ ? DS.getTypeSpecTypeLoc()
+ : SourceLocation());
+ TL.setQualifierLoc(DS.getTypeSpecScope().getWithLocInContext(Context));
TL.setNameLoc(DS.getTypeSpecTypeNameLoc());
}
void VisitAtomicTypeLoc(AtomicTypeLoc TL) {
@@ -7029,9 +7075,6 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
if (const TypedefType *TT = dyn_cast<TypedefType>(Desugared)) {
Desugared = TT->desugar();
continue;
- } else if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(Desugared)) {
- Desugared = ET->desugar();
- continue;
}
const AttributedType *AT = dyn_cast<AttributedType>(Desugared);
if (!AT)
@@ -7939,6 +7982,36 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
return true;
}
+ if (attr.getKind() == ParsedAttr::AT_CFISalt) {
+ if (attr.getNumArgs() != 1)
+ return true;
+
+ StringRef Argument;
+ if (!S.checkStringLiteralArgumentAttr(attr, 0, Argument))
+ return true;
+
+ // Delay if this is not a function type.
+ if (!unwrapped.isFunctionType())
+ return false;
+
+ const auto *FnTy = unwrapped.get()->getAs<FunctionProtoType>();
+ if (!FnTy) {
+ S.Diag(attr.getLoc(), diag::err_attribute_wrong_decl_type)
+ << attr << attr.isRegularKeywordAttribute()
+ << ExpectedFunctionWithProtoType;
+ attr.setInvalid();
+ return true;
+ }
+
+ FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo();
+ EPI.ExtraAttributeInfo.CFISalt = Argument;
+
+ QualType newtype = S.Context.getFunctionType(FnTy->getReturnType(),
+ FnTy->getParamTypes(), EPI);
+ type = unwrapped.wrap(S, newtype->getAs<FunctionType>());
+ return true;
+ }
+
if (attr.getKind() == ParsedAttr::AT_ArmStreaming ||
attr.getKind() == ParsedAttr::AT_ArmStreamingCompatible ||
attr.getKind() == ParsedAttr::AT_ArmPreserves ||
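
The new branch accepts exactly one string-literal argument and records it in the function prototype's extra attribute info. A usage sketch, assuming the attribute's source spelling is cfi_salt (inferred from ParsedAttr::AT_CFISalt, not confirmed by this hunk):

    // The "cfi_salt" spelling is an assumption for illustration.
    void handler_a(int) __attribute__((cfi_salt("v1")));
    void handler_b(int) __attribute__((cfi_salt("v1"))); // same salt as a
    void handler_c(int) __attribute__((cfi_salt("v2"))); // distinct CFI group

    // Function pointer types carry the salt as well:
    typedef void (*callback_t)(int) __attribute__((cfi_salt("v1")));
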
@@ -9170,11 +9243,9 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
if (RequireCompleteTypeImpl(Loc, T, Kind, &Diagnoser))
return true;
- if (const TagType *Tag = T->getAs<TagType>()) {
- if (!Tag->getDecl()->isCompleteDefinitionRequired()) {
- Tag->getDecl()->setCompleteDefinitionRequired();
- Consumer.HandleTagDeclRequiredDefinition(Tag->getDecl());
- }
+ if (auto *TD = T->getAsTagDecl(); TD && !TD->isCompleteDefinitionRequired()) {
+ TD->setCompleteDefinitionRequired();
+ Consumer.HandleTagDeclRequiredDefinition(TD);
}
return false;
}
@@ -9312,7 +9383,7 @@ bool Sema::hasReachableDefinition(NamedDecl *D, NamedDecl **Suggested,
/// Locks in the inheritance model for the given class and all of its bases.
static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
- RD = RD->getMostRecentNonInjectedDecl();
+ RD = RD->getMostRecentDecl();
if (!RD->hasAttr<MSInheritanceAttr>()) {
MSInheritanceModel IM;
bool BestCase = false;
@@ -9352,10 +9423,10 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// assert(!T->isDependentType() &&
// "Can't ask whether a dependent type is complete");
- if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>()) {
+ if (const auto *MPTy = dyn_cast<MemberPointerType>(T.getCanonicalType())) {
if (CXXRecordDecl *RD = MPTy->getMostRecentCXXRecordDecl();
RD && !RD->isDependentType()) {
- QualType T = Context.getTypeDeclType(RD);
+ CanQualType T = Context.getCanonicalTagType(RD);
if (getLangOpts().CompleteMemberPointers && !RD->isBeingDefined() &&
RequireCompleteType(Loc, T, Kind, diag::err_memptr_incomplete))
return true;
@@ -9492,10 +9563,10 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// If the type was a forward declaration of a class/struct/union
// type, produce a note.
if (Tag && !Tag->isInvalidDecl() && !Tag->getLocation().isInvalid())
- Diag(Tag->getLocation(),
- Tag->isBeingDefined() ? diag::note_type_being_defined
- : diag::note_forward_declaration)
- << Context.getTagDeclType(Tag);
+ Diag(Tag->getLocation(), Tag->isBeingDefined()
+ ? diag::note_type_being_defined
+ : diag::note_forward_declaration)
+ << Context.getCanonicalTagType(Tag);
// If the Objective-C class was a forward declaration, produce a note.
if (IFace && !IFace->isInvalidDecl() && !IFace->getLocation().isInvalid())
@@ -9546,18 +9617,16 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
if (T->isVariableArrayType())
return true;
- const RecordType *RT = ElemType->getAs<RecordType>();
- if (!RT)
+ if (!ElemType->isRecordType())
return true;
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
-
// A partially-defined class type can't be a literal type, because a literal
// class type must have a trivial destructor (which can't be checked until
// the class definition is complete).
if (RequireCompleteType(Loc, ElemType, diag::note_non_literal_incomplete, T))
return true;
+ const auto *RD = ElemType->castAsCXXRecordDecl();
// [expr.prim.lambda]p3:
// This class type is [not] a literal type.
if (RD->isLambda() && !getLangOpts().CPlusPlus17) {
@@ -9628,15 +9697,6 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireLiteralType(Loc, T, Diagnoser);
}
-QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword,
- const CXXScopeSpec &SS, QualType T,
- TagDecl *OwnedTagDecl) {
- if (T.isNull())
- return T;
- return Context.getElaboratedType(
- Keyword, SS.isValid() ? SS.getScopeRep() : nullptr, T, OwnedTagDecl);
-}
-
QualType Sema::BuildTypeofExprType(Expr *E, TypeOfKind Kind) {
assert(!E->hasPlaceholderType() && "unexpected placeholder");
@@ -9647,7 +9707,7 @@ QualType Sema::BuildTypeofExprType(Expr *E, TypeOfKind Kind) {
if (!E->isTypeDependent()) {
QualType T = E->getType();
if (const TagType *TT = T->getAs<TagType>())
- DiagnoseUseOfDecl(TT->getDecl(), E->getExprLoc());
+ DiagnoseUseOfDecl(TT->getOriginalDecl(), E->getExprLoc());
}
return Context.getTypeOfExprType(E, Kind);
}
@@ -9813,13 +9873,19 @@ QualType Sema::BuildPackIndexingType(QualType Pattern, Expr *IndexExpr,
static QualType GetEnumUnderlyingType(Sema &S, QualType BaseType,
SourceLocation Loc) {
assert(BaseType->isEnumeralType());
- EnumDecl *ED = BaseType->castAs<EnumType>()->getDecl();
- assert(ED && "EnumType has no EnumDecl");
+ EnumDecl *ED = BaseType->castAs<EnumType>()->getOriginalDecl();
S.DiagnoseUseOfDecl(ED, Loc);
QualType Underlying = ED->getIntegerType();
- assert(!Underlying.isNull());
+ if (Underlying.isNull()) {
+ // This is an enum without a fixed underlying type whose body we skipped
+ // parsing because we saw its definition previously in another module.
+ // Use that definition's integer type in this case.
+ assert(ED->isThisDeclarationADemotedDefinition());
+ Underlying = ED->getDefinition()->getIntegerType();
+ assert(!Underlying.isNull());
+ }
return Underlying;
}
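
The fallback covers a modules corner case: a later definition of an enum without a fixed underlying type can be demoted, with its body left unparsed, so getIntegerType() is null on that declaration. A schematic sketch (the module layout is illustrative):

    // Module A provides the parsed definition:
    enum E { X, Y };   // underlying integer type computed from the body

    // Module B sees another definition of E; Clang skips parsing its body
    // and demotes it, so that declaration's getIntegerType() stays null.

    // A consumer importing both still works, because the code above walks
    // to ED->getDefinition() to fetch the underlying type:
    __underlying_type(E) value = 0;
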
diff --git a/clang/lib/Sema/SemaTypeTraits.cpp b/clang/lib/Sema/SemaTypeTraits.cpp
index 1d8687e..b779759 100644
--- a/clang/lib/Sema/SemaTypeTraits.cpp
+++ b/clang/lib/Sema/SemaTypeTraits.cpp
@@ -32,8 +32,7 @@ static CXXMethodDecl *LookupSpecialMemberFromXValue(Sema &SemaRef,
RD = RD->getDefinition();
SourceLocation LookupLoc = RD->getLocation();
- CanQualType CanTy = SemaRef.getASTContext().getCanonicalType(
- SemaRef.getASTContext().getTagDeclType(RD));
+ CanQualType CanTy = SemaRef.getASTContext().getCanonicalTagType(RD);
DeclarationName Name;
Expr *Arg = nullptr;
unsigned NumArgs;
@@ -557,12 +556,11 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
}
}
-static bool HasNoThrowOperator(const RecordType *RT, OverloadedOperatorKind Op,
+static bool HasNoThrowOperator(CXXRecordDecl *RD, OverloadedOperatorKind Op,
Sema &Self, SourceLocation KeyLoc, ASTContext &C,
bool (CXXRecordDecl::*HasTrivial)() const,
bool (CXXRecordDecl::*HasNonTrivial)() const,
bool (CXXMethodDecl::*IsDesiredOp)() const) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
if ((RD->*HasTrivial)() && !(RD->*HasNonTrivial)())
return true;
@@ -599,6 +597,7 @@ static bool HasNonDeletedDefaultedEqualityComparison(Sema &S,
if (Decl->isLambda())
return Decl->isCapturelessLambda();
+ CanQualType T = S.Context.getCanonicalTagType(Decl);
{
EnterExpressionEvaluationContext UnevaluatedContext(
S, Sema::ExpressionEvaluationContext::Unevaluated);
@@ -606,10 +605,7 @@ static bool HasNonDeletedDefaultedEqualityComparison(Sema &S,
Sema::ContextRAII TUContext(S, S.Context.getTranslationUnitDecl());
// const ClassT& obj;
- OpaqueValueExpr Operand(
- KeyLoc,
- Decl->getTypeForDecl()->getCanonicalTypeUnqualified().withConst(),
- ExprValueKind::VK_LValue);
+ OpaqueValueExpr Operand(KeyLoc, T.withConst(), ExprValueKind::VK_LValue);
UnresolvedSet<16> Functions;
// obj == obj;
S.LookupBinOp(S.TUScope, {}, BinaryOperatorKind::BO_EQ, Functions);
@@ -628,8 +624,7 @@ static bool HasNonDeletedDefaultedEqualityComparison(Sema &S,
return false;
if (!ParamT->isReferenceType() && !Decl->isTriviallyCopyable())
return false;
- if (ParamT.getNonReferenceType()->getUnqualifiedDesugaredType() !=
- Decl->getTypeForDecl())
+ if (!S.Context.hasSameUnqualifiedType(ParamT.getNonReferenceType(), T))
return false;
}
@@ -1010,8 +1005,8 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
if (T.isPODType(C) || T->isObjCLifetimeType())
return true;
- if (const RecordType *RT = T->getAs<RecordType>())
- return HasNoThrowOperator(RT, OO_Equal, Self, KeyLoc, C,
+ if (auto *RD = T->getAsCXXRecordDecl())
+ return HasNoThrowOperator(RD, OO_Equal, Self, KeyLoc, C,
&CXXRecordDecl::hasTrivialCopyAssignment,
&CXXRecordDecl::hasNonTrivialCopyAssignment,
&CXXMethodDecl::isCopyAssignmentOperator);
@@ -1023,8 +1018,8 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
if (T.isPODType(C))
return true;
- if (const RecordType *RT = C.getBaseElementType(T)->getAs<RecordType>())
- return HasNoThrowOperator(RT, OO_Equal, Self, KeyLoc, C,
+ if (auto *RD = C.getBaseElementType(T)->getAsCXXRecordDecl())
+ return HasNoThrowOperator(RD, OO_Equal, Self, KeyLoc, C,
&CXXRecordDecl::hasTrivialMoveAssignment,
&CXXRecordDecl::hasNonTrivialMoveAssignment,
&CXXMethodDecl::isMoveAssignmentOperator);
@@ -1588,8 +1583,8 @@ bool Sema::BuiltinIsBaseOf(SourceLocation RhsTLoc, QualType LhsT,
// Base and Derived are not unions and name the same class type without
// regard to cv-qualifiers.
- const RecordType *lhsRecord = LhsT->getAs<RecordType>();
- const RecordType *rhsRecord = RhsT->getAs<RecordType>();
+ const RecordType *lhsRecord = LhsT->getAsCanonical<RecordType>();
+ const RecordType *rhsRecord = RhsT->getAsCanonical<RecordType>();
if (!rhsRecord || !lhsRecord) {
const ObjCObjectType *LHSObjTy = LhsT->getAs<ObjCObjectType>();
const ObjCObjectType *RHSObjTy = RhsT->getAs<ObjCObjectType>();
@@ -1613,9 +1608,9 @@ bool Sema::BuiltinIsBaseOf(SourceLocation RhsTLoc, QualType LhsT,
// Unions are never base classes, and never have base classes.
// It doesn't matter if they are complete or not. See PR#41843
- if (lhsRecord && lhsRecord->getDecl()->isUnion())
+ if (lhsRecord && lhsRecord->getOriginalDecl()->isUnion())
return false;
- if (rhsRecord && rhsRecord->getDecl()->isUnion())
+ if (rhsRecord && rhsRecord->getOriginalDecl()->isUnion())
return false;
if (lhsRecord == rhsRecord)
@@ -1629,8 +1624,8 @@ bool Sema::BuiltinIsBaseOf(SourceLocation RhsTLoc, QualType LhsT,
diag::err_incomplete_type_used_in_type_trait_expr))
return false;
- return cast<CXXRecordDecl>(rhsRecord->getDecl())
- ->isDerivedFrom(cast<CXXRecordDecl>(lhsRecord->getDecl()));
+ return cast<CXXRecordDecl>(rhsRecord->getOriginalDecl())
+ ->isDerivedFrom(cast<CXXRecordDecl>(lhsRecord->getOriginalDecl()));
}
static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT,
@@ -1648,8 +1643,8 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT,
return Self.BuiltinIsBaseOf(Rhs->getTypeLoc().getBeginLoc(), LhsT, RhsT);
case BTT_IsVirtualBaseOf: {
- const RecordType *BaseRecord = LhsT->getAs<RecordType>();
- const RecordType *DerivedRecord = RhsT->getAs<RecordType>();
+ const RecordType *BaseRecord = LhsT->getAsCanonical<RecordType>();
+ const RecordType *DerivedRecord = RhsT->getAsCanonical<RecordType>();
if (!BaseRecord || !DerivedRecord) {
DiagnoseVLAInCXXTypeTrait(Self, Lhs,
@@ -1670,8 +1665,9 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT,
diag::err_incomplete_type))
return false;
- return cast<CXXRecordDecl>(DerivedRecord->getDecl())
- ->isVirtuallyDerivedFrom(cast<CXXRecordDecl>(BaseRecord->getDecl()));
+ return cast<CXXRecordDecl>(DerivedRecord->getOriginalDecl())
+ ->isVirtuallyDerivedFrom(
+ cast<CXXRecordDecl>(BaseRecord->getOriginalDecl()));
}
case BTT_IsSame:
return Self.Context.hasSameType(LhsT, RhsT);
@@ -1768,7 +1764,10 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT,
// Objective-C lifetime, this is a non-trivial assignment.
if (LhsT.getNonReferenceType().hasNonTrivialObjCLifetime())
return false;
-
+ const ASTContext &Context = Self.getASTContext();
+ if (Context.containsAddressDiscriminatedPointerAuth(LhsT) ||
+ Context.containsAddressDiscriminatedPointerAuth(RhsT))
+ return false;
return !Result.get()->hasNonTrivialCall(Self.Context);
}
@@ -1826,6 +1825,51 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT,
return Self.HLSL().IsScalarizedLayoutCompatible(LhsT, RhsT);
}
+ case BTT_LtSynthesisesFromSpaceship:
+ case BTT_LeSynthesisesFromSpaceship:
+ case BTT_GtSynthesisesFromSpaceship:
+ case BTT_GeSynthesisesFromSpaceship: {
+ EnterExpressionEvaluationContext UnevaluatedContext(
+ Self, Sema::ExpressionEvaluationContext::Unevaluated);
+ Sema::SFINAETrap SFINAE(Self, /*ForValidityCheck=*/true);
+ Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl());
+
+ OpaqueValueExpr LHS(KeyLoc, LhsT.getNonReferenceType(),
+ LhsT->isLValueReferenceType() ? ExprValueKind::VK_LValue
+ : LhsT->isRValueReferenceType()
+ ? ExprValueKind::VK_XValue
+ : ExprValueKind::VK_PRValue);
+ OpaqueValueExpr RHS(KeyLoc, RhsT.getNonReferenceType(),
+ RhsT->isLValueReferenceType() ? ExprValueKind::VK_LValue
+ : RhsT->isRValueReferenceType()
+ ? ExprValueKind::VK_XValue
+ : ExprValueKind::VK_PRValue);
+
+ auto OpKind = [&] {
+ switch (BTT) {
+ case BTT_LtSynthesisesFromSpaceship:
+ return BinaryOperatorKind::BO_LT;
+ case BTT_LeSynthesisesFromSpaceship:
+ return BinaryOperatorKind::BO_LE;
+ case BTT_GtSynthesisesFromSpaceship:
+ return BinaryOperatorKind::BO_GT;
+ case BTT_GeSynthesisesFromSpaceship:
+ return BinaryOperatorKind::BO_GE;
+ default:
+ llvm_unreachable("Trying to Synthesize non-comparison operator?");
+ }
+ }();
+
+ UnresolvedSet<16> Functions;
+ Self.LookupBinOp(Self.TUScope, KeyLoc, OpKind, Functions);
+
+ ExprResult Result =
+ Self.CreateOverloadedBinOp(KeyLoc, OpKind, Functions, &LHS, &RHS);
+ if (Result.isInvalid() || SFINAE.hasErrorOccurred())
+ return false;
+
+ return isa<CXXRewrittenBinaryOperator>(Result.get());
+ }
default:
llvm_unreachable("not a BTT");
}
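
These four traits test whether a relational operator would be satisfied by a comparison rewritten from operator<=> rather than by a user-declared operator, i.e. whether overload resolution yields a CXXRewrittenBinaryOperator. A sketch of the distinction (the traits' source spellings are not shown in this hunk):

    struct Spaceship {
      auto operator<=>(const Spaceship &) const = default;
    };
    struct Direct {
      bool operator<(const Direct &) const;
    };
    // For Spaceship, 'a < b' is rewritten to '(a <=> b) < 0', producing a
    // CXXRewrittenBinaryOperator, so the Lt trait holds; for Direct the
    // user-declared operator< is chosen directly and the trait is false.
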
@@ -1966,6 +2010,7 @@ static std::optional<TypeTrait> StdNameToTypeTrait(StringRef Name) {
.Case("is_empty", TypeTrait::UTT_IsEmpty)
.Case("is_standard_layout", TypeTrait::UTT_IsStandardLayout)
.Case("is_constructible", TypeTrait::TT_IsConstructible)
+ .Case("is_final", TypeTrait::UTT_IsFinal)
.Default(std::nullopt);
}
@@ -2018,11 +2063,10 @@ static ExtractedTypeTraitInfo ExtractTypeTraitFromExpression(const Expr *E) {
// std::is_xxx<>::value
if (const auto *VD = dyn_cast<VarDecl>(Ref->getDecl());
Ref->hasQualifier() && VD && VD->getIdentifier()->isStr("value")) {
- const Type *T = Ref->getQualifier()->getAsType();
- if (!T)
+ NestedNameSpecifier Qualifier = Ref->getQualifier();
+ if (Qualifier.getKind() != NestedNameSpecifier::Kind::Type)
return std::nullopt;
- const TemplateSpecializationType *Ts =
- T->getAs<TemplateSpecializationType>();
+ const auto *Ts = Qualifier.getAsType()->getAs<TemplateSpecializationType>();
if (!Ts)
return std::nullopt;
const TemplateDecl *D = Ts->getTemplateName().getAsTemplateDecl();
@@ -2451,6 +2495,52 @@ static void DiagnoseIsEmptyReason(Sema &S, SourceLocation Loc, QualType T) {
}
}
+static void DiagnoseIsFinalReason(Sema &S, SourceLocation Loc,
+ const CXXRecordDecl *D) {
+ if (!D || D->isInvalidDecl())
+ return;
+
+ // Complete record but not 'final'.
+ if (!D->isEffectivelyFinal()) {
+ S.Diag(Loc, diag::note_unsatisfied_trait_reason)
+ << diag::TraitNotSatisfiedReason::NotMarkedFinal;
+ S.Diag(D->getLocation(), diag::note_defined_here) << D;
+ return;
+ }
+}
+
+static void DiagnoseIsFinalReason(Sema &S, SourceLocation Loc, QualType T) {
+ // Primary: "%0 is not final"
+ S.Diag(Loc, diag::note_unsatisfied_trait) << T << diag::TraitName::Final;
+ if (T->isReferenceType()) {
+ S.Diag(Loc, diag::note_unsatisfied_trait_reason)
+ << diag::TraitNotSatisfiedReason::Ref;
+ S.Diag(Loc, diag::note_unsatisfied_trait_reason)
+ << diag::TraitNotSatisfiedReason::NotClassOrUnion;
+ return;
+ }
+ // Arrays / functions / non-records → not a class/union.
+ if (S.Context.getAsArrayType(T)) {
+ S.Diag(Loc, diag::note_unsatisfied_trait_reason)
+ << diag::TraitNotSatisfiedReason::NotClassOrUnion;
+ return;
+ }
+ if (T->isFunctionType()) {
+ S.Diag(Loc, diag::note_unsatisfied_trait_reason)
+ << diag::TraitNotSatisfiedReason::FunctionType;
+ S.Diag(Loc, diag::note_unsatisfied_trait_reason)
+ << diag::TraitNotSatisfiedReason::NotClassOrUnion;
+ return;
+ }
+ if (!T->isRecordType()) {
+ S.Diag(Loc, diag::note_unsatisfied_trait_reason)
+ << diag::TraitNotSatisfiedReason::NotClassOrUnion;
+ return;
+ }
+ if (const auto *D = T->getAsCXXRecordDecl())
+ DiagnoseIsFinalReason(S, Loc, D);
+}
+
static bool hasMultipleDataBaseClassesWithFields(const CXXRecordDecl *D) {
int NumBasesWithFields = 0;
for (const CXXBaseSpecifier &Base : D->bases()) {
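
The new helpers explain why a final-ness check failed rather than just reporting the result. A sketch of inputs and the notes they should produce (note wording paraphrased from the diagnostic kinds above):

    struct Open {};        // complete class, but not marked 'final'
    struct Sealed final {};

    static_assert(__is_final(Sealed));  // OK
    static_assert(__is_final(Open));    // note: not marked 'final'
    static_assert(__is_final(int[4]));  // note: not a class or union type
    static_assert(__is_final(void()));  // notes: function type; not a class
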
@@ -2627,6 +2717,15 @@ void Sema::DiagnoseTypeTraitDetails(const Expr *E) {
case TT_IsConstructible:
DiagnoseNonConstructibleReason(*this, E->getBeginLoc(), Args);
break;
+ case UTT_IsFinal: {
+ QualType QT = Args[0];
+ if (QT->isDependentType())
+ break;
+ const auto *RD = QT->getAsCXXRecordDecl();
+ if (!RD || !RD->isEffectivelyFinal())
+ DiagnoseIsFinalReason(*this, E->getBeginLoc(), QT); // unsatisfied
+ break;
+ }
default:
break;
}
diff --git a/clang/lib/Sema/SemaWasm.cpp b/clang/lib/Sema/SemaWasm.cpp
index 8998492..e773113 100644
--- a/clang/lib/Sema/SemaWasm.cpp
+++ b/clang/lib/Sema/SemaWasm.cpp
@@ -17,6 +17,7 @@
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Attr.h"
#include "clang/Sema/Sema.h"
@@ -227,7 +228,8 @@ bool SemaWasm::BuiltinWasmTableCopy(CallExpr *TheCall) {
return false;
}
-bool SemaWasm::BuiltinWasmTestFunctionPointerSignature(CallExpr *TheCall) {
+bool SemaWasm::BuiltinWasmTestFunctionPointerSignature(const TargetInfo &TI,
+ CallExpr *TheCall) {
if (SemaRef.checkArgCount(TheCall, 1))
return true;
@@ -250,27 +252,31 @@ bool SemaWasm::BuiltinWasmTestFunctionPointerSignature(CallExpr *TheCall) {
<< ArgType << FuncPtrArg->getSourceRange();
}
- // Check that the function pointer doesn't use reference types
- if (FuncTy->getReturnType().isWebAssemblyReferenceType()) {
- return Diag(
- FuncPtrArg->getBeginLoc(),
- diag::err_wasm_builtin_test_fp_sig_cannot_include_reference_type)
- << 0 << FuncTy->getReturnType() << FuncPtrArg->getSourceRange();
- }
- auto NParams = FuncTy->getNumParams();
- for (unsigned I = 0; I < NParams; I++) {
- if (FuncTy->getParamType(I).isWebAssemblyReferenceType()) {
+ if (TI.getABI() == "experimental-mv") {
+ auto isStructOrUnion = [](QualType T) {
+ return T->isUnionType() || T->isStructureType();
+ };
+ if (isStructOrUnion(FuncTy->getReturnType())) {
return Diag(
FuncPtrArg->getBeginLoc(),
diag::
- err_wasm_builtin_test_fp_sig_cannot_include_reference_type)
- << 1 << FuncPtrArg->getSourceRange();
+ err_wasm_builtin_test_fp_sig_cannot_include_struct_or_union)
+ << 0 << FuncTy->getReturnType() << FuncPtrArg->getSourceRange();
+ }
+ auto NParams = FuncTy->getNumParams();
+ for (unsigned I = 0; I < NParams; I++) {
+ if (isStructOrUnion(FuncTy->getParamType(I))) {
+ return Diag(
+ FuncPtrArg->getBeginLoc(),
+ diag::
+ err_wasm_builtin_test_fp_sig_cannot_include_struct_or_union)
+ << 1 << FuncPtrArg->getSourceRange();
+ }
}
}
// Set return type to int (the result of the test)
TheCall->setType(getASTContext().IntTy);
-
return false;
}
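
Under the experimental multivalue ABI, struct and union parameters or returns lower to multiple wasm values, so the builtin can no longer encode them in a test signature; the old reference-type restriction is dropped in favor of this check. A sketch of what is accepted and rejected (how the "experimental-mv" ABI is selected on the command line is not shown here):

    struct Pair { int a, b; };
    int scalar_fn(float);
    struct Pair aggregate_fn(void);

    int ok  = __builtin_wasm_test_function_pointer_signature(&scalar_fn);
    // With TI.getABI() == "experimental-mv", the following is diagnosed via
    // err_wasm_builtin_test_fp_sig_cannot_include_struct_or_union:
    int bad = __builtin_wasm_test_function_pointer_signature(&aggregate_fn);
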
@@ -297,7 +303,7 @@ bool SemaWasm::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
case WebAssembly::BI__builtin_wasm_table_copy:
return BuiltinWasmTableCopy(TheCall);
case WebAssembly::BI__builtin_wasm_test_function_pointer_signature:
- return BuiltinWasmTestFunctionPointerSignature(TheCall);
+ return BuiltinWasmTestFunctionPointerSignature(TI, TheCall);
}
return false;
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 0030946..aa1bb32 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -45,6 +45,7 @@
#include "clang/Sema/SemaOpenMP.h"
#include "clang/Sema/SemaPseudoObject.h"
#include "clang/Sema/SemaSYCL.h"
+#include "clang/Sema/Template.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
@@ -55,6 +56,17 @@ using namespace llvm::omp;
namespace clang {
using namespace sema;
+// This helper class is used to facilitate pack expansion during tree transform.
+struct UnexpandedInfo {
+ SourceLocation Ellipsis;
+ UnsignedOrNone OrigNumExpansions = std::nullopt;
+
+ bool Expand = false;
+ bool RetainExpansion = false;
+ UnsignedOrNone NumExpansions = std::nullopt;
+ bool ExpandUnderForgetSubstitions = false;
+};
+
/// A semantic tree transformation that allows one to transform one
/// abstract syntax tree into another.
///
@@ -292,6 +304,7 @@ public:
bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
+ bool FailOnPackProducingTemplates,
bool &ShouldExpand, bool &RetainExpansion,
UnsignedOrNone &NumExpansions) {
ShouldExpand = false;
@@ -314,6 +327,27 @@ public:
/// This routine is meant to be overridden by the template instantiator.
void RememberPartiallySubstitutedPack(TemplateArgument Arg) { }
+ /// "Forget" the template substitution to allow transforming the AST without
+ /// any template instantiations. This is used to expand template packs when
+ /// their size is not known in advance (e.g. for builtins that produce type
+ /// packs).
+ MultiLevelTemplateArgumentList ForgetSubstitution() { return {}; }
+ void RememberSubstitution(MultiLevelTemplateArgumentList) {}
+
+private:
+ struct ForgetSubstitutionRAII {
+ Derived &Self;
+ MultiLevelTemplateArgumentList Old;
+
+ public:
+ ForgetSubstitutionRAII(Derived &Self) : Self(Self) {
+ Old = Self.ForgetSubstitution();
+ }
+
+ ~ForgetSubstitutionRAII() { Self.RememberSubstitution(std::move(Old)); }
+ };
+
+public:
/// Note to the derived class when a function parameter pack is
/// being expanded.
void ExpandingFunctionParameterPack(ParmVarDecl *Pack) { }
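
ForgetSubstitutionRAII is a scope guard over the two hooks above: it stashes the current substitution so a pattern can be traversed without triggering instantiation, then restores it. A sketch of the intended call pattern (the caller code is illustrative, not from this patch):

    {
      ForgetSubstitutionRAII Forget(getDerived()); // Old = ForgetSubstitution()
      // Transform the pack's pattern here against an empty
      // MultiLevelTemplateArgumentList, so nothing is instantiated.
    } // ~ForgetSubstitutionRAII calls RememberSubstitution(std::move(Old))
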
@@ -591,12 +625,12 @@ public:
/// By default, transforms the template name by transforming the declarations
/// and nested-name-specifiers that occur within the template name.
/// Subclasses may override this function to provide alternate behavior.
- TemplateName
- TransformTemplateName(CXXScopeSpec &SS, TemplateName Name,
- SourceLocation NameLoc,
- QualType ObjectType = QualType(),
- NamedDecl *FirstQualifierInScope = nullptr,
- bool AllowInjectedClassName = false);
+ TemplateName TransformTemplateName(NestedNameSpecifierLoc &QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ TemplateName Name, SourceLocation NameLoc,
+ QualType ObjectType = QualType(),
+ NamedDecl *FirstQualifierInScope = nullptr,
+ bool AllowInjectedClassName = false);
/// Transform the given template argument.
///
@@ -610,9 +644,9 @@ public:
TemplateArgumentLoc &Output,
bool Uneval = false);
- TemplateArgument
- TransformNamedTemplateTemplateArgument(CXXScopeSpec &SS, TemplateName Name,
- SourceLocation NameLoc);
+ TemplateArgument TransformNamedTemplateTemplateArgument(
+ NestedNameSpecifierLoc &QualifierLoc, SourceLocation TemplateKeywordLoc,
+ TemplateName Name, SourceLocation NameLoc);
/// Transform the given set of template arguments.
///
@@ -660,6 +694,19 @@ public:
TemplateArgumentListInfo &Outputs,
bool Uneval = false);
+ /// Checks whether the argument pack from \p In needs to be expanded and
+ /// does the necessary preparatory work.
+ /// Whether the expansion is needed is captured in Info.Expand.
+ ///
+ /// - When the expansion is required, \p Out will be the template pattern
+ /// that needs to be expanded.
+ /// - When the expansion must not happen, \p Out will be a pack that must be
+ /// added to the outputs directly.
+ ///
+ /// \return true if an error occurred.
+ bool PreparePackForExpansion(TemplateArgumentLoc In, bool Uneval,
+ TemplateArgumentLoc &Out, UnexpandedInfo &Info);
+
/// Fakes up a TemplateArgumentLoc for a given TemplateArgument.
void InventTemplateArgumentLoc(const TemplateArgument &Arg,
TemplateArgumentLoc &ArgLoc);
@@ -697,20 +744,12 @@ public:
StmtResult TransformSEHHandler(Stmt *Handler);
- QualType
- TransformTemplateSpecializationType(TypeLocBuilder &TLB,
- TemplateSpecializationTypeLoc TL,
- TemplateName Template);
-
- QualType
- TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
- DependentTemplateSpecializationTypeLoc TL,
- TemplateName Template,
- CXXScopeSpec &SS);
-
QualType TransformDependentTemplateSpecializationType(
TypeLocBuilder &TLB, DependentTemplateSpecializationTypeLoc TL,
- CXXScopeSpec &SS);
+ QualType ObjectType, NamedDecl *UnqualLookup,
+ bool AllowInjectedClassName);
+
+ QualType TransformTagType(TypeLocBuilder &TLB, TagTypeLoc TL);
/// Transforms the parameters of a function type into the
/// given vectors.
@@ -1020,16 +1059,22 @@ public:
/// Rebuild an unresolved typename type, given the decl that
/// the UnresolvedUsingTypenameDecl was transformed to.
- QualType RebuildUnresolvedUsingType(SourceLocation NameLoc, Decl *D);
+ QualType RebuildUnresolvedUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ SourceLocation NameLoc, Decl *D);
/// Build a new type found via an alias.
- QualType RebuildUsingType(UsingShadowDecl *Found, QualType Underlying) {
- return SemaRef.Context.getUsingType(Found, Underlying);
+ QualType RebuildUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, UsingShadowDecl *D,
+ QualType UnderlyingType) {
+ return SemaRef.Context.getUsingType(Keyword, Qualifier, D, UnderlyingType);
}
/// Build a new typedef type.
- QualType RebuildTypedefType(TypedefNameDecl *Typedef) {
- return SemaRef.Context.getTypeDeclType(Typedef);
+ QualType RebuildTypedefType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ TypedefNameDecl *Typedef) {
+ return SemaRef.Context.getTypedefType(Keyword, Qualifier, Typedef);
}
/// Build a new MacroDefined type.
@@ -1038,14 +1083,14 @@ public:
return SemaRef.Context.getMacroQualifiedType(T, MacroII);
}
- /// Build a new class/struct/union type.
- QualType RebuildRecordType(RecordDecl *Record) {
- return SemaRef.Context.getTypeDeclType(Record);
+ /// Build a new class/struct/union/enum type.
+ QualType RebuildTagType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, TagDecl *Tag) {
+ return SemaRef.Context.getTagType(Keyword, Qualifier, Tag,
+ /*OwnsTag=*/false);
}
-
- /// Build a new Enum type.
- QualType RebuildEnumType(EnumDecl *Enum) {
- return SemaRef.Context.getTypeDeclType(Enum);
+ QualType RebuildCanonicalTagType(TagDecl *Tag) {
+ return SemaRef.Context.getCanonicalTagType(Tag);
}
/// Build a new typeof(expr) type.
@@ -1094,10 +1139,10 @@ public:
/// By default, builds a new DeducedTemplateSpecializationType with the given
/// deduced type.
- QualType RebuildDeducedTemplateSpecializationType(TemplateName Template,
- QualType Deduced) {
+ QualType RebuildDeducedTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword, TemplateName Template, QualType Deduced) {
return SemaRef.Context.getDeducedTemplateSpecializationType(
- Template, Deduced, /*IsDependent*/ false);
+ Keyword, Template, Deduced, /*IsDependent*/ false);
}
/// Build a new template specialization type.
@@ -1105,7 +1150,8 @@ public:
/// By default, performs semantic analysis when building the template
/// specialization type. Subclasses may override this routine to provide
/// different behavior.
- QualType RebuildTemplateSpecializationType(TemplateName Template,
+ QualType RebuildTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
+ TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &Args);
@@ -1117,41 +1163,22 @@ public:
return SemaRef.BuildParenType(InnerType);
}
- /// Build a new qualified name type.
- ///
- /// By default, builds a new ElaboratedType type from the keyword,
- /// the nested-name-specifier and the named type.
- /// Subclasses may override this routine to provide different behavior.
- QualType RebuildElaboratedType(SourceLocation KeywordLoc,
- ElaboratedTypeKeyword Keyword,
- NestedNameSpecifierLoc QualifierLoc,
- QualType Named) {
- return SemaRef.Context.getElaboratedType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
- Named);
- }
-
/// Build a new typename type that refers to a template-id.
///
/// By default, builds a new DependentNameType type from the
/// nested-name-specifier and the given type. Subclasses may override
/// this routine to provide different behavior.
QualType RebuildDependentTemplateSpecializationType(
- ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
- SourceLocation TemplateKWLoc, TemplateName Name, SourceLocation NameLoc,
- TemplateArgumentListInfo &Args, bool AllowInjectedClassName) {
+ ElaboratedTypeKeyword Keyword, SourceLocation TemplateKWLoc,
+ TemplateName Name, SourceLocation NameLoc, TemplateArgumentListInfo &Args,
+ bool AllowInjectedClassName) {
// If it's still dependent, make a dependent specialization.
if (const DependentTemplateStorage *S = Name.getAsDependentTemplateName())
return SemaRef.Context.getDependentTemplateSpecializationType(
Keyword, *S, Args.arguments());
- // Otherwise, make an elaborated type wrapping a non-dependent
- // specialization.
- QualType T =
- getDerived().RebuildTemplateSpecializationType(Name, NameLoc, Args);
- if (T.isNull())
- return QualType();
- return SemaRef.Context.getElaboratedType(Keyword, NNS, T);
+ return getDerived().RebuildTemplateSpecializationType(Keyword, Name,
+ NameLoc, Args);
}
/// Build a new typename type that refers to an identifier.
@@ -1168,7 +1195,7 @@ public:
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
- if (QualifierLoc.getNestedNameSpecifier()->isDependent()) {
+ if (QualifierLoc.getNestedNameSpecifier().isDependent()) {
// If the name is still dependent, just build a new dependent name type.
if (!SemaRef.computeDeclContext(SS))
return SemaRef.Context.getDependentNameType(Keyword,
@@ -1238,19 +1265,14 @@ public:
}
return QualType();
}
-
if (!SemaRef.isAcceptableTagRedeclaration(Tag, Kind, /*isDefinition*/false,
IdLoc, Id)) {
SemaRef.Diag(KeywordLoc, diag::err_use_with_wrong_tag) << Id;
SemaRef.Diag(Tag->getLocation(), diag::note_previous_use);
return QualType();
}
-
- // Build the elaborated-type-specifier type.
- QualType T = SemaRef.Context.getTypeDeclType(Tag);
- return SemaRef.Context.getElaboratedType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
- T);
+ return getDerived().RebuildTagType(
+ Keyword, QualifierLoc.getNestedNameSpecifier(), Tag);
}
/// Build a new pack expansion type.
@@ -1288,9 +1310,8 @@ public:
///
/// By default, builds the new template name directly. Subclasses may override
/// this routine to provide different behavior.
- TemplateName RebuildTemplateName(CXXScopeSpec &SS,
- bool TemplateKW,
- TemplateDecl *Template);
+ TemplateName RebuildTemplateName(CXXScopeSpec &SS, bool TemplateKW,
+ TemplateName Name);
/// Build a new template name given a nested name specifier and the
/// name that is referred to as a template.
@@ -1303,7 +1324,6 @@ public:
SourceLocation TemplateKWLoc,
const IdentifierInfo &Name,
SourceLocation NameLoc, QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
bool AllowInjectedClassName);
/// Build a new template name given a nested name specifier and the
@@ -1323,7 +1343,6 @@ public:
SourceLocation TemplateKWLoc,
IdentifierOrOverloadedOperator IO,
SourceLocation NameLoc, QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
bool AllowInjectedClassName);
/// Build a new template name given a template template parameter pack
@@ -4012,8 +4031,8 @@ public:
SemaRef.Context,
TemplateArgument(Pattern.getArgument().getAsTemplate(),
NumExpansions),
- Pattern.getTemplateQualifierLoc(), Pattern.getTemplateNameLoc(),
- EllipsisLoc);
+ Pattern.getTemplateKWLoc(), Pattern.getTemplateQualifierLoc(),
+ Pattern.getTemplateNameLoc(), EllipsisLoc);
case TemplateArgument::Null:
case TemplateArgument::Integral:
@@ -4073,7 +4092,7 @@ public:
PVD->getUninstantiatedDefaultArg()
->containsUnexpandedParameterPack();
}
- return getSema().BuildLambdaExpr(StartLoc, EndLoc, LSI);
+ return getSema().BuildLambdaExpr(StartLoc, EndLoc);
}
/// Build an empty C++1z fold-expression with the given operator.
@@ -4258,23 +4277,29 @@ public:
}
private:
- TypeLoc TransformTypeInObjectScope(TypeLoc TL,
- QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
- CXXScopeSpec &SS);
+ QualType TransformTypeInObjectScope(TypeLocBuilder &TLB, TypeLoc TL,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope);
TypeSourceInfo *TransformTypeInObjectScope(TypeSourceInfo *TSInfo,
QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
- CXXScopeSpec &SS);
+ NamedDecl *FirstQualifierInScope) {
+ if (getDerived().AlreadyTransformed(TSInfo->getType()))
+ return TSInfo;
- TypeSourceInfo *TransformTSIInObjectScope(TypeLoc TL, QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
- CXXScopeSpec &SS);
+ TypeLocBuilder TLB;
+ QualType T = TransformTypeInObjectScope(TLB, TSInfo->getTypeLoc(),
+ ObjectType, FirstQualifierInScope);
+ if (T.isNull())
+ return nullptr;
+ return TLB.getTypeSourceInfo(SemaRef.Context, T);
+ }
QualType TransformDependentNameType(TypeLocBuilder &TLB,
DependentNameTypeLoc TL,
- bool DeducibleTSTContext);
+ bool DeducibleTSTContext,
+ QualType ObjectType = QualType(),
+ NamedDecl *UnqualLookup = nullptr);
llvm::SmallVector<OpenACCClause *>
TransformOpenACCClauseList(OpenACCDirectiveKind DirKind,
@@ -4477,11 +4502,10 @@ bool TreeTransform<Derived>::TransformExprs(Expr *const *Inputs,
bool RetainExpansion = false;
UnsignedOrNone OrigNumExpansions = Expansion->getNumExpansions();
UnsignedOrNone NumExpansions = OrigNumExpansions;
- if (getDerived().TryExpandParameterPacks(Expansion->getEllipsisLoc(),
- Pattern->getSourceRange(),
- Unexpanded,
- Expand, RetainExpansion,
- NumExpansions))
+ if (getDerived().TryExpandParameterPacks(
+ Expansion->getEllipsisLoc(), Pattern->getSourceRange(),
+ Unexpanded, /*FailOnPackProducingTemplates=*/true, Expand,
+ RetainExpansion, NumExpansions))
return true;
if (!Expand) {
@@ -4603,7 +4627,7 @@ NestedNameSpecifierLoc TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
auto insertNNS = [&Qualifiers](NestedNameSpecifierLoc NNS) {
for (NestedNameSpecifierLoc Qualifier = NNS; Qualifier;
- Qualifier = Qualifier.getPrefix())
+ Qualifier = Qualifier.getAsNamespaceAndPrefix().Prefix)
Qualifiers.push_back(Qualifier);
};
insertNNS(NNS);
@@ -4611,76 +4635,87 @@ NestedNameSpecifierLoc TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
CXXScopeSpec SS;
while (!Qualifiers.empty()) {
NestedNameSpecifierLoc Q = Qualifiers.pop_back_val();
- NestedNameSpecifier *QNNS = Q.getNestedNameSpecifier();
-
- switch (QNNS->getKind()) {
- case NestedNameSpecifier::Identifier: {
- Sema::NestedNameSpecInfo IdInfo(QNNS->getAsIdentifier(),
- Q.getLocalBeginLoc(), Q.getLocalEndLoc(),
- ObjectType);
- if (SemaRef.BuildCXXNestedNameSpecifier(/*Scope=*/nullptr, IdInfo, false,
- SS, FirstQualifierInScope, false))
- return NestedNameSpecifierLoc();
- break;
- }
+ NestedNameSpecifier QNNS = Q.getNestedNameSpecifier();
+
+ switch (QNNS.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
- case NestedNameSpecifier::Namespace: {
+ case NestedNameSpecifier::Kind::Namespace: {
auto *NS = cast<NamespaceBaseDecl>(getDerived().TransformDecl(
- Q.getLocalBeginLoc(), QNNS->getAsNamespace()));
+ Q.getLocalBeginLoc(), const_cast<NamespaceBaseDecl *>(
+ QNNS.getAsNamespaceAndPrefix().Namespace)));
SS.Extend(SemaRef.Context, NS, Q.getLocalBeginLoc(), Q.getLocalEndLoc());
break;
}
- case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Kind::Global:
// There is no meaningful transformation that one could perform on the
// global scope.
SS.MakeGlobal(SemaRef.Context, Q.getBeginLoc());
break;
- case NestedNameSpecifier::Super: {
- CXXRecordDecl *RD =
- cast_or_null<CXXRecordDecl>(getDerived().TransformDecl(
- SourceLocation(), QNNS->getAsRecordDecl()));
- SS.MakeSuper(SemaRef.Context, RD, Q.getBeginLoc(), Q.getEndLoc());
+ case NestedNameSpecifier::Kind::MicrosoftSuper: {
+ CXXRecordDecl *RD = cast_or_null<CXXRecordDecl>(
+ getDerived().TransformDecl(SourceLocation(), QNNS.getAsRecordDecl()));
+ SS.MakeMicrosoftSuper(SemaRef.Context, RD, Q.getBeginLoc(),
+ Q.getEndLoc());
break;
}
- case NestedNameSpecifier::TypeSpec: {
- TypeLoc TL = TransformTypeInObjectScope(Q.getTypeLoc(), ObjectType,
- FirstQualifierInScope, SS);
-
- if (!TL)
- return NestedNameSpecifierLoc();
+ case NestedNameSpecifier::Kind::Type: {
+ assert(SS.isEmpty());
+ TypeLoc TL = Q.castAsTypeLoc();
+
+ if (auto DNT = TL.getAs<DependentNameTypeLoc>()) {
+ NestedNameSpecifierLoc QualifierLoc = DNT.getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(
+ QualifierLoc, ObjectType, FirstQualifierInScope);
+ if (!QualifierLoc)
+ return NestedNameSpecifierLoc();
+ ObjectType = QualType();
+ FirstQualifierInScope = nullptr;
+ }
+ SS.Adopt(QualifierLoc);
+ Sema::NestedNameSpecInfo IdInfo(
+ const_cast<IdentifierInfo *>(DNT.getTypePtr()->getIdentifier()),
+ DNT.getNameLoc(), Q.getLocalEndLoc(), ObjectType);
+ if (SemaRef.BuildCXXNestedNameSpecifier(/*Scope=*/nullptr, IdInfo,
+ false, SS,
+ FirstQualifierInScope, false))
+ return NestedNameSpecifierLoc();
+ return SS.getWithLocInContext(SemaRef.Context);
+ }
QualType T = TL.getType();
+ TypeLocBuilder TLB;
+ if (!getDerived().AlreadyTransformed(T)) {
+ T = TransformTypeInObjectScope(TLB, TL, ObjectType,
+ FirstQualifierInScope);
+ if (T.isNull())
+ return NestedNameSpecifierLoc();
+ TL = TLB.getTypeLocInContext(SemaRef.Context, T);
+ }
+
if (T->isDependentType() || T->isRecordType() ||
(SemaRef.getLangOpts().CPlusPlus11 && T->isEnumeralType())) {
if (T->isEnumeralType())
SemaRef.Diag(TL.getBeginLoc(),
diag::warn_cxx98_compat_enum_nested_name_spec);
-
- if (const auto ETL = TL.getAs<ElaboratedTypeLoc>()) {
- SS.Adopt(ETL.getQualifierLoc());
- TL = ETL.getNamedTypeLoc();
- }
-
- SS.Extend(SemaRef.Context, TL, Q.getLocalEndLoc());
+ SS.Make(SemaRef.Context, TL, Q.getLocalEndLoc());
break;
}
// If the nested-name-specifier is an invalid type def, don't emit an
// error because a previous error should have already been emitted.
TypedefTypeLoc TTL = TL.getAsAdjusted<TypedefTypeLoc>();
- if (!TTL || !TTL.getTypedefNameDecl()->isInvalidDecl()) {
+ if (!TTL || !TTL.getDecl()->isInvalidDecl()) {
SemaRef.Diag(TL.getBeginLoc(), diag::err_nested_name_spec_non_tag)
<< T << SS.getRange();
}
return NestedNameSpecifierLoc();
}
}
-
- // The qualifier-in-scope and object type only apply to the leftmost entity.
- FirstQualifierInScope = nullptr;
- ObjectType = QualType();
}
// Don't rebuild the nested-name-specifier if we don't have to.
@@ -4766,78 +4801,112 @@ template <typename Derived>
TemplateName TreeTransform<Derived>::RebuildTemplateName(
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
IdentifierOrOverloadedOperator IO, SourceLocation NameLoc,
- QualType ObjectType, NamedDecl *FirstQualifierInScope,
- bool AllowInjectedClassName) {
- if (const IdentifierInfo *II = IO.getIdentifier()) {
+ QualType ObjectType, bool AllowInjectedClassName) {
+ if (const IdentifierInfo *II = IO.getIdentifier())
return getDerived().RebuildTemplateName(SS, TemplateKWLoc, *II, NameLoc,
- ObjectType, FirstQualifierInScope,
- AllowInjectedClassName);
- }
+ ObjectType, AllowInjectedClassName);
return getDerived().RebuildTemplateName(SS, TemplateKWLoc, IO.getOperator(),
NameLoc, ObjectType,
AllowInjectedClassName);
}
-template<typename Derived>
-TemplateName
-TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
- TemplateName Name,
- SourceLocation NameLoc,
- QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
- bool AllowInjectedClassName) {
+template <typename Derived>
+TemplateName TreeTransform<Derived>::TransformTemplateName(
+ NestedNameSpecifierLoc &QualifierLoc, SourceLocation TemplateKWLoc,
+ TemplateName Name, SourceLocation NameLoc, QualType ObjectType,
+ NamedDecl *FirstQualifierInScope, bool AllowInjectedClassName) {
if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName()) {
- TemplateDecl *Template = QTN->getUnderlyingTemplate().getAsTemplateDecl();
- assert(Template && "qualified template name must refer to a template");
+ TemplateName UnderlyingName = QTN->getUnderlyingTemplate();
+
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(
+ QualifierLoc, ObjectType, FirstQualifierInScope);
+ if (!QualifierLoc)
+ return TemplateName();
+ }
- TemplateDecl *TransTemplate
- = cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc,
- Template));
- if (!TransTemplate)
+ NestedNameSpecifierLoc UnderlyingQualifier;
+ TemplateName NewUnderlyingName = getDerived().TransformTemplateName(
+ UnderlyingQualifier, TemplateKWLoc, UnderlyingName, NameLoc, ObjectType,
+ FirstQualifierInScope, AllowInjectedClassName);
+ if (NewUnderlyingName.isNull())
return TemplateName();
+ assert(!UnderlyingQualifier && "unexpected qualifier");
if (!getDerived().AlwaysRebuild() &&
- SS.getScopeRep() == QTN->getQualifier() &&
- TransTemplate == Template)
+ QualifierLoc.getNestedNameSpecifier() == QTN->getQualifier() &&
+ NewUnderlyingName == UnderlyingName)
return Name;
-
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
return getDerived().RebuildTemplateName(SS, QTN->hasTemplateKeyword(),
- TransTemplate);
+ NewUnderlyingName);
}
if (DependentTemplateName *DTN = Name.getAsDependentTemplateName()) {
- if (SS.getScopeRep()) {
- // These apply to the scope specifier, not the template.
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(
+ QualifierLoc, ObjectType, FirstQualifierInScope);
+ if (!QualifierLoc)
+ return TemplateName();
+ // The qualifier-in-scope and object type only apply to the leftmost
+ // entity.
ObjectType = QualType();
- FirstQualifierInScope = nullptr;
}
if (!getDerived().AlwaysRebuild() &&
- SS.getScopeRep() == DTN->getQualifier() &&
+ QualifierLoc.getNestedNameSpecifier() == DTN->getQualifier() &&
ObjectType.isNull())
return Name;
- // FIXME: Preserve the location of the "template" keyword.
- SourceLocation TemplateKWLoc = NameLoc;
- return getDerived().RebuildTemplateName(
- SS, TemplateKWLoc, DTN->getName(), NameLoc, ObjectType,
- FirstQualifierInScope, AllowInjectedClassName);
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ return getDerived().RebuildTemplateName(SS, TemplateKWLoc, DTN->getName(),
+ NameLoc, ObjectType,
+ AllowInjectedClassName);
}
- // FIXME: Try to preserve more of the TemplateName.
- if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
- TemplateDecl *TransTemplate
- = cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc,
- Template));
- if (!TransTemplate)
+ if (SubstTemplateTemplateParmStorage *S =
+ Name.getAsSubstTemplateTemplateParm()) {
+ assert(!QualifierLoc && "Unexpected qualified SubstTemplateTemplateParm");
+
+ NestedNameSpecifierLoc ReplacementQualifierLoc;
+ TemplateName ReplacementName = S->getReplacement();
+ if (NestedNameSpecifier Qualifier = ReplacementName.getQualifier()) {
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(SemaRef.Context, Qualifier, NameLoc);
+ ReplacementQualifierLoc = Builder.getWithLocInContext(SemaRef.Context);
+ }
+
+ TemplateName NewName = getDerived().TransformTemplateName(
+ ReplacementQualifierLoc, TemplateKWLoc, ReplacementName, NameLoc,
+ ObjectType, FirstQualifierInScope, AllowInjectedClassName);
+ if (NewName.isNull())
return TemplateName();
+ Decl *AssociatedDecl =
+ getDerived().TransformDecl(NameLoc, S->getAssociatedDecl());
+ if (!getDerived().AlwaysRebuild() && NewName == S->getReplacement() &&
+ AssociatedDecl == S->getAssociatedDecl())
+ return Name;
+ return SemaRef.Context.getSubstTemplateTemplateParm(
+ NewName, AssociatedDecl, S->getIndex(), S->getPackIndex(),
+ S->getFinal());
+ }
- return getDerived().RebuildTemplateName(SS, /*TemplateKeyword=*/false,
- TransTemplate);
+ assert(!Name.getAsDeducedTemplateName() &&
+ "DeducedTemplateName should not escape partial ordering");
+
+ // FIXME: Preserve UsingTemplateName.
+ if (auto *Template = Name.getAsTemplateDecl()) {
+ assert(!QualifierLoc && "Unexpected qualifier");
+ return TemplateName(cast_or_null<TemplateDecl>(
+ getDerived().TransformDecl(NameLoc, Template)));
}
if (SubstTemplateTemplateParmPackStorage *SubstPack
= Name.getAsSubstTemplateTemplateParmPack()) {
+ assert(!QualifierLoc &&
+ "Unexpected qualified SubstTemplateTemplateParmPack");
return getDerived().RebuildTemplateName(
SubstPack->getArgumentPack(), SubstPack->getAssociatedDecl(),
SubstPack->getIndex(), SubstPack->getFinal());
@@ -4849,8 +4918,10 @@ TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
template <typename Derived>
TemplateArgument TreeTransform<Derived>::TransformNamedTemplateTemplateArgument(
- CXXScopeSpec &SS, TemplateName Name, SourceLocation NameLoc) {
- TemplateName TN = getDerived().TransformTemplateName(SS, Name, NameLoc);
+ NestedNameSpecifierLoc &QualifierLoc, SourceLocation TemplateKeywordLoc,
+ TemplateName Name, SourceLocation NameLoc) {
+ TemplateName TN = getDerived().TransformTemplateName(
+ QualifierLoc, TemplateKeywordLoc, Name, NameLoc);
if (TN.isNull())
return TemplateArgument();
return TemplateArgument(TN);
@@ -4932,21 +5003,14 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
case TemplateArgument::Template: {
NestedNameSpecifierLoc QualifierLoc = Input.getTemplateQualifierLoc();
- if (QualifierLoc) {
- QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
- if (!QualifierLoc)
- return true;
- }
-
- CXXScopeSpec SS;
- SS.Adopt(QualifierLoc);
TemplateArgument Out = getDerived().TransformNamedTemplateTemplateArgument(
- SS, Arg.getAsTemplate(), Input.getTemplateNameLoc());
+ QualifierLoc, Input.getTemplateKWLoc(), Arg.getAsTemplate(),
+ Input.getTemplateNameLoc());
if (Out.isNull())
return true;
- Output = TemplateArgumentLoc(SemaRef.Context, Out, QualifierLoc,
- Input.getTemplateNameLoc());
+ Output = TemplateArgumentLoc(SemaRef.Context, Out, Input.getTemplateKWLoc(),
+ QualifierLoc, Input.getTemplateNameLoc());
return false;
}
@@ -5063,60 +5127,30 @@ bool TreeTransform<Derived>::TransformTemplateArguments(
}
if (In.getArgument().isPackExpansion()) {
- // We have a pack expansion, for which we will be substituting into
- // the pattern.
- SourceLocation Ellipsis;
- UnsignedOrNone OrigNumExpansions = std::nullopt;
- TemplateArgumentLoc Pattern
- = getSema().getTemplateArgumentPackExpansionPattern(
- In, Ellipsis, OrigNumExpansions);
-
- SmallVector<UnexpandedParameterPack, 2> Unexpanded;
- getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
- assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
-
- // Determine whether the set of unexpanded parameter packs can and should
- // be expanded.
- bool Expand = true;
- bool RetainExpansion = false;
- UnsignedOrNone NumExpansions = OrigNumExpansions;
- if (getDerived().TryExpandParameterPacks(Ellipsis,
- Pattern.getSourceRange(),
- Unexpanded,
- Expand,
- RetainExpansion,
- NumExpansions))
+ UnexpandedInfo Info;
+ TemplateArgumentLoc Prepared;
+ if (PreparePackForExpansion(In, Uneval, Prepared, Info))
return true;
-
- if (!Expand) {
- // The transform has determined that we should perform a simple
- // transformation on the pack expansion, producing another pack
- // expansion.
- TemplateArgumentLoc OutPattern;
- Sema::ArgPackSubstIndexRAII SubstIndex(getSema(), std::nullopt);
- if (getDerived().TransformTemplateArgument(Pattern, OutPattern, Uneval))
- return true;
-
- Out = getDerived().RebuildPackExpansion(OutPattern, Ellipsis,
- NumExpansions);
- if (Out.getArgument().isNull())
- return true;
-
- Outputs.addArgument(Out);
+ if (!Info.Expand) {
+ Outputs.addArgument(Prepared);
continue;
}
// The transform has determined that we should perform an elementwise
// expansion of the pattern. Do so.
- for (unsigned I = 0; I != *NumExpansions; ++I) {
+ std::optional<ForgetSubstitutionRAII> ForgetSubst;
+ if (Info.ExpandUnderForgetSubstitutions)
+ ForgetSubst.emplace(getDerived());
+ for (unsigned I = 0; I != *Info.NumExpansions; ++I) {
Sema::ArgPackSubstIndexRAII SubstIndex(getSema(), I);
- if (getDerived().TransformTemplateArgument(Pattern, Out, Uneval))
+ TemplateArgumentLoc Out;
+ if (getDerived().TransformTemplateArgument(Prepared, Out, Uneval))
return true;
if (Out.getArgument().containsUnexpandedParameterPack()) {
- Out = getDerived().RebuildPackExpansion(Out, Ellipsis,
- OrigNumExpansions);
+ Out = getDerived().RebuildPackExpansion(Out, Info.Ellipsis,
+ Info.OrigNumExpansions);
if (Out.getArgument().isNull())
return true;
}
@@ -5126,14 +5160,15 @@ bool TreeTransform<Derived>::TransformTemplateArguments(
// If we're supposed to retain a pack expansion, do so by temporarily
// forgetting the partially-substituted parameter pack.
- if (RetainExpansion) {
+ if (Info.RetainExpansion) {
ForgetPartiallySubstitutedPackRAII Forget(getDerived());
- if (getDerived().TransformTemplateArgument(Pattern, Out, Uneval))
+ TemplateArgumentLoc Out;
+ if (getDerived().TransformTemplateArgument(Prepared, Out, Uneval))
return true;
- Out = getDerived().RebuildPackExpansion(Out, Ellipsis,
- OrigNumExpansions);
+ Out = getDerived().RebuildPackExpansion(Out, Info.Ellipsis,
+ Info.OrigNumExpansions);
if (Out.getArgument().isNull())
return true;
@@ -5154,6 +5189,114 @@ bool TreeTransform<Derived>::TransformTemplateArguments(
}
+// FIXME: Find ways to reduce code duplication for pack expansions.
+template <typename Derived>
+bool TreeTransform<Derived>::PreparePackForExpansion(TemplateArgumentLoc In,
+ bool Uneval,
+ TemplateArgumentLoc &Out,
+ UnexpandedInfo &Info) {
+ auto ComputeInfo = [this](TemplateArgumentLoc Arg,
+ bool IsLateExpansionAttempt, UnexpandedInfo &Info,
+ TemplateArgumentLoc &Pattern) {
+ assert(Arg.getArgument().isPackExpansion());
+ // We have a pack expansion, for which we will be substituting into the
+ // pattern.
+ Pattern = getSema().getTemplateArgumentPackExpansionPattern(
+ Arg, Info.Ellipsis, Info.OrigNumExpansions);
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ if (IsLateExpansionAttempt) {
+ // Request expansion only when there is an opportunity to expand a pack
+ // that required a substitution first.
+ bool SawPackTypes =
+ llvm::any_of(Unexpanded, [](UnexpandedParameterPack P) {
+ return P.first.dyn_cast<const SubstBuiltinTemplatePackType *>();
+ });
+ if (!SawPackTypes) {
+ Info.Expand = false;
+ return false;
+ }
+ }
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can and
+ // should be expanded.
+ Info.Expand = true;
+ Info.RetainExpansion = false;
+ Info.NumExpansions = Info.OrigNumExpansions;
+ return getDerived().TryExpandParameterPacks(
+ Info.Ellipsis, Pattern.getSourceRange(), Unexpanded,
+ /*FailOnPackProducingTemplates=*/false, Info.Expand,
+ Info.RetainExpansion, Info.NumExpansions);
+ };
+
+ TemplateArgumentLoc Pattern;
+ if (ComputeInfo(In, false, Info, Pattern))
+ return true;
+
+ if (Info.Expand) {
+ Out = Pattern;
+ return false;
+ }
+
+ // The transform has determined that we should perform a simple
+ // transformation on the pack expansion, producing another pack
+ // expansion.
+ TemplateArgumentLoc OutPattern;
+ std::optional<Sema::ArgPackSubstIndexRAII> SubstIndex(
+ std::in_place, getSema(), std::nullopt);
+ if (getDerived().TransformTemplateArgument(Pattern, OutPattern, Uneval))
+ return true;
+
+ Out = getDerived().RebuildPackExpansion(OutPattern, Info.Ellipsis,
+ Info.NumExpansions);
+ if (Out.getArgument().isNull())
+ return true;
+ SubstIndex.reset();
+
+ if (!OutPattern.getArgument().containsUnexpandedParameterPack())
+ return false;
+
+ // Some packs will learn their length after substitution, e.g.
+ // __builtin_dedup_pack<T,int> has size 1 or 2, depending on the substitution
+ // value of `T`.
+ //
+ // We only expand once the sizes of all packs are known, so check whether
+ // that is the case here. However, we avoid a full template substitution and
+ // only perform expansions after this point.
+
+ // E.g. when substituting template arguments of tuple with {T -> int} in the
+ // following example:
+ // template <class T>
+ // struct TupleWithInt {
+ // using type = std::tuple<__builtin_dedup_pack<T, int>...>;
+ // };
+ // TupleWithInt<int>::type y;
+ // At this point we will see `__builtin_dedup_pack<int, int>` with a known
+ // length and run `ComputeInfo()` to provide the necessary information to
+ // our caller.
+ //
+ // Note that we may still have situations where the builtin is not going to
+ // be expanded. For example:
+ // template <class T>
+ // struct Foo {
+ // template <class U>
+ // using tuple_with_t = std::tuple<__builtin_dedup_pack<T, U, int>...>;
+ // using type = tuple_with_t<short>;
+ // };
+ // Because the substitution into `type` happens in a dependent context,
+ // `type` will be `tuple<__builtin_dedup_pack<T, short, int>...>` after
+ // substitution and the caller will not be able to expand it.
+ ForgetSubstitutionRAII ForgetSubst(getDerived());
+ if (ComputeInfo(Out, true, Info, OutPattern))
+ return true;
+ if (!Info.Expand)
+ return false;
+ Out = OutPattern;
+ Info.ExpandUnderForgetSubstitutions = true;
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// Type transformation
//===----------------------------------------------------------------------===//
@@ -5380,85 +5523,27 @@ QualType TreeTransform<Derived>::RebuildQualifiedType(QualType T,
return SemaRef.BuildQualifiedType(T, Loc, Quals);
}
-template<typename Derived>
-TypeLoc
-TreeTransform<Derived>::TransformTypeInObjectScope(TypeLoc TL,
- QualType ObjectType,
- NamedDecl *UnqualLookup,
- CXXScopeSpec &SS) {
- if (getDerived().AlreadyTransformed(TL.getType()))
- return TL;
-
- TypeSourceInfo *TSI =
- TransformTSIInObjectScope(TL, ObjectType, UnqualLookup, SS);
- if (TSI)
- return TSI->getTypeLoc();
- return TypeLoc();
-}
-
-template<typename Derived>
-TypeSourceInfo *
-TreeTransform<Derived>::TransformTypeInObjectScope(TypeSourceInfo *TSInfo,
- QualType ObjectType,
- NamedDecl *UnqualLookup,
- CXXScopeSpec &SS) {
- if (getDerived().AlreadyTransformed(TSInfo->getType()))
- return TSInfo;
-
- return TransformTSIInObjectScope(TSInfo->getTypeLoc(), ObjectType,
- UnqualLookup, SS);
-}
-
template <typename Derived>
-TypeSourceInfo *TreeTransform<Derived>::TransformTSIInObjectScope(
- TypeLoc TL, QualType ObjectType, NamedDecl *UnqualLookup,
- CXXScopeSpec &SS) {
- QualType T = TL.getType();
- assert(!getDerived().AlreadyTransformed(T));
-
- TypeLocBuilder TLB;
- QualType Result;
-
- if (isa<TemplateSpecializationType>(T)) {
- TemplateSpecializationTypeLoc SpecTL =
- TL.castAs<TemplateSpecializationTypeLoc>();
-
- TemplateName Template = getDerived().TransformTemplateName(
- SS, SpecTL.getTypePtr()->getTemplateName(), SpecTL.getTemplateNameLoc(),
- ObjectType, UnqualLookup, /*AllowInjectedClassName*/true);
- if (Template.isNull())
- return nullptr;
-
- Result = getDerived().TransformTemplateSpecializationType(TLB, SpecTL,
- Template);
- } else if (isa<DependentTemplateSpecializationType>(T)) {
- DependentTemplateSpecializationTypeLoc SpecTL =
- TL.castAs<DependentTemplateSpecializationTypeLoc>();
-
- const IdentifierInfo *II = SpecTL.getTypePtr()
- ->getDependentTemplateName()
- .getName()
- .getIdentifier();
- TemplateName Template = getDerived().RebuildTemplateName(
- SS, SpecTL.getTemplateKeywordLoc(), *II, SpecTL.getTemplateNameLoc(),
- ObjectType, UnqualLookup,
- /*AllowInjectedClassName*/ true);
- if (Template.isNull())
- return nullptr;
+QualType TreeTransform<Derived>::TransformTypeInObjectScope(
+ TypeLocBuilder &TLB, TypeLoc TL, QualType ObjectType,
+ NamedDecl *UnqualLookup) {
+ assert(!getDerived().AlreadyTransformed(TL.getType()));
- Result = getDerived().TransformDependentTemplateSpecializationType(TLB,
- SpecTL,
- Template,
- SS);
- } else {
- // Nothing special needs to be done for these.
- Result = getDerived().TransformType(TLB, TL);
+ switch (TL.getTypeLocClass()) {
+ case TypeLoc::DependentTemplateSpecialization:
+ return getDerived().TransformDependentTemplateSpecializationType(
+ TLB, TL.castAs<DependentTemplateSpecializationTypeLoc>(), ObjectType,
+ UnqualLookup, /*AllowInjectedClassName=*/true);
+ case TypeLoc::DependentName: {
+ return getDerived().TransformDependentNameType(
+ TLB, TL.castAs<DependentNameTypeLoc>(), /*DeducedTSTContext=*/false,
+ ObjectType, UnqualLookup);
+ }
+ default:
+ // Any dependent canonical type can appear here, through type alias
+ // templates.
+ return getDerived().TransformType(TLB, TL);
}
-
- if (Result.isNull())
- return nullptr;
-
- return TLB.getTypeSourceInfo(SemaRef.Context, Result);
}
template <class TyLoc> static inline
@@ -6211,12 +6296,10 @@ bool TreeTransform<Derived>::TransformFunctionTypeParams(
if (Unexpanded.size() > 0) {
OrigNumExpansions = ExpansionTL.getTypePtr()->getNumExpansions();
NumExpansions = OrigNumExpansions;
- if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(),
- Pattern.getSourceRange(),
- Unexpanded,
- ShouldExpand,
- RetainExpansion,
- NumExpansions)) {
+ if (getDerived().TryExpandParameterPacks(
+ ExpansionTL.getEllipsisLoc(), Pattern.getSourceRange(),
+ Unexpanded, /*FailOnPackProducingTemplates=*/true,
+ ShouldExpand, RetainExpansion, NumExpansions)) {
return true;
}
} else {
@@ -6322,11 +6405,10 @@ bool TreeTransform<Derived>::TransformFunctionTypeParams(
// Determine whether we should expand the parameter packs.
bool ShouldExpand = false;
bool RetainExpansion = false;
- if (getDerived().TryExpandParameterPacks(Loc, SourceRange(),
- Unexpanded,
- ShouldExpand,
- RetainExpansion,
- NumExpansions)) {
+ if (getDerived().TryExpandParameterPacks(
+ Loc, SourceRange(), Unexpanded,
+ /*FailOnPackProducingTemplates=*/true, ShouldExpand,
+ RetainExpansion, NumExpansions)) {
return true;
}
@@ -6623,8 +6705,9 @@ bool TreeTransform<Derived>::TransformExceptionSpec(
// FIXME: Track the location of the ellipsis (and track source location
// information for the types in the exception specification in general).
if (getDerived().TryExpandParameterPacks(
- Loc, SourceRange(), Unexpanded, Expand,
- RetainExpansion, NumExpansions))
+ Loc, SourceRange(), Unexpanded,
+ /*FailOnPackProducingTemplates=*/true, Expand, RetainExpansion,
+ NumExpansions))
return true;
if (!Expand) {
@@ -6694,23 +6777,38 @@ QualType TreeTransform<Derived>::TransformFunctionNoProtoType(
template <typename Derived>
QualType TreeTransform<Derived>::TransformUnresolvedUsingType(
TypeLocBuilder &TLB, UnresolvedUsingTypeLoc TL) {
+
const UnresolvedUsingType *T = TL.getTypePtr();
- Decl *D = getDerived().TransformDecl(TL.getNameLoc(), T->getDecl());
+ bool Changed = false;
+
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (NestedNameSpecifierLoc OldQualifierLoc = QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!QualifierLoc)
+ return QualType();
+ Changed |= QualifierLoc != OldQualifierLoc;
+ }
+
+ auto *D = getDerived().TransformDecl(TL.getNameLoc(), T->getDecl());
if (!D)
return QualType();
+ Changed |= D != T->getDecl();
QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() || D != T->getDecl()) {
- Result = getDerived().RebuildUnresolvedUsingType(TL.getNameLoc(), D);
+ if (getDerived().AlwaysRebuild() || Changed) {
+ Result = getDerived().RebuildUnresolvedUsingType(
+ T->getKeyword(), QualifierLoc.getNestedNameSpecifier(), TL.getNameLoc(),
+ D);
if (Result.isNull())
return QualType();
}
- // We might get an arbitrary type spec type back. We should at
- // least always get a type spec type, though.
- TypeSpecTypeLoc NewTL = TLB.pushTypeSpec(Result);
- NewTL.setNameLoc(TL.getNameLoc());
-
+ if (isa<UsingType>(Result))
+ TLB.push<UsingTypeLoc>(Result).set(TL.getElaboratedKeywordLoc(),
+ QualifierLoc, TL.getNameLoc());
+ else
+ TLB.push<UnresolvedUsingTypeLoc>(Result).set(TL.getElaboratedKeywordLoc(),
+ QualifierLoc, TL.getNameLoc());
return Result;
}
@@ -6718,25 +6816,37 @@ template <typename Derived>
QualType TreeTransform<Derived>::TransformUsingType(TypeLocBuilder &TLB,
UsingTypeLoc TL) {
const UsingType *T = TL.getTypePtr();
+ bool Changed = false;
- auto *Found = cast_or_null<UsingShadowDecl>(getDerived().TransformDecl(
- TL.getLocalSourceRange().getBegin(), T->getFoundDecl()));
- if (!Found)
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (NestedNameSpecifierLoc OldQualifierLoc = QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!QualifierLoc)
+ return QualType();
+ Changed |= QualifierLoc != OldQualifierLoc;
+ }
+
+ auto *D = cast_or_null<UsingShadowDecl>(
+ getDerived().TransformDecl(TL.getNameLoc(), T->getDecl()));
+ if (!D)
return QualType();
+ Changed |= D != T->getDecl();
- QualType Underlying = getDerived().TransformType(T->desugar());
- if (Underlying.isNull())
+ QualType UnderlyingType = getDerived().TransformType(T->desugar());
+ if (UnderlyingType.isNull())
return QualType();
+ Changed |= UnderlyingType != T->desugar();
QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() || Found != T->getFoundDecl() ||
- Underlying != T->getUnderlyingType()) {
- Result = getDerived().RebuildUsingType(Found, Underlying);
+ if (getDerived().AlwaysRebuild() || Changed) {
+ Result = getDerived().RebuildUsingType(
+ T->getKeyword(), QualifierLoc.getNestedNameSpecifier(), D,
+ UnderlyingType);
if (Result.isNull())
return QualType();
}
-
- TLB.pushTypeSpec(Result).setNameLoc(TL.getNameLoc());
+ TLB.push<UsingTypeLoc>(Result).set(TL.getElaboratedKeywordLoc(), QualifierLoc,
+ TL.getNameLoc());
return Result;
}
@@ -6744,23 +6854,34 @@ template<typename Derived>
QualType TreeTransform<Derived>::TransformTypedefType(TypeLocBuilder &TLB,
TypedefTypeLoc TL) {
const TypedefType *T = TL.getTypePtr();
- TypedefNameDecl *Typedef
- = cast_or_null<TypedefNameDecl>(getDerived().TransformDecl(TL.getNameLoc(),
- T->getDecl()));
+ bool Changed = false;
+
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (NestedNameSpecifierLoc OldQualifierLoc = QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!QualifierLoc)
+ return QualType();
+ Changed |= QualifierLoc != OldQualifierLoc;
+ }
+
+ auto *Typedef = cast_or_null<TypedefNameDecl>(
+ getDerived().TransformDecl(TL.getNameLoc(), T->getDecl()));
if (!Typedef)
return QualType();
+ Changed |= Typedef != T->getDecl();
+
+ // FIXME: Transform the UnderlyingType if different from decl.
QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() ||
- Typedef != T->getDecl()) {
- Result = getDerived().RebuildTypedefType(Typedef);
+ if (getDerived().AlwaysRebuild() || Changed) {
+ Result = getDerived().RebuildTypedefType(
+ T->getKeyword(), QualifierLoc.getNestedNameSpecifier(), Typedef);
if (Result.isNull())
return QualType();
}
- TypedefTypeLoc NewTL = TLB.push<TypedefTypeLoc>(Result);
- NewTL.setNameLoc(TL.getNameLoc());
-
+ TLB.push<TypedefTypeLoc>(Result).set(TL.getElaboratedKeywordLoc(),
+ QualifierLoc, TL.getNameLoc());
return Result;
}
@@ -6898,9 +7019,10 @@ TreeTransform<Derived>::TransformPackIndexingType(TypeLocBuilder &TLB,
bool ShouldExpand = true;
bool RetainExpansion = false;
UnsignedOrNone NumExpansions = std::nullopt;
- if (getDerived().TryExpandParameterPacks(TL.getEllipsisLoc(), SourceRange(),
- Unexpanded, ShouldExpand,
- RetainExpansion, NumExpansions))
+ if (getDerived().TryExpandParameterPacks(
+ TL.getEllipsisLoc(), SourceRange(), Unexpanded,
+ /*FailOnPackProducingTemplates=*/true, ShouldExpand,
+ RetainExpansion, NumExpansions))
return QualType();
if (!ShouldExpand) {
Sema::ArgPackSubstIndexRAII SubstIndex(getSema(), std::nullopt);
@@ -6996,9 +7118,10 @@ QualType TreeTransform<Derived>::TransformDeducedTemplateSpecializationType(
TypeLocBuilder &TLB, DeducedTemplateSpecializationTypeLoc TL) {
const DeducedTemplateSpecializationType *T = TL.getTypePtr();
- CXXScopeSpec SS;
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
TemplateName TemplateName = getDerived().TransformTemplateName(
- SS, T->getTemplateName(), TL.getTemplateNameLoc());
+ QualifierLoc, /*TemplateKWLoc=*/SourceLocation(), T->getTemplateName(),
+ TL.getTemplateNameLoc());
if (TemplateName.isNull())
return QualType();
@@ -7011,76 +7134,71 @@ QualType TreeTransform<Derived>::TransformDeducedTemplateSpecializationType(
}
QualType Result = getDerived().RebuildDeducedTemplateSpecializationType(
- TemplateName, NewDeduced);
+ T->getKeyword(), TemplateName, NewDeduced);
if (Result.isNull())
return QualType();
- DeducedTemplateSpecializationTypeLoc NewTL =
- TLB.push<DeducedTemplateSpecializationTypeLoc>(Result);
+ auto NewTL = TLB.push<DeducedTemplateSpecializationTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
-
+ NewTL.setQualifierLoc(QualifierLoc);
return Result;
}
-template<typename Derived>
-QualType TreeTransform<Derived>::TransformRecordType(TypeLocBuilder &TLB,
- RecordTypeLoc TL) {
- const RecordType *T = TL.getTypePtr();
- RecordDecl *Record
- = cast_or_null<RecordDecl>(getDerived().TransformDecl(TL.getNameLoc(),
- T->getDecl()));
- if (!Record)
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformTagType(TypeLocBuilder &TLB,
+ TagTypeLoc TL) {
+ const TagType *T = TL.getTypePtr();
+
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!QualifierLoc)
+ return QualType();
+ }
+
+ auto *TD = cast_or_null<TagDecl>(
+ getDerived().TransformDecl(TL.getNameLoc(), T->getOriginalDecl()));
+ if (!TD)
return QualType();
QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() ||
- Record != T->getDecl()) {
- Result = getDerived().RebuildRecordType(Record);
+ if (getDerived().AlwaysRebuild() || QualifierLoc != TL.getQualifierLoc() ||
+ TD != T->getOriginalDecl()) {
+ if (T->isCanonicalUnqualified())
+ Result = getDerived().RebuildCanonicalTagType(TD);
+ else
+ Result = getDerived().RebuildTagType(
+ T->getKeyword(), QualifierLoc.getNestedNameSpecifier(), TD);
if (Result.isNull())
return QualType();
}
- RecordTypeLoc NewTL = TLB.push<RecordTypeLoc>(Result);
+ TagTypeLoc NewTL = TLB.push<TagTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
NewTL.setNameLoc(TL.getNameLoc());
return Result;
}
-template<typename Derived>
+template <typename Derived>
QualType TreeTransform<Derived>::TransformEnumType(TypeLocBuilder &TLB,
EnumTypeLoc TL) {
- const EnumType *T = TL.getTypePtr();
- EnumDecl *Enum
- = cast_or_null<EnumDecl>(getDerived().TransformDecl(TL.getNameLoc(),
- T->getDecl()));
- if (!Enum)
- return QualType();
-
- QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() ||
- Enum != T->getDecl()) {
- Result = getDerived().RebuildEnumType(Enum);
- if (Result.isNull())
- return QualType();
- }
-
- EnumTypeLoc NewTL = TLB.push<EnumTypeLoc>(Result);
- NewTL.setNameLoc(TL.getNameLoc());
+ return getDerived().TransformTagType(TLB, TL);
+}
- return Result;
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformRecordType(TypeLocBuilder &TLB,
+ RecordTypeLoc TL) {
+ return getDerived().TransformTagType(TLB, TL);
}
template<typename Derived>
QualType TreeTransform<Derived>::TransformInjectedClassNameType(
TypeLocBuilder &TLB,
InjectedClassNameTypeLoc TL) {
- Decl *D = getDerived().TransformDecl(TL.getNameLoc(),
- TL.getTypePtr()->getDecl());
- if (!D) return QualType();
-
- QualType T = SemaRef.Context.getTypeDeclType(cast<TypeDecl>(D));
- TLB.pushTypeSpec(T).setNameLoc(TL.getNameLoc());
- return T;
+ return getDerived().TransformTagType(TLB, TL);
}
template<typename Derived>
@@ -7126,6 +7244,11 @@ QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmType(
return Result;
}
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformSubstBuiltinTemplatePackType(
+ TypeLocBuilder &TLB, SubstBuiltinTemplatePackTypeLoc TL) {
+ return TransformTypeSpecType(TLB, TL);
+}
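+// Note: judging from its use in PreparePackForExpansion() above,
+// SubstBuiltinTemplatePackType marks the not-yet-expanded result of a
+// pack-producing builtin (e.g. __builtin_dedup_pack); nothing beyond the
+// default type-spec transform is needed here.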
template<typename Derived>
QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmPackType(
@@ -7142,24 +7265,6 @@ QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmPackType(
}
template<typename Derived>
-QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
- TypeLocBuilder &TLB,
- TemplateSpecializationTypeLoc TL) {
- const TemplateSpecializationType *T = TL.getTypePtr();
-
- // The nested-name-specifier never matters in a TemplateSpecializationType,
- // because we can't have a dependent nested-name-specifier anyway.
- CXXScopeSpec SS;
- TemplateName Template
- = getDerived().TransformTemplateName(SS, T->getTemplateName(),
- TL.getTemplateNameLoc());
- if (Template.isNull())
- return QualType();
-
- return getDerived().TransformTemplateSpecializationType(TLB, TL, Template);
-}
-
-template<typename Derived>
QualType TreeTransform<Derived>::TransformAtomicType(TypeLocBuilder &TLB,
AtomicTypeLoc TL) {
QualType ValueType = getDerived().TransformType(TLB, TL.getValueLoc());
@@ -7398,9 +7503,16 @@ QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
template <typename Derived>
QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
- TypeLocBuilder &TLB,
- TemplateSpecializationTypeLoc TL,
- TemplateName Template) {
+ TypeLocBuilder &TLB, TemplateSpecializationTypeLoc TL) {
+ const TemplateSpecializationType *T = TL.getTypePtr();
+
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ TemplateName Template = getDerived().TransformTemplateName(
+ QualifierLoc, TL.getTemplateKeywordLoc(), T->getTemplateName(),
+ TL.getTemplateNameLoc());
+ if (Template.isNull())
+ return QualType();
+
TemplateArgumentListInfo NewTemplateArgs;
NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
@@ -7415,10 +7527,9 @@ QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
// original template changed. If the template changed, and even if the
// arguments didn't change, these arguments might not correspond to their
// respective parameters, therefore needing conversions.
- QualType Result =
- getDerived().RebuildTemplateSpecializationType(Template,
- TL.getTemplateNameLoc(),
- NewTemplateArgs);
+ QualType Result = getDerived().RebuildTemplateSpecializationType(
+ TL.getTypePtr()->getKeyword(), Template, TL.getTemplateNameLoc(),
+ NewTemplateArgs);
if (!Result.isNull()) {
// Specializations of template template parameters are represented as
@@ -7428,8 +7539,8 @@ QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
if (isa<DependentTemplateSpecializationType>(Result)) {
DependentTemplateSpecializationTypeLoc NewTL
= TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
- NewTL.setElaboratedKeywordLoc(SourceLocation());
- NewTL.setQualifierLoc(NestedNameSpecifierLoc());
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
NewTL.setLAngleLoc(TL.getLAngleLoc());
@@ -7438,129 +7549,11 @@ QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
return Result;
}
-
- TemplateSpecializationTypeLoc NewTL
- = TLB.push<TemplateSpecializationTypeLoc>(Result);
- NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
- NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
- NewTL.setLAngleLoc(TL.getLAngleLoc());
- NewTL.setRAngleLoc(TL.getRAngleLoc());
- for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
- NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
- }
-
- return Result;
-}
-
-template <typename Derived>
-QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
- TypeLocBuilder &TLB,
- DependentTemplateSpecializationTypeLoc TL,
- TemplateName Template,
- CXXScopeSpec &SS) {
- TemplateArgumentListInfo NewTemplateArgs;
- NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
- NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
- typedef TemplateArgumentLocContainerIterator<
- DependentTemplateSpecializationTypeLoc> ArgIterator;
- if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
- ArgIterator(TL, TL.getNumArgs()),
- NewTemplateArgs))
- return QualType();
-
- // FIXME: maybe don't rebuild if all the template arguments are the same.
-
- if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
- assert(DTN->getQualifier() == SS.getScopeRep());
- QualType Result = getSema().Context.getDependentTemplateSpecializationType(
- TL.getTypePtr()->getKeyword(), *DTN, NewTemplateArgs.arguments());
-
- DependentTemplateSpecializationTypeLoc NewTL
- = TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
- NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
- NewTL.setQualifierLoc(SS.getWithLocInContext(SemaRef.Context));
- NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
- NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
- NewTL.setLAngleLoc(TL.getLAngleLoc());
- NewTL.setRAngleLoc(TL.getRAngleLoc());
- for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
- NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
- return Result;
- }
-
- QualType Result
- = getDerived().RebuildTemplateSpecializationType(Template,
- TL.getTemplateNameLoc(),
- NewTemplateArgs);
-
- if (!Result.isNull()) {
- /// FIXME: Wrap this in an elaborated-type-specifier?
- TemplateSpecializationTypeLoc NewTL
- = TLB.push<TemplateSpecializationTypeLoc>(Result);
- NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
- NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
- NewTL.setLAngleLoc(TL.getLAngleLoc());
- NewTL.setRAngleLoc(TL.getRAngleLoc());
- for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
- NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
- }
-
- return Result;
-}
-
-template<typename Derived>
-QualType
-TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
- ElaboratedTypeLoc TL) {
- const ElaboratedType *T = TL.getTypePtr();
-
- NestedNameSpecifierLoc QualifierLoc;
- // NOTE: the qualifier in an ElaboratedType is optional.
- if (TL.getQualifierLoc()) {
- QualifierLoc
- = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc());
- if (!QualifierLoc)
- return QualType();
- }
-
- QualType NamedT = getDerived().TransformType(TLB, TL.getNamedTypeLoc());
- if (NamedT.isNull())
- return QualType();
-
- // C++0x [dcl.type.elab]p2:
- // If the identifier resolves to a typedef-name or the simple-template-id
- // resolves to an alias template specialization, the
- // elaborated-type-specifier is ill-formed.
- if (T->getKeyword() != ElaboratedTypeKeyword::None &&
- T->getKeyword() != ElaboratedTypeKeyword::Typename) {
- if (const TemplateSpecializationType *TST =
- NamedT->getAs<TemplateSpecializationType>()) {
- TemplateName Template = TST->getTemplateName();
- if (TypeAliasTemplateDecl *TAT = dyn_cast_or_null<TypeAliasTemplateDecl>(
- Template.getAsTemplateDecl())) {
- SemaRef.Diag(TL.getNamedTypeLoc().getBeginLoc(),
- diag::err_tag_reference_non_tag)
- << TAT << NonTagKind::TypeAliasTemplate
- << ElaboratedType::getTagTypeKindForKeyword(T->getKeyword());
- SemaRef.Diag(TAT->getLocation(), diag::note_declared_at);
- }
- }
- }
-
- QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() ||
- QualifierLoc != TL.getQualifierLoc() ||
- NamedT != T->getNamedType()) {
- Result = getDerived().RebuildElaboratedType(TL.getElaboratedKeywordLoc(),
- T->getKeyword(),
- QualifierLoc, NamedT);
- if (Result.isNull())
- return QualType();
+ TLB.push<TemplateSpecializationTypeLoc>(Result).set(
+ TL.getElaboratedKeywordLoc(), QualifierLoc, TL.getTemplateKeywordLoc(),
+ TL.getTemplateNameLoc(), NewTemplateArgs);
}
- ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
- NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
- NewTL.setQualifierLoc(QualifierLoc);
return Result;
}
@@ -7754,15 +7747,22 @@ QualType TreeTransform<Derived>::TransformDependentNameType(
return TransformDependentNameType(TLB, TL, false);
}
-template<typename Derived>
+template <typename Derived>
QualType TreeTransform<Derived>::TransformDependentNameType(
- TypeLocBuilder &TLB, DependentNameTypeLoc TL, bool DeducedTSTContext) {
+ TypeLocBuilder &TLB, DependentNameTypeLoc TL, bool DeducedTSTContext,
+ QualType ObjectType, NamedDecl *UnqualLookup) {
const DependentNameType *T = TL.getTypePtr();
- NestedNameSpecifierLoc QualifierLoc
- = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc());
- if (!QualifierLoc)
- return QualType();
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(
+ QualifierLoc, ObjectType, UnqualLookup);
+ if (!QualifierLoc)
+ return QualType();
+ } else {
+ assert((ObjectType.isNull() && !UnqualLookup) &&
+ "must be transformed by TransformNestedNameSpecifierLoc");
+ }
QualType Result
= getDerived().RebuildDependentNameType(T->getKeyword(),
@@ -7774,15 +7774,24 @@ QualType TreeTransform<Derived>::TransformDependentNameType(
if (Result.isNull())
return QualType();
- if (const ElaboratedType* ElabT = Result->getAs<ElaboratedType>()) {
- QualType NamedT = ElabT->getNamedType();
- TLB.pushTypeSpec(NamedT).setNameLoc(TL.getNameLoc());
-
- ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
+ if (isa<TagType>(Result)) {
+ auto NewTL = TLB.push<TagTypeLoc>(Result);
NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
NewTL.setQualifierLoc(QualifierLoc);
+ NewTL.setNameLoc(TL.getNameLoc());
+ } else if (isa<DeducedTemplateSpecializationType>(Result)) {
+ auto NewTL = TLB.push<DeducedTemplateSpecializationTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setTemplateNameLoc(TL.getNameLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
+ } else if (isa<TypedefType>(Result)) {
+ TLB.push<TypedefTypeLoc>(Result).set(TL.getElaboratedKeywordLoc(),
+ QualifierLoc, TL.getNameLoc());
+ } else if (isa<UnresolvedUsingType>(Result)) {
+ auto NewTL = TLB.push<UnresolvedUsingTypeLoc>(Result);
+ NewTL.set(TL.getElaboratedKeywordLoc(), QualifierLoc, TL.getNameLoc());
} else {
- DependentNameTypeLoc NewTL = TLB.push<DependentNameTypeLoc>(Result);
+ auto NewTL = TLB.push<DependentNameTypeLoc>(Result);
NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
NewTL.setQualifierLoc(QualifierLoc);
NewTL.setNameLoc(TL.getNameLoc());
@@ -7790,33 +7799,34 @@ QualType TreeTransform<Derived>::TransformDependentNameType(
return Result;
}
-template<typename Derived>
-QualType TreeTransform<Derived>::
- TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
- DependentTemplateSpecializationTypeLoc TL) {
- NestedNameSpecifierLoc QualifierLoc;
- if (TL.getQualifierLoc()) {
- QualifierLoc
- = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc());
- if (!QualifierLoc)
- return QualType();
- }
-
- CXXScopeSpec SS;
- SS.Adopt(QualifierLoc);
- return getDerived().TransformDependentTemplateSpecializationType(TLB, TL, SS);
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
+ TypeLocBuilder &TLB, DependentTemplateSpecializationTypeLoc TL) {
+ return getDerived().TransformDependentTemplateSpecializationType(
+ TLB, TL, QualType(), nullptr, false);
}
template <typename Derived>
QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
TypeLocBuilder &TLB, DependentTemplateSpecializationTypeLoc TL,
- CXXScopeSpec &SS) {
+ QualType ObjectType, NamedDecl *UnqualLookup, bool AllowInjectedClassName) {
const DependentTemplateSpecializationType *T = TL.getTypePtr();
- TemplateArgumentListInfo NewTemplateArgs;
- NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
- NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(
+ QualifierLoc, ObjectType, UnqualLookup);
+ if (!QualifierLoc)
+ return QualType();
+ // These only apply to the leftmost prefix.
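+    // For example, in 'x.A::B::f()', only 'A' is looked up in both the
+    // enclosing scope and the type of 'x'; the rest of the qualifier is
+    // resolved normally.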
+ ObjectType = QualType();
+ UnqualLookup = nullptr;
+ }
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ TemplateArgumentListInfo NewTemplateArgs(TL.getLAngleLoc(),
+ TL.getRAngleLoc());
auto ArgsRange = llvm::make_range<TemplateArgumentLocContainerIterator<
DependentTemplateSpecializationTypeLoc>>({TL, 0}, {TL, TL.getNumArgs()});
@@ -7833,43 +7843,27 @@ QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() || SS.getScopeRep() != DTN.getQualifier() ||
- TemplateArgumentsChanged) {
+ TemplateArgumentsChanged || !ObjectType.isNull()) {
TemplateName Name = getDerived().RebuildTemplateName(
SS, TL.getTemplateKeywordLoc(), DTN.getName(), TL.getTemplateNameLoc(),
- /*ObjectType=*/QualType(), /*FirstQualifierInScope=*/nullptr,
- /*AllowInjectedClassName=*/false);
+ ObjectType, AllowInjectedClassName);
if (Name.isNull())
return QualType();
Result = getDerived().RebuildDependentTemplateSpecializationType(
- T->getKeyword(), SS.getScopeRep(), TL.getTemplateKeywordLoc(), Name,
+ T->getKeyword(), TL.getTemplateKeywordLoc(), Name,
TL.getTemplateNameLoc(), NewTemplateArgs,
/*AllowInjectedClassName=*/false);
if (Result.isNull())
return QualType();
}
- NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(SemaRef.Context);
- if (const ElaboratedType *ElabT = dyn_cast<ElaboratedType>(Result)) {
- QualType NamedT = ElabT->getNamedType();
-
- // Copy information relevant to the template specialization.
- TemplateSpecializationTypeLoc NamedTL
- = TLB.push<TemplateSpecializationTypeLoc>(NamedT);
- NamedTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
- NamedTL.setTemplateNameLoc(TL.getTemplateNameLoc());
- NamedTL.setLAngleLoc(TL.getLAngleLoc());
- NamedTL.setRAngleLoc(TL.getRAngleLoc());
- for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I)
- NamedTL.setArgLocInfo(I, NewTemplateArgs[I].getLocInfo());
-
- // Copy information relevant to the elaborated type.
- ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
- NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
- NewTL.setQualifierLoc(QualifierLoc);
+ QualifierLoc = SS.getWithLocInContext(SemaRef.Context);
+ if (isa<TemplateSpecializationType>(Result)) {
+ TLB.push<TemplateSpecializationTypeLoc>(Result).set(
+ TL.getElaboratedKeywordLoc(), QualifierLoc, TL.getTemplateKeywordLoc(),
+ TL.getTemplateNameLoc(), NewTemplateArgs);
} else {
- assert(isa<DependentTemplateSpecializationType>(Result));
- DependentTemplateSpecializationTypeLoc SpecTL
- = TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
+ auto SpecTL = TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
SpecTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
SpecTL.setQualifierLoc(QualifierLoc);
SpecTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
@@ -7981,8 +7975,9 @@ TreeTransform<Derived>::TransformObjCObjectType(TypeLocBuilder &TLB,
bool RetainExpansion = false;
UnsignedOrNone NumExpansions = PackExpansion->getNumExpansions();
if (getDerived().TryExpandParameterPacks(
- PackExpansionLoc.getEllipsisLoc(), PatternLoc.getSourceRange(),
- Unexpanded, Expand, RetainExpansion, NumExpansions))
+ PackExpansionLoc.getEllipsisLoc(), PatternLoc.getSourceRange(),
+ Unexpanded, /*FailOnPackProducingTemplates=*/true, Expand,
+ RetainExpansion, NumExpansions))
return QualType();
if (!Expand) {
@@ -8590,14 +8585,18 @@ TreeTransform<Derived>::TransformDeclStmt(DeclStmt *S) {
DeclChanged = true;
if (LSI) {
- if (auto *TD = dyn_cast<TypeDecl>(Transformed))
- LSI->ContainsUnexpandedParameterPack |=
- getSema()
- .getASTContext()
- .getTypeDeclType(TD)
- .getSingleStepDesugaredType(getSema().getASTContext())
- ->containsUnexpandedParameterPack();
-
+ if (auto *TD = dyn_cast<TypeDecl>(Transformed)) {
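+      // For typedefs, check the underlying type directly; other type
+      // declarations are checked via their declared type.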
+ if (auto *TN = dyn_cast<TypedefNameDecl>(TD)) {
+ LSI->ContainsUnexpandedParameterPack |=
+ TN->getUnderlyingType()->containsUnexpandedParameterPack();
+ } else {
+ LSI->ContainsUnexpandedParameterPack |=
+ getSema()
+ .getASTContext()
+ .getTypeDeclType(TD)
+ ->containsUnexpandedParameterPack();
+ }
+ }
if (auto *VD = dyn_cast<VarDecl>(Transformed))
LSI->ContainsUnexpandedParameterPack |=
VD->getType()->containsUnexpandedParameterPack();
@@ -10954,8 +10953,7 @@ TreeTransform<Derived>::TransformOMPMessageClause(OMPMessageClause *C) {
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPMessageClause(
- C->getMessageString(), C->getBeginLoc(), C->getLParenLoc(),
- C->getEndLoc());
+ E.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -12416,18 +12414,36 @@ void OpenACCClauseTransform<Derived>::VisitReductionClause(
const OpenACCReductionClause &C) {
SmallVector<Expr *> TransformedVars = VisitVarList(C.getVarList());
SmallVector<Expr *> ValidVars;
+ llvm::SmallVector<OpenACCReductionRecipe> Recipes;
- for (Expr *Var : TransformedVars) {
+ for (const auto [Var, OrigRecipes] :
+ llvm::zip(TransformedVars, C.getRecipes())) {
ExprResult Res = Self.getSema().OpenACC().CheckReductionVar(
ParsedClause.getDirectiveKind(), C.getReductionOp(), Var);
- if (Res.isUsable())
+ if (Res.isUsable()) {
ValidVars.push_back(Res.get());
+
+ // TODO OpenACC: When the recipe changes, make sure we get these right
+ // too. We probably need something similar for the operation.
+      static_assert(sizeof(OpenACCReductionRecipe) == sizeof(int *));
+ VarDecl *InitRecipe = nullptr;
+ if (OrigRecipes.RecipeDecl)
+ InitRecipe = OrigRecipes.RecipeDecl;
+ else
+ InitRecipe = Self.getSema()
+ .OpenACC()
+ .CreateInitRecipe(OpenACCClauseKind::Reduction,
+ C.getReductionOp(), Res.get())
+ .first;
+
+ Recipes.push_back({InitRecipe});
+ }
}
NewClause = Self.getSema().OpenACC().CheckReductionClause(
ExistingClauses, ParsedClause.getDirectiveKind(),
ParsedClause.getBeginLoc(), ParsedClause.getLParenLoc(),
- C.getReductionOp(), ValidVars, ParsedClause.getEndLoc());
+ C.getReductionOp(), ValidVars, Recipes, ParsedClause.getEndLoc());
}
template <typename Derived>
@@ -14352,9 +14368,9 @@ TreeTransform<Derived>::TransformCXXTypeidExpr(CXXTypeidExpr *E) {
Expr *Op = E->getExprOperand();
auto EvalCtx = Sema::ExpressionEvaluationContext::Unevaluated;
if (E->isGLValue())
- if (auto *RecordT = Op->getType()->getAs<RecordType>())
- if (cast<CXXRecordDecl>(RecordT->getDecl())->isPolymorphic())
- EvalCtx = SemaRef.ExprEvalContexts.back().Context;
+ if (auto *RD = Op->getType()->getAsCXXRecordDecl();
+ RD && RD->isPolymorphic())
+ EvalCtx = SemaRef.ExprEvalContexts.back().Context;
EnterExpressionEvaluationContext Unevaluated(SemaRef, EvalCtx,
Sema::ReuseLambdaContextDecl);
@@ -14428,7 +14444,9 @@ TreeTransform<Derived>::TransformCXXThisExpr(CXXThisExpr *E) {
// for type deduction, so we need to recompute it.
//
// Always recompute the type if we're in the body of a lambda, and
- // 'this' is dependent on a lambda's explicit object parameter.
+ // 'this' is dependent on a lambda's explicit object parameter; we
+ // also need to always rebuild the expression in this case to clear
+ // the flag.
QualType T = [&]() {
auto &S = getSema();
if (E->isCapturedByCopyInLambdaWithExplicitObjectParameter())
@@ -14438,7 +14456,8 @@ TreeTransform<Derived>::TransformCXXThisExpr(CXXThisExpr *E) {
return S.getCurrentThisType();
}();
- if (!getDerived().AlwaysRebuild() && T == E->getType()) {
+ if (!getDerived().AlwaysRebuild() && T == E->getType() &&
+ !E->isCapturedByCopyInLambdaWithExplicitObjectParameter()) {
// Mark it referenced in the new context regardless.
// FIXME: this is a bit instantiation-specific.
getSema().MarkThisReferenced(E);
@@ -14592,11 +14611,9 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
if (E->isArray() && !E->getAllocatedType()->isDependentType()) {
QualType ElementType
= SemaRef.Context.getBaseElementType(E->getAllocatedType());
- if (const RecordType *RecordT = ElementType->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordT->getDecl());
- if (CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(Record)) {
+ if (CXXRecordDecl *Record = ElementType->getAsCXXRecordDecl()) {
+ if (CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(Record))
SemaRef.MarkFunctionReferenced(E->getBeginLoc(), Destructor);
- }
}
}
@@ -14662,11 +14679,9 @@ TreeTransform<Derived>::TransformCXXDeleteExpr(CXXDeleteExpr *E) {
if (!E->getArgument()->isTypeDependent()) {
QualType Destroyed = SemaRef.Context.getBaseElementType(
E->getDestroyedType());
- if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
+ if (auto *Record = Destroyed->getAsCXXRecordDecl())
SemaRef.MarkFunctionReferenced(E->getBeginLoc(),
SemaRef.LookupDestructor(Record));
- }
}
return E;
@@ -14707,9 +14722,9 @@ TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
PseudoDestructorTypeStorage Destroyed;
if (E->getDestroyedTypeInfo()) {
- TypeSourceInfo *DestroyedTypeInfo
- = getDerived().TransformTypeInObjectScope(E->getDestroyedTypeInfo(),
- ObjectType, nullptr, SS);
+ TypeSourceInfo *DestroyedTypeInfo = getDerived().TransformTypeInObjectScope(
+ E->getDestroyedTypeInfo(), ObjectType,
+ /*FirstQualifierInScope=*/nullptr);
if (!DestroyedTypeInfo)
return ExprError();
Destroyed = DestroyedTypeInfo;
@@ -14733,9 +14748,8 @@ TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
TypeSourceInfo *ScopeTypeInfo = nullptr;
if (E->getScopeTypeInfo()) {
- CXXScopeSpec EmptySS;
ScopeTypeInfo = getDerived().TransformTypeInObjectScope(
- E->getScopeTypeInfo(), ObjectType, nullptr, EmptySS);
+ E->getScopeTypeInfo(), ObjectType, nullptr);
if (!ScopeTypeInfo)
return ExprError();
}
@@ -14938,11 +14952,10 @@ TreeTransform<Derived>::TransformTypeTraitExpr(TypeTraitExpr *E) {
UnsignedOrNone OrigNumExpansions =
ExpansionTL.getTypePtr()->getNumExpansions();
UnsignedOrNone NumExpansions = OrigNumExpansions;
- if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(),
- PatternTL.getSourceRange(),
- Unexpanded,
- Expand, RetainExpansion,
- NumExpansions))
+ if (getDerived().TryExpandParameterPacks(
+ ExpansionTL.getEllipsisLoc(), PatternTL.getSourceRange(),
+ Unexpanded, /*FailOnPackProducingTemplates=*/true, Expand,
+ RetainExpansion, NumExpansions))
return ExprError();
if (!Expand) {
@@ -15516,8 +15529,8 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
ExpansionTL.getTypePtr()->getNumExpansions();
UnsignedOrNone NumExpansions = OrigNumExpansions;
if (getDerived().TryExpandParameterPacks(
- ExpansionTL.getEllipsisLoc(),
- OldVD->getInit()->getSourceRange(), Unexpanded, Expand,
+ ExpansionTL.getEllipsisLoc(), OldVD->getInit()->getSourceRange(),
+ Unexpanded, /*FailOnPackProducingTemplates=*/true, Expand,
RetainExpansion, NumExpansions))
return ExprError();
assert(!RetainExpansion && "Should not need to retain expansion after a "
@@ -15677,11 +15690,10 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
bool ShouldExpand = false;
bool RetainExpansion = false;
UnsignedOrNone NumExpansions = std::nullopt;
- if (getDerived().TryExpandParameterPacks(C->getEllipsisLoc(),
- C->getLocation(),
- Unexpanded,
- ShouldExpand, RetainExpansion,
- NumExpansions)) {
+ if (getDerived().TryExpandParameterPacks(
+ C->getEllipsisLoc(), C->getLocation(), Unexpanded,
+ /*FailOnPackProducingTemplates=*/true, ShouldExpand,
+ RetainExpansion, NumExpansions)) {
Invalid = true;
continue;
}
@@ -15813,12 +15825,9 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
return ExprError();
}
- // Copy the LSI before ActOnFinishFunctionBody removes it.
- // FIXME: This is dumb. Store the lambda information somewhere that outlives
- // the call operator.
- auto LSICopy = *LSI;
getSema().ActOnFinishFunctionBody(NewCallOperator, Body.get(),
- /*IsInstantiation*/ true);
+ /*IsInstantiation=*/true,
+ /*RetainFunctionScopeInfo=*/true);
SavedContext.pop();
// Recompute the dependency of the lambda so that we can defer the lambda call
@@ -15854,15 +15863,11 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
// *after* the substitution in case we can't decide the dependency
// so early, e.g. because we want to see if any of the *substituted*
// parameters are dependent.
- DependencyKind = getDerived().ComputeLambdaDependency(&LSICopy);
+ DependencyKind = getDerived().ComputeLambdaDependency(LSI);
Class->setLambdaDependencyKind(DependencyKind);
- // Clean up the type cache created previously. Then, we re-create a type for
- // such Decl with the new DependencyKind.
- Class->setTypeForDecl(nullptr);
- getSema().Context.getTypeDeclType(Class);
return getDerived().RebuildLambdaExpr(E->getBeginLoc(),
- Body.get()->getEndLoc(), &LSICopy);
+ Body.get()->getEndLoc(), LSI);
}
template<typename Derived>
@@ -16202,10 +16207,10 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
bool ShouldExpand = false;
bool RetainExpansion = false;
UnsignedOrNone NumExpansions = std::nullopt;
- if (getDerived().TryExpandParameterPacks(E->getOperatorLoc(), E->getPackLoc(),
- Unexpanded,
- ShouldExpand, RetainExpansion,
- NumExpansions))
+ if (getDerived().TryExpandParameterPacks(
+ E->getOperatorLoc(), E->getPackLoc(), Unexpanded,
+ /*FailOnPackProducingTemplates=*/true, ShouldExpand,
+ RetainExpansion, NumExpansions))
return ExprError();
// If we need to expand the pack, build a template argument from it and
@@ -16322,7 +16327,8 @@ TreeTransform<Derived>::TransformPackIndexingExpr(PackIndexingExpr *E) {
NumExpansions = std::nullopt;
if (getDerived().TryExpandParameterPacks(
E->getEllipsisLoc(), Pattern->getSourceRange(), Unexpanded,
- ShouldExpand, RetainExpansion, NumExpansions))
+ /*FailOnPackProducingTemplates=*/true, ShouldExpand,
+ RetainExpansion, NumExpansions))
return true;
if (!ShouldExpand) {
Sema::ArgPackSubstIndexRAII SubstIndex(getSema(), std::nullopt);
@@ -16428,11 +16434,10 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
bool RetainExpansion = false;
UnsignedOrNone OrigNumExpansions = E->getNumExpansions(),
NumExpansions = OrigNumExpansions;
- if (getDerived().TryExpandParameterPacks(E->getEllipsisLoc(),
- Pattern->getSourceRange(),
- Unexpanded,
- Expand, RetainExpansion,
- NumExpansions))
+ if (getDerived().TryExpandParameterPacks(
+ E->getEllipsisLoc(), Pattern->getSourceRange(), Unexpanded,
+ /*FailOnPackProducingTemplates=*/true, Expand, RetainExpansion,
+ NumExpansions))
return true;
if (!Expand) {
@@ -16666,9 +16671,10 @@ TreeTransform<Derived>::TransformObjCDictionaryLiteral(
UnsignedOrNone NumExpansions = OrigNumExpansions;
SourceRange PatternRange(OrigElement.Key->getBeginLoc(),
OrigElement.Value->getEndLoc());
- if (getDerived().TryExpandParameterPacks(OrigElement.EllipsisLoc,
- PatternRange, Unexpanded, Expand,
- RetainExpansion, NumExpansions))
+ if (getDerived().TryExpandParameterPacks(
+ OrigElement.EllipsisLoc, PatternRange, Unexpanded,
+ /*FailOnPackProducingTemplates=*/true, Expand, RetainExpansion,
+ NumExpansions))
return ExprError();
if (!Expand) {
@@ -17362,9 +17368,10 @@ QualType TreeTransform<Derived>::RebuildFunctionNoProtoType(QualType T) {
return SemaRef.Context.getFunctionNoProtoType(T);
}
-template<typename Derived>
-QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
- Decl *D) {
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(
+ ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
+ SourceLocation NameLoc, Decl *D) {
assert(D && "no decl found");
if (D->isInvalidDecl()) return QualType();
@@ -17374,7 +17381,7 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
// UsingDecls, but they must each have exactly one type, and it must be
// the same type in every case. But we must have at least one expansion!
if (UPD->expansions().empty()) {
- getSema().Diag(Loc, diag::err_using_pack_expansion_empty)
+ getSema().Diag(NameLoc, diag::err_using_pack_expansion_empty)
<< UPD->isCXXClassMember() << UPD;
return QualType();
}
@@ -17385,10 +17392,11 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
QualType FallbackT;
QualType T;
for (auto *E : UPD->expansions()) {
- QualType ThisT = RebuildUnresolvedUsingType(Loc, E);
+ QualType ThisT =
+ RebuildUnresolvedUsingType(Keyword, Qualifier, NameLoc, E);
if (ThisT.isNull())
continue;
- else if (ThisT->getAs<UnresolvedUsingType>())
+ if (ThisT->getAs<UnresolvedUsingType>())
FallbackT = ThisT;
else if (T.isNull())
T = ThisT;
@@ -17397,7 +17405,8 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
"mismatched resolved types in using pack expansion");
}
return T.isNull() ? FallbackT : T;
- } else if (auto *Using = dyn_cast<UsingDecl>(D)) {
+ }
+ if (auto *Using = dyn_cast<UsingDecl>(D)) {
assert(Using->hasTypename() &&
"UnresolvedUsingTypenameDecl transformed to non-typename using");
@@ -17405,17 +17414,14 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
assert(++Using->shadow_begin() == Using->shadow_end());
UsingShadowDecl *Shadow = *Using->shadow_begin();
- if (SemaRef.DiagnoseUseOfDecl(Shadow->getTargetDecl(), Loc))
+ if (SemaRef.DiagnoseUseOfDecl(Shadow->getTargetDecl(), NameLoc))
return QualType();
- return SemaRef.Context.getUsingType(
- Shadow, SemaRef.Context.getTypeDeclType(
- cast<TypeDecl>(Shadow->getTargetDecl())));
- } else {
- assert(isa<UnresolvedUsingTypenameDecl>(D) &&
- "UnresolvedUsingTypenameDecl transformed to non-using decl");
- return SemaRef.Context.getTypeDeclType(
- cast<UnresolvedUsingTypenameDecl>(D));
+ return SemaRef.Context.getUsingType(Keyword, Qualifier, Shadow);
}
+ assert(isa<UnresolvedUsingTypenameDecl>(D) &&
+ "UnresolvedUsingTypenameDecl transformed to non-using decl");
+ return SemaRef.Context.getUnresolvedUsingType(
+ Keyword, Qualifier, cast<UnresolvedUsingTypenameDecl>(D));
}
template <typename Derived>
@@ -17451,12 +17457,12 @@ QualType TreeTransform<Derived>::RebuildUnaryTransformType(QualType BaseType,
return SemaRef.BuildUnaryTransformType(BaseType, UKind, Loc);
}
-template<typename Derived>
+template <typename Derived>
QualType TreeTransform<Derived>::RebuildTemplateSpecializationType(
- TemplateName Template,
- SourceLocation TemplateNameLoc,
- TemplateArgumentListInfo &TemplateArgs) {
- return SemaRef.CheckTemplateIdType(Template, TemplateNameLoc, TemplateArgs);
+ ElaboratedTypeKeyword Keyword, TemplateName Template,
+ SourceLocation TemplateNameLoc, TemplateArgumentListInfo &TemplateArgs) {
+ return SemaRef.CheckTemplateIdType(Keyword, Template, TemplateNameLoc,
+ TemplateArgs);
}
template<typename Derived>
@@ -17490,24 +17496,18 @@ QualType TreeTransform<Derived>::RebuildDependentBitIntType(
return SemaRef.BuildBitIntType(IsUnsigned, NumBitsExpr, Loc);
}
-template<typename Derived>
-TemplateName
-TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
- bool TemplateKW,
- TemplateDecl *Template) {
+template <typename Derived>
+TemplateName TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
+ bool TemplateKW,
+ TemplateName Name) {
return SemaRef.Context.getQualifiedTemplateName(SS.getScopeRep(), TemplateKW,
- TemplateName(Template));
+ Name);
}
-template<typename Derived>
-TemplateName
-TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- const IdentifierInfo &Name,
- SourceLocation NameLoc,
- QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
- bool AllowInjectedClassName) {
+template <typename Derived>
+TemplateName TreeTransform<Derived>::RebuildTemplateName(
+ CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const IdentifierInfo &Name,
+ SourceLocation NameLoc, QualType ObjectType, bool AllowInjectedClassName) {
UnqualifiedId TemplateName;
TemplateName.setIdentifier(&Name, NameLoc);
Sema::TemplateTy Template;
@@ -17630,12 +17630,13 @@ TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage Destroyed) {
- QualType BaseType = Base->getType();
+ QualType CanonicalBaseType = Base->getType().getCanonicalType();
if (Base->isTypeDependent() || Destroyed.getIdentifier() ||
- (!isArrow && !BaseType->getAs<RecordType>()) ||
- (isArrow && BaseType->getAs<PointerType>() &&
- !BaseType->castAs<PointerType>()->getPointeeType()
- ->template getAs<RecordType>())){
+ (!isArrow && !isa<RecordType>(CanonicalBaseType)) ||
+ (isArrow && isa<PointerType>(CanonicalBaseType) &&
+ !cast<PointerType>(CanonicalBaseType)
+ ->getPointeeType()
+ ->getAsCanonical<RecordType>())) {
// This pseudo-destructor expression is still a pseudo-destructor.
return SemaRef.BuildPseudoDestructorExpr(
Base, OperatorLoc, isArrow ? tok::arrow : tok::period, SS, ScopeType,
@@ -17649,25 +17650,24 @@ TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base,
NameInfo.setNamedTypeInfo(DestroyedType);
// The scope type is now known to be a valid nested name specifier
- // component. Tack it on to the end of the nested name specifier.
+ // component. Tack it on to the nested name specifier.
if (ScopeType) {
- if (!ScopeType->getType()->getAs<TagType>()) {
+ if (!isa<TagType>(ScopeType->getType().getCanonicalType())) {
getSema().Diag(ScopeType->getTypeLoc().getBeginLoc(),
diag::err_expected_class_or_namespace)
<< ScopeType->getType() << getSema().getLangOpts().CPlusPlus;
return ExprError();
}
- SS.Extend(SemaRef.Context, ScopeType->getTypeLoc(), CCLoc);
+ SS.clear();
+ SS.Make(SemaRef.Context, ScopeType->getTypeLoc(), CCLoc);
}
SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
- return getSema().BuildMemberReferenceExpr(Base, BaseType,
- OperatorLoc, isArrow,
- SS, TemplateKWLoc,
- /*FIXME: FirstQualifier*/ nullptr,
- NameInfo,
- /*TemplateArgs*/ nullptr,
- /*S*/nullptr);
+ return getSema().BuildMemberReferenceExpr(
+ Base, Base->getType(), OperatorLoc, isArrow, SS, TemplateKWLoc,
+ /*FIXME: FirstQualifier*/ nullptr, NameInfo,
+ /*TemplateArgs*/ nullptr,
+ /*S*/ nullptr);
}
template<typename Derived>
diff --git a/clang/lib/Sema/UsedDeclVisitor.h b/clang/lib/Sema/UsedDeclVisitor.h
index 580d702..546b3a9 100644
--- a/clang/lib/Sema/UsedDeclVisitor.h
+++ b/clang/lib/Sema/UsedDeclVisitor.h
@@ -70,11 +70,10 @@ public:
QualType DestroyedOrNull = E->getDestroyedType();
if (!DestroyedOrNull.isNull()) {
QualType Destroyed = S.Context.getBaseElementType(DestroyedOrNull);
- if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
- if (Record->getDefinition())
- asImpl().visitUsedDecl(E->getBeginLoc(), S.LookupDestructor(Record));
- }
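+      // Only mark the destructor used when the class is defined or is
+      // currently being defined.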
+ if (auto *Record = Destroyed->getAsCXXRecordDecl();
+ Record &&
+ (Record->isBeingDefined() || Record->isCompleteDefinition()))
+ asImpl().visitUsedDecl(E->getBeginLoc(), S.LookupDestructor(Record));
}
Inherited::VisitCXXDeleteExpr(E);
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index ed0ec9e..7268af6 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -5500,7 +5500,7 @@ void ASTReader::InitializeContext() {
Error("Invalid FILE type in AST file");
return;
}
- Context.setFILEDecl(Tag->getDecl());
+ Context.setFILEDecl(Tag->getOriginalDecl());
}
}
}
@@ -5521,7 +5521,7 @@ void ASTReader::InitializeContext() {
Error("Invalid jmp_buf type in AST file");
return;
}
- Context.setjmp_bufDecl(Tag->getDecl());
+ Context.setjmp_bufDecl(Tag->getOriginalDecl());
}
}
}
@@ -5539,7 +5539,7 @@ void ASTReader::InitializeContext() {
else {
const TagType *Tag = Sigjmp_bufType->getAs<TagType>();
assert(Tag && "Invalid sigjmp_buf type in AST file");
- Context.setsigjmp_bufDecl(Tag->getDecl());
+ Context.setsigjmp_bufDecl(Tag->getOriginalDecl());
}
}
}
@@ -5574,7 +5574,7 @@ void ASTReader::InitializeContext() {
else {
const TagType *Tag = Ucontext_tType->getAs<TagType>();
assert(Tag && "Invalid ucontext_t type in AST file");
- Context.setucontext_tDecl(Tag->getDecl());
+ Context.setucontext_tDecl(Tag->getOriginalDecl());
}
}
}
@@ -7226,6 +7226,7 @@ public:
void VisitFunctionTypeLoc(FunctionTypeLoc);
void VisitArrayTypeLoc(ArrayTypeLoc);
+ void VisitTagTypeLoc(TagTypeLoc TL);
};
} // namespace clang
@@ -7372,15 +7373,24 @@ void TypeLocReader::VisitFunctionNoProtoTypeLoc(FunctionNoProtoTypeLoc TL) {
}
void TypeLocReader::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
- TL.setNameLoc(readSourceLocation());
+ SourceLocation ElaboratedKeywordLoc = readSourceLocation();
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc();
+ SourceLocation NameLoc = readSourceLocation();
+ TL.set(ElaboratedKeywordLoc, QualifierLoc, NameLoc);
}
void TypeLocReader::VisitUsingTypeLoc(UsingTypeLoc TL) {
- TL.setNameLoc(readSourceLocation());
+ SourceLocation ElaboratedKeywordLoc = readSourceLocation();
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc();
+ SourceLocation NameLoc = readSourceLocation();
+ TL.set(ElaboratedKeywordLoc, QualifierLoc, NameLoc);
}
void TypeLocReader::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
- TL.setNameLoc(readSourceLocation());
+ SourceLocation ElaboratedKeywordLoc = readSourceLocation();
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc();
+ SourceLocation NameLoc = readSourceLocation();
+ TL.set(ElaboratedKeywordLoc, QualifierLoc, NameLoc);
}
void TypeLocReader::VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
@@ -7434,17 +7444,27 @@ void TypeLocReader::VisitAutoTypeLoc(AutoTypeLoc TL) {
void TypeLocReader::VisitDeducedTemplateSpecializationTypeLoc(
DeducedTemplateSpecializationTypeLoc TL) {
+ TL.setElaboratedKeywordLoc(readSourceLocation());
+ TL.setQualifierLoc(ReadNestedNameSpecifierLoc());
TL.setTemplateNameLoc(readSourceLocation());
}
-void TypeLocReader::VisitRecordTypeLoc(RecordTypeLoc TL) {
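+// Tag types now carry the elaborated keyword and qualifier themselves, so a
+// single helper covers record, enum, and injected-class-name locations.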
+void TypeLocReader::VisitTagTypeLoc(TagTypeLoc TL) {
+ TL.setElaboratedKeywordLoc(readSourceLocation());
+ TL.setQualifierLoc(ReadNestedNameSpecifierLoc());
TL.setNameLoc(readSourceLocation());
}
-void TypeLocReader::VisitEnumTypeLoc(EnumTypeLoc TL) {
- TL.setNameLoc(readSourceLocation());
+void TypeLocReader::VisitRecordTypeLoc(RecordTypeLoc TL) {
+ VisitTagTypeLoc(TL);
+}
+
+void TypeLocReader::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
+ VisitTagTypeLoc(TL);
}
+void TypeLocReader::VisitEnumTypeLoc(EnumTypeLoc TL) { VisitTagTypeLoc(TL); }
+
void TypeLocReader::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
TL.setAttr(ReadAttr());
}
@@ -7480,16 +7500,25 @@ void TypeLocReader::VisitSubstTemplateTypeParmPackTypeLoc(
TL.setNameLoc(readSourceLocation());
}
+void TypeLocReader::VisitSubstBuiltinTemplatePackTypeLoc(
+ SubstBuiltinTemplatePackTypeLoc TL) {
+ TL.setNameLoc(readSourceLocation());
+}
+
void TypeLocReader::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
- TL.setTemplateKeywordLoc(readSourceLocation());
- TL.setTemplateNameLoc(readSourceLocation());
- TL.setLAngleLoc(readSourceLocation());
- TL.setRAngleLoc(readSourceLocation());
- for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
- TL.setArgLocInfo(i,
- Reader.readTemplateArgumentLocInfo(
- TL.getTypePtr()->template_arguments()[i].getKind()));
+ SourceLocation ElaboratedKeywordLoc = readSourceLocation();
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc();
+ SourceLocation TemplateKeywordLoc = readSourceLocation();
+ SourceLocation NameLoc = readSourceLocation();
+ SourceLocation LAngleLoc = readSourceLocation();
+ SourceLocation RAngleLoc = readSourceLocation();
+ TL.set(ElaboratedKeywordLoc, QualifierLoc, TemplateKeywordLoc, NameLoc,
+ LAngleLoc, RAngleLoc);
+ MutableArrayRef<TemplateArgumentLocInfo> Args = TL.getArgLocInfos();
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
+ Args[I] = Reader.readTemplateArgumentLocInfo(
+ TL.getTypePtr()->template_arguments()[I].getKind());
}
void TypeLocReader::VisitParenTypeLoc(ParenTypeLoc TL) {
@@ -7497,15 +7526,6 @@ void TypeLocReader::VisitParenTypeLoc(ParenTypeLoc TL) {
TL.setRParenLoc(readSourceLocation());
}
-void TypeLocReader::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- TL.setElaboratedKeywordLoc(readSourceLocation());
- TL.setQualifierLoc(ReadNestedNameSpecifierLoc());
-}
-
-void TypeLocReader::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
- TL.setNameLoc(readSourceLocation());
-}
-
void TypeLocReader::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
TL.setElaboratedKeywordLoc(readSourceLocation());
TL.setQualifierLoc(ReadNestedNameSpecifierLoc());
@@ -7962,18 +7982,15 @@ ASTRecordReader::readTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind) {
return readExpr();
case TemplateArgument::Type:
return readTypeSourceInfo();
- case TemplateArgument::Template: {
- NestedNameSpecifierLoc QualifierLoc =
- readNestedNameSpecifierLoc();
- SourceLocation TemplateNameLoc = readSourceLocation();
- return TemplateArgumentLocInfo(getASTContext(), QualifierLoc,
- TemplateNameLoc, SourceLocation());
- }
+ case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion: {
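+    // Both kinds share one layout; only an expansion carries an ellipsis
+    // location.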
+ SourceLocation TemplateKWLoc = readSourceLocation();
NestedNameSpecifierLoc QualifierLoc = readNestedNameSpecifierLoc();
SourceLocation TemplateNameLoc = readSourceLocation();
- SourceLocation EllipsisLoc = readSourceLocation();
- return TemplateArgumentLocInfo(getASTContext(), QualifierLoc,
+ SourceLocation EllipsisLoc = Kind == TemplateArgument::TemplateExpansion
+ ? readSourceLocation()
+ : SourceLocation();
+ return TemplateArgumentLocInfo(getASTContext(), TemplateKWLoc, QualifierLoc,
TemplateNameLoc, EllipsisLoc);
}
case TemplateArgument::Null:
@@ -9586,12 +9603,6 @@ void ASTReader::AssignedLambdaNumbering(CXXRecordDecl *Lambda) {
CXXRecordDecl *Previous =
cast<CXXRecordDecl>(Iter->second)->getMostRecentDecl();
Lambda->setPreviousDecl(Previous);
- // FIXME: It will be best to use the Previous type when we creating the
- // lambda directly. But that requires us to get the lambda context decl and
- // lambda index before creating the lambda, which needs a drastic change in
- // the parser.
- const_cast<QualType &>(Lambda->TypeForDecl->CanonicalType) =
- Previous->TypeForDecl->CanonicalType;
return;
}
@@ -10107,41 +10118,37 @@ ASTRecordReader::readNestedNameSpecifierLoc() {
for (unsigned I = 0; I != N; ++I) {
auto Kind = readNestedNameSpecifierKind();
switch (Kind) {
- case NestedNameSpecifier::Identifier: {
- IdentifierInfo *II = readIdentifier();
- SourceRange Range = readSourceRange();
- Builder.Extend(Context, II, Range.getBegin(), Range.getEnd());
- break;
- }
-
- case NestedNameSpecifier::Namespace: {
+ case NestedNameSpecifier::Kind::Namespace: {
auto *NS = readDeclAs<NamespaceBaseDecl>();
SourceRange Range = readSourceRange();
Builder.Extend(Context, NS, Range.getBegin(), Range.getEnd());
break;
}
- case NestedNameSpecifier::TypeSpec: {
+ case NestedNameSpecifier::Kind::Type: {
TypeSourceInfo *T = readTypeSourceInfo();
if (!T)
return NestedNameSpecifierLoc();
SourceLocation ColonColonLoc = readSourceLocation();
- Builder.Extend(Context, T->getTypeLoc(), ColonColonLoc);
+ Builder.Make(Context, T->getTypeLoc(), ColonColonLoc);
break;
}
- case NestedNameSpecifier::Global: {
+ case NestedNameSpecifier::Kind::Global: {
SourceLocation ColonColonLoc = readSourceLocation();
Builder.MakeGlobal(Context, ColonColonLoc);
break;
}
- case NestedNameSpecifier::Super: {
+ case NestedNameSpecifier::Kind::MicrosoftSuper: {
CXXRecordDecl *RD = readDeclAs<CXXRecordDecl>();
SourceRange Range = readSourceRange();
- Builder.MakeSuper(Context, RD, Range.getBegin(), Range.getEnd());
+ Builder.MakeMicrosoftSuper(Context, RD, Range.getBegin(), Range.getEnd());
break;
}
+
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
}
@@ -10537,12 +10544,7 @@ void ASTReader::finishPendingActions() {
// happen now, after the redeclaration chains have been fully wired.
for (Decl *D : PendingDefinitions) {
if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
- if (const TagType *TagT = dyn_cast<TagType>(TD->getTypeForDecl())) {
- // Make sure that the TagType points at the definition.
- const_cast<TagType*>(TagT)->decl = TD;
- }
-
- if (auto RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (auto *RD = dyn_cast<CXXRecordDecl>(TD)) {
for (auto *R = getMostRecentExistingDecl(RD); R;
R = R->getPreviousDecl()) {
assert((R == D) ==
@@ -11006,8 +11008,9 @@ void ASTReader::diagnoseOdrViolations() {
}
void ASTReader::StartedDeserializing() {
- if (++NumCurrentElementsDeserializing == 1 && ReadTimer.get())
- ReadTimer->startTimer();
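+  // The TimeRegion is released in FinishedDeserializing below, which stops
+  // the timer.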
+ if (llvm::Timer *T = ReadTimer.get();
+ ++NumCurrentElementsDeserializing == 1 && T)
+ ReadTimeRegion.emplace(T);
}
void ASTReader::FinishedDeserializing() {
@@ -11065,8 +11068,7 @@ void ASTReader::FinishedDeserializing() {
(void)UndeducedFD->getMostRecentDecl();
}
- if (ReadTimer)
- ReadTimer->stopTimer();
+ ReadTimeRegion.reset();
diagnoseOdrViolations();
}
@@ -11840,6 +11842,7 @@ void OMPClauseReader::VisitOMPSeverityClause(OMPSeverityClause *C) {
}
void OMPClauseReader::VisitOMPMessageClause(OMPMessageClause *C) {
+ VisitOMPClauseWithPreInit(C);
C->setMessageString(Record.readSubExpr());
C->setLParenLoc(Record.readSourceLocation());
}
@@ -12999,8 +13002,16 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() {
SourceLocation LParenLoc = readSourceLocation();
OpenACCReductionOperator Op = readEnum<OpenACCReductionOperator>();
llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ llvm::SmallVector<OpenACCReductionRecipe> RecipeList;
+
+ for (unsigned I = 0; I < VarList.size(); ++I) {
+ VarDecl *Recipe = readDeclAs<VarDecl>();
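+    // The recipe is currently a single pointer-sized field; the static_assert
+    // guards against it growing without this (de)serialization being updated.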
+ static_assert(sizeof(OpenACCReductionRecipe) == sizeof(int *));
+ RecipeList.push_back({Recipe});
+ }
+
return OpenACCReductionClause::Create(getContext(), BeginLoc, LParenLoc, Op,
- VarList, EndLoc);
+ VarList, RecipeList, EndLoc);
}
case OpenACCClauseKind::Seq:
return OpenACCSeqClause::Create(getContext(), BeginLoc, EndLoc);
diff --git a/clang/lib/Serialization/ASTReaderDecl.cpp b/clang/lib/Serialization/ASTReaderDecl.cpp
index 2c7beb4..6b35b20 100644
--- a/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -538,7 +538,11 @@ void ASTDeclReader::Visit(Decl *D) {
if (auto *TD = dyn_cast<TypeDecl>(D)) {
// We have a fully initialized TypeDecl. Read its type now.
- TD->setTypeForDecl(Reader.GetType(DeferredTypeID).getTypePtrOrNull());
+ if (isa<TagDecl, TypedefDecl, TypeAliasDecl>(TD))
+ assert(DeferredTypeID == 0 &&
+ "Deferred type not used for TagDecls and Typedefs");
+ else
+ TD->setTypeForDecl(Reader.GetType(DeferredTypeID).getTypePtrOrNull());
// If this is a tag declaration with a typedef name for linkage, it's safe
// to load that typedef now.
@@ -695,7 +699,8 @@ void ASTDeclReader::VisitTypeDecl(TypeDecl *TD) {
VisitNamedDecl(TD);
TD->setLocStart(readSourceLocation());
// Delay type reading until after we have fully initialized the decl.
- DeferredTypeID = Record.getGlobalTypeID(Record.readInt());
+ if (!isa<TagDecl, TypedefDecl, TypeAliasDecl>(TD))
+ DeferredTypeID = Record.getGlobalTypeID(Record.readInt());
}
RedeclarableResult ASTDeclReader::VisitTypedefNameDecl(TypedefNameDecl *TD) {
@@ -2237,15 +2242,6 @@ RedeclarableResult ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
// Merged when we merge the template.
auto *Template = readDeclAs<ClassTemplateDecl>();
D->TemplateOrInstantiation = Template;
- if (!Template->getTemplatedDecl()) {
- // We've not actually loaded the ClassTemplateDecl yet, because we're
- // currently being loaded as its pattern. Rely on it to set up our
- // TypeForDecl (see VisitClassTemplateDecl).
- //
- // Beware: we do not yet know our canonical declaration, and may still
- // get merged once the surrounding class template has got off the ground.
- DeferredTypeID = 0;
- }
break;
}
case CXXRecMemberSpecialization: {
@@ -2479,14 +2475,6 @@ void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
ReadSpecializations(*Loc.F, D, Loc.F->DeclsCursor, /*IsPartial=*/false);
ReadSpecializations(*Loc.F, D, Loc.F->DeclsCursor, /*IsPartial=*/true);
}
-
- if (D->getTemplatedDecl()->TemplateOrInstantiation) {
- // We were loaded before our templated declaration was. We've not set up
- // its corresponding type yet (see VisitCXXRecordDeclImpl), so reconstruct
- // it now.
- Reader.getContext().getInjectedClassNameType(
- D->getTemplatedDecl(), D->getInjectedClassNameSpecialization());
- }
}
void ASTDeclReader::VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D) {
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index c072acd..293b67a 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -345,6 +345,7 @@ public:
void VisitArrayTypeLoc(ArrayTypeLoc TyLoc);
void VisitFunctionTypeLoc(FunctionTypeLoc TyLoc);
+ void VisitTagTypeLoc(TagTypeLoc TL);
};
} // namespace
@@ -490,14 +491,20 @@ void TypeLocWriter::VisitFunctionNoProtoTypeLoc(FunctionNoProtoTypeLoc TL) {
}
void TypeLocWriter::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
+ addSourceLocation(TL.getElaboratedKeywordLoc());
+ Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitUsingTypeLoc(UsingTypeLoc TL) {
+ addSourceLocation(TL.getElaboratedKeywordLoc());
+ Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
+ addSourceLocation(TL.getElaboratedKeywordLoc());
+ Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
addSourceLocation(TL.getNameLoc());
}
@@ -564,17 +571,27 @@ void TypeLocWriter::VisitAutoTypeLoc(AutoTypeLoc TL) {
void TypeLocWriter::VisitDeducedTemplateSpecializationTypeLoc(
DeducedTemplateSpecializationTypeLoc TL) {
+ addSourceLocation(TL.getElaboratedKeywordLoc());
+ Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
addSourceLocation(TL.getTemplateNameLoc());
}
-void TypeLocWriter::VisitRecordTypeLoc(RecordTypeLoc TL) {
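+// Mirrors TypeLocReader::VisitTagTypeLoc: one helper writes the common tag
+// type location data.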
+void TypeLocWriter::VisitTagTypeLoc(TagTypeLoc TL) {
+ addSourceLocation(TL.getElaboratedKeywordLoc());
+ Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
addSourceLocation(TL.getNameLoc());
}
-void TypeLocWriter::VisitEnumTypeLoc(EnumTypeLoc TL) {
- addSourceLocation(TL.getNameLoc());
+void TypeLocWriter::VisitRecordTypeLoc(RecordTypeLoc TL) {
+ VisitTagTypeLoc(TL);
+}
+
+void TypeLocWriter::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
+ VisitTagTypeLoc(TL);
}
+void TypeLocWriter::VisitEnumTypeLoc(EnumTypeLoc TL) { VisitTagTypeLoc(TL); }
+
void TypeLocWriter::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
Record.AddAttr(TL.getAttr());
}
@@ -610,15 +627,21 @@ void TypeLocWriter::VisitSubstTemplateTypeParmPackTypeLoc(
addSourceLocation(TL.getNameLoc());
}
+void TypeLocWriter::VisitSubstBuiltinTemplatePackTypeLoc(
+ SubstBuiltinTemplatePackTypeLoc TL) {
+ addSourceLocation(TL.getNameLoc());
+}
+
void TypeLocWriter::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
+ addSourceLocation(TL.getElaboratedKeywordLoc());
+ Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
addSourceLocation(TL.getTemplateKeywordLoc());
addSourceLocation(TL.getTemplateNameLoc());
addSourceLocation(TL.getLAngleLoc());
addSourceLocation(TL.getRAngleLoc());
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
- Record.AddTemplateArgumentLocInfo(TL.getArgLoc(i).getArgument().getKind(),
- TL.getArgLoc(i).getLocInfo());
+ Record.AddTemplateArgumentLocInfo(TL.getArgLoc(i));
}
void TypeLocWriter::VisitParenTypeLoc(ParenTypeLoc TL) {
@@ -630,15 +653,6 @@ void TypeLocWriter::VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
addSourceLocation(TL.getExpansionLoc());
}
-void TypeLocWriter::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- addSourceLocation(TL.getElaboratedKeywordLoc());
- Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
-}
-
-void TypeLocWriter::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
- addSourceLocation(TL.getNameLoc());
-}
-
void TypeLocWriter::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
addSourceLocation(TL.getElaboratedKeywordLoc());
Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
@@ -654,8 +668,7 @@ void TypeLocWriter::VisitDependentTemplateSpecializationTypeLoc(
addSourceLocation(TL.getLAngleLoc());
addSourceLocation(TL.getRAngleLoc());
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
- Record.AddTemplateArgumentLocInfo(TL.getArgLoc(I).getArgument().getKind(),
- TL.getArgLoc(I).getLocInfo());
+ Record.AddTemplateArgumentLocInfo(TL.getArgLoc(I));
}
void TypeLocWriter::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
@@ -1038,7 +1051,6 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(TYPE_OBJC_INTERFACE);
RECORD(TYPE_OBJC_OBJECT_POINTER);
RECORD(TYPE_DECLTYPE);
- RECORD(TYPE_ELABORATED);
RECORD(TYPE_SUBST_TEMPLATE_TYPE_PARM);
RECORD(TYPE_UNRESOLVED_USING);
RECORD(TYPE_INJECTED_CLASS_NAME);
@@ -1053,6 +1065,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(TYPE_PACK_EXPANSION);
RECORD(TYPE_ATTRIBUTED);
RECORD(TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK);
+ RECORD(TYPE_SUBST_BUILTIN_TEMPLATE_PACK);
RECORD(TYPE_AUTO);
RECORD(TYPE_UNARY_TRANSFORM);
RECORD(TYPE_ATOMIC);
@@ -6769,22 +6782,22 @@ void ASTRecordWriter::AddCXXTemporary(const CXXTemporary *Temp) {
}
void ASTRecordWriter::AddTemplateArgumentLocInfo(
- TemplateArgument::ArgKind Kind, const TemplateArgumentLocInfo &Arg) {
- switch (Kind) {
+ const TemplateArgumentLoc &Arg) {
+ const TemplateArgumentLocInfo &Info = Arg.getLocInfo();
+ switch (auto K = Arg.getArgument().getKind()) {
case TemplateArgument::Expression:
- AddStmt(Arg.getAsExpr());
+ AddStmt(Info.getAsExpr());
break;
case TemplateArgument::Type:
- AddTypeSourceInfo(Arg.getAsTypeSourceInfo());
+ AddTypeSourceInfo(Info.getAsTypeSourceInfo());
break;
case TemplateArgument::Template:
- AddNestedNameSpecifierLoc(Arg.getTemplateQualifierLoc());
- AddSourceLocation(Arg.getTemplateNameLoc());
- break;
case TemplateArgument::TemplateExpansion:
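+    // Template and TemplateExpansion share a layout; the ellipsis location
+    // is written only for expansions.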
+ AddSourceLocation(Arg.getTemplateKWLoc());
AddNestedNameSpecifierLoc(Arg.getTemplateQualifierLoc());
AddSourceLocation(Arg.getTemplateNameLoc());
- AddSourceLocation(Arg.getTemplateEllipsisLoc());
+ if (K == TemplateArgument::TemplateExpansion)
+ AddSourceLocation(Arg.getTemplateEllipsisLoc());
break;
case TemplateArgument::Null:
case TemplateArgument::Integral:
@@ -6807,7 +6820,7 @@ void ASTRecordWriter::AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg) {
if (InfoHasSameExpr)
return; // Avoid storing the same expr twice.
}
- AddTemplateArgumentLocInfo(Arg.getArgument().getKind(), Arg.getLocInfo());
+ AddTemplateArgumentLocInfo(Arg);
}
void ASTRecordWriter::AddTypeSourceInfo(TypeSourceInfo *TInfo) {
@@ -7065,49 +7078,50 @@ void ASTRecordWriter::AddQualifierInfo(const QualifierInfo &Info) {
AddTemplateParameterList(Info.TemplParamLists[i]);
}
-void ASTRecordWriter::AddNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) {
+void ASTRecordWriter::AddNestedNameSpecifierLoc(
+ NestedNameSpecifierLoc QualifierLoc) {
// Nested name specifiers are rarely deep; inline storage for 8 elements
// covers the vast majority.
SmallVector<NestedNameSpecifierLoc, 8> NestedNames;
// Push each of the nested-name-specifiers onto a stack for
// serialization in reverse order.
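// For example, 'A::B::' is pushed as [A::B::, A::] and then written
// outermost-first: first A::, then B::.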
- while (NNS) {
- NestedNames.push_back(NNS);
- NNS = NNS.getPrefix();
+ while (QualifierLoc) {
+ NestedNames.push_back(QualifierLoc);
+ QualifierLoc = QualifierLoc.getAsNamespaceAndPrefix().Prefix;
}
Record->push_back(NestedNames.size());
while (!NestedNames.empty()) {
- NNS = NestedNames.pop_back_val();
- NestedNameSpecifier::SpecifierKind Kind
- = NNS.getNestedNameSpecifier()->getKind();
- Record->push_back(Kind);
+ QualifierLoc = NestedNames.pop_back_val();
+ NestedNameSpecifier Qualifier = QualifierLoc.getNestedNameSpecifier();
+ NestedNameSpecifier::Kind Kind = Qualifier.getKind();
+ Record->push_back(llvm::to_underlying(Kind));
switch (Kind) {
- case NestedNameSpecifier::Identifier:
- AddIdentifierRef(NNS.getNestedNameSpecifier()->getAsIdentifier());
- AddSourceRange(NNS.getLocalSourceRange());
+ case NestedNameSpecifier::Kind::Namespace:
+ AddDeclRef(Qualifier.getAsNamespaceAndPrefix().Namespace);
+ AddSourceRange(QualifierLoc.getLocalSourceRange());
break;
- case NestedNameSpecifier::Namespace:
- AddDeclRef(NNS.getNestedNameSpecifier()->getAsNamespace());
- AddSourceRange(NNS.getLocalSourceRange());
+ case NestedNameSpecifier::Kind::Type: {
+ TypeLoc TL = QualifierLoc.castAsTypeLoc();
+ AddTypeRef(TL.getType());
+ AddTypeLoc(TL);
+ AddSourceLocation(QualifierLoc.getLocalSourceRange().getEnd());
break;
+ }
- case NestedNameSpecifier::TypeSpec:
- AddTypeRef(NNS.getTypeLoc().getType());
- AddTypeLoc(NNS.getTypeLoc());
- AddSourceLocation(NNS.getLocalSourceRange().getEnd());
+ case NestedNameSpecifier::Kind::Global:
+ AddSourceLocation(QualifierLoc.getLocalSourceRange().getEnd());
break;
- case NestedNameSpecifier::Global:
- AddSourceLocation(NNS.getLocalSourceRange().getEnd());
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ AddDeclRef(Qualifier.getAsMicrosoftSuper());
+ AddSourceRange(QualifierLoc.getLocalSourceRange());
break;
- case NestedNameSpecifier::Super:
- AddDeclRef(NNS.getNestedNameSpecifier()->getAsRecordDecl());
- AddSourceRange(NNS.getLocalSourceRange());
- break;
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
}
}
@@ -8533,6 +8547,7 @@ void OMPClauseWriter::VisitOMPSeverityClause(OMPSeverityClause *C) {
}
void OMPClauseWriter::VisitOMPMessageClause(OMPMessageClause *C) {
+ VisitOMPClauseWithPreInit(C);
Record.AddStmt(C->getMessageString());
Record.AddSourceLocation(C->getLParenLoc());
}
@@ -8878,6 +8893,11 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) {
writeSourceLocation(RC->getLParenLoc());
writeEnum(RC->getReductionOp());
writeOpenACCVarList(RC);
+
+ for (const OpenACCReductionRecipe &R : RC->getRecipes()) {
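+    // Keep in sync with the reader: one pointer-sized field per recipe.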
+ static_assert(sizeof(OpenACCReductionRecipe) == sizeof(int *));
+ AddDeclRef(R.RecipeDecl);
+ }
return;
}
case OpenACCClauseKind::Seq:
diff --git a/clang/lib/Serialization/ASTWriterDecl.cpp b/clang/lib/Serialization/ASTWriterDecl.cpp
index 6fd25b9..ec3dda1 100644
--- a/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -524,7 +524,7 @@ void ASTDeclWriter::VisitDecl(Decl *D) {
// bits actually. However, if we changed the order to be 0x0f, then we can
// store it as 0b001111, which now takes only 6 bits.
DeclBits.addBits((uint64_t)D->getModuleOwnershipKind(), /*BitWidth=*/3);
- DeclBits.addBit(D->isReferenced());
+ DeclBits.addBit(D->isThisDeclarationReferenced());
DeclBits.addBit(D->isUsed(false));
DeclBits.addBits(D->getAccess(), /*BitWidth=*/2);
DeclBits.addBit(D->isImplicit());
@@ -601,7 +601,8 @@ void ASTDeclWriter::VisitNamedDecl(NamedDecl *D) {
void ASTDeclWriter::VisitTypeDecl(TypeDecl *D) {
VisitNamedDecl(D);
Record.AddSourceLocation(D->getBeginLoc());
- Record.AddTypeRef(QualType(D->getTypeForDecl(), 0));
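+  // Tag and typedef declarations no longer serialize a type reference; the
+  // matching reader change in ASTReaderDecl.cpp skips it as well.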
+ if (!isa<TagDecl, TypedefDecl, TypeAliasDecl>(D))
+ Record.AddTypeRef(QualType(D->getTypeForDecl(), 0));
}
void ASTDeclWriter::VisitTypedefNameDecl(TypedefNameDecl *D) {
@@ -2561,7 +2562,6 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
// TypeDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
// TagDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IdentifierNamespace
Abv->Add(BitCodeAbbrevOp(
@@ -2607,7 +2607,6 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
// TypeDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
// TagDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IdentifierNamespace
Abv->Add(BitCodeAbbrevOp(
diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp
index be9bad9..301ed9b 100644
--- a/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -730,7 +730,7 @@ void ASTStmtWriter::VisitIntegerLiteral(IntegerLiteral *E) {
Record.AddSourceLocation(E->getLocation());
Record.AddAPInt(E->getValue());
- if (E->getValue().getBitWidth() == 32) {
+ if (E->getBitWidth() == 32) {
AbbrevToUse = Writer.getIntegerLiteralAbbrev();
}
diff --git a/clang/lib/Serialization/TemplateArgumentHasher.cpp b/clang/lib/Serialization/TemplateArgumentHasher.cpp
index c56138e..3e8ffea 100644
--- a/clang/lib/Serialization/TemplateArgumentHasher.cpp
+++ b/clang/lib/Serialization/TemplateArgumentHasher.cpp
@@ -320,7 +320,7 @@ public:
void VisitMemberPointerType(const MemberPointerType *T) {
AddQualType(T->getPointeeType());
- AddType(T->getQualifier()->getAsType());
+ AddType(T->getQualifier().getAsType());
if (auto *RD = T->getMostRecentCXXRecordDecl())
AddDecl(RD->getCanonicalDecl());
}
@@ -358,7 +358,7 @@ public:
AddQualType(T->getReplacementType());
}
- void VisitTagType(const TagType *T) { AddDecl(T->getDecl()); }
+ void VisitTagType(const TagType *T) { AddDecl(T->getOriginalDecl()); }
void VisitRecordType(const RecordType *T) { VisitTagType(T); }
void VisitEnumType(const EnumType *T) { VisitTagType(T); }
@@ -379,10 +379,6 @@ public:
void VisitTypedefType(const TypedefType *T) { AddDecl(T->getDecl()); }
- void VisitElaboratedType(const ElaboratedType *T) {
- AddQualType(T->getNamedType());
- }
-
void VisitUnaryTransformType(const UnaryTransformType *T) {
AddQualType(T->getUnderlyingType());
AddQualType(T->getBaseType());
diff --git a/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
index 3b3def7..e64153d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
@@ -183,7 +183,8 @@ public:
llvm::errs() << "NewAllocator\n";
}
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const {
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const {
if (isCallbackEnabled(C, "Bind"))
llvm::errs() << "Bind\n";
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
index 837cbbc..921114a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -29,7 +29,8 @@ class BoolAssignmentChecker : public Checker<check::Bind> {
bool IsTainted = false) const;
public:
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
};
} // end anonymous namespace
@@ -55,6 +56,7 @@ static bool isBooleanType(QualType Ty) {
}
void BoolAssignmentChecker::checkBind(SVal Loc, SVal Val, const Stmt *S,
+ bool AtDeclInit,
CheckerContext &C) const {
// We are only interested in stores into Booleans.
diff --git a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 0e5fc0a..cfc6d34 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -1129,9 +1129,9 @@ bool CStringChecker::isFirstBufInBound(CheckerContext &C, ProgramStateRef State,
if (!ER)
return true; // cf top comment.
- // FIXME: Does this crash when a non-standard definition
- // of a library function is encountered?
- assert(ER->getValueType() == C.getASTContext().CharTy &&
+  // Compare canonical unqualified types so that library functions declared
+  // with a non-default address space are still accepted.
+ assert(ER->getValueType()->getCanonicalTypeUnqualified() ==
+ C.getASTContext().CharTy &&
"isFirstBufInBound should only be called with char* ElementRegions");
// Get the size of the array.
diff --git a/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 2393564..e98710a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -250,7 +250,7 @@ public:
bool Find(const TypedValueRegion *R) {
QualType T = R->getValueType();
if (const RecordType *RT = T->getAsStructureType()) {
- const RecordDecl *RD = RT->getDecl()->getDefinition();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinition();
assert(RD && "Referred record has no definition");
for (const auto *I : RD->fields()) {
if (I->isUnnamedBitField())
@@ -258,7 +258,7 @@ public:
const FieldRegion *FR = MrMgr.getFieldRegion(I, R);
FieldChain.push_back(I);
T = I->getType();
- if (T->getAsStructureType()) {
+ if (T->isStructureType()) {
if (Find(FR))
return true;
} else {
diff --git a/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 0b52c9b..781216d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -49,11 +49,10 @@ public:
/// of struct bar.
static bool evenFlexibleArraySize(ASTContext &Ctx, CharUnits RegionSize,
CharUnits TypeSize, QualType ToPointeeTy) {
- const RecordType *RT = ToPointeeTy->getAs<RecordType>();
- if (!RT)
+ const auto *RD = ToPointeeTy->getAsRecordDecl();
+ if (!RD)
return false;
- const RecordDecl *RD = RT->getDecl();
RecordDecl::field_iterator Iter(RD->field_begin());
RecordDecl::field_iterator End(RD->field_end());
const FieldDecl *Last = nullptr;
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
index 350db4b..392c7ee 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -175,9 +175,12 @@ public:
/// \param Loc The value of the location (pointer).
/// \param Val The value which will be stored at the location Loc.
/// \param S The bind is performed while processing the statement S.
+ /// \param AtDeclInit Whether the bind is performed during declaration
+ /// initialization.
///
/// check::Bind
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &) const {}
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &) const {}
/// Called after a CFG edge is taken within a function.
///
diff --git a/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index 152129e..395d724 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -48,7 +48,8 @@ class DereferenceChecker
public:
void checkLocation(SVal location, bool isLoad, const Stmt* S,
CheckerContext &C) const;
- void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal L, SVal V, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
static void AddDerefSource(raw_ostream &os,
SmallVectorImpl<SourceRange> &Ranges,
@@ -309,7 +310,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
}
void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
- CheckerContext &C) const {
+ bool AtDeclInit, CheckerContext &C) const {
// If we're binding to a reference, check if the value is known to be null.
if (V.isUndef())
return;
diff --git a/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index 4982cd59..cee744a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -245,7 +245,7 @@ static void recordFixedType(const MemRegion *Region, const CXXMethodDecl *MD,
assert(MD);
ASTContext &Ctx = C.getASTContext();
- QualType Ty = Ctx.getPointerType(Ctx.getRecordType(MD->getParent()));
+ CanQualType Ty = Ctx.getPointerType(Ctx.getCanonicalTagType(MD->getParent()));
ProgramStateRef State = C.getState();
State = setDynamicTypeInfo(State, Region, Ty, /*CanBeSubClassed=*/false);
diff --git a/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
index 355e82e..76a1470 100644
--- a/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
@@ -139,17 +139,11 @@ void EnumCastOutOfRangeChecker::checkPreStmt(const CastExpr *CE,
if (!ValueToCast)
return;
- const QualType T = CE->getType();
// Check whether the cast type is an enum.
- if (!T->isEnumeralType())
+ const auto *ED = CE->getType()->getAsEnumDecl();
+ if (!ED)
return;
- // If the cast is an enum, get its declaration.
- // If the isEnumeralType() returned true, then the declaration must exist
- // even if it is a stub declaration. It is up to the getDeclValuesForEnum()
- // function to handle this.
- const EnumDecl *ED = T->castAs<EnumType>()->getDecl();
-
// Enums annotated with [[clang::flag_enum]] should be ignored by definition.
if (ED->hasAttr<FlagEnumAttr>())
return;
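The getAsEnumDecl() helper used above folds the type check and the declaration lookup into a single call; the idiom in isolation, assuming some QualType T:

if (const EnumDecl *ED = T->getAsEnumDecl()) {
  // T is an enumeration type; ED is its declaration (possibly a stub).
}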
diff --git a/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
index 7ad54c0..7eb9a1d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
@@ -150,7 +150,8 @@ public:
IteratorModeling() = default;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
void checkPostStmt(const UnaryOperator *UO, CheckerContext &C) const;
void checkPostStmt(const BinaryOperator *BO, CheckerContext &C) const;
void checkPostStmt(const MaterializeTemporaryExpr *MTE,
@@ -234,7 +235,7 @@ void IteratorModeling::checkPostCall(const CallEvent &Call,
}
void IteratorModeling::checkBind(SVal Loc, SVal Val, const Stmt *S,
- CheckerContext &C) const {
+ bool AtDeclInit, CheckerContext &C) const {
auto State = C.getState();
const auto *Pos = getIteratorPosition(State, Val);
if (Pos) {
diff --git a/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index 1cb3848..c0727ae 100644
--- a/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -27,7 +27,7 @@ using namespace ento;
//===----------------------------------------------------------------------===//
static bool IsLLVMStringRef(QualType T) {
- const RecordType *RT = T->getAs<RecordType>();
+ const RecordType *RT = T->getAsCanonical<RecordType>();
if (!RT)
return false;
@@ -47,9 +47,6 @@ static bool InNamespace(const Decl *D, StringRef NS) {
}
static bool IsStdString(QualType T) {
- if (const ElaboratedType *QT = T->getAs<ElaboratedType>())
- T = QT->getNamedType();
-
const TypedefType *TT = T->getAs<TypedefType>();
if (!TT)
return false;
@@ -198,14 +195,10 @@ static bool IsPartOfAST(const CXXRecordDecl *R) {
if (IsClangStmt(R) || IsClangType(R) || IsClangDecl(R) || IsClangAttr(R))
return true;
- for (const auto &BS : R->bases()) {
- QualType T = BS.getType();
- if (const RecordType *baseT = T->getAs<RecordType>()) {
- CXXRecordDecl *baseD = cast<CXXRecordDecl>(baseT->getDecl());
- if (IsPartOfAST(baseD))
- return true;
- }
- }
+ for (const auto &BS : R->bases())
+ if (const auto *baseD = BS.getType()->getAsCXXRecordDecl();
+ baseD && IsPartOfAST(baseD))
+ return true;
return false;
}
@@ -246,11 +239,9 @@ void ASTFieldVisitor::Visit(FieldDecl *D) {
if (AllocatesMemory(T))
ReportError(T);
- if (const RecordType *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl()->getDefinition();
+ if (const auto *RD = T->getAsRecordDecl())
for (auto *I : RD->fields())
Visit(I);
- }
FieldChain.pop_back();
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
index 538104d..62d568f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -71,7 +71,7 @@ class NonLocalizedStringChecker
// Methods that return a localized string
mutable llvm::SmallSet<std::pair<const IdentifierInfo *, Selector>, 12> LSM;
// C Functions that return a localized string
- mutable llvm::SmallSet<const IdentifierInfo *, 5> LSF;
+ mutable llvm::SmallPtrSet<const IdentifierInfo *, 5> LSF;
void initUIMethods(ASTContext &Ctx) const;
void initLocStringsMethods(ASTContext &Ctx) const;
diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 369d619..efb9809 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -3156,7 +3156,7 @@ void MallocChecker::checkPreCall(const CallEvent &Call,
for (unsigned I = 0, E = Call.getNumArgs(); I != E; ++I) {
SVal ArgSVal = Call.getArgSVal(I);
if (isa<Loc>(ArgSVal)) {
- SymbolRef Sym = ArgSVal.getAsSymbol();
+ SymbolRef Sym = ArgSVal.getAsSymbol(/*IncludeBaseRegions=*/true);
if (!Sym)
continue;
if (checkUseAfterFree(Sym, C, Call.getArgExpr(I)))
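With IncludeBaseRegions=true, the symbol is recovered even when the argument points into a subregion of a freed allocation; illustrative code that the use-after-free check can now reach (hypothetical use() function):

#include <cstdlib>
struct S { int field; };
void use(int *p);
void test() {
  S *s = static_cast<S *>(std::malloc(sizeof(S)));
  std::free(s);
  use(&s->field); // symbol is found through the base region of 's'
}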
diff --git a/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
index 0aea981..b1a7cd7 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -148,7 +148,9 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
QualType T = ArgE->getType();
const RecordType *UT = T->getAsUnionType();
- if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ if (!UT || !UT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<TransparentUnionAttr>())
continue;
auto CSV = DV->getAs<nonloc::CompoundVal>();
diff --git a/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
index 7927967..b5e32495 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
@@ -114,10 +114,7 @@ bool NonnullGlobalConstantsChecker::isGlobalConstString(SVal V) const {
if (AT->getAttrKind() == attr::TypeNonNull)
return true;
Ty = AT->getModifiedType();
- } else if (const auto *ET = dyn_cast<ElaboratedType>(T)) {
- const auto *TT = dyn_cast<TypedefType>(ET->getNamedType());
- if (!TT)
- return false;
+ } else if (const auto *TT = dyn_cast<TypedefType>(T)) {
Ty = TT->getDecl()->getUnderlyingType();
// It is sufficient for any intermediate typedef
// to be classified const.
diff --git a/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index 9744d1a..eeb6b72 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -97,7 +97,8 @@ public:
// libraries.
bool NoDiagnoseCallsToSystemHeaders = false;
- void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal L, SVal V, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
void checkPostStmt(const ExplicitCastExpr *CE, CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
@@ -1250,7 +1251,7 @@ static bool isARCNilInitializedLocal(CheckerContext &C, const Stmt *S) {
/// Propagate the nullability information through binds and warn when nullable
/// pointer or null symbol is assigned to a pointer with a nonnull type.
void NullabilityChecker::checkBind(SVal L, SVal V, const Stmt *S,
- CheckerContext &C) const {
+ bool AtDeclInit, CheckerContext &C) const {
const TypedValueRegion *TVR =
dyn_cast_or_null<TypedValueRegion>(L.getAsRegion());
if (!TVR)
diff --git a/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
index f217520..68ab22a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
@@ -197,9 +197,9 @@ void NumberObjectConversionChecker::checkASTCodeBody(const Decl *D,
BugReporter &BR) const {
// Currently this matches CoreFoundation opaque pointer typedefs.
auto CSuspiciousNumberObjectExprM = expr(ignoringParenImpCasts(
- expr(hasType(elaboratedType(namesType(typedefType(
+ expr(hasType(typedefType(
hasDeclaration(anyOf(typedefDecl(hasName("CFNumberRef")),
- typedefDecl(hasName("CFBooleanRef")))))))))
+ typedefDecl(hasName("CFBooleanRef")))))))
.bind("c_object")));
// Currently this matches XNU kernel number-object pointers.
@@ -238,8 +238,7 @@ void NumberObjectConversionChecker::checkASTCodeBody(const Decl *D,
// The .bind here is in order to compose the error message more accurately.
auto ObjCSuspiciousScalarBooleanTypeM =
- qualType(elaboratedType(namesType(
- typedefType(hasDeclaration(typedefDecl(hasName("BOOL")))))))
+ qualType(typedefType(hasDeclaration(typedefDecl(hasName("BOOL")))))
.bind("objc_bool_type");
// The .bind here is in order to compose the error message more accurately.
@@ -252,8 +251,8 @@ void NumberObjectConversionChecker::checkASTCodeBody(const Decl *D,
// for storing pointers.
auto SuspiciousScalarNumberTypeM =
qualType(hasCanonicalType(isInteger()),
- unless(elaboratedType(namesType(typedefType(hasDeclaration(
- typedefDecl(matchesName("^::u?intptr_t$"))))))))
+ unless(typedefType(
+ hasDeclaration(typedefDecl(matchesName("^::u?intptr_t$"))))))
.bind("int_type");
auto SuspiciousScalarTypeM =
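With the ElaboratedType node gone from the AST, the matchers above drop the elaboratedType(namesType(...)) wrapper entirely; the simplified pattern is just, e.g.:

auto CFNumberM = expr(hasType(typedefType(
    hasDeclaration(typedefDecl(hasName("CFNumberRef"))))));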
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index ace3426..e40b4f8 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -73,7 +73,8 @@ public:
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
void checkLocation(SVal location, bool isLoad, const Stmt *S,
CheckerContext &C) const;
- void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal loc, SVal val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
void checkPreCall(const CallEvent &CE, CheckerContext &C) const;
void checkPostCall(const CallEvent &CE, CheckerContext &C) const;
@@ -311,9 +312,8 @@ void ObjCSelfInitChecker::checkLocation(SVal location, bool isLoad,
C);
}
-
void ObjCSelfInitChecker::checkBind(SVal loc, SVal val, const Stmt *S,
- CheckerContext &C) const {
+ bool AtDeclInit, CheckerContext &C) const {
// Allow assignment of anything to self. Self is a local variable in the
// initializer, so it is legal to assign anything to it, like results of
// static functions/method calls. After self is assigned something we cannot
diff --git a/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index d4efbdd..1554604 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -104,7 +104,7 @@ public:
// There is not enough excess padding to trigger a warning.
return;
}
- reportRecord(RD, BaselinePad, OptimalPad, OptimalFieldsOrder);
+ reportRecord(ASTContext, RD, BaselinePad, OptimalPad, OptimalFieldsOrder);
}
/// Look for arrays of overly padded types. If the padding of the
@@ -118,12 +118,12 @@ public:
Elts = CArrTy->getZExtSize();
if (Elts == 0)
return;
- const RecordType *RT = ArrTy->getElementType()->getAs<RecordType>();
- if (RT == nullptr)
+ const auto *RD = ArrTy->getElementType()->getAsRecordDecl();
+ if (!RD)
return;
// TODO: Recurse into the fields to see if they have excess padding.
- visitRecord(RT->getDecl(), Elts);
+ visitRecord(RD, Elts);
}
bool shouldSkipDecl(const RecordDecl *RD) const {
@@ -159,9 +159,7 @@ public:
return true;
// Can't lay out a template, so skip it. We do still lay out the
// instantiations, though.
- if (CXXRD->getTypeForDecl()->isDependentType())
- return true;
- if (CXXRD->getTypeForDecl()->isInstantiationDependentType())
+ if (CXXRD->isDependentType())
return true;
}
// How do you reorder fields if you haven't got any?
@@ -306,14 +304,14 @@ public:
}
void reportRecord(
- const RecordDecl *RD, CharUnits BaselinePad, CharUnits OptimalPad,
+ const ASTContext &Ctx, const RecordDecl *RD, CharUnits BaselinePad,
+ CharUnits OptimalPad,
const SmallVector<const FieldDecl *, 20> &OptimalFieldsOrder) const {
SmallString<100> Buf;
llvm::raw_svector_ostream Os(Buf);
Os << "Excessive padding in '";
- Os << QualType::getAsString(RD->getTypeForDecl(), Qualifiers(),
- LangOptions())
- << "'";
+ QualType(Ctx.getCanonicalTagType(RD)).print(Os, LangOptions());
+ Os << "'";
if (auto *TSD = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
// TODO: make this show up better in the console output and in
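getCanonicalTagType() replaces the removed RecordDecl::getTypeForDecl() path; a sketch of printing a record's type name this way, assuming an ASTContext &Ctx and a RecordDecl *RD:

llvm::SmallString<100> Buf;
llvm::raw_svector_ostream OS(Buf);
QualType(Ctx.getCanonicalTagType(RD)).print(OS, LangOptions());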
diff --git a/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index 1141f07..30e01e7 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -58,7 +58,7 @@ class PointerArithChecker
const BugType BT_pointerArith{this, "Dangerous pointer arithmetic"};
const BugType BT_polyArray{this, "Dangerous pointer arithmetic"};
- mutable llvm::SmallSet<IdentifierInfo *, 8> AllocFunctions;
+ mutable llvm::SmallPtrSet<IdentifierInfo *, 8> AllocFunctions;
public:
void checkPreStmt(const UnaryOperator *UOp, CheckerContext &C) const;
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
index 62bc321..1762505 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
@@ -840,20 +840,27 @@ ProgramStateRef RetainCountChecker::updateSymbol(ProgramStateRef state,
const RefCountBug &
RetainCountChecker::errorKindToBugKind(RefVal::Kind ErrorKind,
SymbolRef Sym) const {
+ const RefCountFrontend &FE = getPreferredFrontend();
+
switch (ErrorKind) {
case RefVal::ErrorUseAfterRelease:
- return *UseAfterRelease;
+ return FE.UseAfterRelease;
case RefVal::ErrorReleaseNotOwned:
- return *ReleaseNotOwned;
+ return FE.ReleaseNotOwned;
case RefVal::ErrorDeallocNotOwned:
if (Sym->getType()->getPointeeCXXRecordDecl())
- return *FreeNotOwned;
- return *DeallocNotOwned;
+ return FE.FreeNotOwned;
+ return FE.DeallocNotOwned;
default:
llvm_unreachable("Unhandled error.");
}
}
+bool RetainCountChecker::isReleaseUnownedError(RefVal::Kind ErrorKind) const {
+ return ErrorKind == RefVal::ErrorReleaseNotOwned ||
+ ErrorKind == RefVal::ErrorDeallocNotOwned;
+}
+
void RetainCountChecker::processNonLeakError(ProgramStateRef St,
SourceRange ErrorRange,
RefVal::Kind ErrorKind,
@@ -874,8 +881,8 @@ void RetainCountChecker::processNonLeakError(ProgramStateRef St,
return;
auto report = std::make_unique<RefCountReport>(
- errorKindToBugKind(ErrorKind, Sym),
- C.getASTContext().getLangOpts(), N, Sym);
+ errorKindToBugKind(ErrorKind, Sym), C.getASTContext().getLangOpts(), N,
+ Sym, /*isLeak=*/false, isReleaseUnownedError(ErrorKind));
report->addRange(ErrorRange);
C.emitReport(std::move(report));
}
@@ -1090,8 +1097,8 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
ExplodedNode *N = C.addTransition(state, Pred);
if (N) {
const LangOptions &LOpts = C.getASTContext().getLangOpts();
- auto R =
- std::make_unique<RefLeakReport>(*LeakAtReturn, LOpts, N, Sym, C);
+ auto R = std::make_unique<RefLeakReport>(
+ getPreferredFrontend().LeakAtReturn, LOpts, N, Sym, C);
C.emitReport(std::move(R));
}
return N;
@@ -1113,7 +1120,8 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
ExplodedNode *N = C.addTransition(state, Pred);
if (N) {
auto R = std::make_unique<RefCountReport>(
- *ReturnNotOwnedForOwned, C.getASTContext().getLangOpts(), N, Sym);
+ getPreferredFrontend().ReturnNotOwnedForOwned,
+ C.getASTContext().getLangOpts(), N, Sym);
C.emitReport(std::move(R));
}
return N;
@@ -1128,7 +1136,7 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
//===----------------------------------------------------------------------===//
void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
- CheckerContext &C) const {
+ bool AtDeclInit, CheckerContext &C) const {
ProgramStateRef state = C.getState();
const MemRegion *MR = loc.getAsRegion();
@@ -1261,8 +1269,8 @@ ProgramStateRef RetainCountChecker::handleAutoreleaseCounts(
os << "has a +" << V.getCount() << " retain count";
const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
- auto R = std::make_unique<RefCountReport>(*OverAutorelease, LOpts, N, Sym,
- os.str());
+ auto R = std::make_unique<RefCountReport>(
+ getPreferredFrontend().OverAutorelease, LOpts, N, Sym, os.str());
Ctx.emitReport(std::move(R));
}
@@ -1307,8 +1315,10 @@ RetainCountChecker::processLeaks(ProgramStateRef state,
const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
if (N) {
+ const RefCountFrontend &FE = getPreferredFrontend();
+ const RefCountBug &BT = Pred ? FE.LeakWithinFunction : FE.LeakAtReturn;
+
for (SymbolRef L : Leaked) {
- const RefCountBug &BT = Pred ? *LeakWithinFunction : *LeakAtReturn;
Ctx.emitReport(std::make_unique<RefLeakReport>(BT, LOpts, N, L, Ctx));
}
}
@@ -1463,44 +1473,31 @@ std::unique_ptr<SimpleProgramPointTag> RetainCountChecker::DeallocSentTag;
std::unique_ptr<SimpleProgramPointTag> RetainCountChecker::CastFailTag;
void ento::registerRetainCountBase(CheckerManager &Mgr) {
- auto *Chk = Mgr.registerChecker<RetainCountChecker>();
+ auto *Chk = Mgr.getChecker<RetainCountChecker>();
Chk->DeallocSentTag = std::make_unique<SimpleProgramPointTag>(
"RetainCountChecker", "DeallocSent");
Chk->CastFailTag = std::make_unique<SimpleProgramPointTag>(
"RetainCountChecker", "DynamicCastFail");
}
-bool ento::shouldRegisterRetainCountBase(const CheckerManager &mgr) {
+bool ento::shouldRegisterRetainCountBase(const CheckerManager &) {
return true;
}
+
void ento::registerRetainCountChecker(CheckerManager &Mgr) {
auto *Chk = Mgr.getChecker<RetainCountChecker>();
- Chk->TrackObjCAndCFObjects = true;
+ Chk->RetainCount.enable(Mgr);
Chk->TrackNSCFStartParam = Mgr.getAnalyzerOptions().getCheckerBooleanOption(
Mgr.getCurrentCheckerName(), "TrackNSCFStartParam");
-
-#define INIT_BUGTYPE(KIND) \
- Chk->KIND = std::make_unique<RefCountBug>(Mgr.getCurrentCheckerName(), \
- RefCountBug::KIND);
- // TODO: Ideally, we should have a checker for each of these bug types.
- INIT_BUGTYPE(UseAfterRelease)
- INIT_BUGTYPE(ReleaseNotOwned)
- INIT_BUGTYPE(DeallocNotOwned)
- INIT_BUGTYPE(FreeNotOwned)
- INIT_BUGTYPE(OverAutorelease)
- INIT_BUGTYPE(ReturnNotOwnedForOwned)
- INIT_BUGTYPE(LeakWithinFunction)
- INIT_BUGTYPE(LeakAtReturn)
-#undef INIT_BUGTYPE
}
-bool ento::shouldRegisterRetainCountChecker(const CheckerManager &mgr) {
+bool ento::shouldRegisterRetainCountChecker(const CheckerManager &) {
return true;
}
void ento::registerOSObjectRetainCountChecker(CheckerManager &Mgr) {
auto *Chk = Mgr.getChecker<RetainCountChecker>();
- Chk->TrackOSObjects = true;
+ Chk->OSObjectRetainCount.enable(Mgr);
// FIXME: We want bug reports to always have the same checker name associated
// with them, yet here, if RetainCountChecker is disabled but
@@ -1511,21 +1508,8 @@ void ento::registerOSObjectRetainCountChecker(CheckerManager &Mgr) {
// diagnostics, and **hidden checker options** with the fine-tuning of
// modeling. Following this logic, OSObjectRetainCountChecker should be the
// latter, but we can't just remove it for backward compatibility reasons.
-#define LAZY_INIT_BUGTYPE(KIND) \
- if (!Chk->KIND) \
- Chk->KIND = std::make_unique<RefCountBug>(Mgr.getCurrentCheckerName(), \
- RefCountBug::KIND);
- LAZY_INIT_BUGTYPE(UseAfterRelease)
- LAZY_INIT_BUGTYPE(ReleaseNotOwned)
- LAZY_INIT_BUGTYPE(DeallocNotOwned)
- LAZY_INIT_BUGTYPE(FreeNotOwned)
- LAZY_INIT_BUGTYPE(OverAutorelease)
- LAZY_INIT_BUGTYPE(ReturnNotOwnedForOwned)
- LAZY_INIT_BUGTYPE(LeakWithinFunction)
- LAZY_INIT_BUGTYPE(LeakAtReturn)
-#undef LAZY_INIT_BUGTYPE
}
-bool ento::shouldRegisterOSObjectRetainCountChecker(const CheckerManager &mgr) {
+bool ento::shouldRegisterOSObjectRetainCountChecker(const CheckerManager &) {
return true;
}
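Under the CheckerFamily scheme, each user-facing checker becomes a CheckerFrontend member that registration merely switches on; a condensed sketch of the pattern, with hypothetical names:

class MyCheckerFamily : public CheckerFamily<check::PostCall> {
public:
  CheckerFrontend PartA, PartB;
  StringRef getDebugTag() const override { return "MyCheckerFamily"; }
  void checkPostCall(const CallEvent &Call, CheckerContext &C) const {
    if (!PartA.isEnabled())
      return;
    // ... logic specific to the PartA frontend ...
  }
};

void ento::registerPartA(CheckerManager &Mgr) {
  Mgr.getChecker<MyCheckerFamily>()->PartA.enable(Mgr);
}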
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
index 0e81143..dc8bad6 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
@@ -235,51 +235,32 @@ public:
};
class RetainCountChecker
- : public Checker< check::Bind,
- check::DeadSymbols,
- check::BeginFunction,
- check::EndFunction,
- check::PostStmt<BlockExpr>,
- check::PostStmt<CastExpr>,
- check::PostStmt<ObjCArrayLiteral>,
- check::PostStmt<ObjCDictionaryLiteral>,
- check::PostStmt<ObjCBoxedExpr>,
- check::PostStmt<ObjCIvarRefExpr>,
- check::PostCall,
- check::RegionChanges,
- eval::Assume,
- eval::Call > {
+ : public CheckerFamily<
+ check::Bind, check::DeadSymbols, check::BeginFunction,
+ check::EndFunction, check::PostStmt<BlockExpr>,
+ check::PostStmt<CastExpr>, check::PostStmt<ObjCArrayLiteral>,
+ check::PostStmt<ObjCDictionaryLiteral>,
+ check::PostStmt<ObjCBoxedExpr>, check::PostStmt<ObjCIvarRefExpr>,
+ check::PostCall, check::RegionChanges, eval::Assume, eval::Call> {
public:
- std::unique_ptr<RefCountBug> UseAfterRelease;
- std::unique_ptr<RefCountBug> ReleaseNotOwned;
- std::unique_ptr<RefCountBug> DeallocNotOwned;
- std::unique_ptr<RefCountBug> FreeNotOwned;
- std::unique_ptr<RefCountBug> OverAutorelease;
- std::unique_ptr<RefCountBug> ReturnNotOwnedForOwned;
- std::unique_ptr<RefCountBug> LeakWithinFunction;
- std::unique_ptr<RefCountBug> LeakAtReturn;
+ RefCountFrontend RetainCount;
+ RefCountFrontend OSObjectRetainCount;
mutable std::unique_ptr<RetainSummaryManager> Summaries;
static std::unique_ptr<SimpleProgramPointTag> DeallocSentTag;
static std::unique_ptr<SimpleProgramPointTag> CastFailTag;
- /// Track Objective-C and CoreFoundation objects.
- bool TrackObjCAndCFObjects = false;
-
-  /// Track subclasses of OSObject.
- bool TrackOSObjects = false;
-
/// Track initial parameters (for the entry point) for NS/CF objects.
bool TrackNSCFStartParam = false;
- RetainCountChecker() {};
+ StringRef getDebugTag() const override { return "RetainCountChecker"; }
RetainSummaryManager &getSummaryManager(ASTContext &Ctx) const {
if (!Summaries)
- Summaries.reset(
- new RetainSummaryManager(Ctx, TrackObjCAndCFObjects, TrackOSObjects));
+ Summaries = std::make_unique<RetainSummaryManager>(
+ Ctx, RetainCount.isEnabled(), OSObjectRetainCount.isEnabled());
return *Summaries;
}
@@ -287,10 +268,20 @@ public:
return getSummaryManager(C.getASTContext());
}
+ const RefCountFrontend &getPreferredFrontend() const {
+ // FIXME: The two frontends of this checker family are in an unusual
+ // relationship: if they are both enabled, then all bug reports are
+ // reported by RetainCount (i.e. `osx.cocoa.RetainCount`), even the bugs
+ // that "belong to" OSObjectRetainCount (i.e. `osx.OSObjectRetainCount`).
+ // This is counter-intuitive and should be fixed to avoid confusion.
+ return RetainCount.isEnabled() ? RetainCount : OSObjectRetainCount;
+ }
+
void printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const override;
- void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal loc, SVal val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
void checkPostStmt(const CastExpr *CE, CheckerContext &C) const;
@@ -337,6 +328,8 @@ public:
const RefCountBug &errorKindToBugKind(RefVal::Kind ErrorKind,
SymbolRef Sym) const;
+ bool isReleaseUnownedError(RefVal::Kind ErrorKind) const;
+
void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange,
RefVal::Kind ErrorKind, SymbolRef Sym,
CheckerContext &C) const;
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
index c9f5dc9..cad2c72 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -21,57 +21,6 @@ using namespace clang;
using namespace ento;
using namespace retaincountchecker;
-StringRef RefCountBug::bugTypeToName(RefCountBug::RefCountBugKind BT) {
- switch (BT) {
- case UseAfterRelease:
- return "Use-after-release";
- case ReleaseNotOwned:
- return "Bad release";
- case DeallocNotOwned:
- return "-dealloc sent to non-exclusively owned object";
- case FreeNotOwned:
- return "freeing non-exclusively owned object";
- case OverAutorelease:
- return "Object autoreleased too many times";
- case ReturnNotOwnedForOwned:
- return "Method should return an owned object";
- case LeakWithinFunction:
- return "Leak";
- case LeakAtReturn:
- return "Leak of returned object";
- }
- llvm_unreachable("Unknown RefCountBugKind");
-}
-
-StringRef RefCountBug::getDescription() const {
- switch (BT) {
- case UseAfterRelease:
- return "Reference-counted object is used after it is released";
- case ReleaseNotOwned:
- return "Incorrect decrement of the reference count of an object that is "
- "not owned at this point by the caller";
- case DeallocNotOwned:
- return "-dealloc sent to object that may be referenced elsewhere";
- case FreeNotOwned:
- return "'free' called on an object that may be referenced elsewhere";
- case OverAutorelease:
- return "Object autoreleased too many times";
- case ReturnNotOwnedForOwned:
- return "Object with a +0 retain count returned to caller where a +1 "
- "(owning) retain count is expected";
- case LeakWithinFunction:
- case LeakAtReturn:
- return "";
- }
- llvm_unreachable("Unknown RefCountBugKind");
-}
-
-RefCountBug::RefCountBug(CheckerNameRef Checker, RefCountBugKind BT)
- : BugType(Checker, bugTypeToName(BT), categories::MemoryRefCount,
- /*SuppressOnSink=*/BT == LeakWithinFunction ||
- BT == LeakAtReturn),
- BT(BT) {}
-
static bool isNumericLiteralExpression(const Expr *E) {
// FIXME: This set of cases was copied from SemaExprObjC.
return isa<IntegerLiteral, CharacterLiteral, FloatingLiteral,
@@ -312,9 +261,11 @@ namespace retaincountchecker {
class RefCountReportVisitor : public BugReporterVisitor {
protected:
SymbolRef Sym;
+ bool IsReleaseUnowned;
public:
- RefCountReportVisitor(SymbolRef sym) : Sym(sym) {}
+ RefCountReportVisitor(SymbolRef S, bool IRU)
+ : Sym(S), IsReleaseUnowned(IRU) {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int x = 0;
@@ -334,7 +285,8 @@ public:
class RefLeakReportVisitor : public RefCountReportVisitor {
public:
RefLeakReportVisitor(SymbolRef Sym, const MemRegion *LastBinding)
- : RefCountReportVisitor(Sym), LastBinding(LastBinding) {}
+ : RefCountReportVisitor(Sym, /*IsReleaseUnowned=*/false),
+ LastBinding(LastBinding) {}
PathDiagnosticPieceRef getEndPath(BugReporterContext &BRC,
const ExplodedNode *N,
@@ -452,12 +404,6 @@ annotateStartParameter(const ExplodedNode *N, SymbolRef Sym,
PathDiagnosticPieceRef
RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
PathSensitiveBugReport &BR) {
-
- const auto &BT = static_cast<const RefCountBug&>(BR.getBugType());
-
- bool IsFreeUnowned = BT.getBugType() == RefCountBug::FreeNotOwned ||
- BT.getBugType() == RefCountBug::DeallocNotOwned;
-
const SourceManager &SM = BRC.getSourceManager();
CallEventManager &CEMgr = BRC.getStateManager().getCallEventManager();
if (auto CE = N->getLocationAs<CallExitBegin>())
@@ -490,7 +436,7 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
- if (PrevT && IsFreeUnowned && CurrV.isNotOwned() && PrevT->isOwned()) {
+ if (PrevT && IsReleaseUnowned && CurrV.isNotOwned() && PrevT->isOwned()) {
os << "Object is now not exclusively owned";
auto Pos = PathDiagnosticLocation::create(N->getLocation(), SM);
return std::make_shared<PathDiagnosticEventPiece>(Pos, sbuf);
@@ -815,10 +761,8 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
if (K == ObjKind::ObjC || K == ObjKind::CF) {
os << "whose name ('" << *FD
<< "') does not contain 'Copy' or 'Create'. This violates the "
- "naming"
- " convention rules given in the Memory Management Guide for "
- "Core"
- " Foundation";
+ "naming convention rules given in the Memory Management Guide "
+ "for Core Foundation";
} else if (RV->getObjKind() == ObjKind::OS) {
std::string FuncName = FD->getNameAsString();
os << "whose name ('" << FuncName << "') starts with '"
@@ -836,19 +780,20 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
}
RefCountReport::RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
- ExplodedNode *n, SymbolRef sym, bool isLeak)
- : PathSensitiveBugReport(D, D.getDescription(), n), Sym(sym),
+ ExplodedNode *n, SymbolRef sym, bool isLeak,
+ bool IsReleaseUnowned)
+ : PathSensitiveBugReport(D, D.getReportMessage(), n), Sym(sym),
isLeak(isLeak) {
if (!isLeak)
- addVisitor<RefCountReportVisitor>(sym);
+ addVisitor<RefCountReportVisitor>(sym, IsReleaseUnowned);
}
RefCountReport::RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
ExplodedNode *n, SymbolRef sym,
StringRef endText)
- : PathSensitiveBugReport(D, D.getDescription(), endText, n) {
+ : PathSensitiveBugReport(D, D.getReportMessage(), endText, n) {
- addVisitor<RefCountReportVisitor>(sym);
+ addVisitor<RefCountReportVisitor>(sym, /*IsReleaseUnowned=*/false);
}
void RefLeakReport::deriveParamLocation(CheckerContext &Ctx) {
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
index d059008..6ceb86f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
@@ -25,25 +25,44 @@ namespace ento {
namespace retaincountchecker {
class RefCountBug : public BugType {
+ StringRef ReportMessage;
+
public:
- enum RefCountBugKind {
- UseAfterRelease,
- ReleaseNotOwned,
- DeallocNotOwned,
- FreeNotOwned,
- OverAutorelease,
- ReturnNotOwnedForOwned,
- LeakWithinFunction,
- LeakAtReturn,
- };
- RefCountBug(CheckerNameRef Checker, RefCountBugKind BT);
- StringRef getDescription() const;
-
- RefCountBugKind getBugType() const { return BT; }
-
-private:
- RefCountBugKind BT;
- static StringRef bugTypeToName(RefCountBugKind BT);
+ RefCountBug(const CheckerFrontend *CF, StringRef Desc, StringRef ReportMsg,
+ bool SuppressOnSink = false)
+ : BugType(CF, Desc, categories::MemoryRefCount, SuppressOnSink),
+ ReportMessage(ReportMsg) {}
+ StringRef getReportMessage() const { return ReportMessage; }
+};
+
+class RefCountFrontend : public CheckerFrontend {
+public:
+ const RefCountBug UseAfterRelease{
+ this, "Use-after-release",
+ "Reference-counted object is used after it is released"};
+ const RefCountBug ReleaseNotOwned{
+ this, "Bad release",
+ "Incorrect decrement of the reference count of an object that is not "
+ "owned at this point by the caller"};
+ const RefCountBug DeallocNotOwned{
+ this, "-dealloc sent to non-exclusively owned object",
+ "-dealloc sent to object that may be referenced elsewhere"};
+ const RefCountBug FreeNotOwned{
+ this, "freeing non-exclusively owned object",
+ "'free' called on an object that may be referenced elsewhere"};
+ const RefCountBug OverAutorelease{this, "Object autoreleased too many times",
+ "Object autoreleased too many times"};
+ const RefCountBug ReturnNotOwnedForOwned{
+ this, "Method should return an owned object",
+ "Object with a +0 retain count returned to caller where a +1 (owning) "
+ "retain count is expected"};
+ // For these two bug types the report message will be generated dynamically
+ // by `RefLeakReport::createDescription` so the empty string taken from the
+ // BugType will be ignored (overwritten).
+ const RefCountBug LeakWithinFunction{this, "Leak", /*ReportMsg=*/"",
+ /*SuppressOnSink=*/true};
+ const RefCountBug LeakAtReturn{this, "Leak of returned object",
+ /*ReportMsg=*/"", /*SuppressOnSink=*/true};
};
class RefCountReport : public PathSensitiveBugReport {
@@ -53,8 +72,8 @@ protected:
public:
RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
- ExplodedNode *n, SymbolRef sym,
- bool isLeak=false);
+ ExplodedNode *n, SymbolRef sym, bool isLeak = false,
+ bool IsReleaseUnowned = false);
RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
ExplodedNode *n, SymbolRef sym,
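The new RefCountBug constructor takes the owning frontend, the bug name, and the report message directly, so a frontend can declare its bug types inline; a hypothetical frontend following the same pattern:

class MyFrontend : public CheckerFrontend {
public:
  const RefCountBug DoubleRelease{
      this, "Double release", // bug name shown in reports
      "Reference count decremented twice for the same object"};
};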
diff --git a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index 52b3d1e..844447f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -1589,7 +1589,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// and we have a TypedefDecl with the name 'FILE'.
for (Decl *D : LookupRes)
if (auto *TD = dyn_cast<TypedefNameDecl>(D))
- return ACtx.getTypeDeclType(TD).getCanonicalType();
+ return ACtx.getCanonicalTypeDeclType(TD);
// Find the first TypeDecl.
// There may be cases when a function has the same name as a struct.
@@ -1597,7 +1597,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// int stat(const char *restrict path, struct stat *restrict buf);
for (Decl *D : LookupRes)
if (auto *TD = dyn_cast<TypeDecl>(D))
- return ACtx.getTypeDeclType(TD).getCanonicalType();
+ return ACtx.getCanonicalTypeDeclType(TD);
return std::nullopt;
}
} lookupTy(ACtx);
diff --git a/clang/lib/StaticAnalyzer/Checkers/StoreToImmutableChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StoreToImmutableChecker.cpp
index afad419..2bb3917 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StoreToImmutableChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StoreToImmutableChecker.cpp
@@ -26,53 +26,11 @@ class StoreToImmutableChecker : public Checker<check::Bind> {
const BugType BT{this, "Write to immutable memory", "CERT Environment (ENV)"};
public:
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
};
} // end anonymous namespace
-static bool isInitializationContext(const Stmt *S, CheckerContext &C) {
- // Check if this is a DeclStmt (variable declaration)
- if (isa<DeclStmt>(S))
- return true;
-
- // This part is specific for initialization of const lambdas pre-C++17.
- // Lets look at the AST of the statement:
- // ```
- // const auto lambda = [](){};
- // ```
- //
- // The relevant part of the AST for this case prior to C++17 is:
- // ...
- // `-DeclStmt
- // `-VarDecl
- // `-ExprWithCleanups
- // `-CXXConstructExpr
- // ...
- // In C++17 and later, the AST is different:
- // ...
- // `-DeclStmt
- // `-VarDecl
- // `-ImplicitCastExpr
- // `-LambdaExpr
- // |-CXXRecordDecl
- // `-CXXConstructExpr
- // ...
- // And even beside this, the statement `S` that is given to the checkBind
- // callback is the VarDecl in C++17 and later, and the CXXConstructExpr in
- // C++14 and before. So in order to support the C++14 we need the following
- // ugly hack to detect whether this construction is used to initialize a
- // variable.
- //
- // FIXME: This should be eliminated by improving the API of checkBind to
- // ensure that it consistently passes the `VarDecl` (instead of the
- // `CXXConstructExpr`) when the constructor call denotes the initialization
- // of a variable with a lambda, or maybe less preferably, try the more
- // invasive approach of passing the information forward to the checkers
- // whether the current bind is an initialization or an assignment.
- const auto *ConstructExp = dyn_cast<CXXConstructExpr>(S);
- return ConstructExp && ConstructExp->isElidable();
-}
-
static bool isEffectivelyConstRegion(const MemRegion *MR, CheckerContext &C) {
if (isa<GlobalImmutableSpaceRegion>(MR))
return true;
@@ -128,6 +86,7 @@ getInnermostEnclosingConstDeclRegion(const MemRegion *MR, CheckerContext &C) {
}
void StoreToImmutableChecker::checkBind(SVal Loc, SVal Val, const Stmt *S,
+ bool AtDeclInit,
CheckerContext &C) const {
// We are only interested in stores to memory regions
const MemRegion *MR = Loc.getAsRegion();
@@ -136,9 +95,7 @@ void StoreToImmutableChecker::checkBind(SVal Loc, SVal Val, const Stmt *S,
// Skip variable declarations and initializations; we only want to catch
// actual writes.
- // FIXME: If the API of checkBind would allow to distinguish between
- // initialization and assignment, we could use that instead.
- if (isInitializationContext(S, C))
+ if (AtDeclInit)
return;
// Check if the region is in the global immutable space
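The AtDeclInit flag subsumes the deleted heuristic, including the pre-C++17 lambda case it special-cased; illustratively, both of these now reach checkBind as initializations rather than as writes to immutable memory:

const auto lambda = [](){}; // a CXXConstructExpr bind in C++14, still AtDeclInit
const int x = 42;           // plain declaration initialization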
diff --git a/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index e98de33..7f8923c 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -26,13 +26,13 @@ class UndefinedAssignmentChecker
const BugType BT{this, "Assigned value is uninitialized"};
public:
- void checkBind(SVal location, SVal val, const Stmt *S,
+ void checkBind(SVal location, SVal val, const Stmt *S, bool AtDeclInit,
CheckerContext &C) const;
};
}
void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
- const Stmt *StoreE,
+ const Stmt *StoreE, bool AtDeclInit,
CheckerContext &C) const {
if (!val.isUndef())
return;
diff --git a/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp b/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
index fcb7664..d090748 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
@@ -217,7 +217,7 @@ bool FindUninitializedFields::isDereferencableUninit(
static std::optional<DereferenceInfo> dereference(ProgramStateRef State,
const FieldRegion *FR) {
- llvm::SmallSet<const TypedValueRegion *, 5> VisitedRegions;
+ llvm::SmallPtrSet<const TypedValueRegion *, 5> VisitedRegions;
SVal V = State->getSVal(FR);
assert(V.getAsRegion() && "V must have an underlying region!");
diff --git a/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index e122616..4df751d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -137,8 +137,7 @@ ProgramStateRef UnixAPIMisuseChecker::EnsurePtrNotNull(
auto R = std::make_unique<PathSensitiveBugReport>(
BT.value_or(std::cref(BT_ArgumentNull)),
(PtrDescr + " pointer might be NULL.").str(), N);
- if (PtrExpr)
- bugreporter::trackExpressionValue(N, PtrExpr, *R);
+ bugreporter::trackExpressionValue(N, PtrExpr, *R);
C.emitReport(std::move(R));
}
return nullptr;
diff --git a/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
index cb73ac6..d622487 100644
--- a/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
@@ -45,7 +45,7 @@ namespace {
class VforkChecker : public Checker<check::PreCall, check::PostCall,
check::Bind, check::PreStmt<ReturnStmt>> {
const BugType BT{this, "Dangerous construct in a vforked process"};
- mutable llvm::SmallSet<const IdentifierInfo *, 10> VforkAllowlist;
+ mutable llvm::SmallPtrSet<const IdentifierInfo *, 10> VforkAllowlist;
mutable const IdentifierInfo *II_vfork = nullptr;
static bool isChildProcess(const ProgramStateRef State);
@@ -62,7 +62,8 @@ public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
- void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal L, SVal V, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
};
@@ -188,7 +189,7 @@ void VforkChecker::checkPreCall(const CallEvent &Call,
}
// Prohibit writes in child process (except for vfork's lhs).
-void VforkChecker::checkBind(SVal L, SVal V, const Stmt *S,
+void VforkChecker::checkBind(SVal L, SVal V, const Stmt *S, bool AtDeclInit,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
if (!isChildProcess(State))
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
index 72199af..884dbe9 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
@@ -181,10 +181,6 @@ template <typename Predicate>
static bool isPtrOfType(const clang::QualType T, Predicate Pred) {
QualType type = T;
while (!type.isNull()) {
- if (auto *elaboratedT = type->getAs<ElaboratedType>()) {
- type = elaboratedT->desugar();
- continue;
- }
if (auto *SpecialT = type->getAs<TemplateSpecializationType>()) {
auto *Decl = SpecialT->getTemplateName().getAsTemplateDecl();
return Decl && Pred(Decl->getNameAsString());
@@ -245,16 +241,18 @@ void RetainTypeChecker::visitTypedef(const TypedefDecl *TD) {
return;
auto PointeeQT = QT->getPointeeType();
- const RecordType *RT = PointeeQT->getAs<RecordType>();
+ const RecordType *RT = PointeeQT->getAsCanonical<RecordType>();
if (!RT) {
if (TD->hasAttr<ObjCBridgeAttr>() || TD->hasAttr<ObjCBridgeMutableAttr>()) {
- if (auto *Type = TD->getTypeForDecl())
- RecordlessTypes.insert(Type);
+ RecordlessTypes.insert(TD->getASTContext()
+ .getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD)
+ .getTypePtr());
}
return;
}
- for (auto *Redecl : RT->getDecl()->getMostRecentDecl()->redecls()) {
+ for (auto *Redecl : RT->getOriginalDecl()->getMostRecentDecl()->redecls()) {
if (Redecl->getAttr<ObjCBridgeAttr>() ||
Redecl->getAttr<ObjCBridgeMutableAttr>()) {
CFPointees.insert(RT);
@@ -266,21 +264,10 @@ void RetainTypeChecker::visitTypedef(const TypedefDecl *TD) {
bool RetainTypeChecker::isUnretained(const QualType QT, bool ignoreARC) {
if (ento::cocoa::isCocoaObjectRef(QT) && (!IsARCEnabled || ignoreARC))
return true;
- auto CanonicalType = QT.getCanonicalType();
- auto PointeeType = CanonicalType->getPointeeType();
- auto *RT = dyn_cast_or_null<RecordType>(PointeeType.getTypePtrOrNull());
- if (!RT) {
- auto *Type = QT.getTypePtrOrNull();
- while (Type) {
- if (RecordlessTypes.contains(Type))
- return true;
- auto *ET = dyn_cast_or_null<ElaboratedType>(Type);
- if (!ET)
- break;
- Type = ET->desugar().getTypePtrOrNull();
- }
- }
- return RT && CFPointees.contains(RT);
+ if (auto *RT = dyn_cast_or_null<RecordType>(
+ QT.getCanonicalType()->getPointeeType().getTypePtrOrNull()))
+ return CFPointees.contains(RT);
+ return RecordlessTypes.contains(QT.getTypePtr());
}
std::optional<bool> isUnretained(const QualType T, bool IsARCEnabled) {
@@ -306,7 +293,7 @@ std::optional<bool> isUnretained(const QualType T, bool IsARCEnabled) {
auto *Record = PointeeType->getAsStructureType();
if (!Record)
return false;
- auto *Decl = Record->getDecl();
+ auto *Decl = Record->getOriginalDecl();
if (!Decl)
return false;
auto TypeName = Decl->getName();
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
index 98c587d..6f3a280 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
@@ -113,10 +113,6 @@ public:
auto CastType = Cast->getType();
if (auto *PtrType = dyn_cast<PointerType>(CastType)) {
auto PointeeType = PtrType->getPointeeType();
- while (auto *ET = dyn_cast<ElaboratedType>(PointeeType)) {
- if (ET->isSugared())
- PointeeType = ET->desugar();
- }
if (auto *ParmType = dyn_cast<TemplateTypeParmType>(PointeeType)) {
if (ArgList) {
auto ParmIndex = ParmType->getIndex();
@@ -125,13 +121,13 @@ public:
return true;
}
} else if (auto *RD = dyn_cast<RecordType>(PointeeType)) {
- if (RD->getDecl() == ClassDecl)
+ if (declaresSameEntity(RD->getOriginalDecl(), ClassDecl))
return true;
} else if (auto *ST =
dyn_cast<SubstTemplateTypeParmType>(PointeeType)) {
auto Type = ST->getReplacementType();
if (auto *RD = dyn_cast<RecordType>(Type)) {
- if (RD->getDecl() == ClassDecl)
+ if (declaresSameEntity(RD->getOriginalDecl(), ClassDecl))
return true;
}
}
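declaresSameEntity() compares declarations through their canonical declaration (and tolerates null on either side), so the check no longer depends on which redeclaration the RecordType happens to name; the idiom in isolation:

// Roughly D1->getCanonicalDecl() == D2->getCanonicalDecl(), null-safe:
if (declaresSameEntity(RD->getOriginalDecl(), ClassDecl))
  return true;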
diff --git a/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 4631e0c..63f0d70 100644
--- a/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -1950,7 +1950,7 @@ class TrackControlDependencyCondBRVisitor final
: public TrackingBugReporterVisitor {
const ExplodedNode *Origin;
ControlDependencyCalculator ControlDeps;
- llvm::SmallSet<const CFGBlock *, 32> VisitedBlocks;
+ llvm::SmallPtrSet<const CFGBlock *, 32> VisitedBlocks;
public:
TrackControlDependencyCondBRVisitor(TrackerRef ParentTracker,
diff --git a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index 34fcb9b..180056c 100644
--- a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -89,7 +89,7 @@ static bool isCallback(QualType T) {
T = T->getPointeeType();
if (const RecordType *RT = T->getAsStructureType()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
for (const auto *I : RD->fields()) {
QualType FieldT = I->getType();
if (FieldT->isBlockPointerType() || FieldT->isFunctionPointerType())
@@ -391,7 +391,9 @@ bool CallEvent::isVariadic(const Decl *D) {
static bool isTransparentUnion(QualType T) {
const RecordType *UT = T->getAsUnionType();
- return UT && UT->getDecl()->hasAttr<TransparentUnionAttr>();
+ return UT && UT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<TransparentUnionAttr>();
}
// In some cases, symbolic cases should be transformed before we associate
@@ -843,7 +845,7 @@ void CXXInstanceCall::getInitialStackFrameContents(
if (MD->getCanonicalDecl() != getDecl()->getCanonicalDecl()) {
ASTContext &Ctx = SVB.getContext();
const CXXRecordDecl *Class = MD->getParent();
- QualType Ty = Ctx.getPointerType(Ctx.getRecordType(Class));
+ CanQualType Ty = Ctx.getPointerType(Ctx.getCanonicalTagType(Class));
// FIXME: CallEvent maybe shouldn't be directly accessing StoreManager.
std::optional<SVal> V =
@@ -854,7 +856,8 @@ void CXXInstanceCall::getInitialStackFrameContents(
// Fall back to a generic pointer cast for this-value.
const CXXMethodDecl *StaticMD = cast<CXXMethodDecl>(getDecl());
const CXXRecordDecl *StaticClass = StaticMD->getParent();
- QualType StaticTy = Ctx.getPointerType(Ctx.getRecordType(StaticClass));
+ CanQualType StaticTy =
+ Ctx.getPointerType(Ctx.getCanonicalTagType(StaticClass));
ThisVal = SVB.evalCast(ThisVal, Ty, StaticTy);
} else
ThisVal = *V;
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
index 0fe677e..44c6f9f 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -376,11 +376,13 @@ namespace {
const Stmt *S;
ExprEngine &Eng;
const ProgramPoint &PP;
+ bool AtDeclInit;
- CheckBindContext(const CheckersTy &checkers,
- SVal loc, SVal val, const Stmt *s, ExprEngine &eng,
+ CheckBindContext(const CheckersTy &checkers, SVal loc, SVal val,
+ const Stmt *s, bool AtDeclInit, ExprEngine &eng,
const ProgramPoint &pp)
- : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PP(pp) {}
+ : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PP(pp),
+ AtDeclInit(AtDeclInit) {}
CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
@@ -391,7 +393,7 @@ namespace {
const ProgramPoint &L = PP.withTag(checkFn.Checker);
CheckerContext C(Bldr, Eng, Pred, L);
- checkFn(Loc, Val, S, C);
+ checkFn(Loc, Val, S, AtDeclInit, C);
}
};
@@ -408,10 +410,10 @@ namespace {
/// Run checkers for binding of a value to a location.
void CheckerManager::runCheckersForBind(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
- SVal location, SVal val,
- const Stmt *S, ExprEngine &Eng,
+ SVal location, SVal val, const Stmt *S,
+ bool AtDeclInit, ExprEngine &Eng,
const ProgramPoint &PP) {
- CheckBindContext C(BindCheckers, location, val, S, Eng, PP);
+ CheckBindContext C(BindCheckers, location, val, S, AtDeclInit, Eng, PP);
llvm::TimeTraceScope TimeScope{
"CheckerManager::runCheckersForBind",
[&val]() { return getTimeTraceBindMetadata(val); }};
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index d874844..785cdfa 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -3165,7 +3165,7 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
// feasible then it shouldn't be considered for making 'default:' reachable.
const SwitchStmt *SS = builder.getSwitch();
const Expr *CondExpr = SS->getCond()->IgnoreParenImpCasts();
- if (CondExpr->getType()->getAs<EnumType>()) {
+ if (CondExpr->getType()->isEnumeralType()) {
if (SS->isAllEnumCasesCovered())
return;
}
@@ -3714,9 +3714,8 @@ ExprEngine::notifyCheckersOfPointerEscape(ProgramStateRef State,
/// evalBind - Handle the semantics of binding a value to a specific location.
/// This method is used by evalStore and (soon) VisitDeclStmt, and others.
void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
- ExplodedNode *Pred,
- SVal location, SVal Val,
- bool atDeclInit, const ProgramPoint *PP) {
+ ExplodedNode *Pred, SVal location, SVal Val,
+ bool AtDeclInit, const ProgramPoint *PP) {
const LocationContext *LC = Pred->getLocationContext();
PostStmt PS(StoreE, LC);
if (!PP)
@@ -3725,7 +3724,7 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
// Do a previsit of the bind.
ExplodedNodeSet CheckedSet;
getCheckerManager().runCheckersForBind(CheckedSet, Pred, location, Val,
- StoreE, *this, *PP);
+ StoreE, AtDeclInit, *this, *PP);
StmtNodeBuilder Bldr(CheckedSet, Dst, *currBldrCtx);
@@ -3748,8 +3747,8 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
// When binding the value, pass on the hint that this is an initialization.
// For initializations, we do not need to inform clients of region
// changes.
- state = state->bindLoc(location.castAs<Loc>(),
- Val, LC, /* notifyChanges = */ !atDeclInit);
+ state = state->bindLoc(location.castAs<Loc>(), Val, LC,
+ /* notifyChanges = */ !AtDeclInit);
const MemRegion *LocReg = nullptr;
if (std::optional<loc::MemRegionVal> LocRegVal =
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index fe70558..c0b28d2 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -85,7 +85,7 @@ void ExprEngine::performTrivialCopy(NodeBuilder &Bldr, ExplodedNode *Pred,
evalLocation(Tmp, CallExpr, VExpr, Pred, Pred->getState(), V,
/*isLoad=*/true);
for (ExplodedNode *N : Tmp)
- evalBind(Dst, CallExpr, N, ThisVal, V, true);
+ evalBind(Dst, CallExpr, N, ThisVal, V, !AlwaysReturnsLValue);
PostStmt PS(CallExpr, LCtx);
for (ExplodedNode *N : Dst) {
diff --git a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index 0058a0d..5f27196 100644
--- a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -192,11 +192,11 @@ QualType ObjCIvarRegion::getValueType() const {
}
QualType CXXBaseObjectRegion::getValueType() const {
- return QualType(getDecl()->getTypeForDecl(), 0);
+ return getContext().getCanonicalTagType(getDecl());
}
QualType CXXDerivedObjectRegion::getValueType() const {
- return QualType(getDecl()->getTypeForDecl(), 0);
+ return getContext().getCanonicalTagType(getDecl());
}
QualType ParamVarRegion::getValueType() const {
diff --git a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 388034b..8f18533 100644
--- a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -2453,7 +2453,8 @@ NonLoc RegionStoreManager::createLazyBinding(RegionBindingsConstRef B,
SVal RegionStoreManager::getBindingForStruct(RegionBindingsConstRef B,
const TypedValueRegion *R) {
- const RecordDecl *RD = R->getValueType()->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD =
+ R->getValueType()->castAsCanonical<RecordType>()->getOriginalDecl();
if (!RD->getDefinition())
return UnknownVal();
@@ -2843,9 +2844,7 @@ RegionStoreManager::bindStruct(LimitedRegionBindingsConstRef B,
QualType T = R->getValueType();
assert(T->isStructureOrClassType());
- const RecordType* RT = T->castAs<RecordType>();
- const RecordDecl *RD = RT->getDecl();
-
+ const auto *RD = T->castAsRecordDecl();
if (!RD->isCompleteDefinition())
return B;
diff --git a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 2276c45..a6f4463 100644
--- a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -320,8 +320,8 @@ loc::MemRegionVal SValBuilder::getCXXThis(const CXXMethodDecl *D,
/// Return a memory region for the 'this' object reference.
loc::MemRegionVal SValBuilder::getCXXThis(const CXXRecordDecl *D,
const StackFrameContext *SFC) {
- const Type *T = D->getTypeForDecl();
- QualType PT = getContext().getPointerType(QualType(T, 0));
+ CanQualType PT =
+ getContext().getPointerType(getContext().getCanonicalTagType(D));
return loc::MemRegionVal(getRegionManager().getCXXThisRegion(PT, SFC));
}
diff --git a/clang/lib/Tooling/ASTDiff/ASTDiff.cpp b/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
index 5f7153c..d70a679 100644
--- a/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
+++ b/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
@@ -246,7 +246,7 @@ struct PreorderVisitor : public RecursiveASTVisitor<PreorderVisitor> {
PostTraverse(SavedState);
return true;
}
- bool TraverseType(QualType T) { return true; }
+ bool TraverseType(QualType T, bool TraverseQualifier = true) { return true; }
bool TraverseConstructorInitializer(CXXCtorInitializer *Init) {
if (isNodeExcluded(Tree.AST.getSourceManager(), Init))
return true;
@@ -428,11 +428,12 @@ std::string SyntaxTree::Impl::getDeclValue(const Decl *D) const {
Value += getRelativeName(N) + ";";
if (auto *T = dyn_cast<TypedefNameDecl>(D))
return Value + T->getUnderlyingType().getAsString(TypePP) + ";";
- if (auto *T = dyn_cast<TypeDecl>(D))
- if (T->getTypeForDecl())
- Value +=
- T->getTypeForDecl()->getCanonicalTypeInternal().getAsString(TypePP) +
- ";";
+ if (auto *T = dyn_cast<TypeDecl>(D)) {
+ const ASTContext &Ctx = T->getASTContext();
+ Value +=
+ Ctx.getTypeDeclType(T)->getCanonicalTypeInternal().getAsString(TypePP) +
+ ";";
+ }
if (auto *U = dyn_cast<UsingDirectiveDecl>(D))
return std::string(U->getNominatedNamespace()->getName());
if (auto *A = dyn_cast<AccessSpecDecl>(D)) {
diff --git a/clang/lib/Tooling/Refactoring/Lookup.cpp b/clang/lib/Tooling/Refactoring/Lookup.cpp
index 757fba0..dedde86 100644
--- a/clang/lib/Tooling/Refactoring/Lookup.cpp
+++ b/clang/lib/Tooling/Refactoring/Lookup.cpp
@@ -108,16 +108,6 @@ static StringRef getBestNamespaceSubstr(const DeclContext *DeclA,
}
}
-/// Check if the name specifier begins with a written "::".
-static bool isFullyQualified(const NestedNameSpecifier *NNS) {
- while (NNS) {
- if (NNS->getKind() == NestedNameSpecifier::Global)
- return true;
- NNS = NNS->getPrefix();
- }
- return false;
-}
-
// Adds more scope specifiers to the spelled name until the spelling is not
// ambiguous. A spelling is ambiguous if the resolution of the symbol is
// ambiguous. For example, if QName is "::y::bar", the spelling is "y::bar", and
@@ -182,7 +172,7 @@ static std::string disambiguateSpellingInScope(StringRef Spelling,
return Disambiguated;
}
-std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
+std::string tooling::replaceNestedName(NestedNameSpecifier Use,
SourceLocation UseLoc,
const DeclContext *UseContext,
const NamedDecl *FromDecl,
@@ -217,7 +207,7 @@ std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
// We work backwards (from most specific possible namespace to least
// specific).
StringRef Suggested = getBestNamespaceSubstr(UseContext, ReplacementString,
- isFullyQualified(Use));
+ Use.isFullyQualified());
return disambiguateSpellingInScope(Suggested, ReplacementString, *UseContext,
UseLoc);
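With NestedNameSpecifier now a value type, the hand-rolled prefix walk above collapses into a single query; the idiom, assuming some specifier NNS:

bool StartsWithGlobalScope = NNS.isFullyQualified(); // spelled with leading "::"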
diff --git a/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp b/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
index 8eff778..d944411 100644
--- a/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
+++ b/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
@@ -107,45 +107,83 @@ private:
};
SourceLocation StartLocationForType(TypeLoc TL) {
+ if (auto QTL = TL.getAs<QualifiedTypeLoc>())
+ TL = QTL.getUnqualifiedLoc();
+
// For elaborated types (e.g. `struct a::A`) we want the portion after the
- // `struct` but including the namespace qualifier, `a::`.
- if (auto ElaboratedTypeLoc = TL.getAs<clang::ElaboratedTypeLoc>()) {
- NestedNameSpecifierLoc NestedNameSpecifier =
- ElaboratedTypeLoc.getQualifierLoc();
- if (NestedNameSpecifier.getNestedNameSpecifier())
- return NestedNameSpecifier.getBeginLoc();
- TL = TL.getNextTypeLoc();
+ // `struct`, including the namespace qualifier, `a::`.
+ switch (TL.getTypeLocClass()) {
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName:
+ case TypeLoc::Enum: {
+ auto TTL = TL.castAs<TagTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getNameLoc();
+ }
+ case TypeLoc::Typedef: {
+ auto TTL = TL.castAs<TypedefTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getNameLoc();
+ }
+ case TypeLoc::UnresolvedUsing: {
+ auto TTL = TL.castAs<UnresolvedUsingTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getNameLoc();
+ }
+ case TypeLoc::Using: {
+ auto TTL = TL.castAs<UsingTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getNameLoc();
+ }
+ case TypeLoc::TemplateSpecialization: {
+ auto TTL = TL.castAs<TemplateSpecializationTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getTemplateNameLoc();
+ }
+ case TypeLoc::DeducedTemplateSpecialization: {
+ auto DTL = TL.castAs<clang::DeducedTemplateSpecializationTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = DTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return DTL.getTemplateNameLoc();
+ }
+ case TypeLoc::DependentName: {
+ auto TTL = TL.castAs<DependentNameTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getNameLoc();
+ }
+ case TypeLoc::DependentTemplateSpecialization: {
+ auto TTL = TL.castAs<DependentTemplateSpecializationTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getTemplateNameLoc();
+ }
+ default:
+ llvm_unreachable("unhandled TypeLoc class");
}
- return TL.getBeginLoc();
}
SourceLocation EndLocationForType(TypeLoc TL) {
- // Dig past any namespace or keyword qualifications.
- while (TL.getTypeLocClass() == TypeLoc::Elaborated ||
- TL.getTypeLocClass() == TypeLoc::Qualified)
- TL = TL.getNextTypeLoc();
+ if (auto QTL = TL.getAs<QualifiedTypeLoc>())
+ TL = QTL.getUnqualifiedLoc();
// The location for template specializations (e.g. Foo<int>) includes the
// templated types in its location range. We want to restrict this to just
// before the `<` character.
- if (TL.getTypeLocClass() == TypeLoc::TemplateSpecialization) {
- return TL.castAs<TemplateSpecializationTypeLoc>()
- .getLAngleLoc()
- .getLocWithOffset(-1);
- }
+ if (auto TTL = TL.getAs<TemplateSpecializationTypeLoc>())
+ return TTL.getLAngleLoc().getLocWithOffset(-1);
return TL.getEndLoc();
}
-NestedNameSpecifier *GetNestedNameForType(TypeLoc TL) {
- // Dig past any keyword qualifications.
- while (TL.getTypeLocClass() == TypeLoc::Qualified)
- TL = TL.getNextTypeLoc();
-
- // For elaborated types (e.g. `struct a::A`) we want the portion after the
- // `struct` but including the namespace qualifier, `a::`.
- if (auto ElaboratedTypeLoc = TL.getAs<clang::ElaboratedTypeLoc>())
- return ElaboratedTypeLoc.getQualifierLoc().getNestedNameSpecifier();
- return nullptr;
+NestedNameSpecifier GetNestedNameForType(TypeLoc TL) {
+ if (auto QTL = TL.getAs<QualifiedTypeLoc>())
+ TL = QTL.getUnqualifiedLoc();
+ return TL.getPrefix().getNestedNameSpecifier();
}
// Find all locations identified by the given USRs for rename.
@@ -168,14 +206,14 @@ public:
const NamedDecl *FromDecl;
// The declaration in which the nested name is contained (can be nullptr).
const Decl *Context;
- // The nested name being replaced (can be nullptr).
- const NestedNameSpecifier *Specifier;
+ // The nested name being replaced.
+ NestedNameSpecifier Specifier;
// Determine whether the prefix qualifiers of the NewName should be ignored.
// Normally, we set it to true for the symbol declaration and definition to
// avoid adding prefix qualifiers.
// For example, if it is true and NewName is "a::b::foo", then the symbol
// occurrence which the RenameInfo points to will be renamed to "foo".
- bool IgnorePrefixQualifers;
+ bool IgnorePrefixQualifiers;
};
bool VisitNamedDecl(const NamedDecl *Decl) {
@@ -203,8 +241,8 @@ public:
EndLoc,
/*FromDecl=*/nullptr,
/*Context=*/nullptr,
- /*Specifier=*/nullptr,
- /*IgnorePrefixQualifers=*/true};
+ /*Specifier=*/std::nullopt,
+ /*IgnorePrefixQualifiers=*/true};
RenameInfos.push_back(Info);
}
}
@@ -217,10 +255,10 @@ public:
auto EndLoc = Expr->getMemberLoc();
if (isInUSRSet(Decl)) {
RenameInfos.push_back({StartLoc, EndLoc,
- /*FromDecl=*/nullptr,
- /*Context=*/nullptr,
- /*Specifier=*/nullptr,
- /*IgnorePrefixQualifiers=*/true});
+ /*FromDecl=*/nullptr,
+ /*Context=*/nullptr,
+ /*Specifier=*/std::nullopt,
+ /*IgnorePrefixQualifiers=*/true});
}
return true;
}
@@ -235,7 +273,7 @@ public:
RenameInfos.push_back({StartLoc, EndLoc,
/*FromDecl=*/nullptr,
/*Context=*/nullptr,
- /*Specifier=*/nullptr,
+ /*Specifier=*/std::nullopt,
/*IgnorePrefixQualifiers=*/true});
}
}
@@ -257,7 +295,7 @@ public:
RenameInfos.push_back({Loc, Loc,
/*FromDecl=*/nullptr,
/*Context=*/nullptr,
- /*Specifier=*/nullptr,
+ /*Specifier=*/std::nullopt,
/*IgnorePrefixQualifiers=*/true});
}
}
@@ -288,7 +326,7 @@ public:
RenameInfos.push_back({EndLoc, EndLoc,
/*FromDecl=*/nullptr,
/*Context=*/nullptr,
- /*Specifier=*/nullptr,
+ /*Specifier=*/std::nullopt,
/*IgnorePrefixQualifiers=*/true});
return true;
}
@@ -332,7 +370,7 @@ public:
Decl,
getClosestAncestorDecl(*Expr),
Expr->getQualifier(),
- /*IgnorePrefixQualifers=*/false};
+ /*IgnorePrefixQualifiers=*/false};
RenameInfos.push_back(Info);
}
@@ -350,18 +388,18 @@ public:
}
bool VisitNestedNameSpecifierLocations(NestedNameSpecifierLoc NestedLoc) {
- if (!NestedLoc.getNestedNameSpecifier()->getAsType())
+ TypeLoc TL = NestedLoc.getAsTypeLoc();
+ if (!TL)
return true;
- if (const auto *TargetDecl =
- getSupportedDeclFromTypeLoc(NestedLoc.getTypeLoc())) {
+ if (const auto *TargetDecl = getSupportedDeclFromTypeLoc(TL)) {
if (isInUSRSet(TargetDecl)) {
RenameInfo Info = {NestedLoc.getBeginLoc(),
- EndLocationForType(NestedLoc.getTypeLoc()),
+ EndLocationForType(TL),
TargetDecl,
getClosestAncestorDecl(NestedLoc),
- NestedLoc.getNestedNameSpecifier()->getPrefix(),
- /*IgnorePrefixQualifers=*/false};
+ /*Specifier=*/std::nullopt,
+ /*IgnorePrefixQualifiers=*/false};
RenameInfos.push_back(Info);
}
}
@@ -411,7 +449,7 @@ public:
TargetDecl,
getClosestAncestorDecl(Loc),
GetNestedNameForType(Loc),
- /*IgnorePrefixQualifers=*/false};
+ /*IgnorePrefixQualifiers=*/false};
RenameInfos.push_back(Info);
}
return true;
@@ -421,33 +459,17 @@ public:
// Handle specific template class specialization cases.
if (const auto *TemplateSpecType =
dyn_cast<TemplateSpecializationType>(Loc.getType())) {
- TypeLoc TargetLoc = Loc;
- if (!ParentTypeLoc.isNull()) {
- if (llvm::isa<ElaboratedType>(ParentTypeLoc.getType()))
- TargetLoc = ParentTypeLoc;
- }
-
if (isInUSRSet(TemplateSpecType->getTemplateName().getAsTemplateDecl())) {
- TypeLoc TargetLoc = Loc;
- // FIXME: Find a better way to handle this case.
- // For the qualified template class specification type like
- // "ns::Foo<int>" in "ns::Foo<int>& f();", we want the parent typeLoc
- // (ElaboratedType) of the TemplateSpecializationType in order to
- // catch the prefix qualifiers "ns::".
- if (!ParentTypeLoc.isNull() &&
- llvm::isa<ElaboratedType>(ParentTypeLoc.getType()))
- TargetLoc = ParentTypeLoc;
-
- auto StartLoc = StartLocationForType(TargetLoc);
- auto EndLoc = EndLocationForType(TargetLoc);
+ auto StartLoc = StartLocationForType(Loc);
+ auto EndLoc = EndLocationForType(Loc);
if (IsValidEditLoc(Context.getSourceManager(), StartLoc)) {
RenameInfo Info = {
StartLoc,
EndLoc,
TemplateSpecType->getTemplateName().getAsTemplateDecl(),
- getClosestAncestorDecl(DynTypedNode::create(TargetLoc)),
- GetNestedNameForType(TargetLoc),
- /*IgnorePrefixQualifers=*/false};
+ getClosestAncestorDecl(DynTypedNode::create(Loc)),
+ GetNestedNameForType(Loc),
+ /*IgnorePrefixQualifiers=*/false};
RenameInfos.push_back(Info);
}
}
@@ -469,12 +491,7 @@ private:
const NamedDecl *getSupportedDeclFromTypeLoc(TypeLoc Loc) {
if (const auto* TT = Loc.getType()->getAs<clang::TypedefType>())
return TT->getDecl();
- if (const auto *RD = Loc.getType()->getAsCXXRecordDecl())
- return RD;
- if (const auto *ED =
- llvm::dyn_cast_or_null<EnumDecl>(Loc.getType()->getAsTagDecl()))
- return ED;
- return nullptr;
+ return Loc.getType()->getAsTagDecl();
}
// Get the closest ancestor which is a declaration of a given AST node.
@@ -549,7 +566,7 @@ createRenameAtomicChanges(llvm::ArrayRef<std::string> USRs,
for (const auto &RenameInfo : Finder.getRenameInfos()) {
std::string ReplacedName = NewName.str();
- if (RenameInfo.IgnorePrefixQualifers) {
+ if (RenameInfo.IgnorePrefixQualifiers) {
// Get the name without prefix qualifiers from NewName.
size_t LastColonPos = NewName.find_last_of(':');
if (LastColonPos != std::string::npos)
diff --git a/clang/lib/Tooling/Syntax/BuildTree.cpp b/clang/lib/Tooling/Syntax/BuildTree.cpp
index eb9fa7a..546161c 100644
--- a/clang/lib/Tooling/Syntax/BuildTree.cpp
+++ b/clang/lib/Tooling/Syntax/BuildTree.cpp
@@ -918,97 +918,91 @@ public:
return true;
}
- // FIXME: Fix `NestedNameSpecifierLoc::getLocalSourceRange` for the
- // `DependentTemplateSpecializationType` case.
- /// Given a nested-name-specifier return the range for the last name
- /// specifier.
- ///
- /// e.g. `std::T::template X<U>::` => `template X<U>::`
- SourceRange getLocalSourceRange(const NestedNameSpecifierLoc &NNSLoc) {
- auto SR = NNSLoc.getLocalSourceRange();
-
- // The method `NestedNameSpecifierLoc::getLocalSourceRange` *should*
- // return the desired `SourceRange`, but there is a corner case. For a
- // `DependentTemplateSpecializationType` this method returns its
- // qualifiers as well, in other words in the example above this method
- // returns `T::template X<U>::` instead of only `template X<U>::`
- if (auto TL = NNSLoc.getTypeLoc()) {
- if (auto DependentTL =
- TL.getAs<DependentTemplateSpecializationTypeLoc>()) {
- // The 'template' keyword is always present in dependent template
- // specializations. Except in the case of incorrect code
- // TODO: Treat the case of incorrect code.
- SR.setBegin(DependentTL.getTemplateKeywordLoc());
- }
- }
-
- return SR;
- }
-
- syntax::NodeKind getNameSpecifierKind(const NestedNameSpecifier &NNS) {
- switch (NNS.getKind()) {
- case NestedNameSpecifier::Global:
- return syntax::NodeKind::GlobalNameSpecifier;
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::Identifier:
- return syntax::NodeKind::IdentifierNameSpecifier;
- case NestedNameSpecifier::TypeSpec: {
- const auto *NNSType = NNS.getAsType();
- assert(NNSType);
- if (isa<DecltypeType>(NNSType))
- return syntax::NodeKind::DecltypeNameSpecifier;
- if (isa<TemplateSpecializationType, DependentTemplateSpecializationType>(
- NNSType))
- return syntax::NodeKind::SimpleTemplateNameSpecifier;
- return syntax::NodeKind::IdentifierNameSpecifier;
- }
- default:
- // FIXME: Support Microsoft's __super
- llvm::report_fatal_error("We don't yet support the __super specifier",
- true);
- }
+ syntax::NameSpecifier *buildIdentifier(SourceRange SR,
+ bool DropBack = false) {
+ auto NameSpecifierTokens = Builder.getRange(SR).drop_back(DropBack);
+ assert(NameSpecifierTokens.size() == 1);
+ Builder.markChildToken(NameSpecifierTokens.begin(),
+ syntax::NodeRole::Unknown);
+ auto *NS = new (allocator()) syntax::IdentifierNameSpecifier;
+ Builder.foldNode(NameSpecifierTokens, NS, nullptr);
+ return NS;
+ }
+
+ syntax::NameSpecifier *buildSimpleTemplateName(SourceRange SR) {
+ auto NameSpecifierTokens = Builder.getRange(SR);
+ // TODO: Build `SimpleTemplateNameSpecifier` children and implement
+ // accessors to them.
+ // Be aware that we cannot do this simply by calling `TraverseTypeLoc`:
+ // some `TypeLoc`s contain the previous name specifier, and we want to
+ // treat them independently.
+ auto *NS = new (allocator()) syntax::SimpleTemplateNameSpecifier;
+ Builder.foldNode(NameSpecifierTokens, NS, nullptr);
+ return NS;
}
syntax::NameSpecifier *
buildNameSpecifier(const NestedNameSpecifierLoc &NNSLoc) {
assert(NNSLoc.hasQualifier());
- auto NameSpecifierTokens =
- Builder.getRange(getLocalSourceRange(NNSLoc)).drop_back();
- switch (getNameSpecifierKind(*NNSLoc.getNestedNameSpecifier())) {
- case syntax::NodeKind::GlobalNameSpecifier:
+ switch (NNSLoc.getNestedNameSpecifier().getKind()) {
+ case NestedNameSpecifier::Kind::Global:
return new (allocator()) syntax::GlobalNameSpecifier;
- case syntax::NodeKind::IdentifierNameSpecifier: {
- assert(NameSpecifierTokens.size() == 1);
- Builder.markChildToken(NameSpecifierTokens.begin(),
- syntax::NodeRole::Unknown);
- auto *NS = new (allocator()) syntax::IdentifierNameSpecifier;
- Builder.foldNode(NameSpecifierTokens, NS, nullptr);
- return NS;
- }
- case syntax::NodeKind::SimpleTemplateNameSpecifier: {
- // TODO: Build `SimpleTemplateNameSpecifier` children and implement
- // accessors to them.
- // Be aware that we cannot do this simply by calling `TraverseTypeLoc`:
- // some `TypeLoc`s contain the previous name specifier, and we want to
- // treat them independently.
- auto *NS = new (allocator()) syntax::SimpleTemplateNameSpecifier;
- Builder.foldNode(NameSpecifierTokens, NS, nullptr);
- return NS;
- }
- case syntax::NodeKind::DecltypeNameSpecifier: {
- const auto TL = NNSLoc.getTypeLoc().castAs<DecltypeTypeLoc>();
- if (!RecursiveASTVisitor::TraverseDecltypeTypeLoc(TL))
- return nullptr;
- auto *NS = new (allocator()) syntax::DecltypeNameSpecifier;
- // TODO: Implement accessor to `DecltypeNameSpecifier` inner
- // `DecltypeTypeLoc`.
- // For that add mapping from `TypeLoc` to `syntax::Node*` then:
- // Builder.markChild(TypeLoc, syntax::NodeRole);
- Builder.foldNode(NameSpecifierTokens, NS, nullptr);
- return NS;
+
+ case NestedNameSpecifier::Kind::Namespace:
+ return buildIdentifier(NNSLoc.getLocalSourceRange(), /*DropBack=*/true);
+
+ case NestedNameSpecifier::Kind::Type: {
+ TypeLoc TL = NNSLoc.castAsTypeLoc();
+ switch (TL.getTypeLocClass()) {
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName:
+ case TypeLoc::Enum:
+ return buildIdentifier(TL.castAs<TagTypeLoc>().getNameLoc());
+ case TypeLoc::Typedef:
+ return buildIdentifier(TL.castAs<TypedefTypeLoc>().getNameLoc());
+ case TypeLoc::UnresolvedUsing:
+ return buildIdentifier(
+ TL.castAs<UnresolvedUsingTypeLoc>().getNameLoc());
+ case TypeLoc::Using:
+ return buildIdentifier(TL.castAs<UsingTypeLoc>().getNameLoc());
+ case TypeLoc::DependentName:
+ return buildIdentifier(TL.castAs<DependentNameTypeLoc>().getNameLoc());
+ case TypeLoc::TemplateSpecialization: {
+ auto TST = TL.castAs<TemplateSpecializationTypeLoc>();
+ SourceLocation BeginLoc = TST.getTemplateKeywordLoc();
+ if (BeginLoc.isInvalid())
+ BeginLoc = TST.getTemplateNameLoc();
+ return buildSimpleTemplateName({BeginLoc, TST.getEndLoc()});
+ }
+ case TypeLoc::DependentTemplateSpecialization: {
+ auto DT = TL.castAs<DependentTemplateSpecializationTypeLoc>();
+ SourceLocation BeginLoc = DT.getTemplateKeywordLoc();
+ if (BeginLoc.isInvalid())
+ BeginLoc = DT.getTemplateNameLoc();
+ return buildSimpleTemplateName({BeginLoc, DT.getEndLoc()});
+ }
+ case TypeLoc::Decltype: {
+ const auto DTL = TL.castAs<DecltypeTypeLoc>();
+ if (!RecursiveASTVisitor::TraverseDecltypeTypeLoc(
+ DTL, /*TraverseQualifier=*/true))
+ return nullptr;
+ auto *NS = new (allocator()) syntax::DecltypeNameSpecifier;
+ // TODO: Implement accessor to `DecltypeNameSpecifier` inner
+ // `DecltypeTypeLoc`.
+ // For that add mapping from `TypeLoc` to `syntax::Node*` then:
+ // Builder.markChild(TypeLoc, syntax::NodeRole);
+ Builder.foldNode(Builder.getRange(DTL.getLocalSourceRange()), NS,
+ nullptr);
+ return NS;
+ }
+ default:
+ return buildIdentifier(TL.getLocalSourceRange());
+ }
}
default:
- llvm_unreachable("getChildKind() does not return this value");
+ // FIXME: Support Microsoft's __super
+ llvm::report_fatal_error("We don't yet support the __super specifier",
+ true);
}
}
@@ -1019,12 +1013,16 @@ public:
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc QualifierLoc) {
if (!QualifierLoc)
return true;
- for (auto It = QualifierLoc; It; It = It.getPrefix()) {
+ for (auto It = QualifierLoc; It; /**/) {
auto *NS = buildNameSpecifier(It);
if (!NS)
return false;
Builder.markChild(NS, syntax::NodeRole::ListElement);
Builder.markChildToken(It.getEndLoc(), syntax::NodeRole::ListDelimiter);
+ if (TypeLoc TL = It.getAsTypeLoc())
+ It = TL.getPrefix();
+ else
+ It = It.getAsNamespaceAndPrefix().Prefix;
}
Builder.foldNode(Builder.getRange(QualifierLoc.getSourceRange()),
new (allocator()) syntax::NestedNameSpecifier,
@@ -1328,7 +1326,7 @@ public:
// FIXME: Deleting the `TraverseParenTypeLoc` override doesn't change test
// results. Find test coverage or remove it.
- bool TraverseParenTypeLoc(ParenTypeLoc L) {
+ bool TraverseParenTypeLoc(ParenTypeLoc L, bool TraverseQualifier) {
// We reverse order of traversal to get the proper syntax structure.
if (!WalkUpFromParenTypeLoc(L))
return false;
@@ -1391,7 +1389,8 @@ public:
return WalkUpFromFunctionTypeLoc(L);
}
- bool TraverseMemberPointerTypeLoc(MemberPointerTypeLoc L) {
+ bool TraverseMemberPointerTypeLoc(MemberPointerTypeLoc L,
+ bool TraverseQualifier) {
// In the source code "void (Y::*mp)()" `MemberPointerTypeLoc` corresponds
// to "Y::*" but it points to a `ParenTypeLoc` that corresponds to
// "(Y::*mp)" We thus reverse the order of traversal to get the proper
diff --git a/clang/lib/Tooling/Tooling.cpp b/clang/lib/Tooling/Tooling.cpp
index 45dfdf4..1120e33 100644
--- a/clang/lib/Tooling/Tooling.cpp
+++ b/clang/lib/Tooling/Tooling.cpp
@@ -644,9 +644,13 @@ namespace {
class ASTBuilderAction : public ToolAction {
std::vector<std::unique_ptr<ASTUnit>> &ASTs;
+ CaptureDiagsKind CaptureKind;
public:
- ASTBuilderAction(std::vector<std::unique_ptr<ASTUnit>> &ASTs) : ASTs(ASTs) {}
+ ASTBuilderAction(
+ std::vector<std::unique_ptr<ASTUnit>> &ASTs,
+ CaptureDiagsKind CaptureDiagnosticsKind = CaptureDiagsKind::None)
+ : ASTs(ASTs), CaptureKind(CaptureDiagnosticsKind) {}
bool runInvocation(std::shared_ptr<CompilerInvocation> Invocation,
FileManager *Files,
@@ -658,7 +662,7 @@ public:
Invocation->getDiagnosticOpts(),
DiagConsumer,
/*ShouldOwnClient=*/false),
- Files);
+ Files, false, CaptureKind);
if (!AST)
return false;
@@ -693,9 +697,12 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
StringRef ToolName, std::shared_ptr<PCHContainerOperations> PCHContainerOps,
ArgumentsAdjuster Adjuster, const FileContentMappings &VirtualMappedFiles,
DiagnosticConsumer *DiagConsumer,
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS,
+ CaptureDiagsKind CaptureKind) {
std::vector<std::unique_ptr<ASTUnit>> ASTs;
- ASTBuilderAction Action(ASTs);
+
+ ASTBuilderAction Action(ASTs, CaptureKind);
+
auto OverlayFileSystem =
llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
std::move(BaseFS));
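`buildASTFromCodeWithArgs` gains a trailing `CaptureDiagsKind` argument that is threaded through `ASTBuilderAction` into `ASTUnit` creation. A hedged call sketch: the leading parameters follow the pre-existing overload (declared outside this hunk), and `CaptureDiagsKind::All` is assumed to be the capture-everything enumerator:

  std::vector<std::string> Args = {"-std=c++17"};
  std::unique_ptr<clang::ASTUnit> AST =
      clang::tooling::buildASTFromCodeWithArgs(
          "int f();", Args, "input.cc", "clang-tool",
          std::make_shared<clang::PCHContainerOperations>(),
          clang::tooling::getClangStripDependencyFileAdjuster(),
          clang::tooling::FileContentMappings(), /*DiagConsumer=*/nullptr,
          llvm::vfs::getRealFileSystem(), clang::CaptureDiagsKind::All);
  // Captured diagnostics can then be inspected on the returned ASTUnit
  // instead of being printed directly.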
diff --git a/clang/lib/Tooling/Transformer/RangeSelector.cpp b/clang/lib/Tooling/Transformer/RangeSelector.cpp
index 00f4611..171c786 100644
--- a/clang/lib/Tooling/Transformer/RangeSelector.cpp
+++ b/clang/lib/Tooling/Transformer/RangeSelector.cpp
@@ -222,14 +222,10 @@ RangeSelector transformer::name(std::string ID) {
return CharSourceRange::getTokenRange(L, L);
}
if (const auto *T = Node.get<TypeLoc>()) {
- TypeLoc Loc = *T;
- auto ET = Loc.getAs<ElaboratedTypeLoc>();
- if (!ET.isNull())
- Loc = ET.getNamedTypeLoc();
- if (auto SpecLoc = Loc.getAs<TemplateSpecializationTypeLoc>();
+ if (auto SpecLoc = T->getAs<TemplateSpecializationTypeLoc>();
!SpecLoc.isNull())
return CharSourceRange::getTokenRange(SpecLoc.getTemplateNameLoc());
- return CharSourceRange::getTokenRange(Loc.getSourceRange());
+ return CharSourceRange::getTokenRange(T->getSourceRange());
}
return typeError(ID, Node.getNodeKind(),
"DeclRefExpr, NamedDecl, CXXCtorInitializer, TypeLoc");
diff --git a/clang/test/APINotes/Inputs/Frameworks/Simple.framework/Headers/Simple.apinotes b/clang/test/APINotes/Inputs/Frameworks/Simple.framework/Headers/Simple.apinotes
index 8c915bd..9ba38ed 100644
--- a/clang/test/APINotes/Inputs/Frameworks/Simple.framework/Headers/Simple.apinotes
+++ b/clang/test/APINotes/Inputs/Frameworks/Simple.framework/Headers/Simple.apinotes
@@ -26,3 +26,6 @@ Classes:
- Name: scalarNewProperty
PropertyKind: Instance
Nullability: Scalar
+Typedefs:
+ - Name: MyTypedef
+ SwiftConformsTo: Swift.Equatable
diff --git a/clang/test/APINotes/Inputs/Headers/SwiftImportAs.apinotes b/clang/test/APINotes/Inputs/Headers/SwiftImportAs.apinotes
index c096822..15c8068 100644
--- a/clang/test/APINotes/Inputs/Headers/SwiftImportAs.apinotes
+++ b/clang/test/APINotes/Inputs/Headers/SwiftImportAs.apinotes
@@ -32,6 +32,9 @@ Tags:
SwiftEscapable: false
- Name: EscapableType
SwiftEscapable: true
+- Name: NoncopyableWithDestroyType
+ SwiftCopyable: false
+ SwiftDestroyOp: NCDDestroy
Functions:
- Name: functionReturningFrt__
@@ -39,3 +42,7 @@ Functions:
SwiftReturnOwnership: unretained
- Name: functionReturningFrt_returns_retained
SwiftReturnOwnership: retained
+Typedefs:
+ - Name: WrappedOptions
+ SwiftWrapper: struct
+ SwiftConformsTo: Swift.OptionSet
diff --git a/clang/test/APINotes/Inputs/Headers/SwiftImportAs.h b/clang/test/APINotes/Inputs/Headers/SwiftImportAs.h
index 5f817ac..978b4fb 100644
--- a/clang/test/APINotes/Inputs/Headers/SwiftImportAs.h
+++ b/clang/test/APINotes/Inputs/Headers/SwiftImportAs.h
@@ -29,3 +29,10 @@ struct OpaqueRefCountedType; // redeclaration
inline void ORCRetain(struct OpaqueRefCountedType *x);
inline void ORCRelease(struct OpaqueRefCountedType *x);
+
+typedef unsigned WrappedOptions;
+
+struct NoncopyableWithDestroyType {
+};
+
+void NCDDestroy(NoncopyableWithDestroyType instance);
diff --git a/clang/test/APINotes/swift-import-as.cpp b/clang/test/APINotes/swift-import-as.cpp
index 179170f..f5d08df 100644
--- a/clang/test/APINotes/swift-import-as.cpp
+++ b/clang/test/APINotes/swift-import-as.cpp
@@ -14,6 +14,8 @@
// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fdisable-module-hash -fapinotes-modules -I %S/Inputs/Headers %s -x c++ -ast-dump -ast-dump-filter methodReturningFrt__ | FileCheck -check-prefix=CHECK-METHOD-RETURNING-FRT %s
// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fdisable-module-hash -fapinotes-modules -I %S/Inputs/Headers %s -x c++ -ast-dump -ast-dump-filter methodReturningFrt_returns_unretained | FileCheck -check-prefix=CHECK-METHOD-RETURNING-FRT-UNRETAINED %s
// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fdisable-module-hash -fapinotes-modules -I %S/Inputs/Headers %s -x c++ -ast-dump -ast-dump-filter methodReturningFrt_returns_retained | FileCheck -check-prefix=CHECK-METHOD-RETURNING-FRT-RETAINED %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fdisable-module-hash -fapinotes-modules -I %S/Inputs/Headers %s -x c++ -ast-dump -ast-dump-filter WrappedOptions | FileCheck -check-prefix=CHECK-WRAPPED-OPTIONS %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fdisable-module-hash -fapinotes-modules -I %S/Inputs/Headers %s -x c++ -ast-dump -ast-dump-filter NoncopyableWithDestroyType | FileCheck -check-prefix=CHECK-NONCOPYABLE-WITH-DESTROY %s
#include <SwiftImportAs.h>
@@ -51,8 +53,8 @@
// CHECK-OPAQUE-REF-COUNTED-NOT: SwiftAttrAttr {{.+}} <<invalid sloc>> "release:
// CHECK-NON-COPYABLE: Dumping NonCopyableType:
// CHECK-NON-COPYABLE-NEXT: CXXRecordDecl {{.+}} imported in SwiftImportAs {{.+}} struct NonCopyableType
-// CHECK-NON-COPYABLE: SwiftAttrAttr {{.+}} <<invalid sloc>> "conforms_to:MySwiftModule.MySwiftNonCopyableProtocol"
// CHECK-NON-COPYABLE: SwiftAttrAttr {{.+}} <<invalid sloc>> "~Copyable"
+// CHECK-NON-COPYABLE: SwiftAttrAttr {{.+}} <<invalid sloc>> "conforms_to:MySwiftModule.MySwiftNonCopyableProtocol"
// CHECK-COPYABLE: Dumping CopyableType:
// CHECK-COPYABLE-NEXT: CXXRecordDecl {{.+}} imported in SwiftImportAs {{.+}} struct CopyableType
@@ -91,3 +93,13 @@
// CHECK-METHOD-RETURNING-FRT-RETAINED: Dumping ImmortalRefType::methodReturningFrt_returns_retained:
// CHECK-METHOD-RETURNING-FRT-RETAINED: CXXMethodDecl {{.+}} imported in SwiftImportAs methodReturningFrt_returns_retained 'ImmortalRefType *()'
// CHECK-METHOD-RETURNING-FRT-RETAINED: `-SwiftAttrAttr {{.+}} "returns_retained"
+
+// CHECK-WRAPPED-OPTIONS: Dumping WrappedOptions
+// CHECK-WRAPPED-OPTIONS: TypedefDecl{{.*}}WrappedOptions 'unsigned int'
+// CHECK-WRAPPED-OPTIONS: SwiftNewTypeAttr {{.*}} swift_wrapper NK_Struct
+// CHECK-WRAPPED-OPTIONS: SwiftAttrAttr {{.*}} "conforms_to:Swift.OptionSet"
+
+// CHECK-NONCOPYABLE-WITH-DESTROY: Dumping NoncopyableWithDestroyType
+// CHECK-NONCOPYABLE-WITH-DESTROY: RecordDecl {{.*}}struct NoncopyableWithDestroyType
+// CHECK-NONCOPYABLE-WITH-DESTROY: SwiftAttrAttr {{.+}} "destroy:NCDDestroy"
+// CHECK-NONCOPYABLE-WITH-DESTROY: SwiftAttrAttr {{.+}} "~Copyable"
diff --git a/clang/test/APINotes/yaml-roundtrip.test b/clang/test/APINotes/yaml-roundtrip.test
index bcf84af..f69038c 100644
--- a/clang/test/APINotes/yaml-roundtrip.test
+++ b/clang/test/APINotes/yaml-roundtrip.test
@@ -24,7 +24,7 @@ CHECK-NEXT: 25c26
CHECK-NEXT: < Nullability: S
CHECK-NEXT: ---
CHECK-NEXT: > Nullability: Unspecified
-CHECK-NEXT: 28c29,30
+CHECK-NEXT: 28c29
CHECK-NEXT: < Nullability: Scalar
CHECK-NEXT: ---
CHECK-NEXT: > Nullability: Unspecified
diff --git a/clang/test/AST/ByteCode/arrays.cpp b/clang/test/AST/ByteCode/arrays.cpp
index 2dd51c2..22a4b41 100644
--- a/clang/test/AST/ByteCode/arrays.cpp
+++ b/clang/test/AST/ByteCode/arrays.cpp
@@ -779,3 +779,44 @@ namespace DiscardedSubScriptExpr {
return true;
}
}
+
+namespace ZeroSizeArrayRead {
+ constexpr char str[0] = {};
+ constexpr unsigned checksum(const char *s) {
+ unsigned result = 0;
+ for (const char *p = s; *p != '\0'; ++p) { // both-note {{read of dereferenced one-past-the-end pointer}}
+ result += *p;
+ }
+ return result;
+ }
+ constexpr unsigned C = checksum(str); // both-error {{must be initialized by a constant expression}} \
+ // both-note {{in call to}}
+
+ constexpr const char *p1 = &str[0];
+ constexpr const char *p2 = &str[1]; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{cannot refer to element 1 of array of 0 elements in a constant expression}}
+
+ constexpr char s[] = {};
+ static_assert(s[0] == '0', ""); // both-error {{not an integral constant expression}} \
+ // both-note {{read of dereferenced one-past-the-end pointer}}
+}
+
+namespace FAM {
+ char *strchr(const char *, int);
+
+ struct A {
+ char n, a[2];
+ };
+ struct B {
+ int n;
+ struct A a[]; // both-note {{here}}
+ };
+
+ const struct B b = {0, {{1, {2, 3}}, {4, {5, 6}}}};
+ void foo(void) { int sch = 0 != strchr(b.a[1].a, '\0'); }
+
+ int foo2() {
+ struct B b = {0, {{1, {2, 3}}, {4, {5, 6}}}}; // both-error {{initialization of flexible array member is not allowed}}
+ return 1;
+ }
+}
diff --git a/clang/test/AST/ByteCode/builtin-functions.cpp b/clang/test/AST/ByteCode/builtin-functions.cpp
index e554ef4..f47bc49 100644
--- a/clang/test/AST/ByteCode/builtin-functions.cpp
+++ b/clang/test/AST/ByteCode/builtin-functions.cpp
@@ -21,6 +21,27 @@
#error "huh?"
#endif
+
+inline constexpr void* operator new(__SIZE_TYPE__, void* p) noexcept { return p; }
+namespace std {
+ using size_t = decltype(sizeof(0));
+ template<typename T> struct allocator {
+ constexpr T *allocate(size_t N) {
+ return (T*)__builtin_operator_new(sizeof(T) * N); // #alloc
+ }
+ constexpr void deallocate(void *p, __SIZE_TYPE__) {
+ __builtin_operator_delete(p);
+ }
+ };
+  template<typename T, typename... Args>
+  constexpr T* construct_at(T* p, Args&&... args) { return ::new((void*)p) T(static_cast<Args&&>(args)...); }
+
+ template<typename T>
+ constexpr void destroy_at(T* p) {
+ p->~T();
+ }
+}
+
extern "C" {
typedef decltype(sizeof(int)) size_t;
extern size_t wcslen(const wchar_t *p);
@@ -433,6 +454,7 @@ namespace SourceLocation {
}
#define BITSIZE(x) (sizeof(x) * 8)
+constexpr bool __attribute__((ext_vector_type(4))) v4b{};
namespace popcount {
static_assert(__builtin_popcount(~0u) == __CHAR_BIT__ * sizeof(unsigned int), "");
static_assert(__builtin_popcount(0) == 0, "");
@@ -450,6 +472,7 @@ namespace popcount {
static_assert(__builtin_popcountg(0ul) == 0, "");
static_assert(__builtin_popcountg(~0ull) == __CHAR_BIT__ * sizeof(unsigned long long), "");
static_assert(__builtin_popcountg(0ull) == 0, "");
+ static_assert(__builtin_popcountg(v4b) == 0, "");
#ifdef __SIZEOF_INT128__
static_assert(__builtin_popcountg(~(unsigned __int128)0) == __CHAR_BIT__ * sizeof(unsigned __int128), "");
static_assert(__builtin_popcountg((unsigned __int128)0) == 0, "");
@@ -722,6 +745,7 @@ namespace clz {
char clz62[__builtin_clzg((unsigned _BitInt(128))0xf) == BITSIZE(_BitInt(128)) - 4 ? 1 : -1];
char clz63[__builtin_clzg((unsigned _BitInt(128))0xf, 42) == BITSIZE(_BitInt(128)) - 4 ? 1 : -1];
#endif
+ char clz64[__builtin_clzg(v4b, 0) == 0 ? 1 : -1];
}
namespace ctz {
@@ -792,6 +816,7 @@ namespace ctz {
char ctz62[__builtin_ctzg((unsigned _BitInt(128))1 << (BITSIZE(_BitInt(128)) - 1)) == BITSIZE(_BitInt(128)) - 1 ? 1 : -1];
char ctz63[__builtin_ctzg((unsigned _BitInt(128))1 << (BITSIZE(_BitInt(128)) - 1), 42) == BITSIZE(_BitInt(128)) - 1 ? 1 : -1];
#endif
+  char ctz64[__builtin_ctzg(v4b, 0) == 0 ? 1 : -1];
}
namespace bswap {
@@ -1514,7 +1539,7 @@ namespace Memchr {
extern struct Incomplete incomplete;
static_assert(__builtin_memchr(&incomplete, 0, 0u) == nullptr);
static_assert(__builtin_memchr(&incomplete, 0, 1u) == nullptr); // both-error {{not an integral constant}} \
- // ref-note {{read of incomplete type 'struct Incomplete'}}
+ // both-note {{read of incomplete type 'struct Incomplete'}}
const unsigned char &u1 = 0xf0;
auto &&i1 = (const signed char []){-128};
@@ -1697,6 +1722,15 @@ namespace WMemMove {
// both-note {{source of 'wmemmove' is nullptr}}
static_assert(__builtin_wmemmove(null, &global, sizeof(wchar_t))); // both-error {{}} \
// both-note {{destination of 'wmemmove' is nullptr}}
+
+ // Check that a pointer to an incomplete array is rejected.
+ constexpr int test_address_of_incomplete_array_type() { // both-error {{never produces a constant}}
+ extern int arr[];
+ __builtin_memmove(&arr, &arr, 4 * sizeof(arr[0])); // both-note 2{{cannot constant evaluate 'memmove' between objects of incomplete type 'int[]'}}
+ return arr[0] * 1000 + arr[1] * 100 + arr[2] * 10 + arr[3];
+ }
+ static_assert(test_address_of_incomplete_array_type() == 1234); // both-error {{constant}} \
+ // both-note {{in call}}
}
namespace Invalid {
@@ -1758,6 +1792,30 @@ namespace WithinLifetime {
}
} xstd; // both-error {{is not a constant expression}} \
// both-note {{in call to}}
+
+ /// FIXME: We do not have per-element lifetime information for primitive arrays.
+ /// See https://github.com/llvm/llvm-project/issues/147528
+ consteval bool test_dynamic(bool read_after_deallocate) {
+ std::allocator<int> a;
+ int* p = a.allocate(1); // expected-note 2{{allocation performed here was not deallocated}}
+ // a.allocate starts the lifetime of an array,
+ // the complete object of *p has started its lifetime
+ if (__builtin_is_within_lifetime(p))
+ return false;
+ std::construct_at(p);
+ if (!__builtin_is_within_lifetime(p))
+ return false;
+ std::destroy_at(p);
+ if (__builtin_is_within_lifetime(p))
+ return false;
+ a.deallocate(p, 1);
+ if (read_after_deallocate)
+ __builtin_is_within_lifetime(p); // ref-note {{read of heap allocated object that has been deleted}}
+ return true;
+ }
+ static_assert(test_dynamic(false)); // expected-error {{not an integral constant expression}}
+ static_assert(test_dynamic(true)); // both-error {{not an integral constant expression}} \
+ // ref-note {{in call to}}
}
#ifdef __SIZEOF_INT128__
diff --git a/clang/test/AST/ByteCode/builtin-object-size-codegen.cpp b/clang/test/AST/ByteCode/builtin-object-size-codegen.cpp
new file mode 100644
index 0000000..f6ddbeb
--- /dev/null
+++ b/clang/test/AST/ByteCode/builtin-object-size-codegen.cpp
@@ -0,0 +1,87 @@
+// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -triple x86_64-apple-darwin -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm -o - %s | FileCheck %s
+
+void foo() {
+ struct A { char buf[16]; };
+ struct B : A {};
+ struct C { int i; B bs[1]; } *c;
+
+ int gi;
+ // CHECK: call i64 @llvm.objectsize.i64.p0(ptr %{{.*}}, i1 false, i1 true, i1 false)
+ gi = __builtin_object_size(&c->bs[0], 0);
+ // CHECK: call i64 @llvm.objectsize.i64.p0(ptr %{{.*}}, i1 false, i1 true, i1 false)
+ gi = __builtin_object_size(&c->bs[0], 1);
+ // CHECK: call i64 @llvm.objectsize.i64.p0(ptr %{{.*}}, i1 true, i1 true, i1 false)
+ gi = __builtin_object_size(&c->bs[0], 2);
+ // CHECK: store i32 16
+ gi = __builtin_object_size(&c->bs[0], 3);
+}
+
+
+void foo2() {
+ struct A { int a; };
+ struct B { int b; };
+ struct C: public A, public B {};
+
+ C c;
+
+ int gi;
+ // CHECK: store i32 8
+ gi = __builtin_object_size(&c, 0);
+ // CHECK: store i32 8
+ gi = __builtin_object_size((A*)&c, 0);
+ // CHECK: store i32 4
+ gi = __builtin_object_size((B*)&c, 0);
+
+ // CHECK: store i32 8
+ gi = __builtin_object_size((char*)&c, 0);
+ // CHECK: store i32 8
+ gi = __builtin_object_size((char*)(A*)&c, 0);
+ // CHECK: store i32 4
+ gi = __builtin_object_size((char*)(B*)&c, 0);
+}
+
+
+typedef struct {
+ double c[0];
+ float f;
+} foofoo0_t;
+
+unsigned babar0(foofoo0_t *f) {
+ // CHECK: ret i32 0
+ return __builtin_object_size(f->c, 1);
+}
+
+void test2() {
+ struct A { char buf[16]; };
+ struct B : A {};
+ struct C { int i; B bs[1]; } *c;
+
+ int gi;
+ // CHECK: call i64 @llvm.objectsize.i64.p0(ptr %{{.*}}, i1 false, i1 true, i1 false)
+ gi = __builtin_object_size(&c->bs[0], 0);
+ // CHECK: call i64 @llvm.objectsize.i64.p0(ptr %{{.*}}, i1 false, i1 true, i1 false)
+ gi = __builtin_object_size(&c->bs[0], 1);
+ // CHECK: call i64 @llvm.objectsize.i64.p0(ptr %{{.*}}, i1 true, i1 true, i1 false)
+ gi = __builtin_object_size(&c->bs[0], 2);
+ // CHECK: store i32 16
+ gi = __builtin_object_size(&c->bs[0], 3);
+
+ // CHECK: call i64 @llvm.objectsize.i64.p0(ptr %{{.*}}, i1 false, i1 true, i1 false)
+ gi = __builtin_object_size((A*)&c->bs[0], 0);
+ // CHECK: store i32 16
+ gi = __builtin_object_size((A*)&c->bs[0], 1);
+ // CHECK: call i64 @llvm.objectsize.i64.p0(ptr %{{.*}}, i1 true, i1 true, i1 false)
+ gi = __builtin_object_size(&c->bs[0].buf[0], 2);
+ // CHECK: store i32 16
+ gi = __builtin_object_size(&c->bs[0].buf[0], 3);
+
+ // CHECK: call i64 @llvm.objectsize.i64.p0(ptr %{{.*}}, i1 false, i1 true, i1 false)
+ gi = __builtin_object_size(&c->bs[0].buf[0], 0);
+ // CHECK: store i32 16
+ gi = __builtin_object_size(&c->bs[0].buf[0], 1);
+ // CHECK: call i64 @llvm.objectsize.i64.p0(ptr %{{.*}}, i1 true, i1 true, i1 false)
+ gi = __builtin_object_size(&c->bs[0].buf[0], 2);
+ // CHECK: store i32 16
+ gi = __builtin_object_size(&c->bs[0].buf[0], 3);
+}
diff --git a/clang/test/AST/ByteCode/c.c b/clang/test/AST/ByteCode/c.c
index a7b1fe0..b6d2a69 100644
--- a/clang/test/AST/ByteCode/c.c
+++ b/clang/test/AST/ByteCode/c.c
@@ -173,6 +173,10 @@ _Static_assert(CTB3, ""); // pedantic-ref-warning {{GNU extension}} \
// pedantic-expected-warning {{GNU extension}}
+void nonComplexToComplexCast(void) {
+ _Complex double z = *(_Complex double *)&(struct { double r, i; }){0.0, 1.0};
+}
+
int t1 = sizeof(int);
void test4(void) {
t1 = sizeof(int);
@@ -329,3 +333,38 @@ void foo3 (void)
void* x = 0;
void* y = &*x;
}
+
+static void *FooTable[1] = {
+ [0] = (void *[1]) { // 1
+ [0] = (void *[1]) { // 2
+ [0] = (void *[1]) {} // pedantic-warning {{use of an empty initializer}}
+ },
+ }
+};
+
+int strcmp(const char *, const char *); // all-note {{passing argument to parameter here}}
+#define S "\x01\x02\x03\x04\x05\x06\x07\x08"
+const char _str[] = {S[0], S[1], S[2], S[3], S[4], S[5], S[6], S[7]};
+const unsigned char _str2[] = {S[0], S[1], S[2], S[3], S[4], S[5], S[6], S[7]};
+const int compared = strcmp(_str, (const char *)_str2); // all-error {{initializer element is not a compile-time constant}}
+
+
+const int compared2 = strcmp(strcmp, _str); // all-warning {{incompatible pointer types}} \
+ // all-error {{initializer element is not a compile-time constant}}
+
+int foo(x) // all-warning {{a function definition without a prototype is deprecated in all versions of C}}
+int x;
+{
+ return x;
+}
+
+void bar() { // pedantic-warning {{a function declaration without a prototype}}
+ int x;
+ x = foo(); // all-warning {{too few arguments}}
+}
+
+int *_b = &a;
+void discardedCmp(void)
+{
+ (*_b) = ((&a == &a) , a); // all-warning {{left operand of comma operator has no effect}}
+}
diff --git a/clang/test/AST/ByteCode/constexpr-vectors.cpp b/clang/test/AST/ByteCode/constexpr-vectors.cpp
index f19adad..81ec6aa 100644
--- a/clang/test/AST/ByteCode/constexpr-vectors.cpp
+++ b/clang/test/AST/ByteCode/constexpr-vectors.cpp
@@ -15,7 +15,6 @@ using FourFloatsExtVec __attribute__((ext_vector_type(4))) = float;
using FourDoublesExtVec __attribute__((ext_vector_type(4))) = double;
using FourI128ExtVec __attribute__((ext_vector_type(4))) = __int128;
-
// Next a series of tests to make sure these operations are usable in
// constexpr functions. Template instantiations don't emit Winvalid-constexpr,
// so we have to do these as macros.
@@ -875,3 +874,9 @@ void BoolVecUsage() {
constexpr auto k = ~FourBoolsExtVec{true, false, true, false};
static_assert(k[0] == false && k[1] == true && k[2] == false && k[3] == true, "");
}
+
+using EightBoolsExtVec __attribute__((ext_vector_type(8))) = bool;
+void BoolVecShuffle() {
+ constexpr EightBoolsExtVec a = __builtin_shufflevector(
+ FourBoolsExtVec{}, FourBoolsExtVec{}, 0, 1, 2, 3, 4, 5, 6, 7);
+}
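The new test leans on `__builtin_shufflevector`'s combined index space: indices 0-3 pick lanes of the first four-lane operand and 4-7 lanes of the second, so `0, 1, 2, 3, 4, 5, 6, 7` is a plain concatenation. A hedged sketch of the same idea on int vectors:

  typedef int v4i __attribute__((ext_vector_type(4)));
  typedef int v8i __attribute__((ext_vector_type(8)));
  constexpr v8i concat(v4i a, v4i b) {
    // Result lane i takes index i from the combined (a, b) lane space.
    return __builtin_shufflevector(a, b, 0, 1, 2, 3, 4, 5, 6, 7);
  }
  static_assert(concat(v4i{1, 2, 3, 4}, v4i{5, 6, 7, 8})[4] == 5, "");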
diff --git a/clang/test/AST/ByteCode/cxx11.cpp b/clang/test/AST/ByteCode/cxx11.cpp
index 7aecf23b..72bc762 100644
--- a/clang/test/AST/ByteCode/cxx11.cpp
+++ b/clang/test/AST/ByteCode/cxx11.cpp
@@ -287,6 +287,8 @@ namespace OverlappingStrings {
constexpr bool may_overlap_4 = &"xfoo"[1] == &"xfoo"[1]; // both-error {{}} both-note {{addresses of potentially overlapping literals}}
+ /// Used to crash.
+ const bool x = &"ab"[0] == &"ba"[3];
}
@@ -330,3 +332,33 @@ namespace ReadMutableInCopyCtor {
// both-note {{read of mutable member 'u'}} \
// both-note {{in call to 'G(g1)'}}
}
+
+namespace GH150709 {
+ struct C { };
+ struct D : C {
+ constexpr int f() const { return 1; };
+ };
+ struct E : C { };
+ struct F : D { };
+ struct G : E { };
+
+ constexpr C c1, c2[2];
+ constexpr D d1, d2[2];
+ constexpr E e1, e2[2];
+ constexpr F f;
+ constexpr G g;
+
+ constexpr auto mp = static_cast<int (C::*)() const>(&D::f);
+
+ // sanity checks for fix of GH150709 (unchanged behavior)
+ static_assert((c1.*mp)() == 1, ""); // both-error {{constant expression}}
+ static_assert((d1.*mp)() == 1, "");
+ static_assert((f.*mp)() == 1, "");
+ static_assert((c2[0].*mp)() == 1, ""); // ref-error {{constant expression}}
+ static_assert((d2[0].*mp)() == 1, "");
+
+ // incorrectly undiagnosed before fix of GH150709
+ static_assert((e1.*mp)() == 1, ""); // ref-error {{constant expression}}
+ static_assert((e2[0].*mp)() == 1, ""); // ref-error {{constant expression}}
+ static_assert((g.*mp)() == 1, ""); // ref-error {{constant expression}}
+}
diff --git a/clang/test/AST/ByteCode/cxx20.cpp b/clang/test/AST/ByteCode/cxx20.cpp
index cc315d3..67bf9a7 100644
--- a/clang/test/AST/ByteCode/cxx20.cpp
+++ b/clang/test/AST/ByteCode/cxx20.cpp
@@ -1084,3 +1084,19 @@ namespace Virtual {
static_assert(l.b == 10);
static_assert(l.c == 10);
}
+
+namespace DiscardedTrivialCXXConstructExpr {
+ struct S {
+ constexpr S(int a) : x(a) {}
+ int x;
+ };
+
+ constexpr int foo(int x) { // ref-error {{never produces a constant expression}}
+ throw S(3); // both-note {{not valid in a constant expression}} \
+ // ref-note {{not valid in a constant expression}}
+ return 1;
+ }
+
+ constexpr int y = foo(12); // both-error {{must be initialized by a constant expression}} \
+ // both-note {{in call to}}
+}
diff --git a/clang/test/AST/ByteCode/cxx23.cpp b/clang/test/AST/ByteCode/cxx23.cpp
index 45dd4f5..2182d7c 100644
--- a/clang/test/AST/ByteCode/cxx23.cpp
+++ b/clang/test/AST/ByteCode/cxx23.cpp
@@ -309,15 +309,14 @@ namespace NonLiteralDtorInParam {
~NonLiteral() {} // all23-note {{declared here}}
};
constexpr int F2(NonLiteral N) { // all20-error {{constexpr function's 1st parameter type 'NonLiteral' is not a literal type}} \
- // ref23-note {{non-constexpr function '~NonLiteral' cannot be used in a constant expression}}
+ // all23-note {{non-constexpr function '~NonLiteral' cannot be used in a constant expression}}
return 8;
}
void test() {
NonLiteral L;
- constexpr auto D = F2(L); // all23-error {{must be initialized by a constant expression}} \
- // expected23-note {{non-constexpr function '~NonLiteral' cannot be used in a constant expression}}
+ constexpr auto D = F2(L); // all23-error {{must be initialized by a constant expression}}
}
}
diff --git a/clang/test/AST/ByteCode/cxx2a.cpp b/clang/test/AST/ByteCode/cxx2a.cpp
index ac2f988..533173d 100644
--- a/clang/test/AST/ByteCode/cxx2a.cpp
+++ b/clang/test/AST/ByteCode/cxx2a.cpp
@@ -225,3 +225,31 @@ namespace Dtor {
static_assert(pseudo(true, false)); // both-error {{constant expression}} both-note {{in call}}
static_assert(pseudo(false, true));
}
+
+namespace GH150705 {
+ struct A { };
+ struct B : A { };
+ struct C : A {
+ constexpr virtual int foo() const { return 0; }
+ };
+
+ constexpr auto p = &C::foo;
+ constexpr auto q = static_cast<int (A::*)() const>(p);
+ constexpr B b;
+ constexpr const A& a = b;
+ constexpr auto x = (a.*q)(); // both-error {{constant expression}}
+}
+
+namespace DependentRequiresExpr {
+ template <class T,
+ bool = []() -> bool { // both-error {{not a constant expression}}
+ if (requires { T::type; })
+ return true;
+ return false;
+ }()>
+ struct p {
+ using type = void;
+ };
+
+ template <class T> using P = p<T>::type; // both-note {{while checking a default template argument}}
+}
diff --git a/clang/test/AST/ByteCode/cxx98.cpp b/clang/test/AST/ByteCode/cxx98.cpp
index c17049b..1150a4e 100644
--- a/clang/test/AST/ByteCode/cxx98.cpp
+++ b/clang/test/AST/ByteCode/cxx98.cpp
@@ -18,13 +18,12 @@ template struct C<cval>;
/// FIXME: This example does not get properly diagnosed in the new interpreter.
extern const int recurse1;
-const int recurse2 = recurse1; // both-note {{declared here}}
+const int recurse2 = recurse1; // ref-note {{declared here}}
const int recurse1 = 1;
int array1[recurse1];
int array2[recurse2]; // ref-warning 2{{variable length array}} \
- // both-note {{initializer of 'recurse2' is not a constant expression}} \
- // expected-warning {{variable length array}} \
- // expected-error {{variable length array}}
+ // ref-note {{initializer of 'recurse2' is not a constant expression}} \
+ // expected-warning 2{{variable length array}}
int NCI; // both-note {{declared here}}
int NCIA[NCI]; // both-warning {{variable length array}} \
@@ -64,3 +63,20 @@ const int b = 1 / 0; // both-warning {{division by zero is undefined}} \
// both-note {{declared here}}
_Static_assert(b, ""); // both-error {{not an integral constant expression}} \
// both-note {{initializer of 'b' is not a constant expression}}
+
+#ifdef __SIZEOF_INT128__
+/// The if statement tries an ltor conversion on an inactive union member.
+union InactiveReadUnion {
+ int a;
+ __uint128_t n;
+};
+
+int inactiveRead(void) {
+ const InactiveReadUnion U = {1};
+
+ if (U.n)
+ return 1;
+
+ return 0;
+}
+#endif
diff --git a/clang/test/AST/ByteCode/functions.cpp b/clang/test/AST/ByteCode/functions.cpp
index 36e7bb3..01bf0a5 100644
--- a/clang/test/AST/ByteCode/functions.cpp
+++ b/clang/test/AST/ByteCode/functions.cpp
@@ -708,3 +708,32 @@ namespace NoDiags {
return true;
}
}
+
+namespace EnableIfWithTemporary {
+ struct A { ~A(); };
+ int &h() __attribute__((enable_if((A(), true), ""))); // both-warning {{clang extension}}
+}
+
+namespace LocalVarForParmVarDecl {
+ struct Iter {
+ void *p;
+ };
+ constexpr bool bar2(Iter A) {
+ return true;
+ }
+ constexpr bool bar(Iter A, bool b) {
+ if (b)
+ return true;
+
+ return bar(A, true);
+ }
+ constexpr int foo() {
+ return bar(Iter(), false);
+ }
+ static_assert(foo(), "");
+}
+
+namespace PtrPtrCast {
+ void foo() { ; }
+ void bar(int *a) { a = (int *)(void *)(foo); }
+}
diff --git a/clang/test/AST/ByteCode/invalid.cpp b/clang/test/AST/ByteCode/invalid.cpp
index 2a6c2d1..affb40ea 100644
--- a/clang/test/AST/ByteCode/invalid.cpp
+++ b/clang/test/AST/ByteCode/invalid.cpp
@@ -58,3 +58,11 @@ namespace Casts {
/// Just make sure this doesn't crash.
float PR9558 = reinterpret_cast<const float&>("asd");
}
+
+
+/// This used to crash in collectBlock().
+struct S {
+};
+S s;
+S *sp[2] = {&s, &s};
+S *&spp = sp[1];
diff --git a/clang/test/AST/ByteCode/lifetimes.cpp b/clang/test/AST/ByteCode/lifetimes.cpp
index 5c8d562..d3b02d2 100644
--- a/clang/test/AST/ByteCode/lifetimes.cpp
+++ b/clang/test/AST/ByteCode/lifetimes.cpp
@@ -94,14 +94,25 @@ namespace CallScope {
int n = 0;
constexpr int f() const { return 0; }
};
- constexpr Q *out_of_lifetime(Q q) { return &q; } // both-warning {{address of stack}}
+ constexpr Q *out_of_lifetime(Q q) { return &q; } // both-warning {{address of stack}} \
+ // expected-note 2{{declared here}}
constexpr int k3 = out_of_lifetime({})->n; // both-error {{must be initialized by a constant expression}} \
- // expected-note {{read of temporary whose lifetime has ended}} \
- // expected-note {{temporary created here}} \
+ // expected-note {{read of variable whose lifetime has ended}} \
// ref-note {{read of object outside its lifetime}}
constexpr int k4 = out_of_lifetime({})->f(); // both-error {{must be initialized by a constant expression}} \
- // expected-note {{member call on temporary whose lifetime has ended}} \
- // expected-note {{temporary created here}} \
+ // expected-note {{member call on variable whose lifetime has ended}} \
// ref-note {{member call on object outside its lifetime}}
}
+
+namespace ExprDoubleDestroy {
+ template <typename T>
+ constexpr bool test() {
+ T{}.~T(); // both-note {{lifetime has already ended}}
+ return true;
+ }
+
+ struct S { int x; };
+ constexpr bool t = test<S>(); // both-error {{must be initialized by a constant expression}} \
+ // both-note {{in call to}}
+}
diff --git a/clang/test/AST/ByteCode/lifetimes26.cpp b/clang/test/AST/ByteCode/lifetimes26.cpp
index a5203ae..c3163f8 100644
--- a/clang/test/AST/ByteCode/lifetimes26.cpp
+++ b/clang/test/AST/ByteCode/lifetimes26.cpp
@@ -17,8 +17,8 @@ namespace std {
constexpr void *operator new(std::size_t, void *p) { return p; }
namespace std {
- template<typename T> constexpr T *construct(T *p) { return new (p) T; }
- template<typename T> constexpr void destroy(T *p) { p->~T(); }
+ template<typename T> constexpr T *construct_at(T *p) { return new (p) T; }
+ template<typename T> constexpr void destroy_at(T *p) { p->~T(); }
}
constexpr bool foo() {
@@ -43,7 +43,24 @@ constexpr void destroy_pointer() {
using T = int*;
T p;
p.~T();
- std::construct(&p);
+ std::construct_at(&p);
}
static_assert((destroy_pointer(), true));
+
+namespace DestroyArrayElem {
+ /// This is proof that std::destroy_at'ing an array element
+ /// ends the lifetime of the entire array.
+ /// See https://github.com/llvm/llvm-project/issues/147528
+ /// Using destroy_at on array elements is currently a no-op due to this.
+ constexpr int test() {
+ int a[4] = {};
+
+ std::destroy_at(&a[3]);
+ int r = a[1];
+ std::construct_at(&a[3]);
+
+ return r;
+ }
+ static_assert(test() == 0);
+}
diff --git a/clang/test/AST/ByteCode/literals.cpp b/clang/test/AST/ByteCode/literals.cpp
index ddf1d2b..5bc3f7f 100644
--- a/clang/test/AST/ByteCode/literals.cpp
+++ b/clang/test/AST/ByteCode/literals.cpp
@@ -1,7 +1,7 @@
-// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -Wno-vla -fms-extensions -std=c++11 -verify=expected,both %s
-// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -Wno-vla -fms-extensions -std=c++20 -verify=expected,both %s
-// RUN: %clang_cc1 -std=c++11 -fms-extensions -Wno-vla -verify=ref,both %s
-// RUN: %clang_cc1 -std=c++20 -fms-extensions -Wno-vla -verify=ref,both %s
+// RUN: %clang_cc1 -Wno-vla -fms-extensions -std=c++11 -fexperimental-new-constant-interpreter -verify=expected,both %s
+// RUN: %clang_cc1 -Wno-vla -fms-extensions -std=c++20 -fexperimental-new-constant-interpreter -verify=expected,both %s
+// RUN: %clang_cc1 -Wno-vla -fms-extensions -std=c++11 -verify=ref,both %s
+// RUN: %clang_cc1 -Wno-vla -fms-extensions -std=c++20 -verify=ref,both %s
#define INT_MIN (~__INT_MAX__)
#define INT_MAX __INT_MAX__
@@ -546,16 +546,14 @@ namespace IncDec {
// expected-note 2{{increment of uninitialized}} \
// expected-note {{read of uninitialized}}
else
- a++; // ref-note 2{{increment of uninitialized}} \
- // expected-note 2{{increment of uninitialized}}
+ a++; // both-note 2{{increment of uninitialized}}
} else {
if (Pre)
--a; // ref-note 3{{decrement of uninitialized}} \
// expected-note 2{{decrement of uninitialized}} \
// expected-note {{read of uninitialized}}
else
- a--; // ref-note 2{{decrement of uninitialized}} \
- // expected-note 2{{decrement of uninitialized}}
+ a--; // both-note 2{{decrement of uninitialized}}
}
return 1;
}
@@ -1430,3 +1428,10 @@ namespace OnePastEndCmp {
constexpr const int *q = &s.a + 1;
static_assert(p != q, "");
}
+
+namespace ExternRedecl {
+ extern const int a;
+ constexpr const int *p = &a;
+ constexpr int a = 10;
+ static_assert(*p == 10, "");
+}
diff --git a/clang/test/AST/ByteCode/new-delete.cpp b/clang/test/AST/ByteCode/new-delete.cpp
index c5f1878..af747d7 100644
--- a/clang/test/AST/ByteCode/new-delete.cpp
+++ b/clang/test/AST/ByteCode/new-delete.cpp
@@ -82,8 +82,7 @@ static_assert(noInit() == 0, "");
/// Try to delete a pointer that hasn't been heap allocated.
constexpr int notHeapAllocated() { // both-error {{never produces a constant expression}}
int A = 0; // both-note 2{{declared here}}
- delete &A; // ref-note 2{{delete of pointer '&A' that does not point to a heap-allocated object}} \
- // expected-note 2{{delete of pointer '&A' that does not point to a heap-allocated object}}
+ delete &A; // both-note 2{{delete of pointer '&A' that does not point to a heap-allocated object}}
return 1;
}
@@ -374,8 +373,7 @@ namespace delete_random_things {
static_assert((delete &(new A)->n, true)); // both-error {{}} \
// both-note {{delete of pointer to subobject }}
static_assert((delete (new int + 1), true)); // both-error {{}} \
- // ref-note {{delete of pointer '&{*new int#0} + 1' that does not point to complete object}} \
- // expected-note {{delete of pointer '&{*new int#1} + 1' that does not point to complete object}}
+ // both-note {{delete of pointer '&{*new int#0} + 1' that does not point to complete object}}
static_assert((delete[] (new int[3] + 1), true)); // both-error {{}} \
// both-note {{delete of pointer to subobject}}
static_assert((delete &(int&)(int&&)0, true)); // both-error {{}} \
@@ -1071,6 +1069,28 @@ namespace BaseCompare {
static_assert(foo());
}
+
+namespace NegativeArraySize {
+ constexpr void f() { // both-error {{constexpr function never produces a constant expression}}
+ int x = -1;
+    int *p = new int[x]; // both-note {{cannot allocate array; evaluated array bound -1 is negative}}
+ }
+} // namespace NegativeArraySize
+
+namespace NewNegSizeNothrow {
+ constexpr int get_neg_size() {
+ return -1;
+ }
+
+ constexpr bool test_nothrow_neg_size() {
+ int x = get_neg_size();
+ int* p = new (std::nothrow) int[x];
+ return p == nullptr;
+ }
+
+ static_assert(test_nothrow_neg_size(), "expected nullptr");
+} // namespace NewNegSizeNothrow
+
#else
/// Make sure we reject this prior to C++20
constexpr int a() { // both-error {{never produces a constant expression}}
diff --git a/clang/test/AST/ByteCode/records.cpp b/clang/test/AST/ByteCode/records.cpp
index 5ca3e2d..48cf811 100644
--- a/clang/test/AST/ByteCode/records.cpp
+++ b/clang/test/AST/ByteCode/records.cpp
@@ -1,11 +1,11 @@
-// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -std=c++14 -verify=expected,both %s
-// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -std=c++17 -verify=expected,both %s
-// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -std=c++17 -triple i686 -verify=expected,both %s
-// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -std=c++20 -verify=expected,both %s
-// RUN: %clang_cc1 -verify=ref,both -std=c++14 %s
-// RUN: %clang_cc1 -verify=ref,both -std=c++17 %s
-// RUN: %clang_cc1 -verify=ref,both -std=c++17 -triple i686 %s
-// RUN: %clang_cc1 -verify=ref,both -std=c++20 %s
+// RUN: %clang_cc1 -std=c++14 -verify=expected,both %s -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -std=c++17 -verify=expected,both %s -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -std=c++17 -verify=expected,both -triple i686 %s -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -std=c++20 -verify=expected,both %s -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -std=c++14 -verify=ref,both %s
+// RUN: %clang_cc1 -std=c++17 -verify=ref,both %s
+// RUN: %clang_cc1 -std=c++17 -verify=ref,both -triple i686 %s
+// RUN: %clang_cc1 -std=c++20 -verify=ref,both %s
/// Used to crash.
struct Empty {};
diff --git a/clang/test/AST/ByteCode/typeid.cpp b/clang/test/AST/ByteCode/typeid.cpp
index 5be5604..179a66f 100644
--- a/clang/test/AST/ByteCode/typeid.cpp
+++ b/clang/test/AST/ByteCode/typeid.cpp
@@ -13,7 +13,12 @@ struct __type_info_implementations {
typedef __unique_impl __impl;
};
-class type_info {
+class __pointer_type_info {
+public:
+ int __flags = 0;
+};
+
+class type_info : public __pointer_type_info {
protected:
typedef __type_info_implementations::__impl __impl;
__impl::__type_name_t __type_name;
@@ -40,3 +45,10 @@ constexpr bool test() {
return true;
}
static_assert(test());
+
+int dontcrash() {
+ auto& pti = static_cast<const std::__pointer_type_info&>(
+ typeid(int)
+ );
+ return pti.__flags == 0 ? 1 : 0;
+}
diff --git a/clang/test/AST/ByteCode/unions.cpp b/clang/test/AST/ByteCode/unions.cpp
index 8ed76258..6bccbda 100644
--- a/clang/test/AST/ByteCode/unions.cpp
+++ b/clang/test/AST/ByteCode/unions.cpp
@@ -966,3 +966,15 @@ namespace AddressComparison {
static_assert(&U2.a[0] != &U2.b[1]);
static_assert(&U2.a[0] == &U2.b[1]); // both-error {{failed}}
}
+
+#if __cplusplus >= 202002L
+namespace UnionMemberOnePastEnd {
+ constexpr bool b() {
+ union {
+ int p;
+ };
+ return &p == (&p + 1);
+ }
+ static_assert(!b());
+}
+#endif
diff --git a/clang/test/AST/ByteCode/vectors.cpp b/clang/test/AST/ByteCode/vectors.cpp
index a04b678..91fec8f 100644
--- a/clang/test/AST/ByteCode/vectors.cpp
+++ b/clang/test/AST/ByteCode/vectors.cpp
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -verify=expected,both %s
-// RUN: %clang_cc1 -verify=ref,both %s
+// RUN: %clang_cc1 -Wno-c++20-extensions -fexperimental-new-constant-interpreter -verify=expected,both -flax-vector-conversions=none %s
+// RUN: %clang_cc1 -Wno-c++20-extensions -verify=ref,both -flax-vector-conversions=none %s
typedef int __attribute__((vector_size(16))) VI4;
constexpr VI4 A = {1,2,3,4};
@@ -58,7 +58,7 @@ namespace Vector {
namespace {
typedef float __attribute__((vector_size(16))) VI42;
- constexpr VI42 A2 = A;
+ constexpr VI42 A2 = {1.f, 2.f, 3.f, 4.f};
}
namespace BoolToSignedIntegralCast{
@@ -143,3 +143,28 @@ namespace {
constexpr __m128d v_mm_cvtps_pd = _mm_cvtps_pd(kf1);
static_assert(v_mm_cvtps_pd[0] == -1.0 && v_mm_cvtps_pd[1] == +2.0);
}
+
+namespace Assign {
+ constexpr int a2() {
+ VI a = {0, 0, 0, 0};
+ VI b;
+
+ b = {1,1,1,1};
+ return b[0] + b[1] + b[2] + b[3];
+ }
+
+ static_assert(a2() == 4);
+
+ typedef short v2int16_t __attribute__((ext_vector_type(2)));
+ typedef unsigned short v2int_t __attribute__((ext_vector_type(2)));
+
+
+ constexpr bool invalid() {
+ v2int16_t a = {0, 0};
+ v2int_t b;
+ b = a; // both-error {{incompatible type}}
+
+ return true;
+ }
+ static_assert(invalid()); // both-error {{not an integral constant expression}}
+}
diff --git a/clang/test/AST/HLSL/RootSignatures-AST.hlsl b/clang/test/AST/HLSL/RootSignatures-AST.hlsl
index df06165..32da1f1 100644
--- a/clang/test/AST/HLSL/RootSignatures-AST.hlsl
+++ b/clang/test/AST/HLSL/RootSignatures-AST.hlsl
@@ -115,7 +115,7 @@ void same_rs_string_main() {}
"DescriptorTable(Sampler(s0, numDescriptors = 4, space = 1))"
// Ensure that when we define a different type root signature that it creates
-// a seperate decl and identifier to reference
+// a separate decl and identifier to reference
// CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[DIFF_RS_DECL:__hlsl_rootsig_decl_\d*]]
// CHECK-V1_0: version: 1.0,
diff --git a/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl b/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
index 1c8b9c1..6ee7145 100644
--- a/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
+++ b/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
@@ -95,12 +95,12 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: CompoundStmt
// CHECK-NEXT: BinaryOperator {{.*}} '='
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: CallExpr {{.*}} '__hlsl_resource_t
// CHECK-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_uninitializedhandle'
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: AlwaysInlineAttr
// Constructor from binding
@@ -114,12 +114,12 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: CompoundStmt {{.*}}
// CHECK-NEXT: BinaryOperator {{.*}} '='
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: CallExpr {{.*}} '__hlsl_resource_t
// CHECK-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_handlefrombinding'
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'registerNo' 'unsigned int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'spaceNo' 'unsigned int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'int' ParmVar {{.*}} 'range' 'int'
@@ -138,12 +138,12 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: CompoundStmt {{.*}}
// CHECK-NEXT: BinaryOperator {{.*}} '='
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: CallExpr {{.*}} '__hlsl_resource_t
// CHECK-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_handlefromimplicitbinding'
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'spaceNo' 'unsigned int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'int' ParmVar {{.*}} 'range' 'int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'index' 'unsigned int'
@@ -166,7 +166,7 @@ RESOURCE<float> Buffer;
// CHECK-SUBSCRIPT-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-SUBSCRIPT-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]
// CHECK-SUBSCRIPT-SAME: ' lvalue .__handle {{.*}}
-// CHECK-SUBSCRIPT-NEXT: CXXThisExpr {{.*}} 'const [[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-SUBSCRIPT-NEXT: CXXThisExpr {{.*}} 'const hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-SUBSCRIPT-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-SUBSCRIPT-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -183,7 +183,7 @@ RESOURCE<float> Buffer;
// CHECK-SUBSCRIPT-UAV-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-SUBSCRIPT-UAV-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]
// CHECK-SUBSCRIPT-UAV-SAME: ' lvalue .__handle {{.*}}
-// CHECK-SUBSCRIPT-UAV-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-SUBSCRIPT-UAV-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-SUBSCRIPT-UAV-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-SUBSCRIPT-UAV-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -204,7 +204,7 @@ RESOURCE<float> Buffer;
// CHECK-LOAD-SAME{LITERAL}: [[hlsl::resource_class(
// CHECK-LOAD-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]
// CHECK-LOAD-SAME: ' lvalue .__handle {{.*}}
-// CHECK-LOAD-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-LOAD-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-LOAD-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-LOAD-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -220,7 +220,7 @@ RESOURCE<float> Buffer;
// CHECK-COUNTER-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-COUNTER-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-COUNTER-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]' lvalue .__handle
-// CHECK-COUNTER-NEXT: CXXThisExpr {{.*}} 'RWStructuredBuffer<element_type>' lvalue implicit this
+// CHECK-COUNTER-NEXT: CXXThisExpr {{.*}} 'hlsl::RWStructuredBuffer<element_type>' lvalue implicit this
// CHECK-COUNTER-NEXT: IntegerLiteral {{.*}} 'int' 1
// CHECK-COUNTER-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -236,7 +236,7 @@ RESOURCE<float> Buffer;
// CHECK-COUNTER-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-COUNTER-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-COUNTER-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]' lvalue .__handle
-// CHECK-COUNTER-NEXT: CXXThisExpr {{.*}} 'RWStructuredBuffer<element_type>' lvalue implicit this
+// CHECK-COUNTER-NEXT: CXXThisExpr {{.*}} 'hlsl::RWStructuredBuffer<element_type>' lvalue implicit this
// CHECK-COUNTER-NEXT: IntegerLiteral {{.*}} 'int' -1
// CHECK-COUNTER-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -254,7 +254,7 @@ RESOURCE<float> Buffer;
// CHECK-APPEND-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-APPEND-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-APPEND-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]' lvalue .__handle
-// CHECK-APPEND-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-APPEND-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-APPEND-NEXT: CallExpr {{.*}} 'unsigned int'
// CHECK-APPEND-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-APPEND-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_buffer_update_counter' 'unsigned int (...) noexcept'
@@ -262,7 +262,7 @@ RESOURCE<float> Buffer;
// CHECK-APPEND-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-APPEND-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-APPEND-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]' lvalue .__handle
-// CHECK-APPEND-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-APPEND-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-APPEND-NEXT: IntegerLiteral {{.*}} 'int' 1
// CHECK-APPEND-NEXT: DeclRefExpr {{.*}} 'element_type' ParmVar {{.*}} 'value' 'element_type'
@@ -279,7 +279,7 @@ RESOURCE<float> Buffer;
// CHECK-CONSUME-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-CONSUME-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-CONSUME-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]' lvalue .__handle
-// CHECK-CONSUME-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-CONSUME-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-CONSUME-NEXT: CallExpr {{.*}} 'unsigned int'
// CHECK-CONSUME-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-CONSUME-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_buffer_update_counter' 'unsigned int (...) noexcept'
@@ -287,7 +287,7 @@ RESOURCE<float> Buffer;
// CHECK-CONSUME-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-CONSUME-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-CONSUME-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]' lvalue .__handle
-// CHECK-CONSUME-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-CONSUME-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-CONSUME-NEXT: IntegerLiteral {{.*}} 'int' -1
// CHECK: ClassTemplateSpecializationDecl {{.*}} class [[RESOURCE]] definition
diff --git a/clang/test/AST/HLSL/TypedBuffers-AST.hlsl b/clang/test/AST/HLSL/TypedBuffers-AST.hlsl
index d6b88e2..e7f000e 100644
--- a/clang/test/AST/HLSL/TypedBuffers-AST.hlsl
+++ b/clang/test/AST/HLSL/TypedBuffers-AST.hlsl
@@ -70,12 +70,12 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: CompoundStmt
// CHECK-NEXT: BinaryOperator {{.*}} '='
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: CallExpr {{.*}} '__hlsl_resource_t
// CHECK-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_uninitializedhandle'
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: AlwaysInlineAttr
// Constructor from binding
@@ -89,12 +89,12 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: CompoundStmt {{.*}}
// CHECK-NEXT: BinaryOperator {{.*}} '='
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: CallExpr {{.*}} '__hlsl_resource_t
// CHECK-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_handlefrombinding'
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'registerNo' 'unsigned int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'spaceNo' 'unsigned int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'int' ParmVar {{.*}} 'range' 'int'
@@ -113,12 +113,12 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: CompoundStmt {{.*}}
// CHECK-NEXT: BinaryOperator {{.*}} '='
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: CallExpr {{.*}} '__hlsl_resource_t
// CHECK-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_handlefromimplicitbinding'
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'spaceNo' 'unsigned int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'int' ParmVar {{.*}} 'range' 'int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'index' 'unsigned int'
@@ -141,7 +141,7 @@ RESOURCE<float> Buffer;
// CHECK-SRV-SAME{LITERAL}: [[hlsl::resource_class(SRV)]]
// CHECK-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]
// CHECK-SAME: ' lvalue .__handle {{.*}}
-// CHECK-NEXT: CXXThisExpr {{.*}} 'const [[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'const hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -157,7 +157,7 @@ RESOURCE<float> Buffer;
// CHECK-UAV-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-UAV-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]
// CHECK-UAV-SAME: ' lvalue .__handle {{.*}}
-// CHECK-UAV-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-UAV-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-UAV-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-UAV-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -176,7 +176,7 @@ RESOURCE<float> Buffer;
// CHECK-SRV-SAME{LITERAL}: [[hlsl::resource_class(SRV)]]
// CHECK-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]
// CHECK-SAME: ' lvalue .__handle {{.*}}
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
diff --git a/clang/test/AST/HLSL/cbuffer_and_namespaces.hlsl b/clang/test/AST/HLSL/cbuffer_and_namespaces.hlsl
index 2f08531..1c5e067 100644
--- a/clang/test/AST/HLSL/cbuffer_and_namespaces.hlsl
+++ b/clang/test/AST/HLSL/cbuffer_and_namespaces.hlsl
@@ -63,9 +63,9 @@ namespace NS2 {
// CHECK: HLSLBufferDecl {{.*}} line:[[# @LINE + 2]]:11 cbuffer CB2
// CHECK: HLSLResourceClassAttr {{.*}} Implicit CBuffer
cbuffer CB2 {
- // CHECK: VarDecl {{.*}} foo0 'hlsl_constant ::Foo':'hlsl_constant Foo'
+ // CHECK: VarDecl {{.*}} foo0 'hlsl_constant ::Foo'
::Foo foo0;
- // CHECK: VarDecl {{.*}} foo1 'hlsl_constant Foo':'hlsl_constant NS2::Foo'
+ // CHECK: VarDecl {{.*}} foo1 'hlsl_constant Foo'
Foo foo1;
// CHECK: VarDecl {{.*}} foo2 'hlsl_constant NS1::Foo'
NS1::Foo foo2;
diff --git a/clang/test/AST/HLSL/resource_binding_attr.hlsl b/clang/test/AST/HLSL/resource_binding_attr.hlsl
index c073cd4..c6d93b9 100644
--- a/clang/test/AST/HLSL/resource_binding_attr.hlsl
+++ b/clang/test/AST/HLSL/resource_binding_attr.hlsl
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -finclude-default-header -ast-dump -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -finclude-default-header -ast-dump -o - %s | FileCheck %s -check-prefixes=CHECK,DXIL
+// RUN: %clang_cc1 -triple spirv-unknown-vulkan-library -finclude-default-header -ast-dump -o - %s | FileCheck %s -check-prefixes=CHECK,SPV
// CHECK: HLSLBufferDecl {{.*}} line:[[# @LINE + 4]]:9 cbuffer CB
// CHECK-NEXT: HLSLResourceClassAttr {{.*}} Implicit CBuffer
@@ -34,6 +35,10 @@ RWBuffer<float> UAV1 : register(u2), UAV2 : register(u4);
// CHECK: HLSLResourceBindingAttr {{.*}} "" "space5"
RWBuffer<float> UAV3 : register(space5);
+// CHECK: VarDecl {{.*}} UAV_Array 'RWBuffer<float>[10]'
+// CHECK: HLSLResourceBindingAttr {{.*}} "u10" "space6"
+RWBuffer<float> UAV_Array[10] : register(u10, space6);
+
//
// Default constants ($Globals) layout annotations
@@ -56,3 +61,44 @@ struct S {
// CHECK: VarDecl {{.*}} s 'hlsl_constant S'
// CHECK: HLSLResourceBindingAttr {{.*}} "c10" "space0
S s : register(c10);
+
+//
+// Implicit binding
+
+// Constant buffers should have an implicit binding attribute added by SemaHLSL,
+// unless the target is SPIR-V and there is a [[vk::binding]] attribute.
+// CHECK: HLSLBufferDecl {{.*}} line:[[# @LINE + 3]]:9 cbuffer CB2
+// CHECK-NEXT: HLSLResourceClassAttr {{.*}} Implicit CBuffer
+// CHECK-NEXT: HLSLResourceBindingAttr {{.*}} Implicit "" "0"
+cbuffer CB2 {
+ float4 c;
+}
+
+// CHECK: HLSLBufferDecl {{.*}} line:[[# @LINE + 7]]:9 cbuffer CB3
+// CHECK-NEXT: HLSLResourceClassAttr {{.*}} Implicit CBuffer
+// DXIL: HLSLResourceBindingAttr {{.*}} Implicit
+// DXIL-NOT: HLSLVkBindingAttr
+// SPV: HLSLVkBindingAttr {{.*}} 1 0
+// SPV-NOT: HLSLResourceBindingAttr {{.*}} Implicit
+[[vk::binding(1)]]
+cbuffer CB3 {
+ float2 d;
+}
+
+// Resource arrays should have an implicit binding attribute added by SemaHLSL,
+// unless the target is SPIR-V and there is a [[vk::binding]] attribute.
+// CHECK: VarDecl {{.*}} SB 'StructuredBuffer<float>[10]'
+// CHECK: HLSLResourceBindingAttr {{.*}} Implicit "" "0"
+StructuredBuffer<float> SB[10];
+
+// CHECK: VarDecl {{.*}} SB2 'StructuredBuffer<float>[10]'
+// DXIL: HLSLResourceBindingAttr {{.*}} Implicit
+// DXIL-NOT: HLSLVkBindingAttr
+// SPV: HLSLVkBindingAttr {{.*}} 2 0
+// SPV-NOT: HLSLResourceBindingAttr {{.*}} Implicit
+[[vk::binding(2)]]
+StructuredBuffer<float> SB2[10];
+
+// $Globals should have an implicit binding attribute added by SemaHLSL
+// CHECK: HLSLBufferDecl {{.*}} implicit cbuffer $Globals
+// CHECK: HLSLResourceBindingAttr {{.*}} Implicit "" "0"
diff --git a/clang/test/AST/HLSL/rootsignature-define-ast.hlsl b/clang/test/AST/HLSL/rootsignature-define-ast.hlsl
new file mode 100644
index 0000000..9c17cbc
--- /dev/null
+++ b/clang/test/AST/HLSL/rootsignature-define-ast.hlsl
@@ -0,0 +1,62 @@
+// Establish a baseline without a define specified
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -ast-dump \
+// RUN: -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,NO-OVERRIDE
+
+// Check that we can set the entry function even if it doesn't have an attr
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -ast-dump \
+// RUN: -hlsl-entry none_main -fdx-rootsignature-define=SampleCBV \
+// RUN: -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,SET
+
+// Check that we can set the entry function overriding an attr
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -ast-dump \
+// RUN: -hlsl-entry uav_main -fdx-rootsignature-define=SampleCBV \
+// RUN: -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,OVERRIDE
+
+// Check that we can override with a command line root signature
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -ast-dump \
+// RUN: -hlsl-entry cbv_main -fdx-rootsignature-define=CmdRS -DCmdRS='"SRV(t0)"' \
+// RUN: -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CMD
+
+#define SampleCBV "CBV(b0)"
+#define SampleUAV "UAV(u0)"
+
+// CMD: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[CMD_DECL:__hlsl_rootsig_decl_\d*]]
+// CMD-SAME: version: 1.1, RootElements{
+// CMD-SAME: RootSRV(t0,
+// CMD-SAME: space = 0, visibility = All, flags = DataStaticWhileSetAtExecute
+// CMD-SAME: )}
+
+// CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[CBV_DECL:__hlsl_rootsig_decl_\d*]]
+// CHECK-SAME: version: 1.1, RootElements{
+// CHECK-SAME: RootCBV(b0,
+// CHECK-SAME: space = 0, visibility = All, flags = DataStaticWhileSetAtExecute
+// CHECK-SAME: )}
+
+// CHECK-LABEL: -FunctionDecl 0x{{.*}} {{.*}} cbv_main
+// NO-OVERRIDE: -RootSignatureAttr 0x{{.*}} {{.*}} [[CBV_DECL]]
+// SET: -RootSignatureAttr 0x{{.*}} {{.*}} [[CBV_DECL]]
+// CMD: -RootSignatureAttr 0x{{.*}} {{.*}} [[CMD_DECL]]
+
+[RootSignature(SampleCBV)]
+void cbv_main() {}
+
+// CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[UAV_DECL:__hlsl_rootsig_decl_\d*]]
+// CHECK-SAME: version: 1.1, RootElements{
+// CHECK-SAME: RootUAV(u0,
+// CHECK-SAME: space = 0, visibility = All, flags = DataVolatile
+// CHECK-SAME: )}
+
+// CHECK-LABEL: -FunctionDecl 0x{{.*}} {{.*}} uav_main
+// NO-OVERRIDE: -RootSignatureAttr 0x{{.*}} {{.*}} [[UAV_DECL]]
+// SET: -RootSignatureAttr 0x{{.*}} {{.*}} [[UAV_DECL]]
+// OVERRIDE: -RootSignatureAttr 0x{{.*}} {{.*}} [[CBV_DECL]]
+
+[RootSignature(SampleUAV)]
+void uav_main() {}
+
+// CHECK-LABEL: -FunctionDecl 0x{{.*}} {{.*}} none_main
+// NO-OVERRIDE-NONE: -RootSignatureAttr
+// SET: -RootSignatureAttr 0x{{.*}} {{.*}} [[CBV_DECL]]
+// OVERRIDE-NONE: -RootSignatureAttr
+
+void none_main() {}
diff --git a/clang/test/AST/HLSL/vector-constructors.hlsl b/clang/test/AST/HLSL/vector-constructors.hlsl
index 31d8dd0..fd43a7d 100644
--- a/clang/test/AST/HLSL/vector-constructors.hlsl
+++ b/clang/test/AST/HLSL/vector-constructors.hlsl
@@ -88,10 +88,10 @@ void entry() {
// CHECK-NEXT: InitListExpr
// CHECK-NEXT: ImplicitCastExpr {{.*}} 'float' <LValueToRValue>
// CHECK-NEXT: MemberExpr {{.*}} 'float' lvalue .f {{.*}}
-// CHECK-NEXT: DeclRefExpr {{.*}} 'struct S':'S' lvalue Var {{.*}} 's' 'struct S':'S'
+// CHECK-NEXT: DeclRefExpr {{.*}} 'struct S' lvalue Var {{.*}} 's' 'struct S'
// CHECK-NEXT: ImplicitCastExpr {{.*}} 'float' <LValueToRValue>
// CHECK-NEXT: MemberExpr {{.*}} 'float' lvalue .f {{.*}}
-// CHECK-NEXT: DeclRefExpr {{.*}} 'struct S':'S' lvalue Var {{.*}} 's' 'struct S':'S'
+// CHECK-NEXT: DeclRefExpr {{.*}} 'struct S' lvalue Var {{.*}} 's' 'struct S'
struct T {
operator float() const { return 1.0f; }
@@ -105,12 +105,12 @@ void entry() {
// CHECK-NEXT: CXXMemberCallExpr {{.*}} 'float'
// CHECK-NEXT: MemberExpr {{.*}} '<bound member function type>' .operator float {{.*}}
// CHECK-NEXT: ImplicitCastExpr {{.*}} 'const T' lvalue <NoOp>
-// CHECK-NEXT: DeclRefExpr {{.*}} 'struct T':'T' lvalue Var {{.*}} 't' 'struct T':'T'
+// CHECK-NEXT: DeclRefExpr {{.*}} 'struct T' lvalue Var {{.*}} 't' 'struct T'
// CHECK-NEXT: ImplicitCastExpr {{.*}} 'float' <UserDefinedConversion>
// CHECK-NEXT: CXXMemberCallExpr {{.*}} 'float'
// CHECK-NEXT: MemberExpr {{.*}} '<bound member function type>' .operator float {{.*}}
// CHECK-NEXT: ImplicitCastExpr {{.*}} 'const T' lvalue <NoOp>
-// CHECK-NEXT: DeclRefExpr {{.*}} 'struct T':'T' lvalue Var {{.*}} 't' 'struct T':'T'
+// CHECK-NEXT: DeclRefExpr {{.*}} 'struct T' lvalue Var {{.*}} 't' 'struct T'
typedef float2 second_level_of_typedefs;
second_level_of_typedefs foo6 = float2(1.0f, 2.0f);
diff --git a/clang/test/AST/arm-mfp8.cpp b/clang/test/AST/arm-mfp8.cpp
index 195c734..b1fa04a 100644
--- a/clang/test/AST/arm-mfp8.cpp
+++ b/clang/test/AST/arm-mfp8.cpp
@@ -49,7 +49,7 @@ public:
}
};
-//CHECK: | |-CXXRecordDecl {{.*}} referenced class C1
+//CHECK: | |-CXXRecordDecl {{.*}} class C1
//CHECK-NEXT: | |-FieldDecl {{.*}} f1c '__mfp8'
//CHECK-NEXT: | |-VarDecl {{.*}} f2c 'const __mfp8' static
//CHECK-NEXT: | |-FieldDecl {{.*}} f3c 'volatile __mfp8'
diff --git a/clang/test/AST/ast-dump-color.cpp b/clang/test/AST/ast-dump-color.cpp
index 87797f6..2e60e76 100644
--- a/clang/test/AST/ast-dump-color.cpp
+++ b/clang/test/AST/ast-dump-color.cpp
@@ -82,15 +82,15 @@ struct Invalid {
//CHECK: {{^}}[[Blue]]| | `-[[RESET]][[GREEN]]ParmVarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:33[[RESET]]> [[Yellow]]col:33[[RESET]] [[Green]]'const Mutex &'[[RESET]]{{$}}
//CHECK: {{^}}[[Blue]]| `-[[RESET]][[GREEN]]CXXConstructorDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:33[[RESET]]> [[Yellow]]col:33[[RESET]] implicit constexpr[[CYAN]] Mutex[[RESET]] [[Green]]'void (Mutex &&)'[[RESET]] inline{{ .*$}}
//CHECK: {{^}}[[Blue]]| `-[[RESET]][[GREEN]]ParmVarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:33[[RESET]]> [[Yellow]]col:33[[RESET]] [[Green]]'Mutex &&'[[RESET]]{{$}}
-//CHECK: {{^}}[[Blue]]|-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:1[[RESET]], [[Yellow]]line:25:3[[RESET]]> [[Yellow]]col:3[[RESET]] referenced[[CYAN]] mu1[[RESET]] [[Green]]'class Mutex':'Mutex'[[RESET]]
-//CHECK: {{^}}[[Blue]]| `-[[RESET]][[MAGENTA]]CXXConstructExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:3[[RESET]]> [[Green]]'class Mutex':'Mutex'[[RESET]][[Cyan]][[RESET]][[Cyan]][[RESET]] [[Green]]'void () noexcept'[[RESET]]{{$}}
-//CHECK: {{^}}[[Blue]]|-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]line:18:1[[RESET]], [[Yellow]]line:25:8[[RESET]]> [[Yellow]]col:8[[RESET]][[CYAN]] mu2[[RESET]] [[Green]]'class Mutex':'Mutex'[[RESET]]
-//CHECK: {{^}}[[Blue]]| `-[[RESET]][[MAGENTA]]CXXConstructExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:8[[RESET]]> [[Green]]'class Mutex':'Mutex'[[RESET]][[Cyan]][[RESET]][[Cyan]][[RESET]] [[Green]]'void () noexcept'[[RESET]]{{$}}
+//CHECK: {{^}}[[Blue]]|-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:1[[RESET]], [[Yellow]]line:25:3[[RESET]]> [[Yellow]]col:3[[RESET]] referenced[[CYAN]] mu1[[RESET]] [[Green]]'class Mutex'[[RESET]]
+//CHECK: {{^}}[[Blue]]| `-[[RESET]][[MAGENTA]]CXXConstructExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:3[[RESET]]> [[Green]]'class Mutex'[[RESET]][[Cyan]][[RESET]][[Cyan]][[RESET]] [[Green]]'void () noexcept'[[RESET]]{{$}}
+//CHECK: {{^}}[[Blue]]|-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]line:18:1[[RESET]], [[Yellow]]line:25:8[[RESET]]> [[Yellow]]col:8[[RESET]][[CYAN]] mu2[[RESET]] [[Green]]'class Mutex'[[RESET]]
+//CHECK: {{^}}[[Blue]]| `-[[RESET]][[MAGENTA]]CXXConstructExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:8[[RESET]]> [[Green]]'class Mutex'[[RESET]][[Cyan]][[RESET]][[Cyan]][[RESET]] [[Green]]'void () noexcept'[[RESET]]{{$}}
//CHECK: {{^}}[[Blue]]|-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]line:26:1[[RESET]], [[Yellow]]col:5[[RESET]]> [[Yellow]]col:5[[RESET]][[CYAN]] TestExpr[[RESET]] [[Green]]'int'[[RESET]]
//CHECK: {{^}}[[Blue]]| `-[[RESET]][[BLUE]]GuardedByAttr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:29[[RESET]], [[Yellow]]col:43[[RESET]]>{{$}}
-//CHECK: {{^}}[[Blue]]| `-[[RESET]][[MAGENTA]]DeclRefExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:40[[RESET]]> [[Green]]'class Mutex':'Mutex'[[RESET]][[Cyan]] lvalue[[RESET]][[Cyan]][[RESET]] [[GREEN]]Var[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]][[CYAN]] 'mu1'[[RESET]] [[Green]]'class Mutex':'Mutex'[[RESET]] non_odr_use_unevaluated{{$}}
+//CHECK: {{^}}[[Blue]]| `-[[RESET]][[MAGENTA]]DeclRefExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:40[[RESET]]> [[Green]]'class Mutex'[[RESET]][[Cyan]] lvalue[[RESET]][[Cyan]][[RESET]] [[GREEN]]Var[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]][[CYAN]] 'mu1'[[RESET]] [[Green]]'class Mutex'[[RESET]] non_odr_use_unevaluated{{$}}
//CHECK: {{^}}[[Blue]]|-[[RESET]][[GREEN]]CXXRecordDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]line:28:1[[RESET]], [[Yellow]]line:30:1[[RESET]]> [[Yellow]]line:28:8[[RESET]] struct[[CYAN]] Invalid[[RESET]] definition
-//CHECK: {{^}}[[Blue]]| |-[[RESET]][[GREEN]]CXXRecordDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:1[[RESET]], [[Yellow]]col:8[[RESET]]> [[Yellow]]col:8[[RESET]] implicit referenced struct[[CYAN]] Invalid[[RESET]]
+//CHECK: {{^}}[[Blue]]| |-[[RESET]][[GREEN]]CXXRecordDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:1[[RESET]], [[Yellow]]col:8[[RESET]]> [[Yellow]]col:8[[RESET]] implicit struct[[CYAN]] Invalid[[RESET]]
//CHECK: {{^}}[[Blue]]| |-[[RESET]][[GREEN]]CXXConstructorDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]line:29:3[[RESET]], [[Yellow]]col:42[[RESET]]> [[Yellow]]col:29[[RESET]] invalid[[CYAN]] Invalid[[RESET]] [[Green]]'void (int)'[[RESET]]
//CHECK: {{^}}[[Blue]]| | |-[[RESET]][[GREEN]]ParmVarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:37[[RESET]], [[Yellow]]<invalid sloc>[[RESET]]> [[Yellow]]col:42[[RESET]] invalid [[Green]]'int'[[RESET]]
//CHECK: {{^}}[[Blue]]| | `-[[RESET]][[BLUE]]NoInlineAttr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:18[[RESET]]>
@@ -100,5 +100,5 @@ struct Invalid {
//CHECK: {{^}}[[Blue]]| | `-[[RESET]][[GREEN]]ParmVarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:8[[RESET]]> [[Yellow]]col:8[[RESET]] [[Green]]'const Invalid &'[[RESET]]
//CHECK: {{^}}[[Blue]]| `-[[RESET]][[GREEN]]CXXConstructorDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:8[[RESET]]> [[Yellow]]col:8[[RESET]] implicit constexpr[[CYAN]] Invalid[[RESET]] [[Green]]'void (Invalid &&)'[[RESET]] inline default trivial noexcept-unevaluated 0x{{[0-9a-fA-F]*}}
//CHECK: {{^}}[[Blue]]| `-[[RESET]][[GREEN]]ParmVarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:8[[RESET]]> [[Yellow]]col:8[[RESET]] [[Green]]'Invalid &&'[[RESET]]
-//CHECK: {{^}}[[Blue]]`-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:1[[RESET]], [[Yellow]]line:30:3[[RESET]]> [[Yellow]]col:3[[RESET]][[CYAN]] Invalid[[RESET]] [[Green]]'struct Invalid':'Invalid'[[RESET]]
-//CHECK: {{^}}[[Blue]] `-[[RESET]][[MAGENTA]]CXXConstructExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:3[[RESET]]> [[Green]]'struct Invalid':'Invalid'[[RESET]][[Cyan]][[RESET]][[Cyan]][[RESET]] [[Green]]'void () noexcept'[[RESET]]
+//CHECK: {{^}}[[Blue]]`-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:1[[RESET]], [[Yellow]]line:30:3[[RESET]]> [[Yellow]]col:3[[RESET]][[CYAN]] Invalid[[RESET]] [[Green]]'struct Invalid'[[RESET]]
+//CHECK: {{^}}[[Blue]] `-[[RESET]][[MAGENTA]]CXXConstructExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:3[[RESET]]> [[Green]]'struct Invalid'[[RESET]][[Cyan]][[RESET]][[Cyan]][[RESET]] [[Green]]'void () noexcept'[[RESET]]
diff --git a/clang/test/AST/ast-dump-comment.cpp b/clang/test/AST/ast-dump-comment.cpp
index 40c3edb..b5dbe2e 100644
--- a/clang/test/AST/ast-dump-comment.cpp
+++ b/clang/test/AST/ast-dump-comment.cpp
@@ -131,3 +131,51 @@ void Test_TemplatedFunctionVariadic(int arg, ...);
// CHECK: ParamCommandComment{{.*}} [in] implicitly Param="..."
// CHECK-NEXT: ParagraphComment
// CHECK-NEXT: TextComment{{.*}} Text=" More arguments"
+
+/// \param[out] Aaa <summary>Short summary</summary>
+int Test_HTMLSummaryTag(int Aaa);
+// CHECK: FunctionDecl{{.*}}Test_HTMLSummaryTag
+// CHECK: ParamCommandComment{{.*}} [out] explicitly Param="Aaa"
+// CHECK-NEXT: ParagraphComment
+// CHECK: HTMLStartTagComment{{.*}} Name="summary"
+// CHECK-NEXT: TextComment{{.*}} Text="Short summary"
+// CHECK-NEXT: HTMLEndTagComment{{.*}} Name="summary"
+
+/// \thread_safe test for underscore in special command
+int Test_UnderscoreInSpecialCommand;
+// CHECK: VarDecl{{.*}}Test_UnderscoreInSpecialCommand 'int'
+// CHECK: InlineCommandComment{{.*}} Name="thread_safe" RenderNormal
+// CHECK-NEXT: TextComment{{.*}} Text=" test for underscore in special command"
+
+/// <details>
+/// <summary>
+/// Summary
+/// </summary>
+/// <p>Details</p>
+/// </details>
+///
+/// Some <mark>highlighting</mark>
+///
+/// <figure>
+/// <img src="pic.jpg">
+/// <figcaption>Figure 1</figcaption>
+/// </figure>
+int Test_AdditionalHTMLTags(int Aaa);
+// CHECK: FunctionDecl{{.*}}Test_AdditionalHTMLTags 'int (int)'
+// CHECK: HTMLStartTagComment{{.*}} Name="details"
+// CHECK: HTMLStartTagComment{{.*}} Name="summary"
+// CHECK-NEXT: TextComment{{.*}} Text=" Summary"
+// CHECK: HTMLEndTagComment{{.*}} Name="summary"
+// CHECK: HTMLStartTagComment{{.*}} Name="p"
+// CHECK-NEXT: TextComment{{.*}} Text="Details"
+// CHECK-NEXT: HTMLEndTagComment{{.*}} Name="p"
+// CHECK: HTMLEndTagComment{{.*}} Name="details"
+// CHECK: HTMLStartTagComment{{.*}} Name="mark"
+// CHECK-NEXT: TextComment{{.*}} Text="highlighting"
+// CHECK-NEXT: HTMLEndTagComment{{.*}} Name="mark"
+// CHECK: HTMLStartTagComment{{.*}} Name="figure"
+// CHECK: HTMLStartTagComment{{.*}} Name="img" Attrs: "src="pic.jpg"
+// CHECK: HTMLStartTagComment{{.*}} Name="figcaption"
+// CHECK-NEXT: TextComment{{.*}} Text="Figure 1"
+// CHECK-NEXT: HTMLEndTagComment{{.*}} Name="figcaption"
+// CHECK: HTMLEndTagComment{{.*}} Name="figure"
diff --git a/clang/test/AST/ast-dump-ctad-alias.cpp b/clang/test/AST/ast-dump-ctad-alias.cpp
index f39a4ce..781fb9f 100644
--- a/clang/test/AST/ast-dump-ctad-alias.cpp
+++ b/clang/test/AST/ast-dump-ctad-alias.cpp
@@ -39,15 +39,14 @@ Out2<double>::AInner t(1.0);
// CHECK-NEXT: | | |-DeducedTemplateSpecializationType {{.*}} 'Out2<double>::AInner' dependent
// CHECK-NEXT: | | | `-name: 'Out2<double>::AInner'
// CHECK-NEXT: | | | `-TypeAliasTemplateDecl {{.+}} AInner{{$}}
-// CHECK-NEXT: | | `-ElaboratedType {{.*}} 'Inner<Y>' sugar dependent
-// CHECK-NEXT: | | `-TemplateSpecializationType {{.*}} 'Inner<Y>' dependent
-// CHECK-NEXT: | | |-name: 'Inner':'Out<int>::Inner' qualified
-// CHECK-NEXT: | | | `-ClassTemplateDecl {{.+}} Inner{{$}}
-// CHECK-NEXT: | | `-TemplateArgument type 'Y'
-// CHECK-NEXT: | | `-SubstTemplateTypeParmType {{.*}} 'Y'
-// CHECK-NEXT: | | |-FunctionTemplate {{.*}} '<deduction guide for Inner>'
-// CHECK-NEXT: | | `-TemplateTypeParmType {{.*}} 'Y' dependent depth 1 index 0
-// CHECK-NEXT: | | `-TemplateTypeParm {{.*}} 'Y'
+// CHECK-NEXT: | | `-TemplateSpecializationType {{.*}} 'Inner<Y>' dependent
+// CHECK-NEXT: | | |-name: 'Inner':'Out<int>::Inner' qualified
+// CHECK-NEXT: | | | `-ClassTemplateDecl {{.+}} Inner{{$}}
+// CHECK-NEXT: | | `-TemplateArgument type 'Y'
+// CHECK-NEXT: | | `-SubstTemplateTypeParmType {{.*}} 'Y'
+// CHECK-NEXT: | | |-FunctionTemplate {{.*}} '<deduction guide for Inner>'
+// CHECK-NEXT: | | `-TemplateTypeParmType {{.*}} 'Y' dependent depth 1 index 0
+// CHECK-NEXT: | | `-TemplateTypeParm {{.*}} 'Y'
// CHECK-NEXT: | |-CXXDeductionGuideDecl {{.*}} <deduction guide for AInner> 'auto (Y) -> Inner<Y>'
// CHECK-NEXT: | | `-ParmVarDecl {{.*}} 'Y'
// CHECK-NEXT: | `-CXXDeductionGuideDecl {{.*}} used <deduction guide for AInner> 'auto (double) -> Inner<double>' implicit_instantiation
@@ -188,9 +187,9 @@ void foo() {
// CHECK-NEXT: | | | |-ImplicitConceptSpecializationDecl {{.*}}
// CHECK-NEXT: | | | | |-TemplateArgument type 'type-parameter-0-2'
// CHECK-NEXT: | | | | | `-TemplateTypeParmType {{.*}} 'type-parameter-0-2' dependent depth 0 index 2
-// CHECK-NEXT: | | | | `-TemplateArgument pack '<Packs<type-parameter-0-1...>>'
-// CHECK-NEXT: | | | | `-TemplateArgument type 'Packs<type-parameter-0-1...>'
-// CHECK-NEXT: | | | | `-TemplateSpecializationType {{.*}} 'Packs<type-parameter-0-1...>' dependent
+// CHECK-NEXT: | | | | `-TemplateArgument pack '<GH124715::Packs<type-parameter-0-1...>>'
+// CHECK-NEXT: | | | | `-TemplateArgument type 'GH124715::Packs<type-parameter-0-1...>'
+// CHECK-NEXT: | | | | `-TemplateSpecializationType {{.*}} 'GH124715::Packs<type-parameter-0-1...>' dependent
// CHECK-NEXT: | | | | |-name: 'GH124715::Packs'
// CHECK-NEXT: | | | | | `-ClassTemplateDecl {{.*}} Packs
// CHECK-NEXT: | | | | `-TemplateArgument pack '<type-parameter-0-1...>'
diff --git a/clang/test/AST/ast-dump-cxx2b-deducing-this.cpp b/clang/test/AST/ast-dump-cxx2b-deducing-this.cpp
index fc86aeb..09a274a 100644
--- a/clang/test/AST/ast-dump-cxx2b-deducing-this.cpp
+++ b/clang/test/AST/ast-dump-cxx2b-deducing-this.cpp
@@ -33,5 +33,5 @@ struct B {
operator A(this B);
};
A a = A(B{});
-// CHECK: CallExpr 0x{{[^ ]*}} <col:9, col:11> 'A':'GH130272::A'
+// CHECK: CallExpr 0x{{[^ ]*}} <col:9, col:11> 'A'
}
diff --git a/clang/test/AST/ast-dump-decl-json.c b/clang/test/AST/ast-dump-decl-json.c
index ec2d75b..b84ddf9 100644
--- a/clang/test/AST/ast-dump-decl-json.c
+++ b/clang/test/AST/ast-dump-decl-json.c
@@ -585,7 +585,6 @@ void testParmVarDecl(int TestParmVarDecl);
// CHECK-NEXT: },
// CHECK-NEXT: "name": "e",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "enum TestEnumDeclAnon::(unnamed at {{.*}}:31:3)",
// CHECK-NEXT: "qualType": "enum (unnamed enum at {{.*}}:31:3)"
// CHECK-NEXT: }
// CHECK-NEXT: }
@@ -777,7 +776,6 @@ void testParmVarDecl(int TestParmVarDecl);
// CHECK-NEXT: },
// CHECK-NEXT: "name": "testRecordDeclAnon1",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "struct TestRecordDeclAnon1::(unnamed at {{.*}}:46:3)",
// CHECK-NEXT: "qualType": "struct (unnamed struct at {{.*}}:46:3)"
// CHECK-NEXT: }
// CHECK-NEXT: }
@@ -1204,7 +1202,6 @@ void testParmVarDecl(int TestParmVarDecl);
// CHECK-NEXT: },
// CHECK-NEXT: "name": "y",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "enum (unnamed at {{.*}}:69:29)",
// CHECK-NEXT: "qualType": "enum (unnamed enum at {{.*}}:69:29)"
// CHECK-NEXT: }
// CHECK-NEXT: },
diff --git a/clang/test/AST/ast-dump-decl.cpp b/clang/test/AST/ast-dump-decl.cpp
index 08d8fba..afb5078 100644
--- a/clang/test/AST/ast-dump-decl.cpp
+++ b/clang/test/AST/ast-dump-decl.cpp
@@ -35,9 +35,9 @@ namespace testVarDeclNRVO {
// CHECK: FunctionDecl{{.*}} TestFuncNRVO 'A ()'
// CHECK-NEXT: `-CompoundStmt
// CHECK-NEXT: |-DeclStmt
-// CHECK-NEXT: | `-VarDecl{{.*}} TestVarDeclNRVO 'A':'testVarDeclNRVO::A' nrvo callinit
+// CHECK-NEXT: | `-VarDecl{{.*}} TestVarDeclNRVO 'A' nrvo callinit
// CHECK-NEXT: | `-CXXConstructExpr
-// CHECK-NEXT: `-ReturnStmt{{.*}} nrvo_candidate(Var {{.*}} 'TestVarDeclNRVO' 'A':'testVarDeclNRVO::A')
+// CHECK-NEXT: `-ReturnStmt{{.*}} nrvo_candidate(Var {{.*}} 'TestVarDeclNRVO' 'A')
void testParmVarDeclInit(int TestParmVarDeclInit = 0);
// CHECK: ParmVarDecl{{.*}} TestParmVarDeclInit 'int'
@@ -131,8 +131,8 @@ namespace testCXXRecordDecl {
// CHECK-NEXT: CopyAssignment simple non_trivial has_const_param
// CHECK-NEXT: MoveAssignment exists simple non_trivial
// CHECK-NEXT: Destructor simple irrelevant trivial
-// CHECK-NEXT: virtual private 'A':'testCXXRecordDecl::A'
-// CHECK-NEXT: public 'B':'testCXXRecordDecl::B'
+// CHECK-NEXT: virtual private 'A'
+// CHECK-NEXT: public 'B'
// CHECK-NEXT: CXXRecordDecl{{.*}} class TestCXXRecordDecl
// CHECK-NEXT: FieldDecl
@@ -269,7 +269,7 @@ namespace testFunctionTemplateDecl {
// CHECK-NEXT: |-TemplateArgument type 'testFunctionTemplateDecl::B'
// CHECK-NEXT: | `-RecordType 0{{.+}} 'testFunctionTemplateDecl::B'
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'B'
- // CHECK-NEXT: `-ParmVarDecl 0x{{.+}} <col:40> col:41 'B':'testFunctionTemplateDecl::B'
+ // CHECK-NEXT: `-ParmVarDecl 0x{{.+}} <col:40> col:41 'B'
namespace testClassTemplateDecl {
@@ -343,7 +343,7 @@ namespace testClassTemplateDecl {
// CHECK-NEXT: | | |-MoveAssignment{{$}}
// CHECK-NEXT: | | `-Destructor non_trivial user_declared{{$}}
// CHECK-NEXT: | |-TemplateArgument type 'testClassTemplateDecl::A'{{$}}
-// CHECK-NEXT: | | `-RecordType 0{{.+}} 'testClassTemplateDecl::A'{{$}}
+// CHECK-NEXT: | | `-RecordType 0{{.+}} 'testClassTemplateDecl::A' canonical{{$}}
// CHECK-NEXT: | | `-CXXRecord 0x{{.+}} 'A'{{$}}
// CHECK-NEXT: | |-CXXRecordDecl 0x{{.+}} <col:24, col:30> col:30 implicit class TestClassTemplate{{$}}
// CHECK-NEXT: | |-AccessSpecDecl 0x{{.+}} <line:[[@LINE-67]]:3, col:9> col:3 public{{$}}
@@ -366,7 +366,7 @@ namespace testClassTemplateDecl {
// CHECK-NEXT: | |-MoveAssignment exists simple trivial needs_implicit{{$}}
// CHECK-NEXT: | `-Destructor simple irrelevant trivial needs_implicit{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testClassTemplateDecl::B'{{$}}
-// CHECK-NEXT: | `-RecordType 0{{.+}} 'testClassTemplateDecl::B'{{$}}
+// CHECK-NEXT: | `-RecordType 0{{.+}} 'testClassTemplateDecl::B' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'B'{{$}}
// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} <col:14, col:20> col:20 implicit class TestClassTemplate{{$}}
// CHECK-NEXT: `-FieldDecl 0x{{.+}} <line:[[@LINE-78]]:5, col:9> col:9 j 'int'{{$}}
@@ -380,7 +380,7 @@ namespace testClassTemplateDecl {
// CHECK-NEXT: | |-MoveAssignment{{$}}
// CHECK-NEXT: | `-Destructor non_trivial user_declared{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testClassTemplateDecl::C'{{$}}
-// CHECK-NEXT: | `-RecordType 0{{.+}} 'testClassTemplateDecl::C'{{$}}
+// CHECK-NEXT: | `-RecordType 0{{.+}} 'testClassTemplateDecl::C' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'C'{{$}}
// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} <line:[[@LINE-104]]:24, col:30> col:30 implicit class TestClassTemplate{{$}}
// CHECK-NEXT: |-AccessSpecDecl 0x{{.+}} <line:[[@LINE-104]]:3, col:9> col:3 public{{$}}
@@ -398,7 +398,7 @@ namespace testClassTemplateDecl {
// CHECK-NEXT: | |-MoveAssignment{{$}}
// CHECK-NEXT: | `-Destructor non_trivial user_declared{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testClassTemplateDecl::D'{{$}}
-// CHECK-NEXT: | `-RecordType 0{{.+}} 'testClassTemplateDecl::D'{{$}}
+// CHECK-NEXT: | `-RecordType 0{{.+}} 'testClassTemplateDecl::D' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'D'{{$}}
// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} <line:[[@LINE-122]]:24, col:30> col:30 implicit class TestClassTemplate{{$}}
// CHECK-NEXT: |-AccessSpecDecl 0x{{.+}} <line:[[@LINE-122]]:3, col:9> col:3 public{{$}}
@@ -432,7 +432,7 @@ namespace testClassTemplateDecl {
// CHECK-NEXT: |-TemplateArgument type 'type-parameter-0-0'{{$}}
// CHECK-NEXT: | `-TemplateTypeParmType 0x{{.+}} 'type-parameter-0-0' dependent depth 0 index 0{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testClassTemplateDecl::A'{{$}}
-// CHECK-NEXT: | `-RecordType 0x{{.+}} 'testClassTemplateDecl::A'{{$}}
+// CHECK-NEXT: | `-RecordType 0x{{.+}} 'testClassTemplateDecl::A' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'A'{{$}}
// CHECK-NEXT: |-TemplateTypeParmDecl 0x{{.+}} <col:12, col:21> col:21 referenced typename depth 0 index 0 T1{{$}}
// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} <col:25, col:31> col:31 implicit class TestClassTemplatePartial{{$}}
@@ -605,7 +605,7 @@ namespace testCanonicalTemplate {
// CHECK-NEXT: | `-ParmVarDecl 0x{{.*}} <col:50> col:51 'T'{{$}}
// CHECK-NEXT: `-FunctionDecl 0x{{.*}} <line:[[@LINE-6]]:24, col:51> col:29 used TestFunctionTemplate 'void (testCanonicalTemplate::A)' implicit_instantiation{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testCanonicalTemplate::A'{{$}}
- // CHECK-NEXT: | `-RecordType 0x{{.+}} 'testCanonicalTemplate::A'{{$}}
+ // CHECK-NEXT: | `-RecordType 0x{{.+}} 'testCanonicalTemplate::A' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'A'{{$}}
// CHECK-NEXT: `-ParmVarDecl 0x{{.*}} <col:50> col:51 'testCanonicalTemplate::A'{{$}}
@@ -644,7 +644,7 @@ namespace testCanonicalTemplate {
// CHECK-NEXT: | |-MoveAssignment exists simple trivial needs_implicit{{$}}
// CHECK-NEXT: | `-Destructor simple irrelevant trivial needs_implicit{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testCanonicalTemplate::A'{{$}}
- // CHECK-NEXT: | `-RecordType 0x{{.+}} 'testCanonicalTemplate::A'{{$}}
+ // CHECK-NEXT: | `-RecordType 0x{{.+}} 'testCanonicalTemplate::A' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'A'{{$}}
// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} <col:25, col:31> col:31 implicit class TestClassTemplate{{$}}
// CHECK-NEXT: |-FriendDecl 0x{{.+}} <line:[[@LINE-30]]:5, col:40> col:40{{$}}
@@ -677,7 +677,7 @@ namespace testCanonicalTemplate {
// CHECK-NEXT: | |-MoveAssignment exists simple trivial needs_implicit{{$}}
// CHECK-NEXT: | `-Destructor simple irrelevant trivial needs_implicit{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testCanonicalTemplate::A'{{$}}
- // CHECK-NEXT: | `-RecordType 0x{{.+}} 'testCanonicalTemplate::A'{{$}}
+ // CHECK-NEXT: | `-RecordType 0x{{.+}} 'testCanonicalTemplate::A' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'A'{{$}}
// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} <col:25, col:31> col:31 implicit class TestClassTemplate2{{$}}
// CHECK-NEXT: |-CXXConstructorDecl 0x{{.+}} <col:31> col:31 implicit used constexpr TestClassTemplate2 'void () noexcept' inline default trivial{{$}}
@@ -721,7 +721,7 @@ namespace testCanonicalTemplate {
// CHECK-NEXT: |-TemplateTypeParmDecl 0x{{.+}} <col:16, col:25> col:25 referenced typename depth 0 index 0 T{{$}}
// CHECK-NEXT: |-VarDecl 0x{{.+}} <col:28, col:43> col:43 TestVarTemplate 'const T' static{{$}}
// CHECK-NEXT: |-VarTemplateSpecializationDecl 0x{{.+}} parent 0x{{.+}} prev 0x{{.+}} <line:[[@LINE-12]]:3, line:[[@LINE-11]]:34> col:14 referenced TestVarTemplate 'const int' implicit_instantiation cinit{{$}}
- // CHECK-NEXT: | |-NestedNameSpecifier TypeSpec 'testCanonicalTemplate::S'{{$}}
+ // CHECK-NEXT: | |-NestedNameSpecifier TypeSpec 'S'{{$}}
// CHECK-NEXT: | |-TemplateArgument type 'int'{{$}}
// CHECK-NEXT: | | `-BuiltinType 0x{{.+}} 'int'{{$}}
// CHECK-NEXT: | `-InitListExpr 0x{{.+}} <col:32, col:34> 'int'{{$}}
@@ -735,13 +735,13 @@ namespace testCanonicalTemplate {
// CHECK: VarTemplateDecl 0x{{.+}} parent 0x{{.+}} prev 0x{{.+}} <{{.+}}:[[@LINE-24]]:3, line:[[@LINE-23]]:34> col:14 TestVarTemplate{{$}}
// CHECK-NEXT: |-TemplateTypeParmDecl 0x{{.+}} <line:[[@LINE-25]]:12, col:21> col:21 referenced typename depth 0 index 0 T{{$}}
// CHECK-NEXT: |-VarDecl 0x{{.+}} parent 0x{{.+}} prev 0x{{.+}} <line:[[@LINE-25]]:3, col:34> col:14 TestVarTemplate 'const T' cinit{{$}}
- // CHECK-NEXT: | |-NestedNameSpecifier TypeSpec 'testCanonicalTemplate::S'{{$}}
+ // CHECK-NEXT: | |-NestedNameSpecifier TypeSpec 'S'{{$}}
// CHECK-NEXT: | `-InitListExpr 0x{{.+}} <col:32, col:34> 'void'{{$}}
// CHECK-NEXT: |-VarTemplateSpecialization 0x{{.+}} 'TestVarTemplate' 'const int'{{$}}
// CHECK-NEXT: `-VarTemplateSpecialization 0x{{.+}} 'TestVarTemplate' 'const int'{{$}}
// CHECK: VarTemplateSpecializationDecl 0x{{.+}} parent 0x{{.+}} prev 0x{{.+}} <{{.+}}:[[@LINE-32]]:3, line:[[@LINE-31]]:34> col:14 referenced TestVarTemplate 'const int' implicit_instantiation cinit{{$}}
- // CHECK-NEXT: |-NestedNameSpecifier TypeSpec 'testCanonicalTemplate::S'{{$}}
+ // CHECK-NEXT: |-NestedNameSpecifier TypeSpec 'S'{{$}}
// CHECK-NEXT: |-TemplateArgument type 'int'{{$}}
// CHECK-NEXT: | `-BuiltinType 0x{{.+}} 'int'{{$}}
// CHECK-NEXT: `-InitListExpr 0x{{.+}} <col:32, col:34> 'int'{{$}}
@@ -901,7 +901,7 @@ template<typename T> class TestFriendDecl {
// CHECK: CXXRecord{{.*}} TestFriendDecl
// CHECK-NEXT: FriendDecl
// CHECK-NEXT: FunctionDecl{{.*}} foo
-// CHECK-NEXT: FriendDecl{{.*}} 'class A':'A'
+// CHECK-NEXT: FriendDecl{{.*}} 'class A'
// CHECK-NEXT: CXXRecordDecl{{.*}} class A
// CHECK-NEXT: FriendDecl{{.*}} 'T'
@@ -973,5 +973,35 @@ namespace TestConstexprVariableTemplateWithInitializer {
// CHECK-NEXT: `-VarDecl 0x{{.+}} <col:25, col:48> col:37 call_init 'const T' constexpr callinit{{$}}
// CHECK-NEXT: `-ParenListExpr 0x{{.+}} <col:46, col:48> 'NULL TYPE'{{$}}
// CHECK-NEXT: `-IntegerLiteral 0x{{.+}} <col:47> 'int' 0{{$}}
-
}
+
+namespace TestInjectedClassName {
+ struct A {
+ using T1 = A;
+ using T2 = A;
+ };
+ // CHECK-LABEL: Dumping TestInjectedClassName:
+ // CHECK: CXXRecordDecl [[TestInjectedClassName_RD:0x[^ ]+]] {{.*}} struct A definition
+ // CHECK: CXXRecordDecl {{.*}} implicit referenced struct A
+ // CHECK-NEXT: |-TypeAliasDecl {{.*}} T1 'A'
+ // CHECK-NEXT: | `-RecordType [[TestInjectedClassName_RT:0x[^ ]+]] 'A' injected
+ // CHECK-NEXT: | `-CXXRecord [[TestInjectedClassName_RD]] 'A'
+ // CHECK-NEXT: `-TypeAliasDecl {{.*}} T2 'A'
+ // CHECK-NEXT: `-RecordType [[TestInjectedClassName_RT]] 'A' injected
+ // CHECK-NEXT: `-CXXRecord [[TestInjectedClassName_RD]] 'A'
+} // namespace TestInjectedClassName
+
+namespace TestGH155936 {
+ struct Foo {
+ struct A {
+ struct Foo {};
+ };
+ };
+ // CHECK-LABEL: Dumping TestGH155936:
+ // CHECK: CXXRecordDecl 0x{{.+}} <{{.+}}> line:[[@LINE-6]]:10 struct Foo definition
+ // CHECK: CXXRecordDecl 0x{{.+}} <col:3, col:10> col:10 implicit struct Foo
+ // CHECK: CXXRecordDecl 0x{{.+}} <{{.+}}> line:[[@LINE-7]]:12 struct A definition
+ // CHECK: CXXRecordDecl 0x{{.+}} <col:5, col:12> col:12 implicit struct A
+ // CHECK: CXXRecordDecl 0x{{.+}} <line:[[@LINE-8]]:7, col:19> col:14 struct Foo definition
+  // CHECK: CXXRecordDecl 0x{{.+}} <col:9, col:16> col:16 implicit struct Foo
+} // namespace TestGH155936
diff --git a/clang/test/AST/ast-dump-expr-json.cpp b/clang/test/AST/ast-dump-expr-json.cpp
index 11026c9..6293f8c 100644
--- a/clang/test/AST/ast-dump-expr-json.cpp
+++ b/clang/test/AST/ast-dump-expr-json.cpp
@@ -7962,7 +7962,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "prvalue",
@@ -7988,7 +7987,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "const NS::X",
// CHECK-NEXT: "qualType": "const X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -8142,7 +8140,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "const NS::X",
// CHECK-NEXT: "qualType": "const X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -8395,7 +8392,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "prvalue",
@@ -8421,7 +8417,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "const NS::X",
// CHECK-NEXT: "qualType": "const X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -8720,7 +8715,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "prvalue",
@@ -8746,7 +8740,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "const NS::X",
// CHECK-NEXT: "qualType": "const X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -8900,7 +8893,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "const NS::X",
// CHECK-NEXT: "qualType": "const X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -9032,7 +9024,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: "isUsed": true,
// CHECK-NEXT: "name": "x",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "init": "call",
@@ -9053,7 +9044,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "prvalue",
@@ -9155,7 +9145,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "prvalue",
@@ -9181,7 +9170,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "const NS::X",
// CHECK-NEXT: "qualType": "const X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -9203,7 +9191,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -9212,7 +9199,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: "kind": "VarDecl",
// CHECK-NEXT: "name": "x",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: }
// CHECK-NEXT: }
diff --git a/clang/test/AST/ast-dump-expr.cpp b/clang/test/AST/ast-dump-expr.cpp
index 6fd429d..7a686b2 100644
--- a/clang/test/AST/ast-dump-expr.cpp
+++ b/clang/test/AST/ast-dump-expr.cpp
@@ -219,13 +219,10 @@ void PostfixExpressions(S a, S *p, U<int> *r) {
// CHECK-NEXT: MemberExpr 0x{{[^ ]*}} <col:3, col:5> '<bound member function type>' .~S 0x{{[^ ]*}}
// CHECK-NEXT: DeclRefExpr 0x{{[^ ]*}} <col:3> 'S' lvalue ParmVar 0x{{[^ ]*}} 'a' 'S'
- // FIXME: similarly, there is no way to distinguish the construct below from
- // the p->~S() case.
p->::S::~S();
// CHECK: CXXMemberCallExpr 0x{{[^ ]*}} <line:[[@LINE-1]]:3, col:14> 'void'
// CHECK-NEXT: MemberExpr 0x{{[^ ]*}} <col:3, col:12> '<bound member function type>' ->~S 0x{{[^ ]*}}
- // CHECK-NEXT: NestedNameSpecifier TypeSpec 'S'
- // CHECK-NEXT: NestedNameSpecifier Global
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec '::S'
// CHECK-NEXT: ImplicitCastExpr
// CHECK-NEXT: DeclRefExpr 0x{{[^ ]*}} <col:3> 'S *' lvalue ParmVar 0x{{[^ ]*}} 'p' 'S *'
@@ -597,5 +594,5 @@ struct S {
void f() {
S(S(0, 1));
}
-// CHECK: CXXTemporaryObjectExpr {{.*}} <col:5, col:11> 'S':'GH143711::S' 'void (int, int)'
+// CHECK: CXXTemporaryObjectExpr {{.*}} <col:5, col:11> 'S' 'void (int, int)'
}
diff --git a/clang/test/AST/ast-dump-for-range-lifetime.cpp b/clang/test/AST/ast-dump-for-range-lifetime.cpp
index ee046be..c330342 100644
--- a/clang/test/AST/ast-dump-for-range-lifetime.cpp
+++ b/clang/test/AST/ast-dump-for-range-lifetime.cpp
@@ -24,14 +24,14 @@ void test1() {
// CHECK-NEXT: | |-<<<NULL>>>
// CHECK-NEXT: | |-DeclStmt {{.*}}
// CHECK-NEXT: | | `-VarDecl {{.*}} implicit used __range1 'const A &' cinit
- // CHECK-NEXT: | | `-ExprWithCleanups {{.*}} 'const A':'const P2718R0::A' lvalue
- // CHECK-NEXT: | | `-CallExpr {{.*}} 'const A':'const P2718R0::A' lvalue
+ // CHECK-NEXT: | | `-ExprWithCleanups {{.*}} 'const A' lvalue
+ // CHECK-NEXT: | | `-CallExpr {{.*}} 'const A' lvalue
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} 'const A &(*)(const A &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} 'const A &(const A &)' lvalue Function {{.*}} 'f1' 'const A &(const A &)'
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const A':'const P2718R0::A' lvalue extended by Var {{.*}} '__range1' 'const A &'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const A' lvalue extended by Var {{.*}} '__range1' 'const A &'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const A' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CallExpr {{.*}} 'A'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'A (*)()' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} 'A ()' lvalue Function {{.*}} 'g' 'A ()'
for (auto e : f1(g()))
@@ -56,11 +56,11 @@ void test2() {
// CHECK-NEXT: | | `-CallExpr {{.*}} 'const A *'
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} 'const A *(*)(const A &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} 'const A *(const A &)' lvalue Function {{.*}} 'g' 'const A *(const A &)'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' lvalue <DerivedToBase (A)>
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const B':'const P2718R0::B' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const B':'const P2718R0::B' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'B':'P2718R0::B' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'B':'P2718R0::B' 'void () noexcept(false)' zeroing
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const A' lvalue <DerivedToBase (A)>
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const B' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const B' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'B' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'B' 'void () noexcept(false)' zeroing
for (auto e : f(g(B())))
bar(e);
}
@@ -82,9 +82,9 @@ void test3() {
// CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'int[3]' lvalue
// CHECK-NEXT: | `-BinaryOperator {{.*}} 'int[3]' lvalue ','
// CHECK-NEXT: | |-CXXStaticCastExpr {{.*}} 'void' static_cast<void> <ToVoid>
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' xvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'LockGuard' xvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int[3]' lvalue Var {{.*}} 'v' 'int[3]'
for ([[maybe_unused]] int x : static_cast<void>(LockGuard()), v)
LockGuard guard;
@@ -96,9 +96,9 @@ void test3() {
// CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'int[3]' lvalue
// CHECK-NEXT: | `-BinaryOperator {{.*}} 'int[3]' lvalue ','
// CHECK-NEXT: | |-CStyleCastExpr {{.*}} 'void' <ToVoid>
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' xvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'LockGuard' xvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int[3]' lvalue Var {{.*}} 'v' 'int[3]'
for ([[maybe_unused]] int x : (void)LockGuard(), v)
LockGuard guard;
@@ -109,9 +109,9 @@ void test3() {
// CHECK-NEXT: | `-VarDecl {{.*}} implicit used __range1 'int (&)[3]' cinit
// CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'int[3]' lvalue
// CHECK-NEXT: | `-BinaryOperator {{.*}} 'int[3]' lvalue ','
- // CHECK-NEXT: | |-MaterializeTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' xvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | |-MaterializeTemporaryExpr {{.*}} 'LockGuard' xvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int[3]' lvalue Var {{.*}} 'v' 'int[3]'
for ([[maybe_unused]] int x : LockGuard(), v)
LockGuard guard;
@@ -130,12 +130,12 @@ void test4() {
// CHECK-NEXT: | `-CallExpr {{.*}} 'int[3]' lvalue
// CHECK-NEXT: | |-ImplicitCastExpr {{.*}} 'int (&(*)(const A &))[3]' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} 'int (&(const A &))[3]' lvalue Function {{.*}} 'default_arg_fn' 'int (&(const A &))[3]'
- // CHECK-NEXT: | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const A':'const P2718R0::A' lvalue has rewritten init
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A':'const P2718R0::A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'A':'P2718R0::A' 'void ()'
+ // CHECK-NEXT: | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const A' lvalue has rewritten init
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A' <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'A' 'void ()'
  for (auto e : default_arg_fn())
bar(e);
}
@@ -158,43 +158,43 @@ void test5() {
// CHECK-NEXT: | `-CallExpr {{.*}} 'int[3]' lvalue
// CHECK-NEXT: | |-ImplicitCastExpr {{.*}} 'int (&(*)(const A &))[3]' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} 'int (&(const A &))[3]' lvalue Function {{.*}} 'default_arg_fn' 'int (&(const A &))[3]'
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A':'const P2718R0::A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A'
- // CHECK-NEXT: | `-CallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A' <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A'
+ // CHECK-NEXT: | `-CallExpr {{.*}} 'A'
// CHECK-NEXT: | |-ImplicitCastExpr {{.*}} 'A (*)(const A &, const DefaultA &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} 'A (const A &, const DefaultA &)' lvalue Function {{.*}} 'foo' 'A (const A &, const DefaultA &)'
- // CHECK-NEXT: | |-MaterializeTemporaryExpr {{.*}} 'const A':'const P2718R0::A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A'
- // CHECK-NEXT: | | `-CallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | |-MaterializeTemporaryExpr {{.*}} 'const A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const A' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'A'
+ // CHECK-NEXT: | | `-CallExpr {{.*}} 'A'
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} 'A (*)(const A &, const DefaultA &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} 'A (const A &, const DefaultA &)' lvalue Function {{.*}} 'foo' 'A (const A &, const DefaultA &)'
- // CHECK-NEXT: | | |-MaterializeTemporaryExpr {{.*}} 'const A':'const P2718R0::A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' <NoOp>
- // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A'
- // CHECK-NEXT: | | | `-CallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | | |-MaterializeTemporaryExpr {{.*}} 'const A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const A' <NoOp>
+ // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'A'
+ // CHECK-NEXT: | | | `-CallExpr {{.*}} 'A'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} 'A (*)(const A &, const DefaultA &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} 'A (const A &, const DefaultA &)' lvalue Function {{.*}} 'foo' 'A (const A &, const DefaultA &)'
- // CHECK-NEXT: | | | |-MaterializeTemporaryExpr {{.*}} 'const A':'const P2718R0::A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' <NoOp>
- // CHECK-NEXT: | | | | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A'
- // CHECK-NEXT: | | | | `-CXXTemporaryObjectExpr {{.*}} 'A':'P2718R0::A' 'void ()'
- // CHECK-NEXT: | | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA':'const P2718R0::DefaultA' lvalue has rewritten init
- // CHECK-NEXT: | | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' <NoOp>
- // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' 'void ()'
- // CHECK-NEXT: | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA':'const P2718R0::DefaultA' lvalue has rewritten init
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' 'void ()'
- // CHECK-NEXT: | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA':'const P2718R0::DefaultA' lvalue has rewritten init
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' 'void ()'
+ // CHECK-NEXT: | | | |-MaterializeTemporaryExpr {{.*}} 'const A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} 'const A' <NoOp>
+ // CHECK-NEXT: | | | | `-CXXBindTemporaryExpr {{.*}} 'A'
+ // CHECK-NEXT: | | | | `-CXXTemporaryObjectExpr {{.*}} 'A' 'void ()'
+ // CHECK-NEXT: | | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA' lvalue has rewritten init
+ // CHECK-NEXT: | | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const DefaultA' <NoOp>
+ // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA' 'void ()'
+ // CHECK-NEXT: | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA' lvalue has rewritten init
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const DefaultA' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA' 'void ()'
+ // CHECK-NEXT: | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA' lvalue has rewritten init
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const DefaultA' <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'DefaultA' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA' 'void ()'
for (auto e : default_arg_fn(foo(foo(foo(A())))))
bar(e);
}
@@ -210,40 +210,40 @@ void test6() {
// CHECK-NEXT: |-<<<NULL>>>
// CHECK-NEXT: |-DeclStmt {{.*}}
// CHECK-NEXT: | `-VarDecl {{.*}} col:17 implicit used __range1 'C &&' cinit
- // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'C':'P2718R0::C' xvalue
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'C':'P2718R0::C' xvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'C':'P2718R0::C'
- // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'C':'P2718R0::C' 'void (int, const C &, const DefaultA &)'
+ // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'C' xvalue
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'C' xvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'C'
+ // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'C' 'void (int, const C &, const DefaultA &)'
// CHECK-NEXT: | |-IntegerLiteral {{.*}}'int' 0
- // CHECK-NEXT: | |-MaterializeTemporaryExpr {{.*}} 'const C':'const P2718R0::C' lvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const C':'const P2718R0::C' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'C':'P2718R0::C'
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'C':'P2718R0::C' 'void (int, const C &, const DefaultA &)'
+ // CHECK-NEXT: | |-MaterializeTemporaryExpr {{.*}} 'const C' lvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const C' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'C'
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'C' 'void (int, const C &, const DefaultA &)'
// CHECK-NEXT: | | |-IntegerLiteral {{.*}} 'int' 0
- // CHECK-NEXT: | | |-MaterializeTemporaryExpr {{.*}} 'const C':'const P2718R0::C' lvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const C':'const P2718R0::C' <NoOp>
- // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'C':'P2718R0::C'
- // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'C':'P2718R0::C' 'void (int, const C &, const DefaultA &)'
+ // CHECK-NEXT: | | |-MaterializeTemporaryExpr {{.*}} 'const C' lvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const C' <NoOp>
+ // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'C'
+ // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'C' 'void (int, const C &, const DefaultA &)'
// CHECK-NEXT: | | | |-IntegerLiteral {{.*}} 'int' 0
- // CHECK-NEXT: | | | |-MaterializeTemporaryExpr {{.*}} 'const C':'const P2718R0::C' lvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} 'const C':'const P2718R0::C' <NoOp>
- // CHECK-NEXT: | | | | `-CXXBindTemporaryExpr {{.*}} 'C':'P2718R0::C'
- // CHECK-NEXT: | | | | `-CXXTemporaryObjectExpr {{.*}} 'C':'P2718R0::C' 'void ()'
- // CHECK-NEXT: | | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA':'const P2718R0::DefaultA' lvalue has rewritten init
- // CHECK-NEXT: | | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' lvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' <NoOp>
- // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' 'void ()'
- // CHECK-NEXT: | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA':'const P2718R0::DefaultA' lvalue has rewritten init
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' lvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' 'void ()'
- // CHECK-NEXT: | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA':'const P2718R0::DefaultA' lvalue has rewritten init
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' lvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' 'void ()'
+ // CHECK-NEXT: | | | |-MaterializeTemporaryExpr {{.*}} 'const C' lvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} 'const C' <NoOp>
+ // CHECK-NEXT: | | | | `-CXXBindTemporaryExpr {{.*}} 'C'
+ // CHECK-NEXT: | | | | `-CXXTemporaryObjectExpr {{.*}} 'C' 'void ()'
+ // CHECK-NEXT: | | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA' lvalue has rewritten init
+ // CHECK-NEXT: | | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA' lvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const DefaultA' <NoOp>
+ // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA' 'void ()'
+ // CHECK-NEXT: | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA' lvalue has rewritten init
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA' lvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const DefaultA' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA' 'void ()'
+ // CHECK-NEXT: | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA' lvalue has rewritten init
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA' lvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const DefaultA' <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'DefaultA' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA' 'void ()'
for (auto e : C(0, C(0, C(0, C()))))
bar(e);
}
@@ -255,28 +255,28 @@ void test7() {
// CHECK-NEXT: |-<<<NULL>>>
// CHECK-NEXT: |-DeclStmt {{.*}}
// CHECK-NEXT: | `-VarDecl {{.*}} implicit used __range1 'A &&' cinit
- // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'A':'P2718R0::A' xvalue
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'A' xvalue
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A'
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .g {{.*}}
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A' lvalue
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .r {{.*}}
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A'
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .g {{.*}}
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A' lvalue
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .r {{.*}}
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A'
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .g {{.*}}
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A' lvalue
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .r {{.*}}
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CallExpr {{.*}} 'A'
// CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'A (*)()' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'A ()' lvalue Function {{.*}} 'g' 'A ()'
for (auto e : g().r().g().r().g().r().g())
@@ -324,11 +324,11 @@ void test9() {
// CHECK-NEXT: | `-CallExpr {{.*}} 'const A *'
// CHECK-NEXT: | |-ImplicitCastExpr {{.*}} 'const A *(*)(const A &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} 'const A *(const A &)' lvalue Function {{.*}} 'dg2' 'const A *(const A &)'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' lvalue <DerivedToBase (A)>
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const B':'const P2718R0::B' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const B':'const P2718R0::B' <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'B':'P2718R0::B' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'B':'P2718R0::B' 'void () noexcept(false)' zeroing
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A' lvalue <DerivedToBase (A)>
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const B' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const B' <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'B' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'B' 'void () noexcept(false)' zeroing
for (auto e : df2(dg2(B())))
bar(e);
}
@@ -348,10 +348,10 @@ void test10() {
// CHECK-NEXT: | | `-CallExpr {{.*}} 'const P2718R0::LockGuard' lvalue
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} 'const P2718R0::LockGuard &(*)(const P2718R0::LockGuard &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' lvalue Function {{.*}} 'df1' 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' (FunctionTemplate {{.*}} 'df1')
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const LockGuard' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int[3]' lvalue Var {{.*}} 'v' 'int[3]'
for ([[maybe_unused]] int x : static_cast<void>(df1(LockGuard())), v)
LockGuard guard;
@@ -366,10 +366,10 @@ void test10() {
// CHECK-NEXT: | | `-CallExpr {{.*}} 'const P2718R0::LockGuard' lvalue
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} 'const P2718R0::LockGuard &(*)(const P2718R0::LockGuard &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' lvalue Function {{.*}} 'df1' 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' (FunctionTemplate {{.*}} 'df1')
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const LockGuard' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int[3]' lvalue Var {{.*}} 'v' 'int[3]'
for ([[maybe_unused]] int x : (void)df1(LockGuard()), v)
LockGuard guard;
@@ -384,17 +384,17 @@ void test10() {
// CHECK-NEXT: | | |-CallExpr {{.*}} 'const P2718R0::LockGuard' lvalue
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} 'const P2718R0::LockGuard &(*)(const P2718R0::LockGuard &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' lvalue Function {{.*}} 'df1' 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' (FunctionTemplate {{.*}} 'df1')
- // CHECK-NEXT: | | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' <NoOp>
- // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const LockGuard' <NoOp>
+ // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | | `-CallExpr {{.*}} 'const P2718R0::LockGuard' lvalue
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} 'const P2718R0::LockGuard &(*)(const P2718R0::LockGuard &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' lvalue Function {{.*}} 'df1' 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' (FunctionTemplate {{.*}} 'df1')
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const LockGuard' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int[3]' lvalue Var {{.*}} 'v' 'int[3]'
for ([[maybe_unused]] int x : df1(LockGuard()), df1(LockGuard()), v)
LockGuard guard;
@@ -403,7 +403,7 @@ void test10() {
// Test default argument && dependent context
template <typename T> int (&default_arg_fn2(const T & = T()))[3];
void test11() {
  for (auto e : default_arg_fn2<A>())
bar(e);
}
@@ -422,24 +422,24 @@ void test13() {
// CHECK-NEXT: |-<<<NULL>>>
// CHECK-NEXT: |-DeclStmt {{.*}}
// CHECK-NEXT: | `-VarDecl {{.*}} implicit used __range1 'A &&' cinit
- // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'A':'P2718R0::A' xvalue
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'A' xvalue
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A'
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .g {{.*}}
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A' lvalue
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .r {{.*}}
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A'
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .g {{.*}}
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A' lvalue
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .r {{.*}}
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A'
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .g {{.*}}
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A' lvalue
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .r {{.*}}
// CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
// CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'P2718R0::A' (CXXTemporary {{.*}})
@@ -474,17 +474,17 @@ void test14() {
// CHECK-NEXT: | `-VarDecl {{.*}} implicit used __range1 'const int (&)[1]' cinit
// CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'const int[1]' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} 'const int[1]' lvalue .arr {{.*}}
- // CHECK-NEXT: | `-MemberExpr {{.*}} 'const A14':'const P2718R0::A14' lvalue .a {{.*}}
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'B14':'P2718R0::B14' xvalue extended by Var {{.*}} '__range1' 'const int (&)[1]'
- // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'B14':'P2718R0::B14' functional cast to B14 <NoOp>
- // CHECK-NEXT: | `-InitListExpr {{.*}} 'B14':'P2718R0::B14'
+ // CHECK-NEXT: | `-MemberExpr {{.*}} 'const A14' lvalue .a {{.*}}
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'B14' xvalue extended by Var {{.*}} '__range1' 'const int (&)[1]'
+ // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'B14' functional cast to B14 <NoOp>
+ // CHECK-NEXT: | `-InitListExpr {{.*}} 'B14'
// CHECK-NEXT: | |-IntegerLiteral {{.*}} 'int' 0
- // CHECK-NEXT: | `-CXXDefaultInitExpr {{.*}} 'const A14':'const P2718R0::A14' lvalue has rewritten init
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A14':'const P2718R0::A14' lvalue extended by Var {{.*}} '__range1' 'const int (&)[1]'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A14':'const P2718R0::A14' <NoOp>
- // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'A14':'P2718R0::A14' functional cast to A14 <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A14':'P2718R0::A14' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-InitListExpr {{.*}} 'A14':'P2718R0::A14'
+ // CHECK-NEXT: | `-CXXDefaultInitExpr {{.*}} 'const A14' lvalue has rewritten init
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A14' lvalue extended by Var {{.*}} '__range1' 'const int (&)[1]'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A14' <NoOp>
+ // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'A14' functional cast to A14 <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A14' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-InitListExpr {{.*}} 'A14'
// CHECK-NEXT: | `-InitListExpr {{.*}} 'int[1]'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} 'int' 0
for (auto &&x : B14{0}.a.arr) { exit(0); }
@@ -493,17 +493,17 @@ void test14() {
// CHECK-NEXT: |-<<<NULL>>>
// CHECK-NEXT: |-DeclStmt {{.*}}
// CHECK-NEXT: | `-VarDecl {{.*}} col:19 implicit used __range1 'B14 &&' cinit
- // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'B14':'P2718R0::B14' xvalue
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'B14':'P2718R0::B14' xvalue extended by Var {{.*}} '__range1' 'B14 &&'
- // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'B14':'P2718R0::B14' functional cast to B14 <NoOp>
- // CHECK-NEXT: | `-InitListExpr {{.*}} 'B14':'P2718R0::B14'
+ // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'B14' xvalue
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'B14' xvalue extended by Var {{.*}} '__range1' 'B14 &&'
+ // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'B14' functional cast to B14 <NoOp>
+ // CHECK-NEXT: | `-InitListExpr {{.*}} 'B14'
// CHECK-NEXT: | |-IntegerLiteral {{.*}} 'int' 0
- // CHECK-NEXT: | `-CXXDefaultInitExpr {{.*}} 'const A14':'const P2718R0::A14' lvalue has rewritten init
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A14':'const P2718R0::A14' lvalue extended by Var {{.*}} '__range1' 'B14 &&'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A14':'const P2718R0::A14' <NoOp>
- // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'A14':'P2718R0::A14' functional cast to A14 <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A14':'P2718R0::A14' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-InitListExpr {{.*}} 'A14':'P2718R0::A14'
+ // CHECK-NEXT: | `-CXXDefaultInitExpr {{.*}} 'const A14' lvalue has rewritten init
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A14' lvalue extended by Var {{.*}} '__range1' 'B14 &&'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A14' <NoOp>
+ // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'A14' functional cast to A14 <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A14' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-InitListExpr {{.*}} 'A14'
// CHECK-NEXT: | `-InitListExpr {{.*}} 'int[1]'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} 'int' 0
for (auto &&x : B14{0}) { exit(0); }
diff --git a/clang/test/AST/ast-dump-openmp-begin-declare-variant_reference.cpp b/clang/test/AST/ast-dump-openmp-begin-declare-variant_reference.cpp
index 1937a5d..9584e77 100644
--- a/clang/test/AST/ast-dump-openmp-begin-declare-variant_reference.cpp
+++ b/clang/test/AST/ast-dump-openmp-begin-declare-variant_reference.cpp
@@ -192,12 +192,12 @@ int test(float &&f, short &&s) {
// CHECK-NEXT: | | `-CompoundStmt [[ADDR_44:0x[a-z0-9]*]] <col:17, line:14:1>
// CHECK-NEXT: | | |-DeclStmt [[ADDR_45:0x[a-z0-9]*]] <line:12:3, col:51>
// CHECK-NEXT: | | | `-TypedefDecl [[ADDR_46:0x[a-z0-9]*]] <col:3, col:48> col:48 referenced _Up 'typename remove_reference<float &>::type':'float'
-// CHECK-NEXT: | | | `-ElaboratedType [[ADDR_47:0x[a-z0-9]*]] 'typename remove_reference<float &>::type' sugar
-// CHECK-NEXT: | | | `-TypedefType [[ADDR_48:0x[a-z0-9]*]] 'remove_reference<float &>::type' sugar
-// CHECK-NEXT: | | | |-Typedef [[ADDR_10]] 'type'
-// CHECK-NEXT: | | | `-SubstTemplateTypeParmType [[ADDR_11]] 'float' sugar class depth 0 index 0 _Tp
-// CHECK-NEXT: | | | |-ClassTemplateSpecialization [[ADDR_6]] 'remove_reference'
-// CHECK-NEXT: | | | `-BuiltinType [[ADDR_8]] 'float'
+// CHECK-NEXT: | | | `-TypedefType [[ADDR_48:0x[a-z0-9]*]] 'typename remove_reference<float &>::type' sugar typename
+// CHECK-NEXT: | | | |-NestedNameSpecifier TypeSpec 'remove_reference<float &>'
+// CHECK-NEXT: | | | |-Typedef [[ADDR_10]] 'type'
+// CHECK-NEXT: | | | `-SubstTemplateTypeParmType [[ADDR_11]] 'float' sugar class depth 0 index 0 _Tp
+// CHECK-NEXT: | | | |-ClassTemplateSpecialization [[ADDR_6]] 'remove_reference'
+// CHECK-NEXT: | | | `-BuiltinType [[ADDR_8]] 'float'
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_49:0x[a-z0-9]*]] <line:13:3, col:33>
// CHECK-NEXT: | | `-CXXStaticCastExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:33> '_Up':'float' xvalue static_cast<_Up &&> <NoOp>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_51:0x[a-z0-9]*]] <col:30> 'float' {{.*}}ParmVar [[ADDR_43]] '__t' 'float &'
@@ -209,12 +209,12 @@ int test(float &&f, short &&s) {
// CHECK-NEXT: | `-CompoundStmt [[ADDR_54:0x[a-z0-9]*]] <col:17, line:14:1>
// CHECK-NEXT: | |-DeclStmt [[ADDR_55:0x[a-z0-9]*]] <line:12:3, col:51>
// CHECK-NEXT: | | `-TypedefDecl [[ADDR_56:0x[a-z0-9]*]] <col:3, col:48> col:48 referenced _Up 'typename remove_reference<short &>::type':'short'
-// CHECK-NEXT: | | `-ElaboratedType [[ADDR_57:0x[a-z0-9]*]] 'typename remove_reference<short &>::type' sugar
-// CHECK-NEXT: | | `-TypedefType [[ADDR_58:0x[a-z0-9]*]] 'remove_reference<short &>::type' sugar
-// CHECK-NEXT: | | |-Typedef [[ADDR_18]] 'type'
-// CHECK-NEXT: | | `-SubstTemplateTypeParmType [[ADDR_19]] 'short' sugar class depth 0 index 0 _Tp
-// CHECK-NEXT: | | |-ClassTemplateSpecialization [[ADDR_14]] 'remove_reference'
-// CHECK-NEXT: | | `-BuiltinType [[ADDR_16]] 'short'
+// CHECK-NEXT: | | `-TypedefType [[ADDR_58:0x[a-z0-9]*]] 'typename remove_reference<short &>::type' sugar typename
+// CHECK-NEXT: | | |-NestedNameSpecifier TypeSpec 'remove_reference<short &>'
+// CHECK-NEXT: | | |-Typedef [[ADDR_18]] 'type'
+// CHECK-NEXT: | | `-SubstTemplateTypeParmType [[ADDR_19]] 'short' sugar class depth 0 index 0 _Tp
+// CHECK-NEXT: | | |-ClassTemplateSpecialization [[ADDR_14]] 'remove_reference'
+// CHECK-NEXT: | | `-BuiltinType [[ADDR_16]] 'short'
// CHECK-NEXT: | `-ReturnStmt [[ADDR_59:0x[a-z0-9]*]] <line:13:3, col:33>
// CHECK-NEXT: | `-CXXStaticCastExpr [[ADDR_60:0x[a-z0-9]*]] <col:10, col:33> '_Up':'short' xvalue static_cast<_Up &&> <NoOp>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_61:0x[a-z0-9]*]] <col:30> 'short' {{.*}}ParmVar [[ADDR_53]] '__t' 'short &'
diff --git a/clang/test/AST/ast-dump-openmp-begin-declare-variant_template_3.cpp b/clang/test/AST/ast-dump-openmp-begin-declare-variant_template_3.cpp
index ad26950..44d1cb4 100644
--- a/clang/test/AST/ast-dump-openmp-begin-declare-variant_template_3.cpp
+++ b/clang/test/AST/ast-dump-openmp-begin-declare-variant_template_3.cpp
@@ -54,7 +54,7 @@ int test() {
// CHECK-NEXT: | | | |-CopyAssignment simple trivial has_const_param needs_implicit implicit_has_const_param
// CHECK-NEXT: | | | |-MoveAssignment exists simple trivial needs_implicit
// CHECK-NEXT: | | | `-Destructor simple irrelevant trivial {{(constexpr )?}}needs_implicit
-// CHECK-NEXT: | | |-CXXRecordDecl [[ADDR_3:0x[a-z0-9]*]] <col:23, col:30> col:30 implicit referenced struct S
+// CHECK-NEXT: | | |-CXXRecordDecl [[ADDR_3:0x[a-z0-9]*]] <col:23, col:30> col:30 implicit struct S
// CHECK-NEXT: | | `-CXXConstructorDecl [[ADDR_4:0x[a-z0-9]*]] <line:6:3, col:16> col:3 S<T> 'void (int, T *)'
// CHECK-NEXT: | | |-ParmVarDecl [[ADDR_5:0x[a-z0-9]*]] <col:5> col:8 'int'
// CHECK-NEXT: | | |-ParmVarDecl [[ADDR_6:0x[a-z0-9]*]] <col:10, col:12> col:13 'T *'
diff --git a/clang/test/AST/ast-dump-record-definition-data-json.cpp b/clang/test/AST/ast-dump-record-definition-data-json.cpp
index c119089..e35bec7 100644
--- a/clang/test/AST/ast-dump-record-definition-data-json.cpp
+++ b/clang/test/AST/ast-dump-record-definition-data-json.cpp
@@ -2516,7 +2516,6 @@ struct DoesNotAllowConstDefaultInit {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "isImplicit": true,
-// CHECK-NEXT: "isReferenced": true,
// CHECK-NEXT: "name": "IsTrivial",
// CHECK-NEXT: "tagUsed": "struct"
// CHECK-NEXT: },
@@ -2646,7 +2645,6 @@ struct DoesNotAllowConstDefaultInit {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "isImplicit": true,
-// CHECK-NEXT: "isReferenced": true,
// CHECK-NEXT: "name": "IsNotTrivial",
// CHECK-NEXT: "tagUsed": "struct"
// CHECK-NEXT: },
@@ -3980,7 +3978,6 @@ struct DoesNotAllowConstDefaultInit {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "isImplicit": true,
-// CHECK-NEXT: "isReferenced": true,
// CHECK-NEXT: "name": "HasUserDeclaredConstructor",
// CHECK-NEXT: "tagUsed": "struct"
// CHECK-NEXT: },
@@ -4234,7 +4231,6 @@ struct DoesNotAllowConstDefaultInit {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "isImplicit": true,
-// CHECK-NEXT: "isReferenced": true,
// CHECK-NEXT: "name": "HasConstexprNonCopyMoveConstructor",
// CHECK-NEXT: "tagUsed": "struct"
// CHECK-NEXT: },
@@ -4381,7 +4377,6 @@ struct DoesNotAllowConstDefaultInit {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "isImplicit": true,
-// CHECK-NEXT: "isReferenced": true,
// CHECK-NEXT: "name": "HasNoConstexprNonCopyMoveConstructor",
// CHECK-NEXT: "tagUsed": "struct"
// CHECK-NEXT: },
diff --git a/clang/test/AST/ast-dump-records-json.cpp b/clang/test/AST/ast-dump-records-json.cpp
index 7efdcb0..941c6a6 100644
--- a/clang/test/AST/ast-dump-records-json.cpp
+++ b/clang/test/AST/ast-dump-records-json.cpp
@@ -795,7 +795,6 @@ struct Derived6 : virtual public Bases... {
// CHECK-NEXT: },
// CHECK-NEXT: "name": "b",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "C::(unnamed struct at {{.*}}:16:3)",
// CHECK-NEXT: "qualType": "struct (unnamed struct at {{.*}}:16:3)"
// CHECK-NEXT: }
// CHECK-NEXT: },
@@ -2072,7 +2071,6 @@ struct Derived6 : virtual public Bases... {
// CHECK-NEXT: },
// CHECK-NEXT: "name": "b",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "G::(unnamed struct at {{.*}}:50:3)",
// CHECK-NEXT: "qualType": "struct (unnamed struct at {{.*}}:50:3)"
// CHECK-NEXT: }
// CHECK-NEXT: },
diff --git a/clang/test/AST/ast-dump-records.c b/clang/test/AST/ast-dump-records.c
index f4a540f..1dc175c 100644
--- a/clang/test/AST/ast-dump-records.c
+++ b/clang/test/AST/ast-dump-records.c
@@ -47,7 +47,7 @@ struct C {
int a;
// CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:5, col:9> col:9 a 'int'
} b;
- // CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-5]]:3, line:[[@LINE-1]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-5]]:3)':'struct C::(unnamed at {{.*}}:[[@LINE-5]]:3)'
+ // CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-5]]:3, line:[[@LINE-1]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-5]]:3)'
union {
// CHECK-NEXT: RecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:3, line:[[@LINE+5]]:3> line:[[@LINE-1]]:3 union definition
@@ -122,15 +122,13 @@ union E {
};
union G {
- // CHECK: RecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:1, line:[[@LINE+38]]:1> line:[[@LINE-1]]:7 union G definition
+ // CHECK: RecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:1, line:[[@LINE+36]]:1> line:[[@LINE-1]]:7 union G definition
struct {
// CHECK-NEXT: RecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:3, line:[[@LINE+3]]:3> line:[[@LINE-1]]:3 struct definition
int a;
// CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:5, col:9> col:9 a 'int'
} b;
- // FIXME: note that it talks about 'struct G' below; the same happens in
- // other cases with union G as well.
- // CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-7]]:3, line:[[@LINE-3]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-7]]:3)':'struct G::(unnamed at {{.*}}:[[@LINE-7]]:3)'
+ // CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-5]]:3, line:[[@LINE-1]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-5]]:3)'
union {
// CHECK-NEXT: RecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:3, line:[[@LINE+5]]:3> line:[[@LINE-1]]:3 union definition
diff --git a/clang/test/AST/ast-dump-records.cpp b/clang/test/AST/ast-dump-records.cpp
index e9b37b7..edd13ba 100644
--- a/clang/test/AST/ast-dump-records.cpp
+++ b/clang/test/AST/ast-dump-records.cpp
@@ -72,7 +72,7 @@ struct C {
int a;
// CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:5, col:9> col:9 a 'int'
} b;
- // CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-12]]:3, line:[[@LINE-1]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-12]]:3)':'C::(unnamed struct at {{.*}}:[[@LINE-12]]:3)'
+ // CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-12]]:3, line:[[@LINE-1]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-12]]:3)'
union {
// CHECK-NEXT: CXXRecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:3, line:[[@LINE+12]]:3> line:[[@LINE-1]]:3 union definition
@@ -179,7 +179,7 @@ union E {
};
union G {
- // CHECK: CXXRecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:1, line:[[@LINE+71]]:1> line:[[@LINE-1]]:7 union G definition
+ // CHECK: CXXRecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:1, line:[[@LINE+69]]:1> line:[[@LINE-1]]:7 union G definition
// CHECK-NEXT: DefinitionData pass_in_registers aggregate standard_layout trivially_copyable pod trivial literal
// CHECK-NEXT: DefaultConstructor exists trivial needs_implicit
// CHECK-NEXT: CopyConstructor simple trivial has_const_param needs_implicit implicit_has_const_param
@@ -202,9 +202,7 @@ union G {
int a;
// CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:5, col:9> col:9 a 'int'
} b;
- // FIXME: note that it talks about 'struct G' below; the same happens in
- // other cases with union G as well.
- // CHECK: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-15]]:3, line:[[@LINE-3]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-15]]:3)':'G::(unnamed struct at {{.*}}:[[@LINE-15]]:3)'
+ // CHECK: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-13]]:3, line:[[@LINE-1]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-13]]:3)'
union {
// CHECK-NEXT: CXXRecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:3, line:[[@LINE+13]]:3> line:[[@LINE-1]]:3 union definition
diff --git a/clang/test/AST/ast-dump-recovery.cpp b/clang/test/AST/ast-dump-recovery.cpp
index a8e30f1..060ae3b 100644
--- a/clang/test/AST/ast-dump-recovery.cpp
+++ b/clang/test/AST/ast-dump-recovery.cpp
@@ -128,7 +128,7 @@ void test2(Foo2 f) {
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'f'
// CHECK-NEXT: `-IntegerLiteral {{.*}} 'int' 1
f.func(1);
- // CHECK: RecoveryExpr {{.*}} 'ForwardClass':'Foo2::ForwardClass'
+ // CHECK: RecoveryExpr {{.*}} 'ForwardClass'
// CHECK-NEXT: `-MemberExpr {{.*}} '<bound member function type>' .createFwd
// CHECK-NEXT: `-DeclRefExpr {{.*}} 'f'
f.createFwd();
@@ -292,8 +292,8 @@ union U {
// CHECK: FunctionDecl {{.*}} foo 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}}
// CHECK-NEXT: `-DeclStmt {{.*}}
-// CHECK-NEXT: `-VarDecl {{.*}} g 'U':'GH112560::U' listinit
-// CHECK-NEXT: `-InitListExpr {{.*}} 'U':'GH112560::U' contains-errors field Field {{.*}} 'f' 'int'
+// CHECK-NEXT: `-VarDecl {{.*}} g 'U' listinit
+// CHECK-NEXT: `-InitListExpr {{.*}} 'U' contains-errors field Field {{.*}} 'f' 'int'
// CHECK-NEXT: `-CXXDefaultInitExpr {{.*}} 'int' contains-errors has rewritten init
// CHECK-NEXT: `-RecoveryExpr {{.*}} 'int' contains-errors
// DISABLED-NOT: -RecoveryExpr {{.*}} contains-errors
diff --git a/clang/test/AST/ast-dump-stmt-json.cpp b/clang/test/AST/ast-dump-stmt-json.cpp
index a8f113c..ee99ab8 100644
--- a/clang/test/AST/ast-dump-stmt-json.cpp
+++ b/clang/test/AST/ast-dump-stmt-json.cpp
@@ -224,7 +224,55 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "void ()"
// CHECK-NEXT: }
-// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: "inner": [
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "FunctionDecl",
+// CHECK-NEXT: "loc": {
+// CHECK-NEXT: "offset": 125,
+// CHECK-NEXT: "line": 4,
+// CHECK-NEXT: "col": 6,
+// CHECK-NEXT: "tokLen": 8
+// CHECK-NEXT: },
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {
+// CHECK-NEXT: "offset": 120,
+// CHECK-NEXT: "col": 1,
+// CHECK-NEXT: "tokLen": 4
+// CHECK-NEXT: },
+// CHECK-NEXT: "end": {
+// CHECK-NEXT: "offset": 137,
+// CHECK-NEXT: "col": 18,
+// CHECK-NEXT: "tokLen": 1
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: "isUsed": true,
+// CHECK-NEXT: "name": "function",
+// CHECK-NEXT: "mangledName": "_ZN1n8functionEv",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "void ()"
+// CHECK-NEXT: },
+// CHECK-NEXT: "inner": [
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "CompoundStmt",
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {
+// CHECK-NEXT: "offset": 136,
+// CHECK-NEXT: "col": 17,
+// CHECK-NEXT: "tokLen": 1
+// CHECK-NEXT: },
+// CHECK-NEXT: "end": {
+// CHECK-NEXT: "offset": 137,
+// CHECK-NEXT: "col": 18,
+// CHECK-NEXT: "tokLen": 1
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
// CHECK-NEXT: }
@@ -279,7 +327,37 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "int"
// CHECK-NEXT: }
-// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: "inner": [
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "VarDecl",
+// CHECK-NEXT: "loc": {
+// CHECK-NEXT: "offset": 143,
+// CHECK-NEXT: "line": 5,
+// CHECK-NEXT: "col": 5,
+// CHECK-NEXT: "tokLen": 8
+// CHECK-NEXT: },
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {
+// CHECK-NEXT: "offset": 139,
+// CHECK-NEXT: "col": 1,
+// CHECK-NEXT: "tokLen": 3
+// CHECK-NEXT: },
+// CHECK-NEXT: "end": {
+// CHECK-NEXT: "offset": 143,
+// CHECK-NEXT: "col": 5,
+// CHECK-NEXT: "tokLen": 8
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: "isUsed": true,
+// CHECK-NEXT: "name": "Variable",
+// CHECK-NEXT: "mangledName": "_ZN1n8VariableE",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "int"
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
// CHECK-NEXT: }
@@ -1869,6 +1947,7 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: ]
// CHECK-NEXT: }
+
// CHECK-NOT: {{^}}Dumping
// CHECK: "kind": "FunctionDecl",
// CHECK-NEXT: "loc": {},
@@ -1936,6 +2015,7 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: ]
// CHECK-NEXT: }
+
// CHECK-NOT: {{^}}Dumping
// CHECK: "kind": "FunctionDecl",
// CHECK-NEXT: "loc": {},
@@ -2086,6 +2166,7 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: ]
// CHECK-NEXT: }
+
// CHECK-NOT: {{^}}Dumping
// CHECK: "kind": "FunctionDecl",
// CHECK-NEXT: "loc": {},
@@ -2153,6 +2234,7 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: ]
// CHECK-NEXT: }
+
// CHECK-NOT: {{^}}Dumping
// CHECK: "kind": "FunctionTemplateDecl",
// CHECK-NEXT: "loc": {
diff --git a/clang/test/AST/ast-dump-stmt.m b/clang/test/AST/ast-dump-stmt.m
index e0fc16b..b68f5b6 100644
--- a/clang/test/AST/ast-dump-stmt.m
+++ b/clang/test/AST/ast-dump-stmt.m
@@ -55,4 +55,4 @@ id TestCompoundLiteral(id a) {
// CHECK: FunctionDecl{{.*}}TestCompoundLiteral
// CHECK: ExprWithCleanups
// CHECK-NEXT: cleanup CompoundLiteralExpr
-// CHECK: CompoundLiteralExpr{{.*}}'S' lvalue
+// CHECK: CompoundLiteralExpr{{.*}}'S':'struct S' lvalue
diff --git a/clang/test/AST/ast-dump-template-decls.cpp b/clang/test/AST/ast-dump-template-decls.cpp
index d5228d4..ba3e405 100644
--- a/clang/test/AST/ast-dump-template-decls.cpp
+++ b/clang/test/AST/ast-dump-template-decls.cpp
@@ -115,8 +115,7 @@ template <class T> struct C {
};
using type2 = typename C<int>::type1<void>;
// CHECK: TypeAliasDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:1, col:42> col:7 type2 'typename C<int>::type1<void>':'void (int)'
-// CHECK-NEXT: ElaboratedType 0x{{[^ ]*}} 'typename C<int>::type1<void>' sugar
-// CHECK-NEXT: TemplateSpecializationType 0x{{[^ ]*}} 'type1<void>' sugar alias
+// CHECK-NEXT: TemplateSpecializationType 0x{{[^ ]*}} 'typename C<int>::type1<void>' sugar alias
// CHECK-NEXT: name: 'C<int>::type1':'PR55886::C<int>::type1' qualified
// CHECK-NEXT: NestedNameSpecifier TypeSpec 'C<int>':'PR55886::C<int>'
// CHECK-NEXT: TypeAliasTemplateDecl {{.+}} type1
diff --git a/clang/test/AST/ast-dump-template-json-win32-mangler-crash.cpp b/clang/test/AST/ast-dump-template-json-win32-mangler-crash.cpp
index cc9a82c..3e0877f 100644
--- a/clang/test/AST/ast-dump-template-json-win32-mangler-crash.cpp
+++ b/clang/test/AST/ast-dump-template-json-win32-mangler-crash.cpp
@@ -537,28 +537,17 @@ int main()
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "ElaboratedType",
+// CHECK-NEXT: "kind": "InjectedClassNameType",
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "integral_constant<_Ty, _Val>"
// CHECK-NEXT: },
// CHECK-NEXT: "isDependent": true,
// CHECK-NEXT: "isInstantiationDependent": true,
-// CHECK-NEXT: "inner": [
-// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "InjectedClassNameType",
-// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "integral_constant<_Ty, _Val>"
-// CHECK-NEXT: },
-// CHECK-NEXT: "isDependent": true,
-// CHECK-NEXT: "isInstantiationDependent": true,
-// CHECK-NEXT: "decl": {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "CXXRecordDecl",
-// CHECK-NEXT: "name": "integral_constant"
-// CHECK-NEXT: }
-// CHECK-NEXT: }
-// CHECK-NEXT: ]
+// CHECK-NEXT: "decl": {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "CXXRecordDecl",
+// CHECK-NEXT: "name": "integral_constant"
+// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: ]
// CHECK-NEXT: },
@@ -885,71 +874,60 @@ int main()
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "ElaboratedType",
+// CHECK-NEXT: "kind": "TemplateSpecializationType",
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "integral_constant<bool, _Val>"
// CHECK-NEXT: },
// CHECK-NEXT: "isDependent": true,
// CHECK-NEXT: "isInstantiationDependent": true,
+// CHECK-NEXT: "templateName": "integral_constant",
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "TemplateSpecializationType",
+// CHECK-NEXT: "kind": "TemplateArgument",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "integral_constant<bool, _Val>"
+// CHECK-NEXT: "qualType": "bool"
// CHECK-NEXT: },
-// CHECK-NEXT: "isDependent": true,
-// CHECK-NEXT: "isInstantiationDependent": true,
-// CHECK-NEXT: "templateName": "integral_constant",
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
-// CHECK-NEXT: "kind": "TemplateArgument",
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "BuiltinType",
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "bool"
-// CHECK-NEXT: },
-// CHECK-NEXT: "inner": [
-// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "BuiltinType",
-// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "bool"
-// CHECK-NEXT: }
-// CHECK-NEXT: }
-// CHECK-NEXT: ]
-// CHECK-NEXT: },
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
+// CHECK-NEXT: },
+// CHECK-NEXT: {
+// CHECK-NEXT: "kind": "TemplateArgument",
+// CHECK-NEXT: "isExpr": true,
+// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
-// CHECK-NEXT: "kind": "TemplateArgument",
-// CHECK-NEXT: "isExpr": true,
-// CHECK-NEXT: "inner": [
-// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "DeclRefExpr",
-// CHECK-NEXT: "range": {
-// CHECK-NEXT: "begin": {
-// CHECK-NEXT: "offset": 554,
-// CHECK-NEXT: "col": 47,
-// CHECK-NEXT: "tokLen": 4
-// CHECK-NEXT: },
-// CHECK-NEXT: "end": {
-// CHECK-NEXT: "offset": 554,
-// CHECK-NEXT: "col": 47,
-// CHECK-NEXT: "tokLen": 4
-// CHECK-NEXT: }
-// CHECK-NEXT: },
-// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "bool"
-// CHECK-NEXT: },
-// CHECK-NEXT: "valueCategory": "prvalue",
-// CHECK-NEXT: "referencedDecl": {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "NonTypeTemplateParmDecl",
-// CHECK-NEXT: "name": "_Val",
-// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "bool"
-// CHECK-NEXT: }
-// CHECK-NEXT: }
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "DeclRefExpr",
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {
+// CHECK-NEXT: "offset": 554,
+// CHECK-NEXT: "col": 47,
+// CHECK-NEXT: "tokLen": 4
+// CHECK-NEXT: },
+// CHECK-NEXT: "end": {
+// CHECK-NEXT: "offset": 554,
+// CHECK-NEXT: "col": 47,
+// CHECK-NEXT: "tokLen": 4
// CHECK-NEXT: }
-// CHECK-NEXT: ]
+// CHECK-NEXT: },
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "bool"
+// CHECK-NEXT: },
+// CHECK-NEXT: "valueCategory": "prvalue",
+// CHECK-NEXT: "referencedDecl": {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "NonTypeTemplateParmDecl",
+// CHECK-NEXT: "name": "_Val",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "bool"
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: ]
// CHECK-NEXT: }
diff --git a/clang/test/AST/ast-dump-template-name.cpp b/clang/test/AST/ast-dump-template-name.cpp
index acacdac..7f08508 100644
--- a/clang/test/AST/ast-dump-template-name.cpp
+++ b/clang/test/AST/ast-dump-template-name.cpp
@@ -11,13 +11,12 @@ namespace qualified {
// CHECK: Dumping qualified::TestQualified:
// CHECK-NEXT: TypeAliasDecl
-// CHECK-NEXT: `-ElaboratedType
-// CHECK-NEXT: `-TemplateSpecializationType
-// CHECK-NEXT: |-name: 'N' qualified
-// CHECK-NEXT: | `-TypeAliasTemplateDecl {{.+}} N{{$}}
-// CHECK-NEXT: |-TemplateArgument template 'foo::A':'qualified::foo::A' qualified{{$}}
-// CHECK-NEXT: | |-NestedNameSpecifier Namespace 0x{{.+}} 'foo'{{$}}
-// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A{{$}}
+// CHECK-NEXT: `-TemplateSpecializationType
+// CHECK-NEXT: |-name: 'N' qualified
+// CHECK-NEXT: | `-TypeAliasTemplateDecl {{.+}} N{{$}}
+// CHECK-NEXT: |-TemplateArgument template 'foo::A':'qualified::foo::A' qualified{{$}}
+// CHECK-NEXT: | |-NestedNameSpecifier Namespace 0x{{.+}} 'foo'{{$}}
+// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A{{$}}
namespace dependent {
template <class T> struct B {
@@ -27,12 +26,11 @@ namespace dependent {
// CHECK: Dumping dependent::B::TestDependent:
// CHECK-NEXT: TypeAliasDecl
-// CHECK-NEXT: `-ElaboratedType
-// CHECK-NEXT: `-TemplateSpecializationType
-// CHECK-NEXT: |-name: 'N' qualified
-// CHECK-NEXT: | `-TypeAliasTemplateDecl
-// CHECK-NEXT: |-TemplateArgument template 'T::template X':'type-parameter-0-0::template X' dependent{{$}}
-// CHECK-NEXT: | `-NestedNameSpecifier TypeSpec 'T'{{$}}
+// CHECK-NEXT: `-TemplateSpecializationType
+// CHECK-NEXT: |-name: 'N' qualified
+// CHECK-NEXT: | `-TypeAliasTemplateDecl
+// CHECK-NEXT: |-TemplateArgument template 'T::template X':'type-parameter-0-0::template X' dependent{{$}}
+// CHECK-NEXT: | `-NestedNameSpecifier TypeSpec 'T'{{$}}
namespace subst {
template <class> struct A;
@@ -46,15 +44,14 @@ namespace subst {
// CHECK: Dumping subst::TestSubst:
// CHECK-NEXT: TypeAliasDecl
-// CHECK-NEXT: `-ElaboratedType
-// CHECK-NEXT: `-TypedefType
-// CHECK-NEXT: |-TypeAlias
-// CHECK-NEXT: `-ElaboratedType
-// CHECK-NEXT: `-TemplateSpecializationType
-// CHECK-NEXT: |-name: 'C':'subst::B<subst::A>::C' qualified
-// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} C
-// CHECK-NEXT: |-TemplateArgument template 'subst::A' subst index 0
-// CHECK-NEXT: | |-parameter: TemplateTemplateParmDecl {{.+}} depth 0 index 0 TT{{$}}
-// CHECK-NEXT: | |-associated ClassTemplateSpecialization {{.+}} 'B'{{$}}
-// CHECK-NEXT: | `-replacement:
-// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A{{$}}
+// CHECK-NEXT: `-TypedefType
+// CHECK-NEXT: |-NestedNameSpecifier TypeSpec 'B<A>':'subst::B<subst::A>'
+// CHECK-NEXT: |-TypeAlias
+// CHECK-NEXT: `-TemplateSpecializationType
+// CHECK-NEXT: |-name: 'C':'subst::B<subst::A>::C' qualified
+// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} C
+// CHECK-NEXT: |-TemplateArgument template 'subst::A' subst index 0
+// CHECK-NEXT: | |-parameter: TemplateTemplateParmDecl {{.+}} depth 0 index 0 TT{{$}}
+// CHECK-NEXT: | |-associated ClassTemplateSpecialization {{.+}} 'B'{{$}}
+// CHECK-NEXT: | `-replacement:
+// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A{{$}}
diff --git a/clang/test/AST/ast-dump-templates.cpp b/clang/test/AST/ast-dump-templates.cpp
index b504acc..e43fe6b 100644
--- a/clang/test/AST/ast-dump-templates.cpp
+++ b/clang/test/AST/ast-dump-templates.cpp
@@ -171,10 +171,11 @@ namespace TestDependentMemberPointer {
// DUMP-NEXT: | `-BuiltinType {{.+}} 'int'
// DUMP-NEXT: |-TypeAliasDecl {{.+}} Y 'int U::test::*'{{$}}
// DUMP-NEXT: | `-MemberPointerType {{.+}} 'int U::test::*' dependent
+// DUMP-NEXT: | |-DependentNameType {{.+}} 'U::test' dependent
// DUMP-NEXT: | `-BuiltinType {{.+}} 'int'
// DUMP-NEXT: `-TypeAliasDecl {{.+}} Z 'int U::template V<int>::*'{{$}}
// DUMP-NEXT: `-MemberPointerType {{.+}} 'int U::template V<int>::*' dependent
-// DUMP-NEXT: |-DependentTemplateSpecializationType {{.+}} 'template V<int>' dependent
+// DUMP-NEXT: |-DependentTemplateSpecializationType {{.+}} 'U::template V<int>' dependent
// DUMP-NEXT: `-BuiltinType {{.+}} 'int'
} // namespace TestDependentMemberPointer
@@ -186,14 +187,14 @@ namespace TestPartialSpecNTTP {
template <class U1, bool U2, bool U3>
struct Template2<Template1<U1, U2>, U3> {};
// DUMP: ClassTemplatePartialSpecializationDecl {{.+}} struct Template2
-// DUMP: |-TemplateArgument type 'Template1<type-parameter-0-0, value-parameter-0-1>'
-// DUMP-NEXT: | `-TemplateSpecializationType {{.+}} 'Template1<type-parameter-0-0, value-parameter-0-1>' dependent
+// DUMP: |-TemplateArgument type 'TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-1>'
+// DUMP-NEXT: | `-TemplateSpecializationType {{.+}} 'TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-1>' dependent
// DUMP-NEXT: | |-name: 'TestPartialSpecNTTP::Template1'
// DUMP-NEXT: | | `-ClassTemplateDecl {{.+}} Template1
// DUMP-NEXT: | |-TemplateArgument type 'type-parameter-0-0'
// DUMP-NEXT: | | `-TemplateTypeParmType {{.+}} 'type-parameter-0-0' dependent depth 0 index 0
// DUMP-NEXT: | `-TemplateArgument expr canonical 'value-parameter-0-1'
-// DUMP-NEXT: | `-DeclRefExpr {{.+}} 'bool' NonTypeTemplateParm {{.+}} 'TA2' 'bool'
+// DUMP-NEXT: | `-DeclRefExpr {{.+}} 'bool' NonTypeTemplateParm {{.+}} 'U2' 'bool'
// DUMP-NEXT: |-TemplateArgument expr canonical 'value-parameter-0-2'
// DUMP-NEXT: | `-DeclRefExpr {{.+}} 'bool' NonTypeTemplateParm {{.+}} 'U3' 'bool'
// DUMP-NEXT: |-TemplateTypeParmDecl {{.+}} referenced class depth 0 index 0 U1
@@ -204,8 +205,8 @@ namespace TestPartialSpecNTTP {
template <typename U1, bool U3, bool U2>
struct Template2<Template1<U1, U2>, U3> {};
// DUMP: ClassTemplatePartialSpecializationDecl {{.+}} struct Template2 definition explicit_specialization
-// DUMP: |-TemplateArgument type 'Template1<type-parameter-0-0, value-parameter-0-2>'
-// DUMP-NEXT: | `-TemplateSpecializationType {{.+}} 'Template1<type-parameter-0-0, value-parameter-0-2>' dependent
+// DUMP: |-TemplateArgument type 'TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-2>'
+// DUMP-NEXT: | `-TemplateSpecializationType {{.+}} 'TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-2>' dependent
// DUMP-NEXT: | |-name: 'TestPartialSpecNTTP::Template1'
// DUMP-NEXT: | | `-ClassTemplateDecl {{.+}} Template1
// DUMP-NEXT: | |-TemplateArgument type 'type-parameter-0-0'
@@ -220,6 +221,22 @@ namespace TestPartialSpecNTTP {
// DUMP-NEXT: `-CXXRecordDecl {{.+}} implicit struct Template2
} // namespace TestPartialSpecNTTP
+namespace GH153540 {
+// DUMP-LABEL: NamespaceDecl {{.*}} GH153540{{$}}
+
+ namespace N {
+ template<typename T> struct S { S(T); };
+ }
+ void f() {
+ N::S(0);
+ }
+
+// DUMP: FunctionDecl {{.*}} f 'void ()'
+// DUMP-NEXT: CompoundStmt
+// DUMP-NEXT: CXXFunctionalCastExpr {{.*}} 'N::S<int>':'GH153540::N::S<int>'
+// DUMP-NEXT: CXXConstructExpr {{.*}} <col:5, col:11> 'N::S<int>':'GH153540::N::S<int>' 'void (int)'
+} // namespace GH153540
+
// NOTE: CHECK lines have been autogenerated by gen_ast_dump_json_test.py
@@ -621,7 +638,6 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: }
// JSON-NEXT: },
// JSON-NEXT: "isImplicit": true,
-// JSON-NEXT: "isReferenced": true,
// JSON-NEXT: "name": "foo",
// JSON-NEXT: "tagUsed": "struct"
// JSON-NEXT: },
@@ -4109,7 +4125,7 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "isImplicit": true,
// JSON-NEXT: "name": "<deduction guide for A>",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "auto () -> A<T>"
+// JSON-NEXT: "qualType": "auto () -> test3::A<T>"
// JSON-NEXT: }
// JSON-NEXT: }
// JSON-NEXT: ]
@@ -4185,7 +4201,7 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "isImplicit": true,
// JSON-NEXT: "name": "<deduction guide for A>",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "auto (A<T>) -> A<T>"
+// JSON-NEXT: "qualType": "auto (test3::A<T>) -> test3::A<T>"
// JSON-NEXT: },
// JSON-NEXT: "inner": [
// JSON-NEXT: {
@@ -4209,7 +4225,7 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: }
// JSON-NEXT: },
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "A<T>"
+// JSON-NEXT: "qualType": "test3::A<T>"
// JSON-NEXT: }
// JSON-NEXT: }
// JSON-NEXT: ]
@@ -6630,8 +6646,8 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6356,
-// JSON-NEXT: "line": 179,
+// JSON-NEXT: "offset": 6425,
+// JSON-NEXT: "line": 180,
// JSON-NEXT: "col": 1,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -6889,6 +6905,15 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "inner": [
// JSON-NEXT: {
// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "DependentNameType",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "U::test"
+// JSON-NEXT: },
+// JSON-NEXT: "isDependent": true,
+// JSON-NEXT: "isInstantiationDependent": true
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "BuiltinType",
// JSON-NEXT: "type": {
// JSON-NEXT: "qualType": "int"
@@ -6938,7 +6963,7 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "DependentTemplateSpecializationType",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "template V<int>"
+// JSON-NEXT: "qualType": "U::template V<int>"
// JSON-NEXT: },
// JSON-NEXT: "isDependent": true,
// JSON-NEXT: "isInstantiationDependent": true
@@ -6964,20 +6989,20 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NamespaceDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6409,
-// JSON-NEXT: "line": 181,
+// JSON-NEXT: "offset": 6478,
+// JSON-NEXT: "line": 182,
// JSON-NEXT: "col": 11,
// JSON-NEXT: "tokLen": 19
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6399,
+// JSON-NEXT: "offset": 6468,
// JSON-NEXT: "col": 1,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 9184,
-// JSON-NEXT: "line": 221,
+// JSON-NEXT: "offset": 9336,
+// JSON-NEXT: "line": 222,
// JSON-NEXT: "col": 1,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -6988,19 +7013,19 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "ClassTemplateDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6532,
-// JSON-NEXT: "line": 183,
+// JSON-NEXT: "offset": 6601,
+// JSON-NEXT: "line": 184,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6494,
+// JSON-NEXT: "offset": 6563,
// JSON-NEXT: "col": 3,
// JSON-NEXT: "tokLen": 8
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6543,
+// JSON-NEXT: "offset": 6612,
// JSON-NEXT: "col": 52,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -7011,18 +7036,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "TemplateTypeParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6510,
+// JSON-NEXT: "offset": 6579,
// JSON-NEXT: "col": 19,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6504,
+// JSON-NEXT: "offset": 6573,
// JSON-NEXT: "col": 13,
// JSON-NEXT: "tokLen": 5
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6510,
+// JSON-NEXT: "offset": 6579,
// JSON-NEXT: "col": 19,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: }
@@ -7036,18 +7061,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6520,
+// JSON-NEXT: "offset": 6589,
// JSON-NEXT: "col": 29,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6515,
+// JSON-NEXT: "offset": 6584,
// JSON-NEXT: "col": 24,
// JSON-NEXT: "tokLen": 4
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6520,
+// JSON-NEXT: "offset": 6589,
// JSON-NEXT: "col": 29,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: }
@@ -7063,18 +7088,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "CXXRecordDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6532,
+// JSON-NEXT: "offset": 6601,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6525,
+// JSON-NEXT: "offset": 6594,
// JSON-NEXT: "col": 34,
// JSON-NEXT: "tokLen": 6
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6543,
+// JSON-NEXT: "offset": 6612,
// JSON-NEXT: "col": 52,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -7137,18 +7162,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "CXXRecordDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6532,
+// JSON-NEXT: "offset": 6601,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6525,
+// JSON-NEXT: "offset": 6594,
// JSON-NEXT: "col": 34,
// JSON-NEXT: "tokLen": 6
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6532,
+// JSON-NEXT: "offset": 6601,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: }
@@ -7165,19 +7190,19 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "ClassTemplateDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6586,
-// JSON-NEXT: "line": 184,
+// JSON-NEXT: "offset": 6655,
+// JSON-NEXT: "line": 185,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6548,
+// JSON-NEXT: "offset": 6617,
// JSON-NEXT: "col": 3,
// JSON-NEXT: "tokLen": 8
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6597,
+// JSON-NEXT: "offset": 6666,
// JSON-NEXT: "col": 52,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -7188,18 +7213,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "TemplateTypeParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6564,
+// JSON-NEXT: "offset": 6633,
// JSON-NEXT: "col": 19,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6558,
+// JSON-NEXT: "offset": 6627,
// JSON-NEXT: "col": 13,
// JSON-NEXT: "tokLen": 5
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6564,
+// JSON-NEXT: "offset": 6633,
// JSON-NEXT: "col": 19,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: }
@@ -7213,18 +7238,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6574,
+// JSON-NEXT: "offset": 6643,
// JSON-NEXT: "col": 29,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6569,
+// JSON-NEXT: "offset": 6638,
// JSON-NEXT: "col": 24,
// JSON-NEXT: "tokLen": 4
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6574,
+// JSON-NEXT: "offset": 6643,
// JSON-NEXT: "col": 29,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: }
@@ -7240,18 +7265,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "CXXRecordDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6586,
+// JSON-NEXT: "offset": 6655,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6579,
+// JSON-NEXT: "offset": 6648,
// JSON-NEXT: "col": 34,
// JSON-NEXT: "tokLen": 6
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6597,
+// JSON-NEXT: "offset": 6666,
// JSON-NEXT: "col": 52,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -7314,18 +7339,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "CXXRecordDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6586,
+// JSON-NEXT: "offset": 6655,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6579,
+// JSON-NEXT: "offset": 6648,
// JSON-NEXT: "col": 34,
// JSON-NEXT: "tokLen": 6
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6586,
+// JSON-NEXT: "offset": 6655,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: }
@@ -7342,21 +7367,21 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "ClassTemplatePartialSpecializationDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6650,
-// JSON-NEXT: "line": 187,
+// JSON-NEXT: "offset": 6719,
+// JSON-NEXT: "line": 188,
// JSON-NEXT: "col": 10,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6603,
-// JSON-NEXT: "line": 186,
+// JSON-NEXT: "offset": 6672,
+// JSON-NEXT: "line": 187,
// JSON-NEXT: "col": 3,
// JSON-NEXT: "tokLen": 8
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6684,
-// JSON-NEXT: "line": 187,
+// JSON-NEXT: "offset": 6753,
+// JSON-NEXT: "line": 188,
// JSON-NEXT: "col": 44,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -7418,14 +7443,14 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: {
// JSON-NEXT: "kind": "TemplateArgument",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "Template1<type-parameter-0-0, value-parameter-0-1>"
+// JSON-NEXT: "qualType": "TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-1>"
// JSON-NEXT: },
// JSON-NEXT: "inner": [
// JSON-NEXT: {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "TemplateSpecializationType",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "Template1<type-parameter-0-0, value-parameter-0-1>"
+// JSON-NEXT: "qualType": "TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-1>"
// JSON-NEXT: },
// JSON-NEXT: "isDependent": true,
// JSON-NEXT: "isInstantiationDependent": true,
@@ -7463,15 +7488,14 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "kind": "DeclRefExpr",
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6520,
-// JSON-NEXT: "line": 183,
-// JSON-NEXT: "col": 29,
-// JSON-NEXT: "tokLen": 3
+// JSON-NEXT: "offset": 6743,
+// JSON-NEXT: "col": 34,
+// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6520,
-// JSON-NEXT: "col": 29,
-// JSON-NEXT: "tokLen": 3
+// JSON-NEXT: "offset": 6743,
+// JSON-NEXT: "col": 34,
+// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
// JSON-NEXT: },
// JSON-NEXT: "type": {
@@ -7481,7 +7505,7 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "referencedDecl": {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
-// JSON-NEXT: "name": "TA2",
+// JSON-NEXT: "name": "U2",
// JSON-NEXT: "type": {
// JSON-NEXT: "qualType": "bool"
// JSON-NEXT: }
@@ -7503,13 +7527,12 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "kind": "DeclRefExpr",
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6679,
-// JSON-NEXT: "line": 187,
+// JSON-NEXT: "offset": 6748,
// JSON-NEXT: "col": 39,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6679,
+// JSON-NEXT: "offset": 6748,
// JSON-NEXT: "col": 39,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7533,19 +7556,19 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "TemplateTypeParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6619,
-// JSON-NEXT: "line": 186,
+// JSON-NEXT: "offset": 6688,
+// JSON-NEXT: "line": 187,
// JSON-NEXT: "col": 19,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6613,
+// JSON-NEXT: "offset": 6682,
// JSON-NEXT: "col": 13,
// JSON-NEXT: "tokLen": 5
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6619,
+// JSON-NEXT: "offset": 6688,
// JSON-NEXT: "col": 19,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7560,18 +7583,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6628,
+// JSON-NEXT: "offset": 6697,
// JSON-NEXT: "col": 28,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6623,
+// JSON-NEXT: "offset": 6692,
// JSON-NEXT: "col": 23,
// JSON-NEXT: "tokLen": 4
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6628,
+// JSON-NEXT: "offset": 6697,
// JSON-NEXT: "col": 28,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7588,18 +7611,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6637,
+// JSON-NEXT: "offset": 6706,
// JSON-NEXT: "col": 37,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6632,
+// JSON-NEXT: "offset": 6701,
// JSON-NEXT: "col": 32,
// JSON-NEXT: "tokLen": 4
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6637,
+// JSON-NEXT: "offset": 6706,
// JSON-NEXT: "col": 37,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7616,19 +7639,19 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "CXXRecordDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6650,
-// JSON-NEXT: "line": 187,
+// JSON-NEXT: "offset": 6719,
+// JSON-NEXT: "line": 188,
// JSON-NEXT: "col": 10,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6643,
+// JSON-NEXT: "offset": 6712,
// JSON-NEXT: "col": 3,
// JSON-NEXT: "tokLen": 6
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6650,
+// JSON-NEXT: "offset": 6719,
// JSON-NEXT: "col": 10,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: }
@@ -7643,21 +7666,21 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "ClassTemplatePartialSpecializationDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 7925,
-// JSON-NEXT: "line": 205,
+// JSON-NEXT: "offset": 8035,
+// JSON-NEXT: "line": 206,
// JSON-NEXT: "col": 10,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7875,
-// JSON-NEXT: "line": 204,
+// JSON-NEXT: "offset": 7985,
+// JSON-NEXT: "line": 205,
// JSON-NEXT: "col": 3,
// JSON-NEXT: "tokLen": 8
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7959,
-// JSON-NEXT: "line": 205,
+// JSON-NEXT: "offset": 8069,
+// JSON-NEXT: "line": 206,
// JSON-NEXT: "col": 44,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -7719,14 +7742,14 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: {
// JSON-NEXT: "kind": "TemplateArgument",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "Template1<type-parameter-0-0, value-parameter-0-2>"
+// JSON-NEXT: "qualType": "TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-2>"
// JSON-NEXT: },
// JSON-NEXT: "inner": [
// JSON-NEXT: {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "TemplateSpecializationType",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "Template1<type-parameter-0-0, value-parameter-0-2>"
+// JSON-NEXT: "qualType": "TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-2>"
// JSON-NEXT: },
// JSON-NEXT: "isDependent": true,
// JSON-NEXT: "isInstantiationDependent": true,
@@ -7764,12 +7787,12 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "kind": "DeclRefExpr",
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7949,
+// JSON-NEXT: "offset": 8059,
// JSON-NEXT: "col": 34,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7949,
+// JSON-NEXT: "offset": 8059,
// JSON-NEXT: "col": 34,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7803,12 +7826,12 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "kind": "DeclRefExpr",
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7954,
+// JSON-NEXT: "offset": 8064,
// JSON-NEXT: "col": 39,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7954,
+// JSON-NEXT: "offset": 8064,
// JSON-NEXT: "col": 39,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7832,19 +7855,19 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "TemplateTypeParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 7894,
-// JSON-NEXT: "line": 204,
+// JSON-NEXT: "offset": 8004,
+// JSON-NEXT: "line": 205,
// JSON-NEXT: "col": 22,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7885,
+// JSON-NEXT: "offset": 7995,
// JSON-NEXT: "col": 13,
// JSON-NEXT: "tokLen": 8
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7894,
+// JSON-NEXT: "offset": 8004,
// JSON-NEXT: "col": 22,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7859,18 +7882,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 7903,
+// JSON-NEXT: "offset": 8013,
// JSON-NEXT: "col": 31,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7898,
+// JSON-NEXT: "offset": 8008,
// JSON-NEXT: "col": 26,
// JSON-NEXT: "tokLen": 4
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7903,
+// JSON-NEXT: "offset": 8013,
// JSON-NEXT: "col": 31,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7887,18 +7910,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 7912,
+// JSON-NEXT: "offset": 8022,
// JSON-NEXT: "col": 40,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7907,
+// JSON-NEXT: "offset": 8017,
// JSON-NEXT: "col": 35,
// JSON-NEXT: "tokLen": 4
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7912,
+// JSON-NEXT: "offset": 8022,
// JSON-NEXT: "col": 40,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7915,19 +7938,19 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "CXXRecordDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 7925,
-// JSON-NEXT: "line": 205,
+// JSON-NEXT: "offset": 8035,
+// JSON-NEXT: "line": 206,
// JSON-NEXT: "col": 10,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7918,
+// JSON-NEXT: "offset": 8028,
// JSON-NEXT: "col": 3,
// JSON-NEXT: "tokLen": 6
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7925,
+// JSON-NEXT: "offset": 8035,
// JSON-NEXT: "col": 10,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: }
@@ -7939,6 +7962,959 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: ]
// JSON-NEXT: }
// JSON-NEXT: ]
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "NamespaceDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9382,
+// JSON-NEXT: "line": 224,
+// JSON-NEXT: "col": 11,
+// JSON-NEXT: "tokLen": 8
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9372,
+// JSON-NEXT: "col": 1,
+// JSON-NEXT: "tokLen": 9
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9791,
+// JSON-NEXT: "line": 238,
+// JSON-NEXT: "col": 1,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "name": "GH153540",
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "NamespaceDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9456,
+// JSON-NEXT: "line": 227,
+// JSON-NEXT: "col": 13,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9446,
+// JSON-NEXT: "col": 3,
+// JSON-NEXT: "tokLen": 9
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9507,
+// JSON-NEXT: "line": 229,
+// JSON-NEXT: "col": 3,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "name": "N",
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "ClassTemplateDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "line": 228,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9464,
+// JSON-NEXT: "col": 5,
+// JSON-NEXT: "tokLen": 8
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9502,
+// JSON-NEXT: "col": 43,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "name": "S",
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "TemplateTypeParmDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9482,
+// JSON-NEXT: "col": 23,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9473,
+// JSON-NEXT: "col": 14,
+// JSON-NEXT: "tokLen": 8
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9482,
+// JSON-NEXT: "col": 23,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isReferenced": true,
+// JSON-NEXT: "name": "T",
+// JSON-NEXT: "tagUsed": "typename",
+// JSON-NEXT: "depth": 0,
+// JSON-NEXT: "index": 0
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXRecordDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9485,
+// JSON-NEXT: "col": 26,
+// JSON-NEXT: "tokLen": 6
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9502,
+// JSON-NEXT: "col": 43,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "name": "S",
+// JSON-NEXT: "tagUsed": "struct",
+// JSON-NEXT: "completeDefinition": true,
+// JSON-NEXT: "definitionData": {
+// JSON-NEXT: "canConstDefaultInit": true,
+// JSON-NEXT: "copyAssign": {
+// JSON-NEXT: "hasConstParam": true,
+// JSON-NEXT: "implicitHasConstParam": true,
+// JSON-NEXT: "needsImplicit": true,
+// JSON-NEXT: "simple": true,
+// JSON-NEXT: "trivial": true
+// JSON-NEXT: },
+// JSON-NEXT: "copyCtor": {
+// JSON-NEXT: "hasConstParam": true,
+// JSON-NEXT: "implicitHasConstParam": true,
+// JSON-NEXT: "needsImplicit": true,
+// JSON-NEXT: "simple": true,
+// JSON-NEXT: "trivial": true
+// JSON-NEXT: },
+// JSON-NEXT: "defaultCtor": {
+// JSON-NEXT: "defaultedIsConstexpr": true
+// JSON-NEXT: },
+// JSON-NEXT: "dtor": {
+// JSON-NEXT: "irrelevant": true,
+// JSON-NEXT: "needsImplicit": true,
+// JSON-NEXT: "simple": true,
+// JSON-NEXT: "trivial": true
+// JSON-NEXT: },
+// JSON-NEXT: "hasUserDeclaredConstructor": true,
+// JSON-NEXT: "isEmpty": true,
+// JSON-NEXT: "isStandardLayout": true,
+// JSON-NEXT: "isTriviallyCopyable": true,
+// JSON-NEXT: "moveAssign": {
+// JSON-NEXT: "exists": true,
+// JSON-NEXT: "needsImplicit": true,
+// JSON-NEXT: "simple": true,
+// JSON-NEXT: "trivial": true
+// JSON-NEXT: },
+// JSON-NEXT: "moveCtor": {
+// JSON-NEXT: "exists": true,
+// JSON-NEXT: "needsImplicit": true,
+// JSON-NEXT: "simple": true,
+// JSON-NEXT: "trivial": true
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXRecordDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9485,
+// JSON-NEXT: "col": 26,
+// JSON-NEXT: "tokLen": 6
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isImplicit": true,
+// JSON-NEXT: "name": "S",
+// JSON-NEXT: "tagUsed": "struct"
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXConstructorDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9496,
+// JSON-NEXT: "col": 37,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9496,
+// JSON-NEXT: "col": 37,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9499,
+// JSON-NEXT: "col": 40,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "name": "S<T>",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "void (T)"
+// JSON-NEXT: },
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "ParmVarDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9499,
+// JSON-NEXT: "col": 40,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9498,
+// JSON-NEXT: "col": 39,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9498,
+// JSON-NEXT: "col": 39,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "T"
+// JSON-NEXT: }
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "ClassTemplateSpecializationDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9464,
+// JSON-NEXT: "col": 5,
+// JSON-NEXT: "tokLen": 8
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9502,
+// JSON-NEXT: "col": 43,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "name": "S",
+// JSON-NEXT: "tagUsed": "struct",
+// JSON-NEXT: "completeDefinition": true,
+// JSON-NEXT: "definitionData": {
+// JSON-NEXT: "canConstDefaultInit": true,
+// JSON-NEXT: "canPassInRegisters": true,
+// JSON-NEXT: "copyAssign": {
+// JSON-NEXT: "hasConstParam": true,
+// JSON-NEXT: "implicitHasConstParam": true,
+// JSON-NEXT: "needsImplicit": true,
+// JSON-NEXT: "simple": true,
+// JSON-NEXT: "trivial": true
+// JSON-NEXT: },
+// JSON-NEXT: "copyCtor": {
+// JSON-NEXT: "hasConstParam": true,
+// JSON-NEXT: "implicitHasConstParam": true,
+// JSON-NEXT: "simple": true,
+// JSON-NEXT: "trivial": true
+// JSON-NEXT: },
+// JSON-NEXT: "defaultCtor": {
+// JSON-NEXT: "defaultedIsConstexpr": true
+// JSON-NEXT: },
+// JSON-NEXT: "dtor": {
+// JSON-NEXT: "irrelevant": true,
+// JSON-NEXT: "simple": true,
+// JSON-NEXT: "trivial": true
+// JSON-NEXT: },
+// JSON-NEXT: "hasUserDeclaredConstructor": true,
+// JSON-NEXT: "isEmpty": true,
+// JSON-NEXT: "isStandardLayout": true,
+// JSON-NEXT: "isTriviallyCopyable": true,
+// JSON-NEXT: "moveAssign": {
+// JSON-NEXT: "exists": true,
+// JSON-NEXT: "needsImplicit": true,
+// JSON-NEXT: "simple": true,
+// JSON-NEXT: "trivial": true
+// JSON-NEXT: },
+// JSON-NEXT: "moveCtor": {
+// JSON-NEXT: "exists": true,
+// JSON-NEXT: "simple": true,
+// JSON-NEXT: "trivial": true
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "kind": "TemplateArgument",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "int"
+// JSON-NEXT: },
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "BuiltinType",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "int"
+// JSON-NEXT: }
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXRecordDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9485,
+// JSON-NEXT: "col": 26,
+// JSON-NEXT: "tokLen": 6
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isImplicit": true,
+// JSON-NEXT: "name": "S",
+// JSON-NEXT: "tagUsed": "struct"
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXConstructorDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9496,
+// JSON-NEXT: "col": 37,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9496,
+// JSON-NEXT: "col": 37,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9499,
+// JSON-NEXT: "col": 40,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isUsed": true,
+// JSON-NEXT: "name": "S",
+// JSON-NEXT: "mangledName": "_ZN8GH1535401N1SIiEC1Ei",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "void (int)"
+// JSON-NEXT: },
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "ParmVarDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9499,
+// JSON-NEXT: "col": 40,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9498,
+// JSON-NEXT: "col": 39,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9498,
+// JSON-NEXT: "col": 39,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "int"
+// JSON-NEXT: }
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXConstructorDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isImplicit": true,
+// JSON-NEXT: "name": "S",
+// JSON-NEXT: "mangledName": "_ZN8GH1535401N1SIiEC1ERKS2_",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "void (const S<int> &)"
+// JSON-NEXT: },
+// JSON-NEXT: "inline": true,
+// JSON-NEXT: "constexpr": true,
+// JSON-NEXT: "explicitlyDefaulted": "default",
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "ParmVarDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "const S<int> &"
+// JSON-NEXT: }
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXConstructorDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isImplicit": true,
+// JSON-NEXT: "name": "S",
+// JSON-NEXT: "mangledName": "_ZN8GH1535401N1SIiEC1EOS2_",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "void (S<int> &&)"
+// JSON-NEXT: },
+// JSON-NEXT: "inline": true,
+// JSON-NEXT: "constexpr": true,
+// JSON-NEXT: "explicitlyDefaulted": "default",
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "ParmVarDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "S<int> &&"
+// JSON-NEXT: }
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXDestructorDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isImplicit": true,
+// JSON-NEXT: "isReferenced": true,
+// JSON-NEXT: "name": "~S",
+// JSON-NEXT: "mangledName": "_ZN8GH1535401N1SIiED1Ev",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "void () noexcept"
+// JSON-NEXT: },
+// JSON-NEXT: "inline": true,
+// JSON-NEXT: "constexpr": true,
+// JSON-NEXT: "explicitlyDefaulted": "default"
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "FunctionTemplateDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9496,
+// JSON-NEXT: "col": 37,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9464,
+// JSON-NEXT: "col": 5,
+// JSON-NEXT: "tokLen": 8
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9499,
+// JSON-NEXT: "col": 40,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isImplicit": true,
+// JSON-NEXT: "name": "<deduction guide for S>",
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "TemplateTypeParmDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9482,
+// JSON-NEXT: "col": 23,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9473,
+// JSON-NEXT: "col": 14,
+// JSON-NEXT: "tokLen": 8
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9482,
+// JSON-NEXT: "col": 23,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isReferenced": true,
+// JSON-NEXT: "name": "T",
+// JSON-NEXT: "tagUsed": "typename",
+// JSON-NEXT: "depth": 0,
+// JSON-NEXT: "index": 0
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXDeductionGuideDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9496,
+// JSON-NEXT: "col": 37,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9496,
+// JSON-NEXT: "col": 37,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9499,
+// JSON-NEXT: "col": 40,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isImplicit": true,
+// JSON-NEXT: "name": "<deduction guide for S>",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "auto (T) -> GH153540::N::S<T>"
+// JSON-NEXT: },
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "ParmVarDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9499,
+// JSON-NEXT: "col": 40,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9498,
+// JSON-NEXT: "col": 39,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9498,
+// JSON-NEXT: "col": 39,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "T"
+// JSON-NEXT: }
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXDeductionGuideDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9496,
+// JSON-NEXT: "col": 37,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9496,
+// JSON-NEXT: "col": 37,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9499,
+// JSON-NEXT: "col": 40,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isImplicit": true,
+// JSON-NEXT: "isUsed": true,
+// JSON-NEXT: "name": "<deduction guide for S>",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "auto (int) -> GH153540::N::S<int>"
+// JSON-NEXT: },
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "kind": "TemplateArgument",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "int"
+// JSON-NEXT: },
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "BuiltinType",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "int"
+// JSON-NEXT: }
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "ParmVarDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9499,
+// JSON-NEXT: "col": 40,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9498,
+// JSON-NEXT: "col": 39,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9498,
+// JSON-NEXT: "col": 39,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "int"
+// JSON-NEXT: }
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "FunctionTemplateDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9464,
+// JSON-NEXT: "col": 5,
+// JSON-NEXT: "tokLen": 8
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isImplicit": true,
+// JSON-NEXT: "name": "<deduction guide for S>",
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "TemplateTypeParmDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9482,
+// JSON-NEXT: "col": 23,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9473,
+// JSON-NEXT: "col": 14,
+// JSON-NEXT: "tokLen": 8
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9482,
+// JSON-NEXT: "col": 23,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isReferenced": true,
+// JSON-NEXT: "name": "T",
+// JSON-NEXT: "tagUsed": "typename",
+// JSON-NEXT: "depth": 0,
+// JSON-NEXT: "index": 0
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXDeductionGuideDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "isImplicit": true,
+// JSON-NEXT: "name": "<deduction guide for S>",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "auto (GH153540::N::S<T>) -> GH153540::N::S<T>"
+// JSON-NEXT: },
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "ParmVarDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9492,
+// JSON-NEXT: "col": 33,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "GH153540::N::S<T>"
+// JSON-NEXT: }
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "FunctionDecl",
+// JSON-NEXT: "loc": {
+// JSON-NEXT: "offset": 9516,
+// JSON-NEXT: "line": 230,
+// JSON-NEXT: "col": 8,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9511,
+// JSON-NEXT: "col": 3,
+// JSON-NEXT: "tokLen": 4
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9537,
+// JSON-NEXT: "line": 232,
+// JSON-NEXT: "col": 3,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "name": "f",
+// JSON-NEXT: "mangledName": "_ZN8GH1535401fEv",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "void ()"
+// JSON-NEXT: },
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CompoundStmt",
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9520,
+// JSON-NEXT: "line": 230,
+// JSON-NEXT: "col": 12,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9537,
+// JSON-NEXT: "line": 232,
+// JSON-NEXT: "col": 3,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXFunctionalCastExpr",
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9526,
+// JSON-NEXT: "line": 231,
+// JSON-NEXT: "col": 5,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9532,
+// JSON-NEXT: "col": 11,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "type": {
+// JSON-NEXT: "desugaredQualType": "GH153540::N::S<int>",
+// JSON-NEXT: "qualType": "N::S<int>"
+// JSON-NEXT: },
+// JSON-NEXT: "valueCategory": "prvalue",
+// JSON-NEXT: "castKind": "ConstructorConversion",
+// JSON-NEXT: "conversionFunc": {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXConstructorDecl",
+// JSON-NEXT: "name": "S",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "void (int)"
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "CXXConstructExpr",
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9526,
+// JSON-NEXT: "col": 5,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9532,
+// JSON-NEXT: "col": 11,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "type": {
+// JSON-NEXT: "desugaredQualType": "GH153540::N::S<int>",
+// JSON-NEXT: "qualType": "N::S<int>"
+// JSON-NEXT: },
+// JSON-NEXT: "valueCategory": "prvalue",
+// JSON-NEXT: "ctorType": {
+// JSON-NEXT: "qualType": "void (int)"
+// JSON-NEXT: },
+// JSON-NEXT: "hadMultipleCandidates": true,
+// JSON-NEXT: "constructionKind": "complete",
+// JSON-NEXT: "inner": [
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "IntegerLiteral",
+// JSON-NEXT: "range": {
+// JSON-NEXT: "begin": {
+// JSON-NEXT: "offset": 9531,
+// JSON-NEXT: "col": 10,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: },
+// JSON-NEXT: "end": {
+// JSON-NEXT: "offset": 9531,
+// JSON-NEXT: "col": 10,
+// JSON-NEXT: "tokLen": 1
+// JSON-NEXT: }
+// JSON-NEXT: },
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "int"
+// JSON-NEXT: },
+// JSON-NEXT: "valueCategory": "prvalue",
+// JSON-NEXT: "value": "0"
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: }
+// JSON-NEXT: ]
+// JSON-NEXT: }
+// JSON-NEXT: ]
// JSON-NEXT: }
// JSON-NEXT: ]
// JSON-NEXT: }
diff --git a/clang/test/AST/ast-dump-traits.cpp b/clang/test/AST/ast-dump-traits.cpp
index 72d2a2a..b844fd6 100644
--- a/clang/test/AST/ast-dump-traits.cpp
+++ b/clang/test/AST/ast-dump-traits.cpp
@@ -40,9 +40,8 @@ void test_unary_expr_or_type_trait() {
// CHECK-NEXT: | | `-EnumDecl {{.*}} <col:3, col:11> col:8{{( imported)?}} referenced E
// CHECK-NEXT: | |-CStyleCastExpr {{.*}} <line:13:3, col:21> 'void' <ToVoid>
// CHECK-NEXT: | | `-TypeTraitExpr {{.*}} <col:10, col:21> 'bool' __is_enum
-// CHECK-NEXT: | | `-ElaboratedType {{.*}} 'E' sugar
-// CHECK-NEXT: | | `-EnumType {{.*}} 'E'
-// CHECK-NEXT: | | `-Enum {{.*}} 'E'
+// CHECK-NEXT: | | `-EnumType {{.*}} 'E'
+// CHECK-NEXT: | | `-Enum {{.*}} 'E'
// CHECK-NEXT: | |-CStyleCastExpr {{.*}} <line:15:3, col:30> 'void' <ToVoid>
// CHECK-NEXT: | | `-TypeTraitExpr {{.*}} <col:10, col:30> 'bool' __is_same
// CHECK-NEXT: | | |-BuiltinType {{.*}} 'int'
diff --git a/clang/test/AST/ast-dump-types-json.cpp b/clang/test/AST/ast-dump-types-json.cpp
index cc4d4d9..aac6027 100644
--- a/clang/test/AST/ast-dump-types-json.cpp
+++ b/clang/test/AST/ast-dump-types-json.cpp
@@ -51,30 +51,20 @@ using ::TestUsingShadowDeclType;
// CHECK-NEXT: },
// CHECK-NEXT: "name": "TestElaboratedType1",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "T",
// CHECK-NEXT: "qualType": "struct T"
// CHECK-NEXT: },
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "ElaboratedType",
+// CHECK-NEXT: "kind": "RecordType",
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "struct T"
// CHECK-NEXT: },
-// CHECK-NEXT: "inner": [
-// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "RecordType",
-// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "T"
-// CHECK-NEXT: },
-// CHECK-NEXT: "decl": {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "CXXRecordDecl",
-// CHECK-NEXT: "name": "T"
-// CHECK-NEXT: }
-// CHECK-NEXT: }
-// CHECK-NEXT: ]
+// CHECK-NEXT: "decl": {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "CXXRecordDecl",
+// CHECK-NEXT: "name": "T"
+// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: ]
// CHECK-NEXT: }
@@ -108,25 +98,16 @@ using ::TestUsingShadowDeclType;
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "ElaboratedType",
+// CHECK-NEXT: "kind": "RecordType",
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "NS::S"
// CHECK-NEXT: },
// CHECK-NEXT: "qualifier": "NS::",
-// CHECK-NEXT: "inner": [
-// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "RecordType",
-// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "NS::S"
-// CHECK-NEXT: },
-// CHECK-NEXT: "decl": {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "CXXRecordDecl",
-// CHECK-NEXT: "name": "S"
-// CHECK-NEXT: }
-// CHECK-NEXT: }
-// CHECK-NEXT: ]
+// CHECK-NEXT: "decl": {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "CXXRecordDecl",
+// CHECK-NEXT: "name": "S"
+// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: ]
// CHECK-NEXT: }
@@ -538,7 +519,39 @@ using ::TestUsingShadowDeclType;
// CHECK-NEXT: },
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x0"
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "TypedefDecl",
+// CHECK-NEXT: "loc": {
+// CHECK-NEXT: "offset": 506,
+// CHECK-NEXT: "line": 23,
+// CHECK-NEXT: "col": 13,
+// CHECK-NEXT: "tokLen": 23
+// CHECK-NEXT: },
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {
+// CHECK-NEXT: "offset": 494,
+// CHECK-NEXT: "col": 1,
+// CHECK-NEXT: "tokLen": 7
+// CHECK-NEXT: },
+// CHECK-NEXT: "end": {
+// CHECK-NEXT: "offset": 506,
+// CHECK-NEXT: "col": 13,
+// CHECK-NEXT: "tokLen": 23
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: "name": "TestUsingShadowDeclType",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "int"
+// CHECK-NEXT: },
+// CHECK-NEXT: "inner": [
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "BuiltinType",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "int"
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
// CHECK-NEXT: }
// CHECK-NEXT: ]
// CHECK-NEXT: }
diff --git a/clang/test/AST/ast-dump-using-template.cpp b/clang/test/AST/ast-dump-using-template.cpp
index 2c95849..a5a0e4d 100644
--- a/clang/test/AST/ast-dump-using-template.cpp
+++ b/clang/test/AST/ast-dump-using-template.cpp
@@ -19,40 +19,36 @@ using ns::S2;
template<typename T>
using A = S<T>;
// CHECK: TypeAliasDecl
-// CHECK-NEXT: `-ElaboratedType {{.*}} 'S<T>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType {{.*}} 'S<T>' dependent
-// CHECK-NEXT: |-name: 'S':'ns::S' qualified
-// CHECK-NEXT: | |-UsingShadowDecl {{.+}} ClassTemplate {{.+}} 'S'
+// CHECK-NEXT: `-TemplateSpecializationType {{.*}} 'S<T>' dependent
+// CHECK-NEXT: |-name: 'S':'ns::S' qualified
+// CHECK-NEXT:    | |-UsingShadowDecl {{.+}} ClassTemplate {{.+}} 'S'
// TemplateName in TemplateArgument.
template <template <typename> class T> class X {};
using B = X<S>;
// CHECK: TypeAliasDecl
-// CHECK-NEXT: `-ElaboratedType {{.*}} 'X<S>' sugar
-// CHECK-NEXT: `-TemplateSpecializationType {{.*}} 'X<S>' sugar
-// CHECK-NEXT: |-name: 'X' qualified
-// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} X
-// CHECK-NEXT: |-TemplateArgument template 'S':'ns::S' qualified
-// CHECK-NEXT: | |-UsingShadowDecl {{.*}} implicit ClassTemplate {{.*}} 'S'
-// CHECK-NEXT: | `-target: ClassTemplateDecl {{.*}} S
-// CHECK-NEXT: `-RecordType {{.*}} 'X<ns::S>'
-// CHECK-NEXT: `-ClassTemplateSpecialization {{.*}} 'X'
+// CHECK-NEXT: `-TemplateSpecializationType {{.*}} 'X<S>' sugar
+// CHECK-NEXT: |-name: 'X' qualified
+// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} X
+// CHECK-NEXT: |-TemplateArgument template 'S':'ns::S' qualified
+// CHECK-NEXT: | |-UsingShadowDecl {{.*}} implicit ClassTemplate {{.*}} 'S'
+// CHECK-NEXT: | `-target: ClassTemplateDecl {{.*}} S
+// CHECK-NEXT: `-RecordType {{.*}} 'X<ns::S>'
+// CHECK-NEXT: `-ClassTemplateSpecialization {{.*}} 'X'
// TemplateName in DeducedTemplateSpecializationType.
S DeducedTemplateSpecializationT(123);
using C = decltype(DeducedTemplateSpecializationT);
// CHECK: DecltypeType {{.*}}
// CHECK-NEXT: |-DeclRefExpr {{.*}}
-// CHECK-NEXT: `-ElaboratedType {{.*}} 'S<int>' sugar
-// CHECK-NEXT: `-DeducedTemplateSpecializationType {{.*}} 'ns::S<int>' sugar
-// CHECK-NEXT: |-name: 'S':'ns::S' qualified
-// CHECK-NEXT: | |-UsingShadowDecl {{.+}} 'S'
+// CHECK-NEXT: `-DeducedTemplateSpecializationType {{.*}} 'S<int>' sugar
+// CHECK-NEXT: |-name: 'S':'ns::S' qualified
+// CHECK-NEXT: | |-UsingShadowDecl {{.+}} 'S'
S2 DeducedTemplateSpecializationT2(123);
using D = decltype(DeducedTemplateSpecializationT2);
// CHECK: DecltypeType {{.*}}
// CHECK-NEXT: |-DeclRefExpr {{.*}}
-// CHECK-NEXT: `-ElaboratedType {{.*}} 'S2<int>' sugar
-// CHECK-NEXT: `-DeducedTemplateSpecializationType {{.*}} 'S2<int>' sugar
-// CHECK-NEXT: |-name: 'S2':'ns::S2' qualified
-// CHECK-NEXT: | |-UsingShadowDecl {{.+}} ClassTemplate {{.+}} 'S2'
+// CHECK-NEXT: `-DeducedTemplateSpecializationType {{.*}} 'S2<int>' sugar
+// CHECK-NEXT: |-name: 'S2':'ns::S2' qualified
+// CHECK-NEXT:    | |-UsingShadowDecl {{.+}} ClassTemplate {{.+}} 'S2'
diff --git a/clang/test/AST/ast-dump-using.cpp b/clang/test/AST/ast-dump-using.cpp
index 8e5c60d..0c8405b 100644
--- a/clang/test/AST/ast-dump-using.cpp
+++ b/clang/test/AST/ast-dump-using.cpp
@@ -9,19 +9,17 @@ using a::S;
// CHECK: UsingDecl {{.*}} a::S
// CHECK-NEXT: | `-NestedNameSpecifier Namespace {{.*}} 'a'
// CHECK-NEXT: UsingShadowDecl {{.*}} implicit CXXRecord {{.*}} 'S'
-// CHECK-NEXT: `-RecordType {{.*}} 'a::S'
+// CHECK-NEXT: `-CXXRecordDecl {{.*}} referenced struct S
typedef S f; // to dump the introduced type
// CHECK: TypedefDecl
-// CHECK-NEXT: `-ElaboratedType {{.*}} 'S' sugar
-// CHECK-NEXT: `-UsingType [[TYPE_ADDR:.*]] 'a::S' sugar
-// CHECK-NEXT: |-UsingShadow [[SHADOW_ADDR:.*]] 'S'
-// CHECK-NEXT: `-RecordType {{.*}} 'a::S'
+// CHECK-NEXT: `-UsingType [[TYPE_ADDR:.*]] 'S' sugar 'a::S'
+// CHECK-NEXT: |-UsingShadow [[SHADOW_ADDR:.*]] 'S'
+// CHECK-NEXT: `-RecordType {{.*}} 'a::S'
typedef S e; // check the same UsingType is reused.
// CHECK: TypedefDecl
-// CHECK-NEXT: `-ElaboratedType {{.*}} 'S' sugar
-// CHECK-NEXT: `-UsingType [[TYPE_ADDR]] 'a::S' sugar
-// CHECK-NEXT: |-UsingShadow [[SHADOW_ADDR]] 'S'
-// CHECK-NEXT: `-RecordType {{.*}} 'a::S'
+// CHECK-NEXT: `-UsingType [[TYPE_ADDR]] 'S' sugar 'a::S'
+// CHECK-NEXT: |-UsingShadow [[SHADOW_ADDR]] 'S'
+// CHECK-NEXT: `-RecordType {{.*}} 'a::S'
using a::x;
void foo() {
diff --git a/clang/test/AST/ast-print-openacc-combined-construct.cpp b/clang/test/AST/ast-print-openacc-combined-construct.cpp
index b4e8033..1f954cb 100644
--- a/clang/test/AST/ast-print-openacc-combined-construct.cpp
+++ b/clang/test/AST/ast-print-openacc-combined-construct.cpp
@@ -386,27 +386,18 @@ void foo() {
#pragma acc serial loop vector
for(int i = 0;i<5;++i);
-//CHECK: #pragma acc parallel loop reduction(+: iPtr)
-#pragma acc parallel loop reduction(+: iPtr)
- for(int i = 0;i<5;++i);
//CHECK: #pragma acc serial loop reduction(*: i)
#pragma acc serial loop reduction(*: i)
for(int i = 0;i<5;++i);
//CHECK: #pragma acc kernels loop reduction(max: SomeB)
#pragma acc kernels loop reduction(max: SomeB)
for(int i = 0;i<5;++i);
-//CHECK: #pragma acc parallel loop reduction(min: iPtr)
-#pragma acc parallel loop reduction(min: iPtr)
- for(int i = 0;i<5;++i);
//CHECK: #pragma acc serial loop reduction(&: i)
#pragma acc serial loop reduction(&: i)
for(int i = 0;i<5;++i);
//CHECK: #pragma acc kernels loop reduction(|: SomeB)
#pragma acc kernels loop reduction(|: SomeB)
for(int i = 0;i<5;++i);
-//CHECK: #pragma acc parallel loop reduction(^: iPtr)
-#pragma acc parallel loop reduction(^: iPtr)
- for(int i = 0;i<5;++i);
//CHECK: #pragma acc serial loop reduction(&&: i)
#pragma acc serial loop reduction(&&: i)
for(int i = 0;i<5;++i);
diff --git a/clang/test/AST/ast-print-openacc-compute-construct.cpp b/clang/test/AST/ast-print-openacc-compute-construct.cpp
index 7c3ac17..d85682f 100644
--- a/clang/test/AST/ast-print-openacc-compute-construct.cpp
+++ b/clang/test/AST/ast-print-openacc-compute-construct.cpp
@@ -135,27 +135,18 @@ void foo() {
#pragma acc parallel device_type (host)
while(true);
-//CHECK: #pragma acc parallel reduction(+: iPtr)
-#pragma acc parallel reduction(+: iPtr)
- while(true);
//CHECK: #pragma acc parallel reduction(*: i)
#pragma acc parallel reduction(*: i)
while(true);
//CHECK: #pragma acc parallel reduction(max: SomeB)
#pragma acc parallel reduction(max: SomeB)
while(true);
-//CHECK: #pragma acc parallel reduction(min: iPtr)
-#pragma acc parallel reduction(min: iPtr)
- while(true);
//CHECK: #pragma acc parallel reduction(&: i)
#pragma acc parallel reduction(&: i)
while(true);
//CHECK: #pragma acc parallel reduction(|: SomeB)
#pragma acc parallel reduction(|: SomeB)
while(true);
-//CHECK: #pragma acc parallel reduction(^: iPtr)
-#pragma acc parallel reduction(^: iPtr)
- while(true);
//CHECK: #pragma acc parallel reduction(&&: i)
#pragma acc parallel reduction(&&: i)
while(true);
diff --git a/clang/test/AST/ast-print-openacc-loop-construct.cpp b/clang/test/AST/ast-print-openacc-loop-construct.cpp
index 6971089..74c5889 100644
--- a/clang/test/AST/ast-print-openacc-loop-construct.cpp
+++ b/clang/test/AST/ast-print-openacc-loop-construct.cpp
@@ -291,30 +291,20 @@ void foo() {
#pragma acc loop vector
for(int i = 0;i<5;++i);
- int *iPtr;
bool SomeB;
-//CHECK: #pragma acc loop reduction(+: iPtr)
-#pragma acc loop reduction(+: iPtr)
- for(int i = 0;i<5;++i);
//CHECK: #pragma acc loop reduction(*: i)
#pragma acc loop reduction(*: i)
for(int i = 0;i<5;++i);
//CHECK: #pragma acc loop reduction(max: SomeB)
#pragma acc loop reduction(max: SomeB)
for(int i = 0;i<5;++i);
-//CHECK: #pragma acc loop reduction(min: iPtr)
-#pragma acc loop reduction(min: iPtr)
- for(int i = 0;i<5;++i);
//CHECK: #pragma acc loop reduction(&: i)
#pragma acc loop reduction(&: i)
for(int i = 0;i<5;++i);
//CHECK: #pragma acc loop reduction(|: SomeB)
#pragma acc loop reduction(|: SomeB)
for(int i = 0;i<5;++i);
-//CHECK: #pragma acc loop reduction(^: iPtr)
-#pragma acc loop reduction(^: iPtr)
- for(int i = 0;i<5;++i);
//CHECK: #pragma acc loop reduction(&&: i)
#pragma acc loop reduction(&&: i)
for(int i = 0;i<5;++i);
diff --git a/clang/test/AST/attr-swift_attr.m b/clang/test/AST/attr-swift_attr.m
index 6888745..766da93 100644
--- a/clang/test/AST/attr-swift_attr.m
+++ b/clang/test/AST/attr-swift_attr.m
@@ -43,7 +43,7 @@ typedef struct {
void *ptr;
} SendableStruct;
-// CHECK-LABEL: TypedefDecl {{.*}} SendableStruct 'struct SendableStruct':'SendableStruct'
+// CHECK-LABEL: TypedefDecl {{.*}} SendableStruct 'struct SendableStruct'
// CHECK: SwiftAttrAttr {{.*}} "@Sendable"
@interface TestAttrPlacementInBlock1
diff --git a/clang/test/AST/coroutine-locals-cleanup.cpp b/clang/test/AST/coroutine-locals-cleanup.cpp
index a7f524b0..b0577f9 100644
--- a/clang/test/AST/coroutine-locals-cleanup.cpp
+++ b/clang/test/AST/coroutine-locals-cleanup.cpp
@@ -86,7 +86,7 @@ Task bar() {
// CHECK: ExprWithCleanups {{.*}} 'void'
// CHECK-NEXT: CoawaitExpr
// CHECK-NEXT: CXXBindTemporaryExpr {{.*}} 'Task' (CXXTemporary {{.*}})
-// CHECK: MaterializeTemporaryExpr {{.*}} 'Awaiter':'Task::Awaiter'
+// CHECK: MaterializeTemporaryExpr {{.*}} 'Awaiter'
// CHECK: ExprWithCleanups {{.*}} 'bool'
// CHECK-NEXT: CXXMemberCallExpr {{.*}} 'bool'
// CHECK-NEXT: MemberExpr {{.*}} .await_ready
@@ -96,7 +96,7 @@ Task bar() {
// CHECK: ExprWithCleanups {{.*}} 'void'
// CHECK-NEXT: CoawaitExpr
// CHECK-NEXT: CXXBindTemporaryExpr {{.*}} 'Task' (CXXTemporary {{.*}})
-// CHECK: MaterializeTemporaryExpr {{.*}} 'Awaiter':'Task::Awaiter'
+// CHECK: MaterializeTemporaryExpr {{.*}} 'Awaiter'
// CHECK: ExprWithCleanups {{.*}} 'bool'
// CHECK-NEXT: CXXMemberCallExpr {{.*}} 'bool'
// CHECK-NEXT: MemberExpr {{.*}} .await_ready
diff --git a/clang/test/AST/cxx2c-variadic-friends.cpp b/clang/test/AST/cxx2c-variadic-friends.cpp
index fc84e73..6147d7f 100644
--- a/clang/test/AST/cxx2c-variadic-friends.cpp
+++ b/clang/test/AST/cxx2c-variadic-friends.cpp
@@ -70,11 +70,11 @@ template <typename ...Pack> struct Variadic {
// CHECK-LABEL: ClassTemplateDecl {{.*}} S2
// PRINT-LABEL: template <class ...Ts> struct S2 {
template<class ...Ts> struct S2 {
- // CHECK: FriendDecl {{.*}} 'class C<Ts>':'C<Ts>'...
+ // CHECK: FriendDecl {{.*}} 'class C<Ts>'...
// PRINT-NEXT: friend class C<Ts>...;
friend class C<Ts>...;
- // CHECK-NEXT: FriendDecl {{.*}} 'class N::C<Ts>':'C<Ts>'...
+ // CHECK-NEXT: FriendDecl {{.*}} 'class N::C<Ts>'...
// PRINT-NEXT: friend class N::C<Ts>...
friend class N::C<Ts>...;
};
diff --git a/clang/test/AST/deduction-guides.cpp b/clang/test/AST/deduction-guides.cpp
index d96c7e6..5ecb276 100644
--- a/clang/test/AST/deduction-guides.cpp
+++ b/clang/test/AST/deduction-guides.cpp
@@ -30,11 +30,11 @@ struct HasDeductionGuideTypeAlias {
HasDeductionGuideTypeAlias()->HasDeductionGuideTypeAlias<int>;
// The parameter to this one shouldn't be an elaborated type.
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuide> 'auto (typename STy::Child) -> HasDeductionGuide<T>'
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuide> 'auto (HasDeductionGuide<T>) -> HasDeductionGuide<T>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuide> 'auto (typename STy::Child) -> PR46111::HasDeductionGuide<T>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuide> 'auto (PR46111::HasDeductionGuide<T>) -> PR46111::HasDeductionGuide<T>'
// CHECK: CXXDeductionGuideDecl {{.*}} <deduction guide for HasDeductionGuide> 'auto () -> HasDeductionGuide<int>'
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuideTypeAlias> 'auto (typename STy::Child) -> HasDeductionGuideTypeAlias<T>'
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuideTypeAlias> 'auto (HasDeductionGuideTypeAlias<T>) -> HasDeductionGuideTypeAlias<T>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuideTypeAlias> 'auto (typename STy::Child) -> PR46111::HasDeductionGuideTypeAlias<T>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuideTypeAlias> 'auto (PR46111::HasDeductionGuideTypeAlias<T>) -> PR46111::HasDeductionGuideTypeAlias<T>'
// CHECK: CXXDeductionGuideDecl {{.*}} <deduction guide for HasDeductionGuideTypeAlias> 'auto () -> HasDeductionGuideTypeAlias<int>'
} // namespace PR46111
@@ -64,15 +64,15 @@ namespace PR48177 {
// CHECK: CXXRecordDecl {{.*}} struct Derived
// CHECK: TypeAliasDecl {{.*}} type_alias 'typename Derived<int, 1, int>::type_alias':'int'
-// CHECK-NEXT: ElaboratedType {{.*}} 'typename Derived<int, 1, int>::type_alias' sugar
-// CHECK-NEXT: TypedefType {{.*}} 'PR48177::Base<int>::type_alias' sugar
+// CHECK-NEXT: TypedefType {{.*}} 'typename Derived<int, 1, int>::type_alias' sugar
+// CHECK-NEXT: NestedNameSpecifier TypeSpec 'Derived<int, 1, int>'
// CHECK-NEXT: TypeAlias {{.*}} 'type_alias'
// CHECK-NEXT: SubstTemplateTypeParmType {{.*}} 'int' sugar class depth 0 index 0 A
// CHECK-NEXT: ClassTemplateSpecialization {{.*}} 'Base'
// CHECK-NEXT: BuiltinType {{.*}} 'int'
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Derived> 'auto (Derived<T, S, A> &&, const typename Derived<T, S, A>::type_alias &) -> Derived<T, S, A>'
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Derived> 'auto (T) -> Derived<T, S, A>'
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Derived> 'auto (Derived<T, S, A>) -> Derived<T, S, A>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Derived> 'auto (Derived<T, S, A> &&, const typename Derived<T, S, A>::type_alias &) -> PR48177::Derived<T, S, A>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Derived> 'auto (T) -> PR48177::Derived<T, S, A>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Derived> 'auto (PR48177::Derived<T, S, A>) -> PR48177::Derived<T, S, A>'
// CHECK: CXXDeductionGuideDecl {{.*}} <deduction guide for Derived> 'auto (T, A) -> Derived<T, 1, A>'
// CHECK: CXXDeductionGuideDecl {{.*}} <deduction guide for Derived> 'auto (int, int) -> Derived<int, 1, int>'
diff --git a/clang/test/AST/float16.cpp b/clang/test/AST/float16.cpp
index a9e1144..643d37c 100644
--- a/clang/test/AST/float16.cpp
+++ b/clang/test/AST/float16.cpp
@@ -126,7 +126,7 @@ public:
};
//CHECK: |-CXXRecordDecl {{.*}} referenced class C1 definition
-//CHECK: | |-CXXRecordDecl {{.*}} implicit referenced class C1
+//CHECK: | |-CXXRecordDecl {{.*}} implicit class C1
//CHECK-NEXT: | |-FieldDecl {{.*}} referenced f1c '_Float16'
//CHECK-NEXT: | |-VarDecl {{.*}} used f2c 'const _Float16' static
//CHECK-NEXT: | |-FieldDecl {{.*}} f3c 'volatile _Float16'
diff --git a/clang/test/AST/sourceranges.cpp b/clang/test/AST/sourceranges.cpp
index f78d34c..598a28b 100644
--- a/clang/test/AST/sourceranges.cpp
+++ b/clang/test/AST/sourceranges.cpp
@@ -47,7 +47,7 @@ struct D {
void construct() {
using namespace foo;
A a = A(12);
- // CHECK: CXXConstructExpr {{0x[0-9a-fA-F]+}} <col:9, col:13> 'A':'foo::A' 'void (int){{( __attribute__\(\(thiscall\)\))?}}'
+ // CHECK: CXXConstructExpr {{0x[0-9a-fA-F]+}} <col:9, col:13> 'A' 'void (int){{( __attribute__\(\(thiscall\)\))?}}'
D d = D(12);
// CHECK: CXXConstructExpr {{0x[0-9a-fA-F]+}} <col:9, col:13> 'D' 'void (int){{( __attribute__\(\(thiscall\)\))?}}'
}
@@ -174,7 +174,7 @@ namespace in_class_init {
// CHECK-1Z: CXXRecordDecl {{.*}} struct B definition
struct B {
- // CHECK-1Z: FieldDecl {{.*}} a 'A':'in_class_init::A'
+ // CHECK-1Z: FieldDecl {{.*}} a 'A'
// CHECK-1Z-NEXT: InitListExpr {{.*}} <col:11, col:12
A a = {};
};
@@ -192,7 +192,7 @@ namespace delegating_constructor_init {
// CHECK-1Z: CXXRecordDecl {{.*}} struct C definition
struct C : B {
// CHECK-1Z: CXXConstructorDecl {{.*}} C
- // CHECK-1Z-NEXT: CXXCtorInitializer 'B':'delegating_constructor_init::B'
+ // CHECK-1Z-NEXT: CXXCtorInitializer 'B'
// CHECK-1Z-NEXT: CXXConstructExpr {{.*}} <col:11, col:15
// CHECK-1Z-NEXT: InitListExpr {{.*}} <col:13, col:14
C() : B({}) {};
diff --git a/clang/test/ASTSYCL/ast-dump-sycl-kernel-call-stmt.cpp b/clang/test/ASTSYCL/ast-dump-sycl-kernel-call-stmt.cpp
index 27604e2..8e8e03c 100644
--- a/clang/test/ASTSYCL/ast-dump-sycl-kernel-call-stmt.cpp
+++ b/clang/test/ASTSYCL/ast-dump-sycl-kernel-call-stmt.cpp
@@ -64,10 +64,10 @@ void skep2<KN<2>>(K<2>);
// CHECK-NEXT: | `-FunctionDecl {{.*}} skep2 'void (K<2>)' explicit_instantiation_definition
// CHECK-NEXT: | |-TemplateArgument type 'KN<2>'
-// CHECK-NEXT: | | `-RecordType {{.*}} 'KN<2>'
+// CHECK-NEXT: | | `-RecordType {{.*}} 'KN<2>' canonical
// CHECK-NEXT: | | `-ClassTemplateSpecialization {{.*}} 'KN'
// CHECK-NEXT: | |-TemplateArgument type 'K<2>'
-// CHECK-NEXT: | | `-RecordType {{.*}} 'K<2>'
+// CHECK-NEXT: | | `-RecordType {{.*}} 'K<2>' canonical
// CHECK-NEXT: | | `-ClassTemplateSpecialization {{.*}} 'K'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} k 'K<2>'
// CHECK-NEXT: | |-SYCLKernelCallStmt {{.*}}
@@ -110,10 +110,10 @@ void skep3<KN<3>>(K<3> k) {
// CHECK-NEXT: | `-Function {{.*}} 'skep3' 'void (K<3>)'
// CHECK-NEXT: |-FunctionDecl {{.*}} skep3 'void (K<3>)' explicit_specialization
// CHECK-NEXT: | |-TemplateArgument type 'KN<3>'
-// CHECK-NEXT: | | `-RecordType {{.*}} 'KN<3>'
+// CHECK-NEXT: | | `-RecordType {{.*}} 'KN<3>' canonical
// CHECK-NEXT: | | `-ClassTemplateSpecialization {{.*}} 'KN'
// CHECK-NEXT: | |-TemplateArgument type 'K<3>'
-// CHECK-NEXT: | | `-RecordType {{.*}} 'K<3>'
+// CHECK-NEXT: | | `-RecordType {{.*}} 'K<3>' canonical
// CHECK-NEXT: | | `-ClassTemplateSpecialization {{.*}} 'K'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} k 'K<3>'
// CHECK-NEXT: | |-SYCLKernelCallStmt {{.*}}
diff --git a/clang/test/Analysis/LifetimeSafety/benchmark.py b/clang/test/Analysis/LifetimeSafety/benchmark.py
index 9d5f36c..4421fe9 100644
--- a/clang/test/Analysis/LifetimeSafety/benchmark.py
+++ b/clang/test/Analysis/LifetimeSafety/benchmark.py
@@ -99,28 +99,84 @@ def generate_cpp_merge_test(n: int) -> str:
return cpp_code
-def analyze_trace_file(trace_path: str) -> tuple[float, float]:
+def generate_cpp_nested_loop_test(n: int) -> str:
"""
- Parses the -ftime-trace JSON output to find durations.
+ Generates C++ code with N levels of nested loops.
+ This pattern tests how analysis performance scales with loop nesting depth,
+ which is a key factor in the complexity of dataflow analyses on structured
+ control flow.
- Returns:
- A tuple of (lifetime_analysis_duration_us, total_clang_duration_us).
+ Example (n=3):
+ struct MyObj { int id; ~MyObj() {} };
+ void nested_loops_3() {
+ MyObj* p = nullptr;
+ for(int i0=0; i0<2; ++i0) {
+ MyObj s0;
+ p = &s0;
+ for(int i1=0; i1<2; ++i1) {
+ MyObj s1;
+ p = &s1;
+ for(int i2=0; i2<2; ++i2) {
+ MyObj s2;
+ p = &s2;
+ }
+ }
+ }
+ }
+ """
+ if n <= 0:
+ return "// Nesting depth must be positive."
+
+ cpp_code = "struct MyObj { int id; ~MyObj() {} };\n\n"
+ cpp_code += f"void nested_loops_{n}() {{\n"
+ cpp_code += " MyObj* p = nullptr;\n"
+
+ for i in range(n):
+ indent = " " * (i + 1)
+ cpp_code += f"{indent}for(int i{i}=0; i{i}<2; ++i{i}) {{\n"
+ cpp_code += f"{indent} MyObj s{i}; p = &s{i};\n"
+
+ for i in range(n - 1, -1, -1):
+ indent = " " * (i + 1)
+ cpp_code += f"{indent}}}\n"
+
+ cpp_code += "}\n"
+ cpp_code += f"\nint main() {{ nested_loops_{n}(); return 0; }}\n"
+ return cpp_code
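+
+# A plausible rationale for the fixed trip count of 2 (an assumption, not
+# stated in the script): the benchmark times static analysis, whose cost
+# tracks the CFG shape (nesting depth n) rather than how many iterations
+# the generated loops would execute at runtime.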
+
+
+def analyze_trace_file(trace_path: str) -> dict:
"""
- lifetime_duration = 0.0
- total_duration = 0.0
+ Parses the -ftime-trace JSON output to find durations for the lifetime
+ analysis and its sub-phases.
+ Returns a dictionary of durations in microseconds.
+ """
+ durations = {
+ "lifetime_us": 0.0,
+ "total_us": 0.0,
+ "fact_gen_us": 0.0,
+ "loan_prop_us": 0.0,
+ "expired_loans_us": 0.0,
+ }
+ event_name_map = {
+ "LifetimeSafetyAnalysis": "lifetime_us",
+ "ExecuteCompiler": "total_us",
+ "FactGenerator": "fact_gen_us",
+ "LoanPropagation": "loan_prop_us",
+ "ExpiredLoans": "expired_loans_us",
+ }
try:
with open(trace_path, "r") as f:
trace_data = json.load(f)
for event in trace_data.get("traceEvents", []):
- if event.get("name") == "LifetimeSafetyAnalysis":
- lifetime_duration += float(event.get("dur", 0))
- if event.get("name") == "ExecuteCompiler":
- total_duration += float(event.get("dur", 0))
-
+ event_name = event.get("name")
+ if event_name in event_name_map:
+ key = event_name_map[event_name]
+ durations[key] += float(event.get("dur", 0))
except (IOError, json.JSONDecodeError) as e:
print(f"Error reading or parsing trace file {trace_path}: {e}", file=sys.stderr)
- return 0.0, 0.0
- return lifetime_duration, total_duration
+ return {key: 0.0 for key in durations}
+ return durations
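+
+# Sketch of the assumed -ftime-trace payload this parser consumes (only the
+# "name" and "dur" fields matter; "dur" is in microseconds, and durations of
+# repeated events with the same name are accumulated):
+#   {"traceEvents": [{"name": "FactGenerator", "dur": 250},
+#                    {"name": "LoanPropagation", "dur": 1200}, ...]}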
def power_law(n, c, k):
@@ -135,8 +191,29 @@ def human_readable_time(ms: float) -> str:
return f"{ms:.2f} ms"
+def calculate_complexity(n_data, y_data) -> tuple[float | None, float | None]:
+ """
+ Calculates the exponent 'k' for the power law fit y = c * n^k.
+ Returns a tuple of (k, k_standard_error).
+ """
+ try:
+ if len(n_data) < 3 or np.all(y_data < 1e-6) or np.var(y_data) < 1e-6:
+ return None, None
+
+ non_zero_indices = y_data > 0
+ if np.sum(non_zero_indices) < 3:
+ return None, None
+
+ n_fit, y_fit = n_data[non_zero_indices], y_data[non_zero_indices]
+ popt, pcov = curve_fit(power_law, n_fit, y_fit, p0=[0, 1], maxfev=5000)
+ k_stderr = np.sqrt(np.diag(pcov))[1]
+ return popt[1], k_stderr
+ except (RuntimeError, ValueError):
+ return None, None
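+
+# Quick self-check under synthetic data (illustrative values, not from any
+# real run): a pure power law should be recovered almost exactly.
+#   n = np.array([10.0, 50.0, 100.0, 200.0, 400.0])
+#   k, err = calculate_complexity(n, 2.0 * n**1.5)  # expect k ~ 1.50, err ~ 0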
+
+
def generate_markdown_report(results: dict) -> str:
- """Generates a Markdown-formatted report from the benchmark results."""
+ """Generates a concise, Markdown-formatted report from the benchmark results."""
report = []
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S %Z")
report.append(f"# Lifetime Analysis Performance Report")
@@ -146,54 +223,52 @@ def generate_markdown_report(results: dict) -> str:
for test_name, data in results.items():
title = data["title"]
report.append(f"## Test Case: {title}")
- report.append("")
+ report.append("\n**Timing Results:**\n")
# Table header
- report.append("| N | Analysis Time | Total Clang Time |")
- report.append("|:----|--------------:|-----------------:|")
+ report.append(
+ "| N (Input Size) | Total Time | Analysis Time (%) | Fact Generator (%) | Loan Propagation (%) | Expired Loans (%) |"
+ )
+ report.append(
+ "|:---------------|-----------:|------------------:|-------------------:|---------------------:|------------------:|"
+ )
# Table rows
n_data = np.array(data["n"])
- analysis_data = np.array(data["lifetime_ms"])
- total_data = np.array(data["total_ms"])
+ total_ms_data = np.array(data["total_ms"])
for i in range(len(n_data)):
- analysis_str = human_readable_time(analysis_data[i])
- total_str = human_readable_time(total_data[i])
- report.append(f"| {n_data[i]:<3} | {analysis_str:>13} | {total_str:>16} |")
-
- report.append("")
-
- # Complexity analysis
- report.append(f"**Complexity Analysis:**")
- try:
- # Curve fitting requires at least 3 points
- if len(n_data) < 3:
- raise ValueError("Not enough data points to perform curve fitting.")
-
- popt, pcov = curve_fit(
- power_law, n_data, analysis_data, p0=[0, 2], maxfev=5000
- )
- _, k = popt
-
- # Confidence Interval for k
- alpha = 0.05 # 95% confidence
- dof = max(0, len(n_data) - len(popt)) # degrees of freedom
- t_val = t.ppf(1.0 - alpha / 2.0, dof)
- # Standard error of the parameters
- perr = np.sqrt(np.diag(pcov))
- k_stderr = perr[1]
- k_ci_lower = k - t_val * k_stderr
- k_ci_upper = k + t_val * k_stderr
-
- report.append(
- f"- The performance for this case scales approx. as **O(n<sup>{k:.2f}</sup>)**."
- )
- report.append(
- f"- **95% Confidence interval for exponent:** `[{k_ci_lower:.2f}, {k_ci_upper:.2f}]`."
- )
+ total_t = total_ms_data[i]
+ if total_t < 1e-6:
+ total_t = 1.0 # Avoid division by zero
+
+ row = [
+ f"| {n_data[i]:<14} |",
+ f"{human_readable_time(total_t):>10} |",
+ f"{data['lifetime_ms'][i] / total_t * 100:>17.2f}% |",
+ f"{data['fact_gen_ms'][i] / total_t * 100:>18.2f}% |",
+ f"{data['loan_prop_ms'][i] / total_t * 100:>20.2f}% |",
+ f"{data['expired_loans_ms'][i] / total_t * 100:>17.2f}% |",
+ ]
+ report.append(" ".join(row))
+
+ report.append("\n**Complexity Analysis:**\n")
+ report.append("| Analysis Phase | Complexity O(n<sup>k</sup>) |")
+ report.append("|:------------------|:--------------------------|")
+
+ analysis_phases = {
+ "Total Analysis": data["lifetime_ms"],
+ "FactGenerator": data["fact_gen_ms"],
+ "LoanPropagation": data["loan_prop_ms"],
+ "ExpiredLoans": data["expired_loans_ms"],
+ }
- except (RuntimeError, ValueError) as e:
- report.append(f"- Could not determine a best-fit curve for the data: {e}")
+ for phase_name, y_data in analysis_phases.items():
+ k, delta = calculate_complexity(n_data, np.array(y_data))
+ if k is not None and delta is not None:
+ complexity_str = f"O(n<sup>{k:.2f}</sup> &pm; {delta:.2f})"
+ else:
+ complexity_str = "(Negligible)"
+ report.append(f"| {phase_name:<17} | {complexity_str:<25} |")
report.append("\n---\n")
@@ -202,7 +277,7 @@ def generate_markdown_report(results: dict) -> str:
def run_single_test(
clang_binary: str, output_dir: str, test_name: str, generator_func, n: int
-) -> tuple[float, float]:
+) -> dict:
"""Generates, compiles, and benchmarks a single test case."""
print(f"--- Running Test: {test_name.capitalize()} with N={n} ---")
@@ -221,7 +296,8 @@ def run_single_test(
"-o",
"/dev/null",
"-ftime-trace=" + trace_file,
- "-Wexperimental-lifetime-safety",
+ "-Xclang",
+ "-fexperimental-lifetime-safety",
"-std=c++17",
source_file,
]
@@ -231,11 +307,12 @@ def run_single_test(
if result.returncode != 0:
print(f"Compilation failed for N={n}!", file=sys.stderr)
print(result.stderr, file=sys.stderr)
- return 0.0, 0.0
+ return {}
- lifetime_us, total_us = analyze_trace_file(trace_file)
-
- return lifetime_us / 1000.0, total_us / 1000.0
+ durations_us = analyze_trace_file(trace_file)
+ return {
+ key.replace("_us", "_ms"): value / 1000.0 for key, value in durations_us.items()
+ }
if __name__ == "__main__":
@@ -270,6 +347,12 @@ if __name__ == "__main__":
"generator_func": generate_cpp_merge_test,
"n_values": [10, 50, 100, 200, 400, 800],
},
+ {
+ "name": "nested_loops",
+ "title": "Deeply Nested Loops",
+ "generator_func": generate_cpp_nested_loop_test,
+ "n_values": [10, 50, 100, 200, 400, 800],
+ },
]
results = {}
@@ -282,21 +365,28 @@ if __name__ == "__main__":
"n": [],
"lifetime_ms": [],
"total_ms": [],
+ "fact_gen_ms": [],
+ "loan_prop_ms": [],
+ "expired_loans_ms": [],
}
for n in config["n_values"]:
- lifetime_ms, total_ms = run_single_test(
+ durations_ms = run_single_test(
args.clang_binary,
args.output_dir,
test_name,
config["generator_func"],
n,
)
- if total_ms > 0:
+ if durations_ms:
results[test_name]["n"].append(n)
- results[test_name]["lifetime_ms"].append(lifetime_ms)
- results[test_name]["total_ms"].append(total_ms)
+ for key, value in durations_ms.items():
+ results[test_name][key].append(value)
+
print(
- f" Total: {human_readable_time(total_ms)} | Analysis: {human_readable_time(lifetime_ms)}"
+ f" Total Analysis: {human_readable_time(durations_ms['lifetime_ms'])} | "
+ f"FactGen: {human_readable_time(durations_ms['fact_gen_ms'])} | "
+ f"LoanProp: {human_readable_time(durations_ms['loan_prop_ms'])} | "
+ f"ExpiredLoans: {human_readable_time(durations_ms['expired_loans_ms'])}"
)
print("\n\n" + "=" * 80)
@@ -305,3 +395,8 @@ if __name__ == "__main__":
markdown_report = generate_markdown_report(results)
print(markdown_report)
+
+ report_filename = os.path.join(args.output_dir, "performance_report.md")
+ with open(report_filename, "w") as f:
+ f.write(markdown_report)
+ print(f"Report saved to: {report_filename}")
diff --git a/clang/test/Analysis/Malloc+MismatchedDeallocator+NewDelete.cpp b/clang/test/Analysis/Malloc+MismatchedDeallocator+NewDelete.cpp
index b9eb85d..fc324c3 100644
--- a/clang/test/Analysis/Malloc+MismatchedDeallocator+NewDelete.cpp
+++ b/clang/test/Analysis/Malloc+MismatchedDeallocator+NewDelete.cpp
@@ -1,5 +1,5 @@
-// RUN: %clang_analyze_cc1 -analyzer-checker=core,unix.Malloc,unix.MismatchedDeallocator,cplusplus.NewDelete -std=c++11 -verify %s
-// RUN: %clang_analyze_cc1 -analyzer-checker=core,unix.Malloc,unix.MismatchedDeallocator,cplusplus.NewDelete,cplusplus.NewDeleteLeaks -DLEAKS -std=c++11 -verify %s
+// RUN: %clang_analyze_cc1 -Wno-alloc-size -analyzer-checker=core,unix.Malloc,unix.MismatchedDeallocator,cplusplus.NewDelete -std=c++11 -verify %s
+// RUN: %clang_analyze_cc1 -Wno-alloc-size -analyzer-checker=core,unix.Malloc,unix.MismatchedDeallocator,cplusplus.NewDelete,cplusplus.NewDeleteLeaks -DLEAKS -std=c++11 -verify %s
#include "Inputs/system-header-simulator-for-malloc.h"
diff --git a/clang/test/Analysis/Malloc+MismatchedDeallocator_intersections.cpp b/clang/test/Analysis/Malloc+MismatchedDeallocator_intersections.cpp
index b0cef25..9887340 100644
--- a/clang/test/Analysis/Malloc+MismatchedDeallocator_intersections.cpp
+++ b/clang/test/Analysis/Malloc+MismatchedDeallocator_intersections.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_analyze_cc1 -analyzer-checker=core,unix.Malloc,unix.MismatchedDeallocator -std=c++11 -verify %s
+// RUN: %clang_analyze_cc1 -Wno-alloc-size -analyzer-checker=core,unix.Malloc,unix.MismatchedDeallocator -std=c++11 -verify %s
// expected-no-diagnostics
typedef __typeof(sizeof(int)) size_t;
diff --git a/clang/test/Analysis/MismatchedDeallocator-checker-test.mm b/clang/test/Analysis/MismatchedDeallocator-checker-test.mm
index ef8b24b..21cbe86 100644
--- a/clang/test/Analysis/MismatchedDeallocator-checker-test.mm
+++ b/clang/test/Analysis/MismatchedDeallocator-checker-test.mm
@@ -1,5 +1,5 @@
-// RUN: %clang_analyze_cc1 -analyzer-checker=core,unix.MismatchedDeallocator -fblocks -verify %s
-// RUN: %clang_analyze_cc1 -analyzer-checker=core,unix.MismatchedDeallocator -fblocks -DTEST_INLINABLE_ALLOCATORS -verify %s
+// RUN: %clang_analyze_cc1 -Wno-alloc-size -analyzer-checker=core,unix.MismatchedDeallocator -fblocks -verify %s
+// RUN: %clang_analyze_cc1 -Wno-alloc-size -analyzer-checker=core,unix.MismatchedDeallocator -fblocks -DTEST_INLINABLE_ALLOCATORS -verify %s
#include "Inputs/system-header-simulator-objc.h"
#include "Inputs/system-header-simulator-cxx.h"
diff --git a/clang/test/Analysis/NewDelete-checker-test.cpp b/clang/test/Analysis/NewDelete-checker-test.cpp
index c417b9c..5ab6e16 100644
--- a/clang/test/Analysis/NewDelete-checker-test.cpp
+++ b/clang/test/Analysis/NewDelete-checker-test.cpp
@@ -1,31 +1,37 @@
// RUN: %clang_analyze_cc1 -std=c++11 -fblocks %s \
+// RUN: -Wno-alloc-size \
// RUN: -verify=expected,newdelete \
// RUN: -analyzer-checker=core \
// RUN: -analyzer-checker=cplusplus.NewDelete
//
// RUN: %clang_analyze_cc1 -DLEAKS -std=c++11 -fblocks %s \
+// RUN: -Wno-alloc-size \
// RUN: -verify=expected,newdelete,leak \
// RUN: -analyzer-checker=core \
// RUN: -analyzer-checker=cplusplus.NewDelete \
// RUN: -analyzer-checker=cplusplus.NewDeleteLeaks
//
// RUN: %clang_analyze_cc1 -std=c++11 -fblocks -verify %s \
+// RUN: -Wno-alloc-size \
// RUN: -verify=expected,leak \
// RUN: -analyzer-checker=core \
// RUN: -analyzer-checker=cplusplus.NewDeleteLeaks
//
// RUN: %clang_analyze_cc1 -std=c++17 -fblocks %s \
+// RUN: -Wno-alloc-size \
// RUN: -verify=expected,newdelete \
// RUN: -analyzer-checker=core \
// RUN: -analyzer-checker=cplusplus.NewDelete
//
// RUN: %clang_analyze_cc1 -DLEAKS -std=c++17 -fblocks %s \
+// RUN: -Wno-alloc-size \
// RUN: -verify=expected,newdelete,leak \
// RUN: -analyzer-checker=core \
// RUN: -analyzer-checker=cplusplus.NewDelete \
// RUN: -analyzer-checker=cplusplus.NewDeleteLeaks
//
// RUN: %clang_analyze_cc1 -std=c++17 -fblocks -verify %s \
+// RUN: -Wno-alloc-size \
// RUN: -verify=expected,leak,inspection \
// RUN: -analyzer-checker=core \
// RUN: -analyzer-checker=cplusplus.NewDeleteLeaks \
diff --git a/clang/test/Analysis/NewDelete-intersections.mm b/clang/test/Analysis/NewDelete-intersections.mm
index eddfb32..dec9c292 100644
--- a/clang/test/Analysis/NewDelete-intersections.mm
+++ b/clang/test/Analysis/NewDelete-intersections.mm
@@ -1,4 +1,5 @@
// RUN: %clang_analyze_cc1 -std=c++11 -fblocks %s \
+// RUN: -Wno-alloc-size \
// RUN: -verify=newdelete \
// RUN: -analyzer-checker=core \
// RUN: -analyzer-checker=cplusplus.NewDelete
@@ -6,11 +7,13 @@
// leak-no-diagnostics
// RUN: %clang_analyze_cc1 -std=c++11 -DLEAKS -fblocks %s \
+// RUN: -Wno-alloc-size \
// RUN: -verify=leak \
// RUN: -analyzer-checker=core \
// RUN: -analyzer-checker=cplusplus.NewDeleteLeaks
// RUN: %clang_analyze_cc1 -std=c++11 -DLEAKS -fblocks %s \
+// RUN: -Wno-alloc-size \
// RUN: -verify=mismatch \
// RUN: -analyzer-checker=core \
// RUN: -analyzer-checker=unix.MismatchedDeallocator
diff --git a/clang/test/Analysis/anonymous-decls.cpp b/clang/test/Analysis/anonymous-decls.cpp
index 85449ca..76e5155 100644
--- a/clang/test/Analysis/anonymous-decls.cpp
+++ b/clang/test/Analysis/anonymous-decls.cpp
@@ -78,12 +78,12 @@ int main() {
// CHECK-NEXT: 8: decomposition-a-b
// CHECK-NEXT: 9: [B3.7]([B3.8])
// CHECK-NEXT: 10: [B3.9]
-// CHECK-NEXT: 11: std::tuple_element<0, std::pair<int, int>>::type a = get<0UL>(decomposition-a-b);
+// CHECK-NEXT: 11: std::tuple_element<0UL, std::pair<int, int>>::type &&a = get<0UL>(decomposition-a-b);
// CHECK-NEXT: 12: get<1UL>
// CHECK-NEXT: 13: [B3.12] (ImplicitCastExpr, FunctionToPointerDecay, tuple_element<1L, pair<int, int> >::type (*)(pair<int, int> &))
// CHECK-NEXT: 14: decomposition-a-b
// CHECK-NEXT: 15: [B3.13]([B3.14])
// CHECK-NEXT: 16: [B3.15]
-// CHECK-NEXT: 17: std::tuple_element<1, std::pair<int, int>>::type b = get<1UL>(decomposition-a-b);
+// CHECK-NEXT: 17: std::tuple_element<1UL, std::pair<int, int>>::type &&b = get<1UL>(decomposition-a-b);
// CHECK-NEXT: Preds (1): B1
// CHECK-NEXT: Succs (1): B2
diff --git a/clang/test/Analysis/castsize.c b/clang/test/Analysis/castsize.c
index 81aa60c..b08203b 100644
--- a/clang/test/Analysis/castsize.c
+++ b/clang/test/Analysis/castsize.c
@@ -1,5 +1,5 @@
// RUN: %clang_analyze_cc1 -verify %s \
-// RUN: -analyzer-checker=core,unix.Malloc,alpha.core.CastSize
+// RUN: -Wno-alloc-size -analyzer-checker=core,unix.Malloc,alpha.core.CastSize
typedef typeof(sizeof(int)) size_t;
void *malloc(size_t);
diff --git a/clang/test/Analysis/element-region-address-space.c b/clang/test/Analysis/element-region-address-space.c
index dd70662..da59055 100644
--- a/clang/test/Analysis/element-region-address-space.c
+++ b/clang/test/Analysis/element-region-address-space.c
@@ -1,11 +1,27 @@
// RUN: %clang_analyze_cc1 -triple amdgcn-unknown-unknown \
-// RUN: -analyzer-checker=core -verify %s
+// RUN: -Wno-incompatible-library-redeclaration \
+// RUN: -analyzer-checker=core,unix -verify %s
// expected-no-diagnostics
//
// By default, pointers are 64-bits.
+#define ADDRESS_SPACE_64BITS __attribute__((address_space(0)))
#define ADDRESS_SPACE_32BITS __attribute__((address_space(3)))
int test(ADDRESS_SPACE_32BITS int *p, ADDRESS_SPACE_32BITS void *q) {
return p == q; // no-crash
}
+
+// Make sure that the cstring checker handles non-default address spaces
+ADDRESS_SPACE_64BITS void *
+memcpy(ADDRESS_SPACE_64BITS void *,
+ ADDRESS_SPACE_32BITS const void *,
+ long unsigned int);
+
+ADDRESS_SPACE_64BITS struct {
+ char m[16];
+} n;
+
+void avoid_cstring_checker_crash(ADDRESS_SPACE_32BITS char *p) {
+ memcpy(&n.m[0], p, 4); // no-crash
+}
diff --git a/clang/test/Analysis/malloc-annotations.c b/clang/test/Analysis/malloc-annotations.c
index 68ac71d..969c84d 100644
--- a/clang/test/Analysis/malloc-annotations.c
+++ b/clang/test/Analysis/malloc-annotations.c
@@ -1,4 +1,5 @@
// RUN: %clang_analyze_cc1 -verify \
+// RUN: -Wno-alloc-size \
// RUN: -analyzer-checker=core \
// RUN: -analyzer-checker=alpha.deadcode.UnreachableCode \
// RUN: -analyzer-checker=alpha.core.CastSize \
diff --git a/clang/test/Analysis/malloc-checker-arg-uaf.c b/clang/test/Analysis/malloc-checker-arg-uaf.c
new file mode 100644
index 0000000..d6aa856
--- /dev/null
+++ b/clang/test/Analysis/malloc-checker-arg-uaf.c
@@ -0,0 +1,44 @@
+// RUN: %clang_analyze_cc1 -analyzer-checker=core,unix.Malloc -verify %s
+
+#include "Inputs/system-header-simulator-for-malloc.h"
+
+struct Obj {
+ int field;
+};
+
+void use(void *ptr);
+
+void test_direct_param_uaf() {
+ int *p = (int *)malloc(sizeof(int));
+ free(p);
+ use(p); // expected-warning{{Use of memory after it is released}}
+}
+
+void test_struct_field_uaf() {
+ struct Obj *o = (struct Obj *)malloc(sizeof(struct Obj));
+ free(o);
+ use(&o->field); // expected-warning{{Use of memory after it is released}}
+}
+
+void test_no_warning_const_int() {
+ use((void *)0x1234); // no-warning
+}
+
+void test_no_warning_stack() {
+ int x = 42;
+ use(&x); // no-warning
+}
+
+void test_nested_alloc() {
+ struct Obj *o = (struct Obj *)malloc(sizeof(struct Obj));
+ use(o); // no-warning
+ free(o);
+ use(o); // expected-warning{{Use of memory after it is released}}
+}
+
+void test_nested_field() {
+ struct Obj *o = (struct Obj *)malloc(sizeof(struct Obj));
+ int *f = &o->field;
+ free(o);
+ use(f); // expected-warning{{Use of memory after it is released}}
+}
diff --git a/clang/test/Analysis/malloc-sizeof.c b/clang/test/Analysis/malloc-sizeof.c
index 4573c19..6202795 100644
--- a/clang/test/Analysis/malloc-sizeof.c
+++ b/clang/test/Analysis/malloc-sizeof.c
@@ -1,4 +1,4 @@
-// RUN: %clang_analyze_cc1 -analyzer-checker=unix.MallocSizeof -verify %s
+// RUN: %clang_analyze_cc1 -Wno-alloc-size -analyzer-checker=unix.MallocSizeof -verify %s
#include <stddef.h>
diff --git a/clang/test/Analysis/malloc.c b/clang/test/Analysis/malloc.c
index 82eb364..84593ad 100644
--- a/clang/test/Analysis/malloc.c
+++ b/clang/test/Analysis/malloc.c
@@ -1,4 +1,5 @@
// RUN: %clang_analyze_cc1 -Wno-strict-prototypes -Wno-error=implicit-int -verify %s \
+// RUN: -Wno-alloc-size \
// RUN: -analyzer-checker=core \
// RUN: -analyzer-checker=alpha.deadcode.UnreachableCode \
// RUN: -analyzer-checker=alpha.core.CastSize \
diff --git a/clang/test/Analysis/unix-fns.c b/clang/test/Analysis/unix-fns.c
index 7789428..2a971be 100644
--- a/clang/test/Analysis/unix-fns.c
+++ b/clang/test/Analysis/unix-fns.c
@@ -1,6 +1,6 @@
-// RUN: %clang_analyze_cc1 -triple x86_64-apple-darwin10 -analyzer-checker=core,unix.API,osx.API,optin.portability %s -analyzer-output=plist -analyzer-config faux-bodies=true -fblocks -verify -o %t.plist
+// RUN: %clang_analyze_cc1 -triple x86_64-apple-darwin10 -Wno-alloc-size -analyzer-checker=core,unix.API,osx.API,optin.portability %s -analyzer-output=plist -analyzer-config faux-bodies=true -fblocks -verify -o %t.plist
// RUN: %normalize_plist <%t.plist | diff -ub %S/Inputs/expected-plists/unix-fns.c.plist -
-// RUN: %clang_analyze_cc1 -triple x86_64-unknown-linux -analyzer-checker=core,unix.API,osx.API,optin.portability %s -analyzer-output=plist -analyzer-config faux-bodies=true -fblocks -verify -o %t.plist
+// RUN: %clang_analyze_cc1 -triple x86_64-unknown-linux -Wno-alloc-size -analyzer-checker=core,unix.API,osx.API,optin.portability %s -analyzer-output=plist -analyzer-config faux-bodies=true -fblocks -verify -o %t.plist
// RUN: %normalize_plist <%t.plist | diff -ub %S/Inputs/expected-plists/unix-fns.c.plist -
// RUN: mkdir -p %t.dir
// RUN: %clang_analyze_cc1 -analyzer-checker=core,unix.API,osx.API,optin.portability -analyzer-output=html -analyzer-config faux-bodies=true -fblocks -o %t.dir %s
diff --git a/clang/test/C/C11/n1285_1.c b/clang/test/C/C11/n1285_1.c
index 5010004..25b68e3 100644
--- a/clang/test/C/C11/n1285_1.c
+++ b/clang/test/C/C11/n1285_1.c
@@ -26,16 +26,16 @@ struct X f(void);
// C11-O2-NEXT: [[ENTRY:.*:]]
// C11-O2-NEXT: [[P:%.*]] = alloca ptr, align 8
// C11-O2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_X:%.*]], align 4
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) #[[ATTR5:[0-9]+]]
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]]) #[[ATTR5:[0-9]+]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: call void @f(ptr dead_on_unwind writable sret([[STRUCT_X]]) align 4 [[REF_TMP]])
// C11-O2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[REF_TMP]], i32 0, i32 0
// C11-O2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i32], ptr [[A]], i64 0, i64 0
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: store ptr [[ARRAYDECAY]], ptr [[P]], align 8, !tbaa [[TBAA2:![0-9]+]]
// C11-O2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P]], align 8, !tbaa [[TBAA2]]
// C11-O2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA7:![0-9]+]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]]) #[[ATTR5]]
// C11-O2-NEXT: ret i32 [[TMP1]]
//
int func_return(void) {
@@ -79,7 +79,7 @@ int func_return(void) {
// C11-O2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_X:%.*]], align 4
// C11-O2-NEXT: [[Q:%.*]] = alloca ptr, align 8
// C11-O2-NEXT: [[DOTCOMPOUNDLITERAL:%.*]] = alloca [[STRUCT_X]], align 4
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: br i1 true, label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]]
// C11-O2: [[COND_TRUE]]:
// C11-O2-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[REF_TMP]], i8 0, i64 20, i1 false)
@@ -92,8 +92,8 @@ int func_return(void) {
// C11-O2-NEXT: [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[REF_TMP]], i32 0, i32 0
// C11-O2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i32], ptr [[A1]], i64 0, i64 0
// C11-O2-NEXT: store ptr [[ARRAYDECAY]], ptr @p, align 8, !tbaa [[TBAA2]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[Q]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[Q]]) #[[ATTR5]]
// C11-O2-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[DOTCOMPOUNDLITERAL]], i8 0, i64 20, i1 false)
// C11-O2-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 0
// C11-O2-NEXT: [[A3:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 0
@@ -104,7 +104,7 @@ int func_return(void) {
// C11-O2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[Q]], align 8, !tbaa [[TBAA2]]
// C11-O2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !tbaa [[TBAA7]]
// C11-O2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP3]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[Q]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[Q]]) #[[ATTR5]]
// C11-O2-NEXT: ret i32 [[ADD]]
//
int ternary(void) {
@@ -133,16 +133,16 @@ int ternary(void) {
// C11-O2-NEXT: [[ENTRY:.*:]]
// C11-O2-NEXT: [[X:%.*]] = alloca [[STRUCT_X:%.*]], align 4
// C11-O2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_X]], align 4
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[X]]) #[[ATTR5]]
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[REF_TMP]], ptr align 4 [[X]], i64 20, i1 false), !tbaa.struct [[TBAA_STRUCT9:![0-9]+]]
// C11-O2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[REF_TMP]], i32 0, i32 0
// C11-O2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i32], ptr [[A]], i64 0, i64 0
// C11-O2-NEXT: store ptr [[ARRAYDECAY]], ptr @p, align 8, !tbaa [[TBAA2]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: [[TMP0:%.*]] = load ptr, ptr @p, align 8, !tbaa [[TBAA2]]
// C11-O2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA7]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[X]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) #[[ATTR5]]
// C11-O2-NEXT: ret i32 [[TMP1]]
//
int comma(void) {
@@ -170,16 +170,16 @@ int comma(void) {
// C11-O2-NEXT: [[ENTRY:.*:]]
// C11-O2-NEXT: [[X:%.*]] = alloca [[STRUCT_X:%.*]], align 4
// C11-O2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_X]], align 4
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[X]]) #[[ATTR5]]
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[REF_TMP]], ptr align 4 [[X]], i64 20, i1 false), !tbaa.struct [[TBAA_STRUCT9]]
// C11-O2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[REF_TMP]], i32 0, i32 0
// C11-O2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i32], ptr [[A]], i64 0, i64 0
// C11-O2-NEXT: store ptr [[ARRAYDECAY]], ptr @p, align 8, !tbaa [[TBAA2]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: [[TMP0:%.*]] = load ptr, ptr @p, align 8, !tbaa [[TBAA2]]
// C11-O2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA7]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[X]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) #[[ATTR5]]
// C11-O2-NEXT: ret i32 [[TMP1]]
//
int cast(void) {
@@ -210,19 +210,19 @@ int cast(void) {
// C11-O2-NEXT: [[X:%.*]] = alloca [[STRUCT_X:%.*]], align 4
// C11-O2-NEXT: [[S:%.*]] = alloca [[STRUCT_X]], align 4
// C11-O2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_X]], align 4
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[X]]) #[[ATTR5]]
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[S]]) #[[ATTR5]]
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[S]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[X]], ptr align 4 [[S]], i64 20, i1 false), !tbaa.struct [[TBAA_STRUCT9]]
// C11-O2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[REF_TMP]], ptr align 4 [[X]], i64 20, i1 false), !tbaa.struct [[TBAA_STRUCT9]]
// C11-O2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[REF_TMP]], i32 0, i32 0
// C11-O2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i32], ptr [[A]], i64 0, i64 0
// C11-O2-NEXT: store ptr [[ARRAYDECAY]], ptr @p, align 8, !tbaa [[TBAA2]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: [[TMP0:%.*]] = load ptr, ptr @p, align 8, !tbaa [[TBAA2]]
// C11-O2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA7]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[S]]) #[[ATTR5]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[X]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[S]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) #[[ATTR5]]
// C11-O2-NEXT: ret i32 [[TMP1]]
//
int assign(void) {
diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp
index 141b67e..a643de2d 100644
--- a/clang/test/CIR/CodeGen/array.cpp
+++ b/clang/test/CIR/CodeGen/array.cpp
@@ -45,9 +45,9 @@ int dd[3][2] = {{1, 2}, {3, 4}, {5, 6}};
// OGCG: [i32 3, i32 4], [2 x i32] [i32 5, i32 6]]
int e[10] = {1, 2};
-// CIR: cir.global external @e = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i], trailing_zeros> : !cir.array<!s32i x 10>
+// CIR: cir.global external @e = #cir.const_record<{#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.zero : !cir.array<!s32i x 8>}> : !rec_anon_struct
-// LLVM: @e = global [10 x i32] [i32 1, i32 2, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0]
+// LLVM: @e = global <{ i32, i32, [8 x i32] }> <{ i32 1, i32 2, [8 x i32] zeroinitializer }>
// OGCG: @e = global <{ i32, i32, [8 x i32] }> <{ i32 1, i32 2, [8 x i32] zeroinitializer }>
@@ -58,6 +58,28 @@ int f[5] = {1, 2};
// OGCG: @f = global [5 x i32] [i32 1, i32 2, i32 0, i32 0, i32 0]
+int g[16] = {1, 2, 3, 4, 5, 6, 7, 8};
+// CIR: cir.global external @g = #cir.const_record<{
+// CIR-SAME: #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i,
+// CIR-SAME: #cir.int<3> : !s32i, #cir.int<4> : !s32i,
+// CIR-SAME: #cir.int<5> : !s32i, #cir.int<6> : !s32i,
+// CIR-SAME: #cir.int<7> : !s32i, #cir.int<8> : !s32i]>
+// CIR-SAME: : !cir.array<!s32i x 8>,
+// CIR-SAME: #cir.zero : !cir.array<!s32i x 8>}> : !rec_anon_struct1
+
+// LLVM: @g = global <{ [8 x i32], [8 x i32] }>
+// LLVM-SAME: <{ [8 x i32]
+// LLVM-SAME: [i32 1, i32 2, i32 3, i32 4,
+// LLVM-SAME: i32 5, i32 6, i32 7, i32 8],
+// LLVM-SAME: [8 x i32] zeroinitializer }>
+
+// OGCG: @g = global <{ [8 x i32], [8 x i32] }>
+// OGCG-SAME: <{ [8 x i32]
+// OGCG-SAME: [i32 1, i32 2, i32 3, i32 4,
+// OGCG-SAME: i32 5, i32 6, i32 7, i32 8],
+// OGCG-SAME: [8 x i32] zeroinitializer }>
+
+
extern int b[10];
// CIR: cir.global "private" external @b : !cir.array<!s32i x 10>
// LLVM: @b = external global [10 x i32]
@@ -129,31 +151,50 @@ void func2() {
}
// CIR: %[[ARR2:.*]] = cir.alloca !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>>, ["arr", init]
-// CIR: %[[ELE_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp", init]
-// CIR: %[[ARR_2_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR2]] : !cir.ptr<!cir.array<!s32i x 2>>), !cir.ptr<!s32i>
-// CIR: %[[V1:.*]] = cir.const #cir.int<5> : !s32i
-// CIR: cir.store{{.*}} %[[V1]], %[[ARR_2_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp", init]
+// CIR: %[[ARR_0:.*]] = cir.cast(array_to_ptrdecay, %[[ARR2]] : !cir.ptr<!cir.array<!s32i x 2>>), !cir.ptr<!s32i>
+// CIR: %[[FIVE:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: cir.store{{.*}} %[[FIVE]], %[[ARR_0]] : !s32i, !cir.ptr<!s32i>
// CIR: %[[OFFSET_0:.*]] = cir.const #cir.int<1> : !s64i
-// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_2_PTR]] : !cir.ptr<!s32i>, %[[OFFSET_0]] : !s64i), !cir.ptr<!s32i>
-// CIR: cir.store{{.*}} %[[ELE_PTR]], %[[ELE_ALLOCA]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
-// CIR: %[[LOAD_1:.*]] = cir.load{{.*}} %[[ELE_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
-// CIR: %[[V2:.*]] = cir.const #cir.int<0> : !s32i
-// CIR: cir.store{{.*}} %[[V2]], %[[LOAD_1]] : !s32i, !cir.ptr<!s32i>
-// CIR: %[[OFFSET_1:.*]] = cir.const #cir.int<1> : !s64i
-// CIR: %[[ELE_1_PTR:.*]] = cir.ptr_stride(%[[LOAD_1]] : !cir.ptr<!s32i>, %[[OFFSET_1]] : !s64i), !cir.ptr<!s32i>
-// CIR: cir.store{{.*}} %[[ELE_1_PTR]], %[[ELE_ALLOCA]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_0]] : !cir.ptr<!s32i>, %[[OFFSET_0]] : !s64i), !cir.ptr<!s32i>
+// CIR: cir.store{{.*}} %[[ELE_PTR]], %[[ARR_PTR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR: %[[TWO:.*]] = cir.const #cir.int<2> : !s64i
+// CIR: %[[ARR_END:.*]] = cir.ptr_stride(%[[ARR_0]] : !cir.ptr<!s32i>, %[[TWO]] : !s64i), !cir.ptr<!s32i>
+// CIR: cir.do {
+// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store{{.*}} %[[ZERO]], %[[ARR_CUR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CIR: %[[ARR_NEXT:.*]] = cir.ptr_stride(%[[ARR_CUR]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CIR: cir.store{{.*}} %[[ARR_NEXT]], %[[ARR_PTR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR: cir.yield
+// CIR: } while {
+// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR: %[[CMP:.*]] = cir.cmp(ne, %[[ARR_CUR]], %[[ARR_END]]) : !cir.ptr<!s32i>, !cir.bool
+// CIR: cir.condition(%[[CMP]])
+// CIR: }
// LLVM: define{{.*}} void @_Z5func2v()
-// LLVM: %[[ARR:.*]] = alloca [2 x i32], i64 1, align 4
-// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8
-// LLVM: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR]], i32 0
-// LLVM: store i32 5, ptr %[[ARR_PTR]], align 4
-// LLVM: %[[ELE_1_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 1
-// LLVM: store ptr %[[ELE_1_PTR]], ptr %[[TMP]], align 8
-// LLVM: %[[TMP2:.*]] = load ptr, ptr %[[TMP]], align 8
-// LLVM: store i32 0, ptr %[[TMP2]], align 4
-// LLVM: %[[ELE_1:.*]] = getelementptr i32, ptr %[[TMP2]], i64 1
-// LLVM: store ptr %[[ELE_1]], ptr %[[TMP]], align 8
+// LLVM: %[[ARR:.*]] = alloca [2 x i32], i64 1, align 4
+// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8
+// LLVM: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR]], i32 0
+// LLVM: store i32 5, ptr %[[ARR_PTR]], align 4
+// LLVM: %[[ELE_1_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 1
+// LLVM: store ptr %[[ELE_1_PTR]], ptr %[[TMP]], align 8
+// LLVM: %[[END_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 2
+// LLVM: br label %[[LOOP_BODY:.*]]
+// LLVM: [[LOOP_NEXT:.*]]:
+// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
+// LLVM: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[END_PTR]]
+// LLVM: br i1 %[[CMP]], label %[[LOOP_BODY]], label %[[LOOP_END:.*]]
+// LLVM: [[LOOP_BODY]]:
+// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
+// LLVM: store i32 0, ptr %[[CUR]], align 4
+// LLVM: %[[NEXT:.*]] = getelementptr i32, ptr %[[CUR]], i64 1
+// LLVM: store ptr %[[NEXT]], ptr %[[TMP]], align 8
+// LLVM: br label %[[LOOP_NEXT:.*]]
+// LLVM: [[LOOP_END]]:
+// LLVM: ret void
// OGCG: %[[ARR:.*]] = alloca [2 x i32], align 4
// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[ARR]], ptr align 4 @[[FUN2_ARR]], i64 8, i1 false)
@@ -270,27 +311,46 @@ void func5() {
// CIR: %[[V_0_0:.*]] = cir.const #cir.int<5> : !s32i
// CIR: cir.store{{.*}} %[[V_0_0]], %[[ARR_0_PTR]] : !s32i, !cir.ptr<!s32i>
// CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i
-// CIR: %6 = cir.ptr_stride(%[[ARR_0]] : !cir.ptr<!cir.array<!s32i x 1>>, %[[OFFSET]] : !s64i), !cir.ptr<!cir.array<!s32i x 1>>
-// CIR: cir.store{{.*}} %6, %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>
-// CIR: %7 = cir.load{{.*}} %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>, !cir.ptr<!cir.array<!s32i x 1>>
-// CIR: %8 = cir.const #cir.zero : !cir.array<!s32i x 1>
-// CIR: cir.store{{.*}} %8, %7 : !cir.array<!s32i x 1>, !cir.ptr<!cir.array<!s32i x 1>>
-// CIR: %[[OFFSET_1:.*]] = cir.const #cir.int<1> : !s64i
-// CIR: %10 = cir.ptr_stride(%7 : !cir.ptr<!cir.array<!s32i x 1>>, %[[OFFSET_1]] : !s64i), !cir.ptr<!cir.array<!s32i x 1>>
-// CIR: cir.store{{.*}} %10, %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>
+// CIR: %[[ARR_1:.*]] = cir.ptr_stride(%[[ARR_0]] : !cir.ptr<!cir.array<!s32i x 1>>, %[[OFFSET]] : !s64i), !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: cir.store{{.*}} %[[ARR_1]], %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>
+// CIR: %[[TWO:.*]] = cir.const #cir.int<2> : !s64i
+// CIR: %[[ARR_END:.*]] = cir.ptr_stride(%[[ARR_0]] : !cir.ptr<!cir.array<!s32i x 1>>, %[[TWO]] : !s64i), !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: cir.do {
+// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>, !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array<!s32i x 1>
+// CIR: cir.store{{.*}} %[[ZERO]], %[[ARR_CUR]] : !cir.array<!s32i x 1>, !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CIR: %[[ARR_NEXT:.*]] = cir.ptr_stride(%[[ARR_CUR]] : !cir.ptr<!cir.array<!s32i x 1>>, %[[ONE]] : !s64i), !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: cir.store{{.*}} %[[ARR_NEXT]], %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>
+// CIR: cir.yield
+// CIR: } while {
+// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>, !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: %[[CMP:.*]] = cir.cmp(ne, %[[ARR_CUR]], %[[ARR_END]]) : !cir.ptr<!cir.array<!s32i x 1>>, !cir.bool
+// CIR: cir.condition(%[[CMP]])
+// CIR: }
// LLVM: define{{.*}} void @_Z5func5v()
-// LLVM: %[[ARR:.*]] = alloca [2 x [1 x i32]], i64 1, align 4
-// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8
-// LLVM: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR]], i32 0
-// LLVM: %[[ARR_0:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i32 0
-// LLVM: store i32 5, ptr %[[ARR_0]], align 4
-// LLVM: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1
-// LLVM: store ptr %[[ARR_1]], ptr %[[TMP]], align 8
-// LLVM: %[[ARR_1_VAL:.*]] = load ptr, ptr %[[TMP]], align 8
-// LLVM: store [1 x i32] zeroinitializer, ptr %[[ARR_1_VAL]], align 4
-// LLVM: %[[ARR_1_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_1_VAL]], i64 1
-// LLVM: store ptr %[[ARR_1_PTR]], ptr %[[TMP]], align 8
+// LLVM: %[[ARR:.*]] = alloca [2 x [1 x i32]], i64 1, align 4
+// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8
+// LLVM: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR]], i32 0
+// LLVM: %[[ARR_0:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i32 0
+// LLVM: store i32 5, ptr %[[ARR_0]], align 4
+// LLVM: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1
+// LLVM: store ptr %[[ARR_1]], ptr %[[TMP]], align 8
+// LLVM: %[[END_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 2
+// LLVM: br label %[[LOOP_BODY:.*]]
+// LLVM: [[LOOP_NEXT:.*]]:
+// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
+// LLVM: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[END_PTR]]
+// LLVM: br i1 %[[CMP]], label %[[LOOP_BODY]], label %[[LOOP_END:.*]]
+// LLVM: [[LOOP_BODY]]:
+// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
+// LLVM: store [1 x i32] zeroinitializer, ptr %[[CUR]], align 4
+// LLVM: %[[NEXT:.*]] = getelementptr [1 x i32], ptr %[[CUR]], i64 1
+// LLVM: store ptr %[[NEXT]], ptr %[[TMP]], align 8
+// LLVM: br label %[[LOOP_NEXT]]
+// LLVM: [[LOOP_END]]:
+// LLVM: ret void
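+
+// Note: the do/while loop above is the "arrayinit" loop emitted for the
+// elements without an explicit initializer: arrayinit.temp walks from the
+// first uninitialized element up to the one-past-the-end pointer, storing
+// zeroes as it goes.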
// OGCG: %[[ARR:.*]] = alloca [2 x [1 x i32]], align 4
// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[ARR]], ptr align 4 @[[FUN5_ARR]], i64 8, i1 false)
@@ -335,25 +395,44 @@ void func7() {
}
// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!cir.ptr<!s32i> x 1>, !cir.ptr<!cir.array<!cir.ptr<!s32i> x 1>>, ["arr", init]
-// CIR: %[[ARR_TMP:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, ["arrayinit.temp", init]
-// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 1>>), !cir.ptr<!cir.ptr<!s32i>>
-// CIR: cir.store{{.*}} %[[ARR_PTR]], %[[ARR_TMP]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
-// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[ARR_TMP]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!s32i>>
-// CIR: %[[NULL_PTR:.*]] = cir.const #cir.ptr<null> : !cir.ptr<!s32i>
-// CIR: cir.store{{.*}} %[[NULL_PTR]], %[[TMP]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
-// CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i
-// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[TMP]] : !cir.ptr<!cir.ptr<!s32i>>, %[[OFFSET]] : !s64i), !cir.ptr<!cir.ptr<!s32i>>
-// CIR: cir.store{{.*}} %[[ELE_PTR]], %[[ARR_TMP]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, ["arrayinit.temp", init]
+// CIR: %[[ARR_0:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 1>>), !cir.ptr<!cir.ptr<!s32i>>
+// CIR: cir.store{{.*}} %[[ARR_0]], %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CIR: %[[ARR_END:.*]] = cir.ptr_stride(%[[ARR_0]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ONE]] : !s64i), !cir.ptr<!cir.ptr<!s32i>>
+// CIR: cir.do {
+// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR: %[[NULL_PTR:.*]] = cir.const #cir.ptr<null> : !cir.ptr<!s32i>
+// CIR: cir.store{{.*}} %[[NULL_PTR]], %[[ARR_CUR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CIR: %[[ARR_NEXT:.*]] = cir.ptr_stride(%[[ARR_CUR]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ONE]] : !s64i), !cir.ptr<!cir.ptr<!s32i>>
+// CIR: cir.store{{.*}} %[[ARR_NEXT]], %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CIR: cir.yield
+// CIR: } while {
+// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR: %[[CMP:.*]] = cir.cmp(ne, %[[ARR_CUR]], %[[ARR_END]]) : !cir.ptr<!cir.ptr<!s32i>>, !cir.bool
+// CIR: cir.condition(%[[CMP]])
+// CIR: }
// LLVM: define{{.*}} void @_Z5func7v()
-// LLVM: %[[ARR:.*]] = alloca [1 x ptr], i64 1, align 8
-// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8
-// LLVM: %[[ELE_PTR:.*]] = getelementptr ptr, ptr %[[ARR]], i32 0
-// LLVM: store ptr %[[ELE_PTR]], ptr %[[ALLOCA]], align 8
-// LLVM: %[[TMP:.*]] = load ptr, ptr %[[ALLOCA]], align 8
-// LLVM: store ptr null, ptr %[[TMP]], align 8
-// LLVM: %[[ELE:.*]] = getelementptr ptr, ptr %[[TMP]], i64 1
-// LLVM: store ptr %[[ELE]], ptr %[[ALLOCA]], align 8
+// LLVM: %[[ARR:.*]] = alloca [1 x ptr], i64 1, align 8
+// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8
+// LLVM: %[[ARR_PTR:.*]] = getelementptr ptr, ptr %[[ARR]], i32 0
+// LLVM: store ptr %[[ARR_PTR]], ptr %[[TMP]], align 8
+// LLVM: %[[END_PTR:.*]] = getelementptr ptr, ptr %[[ARR_PTR]], i64 1
+// LLVM: br label %[[LOOP_BODY:.*]]
+// LLVM: [[LOOP_NEXT:.*]]:
+// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
+// LLVM: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[END_PTR]]
+// LLVM: br i1 %[[CMP]], label %[[LOOP_BODY]], label %[[LOOP_END:.*]]
+// LLVM: [[LOOP_BODY]]:
+// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
+// LLVM: store ptr null, ptr %[[CUR]], align 8
+// LLVM: %[[NEXT:.*]] = getelementptr ptr, ptr %[[CUR]], i64 1
+// LLVM: store ptr %[[NEXT]], ptr %[[TMP]], align 8
+// LLVM: br label %[[LOOP_NEXT]]
+// LLVM: [[LOOP_END]]:
+// LLVM: ret void
// OGCG: %[[ARR:.*]] = alloca [1 x ptr], align 8
// OGCG: call void @llvm.memset.p0.i64(ptr align 8 %[[ARR]], i8 0, i64 8, i1 false)
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
new file mode 100644
index 0000000..8b947f7
--- /dev/null
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -0,0 +1,206 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+void f1(void) {
+ _Atomic(int) x = 42;
+}
+
+// CIR-LABEL: @f1
+// CIR: %[[SLOT:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init] {alignment = 4 : i64}
+// CIR-NEXT: %[[INIT:.+]] = cir.const #cir.int<42> : !s32i
+// CIR-NEXT: cir.store align(4) %[[INIT]], %[[SLOT]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+// LLVM-LABEL: @f1
+// LLVM: %[[SLOT:.+]] = alloca i32, i64 1, align 4
+// LLVM-NEXT: store i32 42, ptr %[[SLOT]], align 4
+// LLVM: }
+
+// OGCG-LABEL: @f1
+// OGCG: %[[SLOT:.+]] = alloca i32, align 4
+// OGCG-NEXT: store i32 42, ptr %[[SLOT]], align 4
+// OGCG: }
+
+void f2(void) {
+ _Atomic(int) x;
+ __c11_atomic_init(&x, 42);
+}
+
+// CIR-LABEL: @f2
+// CIR: %[[SLOT:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["x"] {alignment = 4 : i64}
+// CIR-NEXT: %[[INIT:.+]] = cir.const #cir.int<42> : !s32i
+// CIR-NEXT: cir.store align(4) %[[INIT]], %[[SLOT]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+// LLVM-LABEL: @f2
+// LLVM: %[[SLOT:.+]] = alloca i32, i64 1, align 4
+// LLVM-NEXT: store i32 42, ptr %[[SLOT]], align 4
+// LLVM: }
+
+// OGCG-LABEL: @f2
+// OGCG: %[[SLOT:.+]] = alloca i32, align 4
+// OGCG-NEXT: store i32 42, ptr %[[SLOT]], align 4
+// OGCG: }
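+
+// Note: __c11_atomic_init does not require atomic semantics (it initializes
+// an object that is not yet visible to other threads), so both f1 and f2
+// lower to a plain, non-atomic store.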
+
+void load(int *ptr) {
+ int x;
+ __atomic_load(ptr, &x, __ATOMIC_RELAXED);
+ __atomic_load(ptr, &x, __ATOMIC_CONSUME);
+ __atomic_load(ptr, &x, __ATOMIC_ACQUIRE);
+ __atomic_load(ptr, &x, __ATOMIC_SEQ_CST);
+}
+
+// CIR-LABEL: @load
+// CIR: %{{.+}} = cir.load align(4) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR: %{{.+}} = cir.load align(4) atomic(consume) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR: %{{.+}} = cir.load align(4) atomic(acquire) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR: %{{.+}} = cir.load align(4) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR: }
+
+// LLVM-LABEL: @load
+// LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+// LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} seq_cst, align 4
+// LLVM: }
+
+// OGCG-LABEL: @load
+// OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+// OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} seq_cst, align 4
+// OGCG: }
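+
+// Note: LLVM IR has no "consume" ordering; Clang conservatively lowers
+// __ATOMIC_CONSUME to acquire, which is why the CONSUME load checks for
+// `acquire` in both LLVM outputs.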
+
+void load_n(int *ptr) {
+ int a;
+ a = __atomic_load_n(ptr, __ATOMIC_RELAXED);
+ a = __atomic_load_n(ptr, __ATOMIC_CONSUME);
+ a = __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+ a = __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
+}
+
+// CIR-LABEL: @load_n
+// CIR: %{{.+}} = cir.load align(4) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR: %{{.+}} = cir.load align(4) atomic(consume) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR: %{{.+}} = cir.load align(4) atomic(acquire) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR: %{{.+}} = cir.load align(4) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR: }
+
+// LLVM-LABEL: @load_n
+// LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+// LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} seq_cst, align 4
+// LLVM: }
+
+// OGCG-LABEL: @load_n
+// OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+// OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} seq_cst, align 4
+// OGCG: }
+
+void c11_load(_Atomic(int) *ptr) {
+ __c11_atomic_load(ptr, __ATOMIC_RELAXED);
+ __c11_atomic_load(ptr, __ATOMIC_CONSUME);
+ __c11_atomic_load(ptr, __ATOMIC_ACQUIRE);
+ __c11_atomic_load(ptr, __ATOMIC_SEQ_CST);
+}
+
+// CIR-LABEL: @c11_load
+// CIR: %{{.+}} = cir.load align(4) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR: %{{.+}} = cir.load align(4) atomic(consume) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR: %{{.+}} = cir.load align(4) atomic(acquire) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR: %{{.+}} = cir.load align(4) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR: }
+
+// LLVM-LABEL: @c11_load
+// LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+// LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} seq_cst, align 4
+// LLVM: }
+
+// OGCG-LABEL: @c11_load
+// OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+// OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} seq_cst, align 4
+// OGCG: }
+
+void store(int *ptr, int x) {
+ __atomic_store(ptr, &x, __ATOMIC_RELAXED);
+ __atomic_store(ptr, &x, __ATOMIC_RELEASE);
+ __atomic_store(ptr, &x, __ATOMIC_SEQ_CST);
+}
+
+// CIR-LABEL: @store
+// CIR: cir.store align(4) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) atomic(release) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) atomic(seq_cst) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+// LLVM-LABEL: @store
+// LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+// LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} release, align 4
+// LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} seq_cst, align 4
+// LLVM: }
+
+// OGCG-LABEL: @store
+// OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+// OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} release, align 4
+// OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} seq_cst, align 4
+// OGCG: }
+
+void store_n(int *ptr, int x) {
+ __atomic_store_n(ptr, x, __ATOMIC_RELAXED);
+ __atomic_store_n(ptr, x, __ATOMIC_RELEASE);
+ __atomic_store_n(ptr, x, __ATOMIC_SEQ_CST);
+}
+
+// CIR-LABEL: @store_n
+// CIR: cir.store align(4) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) atomic(release) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) atomic(seq_cst) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+// LLVM-LABEL: @store_n
+// LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+// LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} release, align 4
+// LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} seq_cst, align 4
+// LLVM: }
+
+// OGCG-LABEL: @store_n
+// OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+// OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} release, align 4
+// OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} seq_cst, align 4
+// OGCG: }
+
+void c11_store(_Atomic(int) *ptr, int x) {
+ __c11_atomic_store(ptr, x, __ATOMIC_RELAXED);
+ __c11_atomic_store(ptr, x, __ATOMIC_RELEASE);
+ __c11_atomic_store(ptr, x, __ATOMIC_SEQ_CST);
+}
+
+// CIR-LABEL: @c11_store
+// CIR: cir.store align(4) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) atomic(release) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) atomic(seq_cst) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+// LLVM-LABEL: @c11_store
+// LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+// LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} release, align 4
+// LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} seq_cst, align 4
+// LLVM: }
+
+// OGCG-LABEL: @c11_store
+// OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+// OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} release, align 4
+// OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} seq_cst, align 4
+// OGCG: }
+
diff --git a/clang/test/CIR/CodeGen/bitfield-union.c b/clang/test/CIR/CodeGen/bitfield-union.c
index b5d1454..14a2aaf 100644
--- a/clang/test/CIR/CodeGen/bitfield-union.c
+++ b/clang/test/CIR/CodeGen/bitfield-union.c
@@ -28,3 +28,44 @@ typedef union {
demo d;
zero_bit z;
+
+void f() {
+ demo d;
+ d.x = 1;
+ d.y = 2;
+ d.z = 0;
+}
+
+// CIR: #bfi_y = #cir.bitfield_info<name = "y", storage_type = !u8i, size = 4, offset = 0, is_signed = true>
+// CIR: #bfi_z = #cir.bitfield_info<name = "z", storage_type = !u8i, size = 8, offset = 0, is_signed = true>
+
+// CIR: cir.func no_proto dso_local @f
+// CIR: [[ALLOC:%.*]] = cir.alloca !rec_demo, !cir.ptr<!rec_demo>, ["d"] {alignment = 4 : i64}
+// CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i
+// CIR: [[X:%.*]] = cir.get_member [[ALLOC]][0] {name = "x"} : !cir.ptr<!rec_demo> -> !cir.ptr<!s32i>
+// CIR: cir.store align(4) [[ONE]], [[X]] : !s32i, !cir.ptr<!s32i>
+// CIR: [[TWO:%.*]] = cir.const #cir.int<2> : !s32i
+// CIR: [[Y:%.*]] = cir.get_member [[ALLOC]][1] {name = "y"} : !cir.ptr<!rec_demo> -> !cir.ptr<!u8i>
+// CIR: [[SET:%.*]] = cir.set_bitfield align(4) (#bfi_y, [[Y]] : !cir.ptr<!u8i>, [[TWO]] : !s32i) -> !s32i
+// CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i
+// CIR: [[Z:%.*]] = cir.get_member [[ALLOC]][2] {name = "z"} : !cir.ptr<!rec_demo> -> !cir.ptr<!u8i>
+// CIR: [[SET2:%.*]] = cir.set_bitfield align(4) (#bfi_z, [[Z]] : !cir.ptr<!u8i>, [[ZERO]] : !s32i) -> !s32i
+// CIR: cir.return
+
+// LLVM: define dso_local void @f
+// LLVM: [[ALLOC:%.*]] = alloca %union.demo, i64 1, align 4
+// LLVM: store i32 1, ptr [[ALLOC]], align 4
+// LLVM: [[BFLOAD:%.*]] = load i8, ptr [[ALLOC]], align 4
+// LLVM: [[CLEAR:%.*]] = and i8 [[BFLOAD]], -16
+// LLVM: [[SET:%.*]] = or i8 [[CLEAR]], 2
+// LLVM: store i8 [[SET]], ptr [[ALLOC]], align 4
+// LLVM: store i8 0, ptr [[ALLOC]], align 4
+
+// OGCG: define dso_local void @f
+// OGCG: [[ALLOC:%.*]] = alloca %union.demo, align 4
+// OGCG: store i32 1, ptr [[ALLOC]], align 4
+// OGCG: [[BFLOAD:%.*]] = load i8, ptr [[ALLOC]], align 4
+// OGCG: [[CLEAR:%.*]] = and i8 [[BFLOAD]], -16
+// OGCG: [[SET:%.*]] = or i8 [[CLEAR]], 2
+// OGCG: store i8 [[SET]], ptr [[ALLOC]], align 4
+// OGCG: store i8 0, ptr [[ALLOC]], align 4
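+
+// Note on the masking above: `y` is a 4-bit field at bit offset 0 of a
+// one-byte storage unit, so the assignment clears the low nibble
+// (and i8 -16, i.e. 0xF0) and ORs in the new value 2. `z` covers the whole
+// byte (size = 8), so assigning it is a plain i8 store.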
diff --git a/clang/test/CIR/CodeGen/builtin_call.cpp b/clang/test/CIR/CodeGen/builtin_call.cpp
index c266f1a..09be793 100644
--- a/clang/test/CIR/CodeGen/builtin_call.cpp
+++ b/clang/test/CIR/CodeGen/builtin_call.cpp
@@ -111,6 +111,38 @@ void assume(bool arg) {
// OGCG: call void @llvm.assume(i1 %{{.+}})
// OGCG: }
+void *assume_aligned(void *ptr) {
+ return __builtin_assume_aligned(ptr, 16);
+}
+
+// CIR: @_Z14assume_alignedPv
+// CIR: %{{.+}} = cir.assume_aligned %{{.+}} alignment 16 : !cir.ptr<!void>
+// CIR: }
+
+// LLVM: @_Z14assume_alignedPv
+// LLVM: call void @llvm.assume(i1 true) [ "align"(ptr %{{.+}}, i64 16) ]
+// LLVM: }
+
+// OGCG: @_Z14assume_alignedPv
+// OGCG: call void @llvm.assume(i1 true) [ "align"(ptr %{{.+}}, i64 16) ]
+// OGCG: }
+
+void *assume_aligned_misalignment(void *ptr, unsigned misalignment) {
+ return __builtin_assume_aligned(ptr, 16, misalignment);
+}
+
+// CIR: @_Z27assume_aligned_misalignmentPvj
+// CIR: %{{.+}} = cir.assume_aligned %{{.+}} alignment 16[offset %{{.+}} : !u64i] : !cir.ptr<!void>
+// CIR: }
+
+// LLVM: @_Z27assume_aligned_misalignmentPvj
+// LLVM: call void @llvm.assume(i1 true) [ "align"(ptr %{{.+}}, i64 16, i64 %{{.+}}) ]
+// LLVM: }
+
+// OGCG: @_Z27assume_aligned_misalignmentPvj
+// OGCG: call void @llvm.assume(i1 true) [ "align"(ptr %{{.+}}, i64 16, i64 %{{.+}}) ]
+// OGCG: }
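+
+// Note: the third argument asserts that `ptr` is `misalignment` bytes past a
+// 16-byte boundary, i.e. (char *)ptr - misalignment is 16-byte aligned; it
+// is carried as the extra i64 operand of the "align" operand bundle.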
+
void assume_separate_storage(void *p1, void *p2) {
__builtin_assume_separate_storage(p1, p2);
}
diff --git a/clang/test/CIR/CodeGen/builtin_printf.cpp b/clang/test/CIR/CodeGen/builtin_printf.cpp
index 43128e4..80875c3 100644
--- a/clang/test/CIR/CodeGen/builtin_printf.cpp
+++ b/clang/test/CIR/CodeGen/builtin_printf.cpp
@@ -5,10 +5,10 @@
// RUN: %clang_cc1 -std=c++11 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
-// CIR: cir.global "private" cir_private dso_local @".str" = #cir.const_array<"%s\00" : !cir.array<!s8i x 3>> : !cir.array<!s8i x 3>
-// CIR: cir.global "private" cir_private dso_local @".str.1" = #cir.const_array<"%s %d\0A\00" : !cir.array<!s8i x 7>> : !cir.array<!s8i x 7>
-// LLVM: @.str = private global [3 x i8] c"%s\00"
-// LLVM: @.str.1 = private global [7 x i8] c"%s %d\0A\00"
+// CIR: cir.global "private" constant cir_private dso_local @".str" = #cir.const_array<"%s\00" : !cir.array<!s8i x 3>> : !cir.array<!s8i x 3>
+// CIR: cir.global "private" constant cir_private dso_local @".str.1" = #cir.const_array<"%s %d\0A\00" : !cir.array<!s8i x 7>> : !cir.array<!s8i x 7>
+// LLVM: @.str = private constant [3 x i8] c"%s\00"
+// LLVM: @.str.1 = private constant [7 x i8] c"%s %d\0A\00"
// OGCG: @.str = private unnamed_addr constant [3 x i8] c"%s\00"
// OGCG: @.str.1 = private unnamed_addr constant [7 x i8] c"%s %d\0A\00"
diff --git a/clang/test/CIR/CodeGen/builtins.cpp b/clang/test/CIR/CodeGen/builtins.cpp
new file mode 100644
index 0000000..0e43480
--- /dev/null
+++ b/clang/test/CIR/CodeGen/builtins.cpp
@@ -0,0 +1,42 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+double fabs(double x) {
+ return __builtin_fabs(x);
+}
+
+// CIR: {{.*}} = cir.fabs {{.*}} : !cir.double
+// LLVM: {{.*}} = call double @llvm.fabs.f64(double {{.*}})
+// OGCG: {{.*}} = call double @llvm.fabs.f64(double {{.*}})
+
+extern "C" void *test_return_address(void) {
+ return __builtin_return_address(1);
+
+ // CIR-LABEL: test_return_address
+ // CIR: [[ARG:%.*]] = cir.const #cir.int<1> : !u32i
+ // CIR: {{%.*}} = cir.return_address([[ARG]])
+
+ // LLVM-LABEL: @test_return_address
+ // LLVM: {{%.*}} = call ptr @llvm.returnaddress(i32 1)
+
+ // OGCG-LABEL: @test_return_address
+ // OGCG: {{%.*}} = call ptr @llvm.returnaddress(i32 1)
+}
+
+extern "C" void *test_frame_address(void) {
+ return __builtin_frame_address(1);
+
+ // CIR-LABEL: test_frame_address
+ // CIR: [[ARG:%.*]] = cir.const #cir.int<1> : !u32i
+ // CIR: {{%.*}} = cir.frame_address([[ARG]])
+
+ // LLVM-LABEL: @test_frame_address
+ // LLVM: {{%.*}} = call ptr @llvm.frameaddress.p0(i32 1)
+
+ // OGCG-LABEL: @test_frame_address
+ // OGCG: {{%.*}} = call ptr @llvm.frameaddress.p0(i32 1)
+}
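+
+// Note: with a nonzero level these builtins walk up the call stack; the
+// result is only guaranteed meaningful for level 0, but level 1 is used here
+// to exercise lowering of the argument to the intrinsics.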
diff --git a/clang/test/CIR/CodeGen/call.cpp b/clang/test/CIR/CodeGen/call.cpp
index 43e5d15..3e8cfc1 100644
--- a/clang/test/CIR/CodeGen/call.cpp
+++ b/clang/test/CIR/CodeGen/call.cpp
@@ -116,4 +116,18 @@ void f14() {
// LLVM: call void @_Z3f13v() #[[LLVM_ATTR_0:.+]]
// LLVM: }
-// LLLVM: attributes #[[LLVM_ATTR_0]] = { nounwind }
+int f15();
+void f16() {
+ using T = int;
+ f15().~T();
+}
+
+// CIR-LABEL: @_Z3f16v
+// CIR-NEXT: %{{.+}} = cir.call @_Z3f15v() : () -> !s32i
+// CIR: }
+
+// LLVM-LABEL: define{{.+}} void @_Z3f16v() {
+// LLVM-NEXT: %{{.+}} = call i32 @_Z3f15v()
+// LLVM: }
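+
+// Note: `f15().~T()` with T = int is a pseudo-destructor call: the postfix
+// expression is evaluated for its side effects and the "destruction" itself
+// emits nothing, so both outputs reduce to the bare call to f15().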
+
+// LLVM: attributes #[[LLVM_ATTR_0]] = { nounwind }
diff --git a/clang/test/CIR/CodeGen/class.cpp b/clang/test/CIR/CodeGen/class.cpp
index 43dde12..eb9d5d7 100644
--- a/clang/test/CIR/CodeGen/class.cpp
+++ b/clang/test/CIR/CodeGen/class.cpp
@@ -100,3 +100,22 @@ int use_base_via_pointer(Derived *d) {
// OGCG: define{{.*}} i32 @_Z20use_base_via_pointerP7Derived
// OGCG: %[[D_A_ADDR:.*]] = getelementptr inbounds nuw %class.Base, ptr %{{.*}}, i32 0, i32 0
+
+struct EmptyDerived : Base {};
+struct EmptyDerived2 : EmptyDerived {};
+
+void use_empty_derived2() {
+ EmptyDerived2 d2;
+}
+
+// CIR: cir.func{{.*}} @_Z18use_empty_derived2v()
+// CIR: %0 = cir.alloca !rec_EmptyDerived2, !cir.ptr<!rec_EmptyDerived2>, ["d2"]
+// CIR: cir.return
+
+// LLVM: define{{.*}} void @_Z18use_empty_derived2v
+// LLVM: alloca %struct.EmptyDerived2
+// LLVM: ret void
+
+// OGCG: define{{.*}} void @_Z18use_empty_derived2v
+// OGCG: alloca %struct.EmptyDerived2
+// OGCG: ret void
diff --git a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
index 35a8aa6..82c0086 100644
--- a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
+++ b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
@@ -286,3 +286,456 @@ void foo4() {
// CXX_OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
// CXX_OGCG: store i32 %[[B_REAL]], ptr %[[C_REAL_PTR]], align 4
// CXX_OGCG: store i32 %[[B_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+void foo5() {
+ float _Complex a;
+ float b;
+ a += b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT_REAL:.*]] = cir.binop(add, %[[A_REAL]], %[[TMP_B]]) : !cir.float
+// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[A_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[RESULT_REAL:.*]] = fadd float %[[A_REAL]], %[[TMP_B]]
+// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[A_IMAG]], 1
+// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+// OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[ADD_REAL:.*]] = fadd float %[[A_REAL]], %[[TMP_B]]
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store float %[[ADD_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store float %[[A_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
+void foo6() {
+ int _Complex a;
+ int _Complex b;
+ b *= a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[MUL_BR_AR:.*]] = cir.binop(mul, %[[B_REAL]], %[[A_REAL]]) : !s32i
+// CIR: %[[MUL_BI_AI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[A_IMAG]]) : !s32i
+// CIR: %[[MUL_BR_AI:.*]] = cir.binop(mul, %[[B_REAL]], %[[A_IMAG]]) : !s32i
+// CIR: %[[MUL_BI_AR:.*]] = cir.binop(mul, %[[B_IMAG]], %[[A_REAL]]) : !s32i
+// CIR: %[[RESULT_REAL:.*]] = cir.binop(sub, %[[MUL_BR_AR]], %[[MUL_BI_AI]]) : !s32i
+// CIR: %[[RESULT_IMAG:.*]] = cir.binop(add, %[[MUL_BR_AI]], %[[MUL_BI_AR]]) : !s32i
+// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !s32i -> !cir.complex<!s32i>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 1
+// LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// LLVM: %[[MUL_BR_AR:.*]] = mul i32 %[[B_REAL]], %[[A_REAL]]
+// LLVM: %[[MUL_BI_AI:.*]] = mul i32 %[[B_IMAG]], %[[A_IMAG]]
+// LLVM: %[[MUL_BR_AI:.*]] = mul i32 %[[B_REAL]], %[[A_IMAG]]
+// LLVM: %[[MUL_BI_AR:.*]] = mul i32 %[[B_IMAG]], %[[A_REAL]]
+// LLVM: %[[RESULT_REAL:.*]] = sub i32 %[[MUL_BR_AR]], %[[MUL_BI_AI]]
+// LLVM: %[[RESULT_IMAG:.*]] = add i32 %[[MUL_BR_AI]], %[[MUL_BI_AR]]
+// LLVM: %[[MUL_A_B:.*]] = insertvalue { i32, i32 } {{.*}}, i32 %[[RESULT_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[MUL_A_B]], i32 %[[RESULT_IMAG]], 1
+// LLVM: store { i32, i32 } %[[RESULT]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load i32, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load i32, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[MUL_BR_AR:.*]] = mul i32 %[[B_REAL]], %[[A_REAL]]
+// OGCG: %[[MUL_BI_AI:.*]] = mul i32 %[[B_IMAG]], %[[A_IMAG]]
+// OGCG: %[[RESULT_REAL:.*]] = sub i32 %[[MUL_BR_AR]], %[[MUL_BI_AI]]
+// OGCG: %[[MUL_BI_AR:.*]] = mul i32 %[[B_IMAG]], %[[A_REAL]]
+// OGCG: %[[MUL_BR_AI:.*]] = mul i32 %[[B_REAL]], %[[A_IMAG]]
+// OGCG: %[[RESULT_IMAG:.*]] = add i32 %[[MUL_BI_AR]], %[[MUL_BR_AI]]
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store i32 %[[RESULT_REAL]], ptr %[[B_REAL_PTR]], align 4
+// OGCG: store i32 %[[RESULT_IMAG]], ptr %[[B_IMAG_PTR]], align 4
+
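+// The integer *= above follows the textbook expansion
+//   (br + bi*i) * (ar + ai*i) = (br*ar - bi*ai) + (br*ai + bi*ar)*i
+// which is exactly the four multiplications, one sub, and one add checked.
+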
+void foo7() {
+ float _Complex a;
+ float _Complex b;
+ b *= a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[MUL_BR_AR:.*]] = cir.binop(mul, %[[B_REAL]], %[[A_REAL]]) : !cir.float
+// CIR: %[[MUL_BI_AI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[A_IMAG]]) : !cir.float
+// CIR: %[[MUL_BR_AI:.*]] = cir.binop(mul, %[[B_REAL]], %[[A_IMAG]]) : !cir.float
+// CIR: %[[MUL_BI_AR:.*]] = cir.binop(mul, %[[B_IMAG]], %[[A_REAL]]) : !cir.float
+// CIR: %[[C_REAL:.*]] = cir.binop(sub, %[[MUL_BR_AR]], %[[MUL_BI_AI]]) : !cir.float
+// CIR: %[[C_IMAG:.*]] = cir.binop(add, %[[MUL_BR_AI]], %[[MUL_BI_AR]]) : !cir.float
+// CIR: %[[COMPLEX:.*]] = cir.complex.create %[[C_REAL]], %[[C_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: %[[IS_C_REAL_NAN:.*]] = cir.cmp(ne, %[[C_REAL]], %[[C_REAL]]) : !cir.float, !cir.bool
+// CIR: %[[IS_C_IMAG_NAN:.*]] = cir.cmp(ne, %[[C_IMAG]], %[[C_IMAG]]) : !cir.float, !cir.bool
+// CIR: %[[CONST_FALSE:.*]] = cir.const #false
+// CIR: %[[SELECT_CONDITION:.*]] = cir.select if %[[IS_C_REAL_NAN]] then %[[IS_C_IMAG_NAN]] else %[[CONST_FALSE]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool
+// CIR: %[[RESULT:.*]] = cir.ternary(%[[SELECT_CONDITION]], true {
+// CIR: %[[LIBC_COMPLEX:.*]] = cir.call @__mulsc3(%[[B_REAL]], %[[B_IMAG]], %[[A_REAL]], %[[A_IMAG]]) : (!cir.float, !cir.float, !cir.float, !cir.float) -> !cir.complex<!cir.float>
+// CIR: cir.yield %[[LIBC_COMPLEX]] : !cir.complex<!cir.float>
+// CIR: }, false {
+// CIR: cir.yield %[[COMPLEX]] : !cir.complex<!cir.float>
+// CIR: }) : (!cir.bool) -> !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[MUL_BR_AR:.*]] = fmul float %[[B_REAL]], %[[A_REAL]]
+// LLVM: %[[MUL_BI_AI:.*]] = fmul float %[[B_IMAG]], %[[A_IMAG]]
+// LLVM: %[[MUL_BR_AI:.*]] = fmul float %[[B_REAL]], %[[A_IMAG]]
+// LLVM: %[[MUL_BI_AR:.*]] = fmul float %[[B_IMAG]], %[[A_REAL]]
+// LLVM: %[[C_REAL:.*]] = fsub float %[[MUL_BR_AR]], %[[MUL_BI_AI]]
+// LLVM: %[[C_IMAG:.*]] = fadd float %[[MUL_BR_AI]], %[[MUL_BI_AR]]
+// LLVM: %[[MUL_A_B:.*]] = insertvalue { float, float } {{.*}}, float %[[C_REAL]], 0
+// LLVM: %[[COMPLEX:.*]] = insertvalue { float, float } %[[MUL_A_B]], float %[[C_IMAG]], 1
+// LLVM: %[[IS_C_REAL_NAN:.*]] = fcmp une float %[[C_REAL]], %[[C_REAL]]
+// LLVM: %[[IS_C_IMAG_NAN:.*]] = fcmp une float %[[C_IMAG]], %[[C_IMAG]]
+// LLVM: %[[SELECT_CONDITION:.*]] = and i1 %[[IS_C_REAL_NAN]], %[[IS_C_IMAG_NAN]]
+// LLVM: br i1 %[[SELECT_CONDITION]], label %[[THEN_LABEL:.*]], label %[[ELSE_LABEL:.*]]
+// LLVM: [[THEN_LABEL]]:
+// LLVM: %[[LIBC_COMPLEX:.*]] = call { float, float } @__mulsc3(float %[[B_REAL]], float %[[B_IMAG]], float %[[A_REAL]], float %[[A_IMAG]])
+// LLVM: br label %[[PHI_BRANCH:.*]]
+// LLVM: [[ELSE_LABEL]]:
+// LLVM: br label %[[PHI_BRANCH]]
+// LLVM: [[PHI_BRANCH]]:
+// LLVM: %[[RESULT:.*]] = phi { float, float } [ %[[COMPLEX]], %[[ELSE_LABEL]] ], [ %[[LIBC_COMPLEX]], %[[THEN_LABEL]] ]
+// LLVM: br label %[[END_LABEL:.*]]
+// LLVM: [[END_LABEL]]:
+// LLVM: store { float, float } %[[RESULT]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[COMPLEX_CALL_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[MUL_BR_AR:.*]] = fmul float %[[B_REAL]], %[[A_REAL]]
+// OGCG: %[[MUL_BI_AI:.*]] = fmul float %[[B_IMAG]], %[[A_IMAG]]
+// OGCG: %[[MUL_BR_AI:.*]] = fmul float %[[B_REAL]], %[[A_IMAG]]
+// OGCG: %[[MUL_BI_AR:.*]] = fmul float %[[B_IMAG]], %[[A_REAL]]
+// OGCG: %[[C_REAL:.*]] = fsub float %[[MUL_BR_AR]], %[[MUL_BI_AI]]
+// OGCG: %[[C_IMAG:.*]] = fadd float %[[MUL_BR_AI]], %[[MUL_BI_AR]]
+// OGCG: %[[IS_C_REAL_NAN:.*]] = fcmp uno float %[[C_REAL]], %[[C_REAL]]
+// OGCG: br i1 %[[IS_C_REAL_NAN]], label %[[COMPLEX_IS_IMAG_NAN:.*]], label %[[END_LABEL:.*]], !prof !2
+// OGCG: [[COMPLEX_IS_IMAG_NAN]]:
+// OGCG: %[[IS_C_IMAG_NAN:.*]] = fcmp uno float %[[C_IMAG]], %[[C_IMAG]]
+// OGCG: br i1 %[[IS_C_IMAG_NAN]], label %[[COMPLEX_LIB_CALL:.*]], label %[[END_LABEL]], !prof !2
+// OGCG: [[COMPLEX_LIB_CALL]]:
+// OGCG: %[[CALL_RESULT:.*]] = call{{.*}} <2 x float> @__mulsc3(float noundef %[[B_REAL]], float noundef %[[B_IMAG]], float noundef %[[A_REAL]], float noundef %[[A_IMAG]])
+// OGCG: store <2 x float> %[[CALL_RESULT]], ptr %[[COMPLEX_CALL_ADDR]], align 4
+// OGCG: %[[COMPLEX_CALL_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_CALL_ADDR]], i32 0, i32 0
+// OGCG: %[[COMPLEX_CALL_REAL:.*]] = load float, ptr %[[COMPLEX_CALL_REAL_PTR]], align 4
+// OGCG: %[[COMPLEX_CALL_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_CALL_ADDR]], i32 0, i32 1
+// OGCG: %[[COMPLEX_CALL_IMAG:.*]] = load float, ptr %[[COMPLEX_CALL_IMAG_PTR]], align 4
+// OGCG: br label %[[END_LABEL]]
+// OGCG: [[END_LABEL]]:
+// OGCG: %[[FINAL_REAL:.*]] = phi float [ %[[C_REAL]], %[[ENTRY:.*]] ], [ %[[C_REAL]], %[[COMPLEX_IS_IMAG_NAN]] ], [ %[[COMPLEX_CALL_REAL]], %[[COMPLEX_LIB_CALL]] ]
+// OGCG: %[[FINAL_IMAG:.*]] = phi float [ %[[C_IMAG]], %[[ENTRY]] ], [ %[[C_IMAG]], %[[COMPLEX_IS_IMAG_NAN]] ], [ %[[COMPLEX_CALL_IMAG]], %[[COMPLEX_LIB_CALL]] ]
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store float %[[FINAL_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG: store float %[[FINAL_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
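+// The ternary above implements the full contract for complex float
+// multiplication: the naive product is used unless both of its components
+// are NaN (x != x), in which case codegen falls back to the __mulsc3 libcall
+// for the C Annex G special-value handling.
+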
+void foo8() {
+ float _Complex a;
+ float b;
+ a *= b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT_REAL:.*]] = cir.binop(mul, %[[A_REAL]], %[[TMP_B]]) : !cir.float
+// CIR: %[[RESULT_IMAG:.*]] = cir.binop(mul, %[[A_IMAG]], %[[TMP_B]]) : !cir.float
+// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[RESULT_REAL:.*]] = fmul float %[[A_REAL]], %[[TMP_B]]
+// LLVM: %[[RESULT_IMAG:.*]] = fmul float %[[A_IMAG]], %[[TMP_B]]
+// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+// OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[RESULT_REAL:.*]] = fmul float %[[A_REAL]], %[[TMP_B]]
+// OGCG: %[[RESULT_IMAG:.*]] = fmul float %[[A_IMAG]], %[[TMP_B]]
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store float %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
+void foo10() {
+ float _Complex a;
+ float _Complex b;
+ a /= b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT:.*]] = cir.call @__divsc3(%[[A_REAL]], %[[A_IMAG]], %[[B_REAL]], %[[B_IMAG]]) : (!cir.float, !cir.float, !cir.float, !cir.float) -> !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[RESULT:.*]] = call { float, float } @__divsc3(float %[[A_REAL]], float %[[A_IMAG]], float %[[B_REAL]], float %[[B_IMAG]])
+// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[RESULT_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[RESULT:.*]] = call{{.*}} <2 x float> @__divsc3(float noundef %[[A_REAL]], float noundef %[[A_IMAG]], float noundef %[[B_REAL]], float noundef %[[B_IMAG]])
+// OGCG: store <2 x float> %[[RESULT]], ptr %[[RESULT_ADDR]], align 4
+// OGCG: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT_ADDR]], i32 0, i32 0
+// OGCG: %[[RESULT_REAL:.*]] = load float, ptr %[[RESULT_REAL_PTR]], align 4
+// OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT_ADDR]], i32 0, i32 1
+// OGCG: %[[RESULT_IMAG:.*]] = load float, ptr %[[RESULT_IMAG_PTR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store float %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
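+// Complex /= complex on float is not expanded inline: all three outputs
+// delegate to the __divsc3 libcall (single-precision complex division).
+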
+void foo11() {
+ float _Complex a;
+ float b;
+ a /= b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT_REAL:.*]] = cir.binop(div, %[[A_REAL]], %[[TMP_B]]) : !cir.float
+// CIR: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[A_IMAG]], %[[TMP_B]]) : !cir.float
+// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[RESULT_REAL:.*]] = fdiv float %[[A_REAL]], %[[TMP_B]]
+// LLVM: %[[RESULT_IMAG:.*]] = fdiv float %[[A_IMAG]], %[[TMP_B]]
+// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+// OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[RESULT_REAL:.*]] = fdiv float %[[A_REAL]], %[[TMP_B]]
+// OGCG: %[[RESULT_IMAG:.*]] = fdiv float %[[A_IMAG]], %[[TMP_B]]
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store float %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
+void foo12() {
+ int _Complex a;
+ int b;
+ a /= b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b"]
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: %[[B_COMPLEX:.*]] = cir.complex.create %[[TMP_B]], %[[CONST_0]] : !s32i -> !cir.complex<!s32i>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[B_REAL:.*]] = cir.complex.real %[[B_COMPLEX]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[B_COMPLEX]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_REAL]]) : !s32i
+// CIR: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_IMAG]]) : !s32i
+// CIR: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL]], %[[B_REAL]]) : !s32i
+// CIR: %[[MUL_BI_BI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[B_IMAG]]) : !s32i
+// CIR: %[[ADD_ARBR_AIBI:.*]] = cir.binop(add, %[[MUL_AR_BR]], %[[MUL_AI_BI]]) : !s32i
+// CIR: %[[ADD_BRBR_BIBI:.*]] = cir.binop(add, %[[MUL_BR_BR]], %[[MUL_BI_BI]]) : !s32i
+// CIR: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]) : !s32i
+// CIR: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_REAL]]) : !s32i
+// CIR: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_IMAG]]) : !s32i
+// CIR: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], %[[MUL_AR_BI]]) : !s32i
+// CIR: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]) : !s32i
+// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !s32i -> !cir.complex<!s32i>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// LLVM: %[[TMP_B_COMPLEX:.*]] = insertvalue { i32, i32 } {{.*}}, i32 %[[TMP_B]], 0
+// LLVM: %[[B_COMPLEX:.*]] = insertvalue { i32, i32 } %[[TMP_B_COMPLEX]], i32 0, 1
+// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// LLVM: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[TMP_B]]
+// LLVM: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], 0
+// LLVM: %[[MUL_BR_BR:.*]] = mul i32 %[[TMP_B]], %[[TMP_B]]
+// LLVM: %[[ADD_ARBR_AIBI:.*]] = add i32 %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// LLVM: %[[ADD_BRBR_BIBI:.*]] = add i32 %[[MUL_BR_BR]], 0
+// LLVM: %[[RESULT_REAL:.*]] = sdiv i32 %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// LLVM: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[TMP_B]]
+// LLVM: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], 0
+// LLVM: %[[SUB_AIBR_ARBI:.*]] = sub i32 %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// LLVM: %[[RESULT_IMAG:.*]] = sdiv i32 %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// LLVM: %[[TMP_RESULT:.*]] = insertvalue { i32, i32 } {{.*}}, i32 %[[RESULT_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[TMP_RESULT]], i32 %[[RESULT_IMAG]], 1
+// LLVM: store { i32, i32 } %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca i32, align 4
+// OGCG: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[TMP_B]]
+// OGCG: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], 0
+// OGCG: %[[ADD_ARBR_AIBI:.*]] = add i32 %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// OGCG: %[[MUL_BR_BR:.*]] = mul i32 %[[TMP_B]], %[[TMP_B]]
+// OGCG: %[[ADD_BRBR_BIBI:.*]] = add i32 %[[MUL_BR_BR]], 0
+// OGCG: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[TMP_B]]
+// OGCG: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], 0
+// OGCG: %[[SUB_AIBR_ARBI:.*]] = sub i32 %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// OGCG: %[[RESULT_REAL:.*]] = sdiv i32 %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// OGCG: %[[RESULT_IMAG:.*]] = sdiv i32 %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store i32 %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store i32 %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
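+// Integer complex division has no libcall; the checks above follow the
+// direct expansion with b widened to (b + 0i):
+//   real = (ar*br + ai*bi) / (br*br + bi*bi)
+//   imag = (ai*br - ar*bi) / (br*br + bi*bi)
+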
+#ifndef __cplusplus
+void foo9() {
+ float _Complex a;
+ float b;
+ b += a;
+}
+#endif
+
+// C_CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// C_CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
+// C_CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// C_CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// C_CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// C_CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// C_CIR: %[[NEW_REAL:.*]] = cir.binop(add, %[[TMP_B]], %[[A_REAL]]) : !cir.float
+// C_CIR: %[[RESULT:.*]] = cir.complex.create %[[NEW_REAL]], %[[A_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// C_CIR: %[[RESULT_REAL:.*]] = cir.complex.real %[[RESULT]] : !cir.complex<!cir.float> -> !cir.float
+// C_CIR: cir.store{{.*}} %[[RESULT_REAL]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float>
+
+// C_LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// C_LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// C_LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// C_LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// C_LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// C_LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// C_LLVM: %[[NEW_REAL:.*]] = fadd float %[[TMP_B]], %[[A_REAL]]
+// C_LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[NEW_REAL]], 0
+// C_LLVM: store float %[[NEW_REAL]], ptr %[[B_ADDR]], align 4
+
+// C_OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// C_OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+// C_OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// C_OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// C_OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// C_OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// C_OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// C_OGCG: %[[ADD_REAL:.*]] = fadd float %[[TMP_B]], %[[A_REAL]]
+// C_OGCG: store float %[[ADD_REAL]], ptr %[[B_ADDR]], align 4
\ No newline at end of file
diff --git a/clang/test/CIR/CodeGen/complex-mul-div.cpp b/clang/test/CIR/CodeGen/complex-mul-div.cpp
index 9d71ef7..d493046 100644
--- a/clang/test/CIR/CodeGen/complex-mul-div.cpp
+++ b/clang/test/CIR/CodeGen/complex-mul-div.cpp
@@ -1,38 +1,38 @@
// complex-range basic
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -complex-range=basic -Wno-unused-value -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-canonicalize -o %t.cir %s 2>&1 | FileCheck --check-prefix=CIR-BEFORE-BASIC %s
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=basic -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
-// RUN: FileCheck --input-file=%t.cir %s --check-prefixes=CIR-AFTER-INT,CIR-AFTER-MUL-COMBINED
+// RUN: FileCheck --input-file=%t.cir %s --check-prefixes=CIR-AFTER-INT,CIR-AFTER-MUL-COMBINED,CIR-COMBINED,CIR-AFTER-BASIC
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=basic -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
-// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM-INT,LLVM-MUL-COMBINED
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM-INT,LLVM-MUL-COMBINED,LLVM-COMBINED,LLVM-BASIC
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=basic -Wno-unused-value -emit-llvm %s -o %t.ll
-// RUN: FileCheck --input-file=%t.ll %s --check-prefixes=OGCG-INT,OGCG-MUL-COMBINED
+// RUN: FileCheck --input-file=%t.ll %s --check-prefixes=OGCG-INT,OGCG-MUL-COMBINED,OGCG-COMBINED,OGCG-BASIC
// complex-range improved
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -complex-range=improved -Wno-unused-value -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-canonicalize -o %t.cir %s 2>&1 | FileCheck --check-prefix=CIR-BEFORE-IMPROVED %s
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=improved -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
-// RUN: FileCheck --input-file=%t.cir %s --check-prefixes=CIR-AFTER-INT,CIR-AFTER-MUL-COMBINED
+// RUN: FileCheck --input-file=%t.cir %s --check-prefixes=CIR-AFTER-INT,CIR-AFTER-MUL-COMBINED,CIR-COMBINED,CIR-AFTER-IMPROVED
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=improved -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
-// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM-INT,LLVM-MUL-COMBINED
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM-INT,LLVM-MUL-COMBINED,LLVM-COMBINED,LLVM-IMPROVED
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=improved -Wno-unused-value -emit-llvm %s -o %t.ll
-// RUN: FileCheck --input-file=%t.ll %s --check-prefixes=OGCG-INT,OGCG-MUL-COMBINED
+// RUN: FileCheck --input-file=%t.ll %s --check-prefixes=OGCG-INT,OGCG-MUL-COMBINED,OGCG-COMBINED,OGCG-IMPROVED
// complex-range promoted
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -complex-range=promoted -Wno-unused-value -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-canonicalize -o %t.cir %s 2>&1 | FileCheck --check-prefix=CIR-BEFORE-PROMOTED %s
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=promoted -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
-// RUN: FileCheck --input-file=%t.cir %s --check-prefixes=CIR-AFTER-INT,CIR-AFTER-MUL-COMBINED
+// RUN: FileCheck --input-file=%t.cir %s --check-prefixes=CIR-AFTER-INT,CIR-AFTER-MUL-COMBINED,CIR-COMBINED,CIR-AFTER-PROMOTED
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=promoted -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
-// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM-INT,LLVM-MUL-COMBINED
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM-INT,LLVM-MUL-COMBINED,LLVM-COMBINED,LLVM-PROMOTED
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=promoted -Wno-unused-value -emit-llvm %s -o %t.ll
-// RUN: FileCheck --input-file=%t.ll %s --check-prefixes=OGCG-INT,OGCG-MUL-COMBINED
+// RUN: FileCheck --input-file=%t.ll %s --check-prefixes=OGCG-INT,OGCG-MUL-COMBINED,OGCG-COMBINED,OGCG-PROMOTED
// complex-range full
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -complex-range=full -Wno-unused-value -emit-cir -mmlir --mlir-print-ir-before=cir-canonicalize -o %t.cir %s 2>&1 | FileCheck --check-prefix=CIR-BEFORE-FULL %s
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=full -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
-// RUN: FileCheck --input-file=%t.cir %s --check-prefixes=CIR-AFTER-FULL,CIR-AFTER-INT
+// RUN: FileCheck --input-file=%t.cir %s --check-prefixes=CIR-AFTER-FULL,CIR-AFTER-INT,CIR-COMBINED
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=full -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
-// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM-FULL,LLVM-INT
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM-FULL,LLVM-INT,LLVM-COMBINED
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=full -Wno-unused-value -emit-llvm %s -o %t.ll
-// RUN: FileCheck --input-file=%t.ll %s --check-prefixes=OGCG-FULL,OGCG-INT
+// RUN: FileCheck --input-file=%t.ll %s --check-prefixes=OGCG-FULL,OGCG-INT,OGCG-COMBINED
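+
+// A note on the prefix scheme (as used by the RUN lines above): checks that
+// are identical across complex-range modes use the shared *-COMBINED
+// prefixes, enabled on every RUN line, while the *-BASIC, *-IMPROVED, and
+// *-PROMOTED prefixes cover output specific to a single mode.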
void foo() {
float _Complex a;
@@ -278,3 +278,984 @@ void foo1() {
// OGCG-INT: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
// OGCG-INT: store i32 %[[C_REAL]], ptr %[[C_REAL_PTR]], align 4
// OGCG-INT: store i32 %[[C_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+void foo2() {
+ float _Complex a;
+ float b;
+ float _Complex c = a * b;
+}
+
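+// The checks below use only the *-COMBINED prefixes: multiplying a complex
+// value by a real scalar lowers to two elementwise multiplies,
+// (ar + ai*i) * b = (ar*b) + (ai*b)*i, so the emitted IR is identical in
+// every complex-range mode.
+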
+// CIR-COMBINED: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-COMBINED: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
+// CIR-COMBINED: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-COMBINED: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-COMBINED: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// CIR-COMBINED: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-COMBINED: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-COMBINED: %[[RESULT_REAL:.*]] = cir.binop(mul, %[[A_REAL]], %[[TMP_B]]) : !cir.float
+// CIR-COMBINED: %[[RESULT_IMAG:.*]] = cir.binop(mul, %[[A_IMAG]], %[[TMP_B]]) : !cir.float
+// CIR-COMBINED: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-COMBINED: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-COMBINED: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-COMBINED: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM-COMBINED: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-COMBINED: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM-COMBINED: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// LLVM-COMBINED: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM-COMBINED: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM-COMBINED: %[[RESULT_REAL:.*]] = fmul float %[[A_REAL]], %[[TMP_B]]
+// LLVM-COMBINED: %[[RESULT_IMAG:.*]] = fmul float %[[A_IMAG]], %[[TMP_B]]
+// LLVM-COMBINED: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
+// LLVM-COMBINED: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM-COMBINED: store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-COMBINED: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-COMBINED: %[[B_ADDR:.*]] = alloca float, align 4
+// OGCG-COMBINED: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-COMBINED: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG-COMBINED: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG-COMBINED: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG-COMBINED: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG-COMBINED: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// OGCG-COMBINED: %[[RESULT_REAL:.*]] = fmul float %[[A_REAL]], %[[TMP_B]]
+// OGCG-COMBINED: %[[RESULT_IMAG:.*]] = fmul float %[[A_IMAG]], %[[TMP_B]]
+// OGCG-COMBINED: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-COMBINED: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-COMBINED: store float %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-COMBINED: store float %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+void foo3() {
+ float _Complex a;
+ float _Complex b;
+ float _Complex c = a / b;
+}
+
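+// The four complex-range modes lower a/b to different code, checked by the
+// per-mode prefixes below. In outline: basic emits the textbook formula
+//   a/b = ((ar*br + ai*bi) + (ai*br - ar*bi)i) / (br*br + bi*bi);
+// improved emits Smith's algorithm, branching on |br| >= |bi| and scaling by
+// bi/br or br/bi so the denominator cannot overflow prematurely; promoted
+// evaluates the basic formula in double and truncates the result back to
+// float; and full defers to the runtime helper __divsc3.
+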
+// CIR-BEFORE-BASIC: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(basic) : !cir.complex<!cir.float>
+
+// CIR-AFTER-BASIC: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-AFTER-BASIC: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-BASIC: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-BASIC: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-BASIC: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-BASIC: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-BASIC: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-BASIC: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-BASIC: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-BASIC: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_BI_BI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-BASIC: %[[ADD_ARBR_AIBI:.*]] = cir.binop(add, %[[MUL_AR_BR]], %[[MUL_AI_BI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[ADD_BRBR_BIBI:.*]] = cir.binop(add, %[[MUL_BR_BR]], %[[MUL_BI_BI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-BASIC: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], %[[MUL_AR_BI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-BASIC: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-BASIC: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-BASIC: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-BASIC: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-BASIC: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM-BASIC: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM-BASIC: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM-BASIC: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM-BASIC: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-BASIC: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-BASIC: %[[MUL_AR_BR:.*]] = fmul float %[[A_REAL]], %[[B_REAL]]
+// LLVM-BASIC: %[[MUL_AI_BI:.*]] = fmul float %[[A_IMAG]], %[[B_IMAG]]
+// LLVM-BASIC: %[[MUL_BR_BR:.*]] = fmul float %[[B_REAL]], %[[B_REAL]]
+// LLVM-BASIC: %[[MUL_BI_BI:.*]] = fmul float %[[B_IMAG]], %[[B_IMAG]]
+// LLVM-BASIC: %[[ADD_ARBR_AIBI:.*]] = fadd float %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// LLVM-BASIC: %[[ADD_BRBR_BIBI:.*]] = fadd float %[[MUL_BR_BR]], %[[MUL_BI_BI]]
+// LLVM-BASIC: %[[RESULT_REAL:.*]] = fdiv float %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// LLVM-BASIC: %[[MUL_AI_BR:.*]] = fmul float %[[A_IMAG]], %[[B_REAL]]
+// LLVM-BASIC: %[[MUL_AR_BI:.*]] = fmul float %[[A_REAL]], %[[B_IMAG]]
+// LLVM-BASIC: %[[SUB_AIBR_ARBI:.*]] = fsub float %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// LLVM-BASIC: %[[RESULT_IMAG:.*]] = fdiv float %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// LLVM-BASIC: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
+// LLVM-BASIC: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM-BASIC: store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-BASIC: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-BASIC: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-BASIC: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-BASIC: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG-BASIC: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG-BASIC: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG-BASIC: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG-BASIC: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-BASIC: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-BASIC: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-BASIC: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-BASIC: %[[MUL_AR_BR:.*]] = fmul float %[[A_REAL]], %[[B_REAL]]
+// OGCG-BASIC: %[[MUL_AI_BI:.*]] = fmul float %[[A_IMAG]], %[[B_IMAG]]
+// OGCG-BASIC: %[[ADD_ARBR_AIBI:.*]] = fadd float %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// OGCG-BASIC: %[[MUL_BR_BR:.*]] = fmul float %[[B_REAL]], %[[B_REAL]]
+// OGCG-BASIC: %[[MUL_BI_BI:.*]] = fmul float %[[B_IMAG]], %[[B_IMAG]]
+// OGCG-BASIC: %[[ADD_BRBR_BIBI:.*]] = fadd float %[[MUL_BR_BR]], %[[MUL_BI_BI]]
+// OGCG-BASIC: %[[MUL_AI_BR:.*]] = fmul float %[[A_IMAG]], %[[B_REAL]]
+// OGCG-BASIC: %[[MUL_AR_BI:.*]] = fmul float %[[A_REAL]], %[[B_IMAG]]
+// OGCG-BASIC: %[[SUB_AIBR_ARBI:.*]] = fsub float %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// OGCG-BASIC: %[[RESULT_REAL:.*]] = fdiv float %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// OGCG-BASIC: %[[RESULT_IMAG:.*]] = fdiv float %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// OGCG-BASIC: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-BASIC: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-BASIC: store float %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-BASIC: store float %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+// CIR-BEFORE-IMPROVED: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(improved) : !cir.complex<!cir.float>
+
+// CIR-AFTER-IMPROVED: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-AFTER-IMPROVED: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-IMPROVED: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-IMPROVED: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-IMPROVED: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-IMPROVED: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-IMPROVED: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-IMPROVED: %[[ABS_B_REAL:.*]] = cir.fabs %[[B_REAL]] : !cir.float
+// CIR-AFTER-IMPROVED: %[[ABS_B_IMAG:.*]] = cir.fabs %[[B_IMAG]] : !cir.float
+// CIR-AFTER-IMPROVED: %[[ABS_B_CMP:.*]] = cir.cmp(ge, %[[ABS_B_REAL]], %[[ABS_B_IMAG]]) : !cir.float, !cir.bool
+// CIR-AFTER-IMPROVED: %[[RESULT:.*]] = cir.ternary(%[[ABS_B_CMP]], true {
+// CIR-AFTER-IMPROVED: %[[DIV_BI_BR:.*]] = cir.binop(div, %[[B_IMAG]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[MUL_DIV_BIBR_BI:.*]] = cir.binop(mul, %[[DIV_BI_BR]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[ADD_BR_MUL_DIV_BIBR_BI:.*]] = cir.binop(add, %[[B_REAL]], %[[MUL_DIV_BIBR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[MUL_AI_DIV_BIBR:.*]] = cir.binop(mul, %[[A_IMAG]], %[[DIV_BI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[ADD_AR_MUL_AI_DIV_BIBR:.*]] = cir.binop(add, %[[A_REAL]], %[[MUL_AI_DIV_BIBR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_AR_MUL_AI_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[MUL_AR_DIV_BIBR:.*]] = cir.binop(mul, %[[A_REAL]], %[[DIV_BI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[SUB_AI_MUL_AR_DIV_BIBR:.*]] = cir.binop(sub, %[[A_IMAG]], %[[MUL_AR_DIV_BIBR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AI_MUL_AR_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[RESULT_COMPLEX:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: cir.yield %[[RESULT_COMPLEX]] : !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: }, false {
+// CIR-AFTER-IMPROVED: %[[DIV_BR_BI:.*]] = cir.binop(div, %[[B_REAL]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[MUL_DIV_BRBI_BR:.*]] = cir.binop(mul, %[[DIV_BR_BI]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[ADD_BI_MUL_DIV_BRBI_BR:.*]] = cir.binop(add, %[[B_IMAG]], %[[MUL_DIV_BRBI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[MUL_AR_DIV_BRBI:.*]] = cir.binop(mul, %[[A_REAL]], %[[DIV_BR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[ADD_MUL_AR_DIV_BRBI_AI:.*]] = cir.binop(add, %[[MUL_AR_DIV_BRBI]], %[[A_IMAG]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_MUL_AR_DIV_BRBI_AI]], %[[ADD_BI_MUL_DIV_BRBI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[MUL_AI_DIV_BRBI:.*]] = cir.binop(mul, %[[A_IMAG]], %[[DIV_BR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[SUB_MUL_AI_DIV_BRBI_AR:.*]] = cir.binop(sub, %[[MUL_AI_DIV_BRBI]], %[[A_REAL]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_MUL_AI_DIV_BRBI_AR]], %[[ADD_BI_MUL_DIV_BRBI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[RESULT_COMPLEX:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: cir.yield %[[RESULT_COMPLEX]] : !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: }) : (!cir.bool) -> !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-IMPROVED: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-IMPROVED: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-IMPROVED: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-IMPROVED: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM-IMPROVED: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM-IMPROVED: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM-IMPROVED: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM-IMPROVED: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-IMPROVED: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-IMPROVED: %[[ABS_B_REAL:.*]] = call float @llvm.fabs.f32(float %[[B_REAL]])
+// LLVM-IMPROVED: %[[ABS_B_IMAG:.*]] = call float @llvm.fabs.f32(float %[[B_IMAG]])
+// LLVM-IMPROVED: %[[ABS_B_CMP:.*]] = fcmp oge float %[[ABS_B_REAL]], %[[ABS_B_IMAG]]
+// LLVM-IMPROVED: br i1 %[[ABS_B_CMP]], label %[[ABS_BR_GT_ABS_BI:.*]], label %[[ABS_BR_LT_ABS_BI:.*]]
+// LLVM-IMPROVED: [[ABS_BR_GT_ABS_BI]]:
+// LLVM-IMPROVED: %[[DIV_BI_BR:.*]] = fdiv float %[[B_IMAG]], %[[B_REAL]]
+// LLVM-IMPROVED: %[[MUL_DIV_BIBR_BI:.*]] = fmul float %[[DIV_BI_BR]], %[[B_IMAG]]
+// LLVM-IMPROVED: %[[ADD_BR_MUL_DIV_BIBR_BI:.*]] = fadd float %[[B_REAL]], %[[MUL_DIV_BIBR_BI]]
+// LLVM-IMPROVED: %[[MUL_AI_DIV_BIBR:.*]] = fmul float %[[A_IMAG]], %[[DIV_BI_BR]]
+// LLVM-IMPROVED: %[[ADD_AR_MUL_AI_DIV_BIBR:.*]] = fadd float %[[A_REAL]], %[[MUL_AI_DIV_BIBR]]
+// LLVM-IMPROVED: %[[RESULT_REAL:.*]] = fdiv float %[[ADD_AR_MUL_AI_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]
+// LLVM-IMPROVED: %[[MUL_AR_DIV_BIBR:.*]] = fmul float %[[A_REAL]], %[[DIV_BI_BR]]
+// LLVM-IMPROVED: %[[SUB_AI_MUL_AR_DIV_BIBR:.*]] = fsub float %[[A_IMAG]], %[[MUL_AR_DIV_BIBR]]
+// LLVM-IMPROVED: %[[RESULT_IMAG:.*]] = fdiv float %[[SUB_AI_MUL_AR_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]
+// LLVM-IMPROVED: %[[TMP_THEN_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
+// LLVM-IMPROVED: %[[THEN_RESULT:.*]] = insertvalue { float, float } %[[TMP_THEN_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM-IMPROVED: br label %[[PHI_RESULT:.*]]
+// LLVM-IMPROVED: [[ABS_BR_LT_ABS_BI]]:
+// LLVM-IMPROVED: %[[DIV_BR_BI:.*]] = fdiv float %[[B_REAL]], %[[B_IMAG]]
+// LLVM-IMPROVED: %[[MUL_DIV_BRBI_BR:.*]] = fmul float %[[DIV_BR_BI]], %[[B_REAL]]
+// LLVM-IMPROVED: %[[ADD_BI_MUL_DIV_BRBI_BR:.*]] = fadd float %[[B_IMAG]], %[[MUL_DIV_BRBI_BR]]
+// LLVM-IMPROVED: %[[MUL_AR_DIV_BRBI:.*]] = fmul float %[[A_REAL]], %[[DIV_BR_BI]]
+// LLVM-IMPROVED: %[[ADD_MUL_AR_DIV_BRBI_AI:.*]] = fadd float %[[MUL_AR_DIV_BRBI]], %[[A_IMAG]]
+// LLVM-IMPROVED: %[[RESULT_REAL:.*]] = fdiv float %[[ADD_MUL_AR_DIV_BRBI_AI]], %[[ADD_BI_MUL_DIV_BRBI_BR]]
+// LLVM-IMPROVED: %[[MUL_AI_DIV_BRBI:.*]] = fmul float %[[A_IMAG]], %[[DIV_BR_BI]]
+// LLVM-IMPROVED: %[[SUB_MUL_AI_DIV_BRBI_AR:.*]] = fsub float %[[MUL_AI_DIV_BRBI]], %[[A_REAL]]
+// LLVM-IMPROVED: %[[RESULT_IMAG:.*]] = fdiv float %[[SUB_MUL_AI_DIV_BRBI_AR]], %[[ADD_BI_MUL_DIV_BRBI_BR]]
+// LLVM-IMPROVED: %[[TMP_ELSE_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
+// LLVM-IMPROVED: %[[ELSE_RESULT:.*]] = insertvalue { float, float } %[[TMP_ELSE_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM-IMPROVED: br label %[[PHI_RESULT]]
+// LLVM-IMPROVED: [[PHI_RESULT]]:
+// LLVM-IMPROVED: %[[RESULT:.*]] = phi { float, float } [ %[[ELSE_RESULT]], %[[ABS_BR_LT_ABS_BI]] ], [ %[[THEN_RESULT]], %[[ABS_BR_GT_ABS_BI]] ]
+// LLVM-IMPROVED: br label %[[STORE_RESULT:.*]]
+// LLVM-IMPROVED: [[STORE_RESULT]]:
+// LLVM-IMPROVED: store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-IMPROVED: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-IMPROVED: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-IMPROVED: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-IMPROVED: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG-IMPROVED: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG-IMPROVED: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG-IMPROVED: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG-IMPROVED: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-IMPROVED: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-IMPROVED: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-IMPROVED: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-IMPROVED: %[[ABS_B_REAL:.*]] = call float @llvm.fabs.f32(float %[[B_REAL]])
+// OGCG-IMPROVED: %[[ABS_B_IMAG:.*]] = call float @llvm.fabs.f32(float %[[B_IMAG]])
+// OGCG-IMPROVED: %[[ABS_B_CMP:.*]] = fcmp ugt float %[[ABS_B_REAL]], %[[ABS_B_IMAG]]
+// OGCG-IMPROVED: br i1 %[[ABS_B_CMP]], label %[[ABS_BR_GT_ABS_BI:.*]], label %[[ABS_BR_LT_ABS_BI:.*]]
+// OGCG-IMPROVED: [[ABS_BR_GT_ABS_BI]]:
+// OGCG-IMPROVED: %[[DIV_BI_BR:.*]] = fdiv float %[[B_IMAG]], %[[B_REAL]]
+// OGCG-IMPROVED: %[[MUL_DIV_BIBR_BI:.*]] = fmul float %[[DIV_BI_BR]], %[[B_IMAG]]
+// OGCG-IMPROVED: %[[ADD_BR_MUL_DIV_BIBR_BI:.*]] = fadd float %[[B_REAL]], %[[MUL_DIV_BIBR_BI]]
+// OGCG-IMPROVED: %[[MUL_AI_DIV_BIBR:.*]] = fmul float %[[A_IMAG]], %[[DIV_BI_BR]]
+// OGCG-IMPROVED: %[[ADD_AR_MUL_AI_DIV_BIBR:.*]] = fadd float %[[A_REAL]], %[[MUL_AI_DIV_BIBR]]
+// OGCG-IMPROVED: %[[THEN_RESULT_REAL:.*]] = fdiv float %[[ADD_AR_MUL_AI_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]
+// OGCG-IMPROVED: %[[MUL_AR_DIV_BIBR:.*]] = fmul float %[[A_REAL]], %[[DIV_BI_BR]]
+// OGCG-IMPROVED: %[[SUB_AI_MUL_AR_DIV_BIBR:.*]] = fsub float %[[A_IMAG]], %[[MUL_AR_DIV_BIBR]]
+// OGCG-IMPROVED: %[[THEN_RESULT_IMAG:.*]] = fdiv float %[[SUB_AI_MUL_AR_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]
+// OGCG-IMPROVED: br label %[[STORE_RESULT:.*]]
+// OGCG-IMPROVED: [[ABS_BR_LT_ABS_BI]]:
+// OGCG-IMPROVED: %[[DIV_BR_BI:.*]] = fdiv float %[[B_REAL]], %[[B_IMAG]]
+// OGCG-IMPROVED: %[[MUL_DIV_BRBI_BR:.*]] = fmul float %[[DIV_BR_BI]], %[[B_REAL]]
+// OGCG-IMPROVED: %[[ADD_BI_MUL_DIV_BRBI_BR:.*]] = fadd float %[[B_IMAG]], %[[MUL_DIV_BRBI_BR]]
+// OGCG-IMPROVED: %[[MUL_AR_DIV_BRBI:.*]] = fmul float %[[A_REAL]], %[[DIV_BR_BI]]
+// OGCG-IMPROVED: %[[ADD_MUL_AR_DIV_BRBI_AI:.*]] = fadd float %[[MUL_AR_DIV_BRBI]], %[[A_IMAG]]
+// OGCG-IMPROVED: %[[ELSE_RESULT_REAL:.*]] = fdiv float %[[ADD_MUL_AR_DIV_BRBI_AI]], %[[ADD_BI_MUL_DIV_BRBI_BR]]
+// OGCG-IMPROVED: %[[MUL_AI_DIV_BRBI:.*]] = fmul float %[[A_IMAG]], %[[DIV_BR_BI]]
+// OGCG-IMPROVED: %[[SUB_MUL_AI_DIV_BRBI_AR:.*]] = fsub float %[[MUL_AI_DIV_BRBI]], %[[A_REAL]]
+// OGCG-IMPROVED: %[[ELSE_RESULT_IMAG:.*]] = fdiv float %[[SUB_MUL_AI_DIV_BRBI_AR]], %[[ADD_BI_MUL_DIV_BRBI_BR]]
+// OGCG-IMPROVED: br label %[[STORE_RESULT]]
+// OGCG-IMPROVED: [[STORE_RESULT]]:
+// OGCG-IMPROVED: %[[RESULT_REAL:.*]] = phi float [ %[[THEN_RESULT_REAL]], %[[ABS_BR_GT_ABS_BI]] ], [ %[[ELSE_RESULT_REAL]], %[[ABS_BR_LT_ABS_BI]] ]
+// OGCG-IMPROVED: %[[RESULT_IMAG:.*]] = phi float [ %[[THEN_RESULT_IMAG]], %[[ABS_BR_GT_ABS_BI]] ], [ %[[ELSE_RESULT_IMAG]], %[[ABS_BR_LT_ABS_BI]] ]
+// OGCG-IMPROVED: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-IMPROVED: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-IMPROVED: store float %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-IMPROVED: store float %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+// CIR-BEFORE-PROMOTED: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(promoted) : !cir.complex<!cir.float>
+
+// CIR-AFTER-PROMOTED: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-AFTER-PROMOTED: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-PROMOTED: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-PROMOTED: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-PROMOTED: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-PROMOTED: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-PROMOTED: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-PROMOTED: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-PROMOTED: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-PROMOTED: %[[A_REAL_F64:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[A_IMAG_F64:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[B_REAL_F64:.*]] = cir.cast(floating, %[[B_REAL]] : !cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[B_IMAG_F64:.*]] = cir.cast(floating, %[[B_IMAG]] : !cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL_F64]], %[[B_REAL_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG_F64]], %[[B_IMAG_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL_F64]], %[[B_REAL_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_BI_BI:.*]] = cir.binop(mul, %[[B_IMAG_F64]], %[[B_IMAG_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[ADD_ARBR_AIBI:.*]] = cir.binop(add, %[[MUL_AR_BR]], %[[MUL_AI_BI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[ADD_BRBR_BIBI:.*]] = cir.binop(add, %[[MUL_BR_BR]], %[[MUL_BI_BI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG_F64]], %[[B_REAL_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL_F64]], %[[B_IMAG_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], %[[MUL_AR_BI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_F64:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.double -> !cir.complex<!cir.double>
+// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F64:.*]] = cir.complex.real %[[RESULT_F64]] : !cir.complex<!cir.double> -> !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F64:.*]] = cir.complex.imag %[[RESULT_F64]] : !cir.complex<!cir.double> -> !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F32:.*]] = cir.cast(floating, %[[RESULT_REAL_F64]] : !cir.double), !cir.float
+// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F32:.*]] = cir.cast(floating, %[[RESULT_IMAG_F64]] : !cir.double), !cir.float
+// CIR-AFTER-PROMOTED: %[[RESULT_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-PROMOTED: cir.store{{.*}} %[[RESULT_F32]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-PROMOTED: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-PROMOTED: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-PROMOTED: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-PROMOTED: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM-PROMOTED: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM-PROMOTED: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM-PROMOTED: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM-PROMOTED: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-PROMOTED: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-PROMOTED: %[[A_REAL_F64:.*]] = fpext float %[[A_REAL]] to double
+// LLVM-PROMOTED: %[[A_IMAG_F64:.*]] = fpext float %[[A_IMAG]] to double
+// LLVM-PROMOTED: %[[B_REAL_F64:.*]] = fpext float %[[B_REAL]] to double
+// LLVM-PROMOTED: %[[B_IMAG_F64:.*]] = fpext float %[[B_IMAG]] to double
+// LLVM-PROMOTED: %[[MUL_AR_BR:.*]] = fmul double %[[A_REAL_F64]], %[[B_REAL_F64]]
+// LLVM-PROMOTED: %[[MUL_AI_BI:.*]] = fmul double %[[A_IMAG_F64]], %[[B_IMAG_F64]]
+// LLVM-PROMOTED: %[[MUL_BR_BR:.*]] = fmul double %[[B_REAL_F64]], %[[B_REAL_F64]]
+// LLVM-PROMOTED: %[[MUL_BI_BI:.*]] = fmul double %[[B_IMAG_F64]], %[[B_IMAG_F64]]
+// LLVM-PROMOTED: %[[ADD_ARBR_AIBI:.*]] = fadd double %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// LLVM-PROMOTED: %[[ADD_BRBR_BIBI:.*]] = fadd double %[[MUL_BR_BR]], %[[MUL_BI_BI]]
+// LLVM-PROMOTED: %[[RESULT_REAL:.*]] = fdiv double %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// LLVM-PROMOTED: %[[MUL_AI_BR:.*]] = fmul double %[[A_IMAG_F64]], %[[B_REAL_F64]]
+// LLVM-PROMOTED: %[[MUL_AR_BI:.*]] = fmul double %[[A_REAL_F64]], %[[B_IMAG_F64]]
+// LLVM-PROMOTED: %[[SUB_AIBR_ARBI:.*]] = fsub double %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// LLVM-PROMOTED: %[[RESULT_IMAG:.*]] = fdiv double %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// LLVM-PROMOTED: %[[TMP_RESULT_F64:.*]] = insertvalue { double, double } {{.*}}, double %[[RESULT_REAL]], 0
+// LLVM-PROMOTED: %[[RESULT_F64:.*]] = insertvalue { double, double } %[[TMP_RESULT_F64]], double %[[RESULT_IMAG]], 1
+// LLVM-PROMOTED: %[[RESULT_REAL_F32:.*]] = fptrunc double %[[RESULT_REAL]] to float
+// LLVM-PROMOTED: %[[RESULT_IMAG_F32:.*]] = fptrunc double %[[RESULT_IMAG]] to float
+// LLVM-PROMOTED: %[[TMP_RESULT_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL_F32]], 0
+// LLVM-PROMOTED: %[[RESULT_F32:.*]] = insertvalue { float, float } %[[TMP_RESULT_F32]], float %[[RESULT_IMAG_F32]], 1
+// LLVM-PROMOTED: store { float, float } %[[RESULT_F32]], ptr %[[C_ADDR]], align 4
+
+// OGCG-PROMOTED: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-PROMOTED: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-PROMOTED: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-PROMOTED: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG-PROMOTED: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG-PROMOTED: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG-PROMOTED: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG-PROMOTED: %[[A_REAL_F64:.*]] = fpext float %[[A_REAL]] to double
+// OGCG-PROMOTED: %[[A_IMAG_F64:.*]] = fpext float %[[A_IMAG]] to double
+// OGCG-PROMOTED: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-PROMOTED: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-PROMOTED: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-PROMOTED: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-PROMOTED: %[[B_REAL_F64:.*]] = fpext float %[[B_REAL]] to double
+// OGCG-PROMOTED: %[[B_IMAG_F64:.*]] = fpext float %[[B_IMAG]] to double
+// OGCG-PROMOTED: %[[MUL_AR_BR:.*]] = fmul double %[[A_REAL_F64]], %[[B_REAL_F64]]
+// OGCG-PROMOTED: %[[MUL_AI_BI:.*]] = fmul double %[[A_IMAG_F64]], %[[B_IMAG_F64]]
+// OGCG-PROMOTED: %[[ADD_ARBR_AIBI:.*]] = fadd double %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// OGCG-PROMOTED: %[[MUL_BR_BR:.*]] = fmul double %[[B_REAL_F64]], %[[B_REAL_F64]]
+// OGCG-PROMOTED: %[[MUL_BI_BI:.*]] = fmul double %[[B_IMAG_F64]], %[[B_IMAG_F64]]
+// OGCG-PROMOTED: %[[ADD_BRBR_BIBI:.*]] = fadd double %[[MUL_BR_BR]], %[[MUL_BI_BI]]
+// OGCG-PROMOTED: %[[MUL_AI_BR:.*]] = fmul double %[[A_IMAG_F64]], %[[B_REAL_F64]]
+// OGCG-PROMOTED: %[[MUL_AR_BI:.*]] = fmul double %[[A_REAL_F64]], %[[B_IMAG_F64]]
+// OGCG-PROMOTED: %[[SUB_AIBR_ARBI:.*]] = fsub double %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// OGCG-PROMOTED: %[[RESULT_REAL:.*]] = fdiv double %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// OGCG-PROMOTED: %[[RESULT_IMAG:.*]] = fdiv double %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// OGCG-PROMOTED: %[[UNPROMOTION_RESULT_REAL:.*]] = fptrunc double %[[RESULT_REAL]] to float
+// OGCG-PROMOTED: %[[UNPROMOTION_RESULT_IMAG:.*]] = fptrunc double %[[RESULT_IMAG]] to float
+// OGCG-PROMOTED: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-PROMOTED: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-PROMOTED: store float %[[UNPROMOTION_RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-PROMOTED: store float %[[UNPROMOTION_RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+// CIR-BEFORE-FULL: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(full) : !cir.complex<!cir.float>
+
+// CIR-AFTER-FULL: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-AFTER-FULL: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-FULL: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-FULL: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-FULL: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-FULL: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[RESULT:.*]] = cir.call @__divsc3(%[[A_REAL]], %[[A_IMAG]], %[[B_REAL]], %[[B_IMAG]]) : (!cir.float, !cir.float, !cir.float, !cir.float) -> !cir.complex<!cir.float>
+// CIR-AFTER-FULL: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-FULL: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-FULL: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-FULL: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-FULL: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM-FULL: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM-FULL: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM-FULL: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM-FULL: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-FULL: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-FULL: %[[RESULT:.*]] = call { float, float } @__divsc3(float %[[A_REAL]], float %[[A_IMAG]], float %[[B_REAL]], float %[[B_IMAG]])
+// LLVM-FULL: store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-FULL: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[RESULT_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG-FULL: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG-FULL: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG-FULL: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-FULL: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-FULL: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-FULL: %[[RESULT:.*]] = call noundef <2 x float> @__divsc3(float noundef %[[A_REAL]], float noundef %[[A_IMAG]], float noundef %[[B_REAL]], float noundef %[[B_IMAG]]) #2
+// OGCG-FULL: store <2 x float> %[[RESULT]], ptr %[[RESULT_ADDR]], align 4
+// OGCG-FULL: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[RESULT_REAL:.*]] = load float, ptr %[[RESULT_REAL_PTR]], align 4
+// OGCG-FULL: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT_ADDR]], i32 0, i32 1
+// OGCG-FULL: %[[RESULT_IMAG:.*]] = load float, ptr %[[RESULT_IMAG_PTR]], align 4
+// OGCG-FULL: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-FULL: store float %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-FULL: store float %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+void foo4() {
+ int _Complex a;
+ int _Complex b;
+ int _Complex c = a / b;
+}
+
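+// For integer _Complex operands every complex-range mode lowers the division
+// the same way: the textbook formula evaluated with integer mul/add/sub and
+// sdiv, as the *-COMBINED checks below verify for all four RUN
+// configurations.
+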
+// CIR-BEFORE-BASIC: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(basic) : !cir.complex<!s32i>
+
+// CIR-BEFORE-IMPROVED: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(improved) : !cir.complex<!s32i>
+
+// CIR-BEFORE-PROMOTED: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(promoted) : !cir.complex<!s32i>
+
+// CIR-BEFORE-FULL: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(full) : !cir.complex<!s32i>
+
+// CIR-COMBINED: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR-COMBINED: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
+// CIR-COMBINED: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["c", init]
+// CIR-COMBINED: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR-COMBINED: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR-COMBINED: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR-COMBINED: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR-COMBINED: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!s32i> -> !s32i
+// CIR-COMBINED: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!s32i> -> !s32i
+// CIR-COMBINED: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_REAL]]) : !s32i
+// CIR-COMBINED: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_IMAG]]) : !s32i
+// CIR-COMBINED: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL]], %[[B_REAL]]) : !s32i
+// CIR-COMBINED: %[[MUL_BI_BI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[B_IMAG]]) : !s32i
+// CIR-COMBINED: %[[ADD_ARBR_AIBI:.*]] = cir.binop(add, %[[MUL_AR_BR]], %[[MUL_AI_BI]]) : !s32i
+// CIR-COMBINED: %[[ADD_BRBR_BIBI:.*]] = cir.binop(add, %[[MUL_BR_BR]], %[[MUL_BI_BI]]) : !s32i
+// CIR-COMBINED: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]) : !s32i
+// CIR-COMBINED: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_REAL]]) : !s32i
+// CIR-COMBINED: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_IMAG]]) : !s32i
+// CIR-COMBINED: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], %[[MUL_AR_BI]]) : !s32i
+// CIR-COMBINED: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]) : !s32i
+// CIR-COMBINED: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !s32i -> !cir.complex<!s32i>
+// CIR-COMBINED: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM-COMBINED: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM-COMBINED: %[[B_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM-COMBINED: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM-COMBINED: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM-COMBINED: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
+// LLVM-COMBINED: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// LLVM-COMBINED: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// LLVM-COMBINED: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
+// LLVM-COMBINED: %[[B_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 1
+// LLVM-COMBINED: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[B_REAL]]
+// LLVM-COMBINED: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], %[[B_IMAG]]
+// LLVM-COMBINED: %[[MUL_BR_BR:.*]] = mul i32 %[[B_REAL]], %[[B_REAL]]
+// LLVM-COMBINED: %[[MUL_BI_BI:.*]] = mul i32 %[[B_IMAG]], %[[B_IMAG]]
+// LLVM-COMBINED: %[[ADD_ARBR_AIBI:.*]] = add i32 %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// LLVM-COMBINED: %[[ADD_BRBR_BIBI:.*]] = add i32 %[[MUL_BR_BR]], %[[MUL_BI_BI]]
+// LLVM-COMBINED: %[[RESULT_REAL:.*]] = sdiv i32 %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// LLVM-COMBINED: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[B_REAL]]
+// LLVM-COMBINED: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], %[[B_IMAG]]
+// LLVM-COMBINED: %[[SUB_AIBR_ARBI:.*]] = sub i32 %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// LLVM-COMBINED: %[[RESULT_IMAG:.*]] = sdiv i32 %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// LLVM-COMBINED: %[[TMP_RESULT:.*]] = insertvalue { i32, i32 } {{.*}}, i32 %[[RESULT_REAL]], 0
+// LLVM-COMBINED: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[TMP_RESULT]], i32 %[[RESULT_IMAG]], 1
+// LLVM-COMBINED: store { i32, i32 } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-COMBINED: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG-COMBINED: %[[B_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG-COMBINED: %[[C_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG-COMBINED: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG-COMBINED: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
+// OGCG-COMBINED: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG-COMBINED: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
+// OGCG-COMBINED: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-COMBINED: %[[B_REAL:.*]] = load i32, ptr %[[B_REAL_PTR]], align 4
+// OGCG-COMBINED: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-COMBINED: %[[B_IMAG:.*]] = load i32, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-COMBINED: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[B_REAL]]
+// OGCG-COMBINED: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], %[[B_IMAG]]
+// OGCG-COMBINED: %[[ADD_ARBR_AIBI:.*]] = add i32 %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// OGCG-COMBINED: %[[MUL_BR_BR:.*]] = mul i32 %[[B_REAL]], %[[B_REAL]]
+// OGCG-COMBINED: %[[MUL_BI_BI:.*]] = mul i32 %[[B_IMAG]], %[[B_IMAG]]
+// OGCG-COMBINED: %[[ADD_BRBR_BIBI:.*]] = add i32 %[[MUL_BR_BR]], %[[MUL_BI_BI]]
+// OGCG-COMBINED: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[B_REAL]]
+// OGCG-COMBINED: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], %[[B_IMAG]]
+// OGCG-COMBINED: %[[SUB_AIBR_ARBI:.*]] = sub i32 %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// OGCG-COMBINED: %[[RESULT_REAL:.*]] = sdiv i32 %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// OGCG-COMBINED: %[[RESULT_IMAG:.*]] = sdiv i32 %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// OGCG-COMBINED: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-COMBINED: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-COMBINED: store i32 %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-COMBINED: store i32 %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+void foo5() {
+ float _Complex a;
+ float b;
+ float _Complex c = a / b;
+}
+
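+// Dividing a complex value by a real scalar needs no range-specific handling:
+// each component is divided by the scalar directly, so only the *-COMBINED
+// prefixes are checked here.
+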
+// CIR-COMBINED: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-COMBINED: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
+// CIR-COMBINED: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-COMBINED: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-COMBINED: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// CIR-COMBINED: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-COMBINED: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-COMBINED: %[[RESULT_REAL:.*]] = cir.binop(div, %[[A_REAL]], %[[TMP_B]]) : !cir.float
+// CIR-COMBINED: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[A_IMAG]], %[[TMP_B]]) : !cir.float
+// CIR-COMBINED: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-COMBINED: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-COMBINED: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-COMBINED: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM-COMBINED: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-COMBINED: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM-COMBINED: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// LLVM-COMBINED: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM-COMBINED: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM-COMBINED: %[[RESULT_REAL:.*]] = fdiv float %[[A_REAL]], %[[TMP_B]]
+// LLVM-COMBINED: %[[RESULT_IMAG:.*]] = fdiv float %[[A_IMAG]], %[[TMP_B]]
+// LLVM-COMBINED: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
+// LLVM-COMBINED: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM-COMBINED: store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-COMBINED: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-COMBINED: %[[B_ADDR:.*]] = alloca float, align 4
+// OGCG-COMBINED: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-COMBINED: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG-COMBINED: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG-COMBINED: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG-COMBINED: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG-COMBINED: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// OGCG-COMBINED: %[[RESULT_REAL:.*]] = fdiv float %[[A_REAL]], %[[TMP_B]]
+// OGCG-COMBINED: %[[RESULT_IMAG:.*]] = fdiv float %[[A_IMAG]], %[[TMP_B]]
+// OGCG-COMBINED: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-COMBINED: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-COMBINED: store float %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-COMBINED: store float %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+void foo6() {
+ float a;
+ float _Complex b;
+ float _Complex c = a / b;
+}
+
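+// Dividing a real scalar by a complex value first materializes the scalar as
+// a complex number with a zero imaginary part (the cir.complex.create /
+// insertvalue of 0.0 below), then applies the mode's usual complex-division
+// lowering.
+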
+// CIR-BEFORE-BASIC: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(basic) : !cir.complex<!cir.float>
+
+// CIR-AFTER-BASIC: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a"]
+// CIR-AFTER-BASIC: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-BASIC: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-BASIC: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// CIR-AFTER-BASIC: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-BASIC: %[[CONST_0:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+// CIR-AFTER-BASIC: %[[COMPLEX_A:.*]] = cir.complex.create %[[TMP_A]], %[[CONST_0]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-BASIC: %[[A_REAL:.*]] = cir.complex.real %[[COMPLEX_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-BASIC: %[[A_IMAG:.*]] = cir.complex.imag %[[COMPLEX_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-BASIC: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-BASIC: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-BASIC: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_BI_BI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-BASIC: %[[ADD_ARBR_AIBI:.*]] = cir.binop(add, %[[MUL_AR_BR]], %[[MUL_AI_BI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[ADD_BRBR_BIBI:.*]] = cir.binop(add, %[[MUL_BR_BR]], %[[MUL_BI_BI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-BASIC: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], %[[MUL_AR_BI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-BASIC: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-BASIC: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM-BASIC: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-BASIC: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-BASIC: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// LLVM-BASIC: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM-BASIC: %[[TMP_COMPLEX_A:.*]] = insertvalue { float, float } undef, float %[[TMP_A]], 0
+// LLVM-BASIC: %[[COMPLEX_A:.*]] = insertvalue { float, float } %[[TMP_COMPLEX_A]], float 0.000000e+00, 1
+// LLVM-BASIC: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-BASIC: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-BASIC: %[[MUL_AR_BR:.*]] = fmul float %[[TMP_A]], %[[B_REAL]]
+// LLVM-BASIC: %[[MUL_AI_BI:.*]] = fmul float 0.000000e+00, %[[B_IMAG]]
+// LLVM-BASIC: %[[MUL_BR_BR:.*]] = fmul float %[[B_REAL]], %[[B_REAL]]
+// LLVM-BASIC: %[[MUL_BI_BI:.*]] = fmul float %[[B_IMAG]], %[[B_IMAG]]
+// LLVM-BASIC: %[[ADD_ARBR_AIBI:.*]] = fadd float %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// LLVM-BASIC: %[[ADD_BRBR_BIBI:.*]] = fadd float %[[MUL_BR_BR]], %[[MUL_BI_BI]]
+// LLVM-BASIC: %[[RESULT_REAL:.*]] = fdiv float %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// LLVM-BASIC: %[[MUL_AI_BR:.*]] = fmul float 0.000000e+00, %[[B_REAL]]
+// LLVM-BASIC: %[[MUL_AR_BI:.*]] = fmul float %[[TMP_A]], %[[B_IMAG]]
+// LLVM-BASIC: %[[SUB_AIBR_ARBI:.*]] = fsub float %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// LLVM-BASIC: %[[RESULT_IMAG:.*]] = fdiv float %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// LLVM-BASIC: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
+// LLVM-BASIC: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM-BASIC: store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-BASIC: %[[A_ADDR:.*]] = alloca float, align 4
+// OGCG-BASIC: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-BASIC: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-BASIC: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// OGCG-BASIC: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-BASIC: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-BASIC: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-BASIC: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-BASIC: %[[MUL_AR_BR:.*]] = fmul float %[[TMP_A]], %[[B_REAL]]
+// OGCG-BASIC: %[[MUL_AI_BI:.*]] = fmul float 0.000000e+00, %[[B_IMAG]]
+// OGCG-BASIC: %[[ADD_ARBR_AIBI:.*]] = fadd float %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// OGCG-BASIC: %[[MUL_BR_BR:.*]] = fmul float %[[B_REAL]], %[[B_REAL]]
+// OGCG-BASIC: %[[MUL_BI_BI:.*]] = fmul float %[[B_IMAG]], %[[B_IMAG]]
+// OGCG-BASIC: %[[ADD_BRBR_BIBI:.*]] = fadd float %[[MUL_BR_BR]], %[[MUL_BI_BI]]
+// OGCG-BASIC: %[[MUL_AI_BR:.*]] = fmul float 0.000000e+00, %[[B_REAL]]
+// OGCG-BASIC: %[[MUL_AR_BI:.*]] = fmul float %[[TMP_A]], %[[B_IMAG]]
+// OGCG-BASIC: %[[SUB_AIBR_ARBI:.*]] = fsub float %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// OGCG-BASIC: %[[RESULT_REAL:.*]] = fdiv float %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// OGCG-BASIC: %[[RESULT_IMAG:.*]] = fdiv float %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// OGCG-BASIC: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-BASIC: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-BASIC: store float %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-BASIC: store float %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+// CIR-BEFORE-IMPROVED: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(improved) : !cir.complex<!cir.float>
+
+// CIR-AFTER-IMPROVED: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a"]
+// CIR-AFTER-IMPROVED: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-IMPROVED: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-IMPROVED: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// CIR-AFTER-IMPROVED: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: %[[CONST_0:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+// CIR-AFTER-IMPROVED: %[[COMPLEX_A:.*]] = cir.complex.create %[[TMP_A]], %[[CONST_0]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: %[[A_REAL:.*]] = cir.complex.real %[[COMPLEX_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-IMPROVED: %[[A_IMAG:.*]] = cir.complex.imag %[[COMPLEX_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-IMPROVED: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-IMPROVED: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-IMPROVED: %[[ABS_B_REAL:.*]] = cir.fabs %[[B_REAL]] : !cir.float
+// CIR-AFTER-IMPROVED: %[[ABS_B_IMAG:.*]] = cir.fabs %[[B_IMAG]] : !cir.float
+// CIR-AFTER-IMPROVED: %[[ABS_B_CMP:.*]] = cir.cmp(ge, %[[ABS_B_REAL]], %[[ABS_B_IMAG]]) : !cir.float, !cir.bool
+// CIR-AFTER-IMPROVED: %[[RESULT:.*]] = cir.ternary(%[[ABS_B_CMP]], true {
+// CIR-AFTER-IMPROVED: %[[DIV_BI_BR:.*]] = cir.binop(div, %[[B_IMAG]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[MUL_DIV_BIBR_BI:.*]] = cir.binop(mul, %[[DIV_BI_BR]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[ADD_BR_MUL_DIV_BIBR_BI:.*]] = cir.binop(add, %[[B_REAL]], %[[MUL_DIV_BIBR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[MUL_AI_DIV_BIBR:.*]] = cir.binop(mul, %[[A_IMAG]], %[[DIV_BI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[ADD_AR_MUL_AI_DIV_BIBR:.*]] = cir.binop(add, %[[A_REAL]], %[[MUL_AI_DIV_BIBR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_AR_MUL_AI_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[MUL_AR_DIV_BIBR:.*]] = cir.binop(mul, %[[A_REAL]], %[[DIV_BI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[SUB_AI_MUL_AR_DIV_BIBR:.*]] = cir.binop(sub, %[[A_IMAG]], %[[MUL_AR_DIV_BIBR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AI_MUL_AR_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[RESULT_COMPLEX:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: cir.yield %[[RESULT_COMPLEX]] : !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: }, false {
+// CIR-AFTER-IMPROVED: %[[DIV_BR_BI:.*]] = cir.binop(div, %[[B_REAL]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[MUL_DIV_BRBI_BR:.*]] = cir.binop(mul, %[[DIV_BR_BI]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[ADD_BI_MUL_DIV_BRBI_BR:.*]] = cir.binop(add, %[[B_IMAG]], %[[MUL_DIV_BRBI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[MUL_AR_DIV_BRBI:.*]] = cir.binop(mul, %[[A_REAL]], %[[DIV_BR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[ADD_MUL_AR_DIV_BRBI_AI:.*]] = cir.binop(add, %[[MUL_AR_DIV_BRBI]], %[[A_IMAG]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_MUL_AR_DIV_BRBI_AI]], %[[ADD_BI_MUL_DIV_BRBI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[MUL_AI_DIV_BRBI:.*]] = cir.binop(mul, %[[A_IMAG]], %[[DIV_BR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[SUB_MUL_AI_DIV_BRBI_AR:.*]] = cir.binop(sub, %[[MUL_AI_DIV_BRBI]], %[[A_REAL]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_MUL_AI_DIV_BRBI_AR]], %[[ADD_BI_MUL_DIV_BRBI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED: %[[RESULT_COMPLEX:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: cir.yield %[[RESULT_COMPLEX]] : !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: }) : (!cir.bool) -> !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-IMPROVED: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM-IMPROVED: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-IMPROVED: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-IMPROVED: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// LLVM-IMPROVED: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM-IMPROVED: %[[TMP_COMPLEX_A:.*]] = insertvalue { float, float } {{.*}}, float %[[TMP_A]], 0
+// LLVM-IMPROVED: %[[COMPLEX_A:.*]] = insertvalue { float, float } %[[TMP_COMPLEX_A]], float 0.000000e+00, 1
+// LLVM-IMPROVED: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-IMPROVED: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-IMPROVED: %[[ABS_B_REAL:.*]] = call float @llvm.fabs.f32(float %[[B_REAL]])
+// LLVM-IMPROVED: %[[ABS_B_IMAG:.*]] = call float @llvm.fabs.f32(float %[[B_IMAG]])
+// LLVM-IMPROVED: %[[ABS_B_CMP:.*]] = fcmp oge float %[[ABS_B_REAL]], %[[ABS_B_IMAG]]
+// LLVM-IMPROVED: br i1 %[[ABS_B_CMP]], label %[[ABS_BR_GT_ABS_BI:.*]], label %[[ABS_BR_LT_ABS_BI:.*]]
+// LLVM-IMPROVED: [[ABS_BR_GT_ABS_BI]]:
+// LLVM-IMPROVED: %[[DIV_BI_BR:.*]] = fdiv float %[[B_IMAG]], %[[B_REAL]]
+// LLVM-IMPROVED: %[[MUL_DIV_BIBR_BI:.*]] = fmul float %[[DIV_BI_BR]], %[[B_IMAG]]
+// LLVM-IMPROVED: %[[ADD_BR_MUL_DIV_BIBR_BI:.*]] = fadd float %[[B_REAL]], %[[MUL_DIV_BIBR_BI]]
+// LLVM-IMPROVED: %[[MUL_AI_DIV_BIBR:.*]] = fmul float 0.000000e+00, %[[DIV_BI_BR]]
+// LLVM-IMPROVED: %[[ADD_AR_MUL_AI_DIV_BIBR:.*]] = fadd float %[[TMP_A]], %[[MUL_AI_DIV_BIBR]]
+// LLVM-IMPROVED: %[[RESULT_REAL:.*]] = fdiv float %[[ADD_AR_MUL_AI_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]
+// LLVM-IMPROVED: %[[MUL_AR_DIV_BIBR:.*]] = fmul float %[[TMP_A]], %[[DIV_BI_BR]]
+// LLVM-IMPROVED: %[[SUB_AI_MUL_AR_DIV_BIBR:.*]] = fsub float 0.000000e+00, %[[MUL_AR_DIV_BIBR]]
+// LLVM-IMPROVED: %[[RESULT_IMAG:.*]] = fdiv float %[[SUB_AI_MUL_AR_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]
+// LLVM-IMPROVED: %[[TMP_THEN_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
+// LLVM-IMPROVED: %[[THEN_RESULT:.*]] = insertvalue { float, float } %[[TMP_THEN_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM-IMPROVED: br label %[[PHI_RESULT:.*]]
+// LLVM-IMPROVED: [[ABS_BR_LT_ABS_BI]]:
+// LLVM-IMPROVED: %[[DIV_BR_BI:.*]] = fdiv float %[[B_REAL]], %[[B_IMAG]]
+// LLVM-IMPROVED: %[[MUL_DIV_BRBI_BR:.*]] = fmul float %[[DIV_BR_BI]], %[[B_REAL]]
+// LLVM-IMPROVED: %[[ADD_BI_MUL_DIV_BRBI_BR:.*]] = fadd float %[[B_IMAG]], %[[MUL_DIV_BRBI_BR]]
+// LLVM-IMPROVED: %[[MUL_AR_DIV_BRBI:.*]] = fmul float %[[TMP_A]], %[[DIV_BR_BI]]
+// LLVM-IMPROVED: %[[ADD_MUL_AR_DIV_BRBI_AI:.*]] = fadd float %[[MUL_AR_DIV_BRBI]], 0.000000e+00
+// LLVM-IMPROVED: %[[RESULT_REAL:.*]] = fdiv float %[[ADD_MUL_AR_DIV_BRBI_AI]], %[[ADD_BI_MUL_DIV_BRBI_BR]]
+// LLVM-IMPROVED: %[[MUL_AI_DIV_BRBI:.*]] = fmul float 0.000000e+00, %[[DIV_BR_BI]]
+// LLVM-IMPROVED: %[[SUB_MUL_AI_DIV_BRBI_AR:.*]] = fsub float %[[MUL_AI_DIV_BRBI]], %[[TMP_A]]
+// LLVM-IMPROVED: %[[RESULT_IMAG:.*]] = fdiv float %[[SUB_MUL_AI_DIV_BRBI_AR]], %[[ADD_BI_MUL_DIV_BRBI_BR]]
+// LLVM-IMPROVED: %[[TMP_ELSE_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
+// LLVM-IMPROVED: %[[ELSE_RESULT:.*]] = insertvalue { float, float } %[[TMP_ELSE_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM-IMPROVED: br label %[[PHI_RESULT]]
+// LLVM-IMPROVED: [[PHI_RESULT]]:
+// LLVM-IMPROVED: %[[RESULT:.*]] = phi { float, float } [ %[[ELSE_RESULT]], %[[ABS_BR_LT_ABS_BI]] ], [ %[[THEN_RESULT]], %[[ABS_BR_GT_ABS_BI]] ]
+// LLVM-IMPROVED: br label %[[STORE_RESULT:.*]]
+// LLVM-IMPROVED: [[STORE_RESULT]]:
+// LLVM-IMPROVED: store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-IMPROVED: %[[A_ADDR:.*]] = alloca float, align 4
+// OGCG-IMPROVED: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-IMPROVED: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-IMPROVED: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// OGCG-IMPROVED: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-IMPROVED: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-IMPROVED: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-IMPROVED: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-IMPROVED: %[[ABS_B_REAL:.*]] = call float @llvm.fabs.f32(float %[[B_REAL]])
+// OGCG-IMPROVED: %[[ABS_B_IMAG:.*]] = call float @llvm.fabs.f32(float %[[B_IMAG]])
+// OGCG-IMPROVED: %[[ABS_B_CMP:.*]] = fcmp ugt float %[[ABS_B_REAL]], %[[ABS_B_IMAG]]
+// OGCG-IMPROVED: br i1 %[[ABS_B_CMP]], label %[[ABS_BR_GT_ABS_BI:.*]], label %[[ABS_BR_LT_ABS_BI:.*]]
+// OGCG-IMPROVED: [[ABS_BR_GT_ABS_BI]]:
+// OGCG-IMPROVED: %[[DIV_BI_BR:.*]] = fdiv float %[[B_IMAG]], %[[B_REAL]]
+// OGCG-IMPROVED: %[[MUL_DIV_BIBR_BI:.*]] = fmul float %[[DIV_BI_BR]], %[[B_IMAG]]
+// OGCG-IMPROVED: %[[ADD_BR_MUL_DIV_BIBR_BI:.*]] = fadd float %[[B_REAL]], %[[MUL_DIV_BIBR_BI]]
+// OGCG-IMPROVED: %[[MUL_AI_DIV_BIBR:.*]] = fmul float 0.000000e+00, %[[DIV_BI_BR]]
+// OGCG-IMPROVED: %[[ADD_AR_MUL_AI_DIV_BIBR:.*]] = fadd float %[[TMP_A]], %[[MUL_AI_DIV_BIBR]]
+// OGCG-IMPROVED: %[[THEN_RESULT_REAL:.*]] = fdiv float %[[ADD_AR_MUL_AI_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]
+// OGCG-IMPROVED: %[[MUL_AR_DIV_BIBR:.*]] = fmul float %[[TMP_A]], %[[DIV_BI_BR]]
+// OGCG-IMPROVED: %[[SUB_AI_MUL_AR_DIV_BIBR:.*]] = fsub float 0.000000e+00, %[[MUL_AR_DIV_BIBR]]
+// OGCG-IMPROVED: %[[THEN_RESULT_IMAG:.*]] = fdiv float %[[SUB_AI_MUL_AR_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]
+// OGCG-IMPROVED: br label %[[STORE_RESULT:.*]]
+// OGCG-IMPROVED: [[ABS_BR_LT_ABS_BI]]:
+// OGCG-IMPROVED: %[[DIV_BR_BI:.*]] = fdiv float %[[B_REAL]], %[[B_IMAG]]
+// OGCG-IMPROVED: %[[MUL_DIV_BRBI_BR:.*]] = fmul float %[[DIV_BR_BI]], %[[B_REAL]]
+// OGCG-IMPROVED: %[[ADD_BI_MUL_DIV_BRBI_BR:.*]] = fadd float %[[B_IMAG]], %[[MUL_DIV_BRBI_BR]]
+// OGCG-IMPROVED: %[[MUL_AR_DIV_BRBI:.*]] = fmul float %[[TMP_A]], %[[DIV_BR_BI]]
+// OGCG-IMPROVED: %[[ADD_MUL_AR_DIV_BRBI_AI:.*]] = fadd float %[[MUL_AR_DIV_BRBI]], 0.000000e+00
+// OGCG-IMPROVED: %[[ELSE_RESULT_REAL:.*]] = fdiv float %[[ADD_MUL_AR_DIV_BRBI_AI]], %[[ADD_BI_MUL_DIV_BRBI_BR]]
+// OGCG-IMPROVED: %[[MUL_AI_DIV_BRBI:.*]] = fmul float 0.000000e+00, %[[DIV_BR_BI]]
+// OGCG-IMPROVED: %[[SUB_MUL_AI_DIV_BRBI_AR:.*]] = fsub float %[[MUL_AI_DIV_BRBI]], %[[TMP_A]]
+// OGCG-IMPROVED: %[[ELSE_RESULT_IMAG:.*]] = fdiv float %[[SUB_MUL_AI_DIV_BRBI_AR]], %[[ADD_BI_MUL_DIV_BRBI_BR]]
+// OGCG-IMPROVED: br label %[[STORE_RESULT]]
+// OGCG-IMPROVED: [[STORE_RESULT]]:
+// OGCG-IMPROVED: %[[RESULT_REAL:.*]] = phi float [ %[[THEN_RESULT_REAL]], %[[ABS_BR_GT_ABS_BI]] ], [ %[[ELSE_RESULT_REAL]], %[[ABS_BR_LT_ABS_BI]] ]
+// OGCG-IMPROVED: %[[RESULT_IMAG:.*]] = phi float [ %[[THEN_RESULT_IMAG]], %[[ABS_BR_GT_ABS_BI]] ], [ %[[ELSE_RESULT_IMAG]], %[[ABS_BR_LT_ABS_BI]] ]
+// OGCG-IMPROVED: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-IMPROVED: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-IMPROVED: store float %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-IMPROVED: store float %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
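+// The 'promoted' lowering performs the straightforward algebraic division at
+// double precision and truncates the result back to float; the wider
+// intermediate type is what protects against overflow here.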
+// CIR-BEFORE-PROMOTED: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(promoted) : !cir.complex<!cir.float>
+
+// CIR-AFTER-PROMOTED: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a"]
+// CIR-AFTER-PROMOTED: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-PROMOTED: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-PROMOTED: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// CIR-AFTER-PROMOTED: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-PROMOTED: %[[CONST_0:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+// CIR-AFTER-PROMOTED: %[[COMPLEX_A:.*]] = cir.complex.create %[[TMP_A]], %[[CONST_0]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-PROMOTED: %[[A_REAL:.*]] = cir.complex.real %[[COMPLEX_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-PROMOTED: %[[A_IMAG:.*]] = cir.complex.imag %[[COMPLEX_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-PROMOTED: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-PROMOTED: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-PROMOTED: %[[A_REAL_F64:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[A_IMAG_F64:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[B_REAL_F64:.*]] = cir.cast(floating, %[[B_REAL]] : !cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[B_IMAG_F64:.*]] = cir.cast(floating, %[[B_IMAG]] : !cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL_F64]], %[[B_REAL_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG_F64]], %[[B_IMAG_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL_F64]], %[[B_REAL_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_BI_BI:.*]] = cir.binop(mul, %[[B_IMAG_F64]], %[[B_IMAG_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[ADD_ARBR_AIBI:.*]] = cir.binop(add, %[[MUL_AR_BR]], %[[MUL_AI_BI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[ADD_BRBR_BIBI:.*]] = cir.binop(add, %[[MUL_BR_BR]], %[[MUL_BI_BI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG_F64]], %[[B_REAL_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL_F64]], %[[B_IMAG_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], %[[MUL_AR_BI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_F64:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.double -> !cir.complex<!cir.double>
+// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F64:.*]] = cir.complex.real %[[RESULT_F64]] : !cir.complex<!cir.double> -> !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F64:.*]] = cir.complex.imag %[[RESULT_F64]] : !cir.complex<!cir.double> -> !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F32:.*]] = cir.cast(floating, %[[RESULT_REAL_F64]] : !cir.double), !cir.float
+// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F32:.*]] = cir.cast(floating, %[[RESULT_IMAG_F64]] : !cir.double), !cir.float
+// CIR-AFTER-PROMOTED: %[[RESULT_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-PROMOTED: cir.store{{.*}} %[[RESULT_F32]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-PROMOTED: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM-PROMOTED: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-PROMOTED: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-PROMOTED: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// LLVM-PROMOTED: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM-PROMOTED: %[[TMP_COMPLEX_A:.*]] = insertvalue { float, float } {{.*}}, float %[[TMP_A]], 0
+// LLVM-PROMOTED: %[[COMPLEX_A:.*]] = insertvalue { float, float } %[[TMP_COMPLEX_A]], float 0.000000e+00, 1
+// LLVM-PROMOTED: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-PROMOTED: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-PROMOTED: %[[A_REAL_F64:.*]] = fpext float %[[TMP_A]] to double
+// LLVM-PROMOTED: %[[B_REAL_F64:.*]] = fpext float %[[B_REAL]] to double
+// LLVM-PROMOTED: %[[B_IMAG_F64:.*]] = fpext float %[[B_IMAG]] to double
+// LLVM-PROMOTED: %[[MUL_AR_BR:.*]] = fmul double %[[A_REAL_F64]], %[[B_REAL_F64]]
+// LLVM-PROMOTED: %[[MUL_AI_BI:.*]] = fmul double 0.000000e+00, %[[B_IMAG_F64]]
+// LLVM-PROMOTED: %[[MUL_BR_BR:.*]] = fmul double %[[B_REAL_F64]], %[[B_REAL_F64]]
+// LLVM-PROMOTED: %[[MUL_BI_BI:.*]] = fmul double %[[B_IMAG_F64]], %[[B_IMAG_F64]]
+// LLVM-PROMOTED: %[[ADD_ARBR_AIBI:.*]] = fadd double %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// LLVM-PROMOTED: %[[ADD_BRBR_BIBI:.*]] = fadd double %[[MUL_BR_BR]], %[[MUL_BI_BI]]
+// LLVM-PROMOTED: %[[RESULT_REAL:.*]] = fdiv double %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// LLVM-PROMOTED: %[[MUL_AI_BR:.*]] = fmul double 0.000000e+00, %[[B_REAL_F64]]
+// LLVM-PROMOTED: %[[MUL_AR_BI:.*]] = fmul double %[[A_REAL_F64]], %[[B_IMAG_F64]]
+// LLVM-PROMOTED: %[[SUB_AIBR_ARBI:.*]] = fsub double %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// LLVM-PROMOTED: %[[RESULT_IMAG:.*]] = fdiv double %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// LLVM-PROMOTED: %[[TMP_RESULT_F64:.*]] = insertvalue { double, double } {{.*}}, double %[[RESULT_REAL]], 0
+// LLVM-PROMOTED: %[[RESULT_F64:.*]] = insertvalue { double, double } %[[TMP_RESULT_F64]], double %[[RESULT_IMAG]], 1
+// LLVM-PROMOTED: %[[RESULT_REAL_F32:.*]] = fptrunc double %[[RESULT_REAL]] to float
+// LLVM-PROMOTED: %[[RESULT_IMAG_F32:.*]] = fptrunc double %[[RESULT_IMAG]] to float
+// LLVM-PROMOTED: %[[TMP_RESULT_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL_F32]], 0
+// LLVM-PROMOTED: %[[RESULT_F32:.*]] = insertvalue { float, float } %[[TMP_RESULT_F32]], float %[[RESULT_IMAG_F32]], 1
+// LLVM-PROMOTED: store { float, float } %[[RESULT_F32]], ptr %[[C_ADDR]], align 4
+
+// OGCG-PROMOTED: %[[A_ADDR:.*]] = alloca float, align 4
+// OGCG-PROMOTED: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-PROMOTED: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-PROMOTED: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// OGCG-PROMOTED: %[[A_REAL_F64:.*]] = fpext float %[[TMP_A]] to double
+// OGCG-PROMOTED: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-PROMOTED: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-PROMOTED: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-PROMOTED: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-PROMOTED: %[[B_REAL_F64:.*]] = fpext float %[[B_REAL]] to double
+// OGCG-PROMOTED: %[[B_IMAG_F64:.*]] = fpext float %[[B_IMAG]] to double
+// OGCG-PROMOTED: %[[MUL_AR_BR:.*]] = fmul double %[[A_REAL_F64]], %[[B_REAL_F64]]
+// OGCG-PROMOTED: %[[MUL_AI_BI:.*]] = fmul double 0.000000e+00, %[[B_IMAG_F64]]
+// OGCG-PROMOTED: %[[ADD_ARBR_AIBI:.*]] = fadd double %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// OGCG-PROMOTED: %[[MUL_BR_BR:.*]] = fmul double %[[B_REAL_F64]], %[[B_REAL_F64]]
+// OGCG-PROMOTED: %[[MUL_BI_BI:.*]] = fmul double %[[B_IMAG_F64]], %[[B_IMAG_F64]]
+// OGCG-PROMOTED: %[[ADD_BRBR_BIBI:.*]] = fadd double %[[MUL_BR_BR]], %[[MUL_BI_BI]]
+// OGCG-PROMOTED: %[[MUL_AI_BR:.*]] = fmul double 0.000000e+00, %[[B_REAL_F64]]
+// OGCG-PROMOTED: %[[MUL_AR_BI:.*]] = fmul double %[[A_REAL_F64]], %[[B_IMAG_F64]]
+// OGCG-PROMOTED: %[[SUB_AIBR_ARBI:.*]] = fsub double %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// OGCG-PROMOTED: %[[RESULT_REAL:.*]] = fdiv double %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// OGCG-PROMOTED: %[[RESULT_IMAG:.*]] = fdiv double %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// OGCG-PROMOTED: %[[UNPROMOTION_RESULT_REAL:.*]] = fptrunc double %[[RESULT_REAL]] to float
+// OGCG-PROMOTED: %[[UNPROMOTION_RESULT_IMAG:.*]] = fptrunc double %[[RESULT_IMAG]] to float
+// OGCG-PROMOTED: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-PROMOTED: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-PROMOTED: store float %[[UNPROMOTION_RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-PROMOTED: store float %[[UNPROMOTION_RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
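+// The 'full' lowering calls the __divsc3 runtime helper, which handles the
+// NaN/infinity edge cases required by C Annex G.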
+// CIR-BEFORE-FULL: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(full) : !cir.complex<!cir.float>
+
+// CIR-AFTER-FULL: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a"]
+// CIR-AFTER-FULL: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-FULL: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-FULL: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// CIR-AFTER-FULL: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-FULL: %[[CONST_0:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+// CIR-AFTER-FULL: %[[COMPLEX_A:.*]] = cir.complex.create %[[TMP_A]], %[[CONST_0]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-FULL: %[[A_REAL:.*]] = cir.complex.real %[[COMPLEX_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[A_IMAG:.*]] = cir.complex.imag %[[COMPLEX_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[RESULT:.*]] = cir.call @__divsc3(%[[A_REAL]], %[[A_IMAG]], %[[B_REAL]], %[[B_IMAG]]) : (!cir.float, !cir.float, !cir.float, !cir.float) -> !cir.complex<!cir.float>
+// CIR-AFTER-FULL: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-FULL: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM-FULL: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-FULL: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-FULL: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// LLVM-FULL: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM-FULL: %[[TMP_COMPLEX_A:.*]] = insertvalue { float, float } {{.*}}, float %[[TMP_A]], 0
+// LLVM-FULL: %[[COMPLEX_A:.*]] = insertvalue { float, float } %[[TMP_COMPLEX_A]], float 0.000000e+00, 1
+// LLVM-FULL: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-FULL: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-FULL: %[[RESULT:.*]] = call { float, float } @__divsc3(float %[[TMP_A]], float 0.000000e+00, float %[[B_REAL]], float %[[B_IMAG]])
+// LLVM-FULL: store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-FULL: %[[A_ADDR:.*]] = alloca float, align 4
+// OGCG-FULL: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[RESULT_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// OGCG-FULL: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-FULL: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-FULL: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-FULL: %[[RESULT:.*]] = call noundef <2 x float> @__divsc3(float noundef %[[TMP_A]], float noundef 0.000000e+00, float noundef %[[B_REAL]], float noundef %[[B_IMAG]])
+// OGCG-FULL: store <2 x float> %[[RESULT]], ptr %[[RESULT_ADDR]], align 4
+// OGCG-FULL: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[RESULT_REAL:.*]] = load float, ptr %[[RESULT_REAL_PTR]], align 4
+// OGCG-FULL: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT_ADDR]], i32 0, i32 1
+// OGCG-FULL: %[[RESULT_IMAG:.*]] = load float, ptr %[[RESULT_IMAG_PTR]], align 4
+// OGCG-FULL: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-FULL: store float %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-FULL: store float %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+void foo7() {
+ int _Complex a;
+ int b;
+ int _Complex c = a / b;
+}
+
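+// For integer _Complex operands every range mode lowers to the same
+// algebraic expansion, so the lowered output is checked once under the
+// shared *-COMBINED prefixes below.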
+// CIR-BEFORE-BASIC: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(basic) : !cir.complex<!s32i>
+
+// CIR-BEFORE-IMPROVED: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(improved) : !cir.complex<!s32i>
+
+// CIR-BEFORE-PROMOTED: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(promoted) : !cir.complex<!s32i>
+
+// CIR-BEFORE-FULL: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(full) : !cir.complex<!s32i>
+
+// CIR-COMBINED: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR-COMBINED: %[[B_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b"]
+// CIR-COMBINED: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["c", init]
+// CIR-COMBINED: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR-COMBINED: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR-COMBINED: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i
+// CIR-COMBINED: %[[COMPLEX_B:.*]] = cir.complex.create %[[TMP_B]], %[[CONST_0]] : !s32i -> !cir.complex<!s32i>
+// CIR-COMBINED: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR-COMBINED: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR-COMBINED: %[[B_REAL:.*]] = cir.complex.real %[[COMPLEX_B]] : !cir.complex<!s32i> -> !s32i
+// CIR-COMBINED: %[[B_IMAG:.*]] = cir.complex.imag %[[COMPLEX_B]] : !cir.complex<!s32i> -> !s32i
+// CIR-COMBINED: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_REAL]]) : !s32i
+// CIR-COMBINED: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_IMAG]]) : !s32i
+// CIR-COMBINED: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL]], %[[B_REAL]]) : !s32i
+// CIR-COMBINED: %[[MUL_BI_BI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[B_IMAG]]) : !s32i
+// CIR-COMBINED: %[[ADD_ARBR_AIBI:.*]] = cir.binop(add, %[[MUL_AR_BR]], %[[MUL_AI_BI]]) : !s32i
+// CIR-COMBINED: %[[ADD_BRBR_BIBI:.*]] = cir.binop(add, %[[MUL_BR_BR]], %[[MUL_BI_BI]]) : !s32i
+// CIR-COMBINED: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]) : !s32i
+// CIR-COMBINED: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_REAL]]) : !s32i
+// CIR-COMBINED: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_IMAG]]) : !s32i
+// CIR-COMBINED: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], %[[MUL_AR_BI]]) : !s32i
+// CIR-COMBINED: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]) : !s32i
+// CIR-COMBINED: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !s32i -> !cir.complex<!s32i>
+// CIR-COMBINED: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM-COMBINED: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM-COMBINED: %[[B_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM-COMBINED: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM-COMBINED: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM-COMBINED: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// LLVM-COMBINED: %[[TMP_COMPLEX_B:.*]] = insertvalue { i32, i32 } {{.*}}, i32 %[[TMP_B]], 0
+// LLVM-COMBINED: %[[COMPLEX_B:.*]] = insertvalue { i32, i32 } %[[TMP_COMPLEX_B]], i32 0, 1
+// LLVM-COMBINED: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// LLVM-COMBINED: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// LLVM-COMBINED: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[TMP_B]]
+// LLVM-COMBINED: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], 0
+// LLVM-COMBINED: %[[MUL_BR_BR:.*]] = mul i32 %[[TMP_B]], %[[TMP_B]]
+// LLVM-COMBINED: %[[ADD_ARBR_AIBI:.*]] = add i32 %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// LLVM-COMBINED: %[[ADD_BRBR_BIBI:.*]] = add i32 %[[MUL_BR_BR]], 0
+// LLVM-COMBINED: %[[RESULT_REAL:.*]] = sdiv i32 %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// LLVM-COMBINED: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[TMP_B]]
+// LLVM-COMBINED: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], 0
+// LLVM-COMBINED: %[[SUB_AIBR_ARBI:.*]] = sub i32 %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// LLVM-COMBINED: %[[RESULT_IMAG:.*]] = sdiv i32 %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// LLVM-COMBINED: %[[TMP_RESULT:.*]] = insertvalue { i32, i32 } {{.*}}, i32 %[[RESULT_REAL]], 0
+// LLVM-COMBINED: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[TMP_RESULT]], i32 %[[RESULT_IMAG]], 1
+// LLVM-COMBINED: store { i32, i32 } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-COMBINED: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG-COMBINED: %[[B_ADDR:.*]] = alloca i32, align 4
+// OGCG-COMBINED: %[[C_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG-COMBINED: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG-COMBINED: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
+// OGCG-COMBINED: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG-COMBINED: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
+// OGCG-COMBINED: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// OGCG-COMBINED: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[TMP_B]]
+// OGCG-COMBINED: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], 0
+// OGCG-COMBINED: %[[ADD_ARBR_AIBI:.*]] = add i32 %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// OGCG-COMBINED: %[[MUL_BR_BR:.*]] = mul i32 %[[TMP_B]], %[[TMP_B]]
+// OGCG-COMBINED: %[[ADD_BRBR_BIBI:.*]] = add i32 %[[MUL_BR_BR]], 0
+// OGCG-COMBINED: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[TMP_B]]
+// OGCG-COMBINED: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], 0
+// OGCG-COMBINED: %[[SUB_AIBR_ARBI:.*]] = sub i32 %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// OGCG-COMBINED: %[[RESULT_REAL:.*]] = sdiv i32 %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// OGCG-COMBINED: %[[RESULT_IMAG:.*]] = sdiv i32 %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// OGCG-COMBINED: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-COMBINED: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-COMBINED: store i32 %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-COMBINED: store i32 %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
diff --git a/clang/test/CIR/CodeGen/complex-unary.cpp b/clang/test/CIR/CodeGen/complex-unary.cpp
index 4cd81eb..e945d9b 100644
--- a/clang/test/CIR/CodeGen/complex-unary.cpp
+++ b/clang/test/CIR/CodeGen/complex-unary.cpp
@@ -370,3 +370,138 @@ void foo8() {
// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
// OGCG: store float %[[A_REAL_MINUS]], ptr %[[B_REAL_PTR]], align 4
// OGCG: store float %[[A_IMAG_MINUS]], ptr %[[B_IMAG_PTR]], align 4
+
+void foo9() {
+ _Float16 _Complex a;
+ _Float16 _Complex b = +a;
+}
+
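+// _Float16 complex operands are promoted for arithmetic: the value is
+// widened to !cir.complex<!cir.float>, the unary operator is applied
+// componentwise, and the result is truncated back to f16.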
+// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast(float_complex, %[[TMP_A]] : !cir.complex<!cir.f16>), !cir.complex<!cir.float>
+// CIR-BEFORE: %[[RESULT:.*]] = cir.unary(plus, %[[A_COMPLEX_F32]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast(float_complex, %[[RESULT]] : !cir.complex<!cir.float>), !cir.complex<!cir.f16>
+// CIR-BEFORE: cir.store{{.*}} %[[A_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.unary(plus, %[[A_REAL_F32]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.unary(plus, %[[A_IMAG_F32]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
+// CIR-AFTER: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[B_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2
+// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1
+// LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
+// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
+// LLVM: %[[TMP_RESULT_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
+// LLVM: %[[RESULT_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_RESULT_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
+// LLVM: %[[A_REAL_F16:.*]] = fptrunc float %[[A_REAL_F32]] to half
+// LLVM: %[[A_IMAG_F16:.*]] = fptrunc float %[[A_IMAG_F32]] to half
+// LLVM: %[[TMP_RESULT_COMPLEX_F16:.*]] = insertvalue { half, half } {{.*}}, half %[[A_REAL_F16]], 0
+// LLVM: %[[RESULT_COMPLEX_F16:.*]] = insertvalue { half, half } %[[TMP_RESULT_COMPLEX_F16]], half %[[A_IMAG_F16]], 1
+// LLVM: store { half, half } %[[RESULT_COMPLEX_F16]], ptr %[[B_ADDR]], align 2
+
+// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[B_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load half, ptr %[[A_REAL_PTR]], align 2
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load half, ptr %[[A_IMAG_PTR]], align 2
+// OGCG: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// OGCG: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// OGCG: %[[RESULT_REAL:.*]] = fptrunc float %[[A_REAL_F32]] to half
+// OGCG: %[[RESULT_IMAG:.*]] = fptrunc float %[[A_IMAG_F32]] to half
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store half %[[RESULT_REAL]], ptr %[[B_REAL_PTR]], align 2
+// OGCG: store half %[[RESULT_IMAG]], ptr %[[B_IMAG_PTR]], align 2
+
+void foo10() {
+ _Float16 _Complex a;
+ _Float16 _Complex b = -a;
+}
+
+// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast(float_complex, %[[TMP_A]] : !cir.complex<!cir.f16>), !cir.complex<!cir.float>
+// CIR-BEFORE: %[[RESULT:.*]] = cir.unary(minus, %[[A_COMPLEX_F32]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast(float_complex, %[[RESULT]] : !cir.complex<!cir.float>), !cir.complex<!cir.f16>
+// CIR-BEFORE: cir.store{{.*}} %[[A_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.unary(minus, %[[A_REAL_F32]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.unary(minus, %[[A_IMAG_F32]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
+// CIR-AFTER: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[B_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2
+// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1
+// LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
+// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
+// LLVM: %[[RESULT_REAL_F32:.*]] = fneg float %[[A_REAL_F32]]
+// LLVM: %[[RESULT_IMAG_F32:.*]] = fneg float %[[A_IMAG_F32]]
+// LLVM: %[[TMP_RESULT_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL_F32]], 0
+// LLVM: %[[RESULT_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_RESULT_COMPLEX_F32]], float %[[RESULT_IMAG_F32]], 1
+// LLVM: %[[RESULT_REAL_F16:.*]] = fptrunc float %[[RESULT_REAL_F32]] to half
+// LLVM: %[[RESULT_IMAG_F16:.*]] = fptrunc float %[[RESULT_IMAG_F32]] to half
+// LLVM: %[[TMP_RESULT_COMPLEX_F16:.*]] = insertvalue { half, half } {{.*}}, half %[[RESULT_REAL_F16]], 0
+// LLVM: %[[RESULT_COMPLEX_F16:.*]] = insertvalue { half, half } %[[TMP_RESULT_COMPLEX_F16]], half %[[RESULT_IMAG_F16]], 1
+// LLVM: store { half, half } %[[RESULT_COMPLEX_F16]], ptr %[[B_ADDR]], align 2
+
+// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[B_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load half, ptr %[[A_REAL_PTR]], align 2
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load half, ptr %[[A_IMAG_PTR]], align 2
+// OGCG: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// OGCG: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// OGCG: %[[RESULT_REAL_F32:.*]] = fneg float %[[A_REAL_F32]]
+// OGCG: %[[RESULT_IMAG_F32:.*]] = fneg float %[[A_IMAG_F32]]
+// OGCG: %[[RESULT_REAL:.*]] = fptrunc float %[[RESULT_REAL_F32]] to half
+// OGCG: %[[RESULT_IMAG:.*]] = fptrunc float %[[RESULT_IMAG_F32]] to half
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store half %[[RESULT_REAL]], ptr %[[B_REAL_PTR]], align 2
+// OGCG: store half %[[RESULT_IMAG]], ptr %[[B_IMAG_PTR]], align 2
diff --git a/clang/test/CIR/CodeGen/complex.cpp b/clang/test/CIR/CodeGen/complex.cpp
index bd7de9a..db837e5 100644
--- a/clang/test/CIR/CodeGen/complex.cpp
+++ b/clang/test/CIR/CodeGen/complex.cpp
@@ -799,3 +799,113 @@ void foo30() {
// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
// OGCG: store float 1.000000e+00, ptr %[[A_REAL_PTR]], align 4
// OGCG: store float 0.000000e+00, ptr %[[A_IMAG_PTR]], align 4
+
+void foo31() {
+ struct Wrapper {
+ int _Complex c;
+ };
+
+ Wrapper w;
+ int r = __real__ w.c;
+}
+
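+// __real__ on a complex struct member: CIR goes through cir.get_member plus
+// cir.complex.real, while OGCG addresses the real half directly via GEPs.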
+// CIR: %[[W_ADDR:.*]] = cir.alloca !rec_Wrapper, !cir.ptr<!rec_Wrapper>, ["w"]
+// CIR: %[[REAL_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["r", init]
+// CIR: %[[ELEM_PTR:.*]] = cir.get_member %[[W_ADDR]][0] {name = "c"} : !cir.ptr<!rec_Wrapper> -> !cir.ptr<!cir.complex<!s32i>>
+// CIR: %[[TMP_ELEM_PTR:.*]] = cir.load{{.*}} %[[ELEM_PTR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[REAL:.*]] = cir.complex.real %[[TMP_ELEM_PTR]] : !cir.complex<!s32i> -> !s32i
+// CIR: cir.store{{.*}} %[[REAL]], %[[REAL_ADDR]] : !s32i, !cir.ptr<!s32i>
+
+// LLVM: %[[W_ADDR:.*]] = alloca %struct.Wrapper, i64 1, align 4
+// LLVM: %[[REAL_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[ELEM_PTR:.*]] = getelementptr %struct.Wrapper, ptr %[[W_ADDR]], i32 0, i32 0
+// LLVM: %[[TMP_ELEM_PTR:.*]] = load { i32, i32 }, ptr %[[ELEM_PTR]], align 4
+// LLVM: %[[REAL:.*]] = extractvalue { i32, i32 } %[[TMP_ELEM_PTR]], 0
+// LLVM: store i32 %[[REAL]], ptr %[[REAL_ADDR]], align 4
+
+// OGCG: %[[W_ADDR:.*]] = alloca %struct.Wrapper, align 4
+// OGCG: %[[REAL_ADDR:.*]] = alloca i32, align 4
+// OGCG: %[[ELEM_PTR:.*]] = getelementptr inbounds nuw %struct.Wrapper, ptr %[[W_ADDR]], i32 0, i32 0
+// OGCG: %[[REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[ELEM_PTR]], i32 0, i32 0
+// OGCG: %[[REAL:.*]] = load i32, ptr %[[REAL_PTR]], align 4
+// OGCG: store i32 %[[REAL]], ptr %[[REAL_ADDR]], align 4
+
+struct Container {
+ static int _Complex c;
+};
+
+void foo32() {
+ Container con;
+ int r = __real__ con.c;
+}
+
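+// For the static member, CIR references the global with cir.get_global;
+// OGCG folds the whole access into a plain load of the real part.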
+// CIR: %[[REAL_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["r", init]
+// CIR: %[[ELEM_PTR:.*]] = cir.get_global @_ZN9Container1cE : !cir.ptr<!cir.complex<!s32i>>
+// CIR: %[[ELEM:.*]] = cir.load{{.*}} %[[ELEM_PTR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[REAL:.*]] = cir.complex.real %[[ELEM]] : !cir.complex<!s32i> -> !s32i
+// CIR: cir.store{{.*}} %[[REAL]], %[[REAL_ADDR]] : !s32i, !cir.ptr<!s32i>
+
+// LLVM: %[[REAL_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[ELEM:.*]] = load { i32, i32 }, ptr @_ZN9Container1cE, align 4
+// LLVM: %[[REAL:.*]] = extractvalue { i32, i32 } %[[ELEM]], 0
+// LLVM: store i32 %[[REAL]], ptr %[[REAL_ADDR]], align 4
+
+// OGCG: %[[REAL_ADDR:.*]] = alloca i32, align 4
+// OGCG: %[[REAL:.*]] = load i32, ptr @_ZN9Container1cE, align 4
+// OGCG: store i32 %[[REAL]], ptr %[[REAL_ADDR]], align 4
+
+void foo33(__builtin_va_list a) {
+ float _Complex b = __builtin_va_arg(a, float _Complex);
+}
+
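+// CIR keeps the fetch as a single high-level cir.va_arg operation, whereas
+// OGCG inlines the x86-64 SysV choice between the register save area and
+// the overflow area, which the branches below check.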
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.ptr<!rec___va_list_tag>, !cir.ptr<!cir.ptr<!rec___va_list_tag>>, ["a", init]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
+// CIR: cir.store %[[ARG_0:.*]], %[[A_ADDR]] : !cir.ptr<!rec___va_list_tag>, !cir.ptr<!cir.ptr<!rec___va_list_tag>>
+// CIR: %[[VA_TAG:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.ptr<!rec___va_list_tag>>, !cir.ptr<!rec___va_list_tag>
+// CIR: %[[COMPLEX:.*]] = cir.va_arg %[[VA_TAG]] : (!cir.ptr<!rec___va_list_tag>) -> !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[COMPLEX]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca ptr, i64 1, align 8
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: store ptr %[[ARG_0:.*]], ptr %[[A_ADDR]], align 8
+// LLVM: %[[TMP_A:.*]] = load ptr, ptr %[[A_ADDR]], align 8
+// LLVM: %[[COMPLEX:.*]] = va_arg ptr %[[TMP_A]], { float, float }
+// LLVM: store { float, float } %[[COMPLEX]], ptr %[[B_ADDR]], align 4
+
+// TODO(CIR): The difference between the CIR-generated LLVM IR and OGCG stems
+// from the missing calling convention lowering. The test will be updated once
+// that is implemented.
+
+// OGCG: %[[A_ADDR:.*]] = alloca ptr, align 8
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: store ptr %[[ARG_0:.*]], ptr %[[A_ADDR]], align 8
+// OGCG: %[[TMP_A:.*]] = load ptr, ptr %[[A_ADDR]], align 8
+// OGCG: %[[FP_OFFSET_PTR:.*]] = getelementptr inbounds nuw %struct.__va_list_tag, ptr %[[TMP_A]], i32 0, i32 1
+// OGCG: %[[FP_OFFSET:.*]] = load i32, ptr %[[FP_OFFSET_PTR]], align 4
+// OGCG: %[[COND:.*]] = icmp ule i32 %[[FP_OFFSET]], 160
+// OGCG: br i1 %[[COND]], label %[[VA_ARG_IN_REG:.*]], label %[[VA_ARG_IN_MEM:.*]]
+//
+// OGCG: [[VA_ARG_IN_REG]]:
+// OGCG: %[[REG_SAVE_PTR:.*]] = getelementptr inbounds nuw %struct.__va_list_tag, ptr %[[TMP_A]], i32 0, i32 3
+// OGCG: %[[REG_SAVE:.*]] = load ptr, ptr %[[REG_SAVE_PTR]], align 8
+// OGCG: %[[VA_ADDR:.*]] = getelementptr i8, ptr %[[REG_SAVE]], i32 %[[FP_OFFSET]]
+// OGCG: %[[FP_OFFSET_NEXT:.*]] = add i32 %[[FP_OFFSET]], 16
+// OGCG: store i32 %[[FP_OFFSET_NEXT]], ptr %[[FP_OFFSET_PTR]], align 4
+// OGCG: br label %[[VA_ARG_END:.*]]
+//
+// OGCG: [[VA_ARG_IN_MEM]]:
+// OGCG: %[[OVERFLOW_PTR:.*]] = getelementptr inbounds nuw %struct.__va_list_tag, ptr %[[TMP_A]], i32 0, i32 2
+// OGCG: %[[OVERFLOW:.*]] = load ptr, ptr %[[OVERFLOW_PTR]], align 8
+// OGCG: %[[OVERFLOW_NEXT:.*]] = getelementptr i8, ptr %[[OVERFLOW]], i32 8
+// OGCG: store ptr %[[OVERFLOW_NEXT]], ptr %[[OVERFLOW_PTR]], align 8
+// OGCG: br label %[[VA_ARG_END]]
+//
+// OGCG: [[VA_ARG_END]]:
+// OGCG: %[[RESULT:.*]] = phi ptr [ %[[VA_ADDR]], %[[VA_ARG_IN_REG]] ], [ %[[OVERFLOW]], %[[VA_ARG_IN_MEM]] ]
+// OGCG: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 0
+// OGCG: %[[RESULT_REAL:.*]] = load float, ptr %[[RESULT_REAL_PTR]], align 4
+// OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 1
+// OGCG: %[[RESULT_IMAG:.*]] = load float, ptr %[[RESULT_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store float %[[RESULT_REAL]], ptr %[[B_REAL_PTR]], align 4
+// OGCG: store float %[[RESULT_IMAG]], ptr %[[B_IMAG_PTR]], align 4
diff --git a/clang/test/CIR/CodeGen/destructors.cpp b/clang/test/CIR/CodeGen/destructors.cpp
index de7718f..fde0732 100644
--- a/clang/test/CIR/CodeGen/destructors.cpp
+++ b/clang/test/CIR/CodeGen/destructors.cpp
@@ -55,3 +55,102 @@ struct inline_destructor {
// CIR-NOT: cir.func {{.*}}inline_destructor{{.*}}
// LLVM-NOT: define {{.*}}inline_destructor{{.*}}
// OGCG-NOT: define {{.*}}inline_destructor{{.*}}
+
+struct array_element {~array_element();};
+void test_array_destructor() {
+ array_element arr[5]{};
+}
+
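+// Lowering emits two loops: a forward do-while that initializes the five
+// elements, then a reverse do-while that starts at the last element and
+// walks back toward the start, invoking the destructor at each step.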
+// CIR: cir.func dso_local @_Z21test_array_destructorv()
+// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!rec_array_element x 5>, !cir.ptr<!cir.array<!rec_array_element x 5>>, ["arr", init]
+// CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!rec_array_element>, !cir.ptr<!cir.ptr<!rec_array_element>>, ["arrayinit.temp", init]
+// CIR: %[[BEGIN:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!rec_array_element x 5>>)
+// CIR: cir.store{{.*}} %[[BEGIN]], %[[ARR_PTR]]
+// CIR: %[[FIVE:.*]] = cir.const #cir.int<5> : !s64i
+// CIR: %[[ARR_END:.*]] = cir.ptr_stride(%[[BEGIN]] : !cir.ptr<!rec_array_element>, %[[FIVE]] : !s64i)
+// CIR: cir.do {
+// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]]
+// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CIR: %[[ARR_NEXT:.*]] = cir.ptr_stride(%[[ARR_CUR]] : !cir.ptr<!rec_array_element>, %[[ONE]] : !s64i)
+// CIR: cir.store{{.*}} %[[ARR_NEXT]], %[[ARR_PTR]] : !cir.ptr<!rec_array_element>, !cir.ptr<!cir.ptr<!rec_array_element>>
+// CIR: cir.yield
+// CIR: } while {
+// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]]
+// CIR: %[[CMP:.*]] = cir.cmp(ne, %[[ARR_CUR]], %[[ARR_END]])
+// CIR: cir.condition(%[[CMP]])
+// CIR: }
+// CIR: %[[FOUR:.*]] = cir.const #cir.int<4> : !u64i
+// CIR: %[[BEGIN:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!rec_array_element x 5>>)
+// CIR: %[[END:.*]] = cir.ptr_stride(%[[BEGIN]] : !cir.ptr<!rec_array_element>, %[[FOUR]] : !u64i)
+// CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!rec_array_element>, !cir.ptr<!cir.ptr<!rec_array_element>>, ["__array_idx"]
+// CIR: cir.store %[[END]], %[[ARR_PTR]]
+// CIR: cir.do {
+// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]]
+// CIR: cir.call @_ZN13array_elementD1Ev(%[[ARR_CUR]]) nothrow : (!cir.ptr<!rec_array_element>) -> ()
+// CIR: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i
+// CIR: %[[ARR_NEXT:.*]] = cir.ptr_stride(%[[ARR_CUR]] : !cir.ptr<!rec_array_element>, %[[NEG_ONE]] : !s64i)
+// CIR: cir.store %[[ARR_NEXT]], %[[ARR_PTR]]
+// CIR: cir.yield
+// CIR: } while {
+// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]]
+// CIR: %[[CMP:.*]] = cir.cmp(ne, %[[ARR_CUR]], %[[BEGIN]])
+// CIR: cir.condition(%[[CMP]])
+// CIR: }
+
+// LLVM: define{{.*}} void @_Z21test_array_destructorv()
+// LLVM: %[[ARR:.*]] = alloca [5 x %struct.array_element]
+// LLVM: %[[TMP:.*]] = alloca ptr
+// LLVM: %[[ARR_PTR:.*]] = getelementptr %struct.array_element, ptr %[[ARR]], i32 0
+// LLVM: store ptr %[[ARR_PTR]], ptr %[[TMP]]
+// LLVM: %[[END_PTR:.*]] = getelementptr %struct.array_element, ptr %[[ARR_PTR]], i64 5
+// LLVM: br label %[[INIT_LOOP_BODY:.*]]
+// LLVM: [[INIT_LOOP_NEXT:.*]]:
+// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]]
+// LLVM: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[END_PTR]]
+// LLVM: br i1 %[[CMP]], label %[[INIT_LOOP_BODY]], label %[[INIT_LOOP_END:.*]]
+// LLVM: [[INIT_LOOP_BODY]]:
+// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]]
+// LLVM: %[[NEXT:.*]] = getelementptr %struct.array_element, ptr %[[CUR]], i64 1
+// LLVM: store ptr %[[NEXT]], ptr %[[TMP]]
+// LLVM: br label %[[INIT_LOOP_NEXT]]
+// LLVM: [[INIT_LOOP_END]]:
+// LLVM: %[[ARR_BEGIN:.*]] = getelementptr %struct.array_element, ptr %[[ARR]], i32 0
+// LLVM: %[[ARR_END:.*]] = getelementptr %struct.array_element, ptr %[[ARR_BEGIN]], i64 4
+// LLVM: %[[ARR_CUR:.*]] = alloca ptr
+// LLVM: store ptr %[[ARR_END]], ptr %[[ARR_CUR]]
+// LLVM: br label %[[DESTROY_LOOP_BODY:.*]]
+// LLVM: [[DESTROY_LOOP_NEXT:.*]]:
+// LLVM: %[[CUR:.*]] = load ptr, ptr %[[ARR_CUR]]
+// LLVM: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[ARR_BEGIN]]
+// LLVM: br i1 %[[CMP]], label %[[DESTROY_LOOP_BODY]], label %[[DESTROY_LOOP_END:.*]]
+// LLVM: [[DESTROY_LOOP_BODY]]:
+// LLVM: %[[CUR:.*]] = load ptr, ptr %[[ARR_CUR]]
+// LLVM: call void @_ZN13array_elementD1Ev(ptr %[[CUR]])
+// LLVM: %[[PREV:.*]] = getelementptr %struct.array_element, ptr %[[CUR]], i64 -1
+// LLVM: store ptr %[[PREV]], ptr %[[ARR_CUR]]
+// LLVM: br label %[[DESTROY_LOOP_NEXT]]
+// LLVM: [[DESTROY_LOOP_END]]:
+// LLVM: ret void
+
+// OGCG: define{{.*}} void @_Z21test_array_destructorv()
+// OGCG: entry:
+// OGCG: %[[ARR:.*]] = alloca [5 x %struct.array_element]
+// OGCG: %[[ARRAYINIT_END:.*]] = getelementptr inbounds %struct.array_element, ptr %[[ARR]], i64 5
+// OGCG: br label %[[INIT_LOOP_BODY:.*]]
+// OGCG: [[INIT_LOOP_BODY]]:
+// OGCG: %[[CUR:.*]] = phi ptr [ %[[ARR]], %entry ], [ %[[NEXT:.*]], %[[INIT_LOOP_BODY]] ]
+// OGCG: %[[NEXT]] = getelementptr inbounds %struct.array_element, ptr %[[CUR]], i64 1
+// OGCG: %[[CMP:.*]] = icmp eq ptr %[[NEXT]], %[[ARRAYINIT_END]]
+// OGCG: br i1 %[[CMP]], label %[[INIT_LOOP_END:.*]], label %[[INIT_LOOP_BODY]]
+// OGCG: [[INIT_LOOP_END]]:
+// OGCG: %[[BEGIN:.*]] = getelementptr inbounds [5 x %struct.array_element], ptr %[[ARR]], i32 0, i32 0
+// OGCG: %[[END:.*]] = getelementptr inbounds %struct.array_element, ptr %[[BEGIN]], i64 5
+// OGCG: br label %[[DESTROY_LOOP_BODY:.*]]
+// OGCG: [[DESTROY_LOOP_BODY]]:
+// OGCG: %[[CUR:.*]] = phi ptr [ %[[END]], %[[INIT_LOOP_END]] ], [ %[[PREV:.*]], %[[DESTROY_LOOP_BODY]] ]
+// OGCG: %[[PREV]] = getelementptr inbounds %struct.array_element, ptr %[[CUR]], i64 -1
+// OGCG: call void @_ZN13array_elementD1Ev(ptr {{.*}} %[[PREV]])
+// OGCG: %[[CMP:.*]] = icmp eq ptr %[[PREV]], %[[BEGIN]]
+// OGCG: br i1 %[[CMP]], label %[[DESTROY_LOOP_END:.*]], label %[[DESTROY_LOOP_BODY]]
+// OGCG: [[DESTROY_LOOP_END]]:
+// OGCG: ret void
diff --git a/clang/test/CIR/CodeGen/function-to-pointer-decay.c b/clang/test/CIR/CodeGen/function-to-pointer-decay.c
new file mode 100644
index 0000000..507957a
--- /dev/null
+++ b/clang/test/CIR/CodeGen/function-to-pointer-decay.c
@@ -0,0 +1,47 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+void f(void);
+
+void f1() {
+ (void (*)())f;
+}
+
+void f2() {
+ (*(void (*)(void))f)();
+}
+
+void test_lvalue_cast() {
+ (*(void (*)(int))f)(42);
+}
+
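+// In CIR, test_lvalue_cast calls through the casted function-pointer value;
+// once lowered, both pipelines emit a direct call to @f, as checked below.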
+// CIR-LABEL: cir.func{{.*}} @f()
+// CIR: cir.func{{.*}} @f1()
+// CIR: cir.return{{.*}}
+
+// CIR-LABEL: cir.func{{.*}} @f2()
+// CIR: cir.call @f() : () -> ()
+
+// CIR-LABEL: cir.func{{.*}} @test_lvalue_cast()
+// CIR: %[[S0:.+]] = {{.*}}@f : !cir.ptr<!cir.func<()>>{{.*}}
+// CIR: %[[S1:.+]] = cir.cast{{.*}}%[[S0]] : !cir.ptr<!cir.func<()>>{{.*}}
+// CIR: %[[S2:.+]] = cir.const #cir.int<42> : !s32i
+// CIR: cir.call %[[S1]](%[[S2]]) : (!cir.ptr<!cir.func<(!s32i)>>, !s32i) -> ()
+
+// LLVM-LABEL: define{{.*}} void @f1()
+// LLVM: ret void
+// LLVM: define{{.*}} void @f2()
+// LLVM: call void @f()
+// LLVM: define{{.*}} void @test_lvalue_cast()
+// LLVM: call void @f(i32 42)
+
+// OGCG-LABEL: define{{.*}} void @f1()
+// OGCG: ret void
+// OGCG: define{{.*}} void @f2()
+// OGCG: call void @f()
+// OGCG: define{{.*}} void @test_lvalue_cast()
+// OGCG: call void @f(i32 noundef 42)
diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp
new file mode 100644
index 0000000..a3e1613
--- /dev/null
+++ b/clang/test/CIR/CodeGen/globals.cpp
@@ -0,0 +1,37 @@
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG
+
+// Should constant initialize global with constant address.
+int var = 1;
+int *constAddr = &var;
+
+// CIR: cir.global external @constAddr = #cir.global_view<@var> : !cir.ptr<!s32i>
+
+// LLVM: @constAddr = global ptr @var, align 8
+
+// OGCG: @constAddr = global ptr @var, align 8
+
+// Should constant initialize global with a constant function address.
+int f();
+int (*constFnAddr)() = f;
+
+// CIR: cir.global external @constFnAddr = #cir.global_view<@_Z1fv> : !cir.ptr<!cir.func<() -> !s32i>>
+
+// LLVM: @constFnAddr = global ptr @_Z1fv, align 8
+
+// OGCG: @constFnAddr = global ptr @_Z1fv, align 8
+
+int arr[4][16];
+int *constArrAddr = &arr[2][1];
+
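+// The 132-byte offset in the checks below is (2 * 16 + 1) * sizeof(int),
+// i.e. 33 elements of 4 bytes each.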
+// CIR: cir.global external @constArrAddr = #cir.global_view<@arr, [2 : i32, 1 : i32]> : !cir.ptr<!s32i>
+
+// The 'inbounds' and 'nuw' flags are inferred by LLVM's constant folder. The
+// same flags show up at -O1 in OGCG.
+// LLVM: @constArrAddr = global ptr getelementptr inbounds nuw (i8, ptr @arr, i64 132), align 8
+
+// OGCG: @constArrAddr = global ptr getelementptr (i8, ptr @arr, i64 132), align 8
diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp
new file mode 100644
index 0000000..48cb44e
--- /dev/null
+++ b/clang/test/CIR/CodeGen/goto.cpp
@@ -0,0 +1,305 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG
+
+int shouldNotGenBranchRet(int x) {
+ if (x > 5)
+ goto err;
+ return 0;
+err:
+ return -1;
+}
+// CIR: cir.func dso_local @_Z21shouldNotGenBranchReti
+// CIR: cir.if {{.*}} {
+// CIR: cir.goto "err"
+// CIR: }
+// CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store [[ZERO]], [[RETVAL:%.*]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.br ^bb1
+// CIR: ^bb1:
+// CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return [[RET]] : !s32i
+// CIR: ^bb2:
+// CIR: cir.label "err"
+// CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i
+// CIR: [[MINUS:%.*]] = cir.unary(minus, [[ONE]]) nsw : !s32i, !s32i
+// CIR: cir.store [[MINUS]], [[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.br ^bb1
+
+// LLVM: define dso_local i32 @_Z21shouldNotGenBranchReti
+// LLVM: [[COND:%.*]] = load i32, ptr {{.*}}, align 4
+// LLVM: [[CMP:%.*]] = icmp sgt i32 [[COND]], 5
+// LLVM: br i1 [[CMP]], label %[[IFTHEN:.*]], label %[[IFEND:.*]]
+// LLVM: [[IFTHEN]]:
+// LLVM: br label %[[ERR:.*]]
+// LLVM: [[IFEND]]:
+// LLVM: br label %[[BB9:.*]]
+// LLVM: [[BB9]]:
+// LLVM: store i32 0, ptr %[[RETVAL:.*]], align 4
+// LLVM: br label %[[BBRET:.*]]
+// LLVM: [[BBRET]]:
+// LLVM: [[RET:%.*]] = load i32, ptr %[[RETVAL]], align 4
+// LLVM: ret i32 [[RET]]
+// LLVM: [[ERR]]:
+// LLVM: store i32 -1, ptr %[[RETVAL]], align 4
+// LLVM: br label %[[BBRET]]
+
+// OGCG: define dso_local noundef i32 @_Z21shouldNotGenBranchReti
+// OGCG: if.then:
+// OGCG: br label %err
+// OGCG: if.end:
+// OGCG: br label %return
+// OGCG: err:
+// OGCG: br label %return
+// OGCG: return:
+
+int shouldGenBranch(int x) {
+ if (x > 5)
+ goto err;
+ x++;
+err:
+ return -1;
+}
+// CIR: cir.func dso_local @_Z15shouldGenBranchi
+// CIR: cir.if {{.*}} {
+// CIR: cir.goto "err"
+// CIR: }
+// CIR: cir.br ^bb1
+// CIR: ^bb1:
+// CIR: cir.label "err"
+
+// LLVM: define dso_local i32 @_Z15shouldGenBranchi
+// LLVM: br i1 [[CMP:%.*]], label %[[IFTHEN:.*]], label %[[IFEND:.*]]
+// LLVM: [[IFTHEN]]:
+// LLVM: br label %[[ERR:.*]]
+// LLVM: [[IFEND]]:
+// LLVM: br label %[[BB9:.*]]
+// LLVM: [[BB9]]:
+// LLVM: br label %[[ERR]]
+// LLVM: [[ERR]]:
+// LLVM: ret i32 [[RET:%.*]]
+
+// OGCG: define dso_local noundef i32 @_Z15shouldGenBranchi
+// OGCG: if.then:
+// OGCG: br label %err
+// OGCG: if.end:
+// OGCG: br label %err
+// OGCG: err:
+// OGCG: ret
+
+void severalLabelsInARow(int a) {
+ int b = a;
+ goto end1;
+ b = b + 1;
+ goto end2;
+end1:
+end2:
+ b = b + 2;
+}
+// CIR: cir.func dso_local @_Z19severalLabelsInARowi
+// CIR: cir.goto "end1"
+// CIR: ^bb[[#BLK1:]]:
+// CIR: cir.goto "end2"
+// CIR: ^bb[[#BLK2:]]:
+// CIR: cir.label "end1"
+// CIR: cir.br ^bb[[#BLK3:]]
+// CIR: ^bb[[#BLK3]]:
+// CIR: cir.label "end2"
+
+// LLVM: define dso_local void @_Z19severalLabelsInARowi
+// LLVM: br label %[[END1:.*]]
+// LLVM: [[UNRE:.*]]: ; No predecessors!
+// LLVM: br label %[[END2:.*]]
+// LLVM: [[END1]]:
+// LLVM: br label %[[END2]]
+// LLVM: [[END2]]:
+// LLVM: ret
+
+// OGCG: define dso_local void @_Z19severalLabelsInARowi
+// OGCG: br label %end1
+// OGCG: end1:
+// OGCG: br label %end2
+// OGCG: end2:
+// OGCG: ret
+
+void severalGotosInARow(int a) {
+ int b = a;
+ goto end;
+ goto end;
+end:
+ b = b + 2;
+}
+// CIR: cir.func dso_local @_Z18severalGotosInARowi
+// CIR: cir.goto "end"
+// CIR: ^bb[[#BLK1:]]:
+// CIR: cir.goto "end"
+// CIR: ^bb[[#BLK2:]]:
+// CIR: cir.label "end"
+
+// LLVM: define dso_local void @_Z18severalGotosInARowi
+// LLVM: br label %[[END:.*]]
+// LLVM: [[UNRE:.*]]: ; No predecessors!
+// LLVM: br label %[[END]]
+// LLVM: [[END]]:
+// LLVM: ret void
+
+// OGCG: define dso_local void @_Z18severalGotosInARowi(i32 noundef %a) #0 {
+// OGCG: br label %end
+// OGCG: end:
+// OGCG: ret void
+
+extern "C" void action1();
+extern "C" void action2();
+extern "C" void multiple_non_case(int v) {
+ switch (v) {
+ default:
+ action1();
+ l2:
+ action2();
+ break;
+ }
+}
+
+// CIR: cir.func dso_local @multiple_non_case
+// CIR: cir.switch
+// CIR: cir.case(default, []) {
+// CIR: cir.call @action1()
+// CIR: cir.br ^[[BB1:[a-zA-Z0-9]+]]
+// CIR: ^[[BB1]]:
+// CIR: cir.label
+// CIR: cir.call @action2()
+// CIR: cir.break
+
+// LLVM: define dso_local void @multiple_non_case
+// LLVM: [[SWDEFAULT:.*]]:
+// LLVM: call void @action1()
+// LLVM: br label %[[L2:.*]]
+// LLVM: [[L2]]:
+// LLVM: call void @action2()
+// LLVM: br label %[[BREAK:.*]]
+
+// OGCG: define dso_local void @multiple_non_case
+// OGCG: sw.default:
+// OGCG: call void @action1()
+// OGCG: br label %l2
+// OGCG: l2:
+// OGCG: call void @action2()
+// OGCG: br label [[BREAK:%.*]]
+
+extern "C" void case_follow_label(int v) {
+ switch (v) {
+ case 1:
+ label:
+ case 2:
+ action1();
+ break;
+ default:
+ action2();
+ goto label;
+ }
+}
+
+// CIR: cir.func dso_local @case_follow_label
+// CIR: cir.switch
+// CIR: cir.case(equal, [#cir.int<1> : !s32i]) {
+// CIR: cir.label "label"
+// CIR: cir.case(equal, [#cir.int<2> : !s32i]) {
+// CIR: cir.call @action1()
+// CIR: cir.break
+// CIR: cir.case(default, []) {
+// CIR: cir.call @action2()
+// CIR: cir.goto "label"
+
+// LLVM: define dso_local void @case_follow_label
+// LLVM: switch i32 {{.*}}, label %[[SWDEFAULT:.*]] [
+// LLVM: i32 1, label %[[LABEL:.*]]
+// LLVM: i32 2, label %[[CASE2:.*]]
+// LLVM: ]
+// LLVM: [[LABEL]]:
+// LLVM: br label %[[CASE2]]
+// LLVM: [[CASE2]]:
+// LLVM: call void @action1()
+// LLVM: br label %[[BREAK:.*]]
+// LLVM: [[BREAK]]:
+// LLVM: br label %[[END:.*]]
+// LLVM: [[SWDEFAULT]]:
+// LLVM: call void @action2()
+// LLVM: br label %[[LABEL]]
+// LLVM: [[END]]:
+// LLVM: br label %[[RET:.*]]
+// LLVM: [[RET]]:
+// LLVM: ret void
+
+// OGCG: define dso_local void @case_follow_label
+// OGCG: sw.bb:
+// OGCG: br label %label
+// OGCG: label:
+// OGCG: br label %sw.bb1
+// OGCG: sw.bb1:
+// OGCG: call void @action1()
+// OGCG: br label %sw.epilog
+// OGCG: sw.default:
+// OGCG: call void @action2()
+// OGCG: br label %label
+// OGCG: sw.epilog:
+// OGCG: ret void
+
+extern "C" void default_follow_label(int v) {
+ switch (v) {
+ case 1:
+ case 2:
+ action1();
+ break;
+ label:
+ default:
+ action2();
+ goto label;
+ }
+}
+
+// CIR: cir.func dso_local @default_follow_label
+// CIR: cir.switch
+// CIR: cir.case(equal, [#cir.int<1> : !s32i]) {
+// CIR: cir.yield
+// CIR: cir.case(equal, [#cir.int<2> : !s32i]) {
+// CIR: cir.call @action1()
+// CIR: cir.break
+// CIR: cir.label "label"
+// CIR: cir.case(default, []) {
+// CIR: cir.call @action2()
+// CIR: cir.goto "label"
+
+// LLVM: define dso_local void @default_follow_label
+// LLVM: [[CASE1:.*]]:
+// LLVM: br label %[[BB8:.*]]
+// LLVM: [[BB8]]:
+// LLVM: br label %[[CASE2:.*]]
+// LLVM: [[CASE2]]:
+// LLVM: call void @action1()
+// LLVM: br label %[[BREAK:.*]]
+// LLVM: [[LABEL:.*]]:
+// LLVM: br label %[[SWDEFAULT:.*]]
+// LLVM: [[SWDEFAULT]]:
+// LLVM: call void @action2()
+// LLVM: br label %[[BB9:.*]]
+// LLVM: [[BB9]]:
+// LLVM: br label %[[LABEL]]
+// LLVM: [[BREAK]]:
+// LLVM: br label %[[RET:.*]]
+// LLVM: [[RET]]:
+// LLVM: ret void
+
+// OGCG: define dso_local void @default_follow_label
+// OGCG: sw.bb:
+// OGCG: call void @action1()
+// OGCG: br label %sw.epilog
+// OGCG: label:
+// OGCG: br label %sw.default
+// OGCG: sw.default:
+// OGCG: call void @action2()
+// OGCG: br label %label
+// OGCG: sw.epilog:
+// OGCG: ret void
diff --git a/clang/test/CIR/CodeGen/inline-asm.c b/clang/test/CIR/CodeGen/inline-asm.c
new file mode 100644
index 0000000..fc959f9
--- /dev/null
+++ b/clang/test/CIR/CodeGen/inline-asm.c
@@ -0,0 +1,24 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
+
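+// "~{dirflag},~{fpsr},~{flags}" is the default clobber set that clang attaches to
+// every x86 inline-asm statement.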
+void f1() {
+ // CIR: cir.asm(x86_att,
+ // CIR: out = [],
+ // CIR: in = [],
+ // CIR: in_out = [],
+ // CIR: {"" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+ // LLVM: call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"()
+ __asm__ volatile("" : : : );
+}
+
+void f2() {
+ // CIR: cir.asm(x86_att,
+ // CIR: out = [],
+ // CIR: in = [],
+ // CIR: in_out = [],
+ // CIR: {"nop" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+ // LLVM: call void asm sideeffect "nop", "~{dirflag},~{fpsr},~{flags}"()
+ __asm__ volatile("nop" : : : );
+}
diff --git a/clang/test/CIR/CodeGen/label.c b/clang/test/CIR/CodeGen/label.c
new file mode 100644
index 0000000..a050094
--- /dev/null
+++ b/clang/test/CIR/CodeGen/label.c
@@ -0,0 +1,186 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG
+
+void label() {
+labelA:
+ return;
+}
+
+// CIR: cir.func no_proto dso_local @label
+// CIR: cir.label "labelA"
+// CIR: cir.return
+
+// LLVM: define dso_local void @label
+// LLVM: ret void
+
+// OGCG: define dso_local void @label
+// OGCG: br label %labelA
+// OGCG: labelA:
+// OGCG: ret void
+
+void multiple_labels() {
+labelB:
+labelC:
+ return;
+}
+
+// CIR: cir.func no_proto dso_local @multiple_labels
+// CIR: cir.label "labelB"
+// CIR: cir.br ^bb1
+// CIR: ^bb1: // pred: ^bb0
+// CIR: cir.label "labelC"
+// CIR: cir.return
+
+// LLVM: define dso_local void @multiple_labels()
+// LLVM: br label %1
+// LLVM: 1:
+// LLVM: ret void
+
+// OGCG: define dso_local void @multiple_labels
+// OGCG: br label %labelB
+// OGCG: labelB:
+// OGCG: br label %labelC
+// OGCG: labelC:
+// OGCG: ret void
+
+void label_in_if(int cond) {
+ if (cond) {
+labelD:
+ cond++;
+ }
+}
+
+// CIR: cir.func dso_local @label_in_if
+// CIR: cir.if {{.*}} {
+// CIR: cir.label "labelD"
+// CIR: [[LOAD:%.*]] = cir.load align(4) [[COND:%.*]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[INC:%.*]] = cir.unary(inc, [[LOAD]]) nsw : !s32i, !s32i
+// CIR: cir.store align(4) [[INC]], [[COND]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+// CIR: cir.return
+
+// LLVM: define dso_local void @label_in_if
+// LLVM: br label %3
+// LLVM: 3:
+// LLVM: [[LOAD:%.*]] = load i32, ptr [[COND:%.*]], align 4
+// LLVM: [[CMP:%.*]] = icmp ne i32 [[LOAD]], 0
+// LLVM: br i1 [[CMP]], label %6, label %9
+// LLVM: 6:
+// LLVM: [[LOAD2:%.*]] = load i32, ptr [[COND]], align 4
+// LLVM: [[ADD1:%.*]] = add nsw i32 [[LOAD2]], 1
+// LLVM: store i32 [[ADD1]], ptr [[COND]], align 4
+// LLVM: br label %9
+// LLVM: 9:
+// LLVM: br label %10
+// LLVM: 10:
+// LLVM: ret void
+
+// OGCG: define dso_local void @label_in_if
+// OGCG: if.then:
+// OGCG: br label %labelD
+// OGCG: labelD:
+// OGCG: [[LOAD:%.*]] = load i32, ptr [[COND:%.*]], align 4
+// OGCG: [[INC:%.*]] = add nsw i32 [[LOAD]], 1
+// OGCG: store i32 [[INC]], ptr [[COND]], align 4
+// OGCG: br label %if.end
+// OGCG: if.end:
+// OGCG: ret void
+
+void after_return() {
+ return;
+ label:
+}
+
+// CIR: cir.func no_proto dso_local @after_return
+// CIR: cir.br ^bb1
+// CIR: ^bb1: // 2 preds: ^bb0, ^bb2
+// CIR: cir.return
+// CIR: ^bb2: // no predecessors
+// CIR: cir.label "label"
+// CIR: cir.br ^bb1
+
+// LLVM: define dso_local void @after_return
+// LLVM: br label %1
+// LLVM: 1:
+// LLVM: ret void
+// LLVM: 2:
+// LLVM: br label %1
+
+// OGCG: define dso_local void @after_return
+// OGCG: br label %label
+// OGCG: label:
+// OGCG: ret void
+
+
+void after_unreachable() {
+ __builtin_unreachable();
+ label:
+}
+
+// CIR: cir.func no_proto dso_local @after_unreachable
+// CIR: cir.unreachable
+// CIR: ^bb1:
+// CIR: cir.label "label"
+// CIR: cir.return
+
+// LLVM: define dso_local void @after_unreachable
+// LLVM: unreachable
+// LLVM: 1:
+// LLVM: ret void
+
+// OGCG: define dso_local void @after_unreachable
+// OGCG: unreachable
+// OGCG: label:
+// OGCG: ret void
+
+void labelWithoutMatch() {
+end:
+ return;
+}
+// CIR: cir.func no_proto dso_local @labelWithoutMatch
+// CIR: cir.label "end"
+// CIR: cir.return
+// CIR: }
+
+// LLVM: define dso_local void @labelWithoutMatch
+// LLVM: ret void
+
+// OGCG: define dso_local void @labelWithoutMatch
+// OGCG: br label %end
+// OGCG: end:
+// OGCG: ret void
+
+struct S {};
+struct S get();
+void bar(struct S);
+
+void foo() {
+ {
+ label:
+ bar(get());
+ }
+}
+
+// CIR: cir.func no_proto dso_local @foo
+// CIR: cir.scope {
+// CIR: cir.label "label"
+// CIR: %0 = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["agg.tmp0"]
+
+// LLVM: define dso_local void @foo() {
+// LLVM: [[ALLOC:%.*]] = alloca %struct.S, i64 1, align 1
+// LLVM: br label %2
+// LLVM: 2:
+// LLVM: [[CALL:%.*]] = call %struct.S @get()
+// LLVM: store %struct.S [[CALL]], ptr [[ALLOC]], align 1
+// LLVM: [[LOAD:%.*]] = load %struct.S, ptr [[ALLOC]], align 1
+// LLVM: call void @bar(%struct.S [[LOAD]])
+
+// OGCG: define dso_local void @foo()
+// OGCG: %agg.tmp = alloca %struct.S, align 1
+// OGCG: %undef.agg.tmp = alloca %struct.S, align 1
+// OGCG: br label %label
+// OGCG: label:
diff --git a/clang/test/CIR/CodeGen/lang-c-cpp.cpp b/clang/test/CIR/CodeGen/lang-c-cpp.cpp
new file mode 100644
index 0000000..e126932
--- /dev/null
+++ b/clang/test/CIR/CodeGen/lang-c-cpp.cpp
@@ -0,0 +1,11 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cpp.cir
+// RUN: FileCheck --check-prefix=CIR-CPP --input-file=%t.cpp.cir %s
+// RUN: %clang_cc1 -x c -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.c.cir
+// RUN: FileCheck --check-prefix=CIR-C --input-file=%t.c.cir %s
+
+// CIR-CPP: module attributes {{{.*}}cir.lang = #cir.lang<cxx>{{.*}}}
+// CIR-C: module attributes {{{.*}}cir.lang = #cir.lang<c>{{.*}}}
+
+int main() {
+ return 0;
+}
diff --git a/clang/test/CIR/CodeGen/local-vars.cpp b/clang/test/CIR/CodeGen/local-vars.cpp
index 9385fdf..0c5c972 100644
--- a/clang/test/CIR/CodeGen/local-vars.cpp
+++ b/clang/test/CIR/CodeGen/local-vars.cpp
@@ -1,5 +1,9 @@
-// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t-cir.ll
-// RUN: FileCheck -input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG
void test() {
int i = 1;
@@ -21,46 +25,182 @@ void test() {
bool uib;
}
-// CHECK: module
-// CHECK: cir.func{{.*}} @_Z4testv()
-// CHECK: %[[I_PTR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i", init] {alignment = 4 : i64}
-// CHECK: %[[L_PTR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["l", init] {alignment = 8 : i64}
-// CHECK: %[[F_PTR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["f", init] {alignment = 4 : i64}
-// CHECK: %[[D_PTR:.*]] = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["d", init] {alignment = 8 : i64}
-// CHECK: %[[B1_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["b1", init] {alignment = 1 : i64}
-// CHECK: %[[B2_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["b2", init] {alignment = 1 : i64}
-// CHECK: %[[CI_PTR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["ci", init, const] {alignment = 4 : i64}
-// CHECK: %[[CL_PTR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["cl", init, const] {alignment = 8 : i64}
-// CHECK: %[[CF_PTR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["cf", init, const] {alignment = 4 : i64}
-// CHECK: %[[CD_PTR:.*]] = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["cd", init, const] {alignment = 8 : i64}
-// CHECK: %[[CB1_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["cb1", init, const] {alignment = 1 : i64}
-// CHECK: %[[CB2_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["cb2", init, const] {alignment = 1 : i64}
-// CHECK: %[[UII_PTR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["uii"] {alignment = 4 : i64}
-// CHECK: %[[UIL_PTR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["uil"] {alignment = 8 : i64}
-// CHECK: %[[UIF_PTR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["uif"] {alignment = 4 : i64}
-// CHECK: %[[UID_PTR:.*]] = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["uid"] {alignment = 8 : i64}
-// CHECK: %[[UIB_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["uib"] {alignment = 1 : i64}
-// CHECK: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK: cir.store align(4) %[[ONE]], %[[I_PTR]] : !s32i, !cir.ptr<!s32i>
-// CHECK: %[[TWO:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK: cir.store align(8) %[[TWO]], %[[L_PTR]] : !s64i, !cir.ptr<!s64i>
-// CHECK: %[[THREE:.*]] = cir.const #cir.fp<3.0{{.*}}> : !cir.float
-// CHECK: cir.store align(4) %[[THREE]], %[[F_PTR]] : !cir.float, !cir.ptr<!cir.float>
-// CHECK: %[[FOUR:.*]] = cir.const #cir.fp<4.0{{.*}}> : !cir.double
-// CHECK: cir.store align(8) %[[FOUR]], %[[D_PTR]] : !cir.double, !cir.ptr<!cir.double>
-// CHECK: %[[TRUE:.*]] = cir.const #true
-// CHECK: cir.store align(1) %[[TRUE]], %[[B1_PTR]] : !cir.bool, !cir.ptr<!cir.bool>
-// CHECK: %[[FALSE:.*]] = cir.const #false
-// CHECK: cir.store align(1) %[[FALSE]], %[[B2_PTR]] : !cir.bool, !cir.ptr<!cir.bool>
-// CHECK: %[[ONEC:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK: cir.store align(4) %[[ONEC]], %[[CI_PTR]] : !s32i, !cir.ptr<!s32i>
-// CHECK: %[[TWOC:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK: cir.store align(8) %[[TWOC]], %[[CL_PTR]] : !s64i, !cir.ptr<!s64i>
-// CHECK: %[[THREEC:.*]] = cir.const #cir.fp<3.0{{.*}}> : !cir.float
-// CHECK: cir.store align(4) %[[THREEC]], %[[CF_PTR]] : !cir.float, !cir.ptr<!cir.float>
-// CHECK: %[[FOURC:.*]] = cir.const #cir.fp<4.0{{.*}}> : !cir.double
-// CHECK: cir.store align(8) %[[FOURC]], %[[CD_PTR]] : !cir.double, !cir.ptr<!cir.double>
-// CHECK: %[[TRUEC:.*]] = cir.const #true
-// CHECK: cir.store align(1) %[[TRUEC]], %[[CB1_PTR]] : !cir.bool, !cir.ptr<!cir.bool>
-// CHECK: %[[FALSEC:.*]] = cir.const #false
-// CHECK: cir.store align(1) %[[FALSEC]], %[[CB2_PTR]] : !cir.bool, !cir.ptr<!cir.bool>
+// CIR: module
+// CIR: cir.func{{.*}} @_Z4testv()
+// CIR: %[[I_PTR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i", init] {alignment = 4 : i64}
+// CIR: %[[L_PTR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["l", init] {alignment = 8 : i64}
+// CIR: %[[F_PTR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["f", init] {alignment = 4 : i64}
+// CIR: %[[D_PTR:.*]] = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["d", init] {alignment = 8 : i64}
+// CIR: %[[B1_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["b1", init] {alignment = 1 : i64}
+// CIR: %[[B2_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["b2", init] {alignment = 1 : i64}
+// CIR: %[[CI_PTR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["ci", init, const] {alignment = 4 : i64}
+// CIR: %[[CL_PTR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["cl", init, const] {alignment = 8 : i64}
+// CIR: %[[CF_PTR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["cf", init, const] {alignment = 4 : i64}
+// CIR: %[[CD_PTR:.*]] = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["cd", init, const] {alignment = 8 : i64}
+// CIR: %[[CB1_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["cb1", init, const] {alignment = 1 : i64}
+// CIR: %[[CB2_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["cb2", init, const] {alignment = 1 : i64}
+// CIR: %[[UII_PTR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["uii"] {alignment = 4 : i64}
+// CIR: %[[UIL_PTR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["uil"] {alignment = 8 : i64}
+// CIR: %[[UIF_PTR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["uif"] {alignment = 4 : i64}
+// CIR: %[[UID_PTR:.*]] = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["uid"] {alignment = 8 : i64}
+// CIR: %[[UIB_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["uib"] {alignment = 1 : i64}
+// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: cir.store align(4) %[[ONE]], %[[I_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TWO:.*]] = cir.const #cir.int<2> : !s64i
+// CIR: cir.store align(8) %[[TWO]], %[[L_PTR]] : !s64i, !cir.ptr<!s64i>
+// CIR: %[[THREE:.*]] = cir.const #cir.fp<3.0{{.*}}> : !cir.float
+// CIR: cir.store align(4) %[[THREE]], %[[F_PTR]] : !cir.float, !cir.ptr<!cir.float>
+// CIR: %[[FOUR:.*]] = cir.const #cir.fp<4.0{{.*}}> : !cir.double
+// CIR: cir.store align(8) %[[FOUR]], %[[D_PTR]] : !cir.double, !cir.ptr<!cir.double>
+// CIR: %[[TRUE:.*]] = cir.const #true
+// CIR: cir.store align(1) %[[TRUE]], %[[B1_PTR]] : !cir.bool, !cir.ptr<!cir.bool>
+// CIR: %[[FALSE:.*]] = cir.const #false
+// CIR: cir.store align(1) %[[FALSE]], %[[B2_PTR]] : !cir.bool, !cir.ptr<!cir.bool>
+// CIR: %[[ONEC:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: cir.store align(4) %[[ONEC]], %[[CI_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TWOC:.*]] = cir.const #cir.int<2> : !s64i
+// CIR: cir.store align(8) %[[TWOC]], %[[CL_PTR]] : !s64i, !cir.ptr<!s64i>
+// CIR: %[[THREEC:.*]] = cir.const #cir.fp<3.0{{.*}}> : !cir.float
+// CIR: cir.store align(4) %[[THREEC]], %[[CF_PTR]] : !cir.float, !cir.ptr<!cir.float>
+// CIR: %[[FOURC:.*]] = cir.const #cir.fp<4.0{{.*}}> : !cir.double
+// CIR: cir.store align(8) %[[FOURC]], %[[CD_PTR]] : !cir.double, !cir.ptr<!cir.double>
+// CIR: %[[TRUEC:.*]] = cir.const #true
+// CIR: cir.store align(1) %[[TRUEC]], %[[CB1_PTR]] : !cir.bool, !cir.ptr<!cir.bool>
+// CIR: %[[FALSEC:.*]] = cir.const #false
+// CIR: cir.store align(1) %[[FALSEC]], %[[CB2_PTR]] : !cir.bool, !cir.ptr<!cir.bool>
+
+// LLVM: define dso_local void @_Z4testv()
+// LLVM: %[[I_PTR:.+]] = alloca i32
+// LLVM: %[[L_PTR:.+]] = alloca i64
+// LLVM: %[[F_PTR:.+]] = alloca float
+// LLVM: %[[D_PTR:.+]] = alloca double
+// LLVM: %[[B1_PTR:.+]] = alloca i8
+// LLVM: %[[B2_PTR:.+]] = alloca i8
+// LLVM: %[[CI_PTR:.+]] = alloca i32
+// LLVM: %[[CL_PTR:.+]] = alloca i64
+// LLVM: %[[CF_PTR:.+]] = alloca float
+// LLVM: %[[CD_PTR:.+]] = alloca double
+// LLVM: %[[CB1_PTR:.+]] = alloca i8
+// LLVM: %[[CB2_PTR:.+]] = alloca i8
+// LLVM: %[[UII_PTR:.+]] = alloca i32
+// LLVM: %[[UIL_PTR:.+]] = alloca i64
+// LLVM: %[[UIF_PTR:.+]] = alloca float
+// LLVM: %[[UID_PTR:.+]] = alloca double
+// LLVM: %[[UIB_PTR:.+]] = alloca i8
+// LLVM: store i32 1, ptr %[[I_PTR]]
+// LLVM: store i64 2, ptr %[[L_PTR]]
+// LLVM: store float 3.000000e+00, ptr %[[F_PTR]]
+// LLVM: store double 4.000000e+00, ptr %[[D_PTR]]
+// LLVM: store i8 1, ptr %[[B1_PTR]]
+// LLVM: store i8 0, ptr %[[B2_PTR]]
+// LLVM: store i32 1, ptr %[[CI_PTR]]
+// LLVM: store i64 2, ptr %[[CL_PTR]]
+// LLVM: store float 3.000000e+00, ptr %[[CF_PTR]]
+// LLVM: store double 4.000000e+00, ptr %[[CD_PTR]]
+// LLVM: store i8 1, ptr %[[CB1_PTR]]
+// LLVM: store i8 0, ptr %[[CB2_PTR]]
+// LLVM: ret void
+
+// OGCG: define dso_local void @_Z4testv()
+// OGCG: %[[I_PTR:.+]] = alloca i32
+// OGCG: %[[L_PTR:.+]] = alloca i64
+// OGCG: %[[F_PTR:.+]] = alloca float
+// OGCG: %[[D_PTR:.+]] = alloca double
+// OGCG: %[[B1_PTR:.+]] = alloca i8
+// OGCG: %[[B2_PTR:.+]] = alloca i8
+// OGCG: %[[CI_PTR:.+]] = alloca i32
+// OGCG: %[[CL_PTR:.+]] = alloca i64
+// OGCG: %[[CF_PTR:.+]] = alloca float
+// OGCG: %[[CD_PTR:.+]] = alloca double
+// OGCG: %[[CB1_PTR:.+]] = alloca i8
+// OGCG: %[[CB2_PTR:.+]] = alloca i8
+// OGCG: %[[UII_PTR:.+]] = alloca i32
+// OGCG: %[[UIL_PTR:.+]] = alloca i64
+// OGCG: %[[UIF_PTR:.+]] = alloca float
+// OGCG: %[[UID_PTR:.+]] = alloca double
+// OGCG: %[[UIB_PTR:.+]] = alloca i8
+// OGCG: store i32 1, ptr %[[I_PTR]]
+// OGCG: store i64 2, ptr %[[L_PTR]]
+// OGCG: store float 3.000000e+00, ptr %[[F_PTR]]
+// OGCG: store double 4.000000e+00, ptr %[[D_PTR]]
+// OGCG: store i8 1, ptr %[[B1_PTR]]
+// OGCG: store i8 0, ptr %[[B2_PTR]]
+// OGCG: store i32 1, ptr %[[CI_PTR]]
+// OGCG: store i64 2, ptr %[[CL_PTR]]
+// OGCG: store float 3.000000e+00, ptr %[[CF_PTR]]
+// OGCG: store double 4.000000e+00, ptr %[[CD_PTR]]
+// OGCG: store i8 1, ptr %[[CB1_PTR]]
+// OGCG: store i8 0, ptr %[[CB2_PTR]]
+// OGCG: ret void
+
+void value_init() {
+ float f{};
+ bool b{};
+ int i{};
+
+ float f2 = {};
+ bool b2 = {};
+ int i2 = {};
+
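+  // int() value-initializes to 0, so the comparison constant-folds to true in the
+  // emitted LLVM IR.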
+ bool scalar_value_init_expr = int() == 0;
+}
+
+// CIR: cir.func{{.*}} @_Z10value_initv()
+// CIR: %[[F_PTR:.+]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["f", init]
+// CIR: %[[B_PTR:.+]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["b", init]
+// CIR: %[[I_PTR:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i", init]
+// CIR: %[[F2_PTR:.+]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["f2", init]
+// CIR: %[[B2_PTR:.+]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["b2", init]
+// CIR: %[[I2_PTR:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i2", init]
+// CIR: %[[S_PTR:.+]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["scalar_value_init_expr", init]
+// CIR: %[[ZEROF1:.+]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+// CIR: cir.store{{.*}} %[[ZEROF1]], %[[F_PTR]] : !cir.float, !cir.ptr<!cir.float>
+// CIR: %[[FALSE1:.+]] = cir.const #false
+// CIR: cir.store{{.*}} %[[FALSE1]], %[[B_PTR]] : !cir.bool, !cir.ptr<!cir.bool>
+// CIR: %[[ZEROI1:.+]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store{{.*}} %[[ZEROI1]], %[[I_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[ZEROF2:.+]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+// CIR: cir.store{{.*}} %[[ZEROF2]], %[[F2_PTR]] : !cir.float, !cir.ptr<!cir.float>
+// CIR: %[[FALSE2:.+]] = cir.const #false
+// CIR: cir.store{{.*}} %[[FALSE2]], %[[B2_PTR]] : !cir.bool, !cir.ptr<!cir.bool>
+// CIR: %[[ZEROI2:.+]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store{{.*}} %[[ZEROI2]], %[[I2_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[ZEROI_LHS:.+]] = cir.const #cir.int<0> : !s32i
+// CIR: %[[ZEROI_RHS:.+]] = cir.const #cir.int<0> : !s32i
+// CIR: %[[CMP:.+]] = cir.cmp(eq, %[[ZEROI_LHS]], %[[ZEROI_RHS]]) : !s32i, !cir.bool
+// CIR: cir.store{{.*}} %[[CMP]], %[[S_PTR]] : !cir.bool, !cir.ptr<!cir.bool>
+// CIR: cir.return
+
+// LLVM: define{{.*}} void @_Z10value_initv()
+// LLVM: %[[F_PTR:.+]] = alloca float
+// LLVM: %[[B_PTR:.+]] = alloca i8
+// LLVM: %[[I_PTR:.+]] = alloca i32
+// LLVM: %[[F2_PTR:.+]] = alloca float
+// LLVM: %[[B2_PTR:.+]] = alloca i8
+// LLVM: %[[I2_PTR:.+]] = alloca i32
+// LLVM: %[[S_PTR:.+]] = alloca i8
+// LLVM: store float 0.000000e+00, ptr %[[F_PTR]]
+// LLVM: store i8 0, ptr %[[B_PTR]]
+// LLVM: store i32 0, ptr %[[I_PTR]]
+// LLVM: store float 0.000000e+00, ptr %[[F2_PTR]]
+// LLVM: store i8 0, ptr %[[B2_PTR]]
+// LLVM: store i32 0, ptr %[[I2_PTR]]
+// LLVM: store i8 1, ptr %[[S_PTR]]
+// LLVM: ret void
+
+// OGCG: define{{.*}} void @_Z10value_initv()
+// OGCG: %[[F_PTR:.+]] = alloca float
+// OGCG: %[[B_PTR:.+]] = alloca i8
+// OGCG: %[[I_PTR:.+]] = alloca i32
+// OGCG: %[[F2_PTR:.+]] = alloca float
+// OGCG: %[[B2_PTR:.+]] = alloca i8
+// OGCG: %[[I2_PTR:.+]] = alloca i32
+// OGCG: %[[S_PTR:.+]] = alloca i8
+// OGCG: store float 0.000000e+00, ptr %[[F_PTR]]
+// OGCG: store i8 0, ptr %[[B_PTR]]
+// OGCG: store i32 0, ptr %[[I_PTR]]
+// OGCG: store float 0.000000e+00, ptr %[[F2_PTR]]
+// OGCG: store i8 0, ptr %[[B2_PTR]]
+// OGCG: store i32 0, ptr %[[I2_PTR]]
+// OGCG: store i8 1, ptr %[[S_PTR]]
+// OGCG: ret void
diff --git a/clang/test/CIR/CodeGen/module-asm.c b/clang/test/CIR/CodeGen/module-asm.c
new file mode 100644
index 0000000..e6cec5e
--- /dev/null
+++ b/clang/test/CIR/CodeGen/module-asm.c
@@ -0,0 +1,6 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+
+// CHECK: cir.module_asm = [".globl bar", ".globl foo"]
+__asm (".globl bar");
+__asm (".globl foo");
diff --git a/clang/test/CIR/CodeGen/multi-vtable.cpp b/clang/test/CIR/CodeGen/multi-vtable.cpp
new file mode 100644
index 0000000..b42f8a6
--- /dev/null
+++ b/clang/test/CIR/CodeGen/multi-vtable.cpp
@@ -0,0 +1,183 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fno-rtti -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fno-rtti -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fno-rtti -emit-llvm %s -o %t.ll
+// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s
+
+// Note: This test uses -fno-rtti so that we can delay implementation of that handling.
+// When rtti handling for vtables is implemented, that option should be removed.
+
+class Mother {
+public:
+ virtual void MotherKey();
+ void simple() { }
+ virtual void MotherNonKey() {}
+};
+
+class Father {
+public:
+ virtual void FatherKey();
+};
+
+class Child : public Mother, public Father {
+public:
+ Child();
+ void MotherKey() override;
+};
+
+void Mother::MotherKey() {}
+void Father::FatherKey() {}
+void Child::MotherKey() {}
+
+// CIR-DAG: [[MOTHER_VTABLE_TYPE:.*]] = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 4>}>
+// CIR-DAG: [[FATHER_VTABLE_TYPE:.*]] = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 3>}>
+// CIR-DAG: [[CHILD_VTABLE_TYPE:.*]] = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 4>, !cir.array<!cir.ptr<!u8i> x 3>}>
+// CIR-DAG: !rec_Father = !cir.record<class "Father" {!cir.vptr}
+// CIR-DAG: !rec_Mother = !cir.record<class "Mother" {!cir.vptr}
+// CIR-DAG: !rec_Child = !cir.record<class "Child" {!rec_Mother, !rec_Father}
+
+// Child vtable
+
+// CIR: cir.global "private" external @_ZTV5Child = #cir.vtable<{
+// CIR-SAME: #cir.const_array<[
+// CIR-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN5Child9MotherKeyEv> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN6Mother12MotherNonKeyEv> : !cir.ptr<!u8i>
+// CIR-SAME: ]> : !cir.array<!cir.ptr<!u8i> x 4>,
+// CIR-SAME: #cir.const_array<[
+// CIR-SAME: #cir.ptr<-8 : i64> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN6Father9FatherKeyEv> : !cir.ptr<!u8i>
+// CIR-SAME: ]> : !cir.array<!cir.ptr<!u8i> x 3>
+// CIR-SAME: }> : [[CHILD_VTABLE_TYPE]]
+
+// LLVM: @_ZTV5Child = global { [4 x ptr], [3 x ptr] } {
+// LLVM-SAME: [4 x ptr] [
+// LLVM-SAME: ptr null,
+// LLVM-SAME: ptr null,
+// LLVM-SAME: ptr @_ZN5Child9MotherKeyEv,
+// LLVM-SAME: ptr @_ZN6Mother12MotherNonKeyEv
+// LLVM-SAME: ],
+// LLVM-SAME: [3 x ptr] [
+// LLVM-SAME: ptr inttoptr (i64 -8 to ptr),
+// LLVM-SAME: ptr null,
+// LLVM-SAME: ptr @_ZN6Father9FatherKeyEv
+// LLVM-SAME: ]
+// LLVM-SAME: }
+
+// OGCG: @_ZTV5Child = unnamed_addr constant { [4 x ptr], [3 x ptr] } {
+// OGCG-SAME: [4 x ptr] [
+// OGCG-SAME: ptr null,
+// OGCG-SAME: ptr null,
+// OGCG-SAME: ptr @_ZN5Child9MotherKeyEv,
+// OGCG-SAME: ptr @_ZN6Mother12MotherNonKeyEv
+// OGCG-SAME: ],
+// OGCG-SAME: [3 x ptr] [
+// OGCG-SAME: ptr inttoptr (i64 -8 to ptr),
+// OGCG-SAME: ptr null,
+// OGCG-SAME: ptr @_ZN6Father9FatherKeyEv
+// OGCG-SAME: ]
+// OGCG-SAME: }
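+// In each sub-vtable, the first slot is the offset-to-top (-8 for the Father-in-Child
+// subobject) and the second is the RTTI pointer, which is null here because of -fno-rtti.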
+
+// Mother vtable
+
+// CIR: cir.global "private" external @_ZTV6Mother = #cir.vtable<{
+// CIR-SAME: #cir.const_array<[
+// CIR-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN6Mother9MotherKeyEv> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN6Mother12MotherNonKeyEv> : !cir.ptr<!u8i>
+// CIR-SAME: ]> : !cir.array<!cir.ptr<!u8i> x 4>
+// CIR-SAME: }> : [[MOTHER_VTABLE_TYPE]]
+
+// LLVM: @_ZTV6Mother = global { [4 x ptr] } {
+// LLVM-SAME: [4 x ptr] [
+// LLVM-SAME: ptr null,
+// LLVM-SAME: ptr null,
+// LLVM-SAME: ptr @_ZN6Mother9MotherKeyEv,
+// LLVM-SAME: ptr @_ZN6Mother12MotherNonKeyEv
+// LLVM-SAME: ]
+// LLVM-SAME: }
+
+// OGCG: @_ZTV6Mother = unnamed_addr constant { [4 x ptr] } {
+// OGCG-SAME: [4 x ptr] [
+// OGCG-SAME: ptr null,
+// OGCG-SAME: ptr null,
+// OGCG-SAME: ptr @_ZN6Mother9MotherKeyEv,
+// OGCG-SAME: ptr @_ZN6Mother12MotherNonKeyEv
+// OGCG-SAME: ]
+// OGCG-SAME: }
+
+// Father vtable
+
+// CIR: cir.global "private" external @_ZTV6Father = #cir.vtable<{
+// CIR-SAME: #cir.const_array<[
+// CIR-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN6Father9FatherKeyEv> : !cir.ptr<!u8i>
+// CIR-SAME: ]> : !cir.array<!cir.ptr<!u8i> x 3>
+// CIR-SAME: }> : [[FATHER_VTABLE_TYPE]]
+
+// LLVM: @_ZTV6Father = global { [3 x ptr] } {
+// LLVM-SAME: [3 x ptr] [
+// LLVM-SAME: ptr null,
+// LLVM-SAME: ptr null,
+// LLVM-SAME: ptr @_ZN6Father9FatherKeyEv
+// LLVM-SAME: ]
+// LLVM-SAME: }
+
+// OGCG: @_ZTV6Father = unnamed_addr constant { [3 x ptr] } {
+// OGCG-SAME: [3 x ptr] [
+// OGCG-SAME: ptr null,
+// OGCG-SAME: ptr null,
+// OGCG-SAME: ptr @_ZN6Father9FatherKeyEv
+// OGCG-SAME: ]
+// OGCG-SAME: }
+
+
+Child::Child() {}
+
+// CIR: cir.func {{.*}} @_ZN5ChildC2Ev(%[[THIS_ARG:.*]]: !cir.ptr<!rec_Child>
+// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR: %[[MOTHER_BASE:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_Child> nonnull [0] -> !cir.ptr<!rec_Mother>
+// CIR: cir.call @_ZN6MotherC2Ev(%[[MOTHER_BASE]]) nothrow : (!cir.ptr<!rec_Mother>) -> ()
+// CIR: %[[FATHER_BASE:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_Child> nonnull [8] -> !cir.ptr<!rec_Father>
+// CIR: cir.call @_ZN6FatherC2Ev(%[[FATHER_BASE]]) nothrow : (!cir.ptr<!rec_Father>) -> ()
+// CIR: %[[CHILD_VPTR:.*]] = cir.vtable.address_point(@_ZTV5Child, address_point = <index = 0, offset = 2>) : !cir.vptr
+// CIR: %[[CHILD_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_Child> -> !cir.ptr<!cir.vptr>
+// CIR: cir.store{{.*}} %[[CHILD_VPTR]], %[[CHILD_VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+// CIR: %[[FATHER_IN_CHILD_VPTR:.*]] = cir.vtable.address_point(@_ZTV5Child, address_point = <index = 1, offset = 2>) : !cir.vptr
+// CIR: %[[FATHER_BASE:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_Child> nonnull [8] -> !cir.ptr<!rec_Father>
+// CIR: %[[FATHER_IN_CHILD_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[FATHER_BASE]] : !cir.ptr<!rec_Father> -> !cir.ptr<!cir.vptr>
+// CIR: cir.store{{.*}} %[[FATHER_IN_CHILD_VPTR]], %[[FATHER_IN_CHILD_VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+// CIR: cir.return
+
+// The GEP instructions are different between LLVM and OGCG, but they calculate the same addresses.
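+// With 8-byte pointers, address point <index = 0, offset = 2> is 2 * 8 = 16 bytes into
+// @_ZTV5Child, and <index = 1, offset = 2> is (4 + 2) * 8 = 48 bytes, which is what the
+// GEPs below compute.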
+
+// LLVM: define{{.*}} void @_ZN5ChildC2Ev(ptr{{.*}} %[[THIS_ARG:.*]])
+// LLVM: %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM: call void @_ZN6MotherC2Ev(ptr{{.*}} %[[THIS]])
+// LLVM: %[[FATHER_BASE:.*]] = getelementptr{{.*}} i8, ptr %[[THIS]], i32 8
+// LLVM: call void @_ZN6FatherC2Ev(ptr{{.*}} %[[FATHER_BASE]])
+// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV5Child, i64 16), ptr %[[THIS]]
+// LLVM: %[[FATHER_BASE:.*]] = getelementptr{{.*}} i8, ptr %[[THIS]], i32 8
+// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV5Child, i64 48), ptr %[[FATHER_BASE]]
+// LLVM: ret void
+
+// OGCG: define{{.*}} void @_ZN5ChildC2Ev(ptr{{.*}} %[[THIS_ARG:.*]])
+// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG: call void @_ZN6MotherC2Ev(ptr {{.*}} %[[THIS]])
+// OGCG: %[[FATHER_BASE:.*]] = getelementptr{{.*}} i8, ptr %[[THIS]], i64 8
+// OGCG: call void @_ZN6FatherC2Ev(ptr{{.*}} %[[FATHER_BASE]])
+// OGCG: store ptr getelementptr inbounds inrange(-16, 16) ({ [4 x ptr], [3 x ptr] }, ptr @_ZTV5Child, i32 0, i32 0, i32 2), ptr %[[THIS]]
+// OGCG: %[[FATHER_BASE:.*]] = getelementptr{{.*}} i8, ptr %[[THIS]], i64 8
+// OGCG: store ptr getelementptr inbounds inrange(-16, 8) ({ [4 x ptr], [3 x ptr] }, ptr @_ZTV5Child, i32 0, i32 1, i32 2), ptr %[[FATHER_BASE]]
+// OGCG: ret void
diff --git a/clang/test/CIR/CodeGen/statement-exprs.c b/clang/test/CIR/CodeGen/statement-exprs.c
new file mode 100644
index 0000000..1b54edf
--- /dev/null
+++ b/clang/test/CIR/CodeGen/statement-exprs.c
@@ -0,0 +1,277 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG
+
+// This test currently fails because of a non-ignored copy of an aggregate in test3.
+// XFAIL: *
+
+int f19(void) {
+ return ({ 3;;4;; });
+}
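+// The trailing null statements are ignored; the statement expression yields the value
+// of its last expression statement, so f19 returns 4.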
+
+// CIR: cir.func dso_local @f19() -> !s32i
+// CIR: %[[RETVAL:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[TMP:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["tmp"]
+// CIR: cir.scope {
+// CIR: %[[C3:.+]] = cir.const #cir.int<3> : !s32i
+// CIR: %[[C4:.+]] = cir.const #cir.int<4> : !s32i
+// CIR: cir.store {{.*}} %[[C4]], %[[TMP]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+// CIR: %[[TMP_VAL:.+]] = cir.load {{.*}} %[[TMP]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store %[[TMP_VAL]], %[[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[RES:.+]] = cir.load %[[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[RES]] : !s32i
+
+// LLVM: define dso_local i32 @f19()
+// LLVM: %[[VAR1:.+]] = alloca i32, i64 1
+// LLVM: %[[VAR2:.+]] = alloca i32, i64 1
+// LLVM: br label %[[LBL3:.+]]
+// LLVM: [[LBL3]]:
+// LLVM: store i32 4, ptr %[[VAR2]]
+// LLVM: br label %[[LBL4:.+]]
+// LLVM: [[LBL4]]:
+// LLVM: %[[V1:.+]] = load i32, ptr %[[VAR2]]
+// LLVM: store i32 %[[V1]], ptr %[[VAR1]]
+// LLVM: %[[RES:.+]] = load i32, ptr %[[VAR1]]
+// LLVM: ret i32 %[[RES]]
+
+// OGCG: define dso_local i32 @f19()
+// OGCG: entry:
+// OGCG: %[[TMP:.+]] = alloca i32
+// OGCG: store i32 4, ptr %[[TMP]]
+// OGCG: %[[TMP_VAL:.+]] = load i32, ptr %[[TMP]]
+// OGCG: ret i32 %[[TMP_VAL]]
+
+
+int nested(void) {
+ ({123;});
+ {
+ int bar = 987;
+ return ({ ({ int asdf = 123; asdf; }); ({9999;}); });
+ }
+}
+
+// CIR: cir.func dso_local @nested() -> !s32i
+// CIR: %[[RETVAL:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[TMP_OUTER:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["tmp"]
+// CIR: cir.scope {
+// CIR: %[[C123_OUTER:.+]] = cir.const #cir.int<123> : !s32i
+// CIR: cir.store {{.*}} %[[C123_OUTER]], %[[TMP_OUTER]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+// CIR: %[[LOAD_TMP_OUTER:.+]] = cir.load {{.*}} %[[TMP_OUTER]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.scope {
+// CIR: %[[BAR:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["bar", init]
+// CIR: %[[TMP_BARRET:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["tmp"]
+// CIR: %[[C987:.+]] = cir.const #cir.int<987> : !s32i
+// CIR: cir.store {{.*}} %[[C987]], %[[BAR]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.scope {
+// CIR: %[[TMP1:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["tmp"]
+// CIR: %[[TMP2:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["tmp"]
+// CIR: cir.scope {
+// CIR: %[[ASDF:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["asdf", init]
+// CIR: %[[C123_INNER:.+]] = cir.const #cir.int<123> : !s32i
+// CIR: cir.store {{.*}} %[[C123_INNER]], %[[ASDF]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[LOAD_ASDF:.+]] = cir.load {{.*}} %[[ASDF]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store {{.*}} %[[LOAD_ASDF]], %[[TMP1]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+// CIR: %[[V1:.+]] = cir.load {{.*}} %[[TMP1]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.scope {
+// CIR: %[[C9999:.+]] = cir.const #cir.int<9999> : !s32i
+// CIR: cir.store {{.*}} %[[C9999]], %[[TMP2]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+// CIR: %[[V2:.+]] = cir.load {{.*}} %[[TMP2]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store {{.*}} %[[V2]], %[[TMP_BARRET]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+// CIR: %[[BARRET_VAL:.+]] = cir.load {{.*}} %[[TMP_BARRET]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store %[[BARRET_VAL]], %[[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[RES:.+]] = cir.load %[[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[RES]] : !s32i
+// CIR: }
+// CIR: %[[FINAL_RES:.+]] = cir.load %[[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[FINAL_RES]] : !s32i
+
+// LLVM: define dso_local i32 @nested()
+// LLVM: %[[VAR1:.+]] = alloca i32, i64 1
+// LLVM: %[[VAR2:.+]] = alloca i32, i64 1
+// LLVM: %[[VAR3:.+]] = alloca i32, i64 1
+// LLVM: %[[VAR4:.+]] = alloca i32, i64 1
+// LLVM: %[[VAR5:.+]] = alloca i32, i64 1
+// LLVM: %[[VAR6:.+]] = alloca i32, i64 1
+// LLVM: %[[VAR7:.+]] = alloca i32, i64 1
+// LLVM: br label %[[LBL8:.+]]
+// LLVM: [[LBL8]]:
+// LLVM: store i32 123, ptr %[[VAR7]]
+// LLVM: br label %[[LBL9:.+]]
+// LLVM: [[LBL9]]:
+// LLVM: br label %[[LBL10:.+]]
+// LLVM: [[LBL10]]:
+// LLVM: store i32 987, ptr %[[VAR1]]
+// LLVM: br label %[[LBL11:.+]]
+// LLVM: [[LBL11]]:
+// LLVM: br label %[[LBL12:.+]]
+// LLVM: [[LBL12]]:
+// LLVM: store i32 123, ptr %[[VAR5]]
+// LLVM: %[[V1:.+]] = load i32, ptr %[[VAR5]]
+// LLVM: store i32 %[[V1]], ptr %[[VAR3]]
+// LLVM: br label %[[LBL14:.+]]
+// LLVM: [[LBL14]]:
+// LLVM: br label %[[LBL15:.+]]
+// LLVM: [[LBL15]]:
+// LLVM: store i32 9999, ptr %[[VAR4]]
+// LLVM: br label %[[LBL16:.+]]
+// LLVM: [[LBL16]]:
+// LLVM: %[[V2:.+]] = load i32, ptr %[[VAR4]]
+// LLVM: store i32 %[[V2]], ptr %[[VAR2]]
+// LLVM: br label %[[LBL18:.+]]
+// LLVM: [[LBL18]]:
+// LLVM: %[[V3:.+]] = load i32, ptr %[[VAR2]]
+// LLVM: store i32 %[[V3]], ptr %[[VAR6]]
+// LLVM: %[[RES:.+]] = load i32, ptr %[[VAR6]]
+// LLVM: ret i32 %[[RES]]
+
+// OGCG: define dso_local i32 @nested()
+// OGCG: entry:
+// OGCG: %[[TMP_OUTER:.+]] = alloca i32
+// OGCG: %[[BAR:.+]] = alloca i32
+// OGCG: %[[ASDF:.+]] = alloca i32
+// OGCG: %[[TMP1:.+]] = alloca i32
+// OGCG: %[[TMP2:.+]] = alloca i32
+// OGCG: %[[TMP3:.+]] = alloca i32
+// OGCG: store i32 123, ptr %[[TMP_OUTER]]
+// OGCG: %[[OUTER_VAL:.+]] = load i32, ptr %[[TMP_OUTER]]
+// OGCG: store i32 987, ptr %[[BAR]]
+// OGCG: store i32 123, ptr %[[ASDF]]
+// OGCG: %[[ASDF_VAL:.+]] = load i32, ptr %[[ASDF]]
+// OGCG: store i32 %[[ASDF_VAL]], ptr %[[TMP1]]
+// OGCG: %[[TMP1_VAL:.+]] = load i32, ptr %[[TMP1]]
+// OGCG: store i32 9999, ptr %[[TMP3]]
+// OGCG: %[[TMP3_VAL:.+]] = load i32, ptr %[[TMP3]]
+// OGCG: store i32 %[[TMP3_VAL]], ptr %[[TMP2]]
+// OGCG: %[[RES:.+]] = load i32, ptr %[[TMP2]]
+// OGCG: ret i32 %[[RES]]
+
+void empty() {
+ return ({;;;;});
+}
+
+// CIR: cir.func no_proto dso_local @empty()
+// CIR-NEXT: cir.return
+
+// LLVM: define dso_local void @empty()
+// LLVM: ret void
+// LLVM: }
+
+// OGCG: define dso_local void @empty()
+// OGCG: ret void
+// OGCG: }
+
+void empty2() { ({ }); }
+
+// CIR: @empty2
+// CIR-NEXT: cir.return
+
+// LLVM: @empty2()
+// LLVM: ret void
+// LLVM: }
+
+// OGCG: @empty2()
+// OGCG: ret void
+// OGCG: }
+
+
+// Yields an out-of-scope scalar: the value of x is copied into a temporary before x
+// goes out of scope.
+void test2() { ({int x = 3; x; }); }
+// CIR: @test2
+// CIR: %[[RETVAL:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>
+// CIR: cir.scope {
+// CIR: %[[VAR:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init]
+// [...]
+// CIR: %[[TMP:.+]] = cir.load{{.*}} %[[VAR]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store{{.*}} %[[TMP]], %[[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+// CIR: %{{.+}} = cir.load{{.*}} %[[RETVAL]] : !cir.ptr<!s32i>, !s32i
+
+// LLVM: define dso_local void @test2()
+// LLVM: %[[X:.+]] = alloca i32, i64 1
+// LLVM: %[[TMP:.+]] = alloca i32, i64 1
+// LLVM: br label %[[LBL3:.+]]
+// LLVM: [[LBL3]]:
+// LLVM: store i32 3, ptr %[[X]]
+// LLVM: %[[X_VAL:.+]] = load i32, ptr %[[X]]
+// LLVM: store i32 %[[X_VAL]], ptr %[[TMP]]
+// LLVM: br label %[[LBL5:.+]]
+// LLVM: [[LBL5]]:
+// LLVM: ret void
+
+// OGCG: define dso_local void @test2()
+// OGCG: entry:
+// OGCG: %[[X:.+]] = alloca i32
+// OGCG: %[[TMP:.+]] = alloca i32
+// OGCG: store i32 3, ptr %[[X]]
+// OGCG: %[[X_VAL:.+]] = load i32, ptr %[[X]]
+// OGCG: store i32 %[[X_VAL]], ptr %[[TMP]]
+// OGCG: %[[TMP_VAL:.+]] = load i32, ptr %[[TMP]]
+// OGCG: ret void
+
+// Yields an aggregate, which is copied into a temporary so its member can be read
+// after s goes out of scope.
+struct S { int x; };
+int test3() { return ({ struct S s = {1}; s; }).x; }
+// CIR: cir.func no_proto dso_local @test3() -> !s32i
+// CIR: %[[RETVAL:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[YIELDVAL:.+]] = cir.scope {
+// CIR: %[[REF_TMP0:.+]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["ref.tmp0"]
+// CIR: %[[TMP:.+]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["tmp"]
+// CIR: cir.scope {
+// CIR: %[[S:.+]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["s", init]
+// CIR: %[[GEP_X_S:.+]] = cir.get_member %[[S]][0] {name = "x"} : !cir.ptr<!rec_S> -> !cir.ptr<!s32i>
+// CIR: %[[C1:.+]] = cir.const #cir.int<1> : !s32i
+// CIR: cir.store {{.*}} %[[C1]], %[[GEP_X_S]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+// CIR: %[[GEP_X_TMP:.+]] = cir.get_member %[[REF_TMP0]][0] {name = "x"} : !cir.ptr<!rec_S> -> !cir.ptr<!s32i>
+// CIR: %[[XVAL:.+]] = cir.load {{.*}} %[[GEP_X_TMP]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.yield %[[XVAL]] : !s32i
+// CIR: } : !s32i
+// CIR: cir.store %[[YIELDVAL]], %[[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[RES:.+]] = cir.load %[[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[RES]] : !s32i
+
+// LLVM: define dso_local i32 @test3()
+// LLVM: %[[VAR1:.+]] = alloca %struct.S, i64 1
+// LLVM: %[[VAR2:.+]] = alloca %struct.S, i64 1
+// LLVM: %[[VAR3:.+]] = alloca %struct.S, i64 1
+// LLVM: %[[VAR4:.+]] = alloca i32, i64 1
+// LLVM: br label %[[LBL5:.+]]
+// LLVM: [[LBL5]]:
+// LLVM: br label %[[LBL6:.+]]
+// LLVM: [[LBL6]]:
+// LLVM: %[[GEP_S:.+]] = getelementptr %struct.S, ptr %[[VAR3]], i32 0, i32 0
+// LLVM: store i32 1, ptr %[[GEP_S]]
+// LLVM: br label %[[LBL8:.+]]
+// LLVM: [[LBL8]]:
+// LLVM: %[[GEP_VAR1:.+]] = getelementptr %struct.S, ptr %[[VAR1]], i32 0, i32 0
+// LLVM: %[[LOAD_X:.+]] = load i32, ptr %[[GEP_VAR1]]
+// LLVM: br label %[[LBL11:.+]]
+// LLVM: [[LBL11]]:
+// LLVM: %[[PHI:.+]] = phi i32 [ %[[LOAD_X]], %[[LBL8]] ]
+// LLVM: store i32 %[[PHI]], ptr %[[VAR4]]
+// LLVM: %[[RES:.+]] = load i32, ptr %[[VAR4]]
+// LLVM: ret i32 %[[RES]]
+
+// OGCG: define dso_local i32 @test3()
+// OGCG: entry:
+// OGCG: %[[REF_TMP:.+]] = alloca %struct.S
+// OGCG: %[[S:.+]] = alloca %struct.S
+// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[S]], ptr align 4 @__const.test3.s, i64 4, i1 false)
+// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[REF_TMP]], ptr align 4 %[[S]], i64 4, i1 false)
+// OGCG: %[[GEP:.+]] = getelementptr inbounds nuw %struct.S, ptr %[[REF_TMP]], i32 0, i32 0
+// OGCG: %[[XVAL:.+]] = load i32, ptr %[[GEP]]
+// OGCG: ret i32 %[[XVAL]]
+
+// Expression is wrapped in an expression attribute (just ensure it does not crash).
+void test4(int x) { ({[[gsl::suppress("foo")]] x;}); }
+// CIR: @test4
+// LLVM: @test4
+// OGCG: @test4
diff --git a/clang/test/CIR/CodeGen/static-vars.cpp b/clang/test/CIR/CodeGen/static-vars.cpp
index d949936..4f22fc7ab 100644
--- a/clang/test/CIR/CodeGen/static-vars.cpp
+++ b/clang/test/CIR/CodeGen/static-vars.cpp
@@ -2,6 +2,37 @@
// RUN: FileCheck --input-file=%t.cir %s
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t1.ll
// RUN: FileCheck --check-prefix=LLVM --input-file=%t1.ll %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t1.ll
+// RUN: FileCheck --check-prefix=OGCG --input-file=%t1.ll %s
+
+template<typename T>
+struct implicitly_instantiated {
+ static T member;
+};
+
+template<typename T>
+T implicitly_instantiated<T>::member = 12345;
+
+int use_implicitly_instantiated() {
+ return implicitly_instantiated<int>::member;
+}
+
+// CHECK-DAG: cir.global linkonce_odr comdat @_ZN23implicitly_instantiatedIiE6memberE = #cir.int<12345> : !s32i
+// LLVM-DAG: @_ZN23implicitly_instantiatedIiE6memberE = linkonce_odr global i32 12345, comdat
+// OGCG-DAG: @_ZN23implicitly_instantiatedIiE6memberE = linkonce_odr global i32 12345, comdat
+
+template<typename T>
+struct explicitly_instantiated {
+ static T member;
+};
+
+template<typename T>
+T explicitly_instantiated<T>::member = 54321;
+
+template int explicitly_instantiated<int>::member;
+// CHECK-DAG: cir.global weak_odr comdat @_ZN23explicitly_instantiatedIiE6memberE = #cir.int<54321> : !s32i
+// LLVM-DAG: @_ZN23explicitly_instantiatedIiE6memberE = weak_odr global i32 54321, comdat
+// OGCG-DAG: @_ZN23explicitly_instantiatedIiE6memberE = weak_odr global i32 54321, comdat
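+// An implicit instantiation of the static member gets linkonce_odr linkage, while an
+// explicit instantiation definition gets weak_odr; both are placed in a comdat.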
void func1(void) {
// Should lower default-initialized static vars.
@@ -42,6 +73,8 @@ void func2(void) {
// LLVM-DAG: $_ZZ4testvE1c = comdat any
// LLVM-DAG: @_ZZ4testvE1c = linkonce_odr global i32 0, comdat, align 4
+// OGCG-DAG: $_ZZ4testvE1c = comdat any
+// OGCG-DAG: @_ZZ4testvE1c = linkonce_odr global i32 0, comdat, align 4
inline void test() { static int c; }
// CHECK-LABEL: @_Z4testv
diff --git a/clang/test/CIR/CodeGen/stmt-expr.cpp b/clang/test/CIR/CodeGen/stmt-expr.cpp
new file mode 100644
index 0000000..9e3911f
--- /dev/null
+++ b/clang/test/CIR/CodeGen/stmt-expr.cpp
@@ -0,0 +1,90 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG
+
+class A {
+public:
+ A(): x(0) {}
+ A(A &a) : x(a.x) {}
+ int x;
+ void Foo() {}
+};
+
+void test1() {
+ ({
+ A a;
+ a;
+ }).Foo();
+}
+
+// CIR: cir.func dso_local @_Z5test1v()
+// CIR: cir.scope {
+// CIR: %[[REF_TMP0:.+]] = cir.alloca !rec_A, !cir.ptr<!rec_A>, ["ref.tmp0"]
+// CIR: %[[TMP:.+]] = cir.alloca !rec_A, !cir.ptr<!rec_A>, ["tmp"]
+// CIR: cir.scope {
+// CIR: %[[A:.+]] = cir.alloca !rec_A, !cir.ptr<!rec_A>, ["a", init]
+// CIR: cir.call @_ZN1AC2Ev(%[[A]]) : (!cir.ptr<!rec_A>) -> ()
+// CIR: cir.call @_ZN1AC2ERS_(%[[REF_TMP0]], %[[A]]) : (!cir.ptr<!rec_A>, !cir.ptr<!rec_A>) -> ()
+// CIR: }
+// CIR: cir.call @_ZN1A3FooEv(%[[REF_TMP0]]) : (!cir.ptr<!rec_A>) -> ()
+// CIR: }
+// CIR: cir.return
+
+// LLVM: define dso_local void @_Z5test1v()
+// LLVM: %[[VAR1:.+]] = alloca %class.A, i64 1
+// LLVM: %[[VAR2:.+]] = alloca %class.A, i64 1
+// LLVM: %[[VAR3:.+]] = alloca %class.A, i64 1
+// LLVM: br label %[[LBL4:.+]]
+// LLVM: [[LBL4]]:
+// LLVM: br label %[[LBL5:.+]]
+// LLVM: [[LBL5]]:
+// LLVM: call void @_ZN1AC2Ev(ptr %[[VAR3]])
+// LLVM: call void @_ZN1AC2ERS_(ptr %[[VAR1]], ptr %[[VAR3]])
+// LLVM: br label %[[LBL6:.+]]
+// LLVM: [[LBL6]]:
+// LLVM: call void @_ZN1A3FooEv(ptr %[[VAR1]])
+// LLVM: br label %[[LBL7:.+]]
+// LLVM: [[LBL7]]:
+// LLVM: ret void
+
+// OGCG: define dso_local void @_Z5test1v()
+// OGCG: entry:
+// OGCG: %[[REF_TMP:.+]] = alloca %class.A
+// OGCG: %[[A:.+]] = alloca %class.A
+// OGCG: call void @_ZN1AC2Ev(ptr {{.*}} %[[A]])
+// OGCG: call void @_ZN1AC2ERS_(ptr {{.*}} %[[REF_TMP]], ptr {{.*}} %[[A]])
+// OGCG: call void @_ZN1A3FooEv(ptr {{.*}} %[[REF_TMP]])
+// OGCG: ret void
+
+struct with_dtor {
+ ~with_dtor();
+};
+
+void cleanup() {
+ ({ with_dtor wd; });
+}
+
+// CIR: cir.func dso_local @_Z7cleanupv()
+// CIR: cir.scope {
+// CIR: %[[WD:.+]] = cir.alloca !rec_with_dtor, !cir.ptr<!rec_with_dtor>, ["wd"]
+// CIR: cir.call @_ZN9with_dtorD1Ev(%[[WD]]) nothrow : (!cir.ptr<!rec_with_dtor>) -> ()
+// CIR: }
+// CIR: cir.return
+
+// LLVM: define dso_local void @_Z7cleanupv()
+// LLVM: %[[WD:.+]] = alloca %struct.with_dtor, i64 1
+// LLVM: br label %[[LBL2:.+]]
+// LLVM: [[LBL2]]:
+// LLVM: call void @_ZN9with_dtorD1Ev(ptr %[[WD]])
+// LLVM: br label %[[LBL3:.+]]
+// LLVM: [[LBL3]]:
+// LLVM: ret void
+
+// OGCG: define dso_local void @_Z7cleanupv()
+// OGCG: entry:
+// OGCG: %[[WD:.+]] = alloca %struct.with_dtor
+// OGCG: call void @_ZN9with_dtorD1Ev(ptr {{.*}} %[[WD]])
+// OGCG: ret void
diff --git a/clang/test/CIR/CodeGen/string-literals.c b/clang/test/CIR/CodeGen/string-literals.c
index 44fd191..38657b2 100644
--- a/clang/test/CIR/CodeGen/string-literals.c
+++ b/clang/test/CIR/CodeGen/string-literals.c
@@ -17,13 +17,13 @@ char g_exact[4] = "123";
// CIR: cir.global external @g_exact = #cir.const_array<"123\00" : !cir.array<!s8i x 4>> : !cir.array<!s8i x 4>
-// CIR: cir.global "private" cir_private dso_local @[[STR1_GLOBAL:.*]] = #cir.const_array<"1\00" : !cir.array<!s8i x 2>> : !cir.array<!s8i x 2>
-// CIR: cir.global "private" cir_private dso_local @[[STR2_GLOBAL:.*]] = #cir.zero : !cir.array<!s8i x 1>
-// CIR: cir.global "private" cir_private dso_local @[[STR3_GLOBAL:.*]] = #cir.zero : !cir.array<!s8i x 2>
+// CIR: cir.global "private" constant cir_private dso_local @[[STR1_GLOBAL:.*]] = #cir.const_array<"1\00" : !cir.array<!s8i x 2>> : !cir.array<!s8i x 2>
+// CIR: cir.global "private" constant cir_private dso_local @[[STR2_GLOBAL:.*]] = #cir.zero : !cir.array<!s8i x 1>
+// CIR: cir.global "private" constant cir_private dso_local @[[STR3_GLOBAL:.*]] = #cir.zero : !cir.array<!s8i x 2>
-// LLVM: @[[STR1_GLOBAL:.*]] = private global [2 x i8] c"1\00"
-// LLVM: @[[STR2_GLOBAL:.*]] = private global [1 x i8] zeroinitializer
-// LLVM: @[[STR3_GLOBAL:.*]] = private global [2 x i8] zeroinitializer
+// LLVM: @[[STR1_GLOBAL:.*]] = private constant [2 x i8] c"1\00"
+// LLVM: @[[STR2_GLOBAL:.*]] = private constant [1 x i8] zeroinitializer
+// LLVM: @[[STR3_GLOBAL:.*]] = private constant [2 x i8] zeroinitializer
// OGCG: @[[STR1_GLOBAL:.*]] = private unnamed_addr constant [2 x i8] c"1\00"
// OGCG: @[[STR2_GLOBAL:.*]] = private unnamed_addr constant [1 x i8] zeroinitializer
diff --git a/clang/test/CIR/CodeGen/string-literals.cpp b/clang/test/CIR/CodeGen/string-literals.cpp
index 081c4c2..1411567 100644
--- a/clang/test/CIR/CodeGen/string-literals.cpp
+++ b/clang/test/CIR/CodeGen/string-literals.cpp
@@ -5,11 +5,37 @@
// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -emit-llvm %s -o %t.ll
// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s
-// CIR: cir.global "private" cir_private dso_local @[[STR1_GLOBAL:.*]] = #cir.const_array<"abcd\00" : !cir.array<!s8i x 5>> : !cir.array<!s8i x 5>
-// LLVM: @[[STR1_GLOBAL:.*]] = private global [5 x i8] c"abcd\00"
+char const *array[] {
+ "my", "hands", "are", "typing", "words"
+};
-// OGCG: @[[STR1_GLOBAL:.*]] = private unnamed_addr constant [5 x i8] c"abcd\00"
+// CIR: cir.global "private" constant cir_private dso_local @"[[STR:.+]]" = #cir.const_array<"my\00" : !cir.array<!s8i x 3>> : !cir.array<!s8i x 3>
+// CIR: cir.global "private" constant cir_private dso_local @"[[STR1:.+]]" = #cir.const_array<"hands\00" : !cir.array<!s8i x 6>> : !cir.array<!s8i x 6>
+// CIR: cir.global "private" constant cir_private dso_local @"[[STR2:.+]]" = #cir.const_array<"are\00" : !cir.array<!s8i x 4>> : !cir.array<!s8i x 4>
+// CIR: cir.global "private" constant cir_private dso_local @"[[STR3:.+]]" = #cir.const_array<"typing\00" : !cir.array<!s8i x 7>> : !cir.array<!s8i x 7>
+// CIR: cir.global "private" constant cir_private dso_local @"[[STR4:.+]]" = #cir.const_array<"words\00" : !cir.array<!s8i x 6>> : !cir.array<!s8i x 6>
+// CIR: cir.global external @array = #cir.const_array<[#cir.global_view<@"[[STR]]"> : !cir.ptr<!s8i>, #cir.global_view<@"[[STR1]]"> : !cir.ptr<!s8i>, #cir.global_view<@"[[STR2]]"> : !cir.ptr<!s8i>, #cir.global_view<@"[[STR3]]"> : !cir.ptr<!s8i>, #cir.global_view<@"[[STR4]]"> : !cir.ptr<!s8i>]> : !cir.array<!cir.ptr<!s8i> x 5>
+
+// LLVM: @[[STR:.+]] = private constant [3 x i8] c"my\00"
+// LLVM: @[[STR1:.+]] = private constant [6 x i8] c"hands\00"
+// LLVM: @[[STR2:.+]] = private constant [4 x i8] c"are\00"
+// LLVM: @[[STR3:.+]] = private constant [7 x i8] c"typing\00"
+// LLVM: @[[STR4:.+]] = private constant [6 x i8] c"words\00"
+// LLVM: @array = global [5 x ptr] [ptr @[[STR]], ptr @[[STR1]], ptr @[[STR2]], ptr @[[STR3]], ptr @[[STR4]]]
+
+// OGCG: @[[STR:.+]] = private unnamed_addr constant [3 x i8] c"my\00"
+// OGCG: @[[STR1:.+]] = private unnamed_addr constant [6 x i8] c"hands\00"
+// OGCG: @[[STR2:.+]] = private unnamed_addr constant [4 x i8] c"are\00"
+// OGCG: @[[STR3:.+]] = private unnamed_addr constant [7 x i8] c"typing\00"
+// OGCG: @[[STR4:.+]] = private unnamed_addr constant [6 x i8] c"words\00"
+// OGCG: @array = global [5 x ptr] [ptr @[[STR]], ptr @[[STR1]], ptr @[[STR2]], ptr @[[STR3]], ptr @[[STR4]]]
+
+// CIR: cir.global "private" constant cir_private dso_local @[[STR5_GLOBAL:.*]] = #cir.const_array<"abcd\00" : !cir.array<!s8i x 5>> : !cir.array<!s8i x 5>
+
+// LLVM: @[[STR5_GLOBAL:.*]] = private constant [5 x i8] c"abcd\00"
+
+// OGCG: @[[STR5_GLOBAL:.*]] = private unnamed_addr constant [5 x i8] c"abcd\00"
decltype(auto) returns_literal() {
return "abcd";
@@ -17,7 +43,7 @@ decltype(auto) returns_literal() {
// CIR: cir.func{{.*}} @_Z15returns_literalv() -> !cir.ptr<!cir.array<!s8i x 5>>
// CIR: %[[RET_ADDR:.*]] = cir.alloca !cir.ptr<!cir.array<!s8i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s8i x 5>>>, ["__retval"]
-// CIR: %[[STR_ADDR:.*]] = cir.get_global @[[STR1_GLOBAL]] : !cir.ptr<!cir.array<!s8i x 5>>
+// CIR: %[[STR_ADDR:.*]] = cir.get_global @[[STR5_GLOBAL]] : !cir.ptr<!cir.array<!s8i x 5>>
// CIR: cir.store{{.*}} %[[STR_ADDR]], %[[RET_ADDR]]
// CIR: %[[RET:.*]] = cir.load %[[RET_ADDR]]
// CIR: cir.return %[[RET]]
diff --git a/clang/test/CIR/CodeGen/throws.cpp b/clang/test/CIR/CodeGen/throws.cpp
new file mode 100644
index 0000000..0122f30
--- /dev/null
+++ b/clang/test/CIR/CodeGen/throws.cpp
@@ -0,0 +1,85 @@
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+void foo() {
+ throw;
+}
+
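+// A bare `throw;` rethrows the currently handled exception via __cxa_rethrow;
+// since that call never returns, codegen follows it with an unreachable.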
+// CIR: cir.throw
+// CIR: cir.unreachable
+
+// LLVM: call void @__cxa_rethrow()
+// LLVM: unreachable
+
+// OGCG: call void @__cxa_rethrow()
+// OGCG: unreachable
+
+int foo1(int a, int b) {
+ if (b == 0)
+ throw;
+ return a / b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init]
+// CIR: %[[RES_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: cir.store %{{.*}}, %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store %{{.*}}, %[[B_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.scope {
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: %[[IS_B_ZERO:.*]] = cir.cmp(eq, %[[TMP_B]], %[[CONST_0]]) : !s32i, !cir.bool
+// CIR: cir.if %[[IS_B_ZERO]] {
+// CIR: cir.throw
+// CIR: cir.unreachable
+// CIR: }
+// CIR: }
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[DIV_A_B:.*]] = cir.binop(div, %[[TMP_A:.*]], %[[TMP_B:.*]]) : !s32i
+// CIR: cir.store %[[DIV_A_B]], %[[RES_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[RESULT:.*]] = cir.load %[[RES_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[RESULT]] : !s32i
+
+// LLVM: %[[A_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[RES_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM: store i32 %{{.*}}, ptr %[[A_ADDR]], align 4
+// LLVM: store i32 %{{.*}}, ptr %[[B_ADDR]], align 4
+// LLVM: br label %[[CHECK_COND:.*]]
+// LLVM: [[CHECK_COND]]:
+// LLVM: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// LLVM: %[[IS_B_ZERO:.*]] = icmp eq i32 %[[TMP_B]], 0
+// LLVM: br i1 %[[IS_B_ZERO]], label %[[IF_THEN:.*]], label %[[IF_ELSE:.*]]
+// LLVM: [[IF_THEN]]:
+// LLVM: call void @__cxa_rethrow()
+// LLVM: unreachable
+// LLVM: [[IF_ELSE]]:
+// LLVM: br label %[[IF_END:.*]]
+// LLVM: [[IF_END]]:
+// LLVM: %[[TMP_A:.*]] = load i32, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// LLVM: %[[DIV_A_B:.*]] = sdiv i32 %[[TMP_A]], %[[TMP_B]]
+// LLVM: store i32 %[[DIV_A_B]], ptr %[[RES_ADDR]], align 4
+// LLVM: %[[RESULT:.*]] = load i32, ptr %[[RES_ADDR]], align 4
+// LLVM: ret i32 %[[RESULT]]
+
+// OGCG: %[[A_ADDR:.*]] = alloca i32, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca i32, align 4
+// OGCG: store i32 %{{.*}}, ptr %[[A_ADDR]], align 4
+// OGCG: store i32 %{{.*}}, ptr %[[B_ADDR]], align 4
+// OGCG: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// OGCG: %[[IS_B_ZERO:.*]] = icmp eq i32 %[[TMP_B]], 0
+// OGCG: br i1 %[[IS_B_ZERO]], label %[[IF_THEN:.*]], label %[[IF_END:.*]]
+// OGCG: [[IF_THEN]]:
+// OGCG: call void @__cxa_rethrow()
+// OGCG: unreachable
+// OGCG: [[IF_END]]:
+// OGCG: %[[TMP_A:.*]] = load i32, ptr %[[A_ADDR]], align 4
+// OGCG: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// OGCG: %[[DIV_A_B:.*]] = sdiv i32 %[[TMP_A]], %[[TMP_B]]
+// OGCG: ret i32 %[[DIV_A_B]]
diff --git a/clang/test/CIR/CodeGen/var_arg.c b/clang/test/CIR/CodeGen/var_arg.c
new file mode 100644
index 0000000..e9c4acb
--- /dev/null
+++ b/clang/test/CIR/CodeGen/var_arg.c
@@ -0,0 +1,166 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+// CIR: !rec___va_list_tag = !cir.record<struct "__va_list_tag" {!u32i, !u32i, !cir.ptr<!void>, !cir.ptr<!void>}
+// LLVM: %struct.__va_list_tag = type { i32, i32, ptr, ptr }
+// OGCG: %struct.__va_list_tag = type { i32, i32, ptr, ptr }
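+//
+// Per the SysV x86-64 ABI, the __va_list_tag fields are, in order: gp_offset,
+// fp_offset, overflow_arg_area, and reg_save_area.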
+
+int varargs(int count, ...) {
+ __builtin_va_list args;
+ __builtin_va_start(args, count);
+ int res = __builtin_va_arg(args, int);
+ __builtin_va_end(args);
+ return res;
+}
+
+// CIR-LABEL: cir.func dso_local @varargs(
+// CIR: %[[COUNT_ADDR:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["count", init]
+// CIR: %[[RET_ADDR:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[VAAREA:.+]] = cir.alloca !cir.array<!rec___va_list_tag x 1>, !cir.ptr<!cir.array<!rec___va_list_tag x 1>>, ["args"]
+// CIR: %[[RES_ADDR:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["res", init]
+// CIR: cir.store %arg0, %[[COUNT_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[VA_PTR0:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>), !cir.ptr<!rec___va_list_tag>
+// CIR: %[[COUNT_VAL:.+]] = cir.load{{.*}} %[[COUNT_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.va_start %[[VA_PTR0]] %[[COUNT_VAL]] : !cir.ptr<!rec___va_list_tag>, !s32i
+// CIR: %[[VA_PTR1:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>), !cir.ptr<!rec___va_list_tag>
+// CIR: %[[VA_ARG:.+]] = cir.va_arg %[[VA_PTR1]] : (!cir.ptr<!rec___va_list_tag>) -> !s32i
+// CIR: cir.store{{.*}} %[[VA_ARG]], %[[RES_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[VA_PTR2:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>), !cir.ptr<!rec___va_list_tag>
+// CIR: cir.va_end %[[VA_PTR2]] : !cir.ptr<!rec___va_list_tag>
+// CIR: %[[RESULT:.+]] = cir.load{{.*}} %[[RES_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store %[[RESULT]], %[[RET_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[RETVAL:.+]] = cir.load{{.*}} %[[RET_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[RETVAL]] : !s32i
+
+// LLVM-LABEL: define dso_local i32 @varargs(
+// LLVM: %[[COUNT_ADDR:.+]] = alloca i32{{.*}}
+// LLVM: %[[RET_ADDR:.+]] = alloca i32{{.*}}
+// LLVM: %[[VAAREA:.+]] = alloca [1 x %struct.__va_list_tag]{{.*}}
+// LLVM: %[[RES_ADDR:.+]] = alloca i32{{.*}}
+// LLVM: %[[VA_PTR0:.+]] = getelementptr %struct.__va_list_tag, ptr %[[VAAREA]], i32 0
+// LLVM: call void @llvm.va_start.p0(ptr %[[VA_PTR0]])
+// LLVM: %[[VA_PTR1:.+]] = getelementptr %struct.__va_list_tag, ptr %[[VAAREA]], i32 0
+// LLVM: %[[VA_ARG:.+]] = va_arg ptr %[[VA_PTR1]], i32
+// LLVM: store i32 %[[VA_ARG]], ptr %[[RES_ADDR]], {{.*}}
+// LLVM: %[[VA_PTR2:.+]] = getelementptr %struct.__va_list_tag, ptr %[[VAAREA]], i32 0
+// LLVM: call void @llvm.va_end.p0(ptr %[[VA_PTR2]])
+// LLVM: %[[TMP_LOAD:.+]] = load i32, ptr %[[RES_ADDR]], {{.*}}
+// LLVM: store i32 %[[TMP_LOAD]], ptr %[[RET_ADDR]], {{.*}}
+// LLVM: %[[RETVAL:.+]] = load i32, ptr %[[RET_ADDR]], {{.*}}
+// LLVM: ret i32 %[[RETVAL]]
+
+// OGCG-LABEL: define dso_local i32 @varargs
+// OGCG: %[[COUNT_ADDR:.+]] = alloca i32
+// OGCG: %[[VAAREA:.+]] = alloca [1 x %struct.__va_list_tag]
+// OGCG: %[[RES_ADDR:.+]] = alloca i32
+// OGCG: %[[DECAY:.+]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %[[VAAREA]]
+// OGCG: call void @llvm.va_start.p0(ptr %[[DECAY]])
+// OGCG: %[[DECAY1:.+]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %[[VAAREA]]
+// OGCG: %[[GPOFFSET_PTR:.+]] = getelementptr inbounds nuw %struct.__va_list_tag, ptr %[[DECAY1]], i32 0, i32 0
+// OGCG: %[[GPOFFSET:.+]] = load i32, ptr %[[GPOFFSET_PTR]]
+// OGCG: %[[COND:.+]] = icmp ule i32 %[[GPOFFSET]], 40
+// OGCG: br i1 %[[COND]], label %vaarg.in_reg, label %vaarg.in_mem
+//
+// OGCG: vaarg.in_reg:
+// OGCG: %[[REGSAVE_PTR:.+]] = getelementptr inbounds nuw %struct.__va_list_tag, ptr %[[DECAY1]], i32 0, i32 3
+// OGCG: %[[REGSAVE:.+]] = load ptr, ptr %[[REGSAVE_PTR]]
+// OGCG: %[[VAADDR1:.+]] = getelementptr i8, ptr %[[REGSAVE]], i32 %[[GPOFFSET]]
+// OGCG: br label %vaarg.end
+//
+// OGCG: vaarg.in_mem:
+// OGCG: %[[OVERFLOW_PTR:.+]] = getelementptr inbounds nuw %struct.__va_list_tag, ptr %[[DECAY1]], i32 0, i32 2
+// OGCG: %[[OVERFLOW:.+]] = load ptr, ptr %[[OVERFLOW_PTR]]
+// OGCG: br label %vaarg.end
+//
+// OGCG: vaarg.end:
+// OGCG: %[[PHI:.+]] = phi ptr [ %[[VAADDR1]], %vaarg.in_reg ], [ %[[OVERFLOW]], %vaarg.in_mem ]
+// OGCG: %[[LOADED:.+]] = load i32, ptr %[[PHI]]
+// OGCG: store i32 %[[LOADED]], ptr %[[RES_ADDR]]
+// OGCG: %[[DECAY2:.+]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %[[VAAREA]]
+// OGCG: call void @llvm.va_end.p0(ptr %[[DECAY2]])
+// OGCG: %[[VAL:.+]] = load i32, ptr %[[RES_ADDR]]
+// OGCG: ret i32 %[[VAL]]
+
+int stdarg_start(int count, ...) {
+ __builtin_va_list args;
+ __builtin_stdarg_start(args, 12345);
+ int res = __builtin_va_arg(args, int);
+ __builtin_va_end(args);
+ return res;
+}
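+
+// __builtin_stdarg_start is the legacy spelling of __builtin_va_start, so it
+// lowers to the same cir.va_start sequence; the constant second argument is
+// still emitted and passed to cir.va_start, as checked below.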
+
+// CIR-LABEL: cir.func dso_local @stdarg_start(
+// CIR: %[[COUNT_ADDR:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["count", init]
+// CIR: %[[RET_ADDR:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[VAAREA:.+]] = cir.alloca !cir.array<!rec___va_list_tag x 1>, !cir.ptr<!cir.array<!rec___va_list_tag x 1>>, ["args"]
+// CIR: %[[RES_ADDR:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["res", init]
+// CIR: cir.store %arg0, %[[COUNT_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[VA_PTR0:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>), !cir.ptr<!rec___va_list_tag>
+// CIR: %[[C12345:.+]] = cir.const #cir.int<12345> : !s32i
+// CIR: cir.va_start %[[VA_PTR0]] %[[C12345]] : !cir.ptr<!rec___va_list_tag>, !s32i
+// CIR: %[[VA_PTR1:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>), !cir.ptr<!rec___va_list_tag>
+// CIR: %[[VA_ARG:.+]] = cir.va_arg %[[VA_PTR1]] : (!cir.ptr<!rec___va_list_tag>) -> !s32i
+// CIR: cir.store{{.*}} %[[VA_ARG]], %[[RES_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[VA_PTR2:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>), !cir.ptr<!rec___va_list_tag>
+// CIR: cir.va_end %[[VA_PTR2]] : !cir.ptr<!rec___va_list_tag>
+// CIR: %[[RESULT:.+]] = cir.load{{.*}} %[[RES_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store %[[RESULT]], %[[RET_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[RETVAL:.+]] = cir.load{{.*}} %[[RET_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[RETVAL]] : !s32i
+
+// LLVM-LABEL: define dso_local i32 @stdarg_start(
+// LLVM: %[[COUNT_ADDR:.+]] = alloca i32{{.*}}
+// LLVM: %[[RET_ADDR:.+]] = alloca i32{{.*}}
+// LLVM: %[[VAAREA:.+]] = alloca [1 x %struct.__va_list_tag]{{.*}}
+// LLVM: %[[RES_ADDR:.+]] = alloca i32{{.*}}
+// LLVM: %[[VA_PTR0:.+]] = getelementptr %struct.__va_list_tag, ptr %[[VAAREA]], i32 0
+// LLVM: call void @llvm.va_start.p0(ptr %[[VA_PTR0]])
+// LLVM: %[[VA_PTR1:.+]] = getelementptr %struct.__va_list_tag, ptr %[[VAAREA]], i32 0
+// LLVM: %[[VA_ARG:.+]] = va_arg ptr %[[VA_PTR1]], i32
+// LLVM: store i32 %[[VA_ARG]], ptr %[[RES_ADDR]], {{.*}}
+// LLVM: %[[VA_PTR2:.+]] = getelementptr %struct.__va_list_tag, ptr %[[VAAREA]], i32 0
+// LLVM: call void @llvm.va_end.p0(ptr %[[VA_PTR2]])
+// LLVM: %[[TMP_LOAD:.+]] = load i32, ptr %[[RES_ADDR]], {{.*}}
+// LLVM: store i32 %[[TMP_LOAD]], ptr %[[RET_ADDR]], {{.*}}
+// LLVM: %[[RETVAL:.+]] = load i32, ptr %[[RET_ADDR]], {{.*}}
+// LLVM: ret i32 %[[RETVAL]]
+
+// OGCG-LABEL: define dso_local i32 @stdarg_start
+// OGCG: %[[COUNT_ADDR:.+]] = alloca i32
+// OGCG: %[[VAAREA:.+]] = alloca [1 x %struct.__va_list_tag]
+// OGCG: %[[RES_ADDR:.+]] = alloca i32
+// OGCG: %[[DECAY:.+]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %[[VAAREA]], i64 0, i64 0
+// OGCG: call void @llvm.va_start.p0(ptr %[[DECAY]])
+// OGCG: %[[DECAY1:.+]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %[[VAAREA]], i64 0, i64 0
+// OGCG: %[[GPOFFSET_PTR:.+]] = getelementptr inbounds nuw %struct.__va_list_tag, ptr %[[DECAY1]], i32 0, i32 0
+// OGCG: %[[GPOFFSET:.+]] = load i32, ptr %[[GPOFFSET_PTR]]
+// OGCG: %[[COND:.+]] = icmp ule i32 %[[GPOFFSET]], 40
+// OGCG: br i1 %[[COND]], label %vaarg.in_reg, label %vaarg.in_mem
+//
+// OGCG: vaarg.in_reg:
+// OGCG: %[[REGSAVE_PTR:.+]] = getelementptr inbounds nuw %struct.__va_list_tag, ptr %[[DECAY1]], i32 0, i32 3
+// OGCG: %[[REGSAVE:.+]] = load ptr, ptr %[[REGSAVE_PTR]]
+// OGCG: %[[VAADDR1:.+]] = getelementptr i8, ptr %[[REGSAVE]], i32 %[[GPOFFSET]]
+// OGCG: %[[NEXT_GPOFFSET:.+]] = add i32 %[[GPOFFSET]], 8
+// OGCG: store i32 %[[NEXT_GPOFFSET]], ptr %[[GPOFFSET_PTR]]
+// OGCG: br label %vaarg.end
+//
+// OGCG: vaarg.in_mem:
+// OGCG: %[[OVERFLOW_PTR:.+]] = getelementptr inbounds nuw %struct.__va_list_tag, ptr %[[DECAY1]], i32 0, i32 2
+// OGCG: %[[OVERFLOW:.+]] = load ptr, ptr %[[OVERFLOW_PTR]]
+// OGCG: %[[OVERFLOW_NEXT:.+]] = getelementptr i8, ptr %[[OVERFLOW]], i32 8
+// OGCG: store ptr %[[OVERFLOW_NEXT]], ptr %[[OVERFLOW_PTR]]
+// OGCG: br label %vaarg.end
+//
+// OGCG: vaarg.end:
+// OGCG: %[[PHI:.+]] = phi ptr [ %[[VAADDR1]], %vaarg.in_reg ], [ %[[OVERFLOW]], %vaarg.in_mem ]
+// OGCG: %[[LOADED:.+]] = load i32, ptr %[[PHI]]
+// OGCG: store i32 %[[LOADED]], ptr %[[RES_ADDR]]
+// OGCG: %[[DECAY2:.+]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %[[VAAREA]], i64 0, i64 0
+// OGCG: call void @llvm.va_end.p0(ptr %[[DECAY2]])
+// OGCG: %[[VAL:.+]] = load i32, ptr %[[RES_ADDR]]
+// OGCG: ret i32 %[[VAL]]
diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp
new file mode 100644
index 0000000..1d1b5e0
--- /dev/null
+++ b/clang/test/CIR/CodeGen/vbase.cpp
@@ -0,0 +1,70 @@
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG
+
+struct A {
+ int a;
+};
+
+struct B: virtual A {
+ int b;
+};
+
+void ppp() { B b; }
+
+// Note: OGCG speculatively emits the VTT and VTables. This is not yet implemented in CIR.
+
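+// B's non-virtual part is laid out as { vptr, int b }, 12 bytes in all, so the
+// virtual base A lands at offset 12. That offset appears both in the vtable's
+// vbase-offset entry (inttoptr (i64 12 to ptr)) and in cir.base_class_addr
+// below.
+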
+// Vtable definition for B
+// CIR: cir.global "private" external @_ZTV1B
+
+// LLVM: @_ZTV1B = external global { [3 x ptr] }
+
+// OGCG: @_ZTV1B = linkonce_odr unnamed_addr constant { [3 x ptr] } { [3 x ptr] [ptr inttoptr (i64 12 to ptr), ptr null, ptr @_ZTI1B] }, comdat, align 8
+
+// Constructor for A
+// CIR: cir.func comdat linkonce_odr @_ZN1AC2Ev(%arg0: !cir.ptr<!rec_A>
+// CIR: %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>, ["this", init]
+// CIR: cir.store %arg0, %[[THIS_ADDR]] : !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>
+// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] : !cir.ptr<!cir.ptr<!rec_A>>, !cir.ptr<!rec_A>
+// CIR: cir.return
+
+// LLVM: define{{.*}} void @_ZN1AC2Ev(ptr %[[THIS_ARG:.*]]) {
+// LLVM: %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM: ret void
+
+// Note: OGCG elides the constructor for A. This is not yet implemented in CIR.
+
+// Constructor for B
+// CIR: cir.func comdat linkonce_odr @_ZN1BC1Ev(%arg0: !cir.ptr<!rec_B>
+// CIR: %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!rec_B>>, ["this", init]
+// CIR: cir.store %arg0, %[[THIS_ADDR]] : !cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!rec_B>>
+// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] : !cir.ptr<!cir.ptr<!rec_B>>, !cir.ptr<!rec_B>
+// CIR: %[[BASE_A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_B> nonnull [12] -> !cir.ptr<!rec_A>
+// CIR: cir.call @_ZN1AC2Ev(%[[BASE_A_ADDR]]) nothrow : (!cir.ptr<!rec_A>) -> ()
+// CIR: %[[VTABLE:.*]] = cir.vtable.address_point(@_ZTV1B, address_point = <index = 0, offset = 3>) : !cir.vptr
+// CIR: %[[B_VPTR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_B> -> !cir.ptr<!cir.vptr>
+// CIR: cir.store align(8) %[[VTABLE]], %[[B_VPTR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+// CIR: cir.return
+
+// LLVM: define{{.*}} void @_ZN1BC1Ev(ptr %[[THIS_ARG:.*]]) {
+// LLVM: %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM: %[[BASE_A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 12
+// LLVM: call void @_ZN1AC2Ev(ptr %[[BASE_A_ADDR]])
+// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1B, i64 24), ptr %[[THIS]]
+// LLVM: ret void
+
+// OGCG: define{{.*}} void @_ZN1BC1Ev(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG: %[[BASE_A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 12
+// OGCG: store ptr getelementptr inbounds inrange(-24, 0) ({ [3 x ptr] }, ptr @_ZTV1B, i32 0, i32 0, i32 3), ptr %[[THIS]]
+// OGCG: ret void
+
diff --git a/clang/test/CIR/CodeGen/virtual-function-calls.cpp b/clang/test/CIR/CodeGen/virtual-function-calls.cpp
index 3e03b32..e68b38f 100644
--- a/clang/test/CIR/CodeGen/virtual-function-calls.cpp
+++ b/clang/test/CIR/CodeGen/virtual-function-calls.cpp
@@ -1,13 +1,81 @@
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir
// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
struct A {
+ A();
virtual void f(char);
};
-// This is just here to force the class definition to be emitted without
-// requiring any other support. It will be removed when more complete
-// vtable support is implemented.
-A *a;
+// This should initialize the vtable pointer.
+A::A() {}
// CIR: !rec_A = !cir.record<struct "A" {!cir.vptr}>
+// CIR: !rec_anon_struct = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 3>}>
+
+// CIR: cir.global "private" external @_ZTV1A : !rec_anon_struct
+
+// LLVM: @_ZTV1A = external global { [3 x ptr] }
+
+// OGCG: @_ZTV1A = external unnamed_addr constant { [3 x ptr] }
+
+// CIR: cir.func{{.*}} @_ZN1AC2Ev(%arg0: !cir.ptr<!rec_A> {{.*}})
+// CIR: %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>, ["this", init]
+// CIR: cir.store %arg0, %[[THIS_ADDR]] : !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>
+// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] : !cir.ptr<!cir.ptr<!rec_A>>, !cir.ptr<!rec_A>
+// CIR: %[[VPTR:.*]] = cir.vtable.address_point(@_ZTV1A, address_point = <index = 0, offset = 2>) : !cir.vptr
+// CIR: %[[THIS_VPTR_PTR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_A> -> !cir.ptr<!cir.vptr>
+// CIR: cir.store{{.*}} align(8) %[[VPTR]], %[[THIS_VPTR_PTR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+// CIR: cir.return
+
+// LLVM: define{{.*}} void @_ZN1AC2Ev(ptr %[[ARG0:.*]])
+// LLVM: %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM: store ptr %[[ARG0]], ptr %[[THIS_ADDR]]
+// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1A, i64 16), ptr %[[THIS]]
+
+// OGCG: define{{.*}} void @_ZN1AC2Ev(ptr {{.*}} %[[ARG0:.*]])
+// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG: store ptr %[[ARG0]], ptr %[[THIS_ADDR]]
+// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG: store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1A, i32 0, i32 0, i32 2), ptr %[[THIS]]
+
+// NOTE: The GEP in OGCG looks very different from the one generated with CIR,
+// but it is equivalent. The OGCG GEP indexes by base pointer, then
+// structure, then array, whereas the CIR GEP indexes by byte offset.
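+// With 8-byte pointers both forms compute @_ZTV1A + 16: CIR as a raw byte
+// offset (i64 16), OGCG as index 2 into the [3 x ptr] array, skipping the
+// offset-to-top and RTTI slots.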
+
+void f1(A *a) {
+ a->f('c');
+}
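+
+// Virtual dispatch: load the vptr from the object, take slot 0 of the vtable
+// (f is its only virtual function), load the function pointer, and call it
+// indirectly with `this` and the char argument ('c' == 99).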
+
+// CIR: cir.func{{.*}} @_Z2f1P1A(%arg0: !cir.ptr<!rec_A> {{.*}})
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.ptr<!rec_A>
+// CIR: cir.store %arg0, %[[A_ADDR]]
+// CIR: %[[A:.*]] = cir.load{{.*}} %[[A_ADDR]]
+// CIR: %[[C_LITERAL:.*]] = cir.const #cir.int<99> : !s8i
+// CIR: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[A]] : !cir.ptr<!rec_A> -> !cir.ptr<!cir.vptr>
+// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr
+// CIR: %[[FN_PTR_PTR:.*]] = cir.vtable.get_virtual_fn_addr %[[VPTR]][0] : !cir.vptr -> !cir.ptr<!cir.ptr<!cir.func<(!cir.ptr<!rec_A>, !s8i)>>>
+// CIR: %[[FN_PTR:.*]] = cir.load{{.*}} %[[FN_PTR_PTR:.*]] : !cir.ptr<!cir.ptr<!cir.func<(!cir.ptr<!rec_A>, !s8i)>>>, !cir.ptr<!cir.func<(!cir.ptr<!rec_A>, !s8i)>>
+// CIR: cir.call %[[FN_PTR]](%[[A]], %[[C_LITERAL]]) : (!cir.ptr<!cir.func<(!cir.ptr<!rec_A>, !s8i)>>, !cir.ptr<!rec_A>, !s8i) -> ()
+
+// LLVM: define{{.*}} void @_Z2f1P1A(ptr %[[ARG0:.*]])
+// LLVM: %[[A_ADDR:.*]] = alloca ptr
+// LLVM: store ptr %[[ARG0]], ptr %[[A_ADDR]]
+// LLVM: %[[A:.*]] = load ptr, ptr %[[A_ADDR]]
+// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[A]]
+// LLVM: %[[FN_PTR_PTR:.*]] = getelementptr inbounds ptr, ptr %[[VPTR]], i32 0
+// LLVM: %[[FN_PTR:.*]] = load ptr, ptr %[[FN_PTR_PTR]]
+// LLVM: call void %[[FN_PTR]](ptr %[[A]], i8 99)
+
+// OGCG: define{{.*}} void @_Z2f1P1A(ptr {{.*}} %[[ARG0:.*]])
+// OGCG: %[[A_ADDR:.*]] = alloca ptr
+// OGCG: store ptr %[[ARG0]], ptr %[[A_ADDR]]
+// OGCG: %[[A:.*]] = load ptr, ptr %[[A_ADDR]]
+// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[A]]
+// OGCG: %[[FN_PTR_PTR:.*]] = getelementptr inbounds ptr, ptr %[[VPTR]], i64 0
+// OGCG: %[[FN_PTR:.*]] = load ptr, ptr %[[FN_PTR_PTR]]
+// OGCG: call void %[[FN_PTR]](ptr {{.*}} %[[A]], i8 {{.*}} 99)
diff --git a/clang/test/CIR/CodeGen/vtable-emission.cpp b/clang/test/CIR/CodeGen/vtable-emission.cpp
new file mode 100644
index 0000000..9a34573
--- /dev/null
+++ b/clang/test/CIR/CodeGen/vtable-emission.cpp
@@ -0,0 +1,38 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fno-rtti -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fno-rtti -fclangir -emit-llvm -o %t-cir.ll %s
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fno-rtti -emit-llvm -o %t.ll %s
+// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s
+
+// Note: This test uses -fno-rtti so that we can defer implementation of RTTI
+// handling. When RTTI handling for vtables is implemented, that option should
+// be removed.
+
+struct S {
+ virtual void key();
+ virtual void nonKey() {}
+};
+
+void S::key() {}
+
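+// Under the Itanium C++ ABI, S's key function is key (its first non-inline,
+// non-pure virtual member function), so this translation unit owns the vtable
+// definition.
+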
+// CHECK-DAG: !rec_anon_struct = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 4>}>
+
+// The definition of the key function should result in the vtable being emitted.
+// CHECK: cir.global "private" external @_ZTV1S = #cir.vtable<{
+// CHECK-SAME: #cir.const_array<[
+// CHECK-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CHECK-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CHECK-SAME: #cir.global_view<@_ZN1S3keyEv> : !cir.ptr<!u8i>,
+// CHECK-SAME: #cir.global_view<@_ZN1S6nonKeyEv> : !cir.ptr<!u8i>]>
+// CHECK-SAME: : !cir.array<!cir.ptr<!u8i> x 4>}> : !rec_anon_struct
+
+// LLVM: @_ZTV1S = global { [4 x ptr] } { [4 x ptr]
+// LLVM-SAME: [ptr null, ptr null, ptr @_ZN1S3keyEv, ptr @_ZN1S6nonKeyEv] }
+
+// OGCG: @_ZTV1S = unnamed_addr constant { [4 x ptr] } { [4 x ptr]
+// OGCG-SAME: [ptr null, ptr null, ptr @_ZN1S3keyEv, ptr @_ZN1S6nonKeyEv] }
+
+// CHECK: cir.func dso_local @_ZN1S3keyEv
+
+// The reference from the vtable should result in nonKey being emitted.
+// CHECK: cir.func comdat linkonce_odr @_ZN1S6nonKeyEv
diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp
new file mode 100644
index 0000000..631aab4
--- /dev/null
+++ b/clang/test/CIR/CodeGen/vtt.cpp
@@ -0,0 +1,45 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s
+
+// Note: This test will be expanded to verify VTT emission and VTT implicit
+// argument handling. For now, it just tests the record layout.
+
+class A {
+public:
+ int a;
+ virtual void v() {}
+};
+
+class B : public virtual A {
+public:
+ int b;
+ virtual void w();
+};
+
+class C : public virtual A {
+public:
+ long c;
+ virtual void x() {}
+};
+
+class D : public B, public C {
+public:
+ long d;
+ virtual void y() {}
+};
+
+// This is just here to force the record types to be emitted.
+void f(D *d) {}
+
+// CIR: !rec_A2Ebase = !cir.record<struct "A.base" packed {!cir.vptr, !s32i}>
+// CIR: !rec_B2Ebase = !cir.record<struct "B.base" packed {!cir.vptr, !s32i}>
+// CIR: !rec_C2Ebase = !cir.record<struct "C.base" {!cir.vptr, !s64i}>
+// CIR: !rec_D = !cir.record<class "D" {!rec_B2Ebase, !rec_C2Ebase, !s64i, !rec_A2Ebase}>
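+//
+// The ".base" records describe each class without its virtual base, so the
+// shared virtual base A appears only once, at the end of D's layout.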
+
+// Nothing interesting to see here yet.
+// LLVM: define{{.*}} void @_Z1fP1D
+// OGCG: define{{.*}} void @_Z1fP1D
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp b/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp
new file mode 100644
index 0000000..f17e7b1
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp
@@ -0,0 +1,779 @@
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+struct NoCopyConstruct {};
+
+struct CopyConstruct {
+ CopyConstruct() = default;
+ CopyConstruct(const CopyConstruct&);
+};
+
+struct NonDefaultCtor {
+ NonDefaultCtor();
+};
+
+struct HasDtor {
+ ~HasDtor();
+};
+
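+// Each recipe below has an init region that allocates the private copy and a
+// copy region that initializes it from the original (calling the copy
+// constructor for class types, element by element for arrays); types with a
+// destructor also get a destroy region.
+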
+// CHECK: acc.firstprivate.recipe @firstprivatization__ZTSA5_7HasDtor : !cir.ptr<!cir.array<!rec_HasDtor x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasDtor x 5>, !cir.ptr<!cir.array<!rec_HasDtor x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ZERO]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[DECAY_TO]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_HasDtor>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_HasDtor>, %[[TWO]] : !s64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_HasDtor>, %[[THREE]] : !s64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_HasDtor>, %[[FOUR]] : !s64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+//
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr<!rec_HasDtor>, %[[LAST_IDX]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>, ["__array_idx"]
+// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!cir.ptr<!rec_HasDtor>>, !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ELEM_LOAD]]) nothrow : (!cir.ptr<!rec_HasDtor>) -> ()
+// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[PREVELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr<!rec_HasDtor>, %[[NEG_ONE]] : !s64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.store %[[PREVELEM]], %[[ITR]] : !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!cir.ptr<!rec_HasDtor>>, !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[ARRPTR]]) : !cir.ptr<!rec_HasDtor>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_14NonDefaultCtor : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}):
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_NonDefaultCtor x 5>, !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}):
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[DECAY_TO]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ONE]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NonDefaultCtor>, %[[TWO]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NonDefaultCtor>, %[[THREE]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NonDefaultCtor>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+//
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_13CopyConstruct : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!rec_CopyConstruct x 5>, !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[DECAY_TO]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_CopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_CopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_CopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_CopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+//
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[DECAY_TO]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
+//
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ZERO]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[DECAY_TO]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ONE_2]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!cir.float>, %[[TWO]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[TWO_2]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!cir.float>, %[[THREE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[THREE_2]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!cir.float>, %[[FOUR]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[FOUR_2]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}):
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ZERO]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[DECAY_TO]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ONE_2]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!s32i>, %[[TWO]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[TWO_2]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!s32i>, %[[THREE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[THREE_2]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!s32i>, %[[FOUR]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[FOUR_2]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS7HasDtor : !cir.ptr<!rec_HasDtor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasDtor> {{.*}}):
+// CHECK-NEXT: cir.alloca !rec_HasDtor, !cir.ptr<!rec_HasDtor>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!rec_HasDtor> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!rec_HasDtor> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasDtor> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasDtor>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS14NonDefaultCtor : !cir.ptr<!rec_NonDefaultCtor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_NonDefaultCtor> {{.*}}):
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr<!rec_NonDefaultCtor>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!rec_NonDefaultCtor> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!rec_NonDefaultCtor> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS13CopyConstruct : !cir.ptr<!rec_CopyConstruct> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_CopyConstruct> {{.*}}):
+// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr<!rec_CopyConstruct>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!rec_CopyConstruct> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!rec_CopyConstruct> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS15NoCopyConstruct : !cir.ptr<!rec_NoCopyConstruct> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_NoCopyConstruct> {{.*}}):
+// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr<!rec_NoCopyConstruct>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!rec_NoCopyConstruct> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!rec_NoCopyConstruct> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSf : !cir.ptr<!cir.float> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.float> {{.*}}):
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSi : !cir.ptr<!s32i> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i> {{.*}}):
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!s32i> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!s32i> {{.*}}):
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
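+// Each recipe above pairs an `init` region that allocates the private copy
+// with a `copy` region that initializes it from the original: a load/store
+// pair for scalars, a copy-constructor call for class types. Conceptually
+// the copy region behaves roughly like (copy_region is a made-up name):
+//   void copy_region(T *from, T *to) { new (to) T(*from); }
+// Note that the user-provided copy constructor of CopyConstruct is the only
+// one not marked nothrow.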
+
+extern "C" void acc_combined() {
+ // CHECK: cir.func{{.*}} @acc_combined() {
+
+ int someInt;
+ // CHECK-NEXT: %[[SOMEINT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["someInt"]
+ float someFloat;
+ // CHECK-NEXT: %[[SOMEFLOAT:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["someFloat"]
+ NoCopyConstruct noCopy;
+ // CHECK-NEXT: %[[NOCOPY:.*]] = cir.alloca !rec_NoCopyConstruct, !cir.ptr<!rec_NoCopyConstruct>, ["noCopy"]
+ CopyConstruct hasCopy;
+ // CHECK-NEXT: %[[HASCOPY:.*]] = cir.alloca !rec_CopyConstruct, !cir.ptr<!rec_CopyConstruct>, ["hasCopy"]
+ NonDefaultCtor notDefCtor;
+ // CHECK-NEXT: %[[NOTDEFCTOR:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr<!rec_NonDefaultCtor>, ["notDefCtor", init]
+ HasDtor dtor;
+ // CHECK-NEXT: %[[DTOR:.*]] = cir.alloca !rec_HasDtor, !cir.ptr<!rec_HasDtor>, ["dtor"]
+ int someIntArr[5];
+ // CHECK-NEXT: %[[INTARR:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["someIntArr"]
+ float someFloatArr[5];
+ // CHECK-NEXT: %[[FLOATARR:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["someFloatArr"]
+ NoCopyConstruct noCopyArr[5];
+ // CHECK-NEXT: %[[NOCOPYARR:.*]] = cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["noCopyArr"]
+ CopyConstruct hasCopyArr[5];
+ // CHECK-NEXT: %[[HASCOPYARR:.*]] = cir.alloca !cir.array<!rec_CopyConstruct x 5>, !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>, ["hasCopyArr"]
+ NonDefaultCtor notDefCtorArr[5];
+ // CHECK-NEXT: %[[NOTDEFCTORARR:.*]] = cir.alloca !cir.array<!rec_NonDefaultCtor x 5>, !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>, ["notDefCtorArr", init]
+ HasDtor dtorArr[5];
+ // CHECK-NEXT: %[[DTORARR:.*]] = cir.alloca !cir.array<!rec_HasDtor x 5>, !cir.ptr<!cir.array<!rec_HasDtor x 5>>, ["dtorArr"]
+ // CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[NOTDEFCTOR]]) : (!cir.ptr<!rec_NonDefaultCtor>) -> ()
+
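+ // Recipe symbols append the Itanium-mangled typeinfo name of the
+ // privatized type to "firstprivatization_": _ZTSi for int, _ZTSf for
+ // float, _ZTSA5_i for int[5], and so on.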
+#pragma acc parallel loop firstprivate(someInt)
+ for(int i = 0; i < 5; ++i);
+ // CHECK: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[SOMEINT]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {name = "someInt"}
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSi -> %[[PRIVATE]] : !cir.ptr<!s32i>) {
+ // CHECK-NEXT: acc.loop combined(parallel)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial loop firstprivate(someFloat)
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[SOMEFLOAT]] : !cir.ptr<!cir.float>) -> !cir.ptr<!cir.float> {name = "someFloat"}
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTSf -> %[[PRIVATE]] : !cir.ptr<!cir.float>) {
+ // CHECK-NEXT: acc.loop combined(serial)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
+#pragma acc parallel loop firstprivate(noCopy)
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPY]] : !cir.ptr<!rec_NoCopyConstruct>) -> !cir.ptr<!rec_NoCopyConstruct> {name = "noCopy"}
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTS15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!rec_NoCopyConstruct>) {
+ // CHECK-NEXT: acc.loop combined(parallel)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial loop firstprivate(hasCopy)
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[HASCOPY]] : !cir.ptr<!rec_CopyConstruct>) -> !cir.ptr<!rec_CopyConstruct> {name = "hasCopy"}
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTS13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!rec_CopyConstruct>) {
+ // CHECK-NEXT: acc.loop combined(serial)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial loop firstprivate(notDefCtor)
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTOR]] : !cir.ptr<!rec_NonDefaultCtor>) -> !cir.ptr<!rec_NonDefaultCtor> {name = "notDefCtor"}
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTS14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!rec_NonDefaultCtor>) {
+ // CHECK-NEXT: acc.loop combined(serial)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial loop firstprivate(dtor)
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[DTOR]] : !cir.ptr<!rec_HasDtor>) -> !cir.ptr<!rec_HasDtor> {name = "dtor"}
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTS7HasDtor -> %[[PRIVATE]] : !cir.ptr<!rec_HasDtor>) {
+ // CHECK-NEXT: acc.loop combined(serial)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
+#pragma acc parallel loop firstprivate(someInt, someFloat, noCopy, hasCopy, notDefCtor, dtor)
+ for(int i = 0; i < 5; ++i);
+ // CHECK: %[[PRIVATE1:.*]] = acc.firstprivate varPtr(%[[SOMEINT]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {name = "someInt"}
+ // CHECK-NEXT: %[[PRIVATE2:.*]] = acc.firstprivate varPtr(%[[SOMEFLOAT]] : !cir.ptr<!cir.float>) -> !cir.ptr<!cir.float> {name = "someFloat"}
+ // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.firstprivate varPtr(%[[NOCOPY]] : !cir.ptr<!rec_NoCopyConstruct>) -> !cir.ptr<!rec_NoCopyConstruct> {name = "noCopy"}
+ // CHECK-NEXT: %[[PRIVATE4:.*]] = acc.firstprivate varPtr(%[[HASCOPY]] : !cir.ptr<!rec_CopyConstruct>) -> !cir.ptr<!rec_CopyConstruct> {name = "hasCopy"}
+ // CHECK-NEXT: %[[PRIVATE5:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTOR]] : !cir.ptr<!rec_NonDefaultCtor>) -> !cir.ptr<!rec_NonDefaultCtor> {name = "notDefCtor"}
+ // CHECK-NEXT: %[[PRIVATE6:.*]] = acc.firstprivate varPtr(%[[DTOR]] : !cir.ptr<!rec_HasDtor>) -> !cir.ptr<!rec_HasDtor> {name = "dtor"}
+ // CHECK: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSi -> %[[PRIVATE1]] : !cir.ptr<!s32i>,
+ // CHECK-SAME: @firstprivatization__ZTSf -> %[[PRIVATE2]] : !cir.ptr<!cir.float>,
+ // CHECK-SAME: @firstprivatization__ZTS15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!rec_NoCopyConstruct>,
+ // CHECK-SAME: @firstprivatization__ZTS13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!rec_CopyConstruct>,
+ // CHECK-SAME: @firstprivatization__ZTS14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!rec_NonDefaultCtor>,
+ // CHECK-SAME: @firstprivatization__ZTS7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!rec_HasDtor>)
+ // CHECK-NEXT: acc.loop combined(parallel)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
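+ // Note: with several variables in one firstprivate clause, a separate
+ // acc.firstprivate op is materialized per variable, and all of them are
+ // listed together on the construct's firstprivate(...) operand list.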
+
+#pragma acc serial loop firstprivate(someIntArr[1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1]"}
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>) {
+ // CHECK-NEXT: acc.loop combined(serial)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel loop firstprivate(someFloatArr[1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1]"}
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>) {
+ // CHECK-NEXT: acc.loop combined(parallel)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial loop firstprivate(noCopyArr[1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1]"}
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) {
+ // CHECK-NEXT: acc.loop combined(serial)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel loop firstprivate(hasCopyArr[1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[HASCOPYARR]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {name = "hasCopyArr[1]"}
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) {
+ // CHECK-NEXT: acc.loop combined(parallel)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel loop firstprivate(notDefCtorArr[1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTORARR]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {name = "notDefCtorArr[1]"}
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) {
+ // CHECK-NEXT: acc.loop combined(parallel)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel loop firstprivate(dtorArr[1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1]"}
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) {
+ // CHECK-NEXT: acc.loop combined(parallel)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial loop firstprivate(someIntArr[1], someFloatArr[1], noCopyArr[1], hasCopyArr[1], notDefCtorArr[1], dtorArr[1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE1:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE2:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE4:.*]] = acc.firstprivate varPtr(%[[HASCOPYARR]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {name = "hasCopyArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE5:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTORARR]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {name = "notDefCtorArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE6:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1]"}
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
+ // CHECK-NEXT: acc.loop combined(serial)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
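+ // For the array sections below, the section length is lowered and used as
+ // the acc.bounds extent, unlike the single subscripts above where the
+ // extent is the constant 1.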
+#pragma acc parallel loop firstprivate(someIntArr[1:1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1:1]"}
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>) {
+ // CHECK-NEXT: acc.loop combined(parallel)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial loop firstprivate(someFloatArr[1:1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1:1]"}
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>) {
+ // CHECK-NEXT: acc.loop combined(serial)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel loop firstprivate(noCopyArr[1:1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1:1]"}
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) {
+ // CHECK-NEXT: acc.loop combined(parallel)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial loop firstprivate(hasCopyArr[1:1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[HASCOPYARR]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {name = "hasCopyArr[1:1]"}
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) {
+ // CHECK-NEXT: acc.loop combined(serial)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel loop firstprivate(notDefCtorArr[1:1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTORARR]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {name = "notDefCtorArr[1:1]"}
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) {
+ // CHECK-NEXT: acc.loop combined(parallel)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel loop firstprivate(dtorArr[1:1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1:1]"}
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) {
+ // CHECK-NEXT: acc.loop combined(parallel)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel loop firstprivate(someIntArr[1:1], someFloatArr[1:1], noCopyArr[1:1], hasCopyArr[1:1], notDefCtorArr[1:1], dtorArr[1:1])
+ for(int i = 0; i < 5; ++i);
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE1:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE2:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE4:.*]] = acc.firstprivate varPtr(%[[HASCOPYARR]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {name = "hasCopyArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE5:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTORARR]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {name = "notDefCtorArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE6:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1:1]"}
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
+ // CHECK-NEXT: acc.loop combined(parallel)
+ // CHECK: acc.yield
+ // CHECK-NEXT: } loc
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp
new file mode 100644
index 0000000..97b2479
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp
@@ -0,0 +1,428 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+struct DefaultOperators {
+ int i;
+ float f;
+ double d;
+};
+
+
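+// Reduction recipes are emitted once per (operator, type) pair and named
+// "reduction_<op>_" plus the mangled typeinfo name of the reduced type. For
+// <add>, <xor>, <ior>, and <lor> the private copy is zero-initialized member
+// by member; the proper identity values for <mul>, <max>, <min>, <iand>, and
+// <land> are still TODO, as the comments in those recipes note.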
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
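+// For array types the init region fills the identity element by element: the
+// array decays to a pointer that is advanced with cir.ptr_stride inside a
+// cir.do loop until it reaches the one-past-the-end pointer.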
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
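+// Scalar recipes store the identity into each member directly, with no loop.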
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
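+// Each distinct reduction operator used in the pragmas below forces a
+// separate acc.reduction.recipe; the CHECK blocks above verify those
+// recipes, so the loop bodies themselves are intentionally empty.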
+template<typename T>
+void acc_combined() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc parallel loop reduction(+:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVar)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+}
+
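+// Recipes are only emitted once the template is instantiated with a
+// concrete type, which uses() does below.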
+void uses() {
+ acc_combined<DefaultOperators>();
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
new file mode 100644
index 0000000..9d8b4f5
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
@@ -0,0 +1,366 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+
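+// This file exercises the same reduction operators on a scalar float and
+// on a float[5] array. Only the operators whose identity value is zero
+// (+, |, ^, ||) currently get a zero-initializing init region; the rest
+// are still TODO, and every combiner simply yields the LHS for now.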
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
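+// The array init regions in this file all use the same structure seen
+// above: decay the array to an element pointer, then zero each element in
+// a do/while loop until the one-past-the-end pointer is reached.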
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSf : !cir.ptr<!cir.float> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSf : !cir.ptr<!cir.float> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSf : !cir.ptr<!cir.float> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSf : !cir.ptr<!cir.float> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSf : !cir.ptr<!cir.float> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSf : !cir.ptr<!cir.float> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSf : !cir.ptr<!cir.float> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSf : !cir.ptr<!cir.float> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSf : !cir.ptr<!cir.float> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+template<typename T>
+void acc_combined() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc parallel loop reduction(+:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVar)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+}
+
+void uses() {
+ acc_combined<float>();
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
new file mode 100644
index 0000000..3c41188b
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
@@ -0,0 +1,657 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
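+// HasOperatorsInline has a user-declared destructor, so in addition to the
+// init and combiner regions each recipe below gets a destroy region; for
+// the array cases it walks the elements in reverse, calling the destructor
+// on each one.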
+struct HasOperatorsInline {
+ int i;
+ float f;
+ double d;
+
+ ~HasOperatorsInline();
+
+ HasOperatorsInline &operator+=(HasOperatorsInline& other);
+ HasOperatorsInline &operator*=(HasOperatorsInline& other);
+ HasOperatorsInline &operator&=(HasOperatorsInline& other);
+ HasOperatorsInline &operator|=(HasOperatorsInline& other);
+ HasOperatorsInline &operator^=(HasOperatorsInline& other);
+ bool &operator&&(HasOperatorsInline& other);
+ bool &operator||(HasOperatorsInline& other);
+ // For min/max
+ HasOperatorsInline &operator<(HasOperatorsInline& other);
+ HasOperatorsInline &operator=(HasOperatorsInline& other);
+};
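+// Declaration-only operators are sufficient here: the test only checks the
+// generated recipe IR, and the combiner bodies that would eventually call
+// operator+= and friends (or operator< and operator= for min and max) are
+// still TODO.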
+
+
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsInline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsInline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsInline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsInline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+template<typename T>
+void acc_combined() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc parallel loop reduction(+:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVar)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+}
+
+void uses() {
+ acc_combined<HasOperatorsInline>();
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp
new file mode 100644
index 0000000..4f6fd28
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp
@@ -0,0 +1,363 @@
+// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSi : !cir.ptr<!s32i> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSi : !cir.ptr<!s32i> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSi : !cir.ptr<!s32i> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSi : !cir.ptr<!s32i> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr<!s32i> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr<!s32i> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr<!s32i> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSi : !cir.ptr<!s32i> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSi : !cir.ptr<!s32i> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+template<typename T>
+void acc_combined() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc parallel loop reduction(+:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVar)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+}
+
+void uses() {
+ acc_combined<int>();
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp
new file mode 100644
index 0000000..38c7d32
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp
@@ -0,0 +1,655 @@
+// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+struct HasOperatorsOutline {
+ int i;
+ float f;
+ double d;
+
+ ~HasOperatorsOutline();
+  HasOperatorsOutline &operator=(const HasOperatorsOutline &);
+};
+
+HasOperatorsOutline &operator+=(HasOperatorsOutline &, HasOperatorsOutline &);
+HasOperatorsOutline &operator*=(HasOperatorsOutline &, HasOperatorsOutline &);
+HasOperatorsOutline &operator&=(HasOperatorsOutline &, HasOperatorsOutline &);
+HasOperatorsOutline &operator|=(HasOperatorsOutline &, HasOperatorsOutline &);
+HasOperatorsOutline &operator^=(HasOperatorsOutline &, HasOperatorsOutline &);
+bool &operator&&(HasOperatorsOutline &, HasOperatorsOutline &);
+bool &operator||(HasOperatorsOutline &, HasOperatorsOutline &);
+// For min/max
+HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &);
+
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
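+// The template below instantiates each reduction operator (+, *, max, min,
+// &, |, ^, &&, ||) over a scalar, a whole array, a single array element, and
+// an array section of HasOperatorsOutline; the recipes checked above are the
+// ones generated for those uses.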
+template<typename T>
+void acc_combined() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc parallel loop reduction(+:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVar)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc parallel loop reduction(+:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(*:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(max:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(min:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(|:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(^:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(&&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc parallel loop reduction(||:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+}
+
+void uses() {
+ acc_combined<HasOperatorsOutline>();
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause-templates.cpp b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause-templates.cpp
new file mode 100644
index 0000000..50b3219
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause-templates.cpp
@@ -0,0 +1,95 @@
+// RUN: %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+struct CopyConstruct {
+ CopyConstruct() = default;
+ CopyConstruct(const CopyConstruct&);
+};
+
+struct NonDefaultCtor {
+ NonDefaultCtor();
+};
+
+struct HasDtor {
+ ~HasDtor();
+};
+
+// CHECK: acc.firstprivate.recipe @firstprivatization__ZTSi : !cir.ptr<!s32i> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i> {{.*}}):
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!s32i> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!s32i> {{.*}}):
+// CHECK-NEXT: %[[LOAD:.*]] = cir.load {{.*}} %[[ARG_FROM]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[LOAD]], %[[ARG_TO]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS7HasDtor : !cir.ptr<!rec_HasDtor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasDtor> {{.*}}):
+// CHECK-NEXT: cir.alloca !rec_HasDtor, !cir.ptr<!rec_HasDtor>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!rec_HasDtor> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!rec_HasDtor> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasDtor> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasDtor>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS14NonDefaultCtor : !cir.ptr<!rec_NonDefaultCtor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_NonDefaultCtor> {{.*}}):
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr<!rec_NonDefaultCtor>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!rec_NonDefaultCtor> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!rec_NonDefaultCtor> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS13CopyConstruct : !cir.ptr<!rec_CopyConstruct> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_CopyConstruct> {{.*}}):
+// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr<!rec_CopyConstruct>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!rec_CopyConstruct> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!rec_CopyConstruct> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
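+// Note: the recipes above are emitted in the reverse of the order in which
+// their variables appear in the firstprivate clause inside dependent_version
+// below (someInt, dtor, ndc, cc).
+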
+template<typename T, typename U, typename V, typename W>
+void dependent_version(const T &cc, const U &ndc, const V &dtor, const W &someInt) {
+ // CHECK: cir.func {{.*}}@_Z17dependent_versionI13CopyConstruct14NonDefaultCtor7HasDtoriEvRKT_RKT0_RKT1_RKT2_(%[[ARG0:.*]]: !cir.ptr<!rec_CopyConstruct> {{.*}}, %[[ARG1:.*]]: !cir.ptr<!rec_NonDefaultCtor> {{.*}}, %[[ARG2:.*]]: !cir.ptr<!rec_HasDtor> {{.*}}, %[[ARG3:.*]]: !cir.ptr<!s32i> {{.*}}) {
+ // CHECK-NEXT: %[[CC:.*]] = cir.alloca !cir.ptr<!rec_CopyConstruct>, !cir.ptr<!cir.ptr<!rec_CopyConstruct>>, ["cc", init, const]
+ // CHECK-NEXT: %[[NDC:.*]] = cir.alloca !cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!cir.ptr<!rec_NonDefaultCtor>>, ["ndc", init, const]
+ // CHECK-NEXT: %[[DTOR:.*]] = cir.alloca !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>, ["dtor", init, const]
+ // CHECK-NEXT: %[[SOMEINT:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["someInt", init, const]
+
+#pragma acc parallel firstprivate(cc, ndc, dtor, someInt)
+ ;
+ // CHECK: %[[PRIV_LOAD:.*]] = cir.load %[[CC]] : !cir.ptr<!cir.ptr<!rec_CopyConstruct>>, !cir.ptr<!rec_CopyConstruct>
+ // CHECK-NEXT: %[[PRIVATE1:.*]] = acc.firstprivate varPtr(%[[PRIV_LOAD]] : !cir.ptr<!rec_CopyConstruct>) -> !cir.ptr<!rec_CopyConstruct> {name = "cc"}
+ // CHECK-NEXT: %[[PRIV_LOAD:.*]] = cir.load %[[NDC]] : !cir.ptr<!cir.ptr<!rec_NonDefaultCtor>>, !cir.ptr<!rec_NonDefaultCtor>
+ // CHECK-NEXT: %[[PRIVATE2:.*]] = acc.firstprivate varPtr(%[[PRIV_LOAD]] : !cir.ptr<!rec_NonDefaultCtor>) -> !cir.ptr<!rec_NonDefaultCtor> {name = "ndc"}
+ // CHECK-NEXT: %[[PRIV_LOAD:.*]] = cir.load %[[DTOR]] : !cir.ptr<!cir.ptr<!rec_HasDtor>>, !cir.ptr<!rec_HasDtor>
+ // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.firstprivate varPtr(%[[PRIV_LOAD]] : !cir.ptr<!rec_HasDtor>) -> !cir.ptr<!rec_HasDtor> {name = "dtor"}
+ // CHECK-NEXT: %[[PRIV_LOAD:.*]] = cir.load %[[SOMEINT]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+ // CHECK-NEXT: %[[PRIVATE4:.*]] = acc.firstprivate varPtr(%[[PRIV_LOAD]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {name = "someInt"}
+
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTS13CopyConstruct -> %[[PRIVATE1]] : !cir.ptr<!rec_CopyConstruct>,
+ // CHECK-SAME: @firstprivatization__ZTS14NonDefaultCtor -> %[[PRIVATE2]] : !cir.ptr<!rec_NonDefaultCtor>,
+ // CHECK-SAME: @firstprivatization__ZTS7HasDtor -> %[[PRIVATE3]] : !cir.ptr<!rec_HasDtor>,
+ // CHECK-SAME: @firstprivatization__ZTSi -> %[[PRIVATE4]] : !cir.ptr<!s32i>) {
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+}
+
+void use() {
+ CopyConstruct cc;
+ NonDefaultCtor ndc;
+ HasDtor dtor;
+ int i;
+ dependent_version(cc, ndc, dtor, i);
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
new file mode 100644
index 0000000..7f1480f
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
@@ -0,0 +1,340 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+// This test currently hits NYI errors because of a non-ignored copy of an
+// aggregate. Once that is implemented, the 'not' should be removed from the
+// RUN line above.
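+// (The aggregate copy in question is the struct-typed firstprivate copy in
+// the recipe 'copy' regions below; see the cir.copy TODO notes there.)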
+
+struct NoCopyConstruct {};
+
+// CHECK: acc.firstprivate.recipe @firstprivatization__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// TODO: OpenACC: cir.copy isn't implemented correctly yet, so this doesn't actually do any initialization.
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
+//
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!cir.float>, %[[ZERO]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ONE_2]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[TWO]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[TWO_2]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[THREE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[THREE_2]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[FOUR]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[FOUR_2]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}):
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!s32i>, %[[ZERO]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ONE_2]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[TWO]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[TWO_2]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[THREE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[THREE_2]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[FOUR]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[FOUR_2]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS15NoCopyConstruct : !cir.ptr<!rec_NoCopyConstruct> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_NoCopyConstruct> {{.*}}):
+// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr<!rec_NoCopyConstruct>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!rec_NoCopyConstruct> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!rec_NoCopyConstruct> {{.*}}):
+// TODO: OpenACC: This should emit a copy, but cir.copy isn't implemented yet.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSf : !cir.ptr<!cir.float> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.float> {{.*}}):
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSi : !cir.ptr<!s32i> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i> {{.*}}):
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!s32i> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!s32i> {{.*}}):
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+void acc_compute() {
+ // CHECK: cir.func{{.*}} @acc_compute() {
+
+ int someInt;
+ // CHECK-NEXT: %[[SOMEINT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["someInt"]
+ float someFloat;
+ // CHECK-NEXT: %[[SOMEFLOAT:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["someFloat"]
+ struct NoCopyConstruct noCopy;
+ // CHECK-NEXT: %[[NOCOPY:.*]] = cir.alloca !rec_NoCopyConstruct, !cir.ptr<!rec_NoCopyConstruct>, ["noCopy"]
+ int someIntArr[5];
+ // CHECK-NEXT: %[[INTARR:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["someIntArr"]
+ float someFloatArr[5];
+ // CHECK-NEXT: %[[FLOATARR:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["someFloatArr"]
+ struct NoCopyConstruct noCopyArr[5];
+ // CHECK-NEXT: %[[NOCOPYARR:.*]] = cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["noCopyArr"]
+
+#pragma acc parallel firstprivate(someInt)
+ ;
+ // CHECK: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[SOMEINT]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {name = "someInt"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSi -> %[[PRIVATE]] : !cir.ptr<!s32i>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial firstprivate(someFloat)
+ ;
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[SOMEFLOAT]] : !cir.ptr<!cir.float>) -> !cir.ptr<!cir.float> {name = "someFloat"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSf -> %[[PRIVATE]] : !cir.ptr<!cir.float>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
+#pragma acc parallel firstprivate(noCopy)
+ ;
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPY]] : !cir.ptr<!rec_NoCopyConstruct>) -> !cir.ptr<!rec_NoCopyConstruct> {name = "noCopy"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTS15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!rec_NoCopyConstruct>
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
+#pragma acc parallel firstprivate(someInt, someFloat, noCopy)
+ ;
+ // CHECK: %[[PRIVATE1:.*]] = acc.firstprivate varPtr(%[[SOMEINT]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {name = "someInt"}
+ // CHECK-NEXT: %[[PRIVATE2:.*]] = acc.firstprivate varPtr(%[[SOMEFLOAT]] : !cir.ptr<!cir.float>) -> !cir.ptr<!cir.float> {name = "someFloat"}
+ // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.firstprivate varPtr(%[[NOCOPY]] : !cir.ptr<!rec_NoCopyConstruct>) -> !cir.ptr<!rec_NoCopyConstruct> {name = "noCopy"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSi -> %[[PRIVATE1]] : !cir.ptr<!s32i>,
+ // CHECK-SAME: @firstprivatization__ZTSf -> %[[PRIVATE2]] : !cir.ptr<!cir.float>,
+ // CHECK-SAME: @firstprivatization__ZTS15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!rec_NoCopyConstruct>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
+#pragma acc serial firstprivate(someIntArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1]"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel firstprivate(someFloatArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1]"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial firstprivate(noCopyArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1]"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial firstprivate(someIntArr[1], someFloatArr[1], noCopyArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE1:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE2:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1]"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
+#pragma acc parallel firstprivate(someIntArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1:1]"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial firstprivate(someFloatArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1:1]"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel firstprivate(noCopyArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1:1]"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel firstprivate(someIntArr[1:1], someFloatArr[1:1], noCopyArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE1:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE2:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1:1]"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp
new file mode 100644
index 0000000..d8258b0
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp
@@ -0,0 +1,716 @@
+// RUN: %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+struct NoCopyConstruct {};
+
+struct CopyConstruct {
+ CopyConstruct() = default;
+ CopyConstruct(const CopyConstruct&);
+};
+
+struct NonDefaultCtor {
+ NonDefaultCtor();
+};
+
+struct HasDtor {
+ ~HasDtor();
+};
+
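+// Descriptive note (summarizing the checks below): each recipe has an 'init'
+// region that allocates the private copy, a 'copy' region that initializes it
+// from the original (element by element for arrays, through the copy
+// constructor when one is declared), and a 'destroy' region when the type has
+// a non-trivial destructor.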
+// CHECK: acc.firstprivate.recipe @firstprivatization__ZTSA5_7HasDtor : !cir.ptr<!cir.array<!rec_HasDtor x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasDtor x 5>, !cir.ptr<!cir.array<!rec_HasDtor x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ZERO]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[DECAY_TO]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_HasDtor>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_HasDtor>, %[[TWO]] : !s64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_HasDtor>, %[[THREE]] : !s64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_HasDtor>, %[[FOUR]] : !s64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+//
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr<!rec_HasDtor>, %[[LAST_IDX]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>, ["__array_idx"]
+// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!cir.ptr<!rec_HasDtor>>, !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ELEM_LOAD]]) nothrow : (!cir.ptr<!rec_HasDtor>) -> ()
+// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[PREVELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr<!rec_HasDtor>, %[[NEG_ONE]] : !s64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.store %[[PREVELEM]], %[[ITR]] : !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!cir.ptr<!rec_HasDtor>>, !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[ARRPTR]]) : !cir.ptr<!rec_HasDtor>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_14NonDefaultCtor : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}):
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_NonDefaultCtor x 5>, !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}):
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[DECAY_TO]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ONE]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NonDefaultCtor>, %[[TWO]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NonDefaultCtor>, %[[THREE]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NonDefaultCtor>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+//
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_13CopyConstruct : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!rec_CopyConstruct x 5>, !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[DECAY_TO]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_CopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_CopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_CopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_CopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+//
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[DECAY_TO]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
+//
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ZERO]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[DECAY_TO]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ONE_2]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!cir.float>, %[[TWO]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[TWO_2]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!cir.float>, %[[THREE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[THREE_2]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!cir.float>, %[[FOUR]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[FOUR_2]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}):
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ZERO]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ONE_2]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!s32i>, %[[TWO]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[TWO_2]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!s32i>, %[[THREE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[THREE_2]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!s32i>, %[[FOUR]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[FOUR_2]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS7HasDtor : !cir.ptr<!rec_HasDtor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasDtor> {{.*}}):
+// CHECK-NEXT: cir.alloca !rec_HasDtor, !cir.ptr<!rec_HasDtor>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!rec_HasDtor> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!rec_HasDtor> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasDtor> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasDtor>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS14NonDefaultCtor : !cir.ptr<!rec_NonDefaultCtor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_NonDefaultCtor> {{.*}}):
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr<!rec_NonDefaultCtor>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!rec_NonDefaultCtor> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!rec_NonDefaultCtor> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS13CopyConstruct : !cir.ptr<!rec_CopyConstruct> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_CopyConstruct> {{.*}}):
+// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr<!rec_CopyConstruct>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!rec_CopyConstruct> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!rec_CopyConstruct> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS15NoCopyConstruct : !cir.ptr<!rec_NoCopyConstruct> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_NoCopyConstruct> {{.*}}):
+// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr<!rec_NoCopyConstruct>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!rec_NoCopyConstruct> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!rec_NoCopyConstruct> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSf : !cir.ptr<!cir.float> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.float> {{.*}}):
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSi : !cir.ptr<!s32i> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i> {{.*}}):
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!s32i> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!s32i> {{.*}}):
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+extern "C" void acc_compute() {
+ // CHECK: cir.func{{.*}} @acc_compute() {
+
+ int someInt;
+ // CHECK-NEXT: %[[SOMEINT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["someInt"]
+ float someFloat;
+ // CHECK-NEXT: %[[SOMEFLOAT:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["someFloat"]
+ NoCopyConstruct noCopy;
+ // CHECK-NEXT: %[[NOCOPY:.*]] = cir.alloca !rec_NoCopyConstruct, !cir.ptr<!rec_NoCopyConstruct>, ["noCopy"]
+ CopyConstruct hasCopy;
+ // CHECK-NEXT: %[[HASCOPY:.*]] = cir.alloca !rec_CopyConstruct, !cir.ptr<!rec_CopyConstruct>, ["hasCopy"]
+ NonDefaultCtor notDefCtor;
+ // CHECK-NEXT: %[[NOTDEFCTOR:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr<!rec_NonDefaultCtor>, ["notDefCtor", init]
+ HasDtor dtor;
+ // CHECK-NEXT: %[[DTOR:.*]] = cir.alloca !rec_HasDtor, !cir.ptr<!rec_HasDtor>, ["dtor"]
+ int someIntArr[5];
+ // CHECK-NEXT: %[[INTARR:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["someIntArr"]
+ float someFloatArr[5];
+ // CHECK-NEXT: %[[FLOATARR:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["someFloatArr"]
+ NoCopyConstruct noCopyArr[5];
+ // CHECK-NEXT: %[[NOCOPYARR:.*]] = cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["noCopyArr"]
+ CopyConstruct hasCopyArr[5];
+ // CHECK-NEXT: %[[HASCOPYARR:.*]] = cir.alloca !cir.array<!rec_CopyConstruct x 5>, !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>, ["hasCopyArr"]
+ NonDefaultCtor notDefCtorArr[5];
+ // CHECK-NEXT: %[[NOTDEFCTORARR:.*]] = cir.alloca !cir.array<!rec_NonDefaultCtor x 5>, !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>, ["notDefCtorArr", init]
+ HasDtor dtorArr[5];
+ // CHECK-NEXT: %[[DTORARR:.*]] = cir.alloca !cir.array<!rec_HasDtor x 5>, !cir.ptr<!cir.array<!rec_HasDtor x 5>>, ["dtorArr"]
+ // CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[NOTDEFCTOR]]) : (!cir.ptr<!rec_NonDefaultCtor>) -> ()
+
+#pragma acc parallel firstprivate(someInt)
+ ;
+ // CHECK: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[SOMEINT]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {name = "someInt"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSi -> %[[PRIVATE]] : !cir.ptr<!s32i>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial firstprivate(someFloat)
+ ;
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[SOMEFLOAT]] : !cir.ptr<!cir.float>) -> !cir.ptr<!cir.float> {name = "someFloat"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSf -> %[[PRIVATE]] : !cir.ptr<!cir.float>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
+#pragma acc parallel firstprivate(noCopy)
+ ;
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPY]] : !cir.ptr<!rec_NoCopyConstruct>) -> !cir.ptr<!rec_NoCopyConstruct> {name = "noCopy"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTS15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!rec_NoCopyConstruct>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial firstprivate(hasCopy)
+ ;
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[HASCOPY]] : !cir.ptr<!rec_CopyConstruct>) -> !cir.ptr<!rec_CopyConstruct> {name = "hasCopy"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTS13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!rec_CopyConstruct>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial firstprivate(notDefCtor)
+ ;
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTOR]] : !cir.ptr<!rec_NonDefaultCtor>) -> !cir.ptr<!rec_NonDefaultCtor> {name = "notDefCtor"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTS14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!rec_NonDefaultCtor>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial firstprivate(dtor)
+ ;
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[DTOR]] : !cir.ptr<!rec_HasDtor>) -> !cir.ptr<!rec_HasDtor> {name = "dtor"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTS7HasDtor -> %[[PRIVATE]] : !cir.ptr<!rec_HasDtor>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
+#pragma acc parallel firstprivate(someInt, someFloat, noCopy, hasCopy, notDefCtor, dtor)
+ ;
+ // CHECK: %[[PRIVATE1:.*]] = acc.firstprivate varPtr(%[[SOMEINT]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {name = "someInt"}
+ // CHECK-NEXT: %[[PRIVATE2:.*]] = acc.firstprivate varPtr(%[[SOMEFLOAT]] : !cir.ptr<!cir.float>) -> !cir.ptr<!cir.float> {name = "someFloat"}
+ // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.firstprivate varPtr(%[[NOCOPY]] : !cir.ptr<!rec_NoCopyConstruct>) -> !cir.ptr<!rec_NoCopyConstruct> {name = "noCopy"}
+ // CHECK-NEXT: %[[PRIVATE4:.*]] = acc.firstprivate varPtr(%[[HASCOPY]] : !cir.ptr<!rec_CopyConstruct>) -> !cir.ptr<!rec_CopyConstruct> {name = "hasCopy"}
+ // CHECK-NEXT: %[[PRIVATE5:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTOR]] : !cir.ptr<!rec_NonDefaultCtor>) -> !cir.ptr<!rec_NonDefaultCtor> {name = "notDefCtor"}
+ // CHECK-NEXT: %[[PRIVATE6:.*]] = acc.firstprivate varPtr(%[[DTOR]] : !cir.ptr<!rec_HasDtor>) -> !cir.ptr<!rec_HasDtor> {name = "dtor"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSi -> %[[PRIVATE1]] : !cir.ptr<!s32i>,
+ // CHECK-SAME: @firstprivatization__ZTSf -> %[[PRIVATE2]] : !cir.ptr<!cir.float>,
+ // CHECK-SAME: @firstprivatization__ZTS15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!rec_NoCopyConstruct>,
+ // CHECK-SAME: @firstprivatization__ZTS13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!rec_CopyConstruct>,
+ // CHECK-SAME: @firstprivatization__ZTS14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!rec_NonDefaultCtor>,
+ // CHECK-SAME: @firstprivatization__ZTS7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!rec_HasDtor>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
+#pragma acc serial firstprivate(someIntArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1]"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel firstprivate(someFloatArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1]"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial firstprivate(noCopyArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1]"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel firstprivate(hasCopyArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[HASCOPYARR]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {name = "hasCopyArr[1]"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel firstprivate(notDefCtorArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTORARR]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {name = "notDefCtorArr[1]"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel firstprivate(dtorArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1]"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial firstprivate(someIntArr[1], someFloatArr[1], noCopyArr[1], hasCopyArr[1], notDefCtorArr[1], dtorArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE1:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE2:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE4:.*]] = acc.firstprivate varPtr(%[[HASCOPYARR]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {name = "hasCopyArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE5:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTORARR]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {name = "notDefCtorArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE6:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1]"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
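+  // For the sub-array cases below, both the lowerbound and the extent of
+  // acc.bounds are taken from the lowered bound expressions (si32 values),
+  // while the stride and start index remain i64 constants, as the CHECK
+  // lines verify.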
+#pragma acc parallel firstprivate(someIntArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1:1]"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial firstprivate(someFloatArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1:1]"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel firstprivate(noCopyArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1:1]"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial firstprivate(hasCopyArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[HASCOPYARR]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {name = "hasCopyArr[1:1]"}
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel firstprivate(notDefCtorArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTORARR]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {name = "notDefCtorArr[1:1]"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel firstprivate(dtorArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1:1]"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel firstprivate(someIntArr[1:1], someFloatArr[1:1], noCopyArr[1:1], hasCopyArr[1:1], notDefCtorArr[1:1], dtorArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE1:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE2:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE4:.*]] = acc.firstprivate varPtr(%[[HASCOPYARR]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {name = "hasCopyArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE5:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTORARR]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {name = "notDefCtorArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE6:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1:1]"}
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>,
+ // CHECK-SAME: @firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c
new file mode 100644
index 0000000..a128bd3
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c
@@ -0,0 +1,223 @@
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir %s -o - | FileCheck %s
+
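+// This test covers CIR lowering of the OpenACC 'private' clause on
+// 'parallel' and 'serial' compute constructs for scalars, a struct with no
+// copy constructor, arrays, single array elements, and sub-array sections.
+// Each privatized type gets an acc.private.recipe whose init region is a
+// plain alloca of a private copy; the recipe names embed the Itanium
+// mangling of the privatized type (e.g. A5_i for 'int[5]').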
+struct NoCopyConstruct {};
+
+// CHECK: acc.private.recipe @privatization__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["openacc.private.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.private.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.private.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.private.recipe @privatization__ZTS15NoCopyConstruct : !cir.ptr<!rec_NoCopyConstruct> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_NoCopyConstruct> {{.*}}):
+// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr<!rec_NoCopyConstruct>, ["openacc.private.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.private.recipe @privatization__ZTSf : !cir.ptr<!cir.float> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float> {{.*}}):
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.private.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+//
+// CHECK-NEXT: acc.private.recipe @privatization__ZTSi : !cir.ptr<!s32i> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i> {{.*}}):
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.private.init"]
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+void acc_compute() {
+ // CHECK: cir.func{{.*}} @acc_compute() {
+
+ int someInt;
+ // CHECK-NEXT: %[[SOMEINT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["someInt"]
+ float someFloat;
+ // CHECK-NEXT: %[[SOMEFLOAT:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["someFloat"]
+ struct NoCopyConstruct noCopy;
+ // CHECK-NEXT: %[[NOCOPY:.*]] = cir.alloca !rec_NoCopyConstruct, !cir.ptr<!rec_NoCopyConstruct>, ["noCopy"]
+ int someIntArr[5];
+ // CHECK-NEXT: %[[INTARR:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["someIntArr"]
+ float someFloatArr[5];
+ // CHECK-NEXT: %[[FLOATARR:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["someFloatArr"]
+ struct NoCopyConstruct noCopyArr[5];
+ // CHECK-NEXT: %[[NOCOPYARR:.*]] = cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["noCopyArr"]
+
+#pragma acc parallel private(someInt)
+ ;
+ // CHECK: %[[PRIVATE:.*]] = acc.private varPtr(%[[SOMEINT]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {name = "someInt"}
+ // CHECK-NEXT: acc.parallel private(@privatization__ZTSi -> %[[PRIVATE]] : !cir.ptr<!s32i>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial private(someFloat)
+ ;
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[SOMEFLOAT]] : !cir.ptr<!cir.float>) -> !cir.ptr<!cir.float> {name = "someFloat"}
+ // CHECK-NEXT: acc.serial private(@privatization__ZTSf -> %[[PRIVATE]] : !cir.ptr<!cir.float>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
+#pragma acc parallel private(noCopy)
+ ;
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOCOPY]] : !cir.ptr<!rec_NoCopyConstruct>) -> !cir.ptr<!rec_NoCopyConstruct> {name = "noCopy"}
+  // CHECK-NEXT: acc.parallel private(@privatization__ZTS15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!rec_NoCopyConstruct>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
+#pragma acc parallel private(someInt, someFloat, noCopy)
+ ;
+ // CHECK: %[[PRIVATE1:.*]] = acc.private varPtr(%[[SOMEINT]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {name = "someInt"}
+ // CHECK-NEXT: %[[PRIVATE2:.*]] = acc.private varPtr(%[[SOMEFLOAT]] : !cir.ptr<!cir.float>) -> !cir.ptr<!cir.float> {name = "someFloat"}
+ // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.private varPtr(%[[NOCOPY]] : !cir.ptr<!rec_NoCopyConstruct>) -> !cir.ptr<!rec_NoCopyConstruct> {name = "noCopy"}
+ // CHECK-NEXT: acc.parallel private(@privatization__ZTSi -> %[[PRIVATE1]] : !cir.ptr<!s32i>,
+ // CHECK-SAME: @privatization__ZTSf -> %[[PRIVATE2]] : !cir.ptr<!cir.float>,
+ // CHECK-SAME: @privatization__ZTS15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!rec_NoCopyConstruct>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
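+  // For a single-element subscript such as someIntArr[1], the lowerbound of
+  // acc.bounds is the lowered index expression and the extent is a constant
+  // 1, whereas for a sub-array such as someIntArr[1:1] both values come
+  // from the lowered source expressions, as the CHECK lines below verify.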
+#pragma acc serial private(someIntArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1]"}
+ // CHECK-NEXT: acc.serial private(@privatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel private(someFloatArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1]"}
+ // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial private(noCopyArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1]"}
+ // CHECK-NEXT: acc.serial private(@privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial private(someIntArr[1], someFloatArr[1], noCopyArr[1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE1:.*]] = acc.private varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE2:.*]] = acc.private varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE_CONST:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1]"}
+ // CHECK-NEXT: acc.serial private(@privatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @privatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+
+#pragma acc parallel private(someIntArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1:1]"}
+ // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc serial private(someFloatArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1:1]"}
+ // CHECK-NEXT: acc.serial private(@privatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel private(noCopyArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1:1]"}
+ // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+#pragma acc parallel private(someIntArr[1:1], someFloatArr[1:1], noCopyArr[1:1])
+ ;
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE1:.*]] = acc.private varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE2:.*]] = acc.private varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1:1]"}
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+ // CHECK-NEXT: %[[ONE_CAST2:.*]] = builtin.unrealized_conversion_cast %[[ONE]] : !s32i to si32
+ // CHECK-NEXT: %[[ZERO_CONST:.*]] = arith.constant 0
+ // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
+ // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
+ // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1:1]"}
+ // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @privatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.yield
+ // CHECK-NEXT: } loc
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
new file mode 100644
index 0000000..a40ea1f
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
@@ -0,0 +1,396 @@
+// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir %s -o - | FileCheck %s
+
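+// This test covers the acc.reduction.recipe emitted for each default
+// reduction operator (+, *, max, min, &, |, ^, &&, ||) over a struct with
+// default-operator members and over arrays of it. Recipes for +, |, ^, and
+// || zero-initialize their private copy; the remaining operators and all
+// combiner regions are still TODO placeholders. The 'not' on the RUN line
+// means the invocation is expected to exit with failure while still
+// producing output for FileCheck.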
+struct DefaultOperators {
+ int i;
+ float f;
+ double d;
+};
+
+
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[TEMP_LOAD]]
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[TEMP_LOAD]]
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[TEMP_LOAD]]
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[TEMP_LOAD]]
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to an appropriate identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+void acc_compute() {
+ struct DefaultOperators someVar;
+ struct DefaultOperators someVarArr[5];
+#pragma acc parallel reduction(+:someVar)
+ ;
+#pragma acc parallel reduction(*:someVar)
+ ;
+#pragma acc parallel reduction(max:someVar)
+ ;
+#pragma acc parallel reduction(min:someVar)
+ ;
+#pragma acc parallel reduction(&:someVar)
+ ;
+#pragma acc parallel reduction(|:someVar)
+ ;
+#pragma acc parallel reduction(^:someVar)
+ ;
+#pragma acc parallel reduction(&&:someVar)
+ ;
+#pragma acc parallel reduction(||:someVar)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr)
+ ;
+#pragma acc parallel reduction(*:someVarArr)
+ ;
+#pragma acc parallel reduction(max:someVarArr)
+ ;
+#pragma acc parallel reduction(min:someVarArr)
+ ;
+#pragma acc parallel reduction(&:someVarArr)
+ ;
+#pragma acc parallel reduction(|:someVarArr)
+ ;
+#pragma acc parallel reduction(^:someVarArr)
+ ;
+#pragma acc parallel reduction(&&:someVarArr)
+ ;
+#pragma acc parallel reduction(||:someVarArr)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[2])
+ ;
+#pragma acc parallel reduction(*:someVarArr[2])
+ ;
+#pragma acc parallel reduction(max:someVarArr[2])
+ ;
+#pragma acc parallel reduction(min:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(|:someVarArr[2])
+ ;
+#pragma acc parallel reduction(^:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(||:someVarArr[2])
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(*:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(max:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(min:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(|:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(^:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(||:someVarArr[1:1])
+ ;
+}
+
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
new file mode 100644
index 0000000..89af9d4
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
@@ -0,0 +1,429 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+struct DefaultOperators {
+ int i;
+ float f;
+ double d;
+};
+
+
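+// Recipe symbols follow the pattern @reduction_<op>__<mangled type>, where
+// the suffix uses the Itanium typeinfo-name mangling: "ZTS" is the
+// typeinfo-name prefix, "16DefaultOperators" is the length-prefixed record
+// name, and an "A5_" in front of it marks a five-element array of that type.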
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
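+// The <land>, <iand>, <min>, <max>, and <mul> recipes in this file currently
+// only alloca without storing anything; the TODO lines track the missing
+// identity stores. Presumably those will be 1 for <land> and <mul>, all-ones
+// for <iand>, the type's largest value for <min>, and its smallest for
+// <max>, i.e. per member something like:
+//   %one = cir.const #cir.int<1> : !s32i
+//   cir.store %one, %member : !s32i, !cir.ptr<!s32i>
+// (a sketch of the expected eventual output, not what is emitted today).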
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+
+template<typename T>
+void acc_compute() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc parallel reduction(+:someVar)
+ ;
+#pragma acc parallel reduction(*:someVar)
+ ;
+#pragma acc parallel reduction(max:someVar)
+ ;
+#pragma acc parallel reduction(min:someVar)
+ ;
+#pragma acc parallel reduction(&:someVar)
+ ;
+#pragma acc parallel reduction(|:someVar)
+ ;
+#pragma acc parallel reduction(^:someVar)
+ ;
+#pragma acc parallel reduction(&&:someVar)
+ ;
+#pragma acc parallel reduction(||:someVar)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr)
+ ;
+#pragma acc parallel reduction(*:someVarArr)
+ ;
+#pragma acc parallel reduction(max:someVarArr)
+ ;
+#pragma acc parallel reduction(min:someVarArr)
+ ;
+#pragma acc parallel reduction(&:someVarArr)
+ ;
+#pragma acc parallel reduction(|:someVarArr)
+ ;
+#pragma acc parallel reduction(^:someVarArr)
+ ;
+#pragma acc parallel reduction(&&:someVarArr)
+ ;
+#pragma acc parallel reduction(||:someVarArr)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[2])
+ ;
+#pragma acc parallel reduction(*:someVarArr[2])
+ ;
+#pragma acc parallel reduction(max:someVarArr[2])
+ ;
+#pragma acc parallel reduction(min:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(|:someVarArr[2])
+ ;
+#pragma acc parallel reduction(^:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(||:someVarArr[2])
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(*:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(max:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(min:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(|:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(^:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(||:someVarArr[1:1])
+ ;
+}
+
+void uses() {
+ acc_compute<DefaultOperators>();
+}
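+// Note that the recipes above are only generated once acc_compute is
+// actually instantiated with DefaultOperators here; the uninstantiated
+// template body presumably emits nothing on its own.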
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
new file mode 100644
index 0000000..6047578
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
@@ -0,0 +1,361 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
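+// Unlike the record case, the per-element init here needs no cir.get_member
+// chain: each loop iteration stores a single #cir.fp zero straight through
+// the iterator pointer.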
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSf : !cir.ptr<!cir.float> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSf : !cir.ptr<!cir.float> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSf : !cir.ptr<!cir.float> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSf : !cir.ptr<!cir.float> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSf : !cir.ptr<!cir.float> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSf : !cir.ptr<!cir.float> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSf : !cir.ptr<!cir.float> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSf : !cir.ptr<!cir.float> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSf : !cir.ptr<!cir.float> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+void acc_compute() {
+ float someVar;
+ float someVarArr[5];
+#pragma acc parallel reduction(+:someVar)
+ ;
+#pragma acc parallel reduction(*:someVar)
+ ;
+#pragma acc parallel reduction(max:someVar)
+ ;
+#pragma acc parallel reduction(min:someVar)
+ ;
+#pragma acc parallel reduction(&:someVar)
+ ;
+#pragma acc parallel reduction(|:someVar)
+ ;
+#pragma acc parallel reduction(^:someVar)
+ ;
+#pragma acc parallel reduction(&&:someVar)
+ ;
+#pragma acc parallel reduction(||:someVar)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr)
+ ;
+#pragma acc parallel reduction(*:someVarArr)
+ ;
+#pragma acc parallel reduction(max:someVarArr)
+ ;
+#pragma acc parallel reduction(min:someVarArr)
+ ;
+#pragma acc parallel reduction(&:someVarArr)
+ ;
+#pragma acc parallel reduction(|:someVarArr)
+ ;
+#pragma acc parallel reduction(^:someVarArr)
+ ;
+#pragma acc parallel reduction(&&:someVarArr)
+ ;
+#pragma acc parallel reduction(||:someVarArr)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[2])
+ ;
+#pragma acc parallel reduction(*:someVarArr[2])
+ ;
+#pragma acc parallel reduction(max:someVarArr[2])
+ ;
+#pragma acc parallel reduction(min:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(|:someVarArr[2])
+ ;
+#pragma acc parallel reduction(^:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(||:someVarArr[2])
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(*:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(max:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(min:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(|:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(^:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(||:someVarArr[1:1])
+ ;
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
new file mode 100644
index 0000000..2351ace
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
@@ -0,0 +1,366 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+
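+// This is evidently the C++ counterpart of
+// compute-reduction-clause-float.c: the mangled recipe names and expected
+// regions are identical, so the same CHECK body exercises the C++ codegen
+// path.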
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
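+// Note: of the float-array recipes above, <xor>, <ior>, and <add> zero each
+// element with a do/while pointer walk; <iand>, <min>, <max>, and <mul>
+// currently leave the alloca uninitialized (see the TODOs), and every
+// combiner is still a placeholder that simply yields the LHS argument.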
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSf : !cir.ptr<!cir.float> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSf : !cir.ptr<!cir.float> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSf : !cir.ptr<!cir.float> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSf : !cir.ptr<!cir.float> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSf : !cir.ptr<!cir.float> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSf : !cir.ptr<!cir.float> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSf : !cir.ptr<!cir.float> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSf : !cir.ptr<!cir.float> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSf : !cir.ptr<!cir.float> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
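+// acc_compute instantiates each reduction operator over a scalar, a whole
+// array, a single element, and a subarray; each operator/type pair produces
+// one recipe above (mangled as e.g. reduction_add__ZTSf / __ZTSA5_f).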
+template<typename T>
+void acc_compute() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc parallel reduction(+:someVar)
+ ;
+#pragma acc parallel reduction(*:someVar)
+ ;
+#pragma acc parallel reduction(max:someVar)
+ ;
+#pragma acc parallel reduction(min:someVar)
+ ;
+#pragma acc parallel reduction(&:someVar)
+ ;
+#pragma acc parallel reduction(|:someVar)
+ ;
+#pragma acc parallel reduction(^:someVar)
+ ;
+#pragma acc parallel reduction(&&:someVar)
+ ;
+#pragma acc parallel reduction(||:someVar)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr)
+ ;
+#pragma acc parallel reduction(*:someVarArr)
+ ;
+#pragma acc parallel reduction(max:someVarArr)
+ ;
+#pragma acc parallel reduction(min:someVarArr)
+ ;
+#pragma acc parallel reduction(&:someVarArr)
+ ;
+#pragma acc parallel reduction(|:someVarArr)
+ ;
+#pragma acc parallel reduction(^:someVarArr)
+ ;
+#pragma acc parallel reduction(&&:someVarArr)
+ ;
+#pragma acc parallel reduction(||:someVarArr)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[2])
+ ;
+#pragma acc parallel reduction(*:someVarArr[2])
+ ;
+#pragma acc parallel reduction(max:someVarArr[2])
+ ;
+#pragma acc parallel reduction(min:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(|:someVarArr[2])
+ ;
+#pragma acc parallel reduction(^:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(||:someVarArr[2])
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(*:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(max:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(min:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(|:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(^:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(||:someVarArr[1:1])
+ ;
+}
+
+void uses() {
+ acc_compute<float>();
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp
new file mode 100644
index 0000000..abb8d07
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp
@@ -0,0 +1,657 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+struct HasOperatorsInline {
+ int i;
+ float f;
+ double d;
+
+ ~HasOperatorsInline();
+
+ HasOperatorsInline &operator+=(HasOperatorsInline& other);
+ HasOperatorsInline &operator*=(HasOperatorsInline& other);
+ HasOperatorsInline &operator&=(HasOperatorsInline& other);
+ HasOperatorsInline &operator|=(HasOperatorsInline& other);
+ HasOperatorsInline &operator^=(HasOperatorsInline& other);
+ bool &operator&&(HasOperatorsInline& other);
+ bool &operator||(HasOperatorsInline& other);
+ // For min/max
+ HasOperatorsInline &operator<(HasOperatorsInline& other);
+ HasOperatorsInline &operator=(HasOperatorsInline& other);
+};
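+// The compound-assignment, comparison, and assignment operators above are
+// presumably what the (currently TODO) combiner regions will eventually
+// call; the user-declared destructor is what forces the "destroy" region
+// on every recipe below.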
+
+
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsInline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
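+// The destroy region above walks backwards from the last element (the
+// #cir.int<4> stride off the decayed base) toward the start of the array,
+// calling ~HasOperatorsInline at each step.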
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsInline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsInline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsInline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
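+// For the scalar recipes, destruction is just a single direct call to
+// ~HasOperatorsInline on the incoming pointer; no loop is needed.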
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
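+// The template below (instantiated with HasOperatorsInline in uses()) asks
+// for every supported reduction operator on a scalar, a whole array, a
+// single element, and a subarray section; those clauses are what generate
+// the recipes checked above.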
+template<typename T>
+void acc_compute() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc parallel reduction(+:someVar)
+ ;
+#pragma acc parallel reduction(*:someVar)
+ ;
+#pragma acc parallel reduction(max:someVar)
+ ;
+#pragma acc parallel reduction(min:someVar)
+ ;
+#pragma acc parallel reduction(&:someVar)
+ ;
+#pragma acc parallel reduction(|:someVar)
+ ;
+#pragma acc parallel reduction(^:someVar)
+ ;
+#pragma acc parallel reduction(&&:someVar)
+ ;
+#pragma acc parallel reduction(||:someVar)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr)
+ ;
+#pragma acc parallel reduction(*:someVarArr)
+ ;
+#pragma acc parallel reduction(max:someVarArr)
+ ;
+#pragma acc parallel reduction(min:someVarArr)
+ ;
+#pragma acc parallel reduction(&:someVarArr)
+ ;
+#pragma acc parallel reduction(|:someVarArr)
+ ;
+#pragma acc parallel reduction(^:someVarArr)
+ ;
+#pragma acc parallel reduction(&&:someVarArr)
+ ;
+#pragma acc parallel reduction(||:someVarArr)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[2])
+ ;
+#pragma acc parallel reduction(*:someVarArr[2])
+ ;
+#pragma acc parallel reduction(max:someVarArr[2])
+ ;
+#pragma acc parallel reduction(min:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(|:someVarArr[2])
+ ;
+#pragma acc parallel reduction(^:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(||:someVarArr[2])
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(*:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(max:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(min:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(|:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(^:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(||:someVarArr[1:1])
+ ;
+}
+
+void uses() {
+ acc_compute<HasOperatorsInline>();
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
new file mode 100644
index 0000000..877230c
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
@@ -0,0 +1,361 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+
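+// Reduction recipes for 'int' and 'int[5]'. Only the add/ior/xor/lor 'init'
+// regions zero-initialize so far; the remaining operators are still TODO,
+// and every 'combiner' region is a placeholder that yields the LHS
+// unchanged.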
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSi : !cir.ptr<!s32i> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSi : !cir.ptr<!s32i> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSi : !cir.ptr<!s32i> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSi : !cir.ptr<!s32i> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr<!s32i> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr<!s32i> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr<!s32i> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSi : !cir.ptr<!s32i> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSi : !cir.ptr<!s32i> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
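+// Each clause below requests one reduction operator on the scalar, the
+// whole array, a single element, or a subarray section of it.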
+void acc_compute() {
+ int someVar;
+ int someVarArr[5];
+#pragma acc parallel reduction(+:someVar)
+ ;
+#pragma acc parallel reduction(*:someVar)
+ ;
+#pragma acc parallel reduction(max:someVar)
+ ;
+#pragma acc parallel reduction(min:someVar)
+ ;
+#pragma acc parallel reduction(&:someVar)
+ ;
+#pragma acc parallel reduction(|:someVar)
+ ;
+#pragma acc parallel reduction(^:someVar)
+ ;
+#pragma acc parallel reduction(&&:someVar)
+ ;
+#pragma acc parallel reduction(||:someVar)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr)
+ ;
+#pragma acc parallel reduction(*:someVarArr)
+ ;
+#pragma acc parallel reduction(max:someVarArr)
+ ;
+#pragma acc parallel reduction(min:someVarArr)
+ ;
+#pragma acc parallel reduction(&:someVarArr)
+ ;
+#pragma acc parallel reduction(|:someVarArr)
+ ;
+#pragma acc parallel reduction(^:someVarArr)
+ ;
+#pragma acc parallel reduction(&&:someVarArr)
+ ;
+#pragma acc parallel reduction(||:someVarArr)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[2])
+ ;
+#pragma acc parallel reduction(*:someVarArr[2])
+ ;
+#pragma acc parallel reduction(max:someVarArr[2])
+ ;
+#pragma acc parallel reduction(min:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(|:someVarArr[2])
+ ;
+#pragma acc parallel reduction(^:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(||:someVarArr[2])
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(*:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(max:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(min:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(|:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(^:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(||:someVarArr[1:1])
+ ;
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp
new file mode 100644
index 0000000..c1efc92
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp
@@ -0,0 +1,363 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+
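+// C++ variant of the 'int' reduction recipes: the add/ior/xor/lor 'init'
+// regions zero-initialize, the remaining operators are still TODO, and
+// every 'combiner' region is a placeholder that yields the LHS unchanged.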
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSi : !cir.ptr<!s32i> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSi : !cir.ptr<!s32i> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSi : !cir.ptr<!s32i> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSi : !cir.ptr<!s32i> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr<!s32i> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr<!s32i> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr<!s32i> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSi : !cir.ptr<!s32i> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSi : !cir.ptr<!s32i> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
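+// Same operator coverage as the C test, but through a dependent type:
+// uses() instantiates the template below with T = int.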
+template<typename T>
+void acc_compute() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc parallel reduction(+:someVar)
+ ;
+#pragma acc parallel reduction(*:someVar)
+ ;
+#pragma acc parallel reduction(max:someVar)
+ ;
+#pragma acc parallel reduction(min:someVar)
+ ;
+#pragma acc parallel reduction(&:someVar)
+ ;
+#pragma acc parallel reduction(|:someVar)
+ ;
+#pragma acc parallel reduction(^:someVar)
+ ;
+#pragma acc parallel reduction(&&:someVar)
+ ;
+#pragma acc parallel reduction(||:someVar)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr)
+ ;
+#pragma acc parallel reduction(*:someVarArr)
+ ;
+#pragma acc parallel reduction(max:someVarArr)
+ ;
+#pragma acc parallel reduction(min:someVarArr)
+ ;
+#pragma acc parallel reduction(&:someVarArr)
+ ;
+#pragma acc parallel reduction(|:someVarArr)
+ ;
+#pragma acc parallel reduction(^:someVarArr)
+ ;
+#pragma acc parallel reduction(&&:someVarArr)
+ ;
+#pragma acc parallel reduction(||:someVarArr)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[2])
+ ;
+#pragma acc parallel reduction(*:someVarArr[2])
+ ;
+#pragma acc parallel reduction(max:someVarArr[2])
+ ;
+#pragma acc parallel reduction(min:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(|:someVarArr[2])
+ ;
+#pragma acc parallel reduction(^:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(||:someVarArr[2])
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(*:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(max:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(min:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(|:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(^:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(||:someVarArr[1:1])
+ ;
+}
+
+void uses() {
+ acc_compute<int>();
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp
new file mode 100644
index 0000000..87203bf
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp
@@ -0,0 +1,655 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
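+
+// HasOperatorsOutline declares its reduction operators as free functions
+// rather than members, and it has a user-declared destructor, so each
+// recipe below also carries a 'destroy' region that calls
+// _ZN19HasOperatorsOutlineD1Ev.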
+struct HasOperatorsOutline {
+ int i;
+ float f;
+ double d;
+
+ ~HasOperatorsOutline();
+ HasOperatorsOutline &operator=(const HasOperatorsOutline &);
+};
+
+HasOperatorsOutline &operator+=(HasOperatorsOutline &, HasOperatorsOutline &);
+HasOperatorsOutline &operator*=(HasOperatorsOutline &, HasOperatorsOutline &);
+HasOperatorsOutline &operator&=(HasOperatorsOutline &, HasOperatorsOutline &);
+HasOperatorsOutline &operator|=(HasOperatorsOutline &, HasOperatorsOutline &);
+HasOperatorsOutline &operator^=(HasOperatorsOutline &, HasOperatorsOutline &);
+bool &operator&&(HasOperatorsOutline &, HasOperatorsOutline &);
+bool &operator||(HasOperatorsOutline &, HasOperatorsOutline &);
+// For min/max
+HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &);
+
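+// For the array recipes, 'init' walks the five elements forward with an
+// 'arrayinit.temp' cursor, while 'destroy' starts at the last element and
+// strides backwards, destroying each one.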
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
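The destroy regions above all follow the same shape: take the decayed array base, point at the last element, and walk backwards invoking the destructor until the cursor has stepped down to the base pointer. A minimal plain-C++ sketch of that exact traversal (hypothetical helper name; `HasOperatorsOutline` is the test's struct), mirroring the checked CIR operation for operation:

  void destroy_elements(HasOperatorsOutline (&arr)[5]) {
    HasOperatorsOutline *base = arr;      // array_to_ptrdecay
    HasOperatorsOutline *idx = base + 4;  // cir.ptr_stride(base, 4): last element
    do {
      idx->~HasOperatorsOutline();        // cir.call @_ZN19HasOperatorsOutlineD1Ev
      idx = idx - 1;                      // cir.ptr_stride(idx, -1)
    } while (idx != base);                // cir.cmp(ne, idx, base)
  }

The loop exits once the decremented cursor compares equal to the decayed base, which is precisely what the cir.cmp(ne, ...) condition in each destroy region checks.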
+template<typename T>
+void acc_compute() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc parallel reduction(+:someVar)
+ ;
+#pragma acc parallel reduction(*:someVar)
+ ;
+#pragma acc parallel reduction(max:someVar)
+ ;
+#pragma acc parallel reduction(min:someVar)
+ ;
+#pragma acc parallel reduction(&:someVar)
+ ;
+#pragma acc parallel reduction(|:someVar)
+ ;
+#pragma acc parallel reduction(^:someVar)
+ ;
+#pragma acc parallel reduction(&&:someVar)
+ ;
+#pragma acc parallel reduction(||:someVar)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr)
+ ;
+#pragma acc parallel reduction(*:someVarArr)
+ ;
+#pragma acc parallel reduction(max:someVarArr)
+ ;
+#pragma acc parallel reduction(min:someVarArr)
+ ;
+#pragma acc parallel reduction(&:someVarArr)
+ ;
+#pragma acc parallel reduction(|:someVarArr)
+ ;
+#pragma acc parallel reduction(^:someVarArr)
+ ;
+#pragma acc parallel reduction(&&:someVarArr)
+ ;
+#pragma acc parallel reduction(||:someVarArr)
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[2])
+ ;
+#pragma acc parallel reduction(*:someVarArr[2])
+ ;
+#pragma acc parallel reduction(max:someVarArr[2])
+ ;
+#pragma acc parallel reduction(min:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(|:someVarArr[2])
+ ;
+#pragma acc parallel reduction(^:someVarArr[2])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[2])
+ ;
+#pragma acc parallel reduction(||:someVarArr[2])
+ ;
+
+#pragma acc parallel reduction(+:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(*:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(max:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(min:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(|:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(^:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(&&:someVarArr[1:1])
+ ;
+#pragma acc parallel reduction(||:someVarArr[1:1])
+ ;
+}
+
+void uses() {
+ acc_compute<HasOperatorsOutline>();
+}
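The recipe names encode the reduced type with its Itanium typeid string: `_ZTS19HasOperatorsOutline` is the scalar struct and `_ZTSA5_19HasOperatorsOutline` the 5-element array of it (`A5_` = "array of 5"). A small standalone check of that encoding, assuming an Itanium-ABI compiler and with the struct reduced to its data members for the sketch:

  #include <cstdio>
  #include <typeinfo>

  struct HasOperatorsOutline { int i; float f; double d; };

  int main() {
    HasOperatorsOutline one, arr[5];
    std::puts(typeid(one).name()); // prints "19HasOperatorsOutline"
    std::puts(typeid(arr).name()); // prints "A5_19HasOperatorsOutline"
  }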
diff --git a/clang/test/CIR/CodeGenOpenACC/init.c b/clang/test/CIR/CodeGenOpenACC/init.c
index 177e5a6..805fb08 100644
--- a/clang/test/CIR/CodeGenOpenACC/init.c
+++ b/clang/test/CIR/CodeGenOpenACC/init.c
@@ -11,12 +11,8 @@ void acc_init(int cond) {
// CHECK-NEXT: acc.init attributes {device_types = [#acc.device_type<star>]}
#pragma acc init device_type(nvidia)
// CHECK-NEXT: acc.init attributes {device_types = [#acc.device_type<nvidia>]}
-#pragma acc init device_type(host, multicore)
- // CHECK-NEXT: acc.init attributes {device_types = [#acc.device_type<host>, #acc.device_type<multicore>]}
#pragma acc init device_type(NVIDIA)
// CHECK-NEXT: acc.init attributes {device_types = [#acc.device_type<nvidia>]}
-#pragma acc init device_type(HoSt, MuLtIcORe)
- // CHECK-NEXT: acc.init attributes {device_types = [#acc.device_type<host>, #acc.device_type<multicore>]}
#pragma acc init device_type(HoSt) device_type(MuLtIcORe)
// CHECK-NEXT: acc.init attributes {device_types = [#acc.device_type<host>, #acc.device_type<multicore>]}
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp
new file mode 100644
index 0000000..3dada2d
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp
@@ -0,0 +1,428 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+struct DefaultOperators {
+ int i;
+ float f;
+ double d;
+};
+
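For the <add>, <xor>, <ior> and <lor> array recipes below, the init region materializes the identity value by zeroing each member of every element with a pointer-chase loop ("arrayinit.temp"). A plain-C++ sketch of the same loop (hypothetical helper name), under the assumption that zero is the intended identity for these operators:

  void zero_init(DefaultOperators (&arr)[5]) {
    DefaultOperators *it = arr;       // decayed base stored in arrayinit.temp
    DefaultOperators *end = arr + 5;  // one past the last element
    do {
      it->i = 0;                      // cir.const #cir.int<0> : !s32i
      it->f = 0.0f;                   // cir.const #cir.fp<0> : !cir.float
      it->d = 0.0;                    // cir.const #cir.fp<0> : !cir.double
      ++it;                           // cir.ptr_stride(it, 1)
    } while (it != end);              // cir.cmp(ne, it, end)
  }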
+
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_DefaultOperators>, %[[ONE]] : !s64i), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_DefaultOperators>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: }
+
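+// The template below requests every OpenACC reduction operator on a scalar,
+// a whole array, a single array element, and an array section; instantiating
+// it with DefaultOperators in uses() is what forces emission of all of the
+// recipes checked above.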
+template<typename T>
+void acc_loop() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc loop reduction(+:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVar)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+}
+
+void uses() {
+ acc_loop<DefaultOperators>();
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
new file mode 100644
index 0000000..0d3a018
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
@@ -0,0 +1,366 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+
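+// This file checks the reduction recipes emitted for 'float' and 'float[5]'
+// across all nine reduction operators. At this stage only the add/xor/ior/lor
+// recipes zero-initialize the private copy in their 'init' region; the other
+// operators (and every 'combiner') are still TODO placeholders, so their
+// checks cover just the bare alloca and the yields.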
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!cir.float>>, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!cir.float>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSf : !cir.ptr<!cir.float> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSf : !cir.ptr<!cir.float> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSf : !cir.ptr<!cir.float> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSf : !cir.ptr<!cir.float> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSf : !cir.ptr<!cir.float> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSf : !cir.ptr<!cir.float> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSf : !cir.ptr<!cir.float> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSf : !cir.ptr<!cir.float> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSf : !cir.ptr<!cir.float> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.float>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: }
+
+template<typename T>
+void acc_loop() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc loop reduction(+:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVar)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+}
+
+void uses() {
+ acc_loop<float>();
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp
new file mode 100644
index 0000000..43b50c6
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp
@@ -0,0 +1,657 @@
+// RUN: not %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+struct HasOperatorsInline {
+ int i;
+ float f;
+ double d;
+
+ ~HasOperatorsInline();
+
+ HasOperatorsInline &operator+=(HasOperatorsInline& other);
+ HasOperatorsInline &operator*=(HasOperatorsInline& other);
+ HasOperatorsInline &operator&=(HasOperatorsInline& other);
+ HasOperatorsInline &operator|=(HasOperatorsInline& other);
+ HasOperatorsInline &operator^=(HasOperatorsInline& other);
+ bool &operator&&(HasOperatorsInline& other);
+ bool &operator||(HasOperatorsInline& other);
+ // For min/max
+ HasOperatorsInline &operator<(HasOperatorsInline& other);
+ HasOperatorsInline &operator=(HasOperatorsInline& other);
+};
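+// Each recipe below is checked for three regions: 'init' (member-by-member
+// zero-initialization of i/f/d where implemented, otherwise a bare alloca and
+// a TODO), 'combiner' (a placeholder that just yields the LHS), and, because
+// HasOperatorsInline has a user-declared destructor, a 'destroy' region; for
+// the array recipes this walks the elements in reverse, calling
+// ~HasOperatorsInline on each one.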
+
+
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsInline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsInline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsInline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsInline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsInline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsInline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+
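+// The recipes below are the scalar (non-array) variants for HasOperatorsInline.
+// Where init is implemented (<lor>, <xor>, <ior>, <add>), it zero-initializes
+// the i/f/d members directly; destroy simply invokes the destructor once.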
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsInline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
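+// acc_loop exercises every reduction operator (+, *, max, min, &, |, ^, &&, ||)
+// on a scalar T, a 5-element array of T, a single array element, and a
+// subarray.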
+template<typename T>
+void acc_loop() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc loop reduction(+:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVar)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+}
+
+void uses() {
+ acc_loop<HasOperatorsInline>();
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
new file mode 100644
index 0000000..97ade74
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
@@ -0,0 +1,363 @@
+// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+
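+// Verifies the reduction recipes emitted for plain int. <add>, <lor>, <xor>,
+// and <ior> zero-initialize the private copy; <land>, <iand>, <min>, <max>,
+// and <mul> still lack an identity-value init (see the TODO lines below), and
+// no destroy region is emitted for this trivial type.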
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!s32i>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: }
+
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSi : !cir.ptr<!s32i> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSi : !cir.ptr<!s32i> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSi : !cir.ptr<!s32i> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSi : !cir.ptr<!s32i> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr<!s32i> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr<!s32i> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr<!s32i> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSi : !cir.ptr<!s32i> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSi : !cir.ptr<!s32i> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!s32i>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: }
+
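+// Same instantiation pattern as the record-type tests: each operator is
+// applied to a scalar, the whole array, a single element, and a subarray.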
+template<typename T>
+void acc_loop() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc loop reduction(+:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVar)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+}
+
+void uses() {
+ acc_loop<int>();
+}
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
new file mode 100644
index 0000000..b0191fc
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
@@ -0,0 +1,655 @@
+// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+struct HasOperatorsOutline {
+ int i;
+ float f;
+ double d;
+
+ ~HasOperatorsOutline();
+ HasOperatorsOutline &operator=(const HasOperatorsOutline &);
+};
+
+HasOperatorsOutline &operator+=(HasOperatorsOutline &, HasOperatorsOutline &);
+HasOperatorsOutline &operator*=(HasOperatorsOutline &, HasOperatorsOutline &);
+HasOperatorsOutline &operator&=(HasOperatorsOutline &, HasOperatorsOutline &);
+HasOperatorsOutline &operator|=(HasOperatorsOutline &, HasOperatorsOutline &);
+HasOperatorsOutline &operator^=(HasOperatorsOutline &, HasOperatorsOutline &);
+bool &operator&&(HasOperatorsOutline &, HasOperatorsOutline &);
+bool &operator||(HasOperatorsOutline &, HasOperatorsOutline &);
+// For min/max
+HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &);
+
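+// HasOperatorsOutline declares the reduction-related operators (+=, *=, &=,
+// |=, ^=, &&, ||, <) as free functions (hence "outline"), unlike the
+// HasOperatorsInline variant above; the expected recipes are structurally the
+// same, modulo the record type and mangling.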
+// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to... SOME value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
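+// For the operators whose identity value is all-zero bits (add, xor, ior, and
+// lor), the init region stores zero into each element's members with a
+// forward do/while loop, as in the xor recipe above. Roughly, in C++ (an
+// illustrative sketch of the emitted loop, not code from this test):
+//
+//   HasOperatorsOutline *p = init;
+//   do {
+//     p->i = 0; p->f = 0.0f; p->d = 0.0;
+//     ++p;
+//   } while (p != init + 5);
+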
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to the operator's identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to the operator's identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to the operator's identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to the operator's identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
+// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
+// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i
+// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[NEG]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr<!rec_HasOperatorsOutline>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+
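+// Naming note: the recipe suffixes are Itanium type strings, so
+// _ZTSA5_19HasOperatorsOutline above names the array type
+// HasOperatorsOutline[5], while _ZTS19HasOperatorsOutline below names the
+// scalar type; the scalar recipes therefore need no per-element loops in
+// their init and destroy regions.
+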
+// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <lor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <land> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to the operator's identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <xor> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <ior> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <iand> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to the operator's identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <min> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to the operator's identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <max> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to the operator's identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <mul> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init"]
+// TODO OpenACC: Expecting an initialization to the operator's identity value here.
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <add> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][1] {name = "f"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][2] {name = "d"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: } destroy {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_HasOperatorsOutline>)
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: }
+
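+// The CHECK lines above assume a record along these lines, defined earlier in
+// this file. The members are inferred from the cir.get_member accesses and the
+// mangled destructor name; the operator declarations are an assumption based
+// on the type's name:
+//
+//   struct HasOperatorsOutline {
+//     int i;
+//     float f;
+//     double d;
+//     ~HasOperatorsOutline();
+//     // compound-assignment operators declared here, defined out of line
+//   };
+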
+template<typename T>
+void acc_loop() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc loop reduction(+:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVar)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVar)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr)
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr)
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr[2])
+ for(int i = 0; i < 5; ++i);
+
+#pragma acc loop reduction(+:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(*:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(max:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(min:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(|:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(^:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(&&:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+#pragma acc loop reduction(||:someVarArr[1:1])
+ for(int i = 0; i < 5; ++i);
+}
+
+void uses() {
+ acc_loop<HasOperatorsOutline>();
+}
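+
+// Instantiating acc_loop<HasOperatorsOutline> here is what drives emission of
+// the recipes checked above: every reduction operator is exercised on the
+// scalar variable, the five-element array, a single element (someVarArr[2]),
+// and an OpenACC subarray (someVarArr[1:1], i.e. start 1, length 1).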
diff --git a/clang/test/CIR/CodeGenOpenACC/openacc-not-implemented.cpp b/clang/test/CIR/CodeGenOpenACC/openacc-not-implemented.cpp
index 0bf932e..33e12fe 100644
--- a/clang/test/CIR/CodeGenOpenACC/openacc-not-implemented.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/openacc-not-implemented.cpp
@@ -2,18 +2,10 @@
void HelloWorld(int *A, int *B, int *C, int N) {
-// expected-error@+2{{ClangIR code gen Not Yet Implemented: OpenACC Atomic Construct}}
-// expected-error@+1{{ClangIR code gen Not Yet Implemented: emitCompoundStmtWithoutScope: OpenACCAtomicConstruct}}
+// expected-error@+1{{ClangIR code gen Not Yet Implemented: OpenACC Atomic Construct}}
#pragma acc atomic
N = N + 1;
// expected-error@+1{{ClangIR code gen Not Yet Implemented: OpenACC Declare Construct}}
#pragma acc declare create(A)
-
- // expected-error@+1{{ClangIR code gen Not Yet Implemented: OpenACC Clause: firstprivate}}
-#pragma acc parallel loop firstprivate(A)
- for(int i = 0; i <5; ++i);
- // expected-error@+1{{ClangIR code gen Not Yet Implemented: OpenACC Clause: reduction}}
-#pragma acc parallel loop reduction(+:A)
- for(int i = 0; i <5; ++i);
}
diff --git a/clang/test/CIR/CodeGenOpenACC/shutdown.c b/clang/test/CIR/CodeGenOpenACC/shutdown.c
index 52db382..b68ef90 100644
--- a/clang/test/CIR/CodeGenOpenACC/shutdown.c
+++ b/clang/test/CIR/CodeGenOpenACC/shutdown.c
@@ -11,12 +11,8 @@ void acc_shutdown(int cond) {
// CHECK-NEXT: acc.shutdown attributes {device_types = [#acc.device_type<star>]}
#pragma acc shutdown device_type(nvidia)
// CHECK-NEXT: acc.shutdown attributes {device_types = [#acc.device_type<nvidia>]}
-#pragma acc shutdown device_type(host, multicore)
- // CHECK-NEXT: acc.shutdown attributes {device_types = [#acc.device_type<host>, #acc.device_type<multicore>]}
#pragma acc shutdown device_type(NVIDIA)
// CHECK-NEXT: acc.shutdown attributes {device_types = [#acc.device_type<nvidia>]}
-#pragma acc shutdown device_type(HoSt, MuLtIcORe)
- // CHECK-NEXT: acc.shutdown attributes {device_types = [#acc.device_type<host>, #acc.device_type<multicore>]}
#pragma acc shutdown device_type(HoSt) device_type(MuLtIcORe)
// CHECK-NEXT: acc.shutdown attributes {device_types = [#acc.device_type<host>, #acc.device_type<multicore>]}
diff --git a/clang/test/CIR/IR/inline-asm.cir b/clang/test/CIR/IR/inline-asm.cir
new file mode 100644
index 0000000..fb1f631
--- /dev/null
+++ b/clang/test/CIR/IR/inline-asm.cir
@@ -0,0 +1,112 @@
+// RUN: cir-opt %s | FileCheck %s
+
+!s32i = !cir.int<s, 32>
+!u32i = !cir.int<u, 32>
+
+module {
+cir.func @f1() {
+ // CHECK: cir.asm(x86_att,
+ // CHECK: out = [],
+ // CHECK: in = [],
+ // CHECK: in_out = [],
+ // CHECK: {"" "~{dirflag},~{fpsr},~{flags}"})
+ cir.asm(x86_att,
+ out = [],
+ in = [],
+ in_out = [],
+ {"" "~{dirflag},~{fpsr},~{flags}"})
+ cir.return
+}
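+// f1 is the minimal round-trip case: an empty template string, empty operand
+// lists, and only the usual x86 clobber set (dirflag, fpsr, flags).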
+
+cir.func @f2() {
+ // CHECK: cir.asm(x86_att,
+ // CHECK: out = [],
+ // CHECK: in = [],
+ // CHECK: in_out = [],
+ // CHECK: {"" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+ cir.asm(x86_att,
+ out = [],
+ in = [],
+ in_out = [],
+ {"" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+ cir.return
+}
+
+cir.func @f3() {
+ // CHECK: cir.asm(x86_att,
+ // CHECK: out = [],
+ // CHECK: in = [],
+ // CHECK: in_out = [],
+ // CHECK: {"abc" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+ cir.asm(x86_att,
+ out = [],
+ in = [],
+ in_out = [],
+ {"abc" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+ cir.return
+}
+
+cir.func @f4(%arg0: !s32i) {
+ %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init] {alignment = 4 : i64}
+ cir.store %arg0, %0 : !s32i, !cir.ptr<!s32i>
+ // CHECK: cir.asm(x86_att,
+ // CHECK: out = [],
+ // CHECK: in = [%0 : !cir.ptr<!s32i> (maybe_memory)],
+ // CHECK: in_out = [],
+ // CHECK: {"" "*m,~{dirflag},~{fpsr},~{flags}"}) side_effects
+ cir.asm(x86_att,
+ out = [],
+ in = [%0 : !cir.ptr<!s32i> (maybe_memory)],
+ in_out = [],
+ {"" "*m,~{dirflag},~{fpsr},~{flags}"}) side_effects
+ cir.return
+}
+
+cir.func @f5() {
+ // CHECK: cir.asm(x86_intel,
+ // CHECK: out = [],
+ // CHECK: in = [],
+ // CHECK: in_out = [],
+ // CHECK: {"" "~{dirflag},~{fpsr},~{flags}"})
+ cir.asm(x86_intel,
+ out = [],
+ in = [],
+ in_out = [],
+ {"" "~{dirflag},~{fpsr},~{flags}"})
+ cir.return
+}
+cir.func @f6() -> !s32i {
+ %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init] {alignment = 4 : i64}
+ // CHECK: %1 = cir.asm(x86_att,
+ // CHECK: out = [],
+ // CHECK: in = [],
+ // CHECK: in_out = [],
+ // CHECK: {"movl $$42, $0" "=r,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !s32i
+ %1 = cir.asm(x86_att,
+ out = [],
+ in = [],
+ in_out = [],
+ {"movl $$42, $0" "=r,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !s32i
+ cir.store align(4) %1, %0 : !s32i, !cir.ptr<!s32i>
+ %3 = cir.load align(4) %0 : !cir.ptr<!s32i>, !s32i
+ cir.return %3 : !s32i
+}
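+// f6 above returns a pure output: the "=r" constraint yields the result as an
+// SSA value (-> !s32i). f7 below exercises the in_out list instead, where the
+// "0" constraint ties the input to output operand 0.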
+cir.func @f7(%arg0: !u32i) -> !u32i {
+ %0 = cir.alloca !u32i, !cir.ptr<!u32i>, ["x", init] {alignment = 4 : i64}
+ cir.store %arg0, %0 : !u32i, !cir.ptr<!u32i>
+ %1 = cir.load align(4) %0 : !cir.ptr<!u32i>, !u32i
+ // CHECK: %2 = cir.asm(x86_att,
+ // CHECK: out = [],
+ // CHECK: in = [],
+ // CHECK: in_out = [%1 : !u32i],
+ // CHECK: {"addl $$42, $0" "=r,0,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !u32i
+ %2 = cir.asm(x86_att,
+ out = [],
+ in = [],
+ in_out = [%1 : !u32i],
+ {"addl $$42, $0" "=r,0,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !u32i
+ cir.store align(4) %2, %0 : !u32i, !cir.ptr<!u32i>
+ %3 = cir.load align(4) %0 : !cir.ptr<!u32i>, !u32i
+ cir.return %3 : !u32i
+}
+}
diff --git a/clang/test/CIR/IR/invalid-const-record.cir b/clang/test/CIR/IR/invalid-const-record.cir
new file mode 100644
index 0000000..37d7789
--- /dev/null
+++ b/clang/test/CIR/IR/invalid-const-record.cir
@@ -0,0 +1,23 @@
+// RUN: cir-opt %s -verify-diagnostics -split-input-file
+
+!s32i = !cir.int<s, 32>
+!rec_anon_struct = !cir.record<struct packed {!s32i, !s32i, !cir.array<!s32i x 8>}>
+
+// expected-error @below {{expected !cir.record type}}
+cir.global external @e = #cir.const_record<{#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.zero : !cir.array<!s32i x 8>}> : !cir.ptr<!rec_anon_struct>
+
+// -----
+
+!s32i = !cir.int<s, 32>
+!rec_anon_struct = !cir.record<struct packed {!s32i, !s32i, !cir.array<!s32i x 8>}>
+
+// expected-error @below {{number of elements must match}}
+cir.global external @e = #cir.const_record<{#cir.int<1> : !s32i, #cir.zero : !cir.array<!s32i x 8>}> : !rec_anon_struct
+
+// -----
+
+!s32i = !cir.int<s, 32>
+!rec_anon_struct = !cir.record<struct packed {!s32i, !s32i, !cir.array<!s32i x 8>}>
+
+// expected-error @below {{element at index 1 has type '!cir.float' but the expected type for this element is '!cir.int<s, 32>'}}
+cir.global external @e = #cir.const_record<{#cir.int<1> : !s32i, #cir.fp<2.000000e+00> : !cir.float, #cir.zero : !cir.array<!s32i x 8>}> : !rec_anon_struct
diff --git a/clang/test/CIR/IR/invalid-goto.cir b/clang/test/CIR/IR/invalid-goto.cir
new file mode 100644
index 0000000..9f58bac
--- /dev/null
+++ b/clang/test/CIR/IR/invalid-goto.cir
@@ -0,0 +1,9 @@
+// RUN: cir-opt %s -verify-diagnostics -split-input-file
+
+// expected-error@+1 {{goto/label mismatch}}
+cir.func @bad_goto() -> () {
+ cir.goto "somewhere"
+^bb1:
+ cir.label "label"
+ cir.return
+}
diff --git a/clang/test/CIR/IR/invalid-label.cir b/clang/test/CIR/IR/invalid-label.cir
new file mode 100644
index 0000000..4cb8d01
--- /dev/null
+++ b/clang/test/CIR/IR/invalid-label.cir
@@ -0,0 +1,12 @@
+// RUN: cir-opt %s -verify-diagnostics -split-input-file
+
+!s32i = !cir.int<s, 32>
+
+module {
+ // expected-error@+3 {{must be the first operation in a block}}
+ cir.func @error(){
+ %0 = cir.const #cir.int<0> : !s32i
+ cir.label "label"
+ cir.return
+ }
+}
diff --git a/clang/test/CIR/IR/invalid-lang-attr.cir b/clang/test/CIR/IR/invalid-lang-attr.cir
new file mode 100644
index 0000000..ffe523b
--- /dev/null
+++ b/clang/test/CIR/IR/invalid-lang-attr.cir
@@ -0,0 +1,5 @@
+// RUN: cir-opt %s -verify-diagnostics
+
+// expected-error@below {{expected ::cir::SourceLanguage to be one of}}
+// expected-error@below {{failed to parse CIR_SourceLanguageAttr parameter 'value'}}
+module attributes {cir.lang = #cir.lang<dummy>} { }
diff --git a/clang/test/CIR/IR/invalid-throw.cir b/clang/test/CIR/IR/invalid-throw.cir
new file mode 100644
index 0000000..53582a1
--- /dev/null
+++ b/clang/test/CIR/IR/invalid-throw.cir
@@ -0,0 +1,16 @@
+// RUN: cir-opt %s -verify-diagnostics -split-input-file
+
+!s32i = !cir.int<s, 32>
+
+module {
+
+cir.func dso_local @throw_without_type_info() {
+ %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["a"]
+ // expected-error @below {{'cir.throw' op 'type_info' symbol attribute missing}}
+ cir.throw %0 : !cir.ptr<!s32i>
+ cir.unreachable
+ ^bb1:
+ cir.return
+}
+
+}
diff --git a/clang/test/CIR/IR/invalid-vtable.cir b/clang/test/CIR/IR/invalid-vtable.cir
new file mode 100644
index 0000000..2e88016
--- /dev/null
+++ b/clang/test/CIR/IR/invalid-vtable.cir
@@ -0,0 +1,134 @@
+// RUN: cir-opt %s -verify-diagnostics -split-input-file
+
+!u32i = !cir.int<u, 32>
+cir.func @reference_unknown_vtable() {
+ // expected-error @below {{'cir.vtable.address_point' op 'some_vtable' does not reference a valid cir.global}}
+ %0 = cir.vtable.address_point(@some_vtable, address_point = <index = 0, offset = 2>) : !cir.vptr
+ cir.return
+}
+
+// -----
+
+!u8i = !cir.int<u, 8>
+!u32i = !cir.int<u, 32>
+cir.global linkonce_odr @_ZTT1D = #cir.const_array<[#cir.global_view<@_ZTV1D, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>, #cir.global_view<@_ZTC1D0_1B, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 2>
+cir.func @reference_non_vtable() {
+ // expected-error @below {{Expected #cir.vtable in initializer for global '_ZTT1D'}}
+ %0 = cir.vtable.address_point(@_ZTT1D, address_point = <index = 0, offset = 2>) : !cir.vptr
+ cir.return
+}
+
+// -----
+
+!rec_S = !cir.record<struct "S" {!cir.vptr}>
+!u8i = !cir.int<u, 8>
+!rec_anon_struct = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 4>}>
+module {
+ // expected-error @below {{expected !cir.record type result}}
+ cir.global external @_ZTV1S = #cir.vtable<{#cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S3keyEv> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S6nonKeyEv> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 4>}> : !cir.ptr<!rec_anon_struct>
+ cir.func private dso_local @_ZN1S3keyEv(%arg0: !cir.ptr<!rec_S>)
+ cir.func private dso_local @_ZN1S6nonKeyEv(%arg0: !cir.ptr<!rec_S>)
+}
+
+// -----
+
+!rec_S = !cir.record<struct "S" {!cir.vptr}>
+!u8i = !cir.int<u, 8>
+!rec_anon_struct = !cir.record<struct {}>
+module {
+ // expected-error @below {{expected record type with one or more subtype}}
+ cir.global external @_ZTV1S = #cir.vtable<{}> : !rec_anon_struct {alignment = 8 : i64}
+ cir.func private dso_local @_ZN1S3keyEv(%arg0: !cir.ptr<!rec_S>)
+ cir.func private dso_local @_ZN1S6nonKeyEv(%arg0: !cir.ptr<!rec_S>)
+}
+
+// -----
+
+!rec_S = !cir.record<struct "S" {!cir.vptr}>
+!u8i = !cir.int<u, 8>
+!rec_anon_struct = !cir.record<struct {!cir.ptr<!u8i>}>
+module {
+ // expected-error @below {{expected constant array subtype}}
+ cir.global external @_ZTV1S = #cir.vtable<{#cir.ptr<null> : !cir.ptr<!u8i>}> : !rec_anon_struct {alignment = 8 : i64}
+ cir.func private dso_local @_ZN1S3keyEv(%arg0: !cir.ptr<!rec_S>)
+ cir.func private dso_local @_ZN1S6nonKeyEv(%arg0: !cir.ptr<!rec_S>)
+}
+
+// -----
+
+!rec_S = !cir.record<struct "S" {!cir.vptr}>
+!u64i = !cir.int<u, 64>
+!rec_anon_struct = !cir.record<struct {!cir.array<!u64i x 4>}>
+module {
+ // expected-error @below {{expected GlobalViewAttr or ConstPtrAttr}}
+ cir.global external @_ZTV1S = #cir.vtable<{#cir.const_array<[#cir.int<1> : !u64i, #cir.int<1> : !u64i, #cir.int<3> : !u64i, #cir.int<4> : !u64i]> : !cir.array<!u64i x 4>}> : !rec_anon_struct {alignment = 8 : i64}
+ cir.func private dso_local @_ZN1S3keyEv(%arg0: !cir.ptr<!rec_S>)
+ cir.func private dso_local @_ZN1S6nonKeyEv(%arg0: !cir.ptr<!rec_S>)
+}
+
+// -----
+
+!rec_Q = !cir.record<struct "Q" {!cir.vptr}>
+!rec_S = !cir.record<struct "S" {!cir.vptr}>
+!rec_S2 = !cir.record<struct "S2" {!rec_Q, !rec_S}>
+!u8i = !cir.int<u, 8>
+!rec_anon_struct = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 4>, !cir.ptr<!u8i>}>
+module {
+ // expected-error @below {{expected constant array subtype}}
+ cir.global external @_ZTV2S2 = #cir.vtable<{#cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S3keyEv> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S6nonKeyEv> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 4>, #cir.ptr<null> : !cir.ptr<!u8i>}> : !rec_anon_struct {alignment = 8 : i64}
+
+ cir.func private dso_local @_ZN1S3keyEv(%arg0: !cir.ptr<!rec_S>)
+ cir.func private dso_local @_ZN1S6nonKeyEv(%arg0: !cir.ptr<!rec_S>)
+ cir.func private dso_local @_ZN2S23keyEv(%arg0: !cir.ptr<!rec_S2>)
+}
+
+// -----
+
+!u32i = !cir.int<u, 32>
+!void = !cir.void
+cir.func @reference_unknown_vtt() {
+ // expected-error @below {{'cir.vtt.address_point' op 'some_vtt' does not reference a valid cir.global}}
+ %0 = cir.vtt.address_point @some_vtt, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+ cir.return
+}
+
+// -----
+
+!u8i = !cir.int<u, 8>
+!u32i = !cir.int<u, 32>
+!void = !cir.void
+!rec_anon_struct = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 4>}>
+cir.global external @_ZTV1S = #cir.vtable<{#cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S3keyEv> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S6nonKeyEv> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 4>}> : !rec_anon_struct {alignment = 8 : i64}
+cir.func @reference_non_vtt() {
+ // expected-error @below {{'cir.vtt.address_point' op Expected constant array in initializer for global VTT '_ZTV1S'}}
+ %0 = cir.vtt.address_point @_ZTV1S, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+ cir.return
+}
+
+// -----
+
+!u8i = !cir.int<u, 8>
+!u32i = !cir.int<u, 32>
+!void = !cir.void
+!rec_anon_struct = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 4>}>
+!rec_C = !cir.record<class "C" {!cir.vptr}>
+cir.global linkonce_odr @_ZTT1C = #cir.const_array<[#cir.global_view<@_ZTV1C, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>, #cir.global_view<@_ZTC1C0_1B, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 2> {alignment = 8 : i64}
+cir.func @reference_name_and_value(%arg0: !cir.ptr<!rec_C>, %arg1: !cir.ptr<!cir.ptr<!void>>) {
+ // expected-error @below {{'cir.vtt.address_point' op should use either a symbol or value, but not both}}
+ %0 = cir.vtt.address_point @_ZTT1C %arg1 : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+ cir.return
+}
+
+// -----
+
+!u8i = !cir.int<u, 8>
+!u32i = !cir.int<u, 32>
+!void = !cir.void
+!rec_anon_struct = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 4>}>
+!rec_C = !cir.record<class "C" {!cir.vptr}>
+cir.global linkonce_odr @_ZTT1C = #cir.const_array<[#cir.global_view<@_ZTV1C, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>, #cir.global_view<@_ZTC1C0_1B, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 2> {alignment = 8 : i64}
+cir.func @bad_return_type_for_vtt_addrpoint() {
+ // expected-error @below {{result type must be '!cir.ptr<!cir.ptr<!cir.void>>', but provided result type is '!cir.ptr<!cir.int<u, 8>>'}}
+ %0 = cir.vtt.address_point @_ZTT1C, offset = 1 -> !cir.ptr<!u8i>
+ cir.return
+}
diff --git a/clang/test/CIR/IR/label.cir b/clang/test/CIR/IR/label.cir
new file mode 100644
index 0000000..2211a4e
--- /dev/null
+++ b/clang/test/CIR/IR/label.cir
@@ -0,0 +1,26 @@
+// RUN: cir-opt %s | FileCheck %s
+
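+// A plausible C++ source for these functions (an assumption; the commit does
+// not include it) -- each cir.label marks a source-level label:
+//
+//   void label() {
+//   label:
+//     return;
+//   }
+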
+!s32i = !cir.int<s, 32>
+
+module {
+ cir.func @label() {
+ cir.label "label"
+ cir.return
+ }
+
+ cir.func @label2() {
+ %0 = cir.const #cir.int<0> : !s32i
+ cir.br ^bb1
+ ^bb1: // pred: ^bb0
+ cir.label "label2"
+ cir.return
+ }
+}
+
+// CHECK: cir.func @label
+// CHECK-NEXT: cir.label "label"
+
+// CHECK: cir.func @label2
+// CHECK: cir.br ^bb1
+// CHECK-NEXT: ^bb1: // pred: ^bb0
+// CHECK-NEXT: cir.label "label2"
diff --git a/clang/test/CIR/IR/module.cir b/clang/test/CIR/IR/module.cir
new file mode 100644
index 0000000..7ce2c0b
--- /dev/null
+++ b/clang/test/CIR/IR/module.cir
@@ -0,0 +1,12 @@
+// RUN: cir-opt %s -split-input-file -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+
+// Should parse and print the C source language attribute.
+module attributes {cir.lang = #cir.lang<c>} { }
+// CHECK: module attributes {cir.lang = #cir.lang<c>}
+
+// -----
+
+// Should parse and print the C++ source language attribute.
+module attributes {cir.lang = #cir.lang<cxx>} { }
+// CHECK: module attributes {cir.lang = #cir.lang<cxx>}
diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir
index 85f475f..33f2e98 100644
--- a/clang/test/CIR/IR/struct.cir
+++ b/clang/test/CIR/IR/struct.cir
@@ -13,8 +13,9 @@
// CHECK-DAG: !rec_S = !cir.record<struct "S" incomplete>
// CHECK-DAG: !rec_U = !cir.record<union "U" incomplete>
-!rec_anon_struct = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 5>}>
-!rec_anon_struct1 = !cir.record<struct {!cir.ptr<!u8i>, !cir.ptr<!u8i>, !cir.ptr<!u8i>}>
+!rec_anon_struct = !cir.record<struct packed {!s32i, !s32i, !cir.array<!s32i x 8>}>
+!rec_anon_struct1 = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 5>}>
+!rec_anon_struct2 = !cir.record<struct {!cir.ptr<!u8i>, !cir.ptr<!u8i>, !cir.ptr<!u8i>}>
!rec_S1 = !cir.record<struct "S1" {!s32i, !s32i}>
!rec_Sc = !cir.record<struct "Sc" {!u8i, !u16i, !u32i}>
@@ -42,18 +43,22 @@
!rec_Node = !cir.record<struct "Node" {!cir.ptr<!cir.record<struct "Node">>}>
// CHECK-DAG: !cir.record<struct "Node" {!cir.ptr<!cir.record<struct "Node">>}>
+
+
module {
cir.global external @p1 = #cir.ptr<null> : !cir.ptr<!rec_S>
cir.global external @p2 = #cir.ptr<null> : !cir.ptr<!rec_U>
cir.global external @p3 = #cir.ptr<null> : !cir.ptr<!rec_C>
+ cir.global external @arr = #cir.const_record<{#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.zero : !cir.array<!s32i x 8>}> : !rec_anon_struct
// CHECK: cir.global external @p1 = #cir.ptr<null> : !cir.ptr<!rec_S>
// CHECK: cir.global external @p2 = #cir.ptr<null> : !cir.ptr<!rec_U>
// CHECK: cir.global external @p3 = #cir.ptr<null> : !cir.ptr<!rec_C>
+// CHECK: cir.global external @arr = #cir.const_record<{#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.zero : !cir.array<!s32i x 8>}> : !rec_anon_struct
// Dummy function to use types and force them to be printed.
cir.func @useTypes(%arg0: !rec_Node,
- %arg1: !rec_anon_struct1,
- %arg2: !rec_anon_struct,
+ %arg1: !rec_anon_struct2,
+ %arg2: !rec_anon_struct1,
%arg3: !rec_S1,
%arg4: !rec_Ac,
%arg5: !rec_P1,
diff --git a/clang/test/CIR/IR/throw.cir b/clang/test/CIR/IR/throw.cir
new file mode 100644
index 0000000..8b24b48
--- /dev/null
+++ b/clang/test/CIR/IR/throw.cir
@@ -0,0 +1,63 @@
+// RUN: cir-opt %s | FileCheck %s
+
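+// A plausible C++ source for these functions (an assumption; the commit does
+// not include it). A bare cir.throw corresponds to a rethrow:
+//
+//   void throw_with_no_return() { throw; }
+//
+//   int throw_with_no_return_2(int a, int b) {
+//     if (b == 0)
+//       throw;
+//     return a / b;
+//   }
+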
+!s32i = !cir.int<s, 32>
+
+module {
+
+cir.func @throw_with_no_return() {
+ cir.throw
+ cir.unreachable
+}
+
+// CHECK: cir.func @throw_with_no_return() {
+// CHECK: cir.throw
+// CHECK: cir.unreachable
+// CHECK: }
+
+cir.func @throw_with_no_return_2(%arg0: !s32i, %arg1: !s32i) -> !s32i {
+ %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+ %1 = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init]
+ %2 = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+ cir.store %arg0, %0 : !s32i, !cir.ptr<!s32i>
+ cir.store %arg1, %1 : !s32i, !cir.ptr<!s32i>
+ cir.scope {
+ %7 = cir.load align(4) %1 : !cir.ptr<!s32i>, !s32i
+ %8 = cir.const #cir.int<0> : !s32i
+ %9 = cir.cmp(eq, %7, %8) : !s32i, !cir.bool
+ cir.if %9 {
+ cir.throw
+ cir.unreachable
+ }
+ }
+ %3 = cir.load align(4) %0 : !cir.ptr<!s32i>, !s32i
+ %4 = cir.load align(4) %1 : !cir.ptr<!s32i>, !s32i
+ %5 = cir.binop(div, %3, %4) : !s32i
+ cir.store %5, %2 : !s32i, !cir.ptr<!s32i>
+ %6 = cir.load %2 : !cir.ptr<!s32i>, !s32i
+ cir.return %6 : !s32i
+}
+
+// CHECK: cir.func @throw_with_no_return_2(%[[ARG_0:.*]]: !s32i, %[[ARG_1:.*]]: !s32i) -> !s32i {
+// CHECK: %[[A_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CHECK: %[[B_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init]
+// CHECK: %[[RES_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CHECK: cir.store %[[ARG_0]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CHECK: cir.store %[[ARG_1]], %[[B_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CHECK: cir.scope {
+// CHECK: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CHECK: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: %[[IS_B_ZERO:.*]] = cir.cmp(eq, %[[TMP_B]], %[[CONST_0]]) : !s32i, !cir.bool
+// CHECK: cir.if %[[IS_B_ZERO]] {
+// CHECK: cir.throw
+// CHECK: cir.unreachable
+// CHECK: }
+// CHECK: }
+// CHECK: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CHECK: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CHECK: %[[DIV_A_B:.*]] = cir.binop(div, %[[TMP_A:.*]], %[[TMP_B:.*]]) : !s32i
+// CHECK: cir.store %[[DIV_A_B]], %[[RES_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CHECK: %[[RESULT:.*]] = cir.load %[[RES_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CHECK: cir.return %[[RESULT]] : !s32i
+// CHECK: }
+
+}
diff --git a/clang/test/CIR/IR/vtable-addrpt.cir b/clang/test/CIR/IR/vtable-addrpt.cir
new file mode 100644
index 0000000..0b809cc
--- /dev/null
+++ b/clang/test/CIR/IR/vtable-addrpt.cir
@@ -0,0 +1,23 @@
+// RUN: cir-opt %s | FileCheck %s
+
+// Test the parsing and printing of a constructor that uses a vtable address_point op.
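+//
+// A plausible C++ source behind this test (an assumption; the commit does not
+// include it):
+//
+//   struct S {
+//     virtual void key();
+//     virtual void nonKey();
+//     S();
+//   };
+//   S::S() {}  // the constructor stores the vtable address point into the vptr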
+
+!u32i = !cir.int<u, 32>
+!u8i = !cir.int<u, 8>
+!rec_anon_struct = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 4>}>
+!rec_S = !cir.record<struct "S" {!cir.vptr}>
+
+module {
+ cir.global "private" external @_ZTV1S : !rec_anon_struct {alignment = 8 : i64}
+ cir.func @_ZN1SC2Ev(%arg0: !cir.ptr<!rec_S>) {
+ %0 = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["this", init] {alignment = 8 : i64}
+ cir.store %arg0, %0 : !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>
+ %1 = cir.load %0 : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
+ %2 = cir.vtable.address_point(@_ZTV1S, address_point = <index = 0, offset = 2>) : !cir.vptr
+ %3 = cir.cast(bitcast, %1 : !cir.ptr<!rec_S>), !cir.ptr<!cir.vptr>
+ cir.store align(8) %2, %3 : !cir.vptr, !cir.ptr<!cir.vptr>
+ cir.return
+ }
+}
+
+// CHECK: cir.vtable.address_point(@_ZTV1S, address_point = <index = 0, offset = 2>) : !cir.vptr
diff --git a/clang/test/CIR/IR/vtable-attr.cir b/clang/test/CIR/IR/vtable-attr.cir
new file mode 100644
index 0000000..3854208
--- /dev/null
+++ b/clang/test/CIR/IR/vtable-attr.cir
@@ -0,0 +1,19 @@
+// RUN: cir-opt %s | FileCheck %s
+
+!rec_Q = !cir.record<struct "Q" {!cir.vptr}>
+!rec_S = !cir.record<struct "S" {!cir.vptr}>
+!rec_S2 = !cir.record<struct "S2" {!rec_Q, !rec_S}>
+!u8i = !cir.int<u, 8>
+!rec_anon_struct = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 4>}>
+!rec_anon_struct1 = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 4>, !cir.array<!cir.ptr<!u8i> x 3>}>
+module {
+ cir.global external @_ZTV1S = #cir.vtable<{#cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S3keyEv> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S6nonKeyEv> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 4>}> : !rec_anon_struct {alignment = 8 : i64}
+ // CHECK: cir.global external @_ZTV1S = #cir.vtable<{#cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S3keyEv> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S6nonKeyEv> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 4>}> : !rec_anon_struct {alignment = 8 : i64}
+
+ cir.global external @_ZTV2S2 = #cir.vtable<{#cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S3keyEv> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S6nonKeyEv> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 4>, #cir.const_array<[#cir.ptr<-8 : i64> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.global_view<@_ZN2S23keyEv> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 3>}> : !rec_anon_struct1 {alignment = 8 : i64}
+ // CHECK: cir.global external @_ZTV2S2 = #cir.vtable<{#cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S3keyEv> : !cir.ptr<!u8i>, #cir.global_view<@_ZN1S6nonKeyEv> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 4>, #cir.const_array<[#cir.ptr<-8 : i64> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.global_view<@_ZN2S23keyEv> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 3>}> : !rec_anon_struct1 {alignment = 8 : i64}
+
+ cir.func private dso_local @_ZN1S3keyEv(%arg0: !cir.ptr<!rec_S>)
+ cir.func private dso_local @_ZN1S6nonKeyEv(%arg0: !cir.ptr<!rec_S>)
+ cir.func private dso_local @_ZN2S23keyEv(%arg0: !cir.ptr<!rec_S2>)
+}
diff --git a/clang/test/CIR/IR/vtt-addrpoint.cir b/clang/test/CIR/IR/vtt-addrpoint.cir
new file mode 100644
index 0000000..f05bb78
--- /dev/null
+++ b/clang/test/CIR/IR/vtt-addrpoint.cir
@@ -0,0 +1,55 @@
+// RUN: cir-opt %s | FileCheck %s
+
+// Test the parsing and printing of the two forms of the vtt.address_point op,
+// as they will appear in constructors.
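+//
+// A plausible C++ hierarchy behind this test (an assumption; the commit does
+// not include it). A VTT is emitted because C has a virtual base:
+//
+//   struct A {};
+//   struct B : virtual A {};
+//   struct C : B {};
+//
+// The base-object constructor _ZN1CC2Ev receives the VTT as a parameter, while
+// the complete-object constructor _ZN1CC1Ev addresses @_ZTT1C directly.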
+
+!u8i = !cir.int<u, 8>
+!void = !cir.void
+!rec_A = !cir.record<struct "A" {!u8i}>
+!rec_B = !cir.record<struct "B" {!cir.vptr}>
+!rec_C = !cir.record<struct "C" {!rec_B}>
+!rec_anon_struct = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 3>}>
+module {
+ cir.func private @_ZN1AC2Ev(!cir.ptr<!rec_A>)
+ cir.func private @_ZN1BC2Ev(!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>)
+ cir.func dso_local @_ZN1CC2Ev(%arg0: !cir.ptr<!rec_C>, %arg1: !cir.ptr<!cir.ptr<!void>>) {
+ %0 = cir.alloca !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>, ["this", init] {alignment = 8 : i64}
+ %1 = cir.alloca !cir.ptr<!cir.ptr<!void>>, !cir.ptr<!cir.ptr<!cir.ptr<!void>>>, ["vtt", init] {alignment = 8 : i64}
+ cir.store %arg0, %0 : !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>
+ cir.store %arg1, %1 : !cir.ptr<!cir.ptr<!void>>, !cir.ptr<!cir.ptr<!cir.ptr<!void>>>
+ %2 = cir.load %0 : !cir.ptr<!cir.ptr<!rec_C>>, !cir.ptr<!rec_C>
+ %3 = cir.load align(8) %1 : !cir.ptr<!cir.ptr<!cir.ptr<!void>>>, !cir.ptr<!cir.ptr<!void>>
+ %4 = cir.base_class_addr %2 : !cir.ptr<!rec_C> nonnull [0] -> !cir.ptr<!rec_B>
+
+ %5 = cir.vtt.address_point %3 : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+ // CHECK: cir.vtt.address_point %{{.*}} : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+
+ cir.call @_ZN1BC2Ev(%4, %5) : (!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>) -> ()
+ %6 = cir.vtt.address_point %3 : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
+ %7 = cir.cast(bitcast, %6 : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+ %8 = cir.load align(8) %7 : !cir.ptr<!cir.vptr>, !cir.vptr
+ %9 = cir.vtable.get_vptr %2 : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
+ cir.store align(8) %8, %9 : !cir.vptr, !cir.ptr<!cir.vptr>
+ cir.return
+ }
+ cir.global linkonce_odr dso_local @_ZTV1C = #cir.vtable<{#cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 3>}> : !rec_anon_struct {alignment = 8 : i64}
+ cir.global linkonce_odr @_ZTT1C = #cir.const_array<[#cir.global_view<@_ZTV1C, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>, #cir.global_view<@_ZTC1C0_1B, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 2> {alignment = 8 : i64}
+ cir.func dso_local @_ZN1CC1Ev(%arg0: !cir.ptr<!rec_C>) {
+ %0 = cir.alloca !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>, ["this", init] {alignment = 8 : i64}
+ cir.store %arg0, %0 : !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>
+ %1 = cir.load %0 : !cir.ptr<!cir.ptr<!rec_C>>, !cir.ptr<!rec_C>
+ %2 = cir.base_class_addr %1 : !cir.ptr<!rec_C> nonnull [0] -> !cir.ptr<!rec_A>
+ cir.call @_ZN1AC2Ev(%2) : (!cir.ptr<!rec_A>) -> ()
+ %3 = cir.base_class_addr %1 : !cir.ptr<!rec_C> nonnull [0] -> !cir.ptr<!rec_B>
+
+ %4 = cir.vtt.address_point @_ZTT1C, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+ // CHECK: cir.vtt.address_point @_ZTT1C, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+
+ cir.call @_ZN1BC2Ev(%3, %4) : (!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>) -> ()
+ %5 = cir.vtable.address_point(@_ZTV1C, address_point = <index = 0, offset = 3>) : !cir.vptr
+ %6 = cir.vtable.get_vptr %1 : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
+ cir.store align(8) %5, %6 : !cir.vptr, !cir.ptr<!cir.vptr>
+ cir.return
+ }
+ cir.global linkonce_odr dso_local @_ZTC1C0_1B = #cir.const_record<{#cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 3>}> : !rec_anon_struct {alignment = 8 : i64}
+}
diff --git a/clang/test/CIR/Lowering/array.cpp b/clang/test/CIR/Lowering/array.cpp
index 438d41e..40ad986 100644
--- a/clang/test/CIR/Lowering/array.cpp
+++ b/clang/test/CIR/Lowering/array.cpp
@@ -19,7 +19,7 @@ int dd[3][2] = {{1, 2}, {3, 4}, {5, 6}};
// CHECK: [i32 3, i32 4], [2 x i32] [i32 5, i32 6]]
int e[10] = {1, 2};
-// CHECK: @e = global [10 x i32] [i32 1, i32 2, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0]
+// CHECK: @e = global <{ i32, i32, [8 x i32] }> <{ i32 1, i32 2, [8 x i32] zeroinitializer }>
int f[5] = {1, 2};
// CHECK: @f = global [5 x i32] [i32 1, i32 2, i32 0, i32 0, i32 0]
@@ -57,17 +57,28 @@ void func() {
void func2() {
int arr[2] = {5};
}
+
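+// Trailing elements without explicit initializers are zero-initialized with a
+// pointer-increment loop (%[[CUR]] vs. %[[END_PTR]] below) rather than fully
+// unrolled stores; func5 and func7 below follow the same pattern.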
// CHECK: define{{.*}} void @_Z5func2v()
-// CHECK: %[[ARR_ALLOCA:.*]] = alloca [2 x i32], i64 1, align 4
-// CHECK: %[[TMP:.*]] = alloca ptr, i64 1, align 8
-// CHECK: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR_ALLOCA]], i32 0
-// CHECK: store i32 5, ptr %[[ARR_PTR]], align 4
-// CHECK: %[[ELE_1_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 1
-// CHECK: store ptr %[[ELE_1_PTR]], ptr %[[TMP]], align 8
-// CHECK: %[[TMP2:.*]] = load ptr, ptr %[[TMP]], align 8
-// CHECK: store i32 0, ptr %[[TMP2]], align 4
-// CHECK: %[[ELE_1:.*]] = getelementptr i32, ptr %[[TMP2]], i64 1
-// CHECK: store ptr %[[ELE_1]], ptr %[[TMP]], align 8
+// CHECK: %[[ARR:.*]] = alloca [2 x i32], i64 1, align 4
+// CHECK: %[[TMP:.*]] = alloca ptr, i64 1, align 8
+// CHECK: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR]], i32 0
+// CHECK: store i32 5, ptr %[[ARR_PTR]], align 4
+// CHECK: %[[ELE_1_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 1
+// CHECK: store ptr %[[ELE_1_PTR]], ptr %[[TMP]], align 8
+// CHECK: %[[END_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 2
+// CHECK: br label %[[LOOP_BODY:.*]]
+// CHECK: [[LOOP_NEXT:.*]]:
+// CHECK: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
+// CHECK: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[END_PTR]]
+// CHECK: br i1 %[[CMP]], label %[[LOOP_BODY]], label %[[LOOP_END:.*]]
+// CHECK: [[LOOP_BODY]]:
+// CHECK: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
+// CHECK: store i32 0, ptr %[[CUR]], align 4
+// CHECK: %[[NEXT:.*]] = getelementptr i32, ptr %[[CUR]], i64 1
+// CHECK: store ptr %[[NEXT]], ptr %[[TMP]], align 8
+// CHECK: br label %[[LOOP_NEXT:.*]]
+// CHECK: [[LOOP_END]]:
+// CHECK: ret void
void func3() {
int arr3[2] = {5, 6};
@@ -103,17 +114,27 @@ void func5() {
int arr[2][1] = {{5}};
}
// CHECK: define{{.*}} void @_Z5func5v()
-// CHECK: %[[ARR_ALLOCA:.*]] = alloca [2 x [1 x i32]], i64 1, align 4
-// CHECK: %[[TMP:.*]] = alloca ptr, i64 1, align 8
-// CHECK: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_ALLOCA]], i32 0
-// CHECK: %[[ARR_0:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i32 0
-// CHECK: store i32 5, ptr %[[ARR_0]], align 4
-// CHECK: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1
-// CHECK: store ptr %[[ARR_1]], ptr %[[TMP]], align 8
-// CHECK: %[[ARR_1_VAL:.*]] = load ptr, ptr %[[TMP]], align 8
-// CHECK: store [1 x i32] zeroinitializer, ptr %[[ARR_1_VAL]], align 4
-// CHECK: %[[ARR_1_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_1_VAL]], i64 1
-// CHECK: store ptr %[[ARR_1_PTR]], ptr %[[TMP]], align 8
+// CHECK: %[[ARR:.*]] = alloca [2 x [1 x i32]], i64 1, align 4
+// CHECK: %[[TMP:.*]] = alloca ptr, i64 1, align 8
+// CHECK: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR]], i32 0
+// CHECK: %[[ARR_0:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i32 0
+// CHECK: store i32 5, ptr %[[ARR_0]], align 4
+// CHECK: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1
+// CHECK: store ptr %[[ARR_1]], ptr %[[TMP]], align 8
+// CHECK: %[[END_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 2
+// CHECK: br label %[[LOOP_BODY:.*]]
+// CHECK: [[LOOP_NEXT:.*]]:
+// CHECK: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
+// CHECK: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[END_PTR]]
+// CHECK: br i1 %[[CMP]], label %[[LOOP_BODY]], label %[[LOOP_END:.*]]
+// CHECK: [[LOOP_BODY]]:
+// CHECK: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
+// CHECK: store [1 x i32] zeroinitializer, ptr %[[CUR]], align 4
+// CHECK: %[[NEXT:.*]] = getelementptr [1 x i32], ptr %[[CUR]], i64 1
+// CHECK: store ptr %[[NEXT]], ptr %[[TMP]], align 8
+// CHECK: br label %[[LOOP_NEXT:.*]]
+// CHECK: [[LOOP_END]]:
+// CHECK: ret void
void func6() {
int x = 4;
@@ -133,14 +154,24 @@ void func7() {
int* arr[1] = {};
}
// CHECK: define{{.*}} void @_Z5func7v()
-// CHECK: %[[ARR:.*]] = alloca [1 x ptr], i64 1, align 8
-// CHECK: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8
-// CHECK: %[[ELE_PTR:.*]] = getelementptr ptr, ptr %[[ARR]], i32 0
-// CHECK: store ptr %[[ELE_PTR]], ptr %[[ALLOCA]], align 8
-// CHECK: %[[TMP:.*]] = load ptr, ptr %[[ALLOCA]], align 8
-// CHECK: store ptr null, ptr %[[TMP]], align 8
-// CHECK: %[[ELE:.*]] = getelementptr ptr, ptr %[[TMP]], i64 1
-// CHECK: store ptr %[[ELE]], ptr %[[ALLOCA]], align 8
+// CHECK: %[[ARR:.*]] = alloca [1 x ptr], i64 1, align 8
+// CHECK: %[[TMP:.*]] = alloca ptr, i64 1, align 8
+// CHECK: %[[ARR_PTR:.*]] = getelementptr ptr, ptr %[[ARR]], i32 0
+// CHECK: store ptr %[[ARR_PTR]], ptr %[[TMP]], align 8
+// CHECK: %[[END_PTR:.*]] = getelementptr ptr, ptr %[[ARR_PTR]], i64 1
+// CHECK: br label %[[LOOP_BODY:.*]]
+// CHECK: [[LOOP_NEXT:.*]]:
+// CHECK: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
+// CHECK: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[END_PTR]]
+// CHECK: br i1 %[[CMP]], label %[[LOOP_BODY]], label %[[LOOP_END:.*]]
+// CHECK: [[LOOP_BODY]]:
+// CHECK: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
+// CHECK: store ptr null, ptr %[[CUR]], align 8
+// CHECK: %[[NEXT:.*]] = getelementptr ptr, ptr %[[CUR]], i64 1
+// CHECK: store ptr %[[NEXT]], ptr %[[TMP]], align 8
+// CHECK: br label %[[LOOP_NEXT:.*]]
+// CHECK: [[LOOP_END]]:
+// CHECK: ret void
void func8(int p[10]) {}
// CHECK: define{{.*}} void @_Z5func8Pi(ptr {{%.*}})
diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir
new file mode 100644
index 0000000..cd3a57d
--- /dev/null
+++ b/clang/test/CIR/Lowering/goto.cir
@@ -0,0 +1,52 @@
+// RUN: cir-opt %s --pass-pipeline='builtin.module(cir-to-llvm,canonicalize{region-simplify=disabled})' -o - | FileCheck %s -check-prefix=MLIR
+
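+// A plausible C++ source shape for this test (an assumption; the commit does
+// not include it):
+//
+//   int gotoFromIf(int x) {
+//     if (x > 5)
+//       goto err;
+//     return 0;
+//   err:
+//     return -1;
+//   }
+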
+!s32i = !cir.int<s, 32>
+
+module {
+
+ cir.func @gotoFromIf(%arg0: !s32i) -> !s32i {
+ %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init] {alignment = 4 : i64}
+ %1 = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
+ cir.store %arg0, %0 : !s32i, !cir.ptr<!s32i>
+ cir.scope {
+ %6 = cir.load %0 : !cir.ptr<!s32i>, !s32i
+ %7 = cir.const #cir.int<5> : !s32i
+ %8 = cir.cmp(gt, %6, %7) : !s32i, !cir.bool
+ cir.if %8 {
+ cir.goto "err"
+ }
+ }
+ %2 = cir.const #cir.int<0> : !s32i
+ cir.store %2, %1 : !s32i, !cir.ptr<!s32i>
+ cir.br ^bb1
+ ^bb1:
+ %3 = cir.load %1 : !cir.ptr<!s32i>, !s32i
+ cir.return %3 : !s32i
+ ^bb2:
+ cir.label "err"
+ %4 = cir.const #cir.int<1> : !s32i
+ %5 = cir.unary(minus, %4) : !s32i, !s32i
+ cir.store %5, %1 : !s32i, !cir.ptr<!s32i>
+ cir.br ^bb1
+ }
+
+// MLIR: llvm.func @gotoFromIf
+// MLIR: %[[#One:]] = llvm.mlir.constant(1 : i32) : i32
+// MLIR: %[[#Zero:]] = llvm.mlir.constant(0 : i32) : i32
+// MLIR: llvm.cond_br {{.*}}, ^bb[[#COND_YES:]], ^bb[[#COND_NO:]]
+// MLIR: ^bb[[#COND_YES]]:
+// MLIR: llvm.br ^bb[[#GOTO_BLK:]]
+// MLIR: ^bb[[#COND_NO]]:
+// MLIR: llvm.br ^bb[[#BLK:]]
+// MLIR: ^bb[[#BLK]]:
+// MLIR: llvm.store %[[#Zero]], %[[#Ret_val_addr:]] {{.*}}: i32, !llvm.ptr
+// MLIR: llvm.br ^bb[[#RETURN:]]
+// MLIR: ^bb[[#RETURN]]:
+// MLIR: %[[#Ret_val:]] = llvm.load %[[#Ret_val_addr]] {alignment = 4 : i64} : !llvm.ptr -> i32
+// MLIR: llvm.return %[[#Ret_val]] : i32
+// MLIR: ^bb[[#GOTO_BLK]]:
+// MLIR: %[[#Neg_one:]] = llvm.sub %[[#Zero]], %[[#One]] : i32
+// MLIR: llvm.store %[[#Neg_one]], %[[#Ret_val_addr]] {{.*}}: i32, !llvm.ptr
+// MLIR: llvm.br ^bb[[#RETURN]]
+// MLIR: }
+}
diff --git a/clang/test/CIR/Lowering/inline-asm.cir b/clang/test/CIR/Lowering/inline-asm.cir
new file mode 100644
index 0000000..a8545d4
--- /dev/null
+++ b/clang/test/CIR/Lowering/inline-asm.cir
@@ -0,0 +1,86 @@
+// RUN: cir-translate %s -cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering | FileCheck %s
+
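+// Plausible C++ sources for two of these functions (assumptions; the commit
+// does not include them):
+//
+//   void f2() { __asm__ volatile(""); }  // empty asm with side effects
+//
+//   unsigned f7(unsigned x) {            // "=r,0": tied in_out operand
+//     __asm__ volatile("addl $42, %0" : "=r"(x) : "0"(x));
+//     return x;
+//   }
+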
+!s32i = !cir.int<s, 32>
+!u32i = !cir.int<u, 32>
+
+module {
+cir.func @f1() {
+ // CHECK: call void asm "", "~{dirflag},~{fpsr},~{flags}"()
+ cir.asm(x86_att,
+ out = [],
+ in = [],
+ in_out = [],
+ {"" "~{dirflag},~{fpsr},~{flags}"})
+ cir.return
+}
+
+cir.func @f2() {
+ // CHECK: call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"()
+ cir.asm(x86_att,
+ out = [],
+ in = [],
+ in_out = [],
+ {"" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+ cir.return
+}
+
+cir.func @f3() {
+ // CHECK: call void asm sideeffect "abc", "~{dirflag},~{fpsr},~{flags}"()
+ cir.asm(x86_att,
+ out = [],
+ in = [],
+ in_out = [],
+ {"abc" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+ cir.return
+}
+
+cir.func @f4(%arg0: !s32i) {
+ %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init] {alignment = 4 : i64}
+ cir.store %arg0, %0 : !s32i, !cir.ptr<!s32i>
+ // CHECK: call void asm sideeffect "", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %2)
+ cir.asm(x86_att,
+ out = [],
+ in = [%0 : !cir.ptr<!s32i> (maybe_memory)],
+ in_out = [],
+ {"" "*m,~{dirflag},~{fpsr},~{flags}"}) side_effects
+ cir.return
+}
+
+cir.func @f5() {
+ // CHECK: call void asm inteldialect "", "~{dirflag},~{fpsr},~{flags}"()
+ cir.asm(x86_intel,
+ out = [],
+ in = [],
+ in_out = [],
+ {"" "~{dirflag},~{fpsr},~{flags}"})
+ cir.return
+}
+
+cir.func @f6() -> !s32i {
+ %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init] {alignment = 4 : i64}
+ // CHECK: %2 = call i32 asm sideeffect "movl $$42, $0", "=r,~{dirflag},~{fpsr},~{flags}"()
+ %1 = cir.asm(x86_att,
+ out = [],
+ in = [],
+ in_out = [],
+ {"movl $$42, $0" "=r,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !s32i
+ cir.store align(4) %1, %0 : !s32i, !cir.ptr<!s32i>
+ %3 = cir.load align(4) %0 : !cir.ptr<!s32i>, !s32i
+ cir.return %3 : !s32i
+}
+
+cir.func @f7(%arg0: !u32i) -> !u32i {
+ %0 = cir.alloca !u32i, !cir.ptr<!u32i>, ["x", init] {alignment = 4 : i64}
+ cir.store %arg0, %0 : !u32i, !cir.ptr<!u32i>
+ %1 = cir.load align(4) %0 : !cir.ptr<!u32i>, !u32i
+ // CHECK: %4 = call i32 asm sideeffect "addl $$42, $0", "=r,0,~{dirflag},~{fpsr},~{flags}"(i32 %3)
+ %2 = cir.asm(x86_att,
+ out = [],
+ in = [],
+ in_out = [%1 : !u32i],
+ {"addl $$42, $0" "=r,0,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !u32i
+ cir.store align(4) %2, %0 : !u32i, !cir.ptr<!u32i>
+ %3 = cir.load align(4) %0 : !cir.ptr<!u32i>, !u32i
+ cir.return %3 : !u32i
+}
+}
diff --git a/clang/test/CIR/Lowering/module-asm.cir b/clang/test/CIR/Lowering/module-asm.cir
new file mode 100644
index 0000000..b802cda
--- /dev/null
+++ b/clang/test/CIR/Lowering/module-asm.cir
@@ -0,0 +1,11 @@
+// RUN: cir-opt %s -cir-to-llvm -o %t.cir
+// RUN: FileCheck %s --input-file=%t.cir
+
+// RUN: cir-translate -cir-to-llvmir --disable-cc-lowering -o %t.ll %s
+// RUN: FileCheck -check-prefix=LLVM --input-file=%t.ll %s
+
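+// A plausible C++ source (an assumption; the commit does not include it):
+// file-scope asm is what lowers to module-level asm, e.g.
+//
+//   __asm__(".globl bar");
+//   __asm__(".globl foo");
+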
+// CHECK: llvm.module_asm = [".globl bar", ".globl foo"]
+// LLVM: module asm ".globl bar"
+// LLVM: module asm ".globl foo"
+module attributes {cir.module_asm = [".globl bar", ".globl foo"]} {
+}
diff --git a/clang/test/CIR/Lowering/vtt-addrpoint.cir b/clang/test/CIR/Lowering/vtt-addrpoint.cir
new file mode 100644
index 0000000..a3e7271
--- /dev/null
+++ b/clang/test/CIR/Lowering/vtt-addrpoint.cir
@@ -0,0 +1,59 @@
+// RUN: cir-translate %s -cir-to-llvmir --target x86_64-unknown-linux-gnu -o %t.ll
+// RUN: FileCheck %s --input-file=%t.ll
+
+// Test the lowering of the two forms of the vtt.address_point op, as they
+// will appear in constructors.
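+//
+// The same assumed C++ hierarchy as in clang/test/CIR/IR/vtt-addrpoint.cir
+// applies here: struct A {}; struct B : virtual A {}; struct C : B {};.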
+
+!u8i = !cir.int<u, 8>
+!void = !cir.void
+!rec_A = !cir.record<struct "A" {!u8i}>
+!rec_B = !cir.record<struct "B" {!cir.vptr}>
+!rec_C = !cir.record<struct "C" {!rec_B}>
+!rec_anon_struct = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 3>}>
+module {
+ cir.func private @_ZN1AC2Ev(!cir.ptr<!rec_A>)
+ cir.func private @_ZN1BC2Ev(!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>)
+ cir.func dso_local @_ZN1CC2Ev(%arg0: !cir.ptr<!rec_C>, %arg1: !cir.ptr<!cir.ptr<!void>>) {
+ %0 = cir.alloca !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>, ["this", init] {alignment = 8 : i64}
+ %1 = cir.alloca !cir.ptr<!cir.ptr<!void>>, !cir.ptr<!cir.ptr<!cir.ptr<!void>>>, ["vtt", init] {alignment = 8 : i64}
+ cir.store %arg0, %0 : !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>
+ cir.store %arg1, %1 : !cir.ptr<!cir.ptr<!void>>, !cir.ptr<!cir.ptr<!cir.ptr<!void>>>
+ %2 = cir.load %0 : !cir.ptr<!cir.ptr<!rec_C>>, !cir.ptr<!rec_C>
+ %3 = cir.load align(8) %1 : !cir.ptr<!cir.ptr<!cir.ptr<!void>>>, !cir.ptr<!cir.ptr<!void>>
+ %4 = cir.base_class_addr %2 : !cir.ptr<!rec_C> nonnull [0] -> !cir.ptr<!rec_B>
+ %5 = cir.vtt.address_point %3 : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+ cir.call @_ZN1BC2Ev(%4, %5) : (!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>) -> ()
+ %6 = cir.vtt.address_point %3 : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
+ %7 = cir.cast(bitcast, %6 : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+ %8 = cir.load align(8) %7 : !cir.ptr<!cir.vptr>, !cir.vptr
+ %9 = cir.vtable.get_vptr %2 : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
+ cir.store align(8) %8, %9 : !cir.vptr, !cir.ptr<!cir.vptr>
+ cir.return
+ }
+
+// CHECK: define{{.*}} void @_ZN1CC2Ev
+// CHECK: %[[VTT:.*]] = getelementptr inbounds i8, ptr %{{.*}}, i32 1
+// CHECK: call void @_ZN1BC2Ev(ptr %{{.*}}, ptr %[[VTT]])
+
+ cir.global linkonce_odr dso_local @_ZTV1C = #cir.vtable<{#cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 3>}> : !rec_anon_struct {alignment = 8 : i64}
+ cir.global linkonce_odr @_ZTT1C = #cir.const_array<[#cir.global_view<@_ZTV1C, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>, #cir.global_view<@_ZTC1C0_1B, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 2> {alignment = 8 : i64}
+ cir.func dso_local @_ZN1CC1Ev(%arg0: !cir.ptr<!rec_C>) {
+ %0 = cir.alloca !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>, ["this", init] {alignment = 8 : i64}
+ cir.store %arg0, %0 : !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>
+ %1 = cir.load %0 : !cir.ptr<!cir.ptr<!rec_C>>, !cir.ptr<!rec_C>
+ %2 = cir.base_class_addr %1 : !cir.ptr<!rec_C> nonnull [0] -> !cir.ptr<!rec_A>
+ cir.call @_ZN1AC2Ev(%2) : (!cir.ptr<!rec_A>) -> ()
+ %3 = cir.base_class_addr %1 : !cir.ptr<!rec_C> nonnull [0] -> !cir.ptr<!rec_B>
+ %4 = cir.vtt.address_point @_ZTT1C, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+ cir.call @_ZN1BC2Ev(%3, %4) : (!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>) -> ()
+ %5 = cir.vtable.address_point(@_ZTV1C, address_point = <index = 0, offset = 3>) : !cir.vptr
+ %6 = cir.vtable.get_vptr %1 : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
+ cir.store align(8) %5, %6 : !cir.vptr, !cir.ptr<!cir.vptr>
+ cir.return
+ }
+
+// CHECK: define{{.*}} void @_ZN1CC1Ev
+// CHECK: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1C, i64 24), ptr %{{.*}}
+
+ cir.global linkonce_odr dso_local @_ZTC1C0_1B = #cir.const_record<{#cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 3>}> : !rec_anon_struct {alignment = 8 : i64}
+}
diff --git a/clang/test/CMakeLists.txt b/clang/test/CMakeLists.txt
index 286c9d4..e9f4f83 100644
--- a/clang/test/CMakeLists.txt
+++ b/clang/test/CMakeLists.txt
@@ -26,7 +26,6 @@ llvm_canonicalize_cmake_booleans(
PPC_LINUX_DEFAULT_IEEELONGDOUBLE
LLVM_TOOL_LLVM_DRIVER_BUILD
LLVM_INCLUDE_SPIRV_TOOLS_TESTS
- LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS
)
# Run tests requiring Z3 headers only if LLVM was built with Z3
diff --git a/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp b/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp
index 5c281ac..76e2afb 100644
--- a/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp
+++ b/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp
@@ -197,7 +197,7 @@ namespace InhCtor {
// FIXME: Consider reusing the same diagnostic between dependent and non-dependent contexts
typedef int I;
struct UsingInt {
- using I::I; // expected-error {{'InhCtor::I' (aka 'int') is not a class, namespace, or enumeration}}
+ using I::I; // expected-error {{'I' (aka 'int') is not a class, namespace, or enumeration}}
};
template<typename T> struct UsingIntTemplate {
using T::T; // expected-error {{type 'int' cannot be used prior to '::' because it has no members}}
diff --git a/clang/test/CXX/class.access/p6.cpp b/clang/test/CXX/class.access/p6.cpp
index 15f2644..9a8aebe 100644
--- a/clang/test/CXX/class.access/p6.cpp
+++ b/clang/test/CXX/class.access/p6.cpp
@@ -176,7 +176,7 @@ namespace test8 {
};
void test(A &a) {
- if (a) return; // expected-error-re {{'operator void *(A::*)(){{( __attribute__\(\(thiscall\)\))?}} const' is a private member of 'test8::A'}}
+ if (a) return; // expected-error-re {{'operator void *(test8::A::*)(){{( __attribute__\(\(thiscall\)\))?}} const' is a private member of 'test8::A'}}
}
}
diff --git a/clang/test/CXX/class.derived/class.derived.general/p2.cpp b/clang/test/CXX/class.derived/class.derived.general/p2.cpp
index 1423eea..c700e8a 100644
--- a/clang/test/CXX/class.derived/class.derived.general/p2.cpp
+++ b/clang/test/CXX/class.derived/class.derived.general/p2.cpp
@@ -2,7 +2,7 @@
namespace CurrentInstantiation {
template<typename T>
- struct A0 { // expected-note 6{{definition of 'A0<T>' is not complete until the closing '}'}}
+ struct A0 { // expected-note 6{{definition of 'CurrentInstantiation::A0<T>' is not complete until the closing '}'}}
struct B0 : A0 { }; // expected-error {{base class has incomplete type}}
template<typename U>
@@ -26,7 +26,7 @@ namespace CurrentInstantiation {
};
template<typename U>
- struct B5 { // expected-note 2{{definition of 'B5<U>' is not complete until the closing '}'}}
+ struct B5 { // expected-note 2{{definition of 'CurrentInstantiation::A0::B5<U>' is not complete until the closing '}'}}
struct C0 : A0, B5 { }; // expected-error 2{{base class has incomplete type}}
template<typename V>
@@ -63,7 +63,7 @@ namespace CurrentInstantiation {
struct A0<T>::B5<U>::C3 : A0, B5 { };
template<typename T>
- struct A0<T*> { // expected-note 2{{definition of 'A0<T *>' is not complete until the closing '}'}}
+ struct A0<T*> { // expected-note 2{{definition of 'CurrentInstantiation::A0<T *>' is not complete until the closing '}'}}
struct B0 : A0 { }; // expected-error {{base class has incomplete type}}
template<typename U>
@@ -91,7 +91,7 @@ namespace MemberOfCurrentInstantiation {
template<typename U>
struct C : C<U> { }; // expected-error {{base class has incomplete type}}
- // expected-note@-1 {{definition of 'C<U>' is not complete until the closing '}'}}
+ // expected-note@-1 {{definition of 'MemberOfCurrentInstantiation::A0::C<U>' is not complete until the closing '}'}}
};
template<typename T>
diff --git a/clang/test/CXX/class/class.mem/class.mem.general/p8.cpp b/clang/test/CXX/class/class.mem/class.mem.general/p8.cpp
index 8cc9b41..c4aca32 100644
--- a/clang/test/CXX/class/class.mem/class.mem.general/p8.cpp
+++ b/clang/test/CXX/class/class.mem/class.mem.general/p8.cpp
@@ -45,7 +45,7 @@ namespace N1 {
void g0() noexcept(y); // expected-error {{use of undeclared identifier 'y'}}
void f1() noexcept(A::x);
- void g1() noexcept(A::y); // expected-error {{no member named 'y' in 'A<T>'}}
+ void g1() noexcept(A::y); // expected-error {{no member named 'y' in 'N1::A<T>'}}
template<typename U>
void f2() noexcept(x);
@@ -55,13 +55,13 @@ namespace N1 {
template<typename U>
void f3() noexcept(A::x);
template<typename U>
- void g3() noexcept(A::y); // expected-error {{no member named 'y' in 'A<T>'}}
+ void g3() noexcept(A::y); // expected-error {{no member named 'y' in 'N1::A<T>'}}
friend void f4() noexcept(x);
friend void g4() noexcept(y); // expected-error {{use of undeclared identifier 'y'}}
friend void f5() noexcept(A::x);
- friend void g5() noexcept(A::y); // expected-error {{no member named 'y' in 'A<T>'}}
+ friend void g5() noexcept(A::y); // expected-error {{no member named 'y' in 'N1::A<T>'}}
template<typename U>
friend void f6() noexcept(x);
@@ -71,7 +71,7 @@ namespace N1 {
template<typename U>
friend void f7() noexcept(A::x);
template<typename U>
- friend void g7() noexcept(A::y); // expected-error {{no member named 'y' in 'A<T>'}}
+ friend void g7() noexcept(A::y); // expected-error {{no member named 'y' in 'N1::A<T>'}}
static constexpr bool x = true;
};
diff --git a/clang/test/CXX/class/class.mem/p13.cpp b/clang/test/CXX/class/class.mem/p13.cpp
index d947586..a30aa5d 100644
--- a/clang/test/CXX/class/class.mem/p13.cpp
+++ b/clang/test/CXX/class/class.mem/p13.cpp
@@ -114,3 +114,12 @@ template<typename B> struct CtorDtorName : B {
CtorDtorName();
~CtorDtorName(); // expected-error {{identifier 'CtorDtorName' after '~' in destructor name does not name a type}}
};
+
+struct S { // expected-note {{'S' declared here}}
+ enum E {
+ R = 11,
+ S = 12 // expected-error {{member 'S' has the same name as its class}}
+ };
+ static_assert(E::R == 11, "E::R is not 11");
+ static_assert(E::S == 12, "E::S is not 12"); // expected-error {{no member named 'S' in 'S::E'}}
+};
diff --git a/clang/test/CXX/class/class.union/class.union.anon/p4.cpp b/clang/test/CXX/class/class.union/class.union.anon/p4.cpp
index a12ec38..f124ac1 100644
--- a/clang/test/CXX/class/class.union/class.union.anon/p4.cpp
+++ b/clang/test/CXX/class/class.union/class.union.anon/p4.cpp
@@ -8,3 +8,28 @@ union U {
int y = 1; // expected-error {{initializing multiple members of union}}
};
};
+
+namespace GH149985 {
+ union X {
+ enum {
+ csize = 42,
+ cs = sizeof(int) // expected-note {{previous declaration is here}}
+ };
+ struct {
+ int data; // expected-note {{previous declaration is here}}
+ union X *cs[csize] = {}; // expected-error {{member of anonymous struct redeclares}} expected-note {{previous initialization is here}}
+ };
+ struct {
+ int data; // expected-error {{member of anonymous struct redeclares}}
+ union X *ds[2] = {}; // expected-error {{initializing multiple members of union}}
+ };
+ };
+
+ union U {
+ int x; // expected-note {{previous declaration is here}}
+ union {
+ int x = {}; // expected-error {{member of anonymous union redeclares}} expected-note {{previous initialization is here}}
+ };
+ int y = {}; // expected-error {{initializing multiple members of union}}
+ };
+}
diff --git a/clang/test/CXX/dcl.dcl/dcl.attr/dcl.attr.nodiscard/p2.cpp b/clang/test/CXX/dcl.dcl/dcl.attr/dcl.attr.nodiscard/p2.cpp
index 0012ab9..7f933a4 100644
--- a/clang/test/CXX/dcl.dcl/dcl.attr/dcl.attr.nodiscard/p2.cpp
+++ b/clang/test/CXX/dcl.dcl/dcl.attr/dcl.attr.nodiscard/p2.cpp
@@ -115,7 +115,7 @@ void usage() {
S(); // expected-warning {{ignoring temporary created by a constructor declared with 'nodiscard' attribute}}
S('A'); // expected-warning {{ignoring temporary created by a constructor declared with 'nodiscard' attribute: Don't let that S-Char go!}}
S(1);
- S(2.2);
+ S(2.2); // expected-warning {{ignoring temporary created by a constructor declared with 'gnu::warn_unused_result' attribute}}
Y(); // expected-warning {{ignoring temporary of type 'Y' declared with 'nodiscard' attribute: Don't throw me away either!}}
S s;
ConvertTo{}; // expected-warning {{ignoring return value of type 'ConvertTo' declared with 'nodiscard' attribute: Don't throw me away!}}
diff --git a/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp
index 8bdd4905..c7b331e 100644
--- a/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp
+++ b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp
@@ -38,7 +38,7 @@ template <> struct B<A> {
void b1(struct B<float>);
void b2(class B<float>);
-void b3(union B<float>); // expected-error {{use of 'B<float>' with tag type that does not match previous declaration}}
+void b3(union B<float>); // expected-error {{use of 'union B<float>' with tag type that does not match previous declaration}}
//void b4(enum B<float>); // this just doesn't parse; you can't template an enum directly
void c1(struct B<float>::Member);
diff --git a/clang/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p5-examples.cpp b/clang/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p5-examples.cpp
index 77aef99..ad3b014 100644
--- a/clang/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p5-examples.cpp
+++ b/clang/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p5-examples.cpp
@@ -22,7 +22,7 @@ void example1() {
// CHECK: VarDecl{{.*}}rca 'const A &'
// CHECK: ImplicitCastExpr{{.*}}'const A' lvalue <DerivedToBase (A)>
// CHECK-NOT: MaterializeTemporaryExpr
- // CHECK: ImplicitCastExpr{{.*}}'const B' lvalue <NoOp>
+ // CHECK: ImplicitCastExpr{{.*}}'const struct B' lvalue <NoOp>
const A& rca = b;
}
diff --git a/clang/test/CXX/dcl.decl/dcl.meaning/dcl.mptr/p2.cpp b/clang/test/CXX/dcl.decl/dcl.meaning/dcl.mptr/p2.cpp
index a06b107..bb1eb73 100644
--- a/clang/test/CXX/dcl.decl/dcl.meaning/dcl.mptr/p2.cpp
+++ b/clang/test/CXX/dcl.decl/dcl.meaning/dcl.mptr/p2.cpp
@@ -59,6 +59,6 @@ namespace TypoCorrection {
int A<T>::template typo<int>::* f();
template<typename T>
- int A<T>::typo<int>::* g(); // expected-error {{no template named 'typo' in 'A<T>'; did you mean 'Typo'?}}
+ int A<T>::typo<int>::* g(); // expected-error {{no template named 'typo' in 'TypoCorrection::A<T>'; did you mean 'Typo'?}}
// expected-error@-1 {{expected unqualified-id}}
}
diff --git a/clang/test/CXX/dcl/dcl.fct/p17.cpp b/clang/test/CXX/dcl/dcl.fct/p17.cpp
index 4a81875..431a8fb 100644
--- a/clang/test/CXX/dcl/dcl.fct/p17.cpp
+++ b/clang/test/CXX/dcl/dcl.fct/p17.cpp
@@ -109,7 +109,7 @@ namespace unconstrained {
template<typename T>
template<typename U>
constexpr auto S<T>::f2(auto x, U u, T t) -> decltype(x + u + t) { return x + u + t; }
- // expected-error@-1 {{out-of-line definition of 'f2' does not match any declaration in 'S<T>'}}
+ // expected-error@-1 {{out-of-line definition of 'f2' does not match any declaration in 'unconstrained::S<T>'}}
// expected-note@#defined-here {{S defined here}}
template<typename T>
diff --git a/clang/test/CXX/drs/cwg0xx.cpp b/clang/test/CXX/drs/cwg0xx.cpp
index 4d4e2f6..805be67 100644
--- a/clang/test/CXX/drs/cwg0xx.cpp
+++ b/clang/test/CXX/drs/cwg0xx.cpp
@@ -244,7 +244,7 @@ namespace cwg16 { // cwg16: 2.8
// expected-error@#cwg16-A-f-call {{'A' is a private member of 'cwg16::A'}}
// expected-note@#cwg16-B {{constrained by implicitly private inheritance here}}
// expected-note@#cwg16-A {{member is declared here}}
- // expected-error@#cwg16-A-f-call {{cannot cast 'cwg16::C' to its private base class 'cwg16::A'}}
+ // expected-error@#cwg16-A-f-call {{cannot cast 'cwg16::C' to its private base class 'A'}}
// expected-note@#cwg16-B {{implicitly declared private here}}
}
};
@@ -838,7 +838,7 @@ namespace cwg52 { // cwg52: 2.8
// expected-error@#cwg52-k {{'A' is a private member of 'cwg52::A'}}
// expected-note@#cwg52-B {{constrained by private inheritance here}}
// expected-note@#cwg52-A {{member is declared here}}
- // expected-error@#cwg52-k {{cannot cast 'struct B' to its private base class 'cwg52::A'}}
+ // expected-error@#cwg52-k {{cannot cast 'struct B' to its private base class 'A'}}
// expected-note@#cwg52-B {{declared private here}}
} // namespace cwg52
@@ -859,7 +859,7 @@ namespace cwg54 { // cwg54: 2.8
// expected-error@-1 {{cannot cast 'struct B' to its private base class 'A'}}
// expected-note@#cwg54-B {{declared private here}}
int A::*smab = static_cast<int A::*>(&B::b);
- // expected-error@-1 {{cannot cast 'B' to its private base class 'A'}}
+ // expected-error@-1 {{cannot cast 'cwg54::B' to its private base class 'A'}}
// expected-note@#cwg54-B {{declared private here}}
B &sba = static_cast<B&>(a);
// expected-error@-1 {{cannot cast private base class 'cwg54::A' to 'cwg54::B'}}
@@ -874,7 +874,7 @@ namespace cwg54 { // cwg54: 2.8
V &svb = static_cast<V&>(b);
V *spvb = static_cast<V*>(&b);
int V::*smvb = static_cast<int V::*>(&B::b);
- // expected-error@-1 {{conversion from pointer to member of class 'B' to pointer to member of class 'V' via virtual base 'cwg54::V' is not allowed}}
+ // expected-error@-1 {{conversion from pointer to member of class 'cwg54::B' to pointer to member of class 'V' via virtual base 'cwg54::V' is not allowed}}
B &sbv = static_cast<B&>(v);
// expected-error@-1 {{cannot cast 'struct V' to 'B &' via virtual base 'cwg54::V'}}
B *spbv = static_cast<B*>(&v);
@@ -892,7 +892,7 @@ namespace cwg54 { // cwg54: 2.8
V &cvb = (V&)(b);
V *cpvb = (V*)(&b);
int V::*cmvb = (int V::*)(&B::b);
- // expected-error@-1 {{conversion from pointer to member of class 'B' to pointer to member of class 'V' via virtual base 'cwg54::V' is not allowed}}
+ // expected-error@-1 {{conversion from pointer to member of class 'cwg54::B' to pointer to member of class 'V' via virtual base 'cwg54::V' is not allowed}}
B &cbv = (B&)(v);
// expected-error@-1 {{cannot cast 'struct V' to 'B &' via virtual base 'cwg54::V'}}
B *cpbv = (B*)(&v);
diff --git a/clang/test/CXX/drs/cwg12xx.cpp b/clang/test/CXX/drs/cwg12xx.cpp
index e02a7e1..03a9228 100644
--- a/clang/test/CXX/drs/cwg12xx.cpp
+++ b/clang/test/CXX/drs/cwg12xx.cpp
@@ -81,7 +81,7 @@ void g() {
A b(auto ()->C);
static_assert(sizeof(B ()->C[1] == sizeof(int)), "");
sizeof(auto () -> C[1]);
- // since-cxx11-error@-1 {{function cannot return array type 'C[1]' (aka 'cwg1223::BB[1]')}}
+ // since-cxx11-error@-1 {{function cannot return array type 'C[1]' (aka 'struct BB[1]')}}
}
#endif
} // namespace cwg1223
diff --git a/clang/test/CXX/drs/cwg13xx.cpp b/clang/test/CXX/drs/cwg13xx.cpp
index c4acafd..ad6ee01 100644
--- a/clang/test/CXX/drs/cwg13xx.cpp
+++ b/clang/test/CXX/drs/cwg13xx.cpp
@@ -252,7 +252,7 @@ namespace cwg1330 { // cwg1330: 4 c++11
void (B<P>::*bpf3)() = &B<P>::f;
void (B<P>::*bpf4)() throw() = &B<P>::f;
// cxx98-14-error@-1 {{target exception specification is not superset of source}}
- // since-cxx17-error@-2 {{cannot initialize a variable of type 'void (B<P>::*)() throw()' with an rvalue of type 'void (B<P>::*)() throw(T, typename P::type)': different exception specifications}}
+ // since-cxx17-error@-2 {{cannot initialize a variable of type 'void (B<P>::*)() throw()' with an rvalue of type 'void (B<P>::*)() throw(T, typename cwg1330::P::type)': different exception specifications}}
#if __cplusplus >= 201103L
static_assert(noexcept(B<P>().g()), "");
diff --git a/clang/test/CXX/drs/cwg2149.cpp b/clang/test/CXX/drs/cwg2149.cpp
index 416c895..6b54223 100644
--- a/clang/test/CXX/drs/cwg2149.cpp
+++ b/clang/test/CXX/drs/cwg2149.cpp
@@ -56,22 +56,22 @@ void f() {
// CXX98: VarDecl {{.+}} a 'X[2]'
// CXX98-NEXT: `-InitListExpr {{.+}} 'X[2]'
-// CXX98-NEXT: |-InitListExpr {{.+}} 'X':'cwg2149::X'
+// CXX98-NEXT: |-InitListExpr {{.+}} 'X'{{$}}
// CXX98-NEXT: | |-IntegerLiteral {{.+}} 'int' 1
// CXX98-NEXT: | |-IntegerLiteral {{.+}} 'int' 2
// CXX98-NEXT: | `-IntegerLiteral {{.+}} 'int' 3
-// CXX98-NEXT: `-InitListExpr {{.+}} 'X':'cwg2149::X'
+// CXX98-NEXT: `-InitListExpr {{.+}} 'X'{{$}}
// CXX98-NEXT: |-IntegerLiteral {{.+}} 'int' 4
// CXX98-NEXT: |-IntegerLiteral {{.+}} 'int' 5
// CXX98-NEXT: `-IntegerLiteral {{.+}} 'int' 6
// CXX98: VarDecl {{.+}} b 'X[2]'
// CXX98-NEXT: `-InitListExpr {{.+}} 'X[2]'
-// CXX98-NEXT: |-InitListExpr {{.+}} 'X':'cwg2149::X'
+// CXX98-NEXT: |-InitListExpr {{.+}} 'X'{{$}}
// CXX98-NEXT: | |-IntegerLiteral {{.+}} 'int' 1
// CXX98-NEXT: | |-IntegerLiteral {{.+}} 'int' 2
// CXX98-NEXT: | `-IntegerLiteral {{.+}} 'int' 3
-// CXX98-NEXT: `-InitListExpr {{.+}} 'X':'cwg2149::X'
+// CXX98-NEXT: `-InitListExpr {{.+}} 'X'{{$}}
// CXX98-NEXT: |-IntegerLiteral {{.+}} 'int' 4
// CXX98-NEXT: |-IntegerLiteral {{.+}} 'int' 5
// CXX98-NEXT: `-IntegerLiteral {{.+}} 'int' 6
diff --git a/clang/test/CXX/drs/cwg26xx.cpp b/clang/test/CXX/drs/cwg26xx.cpp
index 426fc6c..bceef64 100644
--- a/clang/test/CXX/drs/cwg26xx.cpp
+++ b/clang/test/CXX/drs/cwg26xx.cpp
@@ -291,12 +291,12 @@ static_assert(__is_same(decltype(i), I<char, 4>));
J j = { "ghi" };
// since-cxx20-error@-1 {{no viable constructor or deduction guide}}
-// since-cxx20-note@#cwg2681-J {{candidate template ignored: could not match 'J<N>' against 'const char *'}}
-// since-cxx20-note@#cwg2681-J {{implicit deduction guide declared as 'template <size_t N> J(J<N>) -> J<N>'}}
+// since-cxx20-note@#cwg2681-J {{candidate template ignored: could not match 'cwg2681::J<N>' against 'const char *'}}
+// since-cxx20-note@#cwg2681-J {{implicit deduction guide declared as 'template <size_t N> J(cwg2681::J<N>) -> cwg2681::J<N>'}}
// since-cxx20-note@#cwg2681-J {{candidate template ignored: could not match 'const unsigned char' against 'const char'}}
-// since-cxx20-note@#cwg2681-J {{implicit deduction guide declared as 'template <size_t N> J(const unsigned char (&)[N]) -> J<N>'}}
+// since-cxx20-note@#cwg2681-J {{implicit deduction guide declared as 'template <size_t N> J(const unsigned char (&)[N]) -> cwg2681::J<N>'}}
// since-cxx20-note@#cwg2681-J {{candidate function template not viable: requires 0 arguments, but 1 was provided}}
-// since-cxx20-note@#cwg2681-J {{implicit deduction guide declared as 'template <size_t N> J() -> J<N>'}}
+// since-cxx20-note@#cwg2681-J {{implicit deduction guide declared as 'template <size_t N> J() -> cwg2681::J<N>'}}
#endif
} // namespace cwg2681
diff --git a/clang/test/CXX/drs/cwg279.cpp b/clang/test/CXX/drs/cwg279.cpp
index 3c63486..d10ceb2 100644
--- a/clang/test/CXX/drs/cwg279.cpp
+++ b/clang/test/CXX/drs/cwg279.cpp
@@ -46,8 +46,8 @@ extern S2 *q2;
// FIXME: This is well-formed, because [basic.def.odr]/15 is satisfied.
struct S3 {};
-// since-cxx20-error@-1 {{redefinition of 'S3'}}
-// since-cxx20-note@cwg279_A.cppm:23 {{previous definition is here}}
+// since-cxx20-error@-1 {{declaration of 'S3' in the global module follows declaration in module cwg279_A}}
+// since-cxx20-note@cwg279_A.cppm:23 {{previous declaration is here}}
extern S3 *q3;
// since-cxx20-error@-1 {{declaration of 'q3' in the global module follows declaration in module cwg279_A}}
// since-cxx20-note@cwg279_A.cppm:24 {{previous declaration is here}}
diff --git a/clang/test/CXX/drs/cwg28xx.cpp b/clang/test/CXX/drs/cwg28xx.cpp
index b32e649..a6b2b99 100644
--- a/clang/test/CXX/drs/cwg28xx.cpp
+++ b/clang/test/CXX/drs/cwg28xx.cpp
@@ -227,12 +227,12 @@ void f() {
auto L2 = [&](this auto&& self) { (void) &x; };
O1<decltype(L1)>{L1, L1}();
/* since-cxx23-error-re@-1 {{inaccessible due to ambiguity:
- struct cwg2881::O1<class (lambda at {{.+}})> -> A<(lambda at {{.+}})> -> class (lambda at {{.+}})
- struct cwg2881::O1<class (lambda at {{.+}})> -> B<(lambda at {{.+}})> -> class (lambda at {{.+}})}}*/
+ struct cwg2881::O1<class (lambda at {{.+}})> -> A<class (lambda at {{.+}})> -> class (lambda at {{.+}})
+ struct cwg2881::O1<class (lambda at {{.+}})> -> B<class (lambda at {{.+}})> -> class (lambda at {{.+}})}}*/
O1<decltype(L2)>{L2, L2}();
/* since-cxx23-error-re@-1 {{inaccessible due to ambiguity:
- struct cwg2881::O1<class (lambda at {{.+}})> -> A<(lambda at {{.+}})> -> class (lambda at {{.+}})
- struct cwg2881::O1<class (lambda at {{.+}})> -> B<(lambda at {{.+}})> -> class (lambda at {{.+}})}}*/
+ struct cwg2881::O1<class (lambda at {{.+}})> -> A<class (lambda at {{.+}})> -> class (lambda at {{.+}})
+ struct cwg2881::O1<class (lambda at {{.+}})> -> B<class (lambda at {{.+}})> -> class (lambda at {{.+}})}}*/
O2{L1}();
// since-cxx23-error-re@-1 {{invalid explicit object parameter type 'cwg2881::O2<(lambda at {{.+}})>' in lambda with capture; the type must derive publicly from the lambda}}
// since-cxx23-note@#cwg2881-O2 {{declared protected here}}
@@ -277,7 +277,7 @@ struct Indirect : T {
template<typename T>
struct Ambiguous : Indirect<T>, T {
/* since-cxx23-warning-re@-1 {{direct base '(lambda at {{.+}})' is inaccessible due to ambiguity:
- struct cwg2881::Ambiguous<class (lambda at {{.+}})> -> Indirect<(lambda at {{.+}})> -> class (lambda at {{.+}})
+ struct cwg2881::Ambiguous<class (lambda at {{.+}})> -> Indirect<class (lambda at {{.+}})> -> class (lambda at {{.+}})
struct cwg2881::Ambiguous<class (lambda at {{.+}})> -> class (lambda at {{.+}})}}*/
// since-cxx23-note-re@#cwg2881-f4 {{in instantiation of template class 'cwg2881::Ambiguous<(lambda at {{.+}})>' requested here}}
// since-cxx34-note-re@#cwg2881-f4-call {{while substituting deduced template arguments into function template 'f4' [with L = (lambda at {{.+}})]}}
@@ -303,7 +303,7 @@ void g() {
// expected-error@-1 {{no matching function for call to 'f4'}}
// expected-note-re@-2 {{while substituting deduced template arguments into function template 'f4' [with L = (lambda at {{.+}})]}}
/* expected-note-re@#cwg2881-f4 {{candidate template ignored: substitution failure [with L = (lambda at {{.+}})]: lambda '(lambda at {{.+}})' is inaccessible due to ambiguity:
- struct cwg2881::Ambiguous<class (lambda at {{.+}})> -> Indirect<(lambda at {{.+}})> -> class (lambda at {{.+}})
+ struct cwg2881::Ambiguous<class (lambda at {{.+}})> -> Indirect<class (lambda at {{.+}})> -> class (lambda at {{.+}})
struct cwg2881::Ambiguous<class (lambda at {{.+}})> -> class (lambda at {{.+}})}}*/
static_assert(!is_callable<Private<decltype(lambda)>>);
static_assert(!is_callable<Ambiguous<decltype(lambda)>>);
diff --git a/clang/test/CXX/drs/cwg2xx.cpp b/clang/test/CXX/drs/cwg2xx.cpp
index 556407a..37186e3 100644
--- a/clang/test/CXX/drs/cwg2xx.cpp
+++ b/clang/test/CXX/drs/cwg2xx.cpp
@@ -98,8 +98,8 @@ public:
void foo() { Templ<Derived> x(&Derived::func); }
// expected-error@-1 {{no matching constructor for initialization of 'Templ<Derived>'}}
-// expected-note@#cwg203-ex3-Templ {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'int (Derived::*)() const' (aka 'int (Base::*)() const') to 'const Templ<cwg203::ex3::Derived>' for 1st argument}}
-// since-cxx11-note@#cwg203-ex3-Templ {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'int (Derived::*)() const' (aka 'int (Base::*)() const') to 'Templ<cwg203::ex3::Derived>' for 1st argument}}
+// expected-note@#cwg203-ex3-Templ {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'int (Derived::*)() const' (aka 'int (cwg203::ex3::Base::*)() const') to 'const Templ<cwg203::ex3::Derived>' for 1st argument}}
+// since-cxx11-note@#cwg203-ex3-Templ {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'int (Derived::*)() const' (aka 'int (cwg203::ex3::Base::*)() const') to 'Templ<cwg203::ex3::Derived>' for 1st argument}}
// expected-note@#cwg203-ex3-Templ-ctor {{candidate template ignored: could not match 'cwg203::ex3::Derived' against 'cwg203::ex3::Base'}}
} // namespace ex3
@@ -690,8 +690,8 @@ namespace cwg244 { // cwg244: 11
void f() {
D_object.~B();
- // expected-error@-1 {{destructor type 'cwg244::B' in object destruction expression does not match the type 'D' of the object being destroyed}}
- // expected-note@#cwg244-B {{type 'cwg244::B' found by destructor name lookup}}
+ // expected-error@-1 {{destructor type 'B' in object destruction expression does not match the type 'D' of the object being destroyed}}
+ // expected-note@#cwg244-B {{type 'B' found by destructor name lookup}}
D_object.B::~B();
D_object.D::~B(); // FIXME: Missing diagnostic for this.
B_ptr->~B();
@@ -1400,7 +1400,7 @@ namespace cwg298 { // cwg298: 3.1
// expected-error@-1 {{a type specifier is required for all declarations}}
B::A() {} // ok
C::~C() {}
- // expected-error@-1 {{destructor cannot be declared using a typedef 'C' (aka 'const cwg298::A') of the class name}}
+ // expected-error@-1 {{destructor cannot be declared using a typedef 'C' (aka 'const A') of the class name}}
typedef struct D E; // #cwg298-E
struct E {};
diff --git a/clang/test/CXX/drs/cwg3xx.cpp b/clang/test/CXX/drs/cwg3xx.cpp
index 8b035cf..bbd87c0 100644
--- a/clang/test/CXX/drs/cwg3xx.cpp
+++ b/clang/test/CXX/drs/cwg3xx.cpp
@@ -143,7 +143,7 @@ namespace cwg305 { // cwg305: no
void h(B *b) {
struct B {}; // #cwg305-h-B
b->~B();
- // expected-error@-1 {{destructor type 'B' in object destruction expression does not match the type 'B' (aka 'cwg305::A') of the object being destroyed}}
+ // expected-error@-1 {{destructor type 'B' in object destruction expression does not match the type 'B' (aka 'A') of the object being destroyed}}
// expected-note@#cwg305-h-B {{type 'B' found by destructor name lookup}}
}
@@ -1027,7 +1027,7 @@ namespace cwg357 { // cwg357: 2.7
void f() const; // #cwg357-f
};
template<typename T> void A<T>::f() {}
- // expected-error@-1 {{out-of-line definition of 'f' does not match any declaration in 'A<T>'}}
+ // expected-error@-1 {{out-of-line definition of 'f' does not match any declaration in 'cwg357::A<T>'}}
// expected-note@#cwg357-A {{defined here}}
// expected-note@#cwg357-f {{member declaration does not match because it is const qualified}}
@@ -1321,7 +1321,7 @@ namespace cwg381 { // cwg381: 2.7
void f() {
E e;
e.B::a = 0;
- /* expected-error@-1 {{ambiguous conversion from derived class 'E' to base class 'cwg381::B':
+ /* expected-error@-1 {{ambiguous conversion from derived class 'E' to base class 'B':
struct cwg381::E -> C -> B
struct cwg381::E -> D -> B}} */
F f;
@@ -1800,8 +1800,8 @@ namespace cwg399 { // cwg399: 11
void f() {
D_object.~B();
- // expected-error@-1 {{destructor type 'cwg399::B' in object destruction expression does not match the type 'D' of the object being destroyed}}
- // expected-note@#cwg399-B {{type 'cwg399::B' found by destructor name lookup}}
+ // expected-error@-1 {{destructor type 'B' in object destruction expression does not match the type 'D' of the object being destroyed}}
+ // expected-note@#cwg399-B {{type 'B' found by destructor name lookup}}
D_object.B::~B();
D_object.D::~B(); // FIXME: Missing diagnostic for this.
B_ptr->~B();
diff --git a/clang/test/CXX/drs/cwg4xx.cpp b/clang/test/CXX/drs/cwg4xx.cpp
index dc53ae7..8497f97 100644
--- a/clang/test/CXX/drs/cwg4xx.cpp
+++ b/clang/test/CXX/drs/cwg4xx.cpp
@@ -1185,7 +1185,7 @@ namespace cwg480 { // cwg480: 2.7
extern int D::*c;
int A::*d = static_cast<int A::*>(c);
- // expected-error@-1 {{conversion from pointer to member of class 'D' to pointer to member of class 'A' via virtual base 'cwg480::B' is not allowed}}
+ // expected-error@-1 {{conversion from pointer to member of class 'cwg480::D' to pointer to member of class 'A' via virtual base 'cwg480::B' is not allowed}}
D *e;
A *f = e;
diff --git a/clang/test/CXX/drs/cwg6xx.cpp b/clang/test/CXX/drs/cwg6xx.cpp
index e2eb009..11eb0bf 100644
--- a/clang/test/CXX/drs/cwg6xx.cpp
+++ b/clang/test/CXX/drs/cwg6xx.cpp
@@ -895,12 +895,12 @@ namespace cwg666 { // cwg666: 2.8
template<int> int f();
template<typename T> int f() {
T::type *p = 0;
- // expected-error@-1 {{missing 'typename' prior to dependent type name 'Y::type'}}
+ // expected-error@-1 {{missing 'typename' prior to dependent type name 'cwg666::Y::type'}}
// expected-note@#cwg666-f-Y {{in instantiation of function template specialization 'cwg666::f<cwg666::Y>' requested here}}
int a(T::type);
- // expected-error@-1 {{missing 'typename' prior to dependent type name 'Y::type'}}
+ // expected-error@-1 {{missing 'typename' prior to dependent type name 'cwg666::Y::type'}}
return f<T::type>();
- // expected-error@-1 {{missing 'typename' prior to dependent type name 'Y::type'}}
+ // expected-error@-1 {{missing 'typename' prior to dependent type name 'cwg666::Y::type'}}
}
struct X { static const int type = 0; };
struct Y { typedef int type; };
diff --git a/clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp b/clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp
index dc0e842..31587a9 100644
--- a/clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp
+++ b/clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp
@@ -160,7 +160,7 @@ namespace std_example {
template<typename T> concept C2 =
requires(T x) {
{*x} -> same_as<typename T::inner>;
- // expected-note@-1{{because type constraint 'same_as<int, typename T2::inner>' was not satisfied:}}
+ // expected-note@-1{{because type constraint 'same_as<int, typename std_example::T2::inner>' was not satisfied:}}
// expected-note@-2{{because '*x' would be invalid: indirection requires pointer operand ('int' invalid)}}
};
diff --git a/clang/test/CXX/module/cpp.pre/module_decl.cpp b/clang/test/CXX/module/cpp.pre/module_decl.cpp
index 6238347..5c29aef 100644
--- a/clang/test/CXX/module/cpp.pre/module_decl.cpp
+++ b/clang/test/CXX/module/cpp.pre/module_decl.cpp
@@ -1,8 +1,147 @@
// RUN: rm -rf %t
// RUN: mkdir -p %t
-// RUN: %clang_cc1 -std=c++20 -emit-module-interface %s -verify -o %t/M.pcm
+// RUN: split-file %s %t
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/line.cpp -verify -o %t/line.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/gnu_line_marker.cpp -verify -o %t/gnu_line_marker.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/include.cpp -verify -o %t/include.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/ident.cpp -verify -o %t/ident.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/pragma_comment.cpp -verify -o %t/pragma_comment.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/pragma_mark.cpp -verify -o %t/pragma_mark.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/pragma_detect_mismatch.cpp -verify -o %t/pragma_detect_mismatch.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/pragma_clang_debug.cpp -verify -o %t/pragma_clang_debug.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/pragma_message.cpp -verify -o %t/pragma_message.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/pragma_gcc_warn.cpp -verify -o %t/pragma_gcc_warn.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/pragma_gcc_error.cpp -verify -o %t/pragma_gcc_error.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/pragma_diag_push_pop.cpp -verify -o %t/pragma_diag_push_pop.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/pragma_diag_ignore.cpp -verify -o %t/pragma_diag_ignore.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/pragma_opencl_ext.cpp -verify -o %t/pragma_opencl_ext.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/pragma_push_pop.cpp -verify -o %t/pragma_push_pop.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/pragma_exec_charset.cpp -verify -o %t/pragma_exec_charset.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/pragma_clang_assume_nonnull.cpp -verify -o %t/pragma_clang_assume_nonnull.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/macro_expand.cpp -DMACRO="" -verify -o %t/macro_expand.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/define.cpp -verify -o %t/define.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/undef.cpp -verify -o %t/undef.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/defined.cpp -verify -o %t/defined.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/has_embed.cpp -verify -o %t/has_embed.pcm
+// RUN: %clang_cc1 -std=c++20 -emit-module-interface %t/has_include.cpp -verify -o %t/has_include.pcm
+//--- header.h
+#ifndef HEADER_H
+#define HEADER_H
+
+#endif // HEADER_H
+
+//--- line.cpp
+// expected-no-diagnostics
+#line 3
+export module M;
+
+//--- gnu_line_marker.cpp
+// expected-no-diagnostics
+# 1 __FILE__ 1 3
+export module M;
+
+//--- include.cpp
+#include "header.h" // expected-note {{add 'module;' to the start of the file to introduce a global module fragment}}
+export module M; // expected-error {{module declaration must occur at the start of the translation unit}}
+
+//--- ident.cpp
+// expected-no-diagnostics
+#ident "$Header:$"
+export module M;
+
+//--- pragma_comment.cpp
+// expected-no-diagnostics
+#pragma comment(lib, "msvcrt.lib")
+export module M;
+
+//--- pragma_mark.cpp
+// expected-no-diagnostics
+#pragma mark LLVM's world
+export module M;
+
+//--- pragma_detect_mismatch.cpp
+// expected-no-diagnostics
+#pragma detect_mismatch("test", "1")
+export module M;
+
+//--- pragma_clang_debug.cpp
+// expected-no-diagnostics
+#pragma clang __debug dump Test
+export module M;
+
+//--- pragma_message.cpp
+#pragma message "test" // expected-warning {{test}}
+export module M;
+
+//--- pragma_gcc_warn.cpp
+#pragma GCC warning "Foo" // expected-warning {{Foo}}
+export module M;
+
+//--- pragma_gcc_error.cpp
+#pragma GCC error "Foo" // expected-error {{Foo}}
+export module M;
+
+//--- pragma_diag_push_pop.cpp
+// expected-no-diagnostics
+#pragma GCC diagnostic push
+#pragma GCC diagnostic pop
+export module M;
+
+//--- pragma_diag_ignore.cpp
+// expected-no-diagnostics
+#pragma GCC diagnostic ignored "-Wframe-larger-than"
+export module M;
+
+//--- pragma_opencl_ext.cpp
+// expected-no-diagnostics
+#pragma OPENCL EXTENSION __cl_clang_variadic_functions : enable
+export module M;
+
+//--- pragma_push_pop.cpp
+// expected-no-diagnostics
+#pragma warning(push)
+#pragma warning(pop)
+export module M;
+
+//--- pragma_exec_charset.cpp
+// expected-no-diagnostics
+#pragma execution_character_set(push, "UTF-8")
+#pragma execution_character_set(pop)
+export module M;
+
+//--- pragma_clang_assume_nonnull.cpp
+// expected-no-diagnostics
+#pragma clang assume_nonnull begin
+#pragma clang assume_nonnull end
+export module M;
+
+//--- macro_expand.cpp
+MACRO // expected-note {{add 'module;' to the start of the file to introduce a global module fragment}}
+export module M; // expected-error {{module declaration must occur at the start of the translation unit}}
+
+//--- define.cpp
// This is a comment
#define I32 int // expected-note {{add 'module;' to the start of the file to introduce a global module fragment}}
export module M; // expected-error {{module declaration must occur at the start of the translation unit}}
export I32 i32;
+
+//--- undef.cpp
+#undef FOO // expected-note {{add 'module;' to the start of the file to introduce a global module fragment}}
+export module M; // expected-error {{module declaration must occur at the start of the translation unit}}
+
+//--- defined.cpp
+#if defined(FOO) // expected-note {{add 'module;' to the start of the file to introduce a global module fragment}}
+#endif
+export module M; // expected-error {{module declaration must occur at the start of the translation unit}}
+
+//--- has_embed.cpp
+#if __has_embed(__FILE__ ext::token(0xB055)) // expected-note {{add 'module;' to the start of the file to introduce a global module fragment}}
+#endif
+export module M; // expected-error {{module declaration must occur at the start of the translation unit}}
+
+//--- has_include.cpp
+#if __has_include(<stdio.h>) || __has_include_next(<stdlib.h>) // expected-note {{add 'module;' to the start of the file to introduce a global module fragment}} \
+ // expected-warning {{#include_next in primary source file; will search from start of include path}}
+#endif
+export module M; // expected-error {{module declaration must occur at the start of the translation unit}}
diff --git a/clang/test/CXX/over/over.match/over.match.funcs/over.match.class.deduct/p2.cpp b/clang/test/CXX/over/over.match/over.match.funcs/over.match.class.deduct/p2.cpp
index d192070..a32295e 100644
--- a/clang/test/CXX/over/over.match/over.match.funcs/over.match.class.deduct/p2.cpp
+++ b/clang/test/CXX/over/over.match/over.match.funcs/over.match.class.deduct/p2.cpp
@@ -37,7 +37,7 @@ namespace std {
namespace p0702r1 {
template<typename T> struct X { // expected-note {{candidate}} expected-note {{implicit deduction guide}}
X(std::initializer_list<T>); // expected-note {{candidate template ignored: could not match 'std::initializer_list<T>' against 'Z'}} \
- // expected-note {{implicit deduction guide declared as 'template <typename T> X(std::initializer_list<T>) -> X<T>'}}
+ // expected-note {{implicit deduction guide declared as 'template <typename T> X(std::initializer_list<T>) -> p0702r1::X<T>'}}
};
X xi = {0};
diff --git a/clang/test/CXX/over/over.match/over.match.funcs/over.match.oper/p3.cpp b/clang/test/CXX/over/over.match/over.match.funcs/over.match.oper/p3.cpp
index d88d5be..7a0cacc 100644
--- a/clang/test/CXX/over/over.match/over.match.funcs/over.match.oper/p3.cpp
+++ b/clang/test/CXX/over/over.match/over.match.funcs/over.match.oper/p3.cpp
@@ -13,6 +13,7 @@ enum class E { e };
template<typename T> int f(T t) { return ~t; } // expected-error {{invalid argument type}}
template<typename T, typename U> int f(T t, U u) { return t % u; } // expected-error {{invalid operands to}}
+ // expected-note@-1 {{no implicit conversion for scoped enum}}
int b1 = ~E::e; // expected-error {{invalid argument type}}
int b2 = f(E::e); // expected-note {{in instantiation of}}
diff --git a/clang/test/CXX/stmt.stmt/stmt.select/stmt.if/p2.cpp b/clang/test/CXX/stmt.stmt/stmt.select/stmt.if/p2.cpp
index abb4244..05830de 100644
--- a/clang/test/CXX/stmt.stmt/stmt.select/stmt.if/p2.cpp
+++ b/clang/test/CXX/stmt.stmt/stmt.select/stmt.if/p2.cpp
@@ -239,5 +239,21 @@ void f2() {
}
+namespace GH153884 {
+ bool f1() {
+ auto f = [](auto) { return true; };
+ if constexpr (0)
+ return f(1);
+ return false;
+ }
+ bool f2() {
+ auto f = [](auto x) { if (x) return 1.5; else return "wat"; };
+ // expected-error@-1 {{'auto' in return type deduced as 'const char *' here but deduced as 'double' in earlier return statement}}
+ if constexpr (0)
+ return f(1);
+ // expected-note@-1 {{in instantiation of function template specialization 'GH153884::f2()}}
+ return false;
+ }
+}
#endif
diff --git a/clang/test/CXX/temp/temp.arg/temp.arg.nontype/p5.cpp b/clang/test/CXX/temp/temp.arg/temp.arg.nontype/p5.cpp
index 01ce33b..4b93d86 100644
--- a/clang/test/CXX/temp/temp.arg/temp.arg.nontype/p5.cpp
+++ b/clang/test/CXX/temp/temp.arg/temp.arg.nontype/p5.cpp
@@ -238,9 +238,9 @@ namespace pointer_to_member_data {
X0<&Y::y> x0a;
X0<&Y::x> x0b;
#if __cplusplus <= 201402L
- // expected-error@-2 {{non-type template argument of type 'int Y::*' (aka 'int X::*') cannot be converted to a value of type 'int Y::*'}}
+ // expected-error@-2 {{non-type template argument of type 'int Y::*' (aka 'int pointer_to_member_data::X::*') cannot be converted to a value of type 'int Y::*'}}
#else
- // expected-error@-4 {{conversion from 'int Y::*' (aka 'int X::*') to 'int Y::*' is not allowed in a converted constant expression}}
+ // expected-error@-4 {{conversion from 'int Y::*' (aka 'int pointer_to_member_data::X::*') to 'int Y::*' (aka 'int pointer_to_member_data::Y::*') is not allowed in a converted constant expression}}
#endif
// Test qualification conversions
diff --git a/clang/test/CXX/temp/temp.constr/temp.constr.decl/func-template-decl.cpp b/clang/test/CXX/temp/temp.constr/temp.constr.decl/func-template-decl.cpp
index 9a8148b..499e6ab 100644
--- a/clang/test/CXX/temp/temp.constr/temp.constr.decl/func-template-decl.cpp
+++ b/clang/test/CXX/temp/temp.constr/temp.constr.decl/func-template-decl.cpp
@@ -54,7 +54,7 @@ struct TA { // #defined-here
template <unsigned N>
template <template <unsigned> class TT> int TA<N>::A() { return sizeof(TT<N>); }
-// expected-error@-1{{out-of-line definition of 'A' does not match any declaration in 'TA<N>'}}
+// expected-error@-1{{out-of-line definition of 'A' does not match any declaration in 'diag::TA<N>'}}
// expected-note@#defined-here{{defined here}}
} // end namespace diag
diff --git a/clang/test/CXX/temp/temp.decls/temp.class.spec/p6.cpp b/clang/test/CXX/temp/temp.decls/temp.class.spec/p6.cpp
index 2da0382..5345b08 100644
--- a/clang/test/CXX/temp/temp.decls/temp.class.spec/p6.cpp
+++ b/clang/test/CXX/temp/temp.decls/temp.class.spec/p6.cpp
@@ -38,7 +38,7 @@ A<short>::C::B<int*> absip;
template<typename T, typename U>
struct Outer {
template<typename X, typename Y> struct Inner;
- template<typename Y> struct Inner<T, Y> {}; // expected-note{{previous declaration of class template partial specialization 'Inner<int, Y>' is here}}
+ template<typename Y> struct Inner<T, Y> {}; // expected-note{{previous declaration of class template partial specialization 'Outer<int, int>::Inner<int, Y>' is here}}
template<typename Y> struct Inner<U, Y> {}; // expected-error{{cannot be redeclared}}
};
@@ -80,7 +80,7 @@ namespace print_dependent_TemplateSpecializationType {
template <class T, class U> struct Foo {
template <unsigned long, class X, class Y> struct Bar;
template <class Y> struct Bar<0, T, Y> {};
- // expected-note-re@-1 {{previous declaration {{.*}} 'Bar<0, int, Y>' is here}}
+ // expected-note-re@-1 {{previous declaration {{.*}} 'print_dependent_TemplateSpecializationType::Foo<int, int>::Bar<0, int, Y>' is here}}
template <class Y> struct Bar<0, U, Y> {};
// expected-error@-1 {{partial specialization 'Bar<0, int, Y>' cannot be redeclared}}
};
diff --git a/clang/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p3-0x.cpp b/clang/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p3-0x.cpp
index 7d5be01..1eb35ab 100644
--- a/clang/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p3-0x.cpp
+++ b/clang/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p3-0x.cpp
@@ -78,9 +78,9 @@ namespace std_example {
template<class T> struct A { // expected-note {{candidate}} expected-note {{implicit deduction guide}}
template<class U>
A(T &&, U &&, int *); // expected-note {{[with T = int, U = int] not viable: expects an rvalue}} \
- // expected-note {{implicit deduction guide declared as 'template <class T, class U> A(T &&, U &&, int *) -> A<T>'}}
+ // expected-note {{implicit deduction guide declared as 'template <class T, class U> A(T &&, U &&, int *) -> std_example::A<T>'}}
A(T &&, int *); // expected-note {{requires 2}} \
- // expected-note {{implicit deduction guide declared as 'template <class T> A(T &&, int *) -> A<T>'}}
+ // expected-note {{implicit deduction guide declared as 'template <class T> A(T &&, int *) -> std_example::A<T>'}}
};
template<class T> A(T &&, int *) -> A<T>; // expected-note {{requires 2}}
diff --git a/clang/test/CXX/temp/temp.param/p15-cxx0x.cpp b/clang/test/CXX/temp/temp.param/p15-cxx0x.cpp
index 83144a4..238490c 100644
--- a/clang/test/CXX/temp/temp.param/p15-cxx0x.cpp
+++ b/clang/test/CXX/temp/temp.param/p15-cxx0x.cpp
@@ -97,14 +97,14 @@ template<unsigned N, typename...Ts> struct drop {
using T1 = take<3, int, char, double, long>::type; // expected-note {{previous}}
// FIXME: Desugar the types on the RHS in this diagnostic.
// desired-error {{'types<void, void, void, void>' vs 'types<int, char, double, (no argument)>'}}
-using T1 = types<void, void, void, void>; // expected-error {{'types<void, void, void, void>' vs 'types<typename inner<_>::type, typename inner<_>::type, typename inner<_>::type, (no argument)>'}}
+using T1 = types<void, void, void, void>; // expected-error {{'types<void, void, void, void>' vs 'types<typename ParameterPackExpansions::wrap<int>::inner<_>::type, typename ParameterPackExpansions::wrap<char>::inner<_>::type, typename ParameterPackExpansions::wrap<double>::inner<_>::type, (no argument)>'}}
using D1 = drop<3, int, char, double, long>::type;
using D1 = types<long>;
using T2 = take<4, int, char, double, long>::type; // expected-note {{previous}}
// FIXME: Desugar the types on the RHS in this diagnostic.
// desired-error {{'types<void, void, void, void>' vs 'types<int, char, double, long>'}}
-using T2 = types<void, void, void, void>; // expected-error {{'types<void, void, void, void>' vs 'types<typename inner<_>::type, typename inner<_>::type, typename inner<_>::type, typename inner<_>::type>'}}
+using T2 = types<void, void, void, void>; // expected-error {{'types<void, void, void, void>' vs 'types<typename ParameterPackExpansions::wrap<int>::inner<_>::type, typename ParameterPackExpansions::wrap<char>::inner<_>::type, typename ParameterPackExpansions::wrap<double>::inner<_>::type, typename ParameterPackExpansions::wrap<long>::inner<_>::type>'}}
using T2 = types<int, char, double, long>;
using D2 = drop<4, int, char, double, long>::type;
using D2 = types<>;
diff --git a/clang/test/CXX/temp/temp.res/temp.dep/temp.dep.type/p4.cpp b/clang/test/CXX/temp/temp.res/temp.dep/temp.dep.type/p4.cpp
index f32f49e..75805a7 100644
--- a/clang/test/CXX/temp/temp.res/temp.dep/temp.dep.type/p4.cpp
+++ b/clang/test/CXX/temp/temp.res/temp.dep/temp.dep.type/p4.cpp
@@ -326,52 +326,52 @@ namespace N0 {
// None of the following should be found in the current instantiation.
new M4; // expected-error{{unknown type name 'M4'}}
- new B::M4; // expected-error{{no type named 'M4' in 'B<T>'}}
+ new B::M4; // expected-error{{no type named 'M4' in 'N0::B<T>'}}
new A::M4; // expected-error{{no type named 'M4' in 'N0::A'}}
new B::A::M4; // expected-error{{no type named 'M4' in 'N0::A'}}
x4; // expected-error{{use of undeclared identifier 'x4'}}
- B::x4; // expected-error{{no member named 'x4' in 'B<T>'}}
+ B::x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
B::A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
f4(); // expected-error{{use of undeclared identifier 'f4'}}
- B::f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
+ B::f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
B::A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
- this->x4; // expected-error{{no member named 'x4' in 'B<T>'}}
- this->B::x4; // expected-error{{no member named 'x4' in 'B<T>'}}
+ this->x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
+ this->B::x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
this->A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
this->B::A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
- this->f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
- this->B::f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
+ this->f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
+ this->B::f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
this->A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
this->B::A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
- a->x4; // expected-error{{no member named 'x4' in 'B<T>'}}
- a->B::x4; // expected-error{{no member named 'x4' in 'B<T>'}}
+ a->x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
+ a->B::x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
a->A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
a->B::A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
- a->f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
- a->B::f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
+ a->f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
+ a->B::f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
a->A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
a->B::A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
- (*this).x4; // expected-error{{no member named 'x4' in 'B<T>'}}
- (*this).B::x4; // expected-error{{no member named 'x4' in 'B<T>'}}
+ (*this).x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
+ (*this).B::x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
(*this).A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
(*this).B::A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
- (*this).f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
- (*this).B::f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
+ (*this).f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
+ (*this).B::f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
(*this).A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
(*this).B::A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
- b.x4; // expected-error{{no member named 'x4' in 'B<T>'}}
- b.B::x4; // expected-error{{no member named 'x4' in 'B<T>'}}
+ b.x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
+ b.B::x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
b.A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
b.B::A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
- b.f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
- b.B::f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
+ b.f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
+ b.B::f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
b.A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
b.B::A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
}
@@ -424,7 +424,7 @@ namespace N2 {
void not_instantiated(A *a, B *b) {
b->x; // expected-error{{no member named 'x' in 'N2::A::B'}}
b->B::x; // expected-error{{no member named 'x' in 'N2::A::B'}}
- a->B::C::x; // expected-error{{no member named 'x' in 'A<T>'}}
+ a->B::C::x; // expected-error{{no member named 'x' in 'N2::A<T>'}}
}
};
diff --git a/clang/test/ClangScanDeps/link-libraries-diag-dup.c b/clang/test/ClangScanDeps/link-libraries-diag-dup.c
new file mode 100644
index 0000000..e6612ca
--- /dev/null
+++ b/clang/test/ClangScanDeps/link-libraries-diag-dup.c
@@ -0,0 +1,57 @@
+// RUN: rm -rf %t
+// RUN: mkdir %t
+// RUN: split-file %s %t
+
+//--- module.modulemap
+module A {
+ umbrella header "A.h"
+
+ module B {
+ header "B.h"
+ link "libraryB"
+ }
+
+ explicit module D {
+ header "D.h"
+ link "libraryD"
+ }
+
+ link "libraryA"
+ link "libraryA"
+}
+
+module C {
+ header "C.h"
+ link "libraryA"
+}
+
+//--- A.h
+#include "B.h"
+//--- B.h
+// empty
+//--- C.h
+// empty
+//--- D.h
+// empty
+//--- TU.c
+#include "A.h"
+#include "C.h"
+#include "D.h"
+
+//--- cdb.json.template
+[{
+ "file": "DIR/TU.c",
+ "directory": "DIR",
+ "command": "clang -fmodules -fmodules-cache-path=DIR/cache -I DIR -c DIR/TU.c"
+}]
+
+// RUN: sed "s|DIR|%/t|g" %t/cdb.json.template > %t/cdb.json
+// RUN: not clang-scan-deps -compilation-database %t/cdb.json -format \
+// RUN: experimental-full 2>&1 | FileCheck %s
+
+// Note that module D does not report an error because it is explicit.
+// Therefore we can use CHECK-NEXT for the redeclaration error on line 15.
+// CHECK: module.modulemap:6:5: error: link declaration is not allowed in submodules
+// CHECK-NEXT: module.modulemap:15:3: error: redeclaration of link library 'libraryA' [-Wmodule-link-redeclaration]
+// CHECK-NEXT: module.modulemap:14:3: note: previously declared here
+// CHECK-NOT: module.modulemap:20:3: error: redeclaration of link library 'libraryA'
diff --git a/clang/test/CodeCompletion/cpp23-explicit-object.cpp b/clang/test/CodeCompletion/cpp23-explicit-object.cpp
new file mode 100644
index 0000000..ea97237
--- /dev/null
+++ b/clang/test/CodeCompletion/cpp23-explicit-object.cpp
@@ -0,0 +1,153 @@
+struct A {
+ void foo(this auto&& self, int arg);
+ void bar(this A self, int arg);
+};
+
+int func1() {
+ A a {};
+ a.
+}
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-2):5 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC1 %s
+// CHECK-CC1: COMPLETION: A : A::
+// CHECK-NEXT-CC1: COMPLETION: bar : [#void#]bar(<#int arg#>)
+// CHECK-NEXT-CC1: COMPLETION: foo : [#void#]foo(<#int arg#>)
+// CHECK-NEXT-CC1: COMPLETION: operator= : [#A &#]operator=(<#const A &#>)
+// CHECK-NEXT-CC1: COMPLETION: operator= : [#A &#]operator=(<#A &&#>)
+// CHECK-NEXT-CC1: COMPLETION: ~A : [#void#]~A()
+
+struct B {
+ template <typename T>
+ void foo(this T&& self, int arg);
+};
+
+int func2() {
+ B b {};
+ b.foo();
+}
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-2):9 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC2 %s
+// CHECK-CC2: OVERLOAD: [#void#]foo(int arg)
+
+// TODO: llvm/llvm-project/146649
+// This is incorrect behavior. The correct result should be a variant of:
+// CC3: should be something like [#void#]foo(<#A self#>, <#int arg#>)
+// CC4: should be something like [#void#]bar(<#A self#>, <#int arg#>)
+int func3() {
+ (&A::foo)
+ (&A::bar)
+}
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-3):10 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC3 %s
+// CHECK-CC3: COMPLETION: foo : [#void#]foo<<#class self:auto#>>(<#int arg#>)
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-4):10 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC4 %s
+// CHECK-CC4: COMPLETION: bar : [#void#]bar(<#int arg#>)
+
+int func4() {
+ // TODO (&A::foo)(
+ (&A::bar)()
+}
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-2):13 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC5 %s
+// CHECK-CC5: OVERLOAD: [#void#](<#A#>, int)
+
+struct C {
+ int member {};
+ int memberFnA(int a);
+ int memberFnA(this C&, float a);
+
+ void foo(this C& self) {
+    // Should not offer any members here, since
+    // they need to be referenced through `self`.
+ mem
+ // RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-1):8 -std=c++23 %s | FileCheck --allow-empty %s
+ // CHECK-NOT: COMPLETION: member : [#int#]member
+ // CHECK-NOT: COMPLETION: memberFnA : [#int#]memberFnA(<#int a#>)
+ // CHECK-NOT: COMPLETION: memberFnA : [#int#]memberFnA(<#float a#>)
+ }
+ void bar(this C& self) {
+    // Should offer all results.
+ self.mem
+ // RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-1):13 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC6 %s
+ // CHECK-CC6: COMPLETION: member : [#int#]member
+ // CHECK-CC6: COMPLETION: memberFnA : [#int#]memberFnA(<#int a#>)
+ // CHECK-CC6: COMPLETION: memberFnA : [#int#]memberFnA(<#float a#>)
+ }
+ void baz(this C& self) {
+ [&]() {
+ // Should not offer any results
+ mem
+ // RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-1):10 -std=c++23 %s | FileCheck --allow-empty %s
+ // CHECK-NOT: COMPLETION: member : [#int#]member
+ // CHECK-NOT: COMPLETION: memberFnA : [#int#]memberFnA(<#int a#>)
+ // CHECK-NOT: COMPLETION: memberFnA : [#int#]memberFnA(<#float a#>)
+ }();
+ }
+};
+
+
+struct S {
+ void foo1(int a);
+ void foo2(int a) const;
+ void foo2(this const S& self, float a);
+ void foo3(this const S& self, int a);
+ void foo4(this S& self, int a);
+};
+
+void S::foo1(int a) {
+ this->;
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-1):9 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC7 %s
+// CHECK-CC7: COMPLETION: foo1 : [#void#]foo1(<#int a#>)
+// CHECK-CC7: COMPLETION: foo2 : [#void#]foo2(<#int a#>)[# const#]
+// CHECK-CC7: COMPLETION: foo2 : [#void#]foo2(<#float a#>)[# const#]
+// CHECK-CC7: COMPLETION: foo3 : [#void#]foo3(<#int a#>)[# const#]
+// CHECK-CC7: COMPLETION: foo4 : [#void#]foo4(<#int a#>)
+}
+
+void S::foo2(int a) const {
+ this->;
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-1):9 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC8 %s
+// CHECK-CC8: COMPLETION: foo2 : [#void#]foo2(<#int a#>)[# const#]
+// CHECK-CC8: COMPLETION: foo2 : [#void#]foo2(<#float a#>)[# const#]
+// CHECK-CC8: COMPLETION: foo3 : [#void#]foo3(<#int a#>)[# const#]
+}
+
+void S::foo3(this const S& self, int a) {
+ self.;
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-1):8 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC9 %s
+// CHECK-CC9: COMPLETION: foo2 : [#void#]foo2(<#int a#>)[# const#]
+// CHECK-CC9: COMPLETION: foo2 : [#void#]foo2(<#float a#>)[# const#]
+// CHECK-CC9: COMPLETION: foo3 : [#void#]foo3(<#int a#>)[# const#]
+}
+
+void S::foo4(this S& self, int a) {
+ self.;
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-1):8 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC10 %s
+// CHECK-CC10: COMPLETION: foo1 : [#void#]foo1(<#int a#>)
+// CHECK-CC10: COMPLETION: foo2 : [#void#]foo2(<#int a#>)[# const#]
+// CHECK-CC10: COMPLETION: foo2 : [#void#]foo2(<#float a#>)[# const#]
+// CHECK-CC10: COMPLETION: foo3 : [#void#]foo3(<#int a#>)[# const#]
+// CHECK-CC10: COMPLETION: foo4 : [#void#]foo4(<#int a#>)
+}
+
+void test1(S s) {
+ s.;
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-1):5 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC11 %s
+// CHECK-CC11: COMPLETION: foo1 : [#void#]foo1(<#int a#>)
+// CHECK-CC11: COMPLETION: foo2 : [#void#]foo2(<#int a#>)[# const#]
+// CHECK-CC11: COMPLETION: foo2 : [#void#]foo2(<#float a#>)[# const#]
+// CHECK-CC11: COMPLETION: foo3 : [#void#]foo3(<#int a#>)[# const#]
+// CHECK-CC11: COMPLETION: foo4 : [#void#]foo4(<#int a#>)
+}
+
+void test2(const S s) {
+ s.;
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-1):5 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC12 %s
+// CHECK-CC12: COMPLETION: foo2 : [#void#]foo2(<#int a#>)[# const#]
+// CHECK-CC12: COMPLETION: foo2 : [#void#]foo2(<#float a#>)[# const#]
+// CHECK-CC12: COMPLETION: foo3 : [#void#]foo3(<#int a#>)[# const#]
+}
+
+void test3(S s) {
+ s.foo2();
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-1):10 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC13 %s
+// CHECK-CC13: OVERLOAD: [#void#]foo2(<#int a#>)
+// CHECK-CC13: OVERLOAD: [#void#]foo2(float a)
+// TODO: foo2 should be OVERLOAD: [#void#]foo2(<#float a#>)
+}
diff --git a/clang/test/CodeCompletion/skip-explicit-object-parameter.cpp b/clang/test/CodeCompletion/skip-explicit-object-parameter.cpp
deleted file mode 100644
index 587d6cb..0000000
--- a/clang/test/CodeCompletion/skip-explicit-object-parameter.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-struct A {
- void foo(this auto&& self, int arg);
- void bar(this A self, int arg);
-};
-
-int func1() {
- A a {};
- a.
-}
-// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-2):5 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC1 %s
-// CHECK-CC1: COMPLETION: A : A::
-// CHECK-NEXT-CC1: COMPLETION: bar : [#void#]bar(<#int arg#>)
-// CHECK-NEXT-CC1: COMPLETION: foo : [#void#]foo(<#int arg#>)
-// CHECK-NEXT-CC1: COMPLETION: operator= : [#A &#]operator=(<#const A &#>)
-// CHECK-NEXT-CC1: COMPLETION: operator= : [#A &#]operator=(<#A &&#>)
-// CHECK-NEXT-CC1: COMPLETION: ~A : [#void#]~A()
-
-struct B {
- template <typename T>
- void foo(this T&& self, int arg);
-};
-
-int func2() {
- B b {};
- b.foo();
-}
-// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-2):9 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC2 %s
-// CHECK-CC2: OVERLOAD: [#void#]foo(int arg)
-
-// TODO: llvm/llvm-project/146649
-// This is incorrect behavior. Correct Result should be a variant of,
-// CC3: should be something like [#void#]foo(<#A self#>, <#int arg#>)
-// CC4: should be something like [#void#]bar(<#A self#>, <#int arg#>)
-int func3() {
- (&A::foo)
- (&A::bar)
-}
-// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-3):10 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC3 %s
-// CHECK-CC3: COMPLETION: foo : [#void#]foo<<#class self:auto#>>(<#int arg#>)
-// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-4):10 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC4 %s
-// CHECK-CC4: COMPLETION: bar : [#void#]bar(<#int arg#>)
-
-int func4() {
- // TODO (&A::foo)(
- (&A::bar)(
-}
-// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-2):13 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC5 %s
-// CHECK-CC5: OVERLOAD: [#void#](<#A#>, int)
diff --git a/clang/test/CodeGen/2007-01-20-VectorICE.c b/clang/test/CodeGen/2007-01-20-VectorICE.c
index 286b8a1..61013ac 100644
--- a/clang/test/CodeGen/2007-01-20-VectorICE.c
+++ b/clang/test/CodeGen/2007-01-20-VectorICE.c
@@ -1,11 +1,11 @@
-// RUN: %clang_cc1 %s -emit-llvm -o -
+// RUN: %clang_cc1 %s -emit-llvm -flax-vector-conversions=none -o -
typedef float __m128 __attribute__((__vector_size__(16)));
typedef long long __v2di __attribute__((__vector_size__(16)));
typedef int __v4si __attribute__((__vector_size__(16)));
-__v2di bar(void);
+__v2di bar(void);
void foo(int X, __v4si *P) {
- *P = X == 2 ? bar() : bar();
+ *P = X == 2 ? (__v4si)bar() : (__v4si)bar();
}
diff --git a/clang/test/CodeGen/2007-06-18-SextAttrAggregate.c b/clang/test/CodeGen/2007-06-18-SextAttrAggregate.c
index daed3baf..ccfdc1a 100644
--- a/clang/test/CodeGen/2007-06-18-SextAttrAggregate.c
+++ b/clang/test/CodeGen/2007-06-18-SextAttrAggregate.c
@@ -1,5 +1,5 @@
// RUN: %clang_cc1 -no-enable-noundef-analysis %s -o - -emit-llvm | FileCheck %s
-// XFAIL: target={{(aarch64|arm64).*}}, target=x86_64-pc-windows-msvc, target=x86_64-{{(pc|w64)}}-windows-gnu
+// XFAIL: target={{(aarch64|arm64).*}}, target=x86_64-pc-windows-msvc, target=x86_64-{{(pc|w64)}}-windows-gnu, target=x86_64-pc-windows-cygnus
// PR1513
diff --git a/clang/test/CodeGen/AArch64/ABI-align-packed.c b/clang/test/CodeGen/AArch64/ABI-align-packed.c
index 0349ebc..09f9180 100644
--- a/clang/test/CodeGen/AArch64/ABI-align-packed.c
+++ b/clang/test/CodeGen/AArch64/ABI-align-packed.c
@@ -72,9 +72,9 @@ __attribute__((noinline)) void named_arg_non_packed_struct(double d0, double d1,
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6:[0-9]+]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_non_packed_struct(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -89,7 +89,7 @@ void variadic_non_packed_struct(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_NON_PACKED_STRUCT:%.*]] = alloca [[STRUCT_NON_PACKED_STRUCT:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_NON_PACKED_STRUCT]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_NON_PACKED_STRUCT]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_NON_PACKED_STRUCT]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_NON_PACKED_STRUCT]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -97,7 +97,7 @@ void variadic_non_packed_struct(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_NON_PACKED_STRUCT]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_non_packed_struct(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(16) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_NON_PACKED_STRUCT]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_NON_PACKED_STRUCT]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_non_packed_struct() {
struct non_packed_struct s_non_packed_struct;
@@ -127,9 +127,9 @@ __attribute__((noinline)) void named_arg_packed_struct(double d0, double d1, dou
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_packed_struct(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -144,7 +144,7 @@ void variadic_packed_struct(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_PACKED_STRUCT:%.*]] = alloca [[STRUCT_PACKED_STRUCT:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_PACKED_STRUCT]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_PACKED_STRUCT]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_PACKED_STRUCT]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_PACKED_STRUCT]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -152,7 +152,7 @@ void variadic_packed_struct(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_PACKED_STRUCT]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_packed_struct(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(8) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_PACKED_STRUCT]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_PACKED_STRUCT]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_packed_struct() {
struct packed_struct s_packed_struct;
@@ -182,9 +182,9 @@ __attribute__((noinline)) void named_arg_packed_member(double d0, double d1, dou
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_packed_member(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -199,7 +199,7 @@ void variadic_packed_member(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_PACKED_MEMBER:%.*]] = alloca [[STRUCT_PACKED_MEMBER:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_PACKED_MEMBER]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_PACKED_MEMBER]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_PACKED_MEMBER]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_PACKED_MEMBER]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -207,7 +207,7 @@ void variadic_packed_member(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_PACKED_MEMBER]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_packed_member(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(8) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_PACKED_MEMBER]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_PACKED_MEMBER]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_packed_member() {
struct packed_member s_packed_member;
@@ -237,9 +237,9 @@ __attribute__((noinline)) void named_arg_aligned_struct_8(double d0, double d1,
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_aligned_struct_8(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -254,7 +254,7 @@ void variadic_aligned_struct_8(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_ALIGNED_STRUCT_8:%.*]] = alloca [[STRUCT_ALIGNED_STRUCT_8:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_ALIGNED_STRUCT_8]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_ALIGNED_STRUCT_8]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_ALIGNED_STRUCT_8]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_ALIGNED_STRUCT_8]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -262,7 +262,7 @@ void variadic_aligned_struct_8(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_ALIGNED_STRUCT_8]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_aligned_struct_8(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(16) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_ALIGNED_STRUCT_8]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_ALIGNED_STRUCT_8]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_aligned_struct_8() {
struct aligned_struct_8 s_aligned_struct_8;
@@ -292,9 +292,9 @@ __attribute__((noinline)) void named_arg_aligned_member_8(double d0, double d1,
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_aligned_member_8(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -309,7 +309,7 @@ void variadic_aligned_member_8(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_ALIGNED_MEMBER_8:%.*]] = alloca [[STRUCT_ALIGNED_MEMBER_8:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_ALIGNED_MEMBER_8]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_ALIGNED_MEMBER_8]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_ALIGNED_MEMBER_8]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_ALIGNED_MEMBER_8]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -317,7 +317,7 @@ void variadic_aligned_member_8(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_ALIGNED_MEMBER_8]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_aligned_member_8(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(16) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_ALIGNED_MEMBER_8]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_ALIGNED_MEMBER_8]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_aligned_member_8() {
struct aligned_member_8 s_aligned_member_8;
@@ -347,9 +347,9 @@ __attribute__((noinline)) void named_arg_pragma_packed_struct_8(double d0, doubl
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_pragma_packed_struct_8(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -364,7 +364,7 @@ void variadic_pragma_packed_struct_8(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_PRAGMA_PACKED_STRUCT_8:%.*]] = alloca [[STRUCT_PRAGMA_PACKED_STRUCT_8:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_PRAGMA_PACKED_STRUCT_8]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_PRAGMA_PACKED_STRUCT_8]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_PRAGMA_PACKED_STRUCT_8]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_PRAGMA_PACKED_STRUCT_8]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -372,7 +372,7 @@ void variadic_pragma_packed_struct_8(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_PRAGMA_PACKED_STRUCT_8]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_pragma_packed_struct_8(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(8) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_PRAGMA_PACKED_STRUCT_8]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_PRAGMA_PACKED_STRUCT_8]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_pragma_packed_struct_8() {
struct pragma_packed_struct_8 s_pragma_packed_struct_8;
@@ -402,9 +402,9 @@ __attribute__((noinline)) void named_arg_pragma_packed_struct_4(double d0, doubl
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_pragma_packed_struct_4(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -419,7 +419,7 @@ void variadic_pragma_packed_struct_4(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_PRAGMA_PACKED_STRUCT_4:%.*]] = alloca [[STRUCT_PRAGMA_PACKED_STRUCT_4:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_PRAGMA_PACKED_STRUCT_4]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_PRAGMA_PACKED_STRUCT_4]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_PRAGMA_PACKED_STRUCT_4]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_PRAGMA_PACKED_STRUCT_4]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -427,7 +427,7 @@ void variadic_pragma_packed_struct_4(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_PRAGMA_PACKED_STRUCT_4]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_pragma_packed_struct_4(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(8) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_PRAGMA_PACKED_STRUCT_4]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_PRAGMA_PACKED_STRUCT_4]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_pragma_packed_struct_4() {
struct pragma_packed_struct_4 s_pragma_packed_struct_4;
diff --git a/clang/test/CodeGen/AArch64/byval-temp.c b/clang/test/CodeGen/AArch64/byval-temp.c
index 5033b6c..353bfa7 100644
--- a/clang/test/CodeGen/AArch64/byval-temp.c
+++ b/clang/test/CodeGen/AArch64/byval-temp.c
@@ -47,13 +47,13 @@ void example(void) {
// CHECK-O3-NEXT: %[[byvaltemp1:[0-9A-Za-z-]+]] = alloca %struct.large, align 8
//
// Mark the start of the lifetime for `l`
-// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %[[l]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(ptr %[[l]])
//
// First, memset `l` to 0.
// CHECK-O3-NEXT: call void @llvm.memset.p0.i64(ptr align 8 %[[l]], i8 0, i64 64, i1 false)
//
// Lifetime of the first temporary starts here and ends right after the call.
-// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %[[byvaltemp]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(ptr %[[byvaltemp]])
//
// Then, memcpy `l` to the temporary stack space.
// CHECK-O3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %[[byvaltemp]], ptr align 8 %[[l]], i64 64, i1 false)
@@ -61,16 +61,16 @@ void example(void) {
// CHECK-O3-NEXT: call void @pass_large(ptr dead_on_return noundef %[[byvaltemp]])
//
// The lifetime of the temporary used to pass a pointer to the struct ends here.
-// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %[[byvaltemp]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(ptr %[[byvaltemp]])
//
// Now, do the same for the second call, using the second temporary alloca.
-// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %[[byvaltemp1]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(ptr %[[byvaltemp1]])
// CHECK-O3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %[[byvaltemp1]], ptr align 8 %[[l]], i64 64, i1 false)
// CHECK-O3-NEXT: call void @pass_large(ptr dead_on_return noundef %[[byvaltemp1]])
-// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %[[byvaltemp1]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(ptr %[[byvaltemp1]])
//
// Mark the end of the lifetime of `l`.
-// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %l)
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(ptr %l)
// CHECK-O3-NEXT: ret void
void example_BitInt(void) {
@@ -101,20 +101,20 @@ void example_BitInt(void) {
// CHECK-O3-NEXT: [[L:%.*]] = alloca i256, align 16
// CHECK-O3-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i256, align 16
// CHECK-O3-NEXT: [[INDIRECT_ARG_TEMP1:%.*]] = alloca i256, align 16
-// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[L]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(ptr [[L]])
// CHECK-O3-NEXT: store i256 0, ptr [[L]], align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-O3-NEXT: [[TMP0:%.*]] = load i256, ptr [[L]], align 16, !tbaa [[TBAA6]]
// CHECK-O3-NEXT: [[LOADEDV:%.*]] = trunc i256 [[TMP0]] to i129
-// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[INDIRECT_ARG_TEMP]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(ptr [[INDIRECT_ARG_TEMP]])
// CHECK-O3-NEXT: [[STOREDV:%.*]] = sext i129 [[LOADEDV]] to i256
// CHECK-O3-NEXT: store i256 [[STOREDV]], ptr [[INDIRECT_ARG_TEMP]], align 16, !tbaa [[TBAA6]]
// CHECK-O3-NEXT: call void @pass_large_BitInt(ptr dead_on_return noundef [[INDIRECT_ARG_TEMP]])
-// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[INDIRECT_ARG_TEMP]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(ptr [[INDIRECT_ARG_TEMP]])
// CHECK-O3-NEXT: [[TMP1:%.*]] = load i256, ptr [[L]], align 16, !tbaa [[TBAA6]]
// CHECK-O3-NEXT: [[LOADEDV1:%.*]] = trunc i256 [[TMP1]] to i129
-// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[INDIRECT_ARG_TEMP1]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(ptr [[INDIRECT_ARG_TEMP1]])
// CHECK-O3-NEXT: [[STOREDV1:%.*]] = sext i129 [[LOADEDV1]] to i256
// CHECK-O3-NEXT: store i256 [[STOREDV1]], ptr [[INDIRECT_ARG_TEMP1]], align 16, !tbaa [[TBAA6]]
// CHECK-O3-NEXT: call void @pass_large_BitInt(ptr dead_on_return noundef [[INDIRECT_ARG_TEMP1]])
-// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[INDIRECT_ARG_TEMP1]])
-// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[L]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(ptr [[INDIRECT_ARG_TEMP1]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(ptr [[L]])
diff --git a/clang/test/CodeGen/AArch64/pure-scalable-args-empty-union.c b/clang/test/CodeGen/AArch64/pure-scalable-args-empty-union.c
index 804e14a..29aa532 100644
--- a/clang/test/CodeGen/AArch64/pure-scalable-args-empty-union.c
+++ b/clang/test/CodeGen/AArch64/pure-scalable-args-empty-union.c
@@ -23,7 +23,7 @@ void f0(S0 *p) {
#ifdef __cplusplus
-// PST containing an empty union with `[[no_unique_address]]`` - pass in registers.
+// PST containing an empty union with `[[no_unique_address]]` - pass in registers.
typedef struct {
fvec32 x[4];
[[no_unique_address]]
diff --git a/clang/test/CodeGen/AArch64/pure-scalable-args.c b/clang/test/CodeGen/AArch64/pure-scalable-args.c
index 48988f7a..d34c7f9 100644
--- a/clang/test/CodeGen/AArch64/pure-scalable-args.c
+++ b/clang/test/CodeGen/AArch64/pure-scalable-args.c
@@ -329,7 +329,7 @@ void test_pass_variadic(PST *p, PST *q) {
// CHECK-AAPCS: call void (<vscale x 16 x i1>, <vscale x 2 x double>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 16 x i8>, <vscale x 16 x i1>, ...) @pass_variadic_callee(<vscale x 16 x i1> %1, <vscale x 2 x double> %cast.scalable1, <vscale x 4 x float> %cast.scalable2, <vscale x 4 x float> %cast.scalable3, <vscale x 16 x i8> %cast.scalable4, <vscale x 16 x i1> %12, ptr dead_on_return noundef nonnull %byval-temp)
// CHECK-DARWIN: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(96) %byval-temp, ptr noundef nonnull align 16 dereferenceable(96) %p, i64 96, i1 false)
-// CHECK-DARWIN: call void @llvm.lifetime.start.p0(i64 96, ptr nonnull %byval-temp1)
+// CHECK-DARWIN: call void @llvm.lifetime.start.p0(ptr nonnull %byval-temp1)
// CHECK-DARWIN: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(96) %byval-temp1, ptr noundef nonnull align 16 dereferenceable(96) %q, i64 96, i1 false)
// CHECK-DARWIN: call void (ptr, ...) @pass_variadic_callee(ptr dead_on_return noundef nonnull %byval-temp, ptr dead_on_return noundef nonnull %byval-temp1)
@@ -392,7 +392,7 @@ void test_va_arg(int n, ...) {
// CHECK-AAPCS: define dso_local void @test_va_arg(i32 noundef %n, ...)
// CHECK-AAPCS-NEXT: entry:
// CHECK-AAPCS-NEXT: %ap = alloca %struct.__va_list, align 8
-// CHECK-AAPCS-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %ap)
+// CHECK-AAPCS-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %ap)
// CHECK-AAPCS-NEXT: call void @llvm.va_start.p0(ptr nonnull %ap)
// CHECK-AAPCS-NEXT: %gr_offs_p = getelementptr inbounds nuw i8, ptr %ap, i64 24
// CHECK-AAPCS-NEXT: %gr_offs = load i32, ptr %gr_offs_p, align 8
@@ -435,14 +435,14 @@ void test_va_arg(int n, ...) {
// CHECK-AAPCS-NEXT: %3 = bitcast <vscale x 2 x i8> %cast.scalable to <vscale x 16 x i1>
// CHECK-AAPCS-NEXT: %cast.scalable2 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %v.sroa.43.0.copyload, i64 0)
// CHECK-AAPCS-NEXT: call void @use1(<vscale x 16 x i1> noundef %3, <vscale x 4 x float> noundef %cast.scalable2)
-// CHECK-AAPCS-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %ap)
+// CHECK-AAPCS-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %ap)
// CHECK-AAPCS-NEXT: ret void
// CHECK-AAPCS-NEXT: }
// CHECK-DARWIN: define void @test_va_arg(i32 noundef %n, ...)
// CHECK-DARWIN-NEXT: entry:
// CHECK-DARWIN-NEXT: %ap = alloca ptr, align 8
-// CHECK-DARWIN-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %ap)
+// CHECK-DARWIN-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %ap)
// CHECK-DARWIN-NEXT: call void @llvm.va_start.p0(ptr nonnull %ap)
// CHECK-DARWIN-NEXT: %argp.cur = load ptr, ptr %ap, align 8
// CHECK-DARWIN-NEXT: %argp.next = getelementptr inbounds nuw i8, ptr %argp.cur, i64 8
@@ -456,7 +456,7 @@ void test_va_arg(int n, ...) {
// CHECK-DARWIN-NEXT: %1 = bitcast <vscale x 2 x i8> %cast.scalable to <vscale x 16 x i1>
// CHECK-DARWIN-NEXT: %cast.scalable2 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %v.sroa.43.0.copyload, i64 0)
// CHECK-DARWIN-NEXT: call void @use1(<vscale x 16 x i1> noundef %1, <vscale x 4 x float> noundef %cast.scalable2)
-// CHECK-DARWIN-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %ap)
+// CHECK-DARWIN-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %ap)
// CHECK-DARWIN-NEXT: ret void
// CHECK-DARWIN-NEXT: }
diff --git a/clang/test/CodeGen/AArch64/sme-intrinsics/acle_sme_ld1_vnum.c b/clang/test/CodeGen/AArch64/sme-intrinsics/acle_sme_ld1_vnum.c
index fb86690..e4c93ad 100644
--- a/clang/test/CodeGen/AArch64/sme-intrinsics/acle_sme_ld1_vnum.c
+++ b/clang/test/CodeGen/AArch64/sme-intrinsics/acle_sme_ld1_vnum.c
@@ -9,29 +9,31 @@
// CHECK-C-LABEL: define dso_local void @test_svld1_hor_vnum_za8(
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
// CHECK-C-NEXT: entry:
-// CHECK-C-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP0]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP3:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP3]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[ADD]], 15
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP4]])
+// CHECK-C-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP1:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[TMP0]]
+// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 15
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP5]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z23test_svld1_hor_vnum_za8ju10__SVBool_tPKvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
// CHECK-CXX-NEXT: entry:
-// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP0]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP3]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[ADD]], 15
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP4]])
+// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[TMP0]]
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 15
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP5]])
// CHECK-CXX-NEXT: ret void
//
void test_svld1_hor_vnum_za8(uint32_t slice_base, svbool_t pg, const void *ptr, int64_t vnum) __arm_streaming __arm_out("za") {
@@ -43,30 +45,32 @@ void test_svld1_hor_vnum_za8(uint32_t slice_base, svbool_t pg, const void *ptr,
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 7
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 1, i32 [[TMP5]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 7
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 1, i32 [[TMP6]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z24test_svld1_hor_vnum_za16ju10__SVBool_tPKvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 7
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 1, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 7
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 1, i32 [[TMP6]])
// CHECK-CXX-NEXT: ret void
//
void test_svld1_hor_vnum_za16(uint32_t slice_base, svbool_t pg, const void *ptr, int64_t vnum) __arm_streaming __arm_out("za") {
@@ -78,30 +82,32 @@ void test_svld1_hor_vnum_za16(uint32_t slice_base, svbool_t pg, const void *ptr,
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 3
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 3, i32 [[TMP5]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 3
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 3, i32 [[TMP6]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z24test_svld1_hor_vnum_za32ju10__SVBool_tPKvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 3
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 3, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 3
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 3, i32 [[TMP6]])
// CHECK-CXX-NEXT: ret void
//
void test_svld1_hor_vnum_za32(uint32_t slice_base, svbool_t pg, const void *ptr, int64_t vnum) __arm_streaming __arm_out("za") {
@@ -113,30 +119,32 @@ void test_svld1_hor_vnum_za32(uint32_t slice_base, svbool_t pg, const void *ptr,
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 1
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 7, i32 [[TMP5]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 1
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 7, i32 [[TMP6]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z24test_svld1_hor_vnum_za64ju10__SVBool_tPKvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 1
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 7, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 1
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 7, i32 [[TMP6]])
// CHECK-CXX-NEXT: ret void
//
void test_svld1_hor_vnum_za64(uint32_t slice_base, svbool_t pg, const void *ptr, int64_t vnum) __arm_streaming __arm_out("za") {
@@ -148,26 +156,28 @@ void test_svld1_hor_vnum_za64(uint32_t slice_base, svbool_t pg, const void *ptr,
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 15, i32 [[TMP4]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 15, i32 [[TMP5]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z25test_svld1_hor_vnum_za128ju10__SVBool_tPKvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 15, i32 [[TMP4]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 15, i32 [[TMP5]])
// CHECK-CXX-NEXT: ret void
//
void test_svld1_hor_vnum_za128(uint32_t slice_base, svbool_t pg, const void *ptr, int64_t vnum) __arm_streaming __arm_out("za") {
@@ -178,29 +188,31 @@ void test_svld1_hor_vnum_za128(uint32_t slice_base, svbool_t pg, const void *ptr
// CHECK-C-LABEL: define dso_local void @test_svld1_ver_hor_za8(
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
-// CHECK-C-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP0]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP3:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP3]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[ADD]], 15
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP4]])
+// CHECK-C-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP1:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[TMP0]]
+// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 15
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP5]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z22test_svld1_ver_hor_za8ju10__SVBool_tPKvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
-// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP0]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP3]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[ADD]], 15
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP4]])
+// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[TMP0]]
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 15
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP5]])
// CHECK-CXX-NEXT: ret void
//
void test_svld1_ver_hor_za8(uint32_t slice_base, svbool_t pg, const void *ptr, int64_t vnum) __arm_streaming __arm_out("za") {
@@ -212,30 +224,32 @@ void test_svld1_ver_hor_za8(uint32_t slice_base, svbool_t pg, const void *ptr, i
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 7
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 1, i32 [[TMP5]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 7
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 1, i32 [[TMP6]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z24test_svld1_ver_vnum_za16ju10__SVBool_tPKvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 7
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 1, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 7
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 1, i32 [[TMP6]])
// CHECK-CXX-NEXT: ret void
//
void test_svld1_ver_vnum_za16(uint32_t slice_base, svbool_t pg, const void *ptr, int64_t vnum) __arm_streaming __arm_out("za") {
@@ -247,30 +261,32 @@ void test_svld1_ver_vnum_za16(uint32_t slice_base, svbool_t pg, const void *ptr,
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 3
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 3, i32 [[TMP5]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 3
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 3, i32 [[TMP6]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z24test_svld1_ver_vnum_za32ju10__SVBool_tPKvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 3
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 3, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 3
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 3, i32 [[TMP6]])
// CHECK-CXX-NEXT: ret void
//
void test_svld1_ver_vnum_za32(uint32_t slice_base, svbool_t pg, const void *ptr, int64_t vnum) __arm_streaming __arm_out("za") {
@@ -282,30 +298,32 @@ void test_svld1_ver_vnum_za32(uint32_t slice_base, svbool_t pg, const void *ptr,
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 1
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 7, i32 [[TMP5]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 1
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 7, i32 [[TMP6]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z24test_svld1_ver_vnum_za64ju10__SVBool_tPKvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 1
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 7, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 1
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 7, i32 [[TMP6]])
// CHECK-CXX-NEXT: ret void
//
void test_svld1_ver_vnum_za64(uint32_t slice_base, svbool_t pg, const void *ptr, int64_t vnum) __arm_streaming __arm_out("za") {
@@ -317,26 +335,28 @@ void test_svld1_ver_vnum_za64(uint32_t slice_base, svbool_t pg, const void *ptr,
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 15, i32 [[TMP4]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.ld1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 15, i32 [[TMP5]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z25test_svld1_ver_vnum_za128ju10__SVBool_tPKvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 15, i32 [[TMP4]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.ld1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 15, i32 [[TMP5]])
// CHECK-CXX-NEXT: ret void
//
void test_svld1_ver_vnum_za128(uint32_t slice_base, svbool_t pg, const void *ptr, int64_t vnum) __arm_streaming __arm_out("za") {
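
A short sketch (not part of the tests above) of the address computation the SME hunks rewrite, under the assumption that the streaming vector length in bytes, which llvm.aarch64.sme.cntsb returned, equals 16 * vscale: the byte offset vnum * SVL_B is now emitted as (vnum << 4) * vscale rather than cntsb() * vnum. The function name offset_by_vnum is illustrative only.

declare i64 @llvm.vscale.i64()

define ptr @offset_by_vnum(ptr %base, i64 %vnum) {
entry:
  %vscale = call i64 @llvm.vscale.i64()
  %bytes  = shl i64 %vnum, 4                ; vnum * 16
  %mulvl  = mul i64 %bytes, %vscale         ; vnum * 16 * vscale == vnum * SVL_B
  %ptr    = getelementptr i8, ptr %base, i64 %mulvl
  ret ptr %ptr
}
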
diff --git a/clang/test/CodeGen/AArch64/sme-intrinsics/acle_sme_st1_vnum.c b/clang/test/CodeGen/AArch64/sme-intrinsics/acle_sme_st1_vnum.c
index dafc3d6..22a0b9e 100644
--- a/clang/test/CodeGen/AArch64/sme-intrinsics/acle_sme_st1_vnum.c
+++ b/clang/test/CodeGen/AArch64/sme-intrinsics/acle_sme_st1_vnum.c
@@ -9,29 +9,31 @@
// CHECK-C-LABEL: define dso_local void @test_svst1_hor_vnum_za8(
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
// CHECK-C-NEXT: entry:
-// CHECK-C-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP0]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP3:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP3]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[ADD]], 15
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP4]])
+// CHECK-C-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP1:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[TMP0]]
+// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 15
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP5]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z23test_svst1_hor_vnum_za8ju10__SVBool_tPvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
// CHECK-CXX-NEXT: entry:
-// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP0]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP3]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[ADD]], 15
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP4]])
+// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[TMP0]]
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 15
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1b.horiz(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP5]])
// CHECK-CXX-NEXT: ret void
//
void test_svst1_hor_vnum_za8(uint32_t slice_base, svbool_t pg, void *ptr, int64_t vnum) __arm_streaming __arm_in("za") {
@@ -43,30 +45,32 @@ void test_svst1_hor_vnum_za8(uint32_t slice_base, svbool_t pg, void *ptr, int64_
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 7
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 1, i32 [[TMP5]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 7
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 1, i32 [[TMP6]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z24test_svst1_hor_vnum_za16ju10__SVBool_tPvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 7
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 1, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 7
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1h.horiz(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 1, i32 [[TMP6]])
// CHECK-CXX-NEXT: ret void
//
void test_svst1_hor_vnum_za16(uint32_t slice_base, svbool_t pg, void *ptr, int64_t vnum) __arm_streaming __arm_in("za") {
@@ -78,30 +82,32 @@ void test_svst1_hor_vnum_za16(uint32_t slice_base, svbool_t pg, void *ptr, int64
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 3
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 3, i32 [[TMP5]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 3
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 3, i32 [[TMP6]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z24test_svst1_hor_vnum_za32ju10__SVBool_tPvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 3
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 3, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 3
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1w.horiz(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 3, i32 [[TMP6]])
// CHECK-CXX-NEXT: ret void
//
void test_svst1_hor_vnum_za32(uint32_t slice_base, svbool_t pg, void *ptr, int64_t vnum) __arm_streaming __arm_in("za") {
@@ -113,30 +119,32 @@ void test_svst1_hor_vnum_za32(uint32_t slice_base, svbool_t pg, void *ptr, int64
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 1
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 7, i32 [[TMP5]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 1
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 7, i32 [[TMP6]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z24test_svst1_hor_vnum_za64ju10__SVBool_tPvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 1
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 7, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 1
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1d.horiz(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 7, i32 [[TMP6]])
// CHECK-CXX-NEXT: ret void
//
void test_svst1_hor_vnum_za64(uint32_t slice_base, svbool_t pg, void *ptr, int64_t vnum) __arm_streaming __arm_in("za") {
@@ -148,26 +156,28 @@ void test_svst1_hor_vnum_za64(uint32_t slice_base, svbool_t pg, void *ptr, int64
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 15, i32 [[TMP4]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 15, i32 [[TMP5]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z25test_svst1_hor_vnum_za128ju10__SVBool_tPvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 15, i32 [[TMP4]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1q.horiz(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 15, i32 [[TMP5]])
// CHECK-CXX-NEXT: ret void
//
void test_svst1_hor_vnum_za128(uint32_t slice_base, svbool_t pg, void *ptr, int64_t vnum) __arm_streaming __arm_in("za") {
@@ -178,29 +188,31 @@ void test_svst1_hor_vnum_za128(uint32_t slice_base, svbool_t pg, void *ptr, int6
// CHECK-C-LABEL: define dso_local void @test_svst1_ver_vnum_za8(
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
-// CHECK-C-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP0]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP3:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP3]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[ADD]], 15
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP4]])
+// CHECK-C-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP1:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[TMP0]]
+// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 15
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP5]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z23test_svst1_ver_vnum_za8ju10__SVBool_tPvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
-// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP0]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP3]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP2]]
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[ADD]], 15
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP1]], i32 0, i32 [[TMP4]])
+// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[TMP0]]
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 15
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1b.vert(<vscale x 16 x i1> [[PG]], ptr [[TMP2]], i32 0, i32 [[TMP5]])
// CHECK-CXX-NEXT: ret void
//
void test_svst1_ver_vnum_za8(uint32_t slice_base, svbool_t pg, void *ptr, int64_t vnum) __arm_streaming __arm_in("za") {
@@ -212,30 +224,32 @@ void test_svst1_ver_vnum_za8(uint32_t slice_base, svbool_t pg, void *ptr, int64_
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 7
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 1, i32 [[TMP5]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 7
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 1, i32 [[TMP6]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z24test_svst1_ver_vnum_za16ju10__SVBool_tPvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 7
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP2]], i32 1, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 7
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1h.vert(<vscale x 8 x i1> [[TMP0]], ptr [[TMP3]], i32 1, i32 [[TMP6]])
// CHECK-CXX-NEXT: ret void
//
void test_svst1_ver_vnum_za16(uint32_t slice_base, svbool_t pg, void *ptr, int64_t vnum) __arm_streaming __arm_in("za") {
@@ -247,30 +261,32 @@ void test_svst1_ver_vnum_za16(uint32_t slice_base, svbool_t pg, void *ptr, int64
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 3
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 3, i32 [[TMP5]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 3
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 3, i32 [[TMP6]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z24test_svst1_ver_vnum_za32ju10__SVBool_tPvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 3
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP2]], i32 3, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 3
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1w.vert(<vscale x 4 x i1> [[TMP0]], ptr [[TMP3]], i32 3, i32 [[TMP6]])
// CHECK-CXX-NEXT: ret void
//
void test_svst1_ver_vnum_za32(uint32_t slice_base, svbool_t pg, void *ptr, int64_t vnum) __arm_streaming __arm_in("za") {
@@ -282,30 +298,32 @@ void test_svst1_ver_vnum_za32(uint32_t slice_base, svbool_t pg, void *ptr, int64
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 1
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 7, i32 [[TMP5]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 1
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 7, i32 [[TMP6]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z24test_svst1_ver_vnum_za64ju10__SVBool_tPvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[ADD]], 1
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP2]], i32 7, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: [[ADD:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: [[TMP6:%.*]] = add i32 [[ADD]], 1
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1d.vert(<vscale x 2 x i1> [[TMP0]], ptr [[TMP3]], i32 7, i32 [[TMP6]])
// CHECK-CXX-NEXT: ret void
//
void test_svst1_ver_vnum_za64(uint32_t slice_base, svbool_t pg, void *ptr, int64_t vnum) __arm_streaming __arm_in("za") {
@@ -317,26 +335,28 @@ void test_svst1_ver_vnum_za64(uint32_t slice_base, svbool_t pg, void *ptr, int64
// CHECK-C-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-C-NEXT: entry:
// CHECK-C-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG]])
-// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-C-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-C-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-C-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 15, i32 [[TMP4]])
+// CHECK-C-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-C-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-C-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-C-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-C-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-C-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-C-NEXT: tail call void @llvm.aarch64.sme.st1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 15, i32 [[TMP5]])
// CHECK-C-NEXT: ret void
//
// CHECK-CXX-LABEL: define dso_local void @_Z25test_svst1_ver_vnum_za128ju10__SVBool_tPvl(
// CHECK-CXX-SAME: i32 noundef [[SLICE_BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], ptr noundef [[PTR:%.*]], i64 noundef [[VNUM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-CXX-NEXT: entry:
// CHECK-CXX-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG]])
-// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.aarch64.sme.cntsb()
-// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP1]], [[VNUM]]
-// CHECK-CXX-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
-// CHECK-CXX-NEXT: [[TMP3:%.*]] = trunc i64 [[VNUM]] to i32
-// CHECK-CXX-NEXT: [[TMP4:%.*]] = add i32 [[SLICE_BASE]], [[TMP3]]
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 0, i32 [[TMP4]])
-// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP2]], i32 15, i32 [[TMP4]])
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+// CHECK-CXX-NEXT: [[TMP2:%.*]] = shl i64 [[VNUM]], 4
+// CHECK-CXX-NEXT: [[MULVL:%.*]] = mul i64 [[TMP2]], [[TMP1]]
+// CHECK-CXX-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[MULVL]]
+// CHECK-CXX-NEXT: [[TMP4:%.*]] = trunc i64 [[VNUM]] to i32
+// CHECK-CXX-NEXT: [[TMP5:%.*]] = add i32 [[SLICE_BASE]], [[TMP4]]
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 0, i32 [[TMP5]])
+// CHECK-CXX-NEXT: tail call void @llvm.aarch64.sme.st1q.vert(<vscale x 1 x i1> [[TMP0]], ptr [[TMP3]], i32 15, i32 [[TMP5]])
// CHECK-CXX-NEXT: ret void
//
void test_svst1_ver_vnum_za128(uint32_t slice_base, svbool_t pg, void *ptr, int64_t vnum) __arm_streaming __arm_in("za") {
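
Note on the rewritten CHECK lines above: the vnum byte offset is now computed from llvm.vscale.i64 plus a shift instead of llvm.aarch64.sme.cntsb. The two forms are equivalent in streaming code, where the streaming vector length in bytes is 16 * vscale, so cntsb() * vnum == (vnum << 4) * vscale. A minimal C sketch of the same address computation, assuming an SME-enabled build (the helper name is illustrative, not from the tests):

#include <arm_sme.h>
#include <stdint.h>

// Sketch only: svcntsb() returns the streaming vector length in bytes,
// i.e. 16 * vscale, matching the (vnum << 4) * vscale sequence above.
static void *vnum_byte_offset(void *ptr, int64_t vnum) {
  return (char *)ptr + (int64_t)svcntsb() * vnum;
}
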
diff --git a/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c b/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
index d244a8b..4a1185d0 100644
--- a/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
+++ b/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
@@ -86,10 +86,10 @@ typedef svint8_t vec2 __attribute__((arm_sve_vector_bits(N)));
// CHECK-NEXT: entry:
// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
// CHECK-NEXT: [[X:%.*]] = tail call <[[#div(VBITS,8)]] x i8> @llvm.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[X_COERCE:%.*]], i64 0)
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 [[SIZE:[0-9]+]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X]], ptr [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]]
// CHECK-NEXT: call void @f3(ptr dead_on_return noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 [[SIZE]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECK-NEXT: ret void
// CHECK128-LABEL: declare void @f3(<16 x i8> noundef)
diff --git a/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp b/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
index d42ecb6..6211b60 100644
--- a/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
+++ b/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
@@ -73,10 +73,10 @@ typedef svint16_t vec2 __attribute__((arm_sve_vector_bits(N)));
// CHECK128-NEXT: ret void
// CHECKWIDE-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS, 16)]] x i16>, align 16
// CHECKWIDE-NEXT: [[X:%.*]] = tail call <[[#div(VBITS, 16)]] x i16> @llvm.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
-// CHECKWIDE-NEXT: call void @llvm.lifetime.start.p0(i64 [[SIZE:[0-9]+]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
+// CHECKWIDE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECKWIDE-NEXT: store <[[#div(VBITS, 16)]] x i16> [[X]], ptr [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6:!tbaa !.*]]
// CHECKWIDE-NEXT: call void @_Z1fDv[[#div(VBITS, 16)]]_s(ptr dead_on_return noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
-// CHECKWIDE-NEXT: call void @llvm.lifetime.end.p0(i64 [[SIZE]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
+// CHECKWIDE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECKWIDE-NEXT: ret void
void g(vec2 x) { f(x); } // OK
#endif
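
Both lifetime hunks above (and the riscv-v-lifetime.cpp hunk further down) track LLVM's move to size-less lifetime intrinsics: llvm.lifetime.start.p0 and llvm.lifetime.end.p0 now take only the alloca pointer, with the size taken from the alloca itself. The C under test is unchanged; a reduced sketch of the pattern that creates the marked temporary, assuming -msve-vector-bits=256 (f3 mirrors the test's callee, the rest is illustrative):

#include <arm_sve.h>

#if defined(__ARM_FEATURE_SVE_BITS) && __ARM_FEATURE_SVE_BITS == 256
typedef svint8_t vec __attribute__((arm_sve_vector_bits(256)));

void f3(vec x); // wide fixed-length vectors are passed via a stack temporary

// The temporary's lifetime.start/lifetime.end calls are what the updated
// CHECK lines match, now without the i64 size operand.
void g(vec x) { f3(x); }
#endif
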
diff --git a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesd.c b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesd.c
index 0839b32..17f4a54 100644
--- a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesd.c
+++ b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesd.c
@@ -4,6 +4,11 @@
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+
// REQUIRES: aarch64-registered-target
#include <arm_sve.h>
@@ -15,6 +20,12 @@
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
+#ifdef __ARM_FEATURE_SME
+#define STREAMING __arm_streaming
+#else
+#define STREAMING
+#endif
+
// CHECK-LABEL: @test_svaesd_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.aesd(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
@@ -25,7 +36,7 @@
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.aesd(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-svuint8_t test_svaesd_u8(svuint8_t op1, svuint8_t op2)
+svuint8_t test_svaesd_u8(svuint8_t op1, svuint8_t op2) STREAMING
{
return SVE_ACLE_FUNC(svaesd,_u8,,)(op1, op2);
}
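
The STREAMING macro introduced here (and in the aese, aesimc, pmullb and pmullt tests below; aesmc uses the same pattern under the name MODE_ATTR) lets one source file serve both the plain SVE2 RUN lines and the new SME +ssve-aes RUN lines: when __ARM_FEATURE_SME is defined, every test function gains the __arm_streaming attribute, and otherwise the macro expands to nothing. Reduced to its essentials (the function is illustrative):

#include <arm_sve.h>

#ifdef __ARM_FEATURE_SME
#define STREAMING __arm_streaming
#else
#define STREAMING
#endif

// Under +sme this declares: svuint8_t f(svuint8_t x) __arm_streaming
svuint8_t f(svuint8_t x) STREAMING { return x; }
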
diff --git a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aese.c b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aese.c
index 08ca748..768c8ef 100644
--- a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aese.c
+++ b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aese.c
@@ -4,6 +4,11 @@
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+
// REQUIRES: aarch64-registered-target
#include <arm_sve.h>
@@ -15,6 +20,12 @@
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
+#if defined(__ARM_FEATURE_SME)
+#define STREAMING __arm_streaming
+#else
+#define STREAMING
+#endif
+
// CHECK-LABEL: @test_svaese_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.aese(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
@@ -25,7 +36,7 @@
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.aese(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-svuint8_t test_svaese_u8(svuint8_t op1, svuint8_t op2)
+svuint8_t test_svaese_u8(svuint8_t op1, svuint8_t op2) STREAMING
{
return SVE_ACLE_FUNC(svaese,_u8,,)(op1, op2);
}
diff --git a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesimc.c b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesimc.c
index 78d3deb..4574589 100644
--- a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesimc.c
+++ b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesimc.c
@@ -4,6 +4,12 @@
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+
+
// REQUIRES: aarch64-registered-target
#include <arm_sve.h>
@@ -15,6 +21,13 @@
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
+#if defined(__ARM_FEATURE_SME)
+#define STREAMING __arm_streaming
+#else
+#define STREAMING
+#endif
+
+
// CHECK-LABEL: @test_svaesimc_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.aesimc(<vscale x 16 x i8> [[OP:%.*]])
@@ -25,7 +38,7 @@
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.aesimc(<vscale x 16 x i8> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-svuint8_t test_svaesimc_u8(svuint8_t op)
+svuint8_t test_svaesimc_u8(svuint8_t op) STREAMING
{
return SVE_ACLE_FUNC(svaesimc,_u8,,)(op);
}
diff --git a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesmc.c b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesmc.c
index 48d1301..249eaba 100644
--- a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesmc.c
+++ b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_aesmc.c
@@ -4,6 +4,11 @@
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+
// REQUIRES: aarch64-registered-target
#include <arm_sve.h>
@@ -15,6 +20,12 @@
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
+#if defined(__ARM_FEATURE_SME)
+#define MODE_ATTR __arm_streaming
+#else
+#define MODE_ATTR
+#endif
+
// CHECK-LABEL: @test_svaesmc_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.aesmc(<vscale x 16 x i8> [[OP:%.*]])
@@ -25,7 +36,7 @@
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.aesmc(<vscale x 16 x i8> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-svuint8_t test_svaesmc_u8(svuint8_t op)
+svuint8_t test_svaesmc_u8(svuint8_t op) MODE_ATTR
{
return SVE_ACLE_FUNC(svaesmc,_u8,,)(op);
}
diff --git a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_pmullb_128.c b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_pmullb_128.c
index 09583f98..3ca7ff7 100644
--- a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_pmullb_128.c
+++ b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_pmullb_128.c
@@ -6,6 +6,11 @@
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+
#include <arm_sve.h>
#ifdef SVE_OVERLOADED_FORMS
@@ -15,6 +20,15 @@
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
+#ifdef __ARM_FEATURE_SME
+#define STREAMING __arm_streaming
+#else
+#define STREAMING
+#endif
+
+
+//
+//
// CHECK-LABEL: @test_svpmullb_pair_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.pmullb.pair.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
@@ -25,11 +39,14 @@
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.pmullb.pair.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-svuint64_t test_svpmullb_pair_u64(svuint64_t op1, svuint64_t op2)
+svuint64_t test_svpmullb_pair_u64(svuint64_t op1, svuint64_t op2) STREAMING
{
return SVE_ACLE_FUNC(svpmullb_pair,_u64,,)(op1, op2);
}
+
+//
+//
// CHECK-LABEL: @test_svpmullb_pair_n_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
@@ -44,7 +61,7 @@ svuint64_t test_svpmullb_pair_u64(svuint64_t op1, svuint64_t op2)
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.pmullb.pair.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-svuint64_t test_svpmullb_pair_n_u64(svuint64_t op1, uint64_t op2)
+svuint64_t test_svpmullb_pair_n_u64(svuint64_t op1, uint64_t op2) STREAMING
{
return SVE_ACLE_FUNC(svpmullb_pair,_n_u64,,)(op1, op2);
}
diff --git a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_pmullt_128.c b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_pmullt_128.c
index a4ffc31..d97248c 100644
--- a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_pmullt_128.c
+++ b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_pmullt_128.c
@@ -6,6 +6,11 @@
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +ssve-aes -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+
#include <arm_sve.h>
#ifdef SVE_OVERLOADED_FORMS
@@ -15,6 +20,13 @@
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
+#ifdef __ARM_FEATURE_SME
+#define STREAMING __arm_streaming
+#else
+#define STREAMING
+#endif
+
+
// CHECK-LABEL: @test_svpmullt_pair_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.pmullt.pair.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
@@ -25,11 +37,12 @@
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.pmullt.pair.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-svuint64_t test_svpmullt_pair_u64(svuint64_t op1, svuint64_t op2)
+svuint64_t test_svpmullt_pair_u64(svuint64_t op1, svuint64_t op2) STREAMING
{
return SVE_ACLE_FUNC(svpmullt_pair,_u64,,)(op1, op2);
}
+
// CHECK-LABEL: @test_svpmullt_pair_n_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
@@ -44,7 +57,7 @@ svuint64_t test_svpmullt_pair_u64(svuint64_t op1, svuint64_t op2)
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.pmullt.pair.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-svuint64_t test_svpmullt_pair_n_u64(svuint64_t op1, uint64_t op2)
+svuint64_t test_svpmullt_pair_n_u64(svuint64_t op1, uint64_t op2) STREAMING
{
return SVE_ACLE_FUNC(svpmullt_pair,_n_u64,,)(op1, op2);
}
diff --git a/clang/test/CodeGen/LoongArch/targetattr-lasx.c b/clang/test/CodeGen/LoongArch/targetattr-lasx.c
new file mode 100644
index 0000000..56fd657
--- /dev/null
+++ b/clang/test/CodeGen/LoongArch/targetattr-lasx.c
@@ -0,0 +1,7 @@
+// RUN: %clang_cc1 -triple loongarch64 -target-feature -lsx -emit-llvm %s -o - | FileCheck %s
+
+__attribute__((target("lasx")))
+// CHECK: #[[ATTR0:[0-9]+]] {
+void testlasx() {}
+
+// CHECK: attributes #[[ATTR0]] = { {{.*}}"target-features"="+64bit,+lasx,+lsx"{{.*}} }
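
The new LoongArch test pins down a feature implication: the RUN line disables LSX (-target-feature -lsx), yet the per-function target("lasx") attribute yields "+64bit,+lasx,+lsx" because LASX implies LSX. A companion sketch, with illustrative names, showing that only the attributed function picks the features back up:

__attribute__((target("lasx")))
void with_lasx(void) {}    // gets +lasx and, by implication, +lsx

void without_lasx(void) {} // keeps the command line's -lsx
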
diff --git a/clang/test/CodeGen/PowerPC/builtins-bcd-format-conversion.c b/clang/test/CodeGen/PowerPC/builtins-bcd-format-conversion.c
new file mode 100644
index 0000000..0aeb720
--- /dev/null
+++ b/clang/test/CodeGen/PowerPC/builtins-bcd-format-conversion.c
@@ -0,0 +1,29 @@
+// NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: powerpc-registered-target
+// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown -O2 -target-cpu pwr9 \
+// RUN: -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -O2 -target-cpu pwr9 \
+// RUN: -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple powerpc-unknown-unknown -O2 -target-cpu pwr9 \
+// RUN: -emit-llvm %s -o - | FileCheck %s
+
+// CHECK-LABEL: test_bcdcopysign
+// CHECK: [[TMP0:%.*]] = tail call <16 x i8> @llvm.ppc.bcdcopysign(<16 x i8> %a, <16 x i8> %b)
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+vector unsigned char test_bcdcopysign(vector unsigned char a, vector unsigned char b) {
+ return __builtin_ppc_bcdcopysign(a, b);
+}
+
+// CHECK-LABEL: test_bcdsetsign_imm0
+// CHECK: [[TMP0:%.*]] = tail call <16 x i8> @llvm.ppc.bcdsetsign(<16 x i8> %a, i32 0)
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+vector unsigned char test_bcdsetsign_imm0(vector unsigned char a) {
+ return __builtin_ppc_bcdsetsign(a, '\0');
+}
+
+// CHECK-LABEL: test_bcdsetsign_imm1
+// CHECK: [[TMP0:%.*]] = tail call <16 x i8> @llvm.ppc.bcdsetsign(<16 x i8> %a, i32 1)
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+vector unsigned char test_bcdsetsign_imm1(vector unsigned char a) {
+ return __builtin_ppc_bcdsetsign(a, '\1');
+}
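
In the BCD test above, the sign operand of __builtin_ppc_bcdsetsign must fold to a constant; the character literals '\0' and '\1' lower to the i32 immediates 0 and 1 seen in the CHECK lines. A plain integer constant gives the same IR, as in this sketch (assumes the same pwr9 RUN-line flags; the function name is illustrative):

vector unsigned char test_bcdsetsign_int(vector unsigned char a) {
  return __builtin_ppc_bcdsetsign(a, 1); // identical IR to the '\1' form above
}
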
diff --git a/clang/test/CodeGen/PowerPC/builtins-ppc-dmf.c b/clang/test/CodeGen/PowerPC/builtins-ppc-dmf.c
index 4aafc09..c66f5e2 100644
--- a/clang/test/CodeGen/PowerPC/builtins-ppc-dmf.c
+++ b/clang/test/CodeGen/PowerPC/builtins-ppc-dmf.c
@@ -93,18 +93,36 @@ void test_pmdmxvi8gerx4spp(unsigned char *vdmrp, unsigned char *vpp, vector unsi
*((__dmr1024 *)resp) = vdmr;
}
-// CHECK-LABEL: @test_dmf_basic
-// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmsetdmrz()
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmmr(<1024 x i1> [[TMP0]])
-// CHECK-NEXT: store <1024 x i1> [[TMP1]], ptr %res1, align 128
-// CHECK-NEXT: [[TMP2:%.*]] = load <1024 x i1>, ptr %res2, align 128
-// CHECK-NEXT: [[TMP3:%.*]] = load <1024 x i1>, ptr %p, align 128
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxor(<1024 x i1> [[TMP2]], <1024 x i1> [[TMP3]])
-// CHECK-NEXT: store <1024 x i1> [[TMP4]], ptr %res2, align 128
+// CHECK-LABEL: @test_dmf_basic(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmsetdmrz()
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmmr(<1024 x i1> [[TMP0]])
+// CHECK-NEXT: store <1024 x i1> [[TMP1]], ptr [[RES1:%.*]], align 128
+// CHECK-NEXT: [[TMP2:%.*]] = load <1024 x i1>, ptr [[RES2:%.*]], align 128
+// CHECK-NEXT: [[TMP3:%.*]] = load <1024 x i1>, ptr [[P:%.*]], align 128
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxor(<1024 x i1> [[TMP2]], <1024 x i1> [[TMP3]])
+// CHECK-NEXT: store <1024 x i1> [[TMP4]], ptr [[RES2]], align 128
+// CHECK-NEXT: ret void
+//
void test_dmf_basic(char *p, char *res1, char *res2) {
__dmr1024 x[2];
__builtin_mma_dmsetdmrz(&x[0]);
__builtin_mma_dmmr((__dmr1024*)res1, &x[0]);
__builtin_mma_dmxor((__dmr1024*)res2, (__dmr1024*)p);
}
+
+// CHECK-LABEL: @test_dmf_basic2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[V:%.*]], align 16, !tbaa [[TBAA8:![0-9]+]]
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.build.dmr(<16 x i8> [[TMP0]], <16 x i8> [[TMP0]], <16 x i8> [[TMP0]], <16 x i8> [[TMP0]], <16 x i8> [[TMP0]], <16 x i8> [[TMP0]], <16 x i8> [[TMP0]], <16 x i8> [[TMP0]])
+// CHECK-NEXT: store <1024 x i1> [[TMP1]], ptr [[RES2:%.*]], align 128
+// CHECK-NEXT: [[TMP2:%.*]] = load <1024 x i1>, ptr [[P1:%.*]], align 128
+// CHECK-NEXT: store <1024 x i1> [[TMP2]], ptr [[RES1:%.*]], align 128
+// CHECK-NEXT: ret void
+//
+void test_dmf_basic2(char *p1, char *res1, char *res2,
+ vector unsigned char *v) {
+ vector unsigned char vv = *v;
+ __builtin_mma_build_dmr((__dmr1024*)res2, vv, vv, vv, vv, vv, vv, vv, vv);
+ __builtin_mma_disassemble_dmr(res1, (__dmr1024*)p1);
+}
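
test_dmf_basic2 above rounds out coverage of the 1024-bit dense-math register builtins: __builtin_mma_build_dmr assembles a __dmr1024 from eight 16-byte vectors, and __builtin_mma_disassemble_dmr spills one back through a plain pointer. The pairing, reduced to a sketch (hypothetical helper name; same builtins and flags as the RUN lines):

void dmr_round_trip(unsigned char *out, vector unsigned char v) {
  __dmr1024 d;
  __builtin_mma_build_dmr(&d, v, v, v, v, v, v, v, v);
  __builtin_mma_disassemble_dmr(out, &d);
}
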
diff --git a/clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c b/clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c
index 5a92d6e..ea2b99b 100644
--- a/clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c
+++ b/clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c
@@ -16,6 +16,8 @@ void test_mma(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc)
__builtin_mma_dmsetdmrz(&vdmr);
__builtin_mma_dmmr(&vdmr, (__dmr1024*)vpp);
__builtin_mma_dmxor(&vdmr, (__dmr1024*)vpp);
+ __builtin_mma_build_dmr(&vdmr, vc, vc, vc, vc, vc, vc, vc, vc);
+ __builtin_mma_disassemble_dmr(vdmrp, &vdmr);
// CHECK: error: '__builtin_mma_dmxvi8gerx4' needs target feature mma,paired-vector-memops
// CHECK: error: '__builtin_mma_pmdmxvi8gerx4' needs target feature mma,paired-vector-memops
@@ -26,4 +28,6 @@ void test_mma(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc)
// CHECK: error: '__builtin_mma_dmsetdmrz' needs target feature mma,isa-future-instructions
// CHECK: error: '__builtin_mma_dmmr' needs target feature mma,isa-future-instructions
// CHECK: error: '__builtin_mma_dmxor' needs target feature mma,isa-future-instructions
+// CHECK: error: '__builtin_mma_build_dmr' needs target feature mma,isa-future-instructions
+// CHECK: error: '__builtin_mma_disassemble_dmr' needs target feature mma,isa-future-instructions
}
diff --git a/clang/test/CodeGen/RISCV/riscv-inline-asm-fixed-length-vector.c b/clang/test/CodeGen/RISCV/riscv-inline-asm-fixed-length-vector.c
new file mode 100644
index 0000000..699c588
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-inline-asm-fixed-length-vector.c
@@ -0,0 +1,66 @@
+// REQUIRES: riscv-registered-target
+
+// RUN: %clang_cc1 -triple riscv32 -target-feature +v \
+// RUN: -mvscale-min=2 -mvscale-max=2 -O2 -emit-llvm %s -o - \
+// RUN: | FileCheck %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -mvscale-min=2 -mvscale-max=2 -O2 -emit-llvm %s -o - \
+// RUN: | FileCheck %s
+
+// Test RISC-V V-extension fixed-length vector inline assembly constraints.
+#include <riscv_vector.h>
+#include <stdbool.h>
+
+typedef vbool1_t fixed_bool1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
+typedef vint32m1_t fixed_i32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
+typedef vint8mf2_t fixed_i8mf2_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 2)));
+
+typedef bool bx2 __attribute__((ext_vector_type(16)));
+typedef int i32x2 __attribute__((ext_vector_type(2)));
+typedef char i8x4 __attribute__((ext_vector_type(4)));
+
+fixed_i32m1_t test_vr(fixed_i32m1_t a) {
+// CHECK-LABEL: define{{.*}} @test_vr
+// CHECK: %0 = tail call <4 x i32> asm sideeffect "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<4 x i32> %a, <4 x i32> %a)
+ fixed_i32m1_t ret;
+ asm volatile ("vadd.vv %0, %1, %2" : "=vr"(ret) : "vr"(a), "vr"(a));
+ return ret;
+}
+
+i32x2 test_vr2(i32x2 a) {
+// CHECK-LABEL: define{{.*}} @test_vr2
+// CHECK: %1 = tail call <2 x i32> asm sideeffect "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<2 x i32> %0, <2 x i32> %0)
+ i32x2 ret;
+ asm volatile ("vadd.vv %0, %1, %2" : "=vr"(ret) : "vr"(a), "vr"(a));
+ return ret;
+}
+
+fixed_i8mf2_t test_vd(fixed_i8mf2_t a) {
+// CHECK-LABEL: define{{.*}} @test_vd
+// CHECK: %0 = tail call <8 x i8> asm sideeffect "vadd.vv $0, $1, $2", "=^vd,^vr,^vr"(<8 x i8> %a, <8 x i8> %a)
+ fixed_i8mf2_t ret;
+ asm volatile ("vadd.vv %0, %1, %2" : "=vd"(ret) : "vr"(a), "vr"(a));
+ return ret;
+}
+
+i8x4 test_vd2(i8x4 a) {
+// CHECK-LABEL: define{{.*}} @test_vd2
+// CHECK: %1 = tail call <4 x i8> asm sideeffect "vadd.vv $0, $1, $2", "=^vd,^vr,^vr"(<4 x i8> %0, <4 x i8> %0)
+ i8x4 ret;
+ asm volatile ("vadd.vv %0, %1, %2" : "=vd"(ret) : "vr"(a), "vr"(a));
+ return ret;
+}
+
+fixed_bool1_t test_vm(fixed_bool1_t a) {
+// CHECK-LABEL: define{{.*}} @test_vm
+// CHECK: %1 = tail call <16 x i8> asm sideeffect "vmand.mm $0, $1, $2", "=^vm,^vm,^vm"(<16 x i8> %a, <16 x i8> %a)
+ fixed_bool1_t ret;
+ asm volatile ("vmand.mm %0, %1, %2" : "=vm"(ret) : "vm"(a), "vm"(a));
+ return ret;
+}
+
+void test_vm2(bx2 a) {
+// CHECK-LABEL: define{{.*}} @test_vm2
+// CHECK: tail call void asm sideeffect "dummy $0", "^vm"(<16 x i1> %a1)
+ asm volatile ("dummy %0" :: "vm"(a));
+}
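
The new test exercises the three RVV register constraints for fixed-length operands: "vr" accepts any vector register, "vd" excludes v0 (so v0 stays free for a mask), and "vm" requests a mask register. A standalone sketch under the same RUN-line assumptions (+v, fixed vscale; names are illustrative):

typedef int i32x2 __attribute__((ext_vector_type(2)));

i32x2 add_sketch(i32x2 a, i32x2 b) {
  i32x2 r;
  // "=vd" keeps the result out of v0, as in test_vd2 above.
  asm volatile("vadd.vv %0, %1, %2" : "=vd"(r) : "vr"(a), "vr"(b));
  return r;
}
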
diff --git a/clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp b/clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp
index 4c91fef..c5b863b 100644
--- a/clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp
+++ b/clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp
@@ -10,10 +10,10 @@ vint32m1_t Baz();
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[REF_TMP:%.*]] = alloca <vscale x 2 x i32>, align 4
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A]]) #[[ATTR3:[0-9]+]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[REF_TMP]]) #[[ATTR3]]
-// CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr [[REF_TMP]]) #[[ATTR3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[A]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) #[[ATTR3:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR3]]
+// CHECK: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) #[[ATTR3]]
//
vint32m1_t Test() {
const vint32m1_t &a = Baz();
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
index bc89cb5..6866fe0 100644
--- a/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
@@ -143,34 +143,34 @@ void __attribute__((riscv_vls_cc)) test_too_large(int32x64_t arg) {}
// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_too_large_256(<vscale x 16 x i32> noundef %arg.coerce)
void __attribute__((riscv_vls_cc(256))) test_too_large_256(int32x64_t arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4(<vscale x 2 x i32> %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4(<vscale x 2 x i32> %arg.target_coerce)
void __attribute__((riscv_vls_cc)) test_st_i32x4(struct st_i32x4 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4_256(<vscale x 1 x i32> %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4_256(<vscale x 1 x i32> %arg.target_coerce)
void __attribute__((riscv_vls_cc(256))) test_st_i32x4_256(struct st_i32x4 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4_arr1(<vscale x 2 x i32> %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4_arr1(<vscale x 2 x i32> %arg.target_coerce)
void __attribute__((riscv_vls_cc)) test_st_i32x4_arr1(struct st_i32x4_arr1 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4_arr1_256(<vscale x 1 x i32> %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4_arr1_256(<vscale x 1 x i32> %arg.target_coerce)
void __attribute__((riscv_vls_cc(256))) test_st_i32x4_arr1_256(struct st_i32x4_arr1 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4_arr4(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4_arr4(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %arg.target_coerce)
void __attribute__((riscv_vls_cc)) test_st_i32x4_arr4(struct st_i32x4_arr4 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4_arr4_256(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4_arr4_256(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %arg.target_coerce)
void __attribute__((riscv_vls_cc(256))) test_st_i32x4_arr4_256(struct st_i32x4_arr4 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4_arr8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4_arr8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %arg.target_coerce)
void __attribute__((riscv_vls_cc)) test_st_i32x4_arr8(struct st_i32x4_arr8 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4_arr8_256(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4_arr8_256(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %arg.target_coerce)
void __attribute__((riscv_vls_cc(256))) test_st_i32x4_arr8_256(struct st_i32x4_arr8 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4x2(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4x2(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %arg.target_coerce)
void __attribute__((riscv_vls_cc)) test_st_i32x4x2(struct st_i32x4x2 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4x2_256(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4x2_256(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %arg.target_coerce)
void __attribute__((riscv_vls_cc(256))) test_st_i32x4x2_256(struct st_i32x4x2 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x8x2(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x8x2(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %arg.target_coerce)
void __attribute__((riscv_vls_cc)) test_st_i32x8x2(struct st_i32x8x2 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x8x2_256(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x8x2_256(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %arg.target_coerce)
void __attribute__((riscv_vls_cc(256))) test_st_i32x8x2_256(struct st_i32x8x2 arg) {}
// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x64x2(ptr dead_on_return noundef %arg)
@@ -178,17 +178,78 @@ void __attribute__((riscv_vls_cc)) test_st_i32x64x2(struct st_i32x64x2 arg) {}
// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x64x2_256(ptr dead_on_return noundef %arg)
void __attribute__((riscv_vls_cc(256))) test_st_i32x64x2_256(struct st_i32x64x2 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4x3(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4x3(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %arg.target_coerce)
void __attribute__((riscv_vls_cc)) test_st_i32x4x3(struct st_i32x4x3 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4x3_256(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4x3_256(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %arg.target_coerce)
void __attribute__((riscv_vls_cc(256))) test_st_i32x4x3_256(struct st_i32x4x3 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4x8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4x8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %arg.target_coerce)
void __attribute__((riscv_vls_cc)) test_st_i32x4x8(struct st_i32x4x8 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4x8_256(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4x8_256(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %arg.target_coerce)
void __attribute__((riscv_vls_cc(256))) test_st_i32x4x8_256(struct st_i32x4x8 arg) {}
// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @test_st_i32x4x9(ptr dead_on_return noundef %arg)
void __attribute__((riscv_vls_cc)) test_st_i32x4x9(struct st_i32x4x9 arg) {}
// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @test_st_i32x4x9_256(ptr dead_on_return noundef %arg)
void __attribute__((riscv_vls_cc(256))) test_st_i32x4x9_256(struct st_i32x4x9 arg) {}
+
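+// The checks below cover the full coercion path through a function body: on
+// entry, each fixed <4 x i32> element is extracted from the scalable tuple
+// argument and stored into the local %arg, and on return the elements are
+// reloaded and re-inserted into a fresh tuple value.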
+// CHECK-LLVM-LABEL: define dso_local riscv_vls_cc(128) target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_function_prolog_epilog(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %arg.target_coerce) #0 {
+// CHECK-LLVM-NEXT: entry:
+// CHECK-LLVM-NEXT: %retval = alloca %struct.st_i32x4_arr4, align 16
+// CHECK-LLVM-NEXT: %arg = alloca %struct.st_i32x4_arr4, align 16
+// CHECK-LLVM-NEXT: %0 = call <vscale x 2 x i32> @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %arg.target_coerce, i32 0)
+// CHECK-LLVM-NEXT: %1 = call <4 x i32> @llvm.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32> %0, i64 0)
+// CHECK-LLVM-NEXT: %2 = getelementptr inbounds [4 x <4 x i32>], ptr %arg, i64 0, i64 0
+// CHECK-LLVM-NEXT: store <4 x i32> %1, ptr %2, align 16
+// CHECK-LLVM-NEXT: %3 = call <vscale x 2 x i32> @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %arg.target_coerce, i32 1)
+// CHECK-LLVM-NEXT: %4 = call <4 x i32> @llvm.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32> %3, i64 0)
+// CHECK-LLVM-NEXT: %5 = getelementptr inbounds [4 x <4 x i32>], ptr %arg, i64 0, i64 1
+// CHECK-LLVM-NEXT: store <4 x i32> %4, ptr %5, align 16
+// CHECK-LLVM-NEXT: %6 = call <vscale x 2 x i32> @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %arg.target_coerce, i32 2)
+// CHECK-LLVM-NEXT: %7 = call <4 x i32> @llvm.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32> %6, i64 0)
+// CHECK-LLVM-NEXT: %8 = getelementptr inbounds [4 x <4 x i32>], ptr %arg, i64 0, i64 2
+// CHECK-LLVM-NEXT: store <4 x i32> %7, ptr %8, align 16
+// CHECK-LLVM-NEXT: %9 = call <vscale x 2 x i32> @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %arg.target_coerce, i32 3)
+// CHECK-LLVM-NEXT: %10 = call <4 x i32> @llvm.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32> %9, i64 0)
+// CHECK-LLVM-NEXT: %11 = getelementptr inbounds [4 x <4 x i32>], ptr %arg, i64 0, i64 3
+// CHECK-LLVM-NEXT: store <4 x i32> %10, ptr %11, align 16
+// CHECK-LLVM-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 %retval, ptr align 16 %arg, i64 64, i1 false)
+// CHECK-LLVM-NEXT: %12 = load [4 x <4 x i32>], ptr %retval, align 16
+// CHECK-LLVM-NEXT: %13 = extractvalue [4 x <4 x i32>] %12, 0
+// CHECK-LLVM-NEXT: %cast.scalable = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> poison, <4 x i32> %13, i64 0)
+// CHECK-LLVM-NEXT: %14 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, <vscale x 2 x i32> %cast.scalable, i32 0)
+// CHECK-LLVM-NEXT: %15 = extractvalue [4 x <4 x i32>] %12, 1
+// CHECK-LLVM-NEXT: %cast.scalable1 = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> poison, <4 x i32> %15, i64 0)
+// CHECK-LLVM-NEXT: %16 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %14, <vscale x 2 x i32> %cast.scalable1, i32 1)
+// CHECK-LLVM-NEXT: %17 = extractvalue [4 x <4 x i32>] %12, 2
+// CHECK-LLVM-NEXT: %cast.scalable2 = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> poison, <4 x i32> %17, i64 0)
+// CHECK-LLVM-NEXT: %18 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %16, <vscale x 2 x i32> %cast.scalable2, i32 2)
+// CHECK-LLVM-NEXT: %19 = extractvalue [4 x <4 x i32>] %12, 3
+// CHECK-LLVM-NEXT: %cast.scalable3 = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> poison, <4 x i32> %19, i64 0)
+// CHECK-LLVM-NEXT: %20 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %18, <vscale x 2 x i32> %cast.scalable3, i32 3)
+// CHECK-LLVM-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %20
+// CHECK-LLVM-NEXT: }
+struct st_i32x4_arr4 __attribute__((riscv_vls_cc)) test_function_prolog_epilog(struct st_i32x4_arr4 arg) {
+ return arg;
+}
+
+struct st_i32x4 __attribute__((riscv_vls_cc)) dummy(struct st_i32x4);
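+// Caller side of the coercion: the fixed <4 x i32> payload is unpacked from
+// the scalable argument through %arg, repacked into a scalable vector for the
+// call to dummy, and the result round-trips through %retval the same way.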
+// CHECK-LLVM-LABEL: define dso_local riscv_vls_cc(128) <vscale x 2 x i32> @test_call(<vscale x 2 x i32> %arg.target_coerce) #0 {
+// CHECK-LLVM-NEXT: entry:
+// CHECK-LLVM-NEXT: %retval = alloca %struct.st_i32x4, align 16
+// CHECK-LLVM-NEXT: %arg = alloca %struct.st_i32x4, align 16
+// CHECK-LLVM-NEXT: %0 = call <4 x i32> @llvm.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32> %arg.target_coerce, i64 0)
+// CHECK-LLVM-NEXT: store <4 x i32> %0, ptr %arg, align 16
+// CHECK-LLVM-NEXT: %1 = load <4 x i32>, ptr %arg, align 16
+// CHECK-LLVM-NEXT: %cast.scalable = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> poison, <4 x i32> %1, i64 0)
+// CHECK-LLVM-NEXT: %call = call riscv_vls_cc(128) <vscale x 2 x i32> @dummy(<vscale x 2 x i32> %cast.scalable)
+// CHECK-LLVM-NEXT: %2 = call <4 x i32> @llvm.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32> %call, i64 0)
+// CHECK-LLVM-NEXT: store <4 x i32> %2, ptr %retval, align 16
+// CHECK-LLVM-NEXT: %3 = load <4 x i32>, ptr %retval, align 16
+// CHECK-LLVM-NEXT: %cast.scalable1 = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> poison, <4 x i32> %3, i64 0)
+// CHECK-LLVM-NEXT: ret <vscale x 2 x i32> %cast.scalable1
+// CHECK-LLVM-NEXT: }
+struct st_i32x4 __attribute__((riscv_vls_cc)) test_call(struct st_i32x4 arg) {
+ struct st_i32x4 abc = dummy(arg);
+ return abc;
+}
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.cpp b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.cpp
index 128610e..3ed4f59 100644
--- a/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.cpp
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.cpp
@@ -123,34 +123,34 @@ typedef int __attribute__((vector_size(256))) int32x64_t;
// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z18test_too_large_256Dv64_i(<vscale x 16 x i32> noundef %arg.coerce)
[[riscv::vls_cc(256)]] void test_too_large_256(int32x64_t arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z13test_st_i32x48st_i32x4(<vscale x 2 x i32> %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z13test_st_i32x48st_i32x4(<vscale x 2 x i32> %arg.target_coerce)
[[riscv::vls_cc]] void test_st_i32x4(struct st_i32x4 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z17test_st_i32x4_2568st_i32x4(<vscale x 1 x i32> %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z17test_st_i32x4_2568st_i32x4(<vscale x 1 x i32> %arg.target_coerce)
[[riscv::vls_cc(256)]] void test_st_i32x4_256(struct st_i32x4 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z18test_st_i32x4_arr113st_i32x4_arr1(<vscale x 2 x i32> %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z18test_st_i32x4_arr113st_i32x4_arr1(<vscale x 2 x i32> %arg.target_coerce)
[[riscv::vls_cc]] void test_st_i32x4_arr1(struct st_i32x4_arr1 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z22test_st_i32x4_arr1_25613st_i32x4_arr1(<vscale x 1 x i32> %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z22test_st_i32x4_arr1_25613st_i32x4_arr1(<vscale x 1 x i32> %arg.target_coerce)
[[riscv::vls_cc(256)]] void test_st_i32x4_arr1_256(struct st_i32x4_arr1 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z18test_st_i32x4_arr413st_i32x4_arr4(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z18test_st_i32x4_arr413st_i32x4_arr4(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %arg.target_coerce)
[[riscv::vls_cc]] void test_st_i32x4_arr4(struct st_i32x4_arr4 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z22test_st_i32x4_arr4_25613st_i32x4_arr4(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z22test_st_i32x4_arr4_25613st_i32x4_arr4(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %arg.target_coerce)
[[riscv::vls_cc(256)]] void test_st_i32x4_arr4_256(struct st_i32x4_arr4 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z18test_st_i32x4_arr813st_i32x4_arr8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z18test_st_i32x4_arr813st_i32x4_arr8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %arg.target_coerce)
[[riscv::vls_cc]] void test_st_i32x4_arr8(struct st_i32x4_arr8 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z22test_st_i32x4_arr8_25613st_i32x4_arr8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z22test_st_i32x4_arr8_25613st_i32x4_arr8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %arg.target_coerce)
[[riscv::vls_cc(256)]] void test_st_i32x4_arr8_256(struct st_i32x4_arr8 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z15test_st_i32x4x210st_i32x4x2(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z15test_st_i32x4x210st_i32x4x2(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %arg.target_coerce)
[[riscv::vls_cc]] void test_st_i32x4x2(struct st_i32x4x2 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z19test_st_i32x4x2_25610st_i32x4x2(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z19test_st_i32x4x2_25610st_i32x4x2(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %arg.target_coerce)
[[riscv::vls_cc(256)]] void test_st_i32x4x2_256(struct st_i32x4x2 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z15test_st_i32x8x210st_i32x8x2(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z15test_st_i32x8x210st_i32x8x2(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %arg.target_coerce)
[[riscv::vls_cc]] void test_st_i32x8x2(struct st_i32x8x2 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z19test_st_i32x8x2_25610st_i32x8x2(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z19test_st_i32x8x2_25610st_i32x8x2(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %arg.target_coerce)
[[riscv::vls_cc(256)]] void test_st_i32x8x2_256(struct st_i32x8x2 arg) {}
// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z16test_st_i32x64x211st_i32x64x2(ptr dead_on_return noundef %arg)
@@ -158,14 +158,14 @@ typedef int __attribute__((vector_size(256))) int32x64_t;
// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z20test_st_i32x64x2_25611st_i32x64x2(ptr dead_on_return noundef %arg)
[[riscv::vls_cc(256)]] void test_st_i32x64x2_256(struct st_i32x64x2 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z15test_st_i32x4x310st_i32x4x3(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z15test_st_i32x4x310st_i32x4x3(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %arg.target_coerce)
[[riscv::vls_cc]] void test_st_i32x4x3(struct st_i32x4x3 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z19test_st_i32x4x3_25610st_i32x4x3(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z19test_st_i32x4x3_25610st_i32x4x3(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %arg.target_coerce)
[[riscv::vls_cc(256)]] void test_st_i32x4x3_256(struct st_i32x4x3 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z15test_st_i32x4x810st_i32x4x8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z15test_st_i32x4x810st_i32x4x8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %arg.target_coerce)
[[riscv::vls_cc]] void test_st_i32x4x8(struct st_i32x4x8 arg) {}
-// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z19test_st_i32x4x8_25610st_i32x4x8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %arg)
+// CHECK-LLVM: define dso_local riscv_vls_cc(256) void @_Z19test_st_i32x4x8_25610st_i32x4x8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %arg.target_coerce)
[[riscv::vls_cc(256)]] void test_st_i32x4x8_256(struct st_i32x4x8 arg) {}
// CHECK-LLVM: define dso_local riscv_vls_cc(128) void @_Z15test_st_i32x4x910st_i32x4x9(ptr dead_on_return noundef %arg)
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfncvtbf16-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfncvtbf16-out-of-range.c
new file mode 100644
index 0000000..899ec29
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfncvtbf16-out-of-range.c
@@ -0,0 +1,31 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +v -target-feature +zvfbfmin \
+// RUN: -fsyntax-only -verify %s
+
+#include <riscv_vector.h>
+
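+// The rounding-mode (frm) operand of these intrinsics must be an immediate in
+// [0, 4] (RNE, RTZ, RDN, RUP, RMM); each policy variant below is checked with
+// the out-of-range value 5.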
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_m(mask, src, 5, vl);
+}
+
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tu(vbfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tu(maskedoff, src, 5, vl);
+}
+
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tum(mask, maskedoff, src, 5, vl);
+}
+
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tumu(mask, maskedoff, src, 5, vl);
+}
+
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_mu(mask, maskedoff, src, 5, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfwmaccbf16-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfwmaccbf16-out-of-range.c
new file mode 100644
index 0000000..84b822d1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfwmaccbf16-out-of-range.c
@@ -0,0 +1,66 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +v -target-feature +zvfbfwma \
+// RUN: -fsyntax-only -verify %s
+
+#include <riscv_vector.h>
+
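+// The frm immediate of these widening bf16 multiply-accumulate intrinsics
+// must lie in [0, 4]; both the vv and vf forms of every policy variant are
+// checked with the out-of-range value 5.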
+vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm(vfloat32m1_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfwmaccbf16_vv_f32m1_rm(vd, vs1, vs2, 5, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, vbfloat16mf2_t vs2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfwmaccbf16_vf_f32m1_rm(vd, vs1, vs2, 5, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfwmaccbf16_vv_f32m1_rm_m(mask, vd, vs1, vs2, 5, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, __bf16 vs1, vbfloat16mf2_t vs2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfwmaccbf16_vf_f32m1_rm_m(mask, vd, vs1, vs2, 5, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tu(vfloat32m1_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfwmaccbf16_vv_f32m1_rm_tu(vd, vs1, vs2, 5, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, vbfloat16mf2_t vs2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfwmaccbf16_vf_f32m1_rm_tu(vd, vs1, vs2, 5, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfwmaccbf16_vv_f32m1_rm_tum(mask, vd, vs1, vs2, 5, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, __bf16 vs1, vbfloat16mf2_t vs2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfwmaccbf16_vf_f32m1_rm_tum(mask, vd, vs1, vs2, 5, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfwmaccbf16_vv_f32m1_rm_tumu(mask, vd, vs1, vs2, 5, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, __bf16 vs1, vbfloat16mf2_t vs2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfwmaccbf16_vf_f32m1_rm_tumu(mask, vd, vs1, vs2, 5, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfwmaccbf16_vv_f32m1_rm_mu(mask, vd, vs1, vs2, 5, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, __bf16 vs1, vbfloat16mf2_t vs2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfwmaccbf16_vf_f32m1_rm_mu(mask, vd, vs1, vs2, 5, vl);
+}
diff --git a/clang/test/CodeGen/WebAssembly/builtins-test-fp-sig.c b/clang/test/CodeGen/WebAssembly/builtins-test-fp-sig.c
new file mode 100644
index 0000000..88447f7
--- /dev/null
+++ b/clang/test/CodeGen/WebAssembly/builtins-test-fp-sig.c
@@ -0,0 +1,70 @@
+// RUN: %clang_cc1 -triple wasm32-unknown-unknown -target-feature +gc -O3 -emit-llvm -DSINGLE_VALUE -o - %s | FileCheck %s -check-prefixes WEBASSEMBLY,WEBASSEMBLY-SV
+// RUN: %clang_cc1 -triple wasm64-unknown-unknown -target-feature +gc -O3 -emit-llvm -DSINGLE_VALUE -o - %s | FileCheck %s -check-prefixes WEBASSEMBLY,WEBASSEMBLY-SV
+// RUN: %clang_cc1 -triple wasm64-unknown-unknown -target-feature +gc -target-abi experimental-mv -O3 -emit-llvm -o - %s 2>&1 | FileCheck %s -check-prefixes WEBASSEMBLY
+// RUN: not %clang_cc1 -triple wasm64-unknown-unknown -O3 -emit-llvm -o - %s 2>&1 | FileCheck %s -check-prefixes MISSING-GC
+
+void use(int);
+
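+// __builtin_wasm_test_function_pointer_signature lowers to the variadic
+// llvm.wasm.ref.test.func intrinsic: the poison arguments before the
+// "token poison" separator encode the return types of the tested signature,
+// and the ones after it encode the parameter types.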
+typedef void (*Fvoid)(void);
+void test_function_pointer_signature_void(Fvoid func) {
+ // MISSING-GC: error: '__builtin_wasm_test_function_pointer_signature' needs target feature gc
+ // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+typedef float (*Ffloats)(float, double, int);
+void test_function_pointer_signature_floats(Ffloats func) {
+ // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, float poison, token poison, float poison, double poison, i32 poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+typedef void (*Fpointers)(Fvoid, Ffloats, void*, int*, int***, char[5]);
+void test_function_pointer_signature_pointers(Fpointers func) {
+ // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, ptr poison, ptr poison, ptr poison, ptr poison, ptr poison, ptr poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+typedef void (*FVarArgs)(int, ...);
+void test_function_pointer_signature_varargs(FVarArgs func) {
+ // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, i32 poison, ptr poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+typedef __externref_t (*FExternRef)(__externref_t, __externref_t);
+void test_function_pointer_externref(FExternRef func) {
+ // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, ptr addrspace(10) poison, token poison, ptr addrspace(10) poison, ptr addrspace(10) poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+typedef __funcref Fpointers (*FFuncRef)(__funcref Fvoid, __funcref Ffloats);
+void test_function_pointer_funcref(FFuncRef func) {
+ // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, ptr addrspace(20) poison, token poison, ptr addrspace(20) poison, ptr addrspace(20) poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+#ifdef SINGLE_VALUE
+// Some tests that check we get struct ABIs correct. There is no special code
+// in __builtin_wasm_test_function_pointer_signature for this; it is handled
+// by the normal type lowering code.
+// Single-element structs are unboxed; multi-element structs are passed on
+// the stack.
+typedef struct {double x;} (*Fstructs1)(struct {double x;}, struct {float x;}, struct {double x; float y;});
+void test_function_pointer_structs1(Fstructs1 func) {
+ // WEBASSEMBLY-SV: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, double poison, token poison, double poison, float poison, ptr poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+// Two-element return struct ==> return ptr on stack
+typedef struct {double x; double y;} (*Fstructs2)(void);
+void test_function_pointer_structs2(Fstructs2 func) {
+ // WEBASSEMBLY-SV: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, ptr poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+// Return union ==> return ptr on stack; one-element union ==> unboxed
+typedef union {double x; float y;} (*FUnions)(union {double x; float y;}, union {double x;});
+void test_function_pointer_unions(FUnions func) {
+ // WEBASSEMBLY-SV: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, ptr poison, ptr poison, double poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+#endif
diff --git a/clang/test/CodeGen/X86/avx-builtins.c b/clang/test/CodeGen/X86/avx-builtins.c
index a6e70aae..4a04874 100644
--- a/clang/test/CodeGen/X86/avx-builtins.c
+++ b/clang/test/CodeGen/X86/avx-builtins.c
@@ -9,6 +9,17 @@
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,X86
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X86
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X86
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X86
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X86
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+
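+// TEST_CONSTEXPR evaluates the annotated intrinsic calls at compile time; the
+// -fexperimental-new-constant-interpreter RUN lines check that the bytecode
+// interpreter and the default constant evaluator agree on the results.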
#include <immintrin.h>
#include "builtin_test_helpers.h"
@@ -20,12 +31,14 @@ __m256d test_mm256_add_pd(__m256d A, __m256d B) {
// CHECK: fadd <4 x double>
return _mm256_add_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d( _mm256_add_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){-4.0, -5.0, +6.0, +7.0}), -8.0, -10.0, +12.0, +14.0));
__m256 test_mm256_add_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_add_ps
// CHECK: fadd <8 x float>
return _mm256_add_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_add_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}), -8.0f, -10.0f, +12.0f, +14.0f, +14.0f, +12.0f, -10.0f, -8.0f));
__m256d test_mm256_addsub_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_addsub_pd
@@ -44,12 +57,14 @@ __m256d test_mm256_and_pd(__m256d A, __m256d B) {
// CHECK: and <4 x i64>
return _mm256_and_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_and_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){+0.0, -0.0, -0.0, +7.0}), +0.0, -0.0, +0.0, +7.0));
__m256 test_mm256_and_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_and_ps
// CHECK: and <8 x i32>
return _mm256_and_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_and_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), +0.0f, -0.0f, +0.0f, +7.0f, +7.0f, +0.0f, -0.0f, +0.0f));
__m256d test_mm256_andnot_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_andnot_pd
@@ -57,6 +72,7 @@ __m256d test_mm256_andnot_pd(__m256d A, __m256d B) {
// CHECK: and <4 x i64>
return _mm256_andnot_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_andnot_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){+0.0, -0.0, -0.0, +7.0}), +0.0, +0.0, -0.0, +0.0));
__m256 test_mm256_andnot_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_andnot_ps
@@ -64,6 +80,7 @@ __m256 test_mm256_andnot_ps(__m256 A, __m256 B) {
// CHECK: and <8 x i32>
return _mm256_andnot_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_andnot_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), +0.0f, +0.0f, -0.0f, +0.0f, +0.0f, -0.0f, +0.0f, +0.0f));
__m256d test_mm256_blend_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_blend_pd
@@ -141,11 +158,13 @@ __m256 test_mm256_castpd_ps(__m256d A) {
// CHECK-LABEL: test_mm256_castpd_ps
return _mm256_castpd_ps(A);
}
+TEST_CONSTEXPR(match_m256(_mm256_castpd_ps((__m256d){-1.0, +2.0, +4.0, -6.0}), +0.0f, -1.875f, +0.0f, +2.0f, +0.0f, +2.25f, 0.0f, -2.375f));
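+// (Bit-level example: the double -1.0 is 0xBFF0000000000000, whose low and
+// high 32-bit halves reinterpret as the floats +0.0f and -1.875f.)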
__m256i test_mm256_castpd_si256(__m256d A) {
// CHECK-LABEL: test_mm256_castpd_si256
return _mm256_castpd_si256(A);
}
+TEST_CONSTEXPR(match_m256i(_mm256_castpd_si256((__m256d){-1.0, +2.0, -3.0, +4.0}), 0xBFF0000000000000ULL, 0x4000000000000000ULL, 0xC008000000000000ULL, 0x4010000000000000ULL));
__m256d test_mm256_castpd128_pd256(__m128d A) {
// CHECK-LABEL: test_mm256_castpd128_pd256
@@ -159,16 +178,19 @@ __m128d test_mm256_castpd256_pd128(__m256d A) {
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <2 x i32> <i32 0, i32 1>
return _mm256_castpd256_pd128(A);
}
+TEST_CONSTEXPR(match_m128d(_mm256_castpd256_pd128((__m256d){-1.0, +2.0, -3.0, +4.0}), -1.0, +2.0));
__m256d test_mm256_castps_pd(__m256 A) {
// CHECK-LABEL: test_mm256_castps_pd
return _mm256_castps_pd(A);
}
+TEST_CONSTEXPR(match_m256d(_mm256_castps_pd((__m256){0.0f, -1.0f, 0.0f, 4.0f, 0.0f, -2.0f, 0.0f, 6.0f}), -0.0078125, 512.0, -2.0, +8192.0));
__m256i test_mm256_castps_si256(__m256 A) {
// CHECK-LABEL: test_mm256_castps_si256
return _mm256_castps_si256(A);
}
+TEST_CONSTEXPR(match_m256i(_mm256_castps_si256((__m256){1.0f, -2.0f, -4.0f, 8.0f, -16.0f, +16.0f, +32.0f, -32.0f}), 0xC00000003F800000ULL, 0x41000000c0800000ULL, 0x41800000C1800000ULL, 0xC200000042000000ULL));
__m256 test_mm256_castps128_ps256(__m128 A) {
// CHECK-LABEL: test_mm256_castps128_ps256
@@ -182,6 +204,7 @@ __m128 test_mm256_castps256_ps128(__m256 A) {
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
return _mm256_castps256_ps128(A);
}
+TEST_CONSTEXPR(match_m128(_mm256_castps256_ps128((__m256){1.0f, -2.0f, -4.0f, 8.0f, -16.0f, +16.0f, +32.0f, -32.0f}), 1.0f, -2.0f, -4.0f, 8.0f));
__m256i test_mm256_castsi128_si256(__m128i A) {
// CHECK-LABEL: test_mm256_castsi128_si256
@@ -194,17 +217,20 @@ __m256d test_mm256_castsi256_pd(__m256i A) {
// CHECK-LABEL: test_mm256_castsi256_pd
return _mm256_castsi256_pd(A);
}
+TEST_CONSTEXPR(match_m256d(_mm256_castsi256_pd((__m256i)(__v4du){0x4070000000000000ULL, 0xC000000000000000ULL, 0xBFF0000000000000ULL, 0xC008000000000000ULL}), 256.0, -2.0, -1.0, -3.0));
__m256 test_mm256_castsi256_ps(__m256i A) {
// CHECK-LABEL: test_mm256_castsi256_ps
return _mm256_castsi256_ps(A);
}
+TEST_CONSTEXPR(match_m256(_mm256_castsi256_ps((__m256i)(__v4du){0x42000000c1800000ULL, 0x43000000c2800000ULL, 0x41000000c0800000ULL, 0xC00000003F800000ULL}), -16.0f, 32.0f, -64.0f, 128.0f, -4.0f, 8.0f, 1.0f, -2.0f));
__m128i test_mm256_castsi256_si128(__m256i A) {
// CHECK-LABEL: test_mm256_castsi256_si128
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <2 x i32> <i32 0, i32 1>
return _mm256_castsi256_si128(A);
}
+TEST_CONSTEXPR(match_m128i(_mm256_castsi256_si128((__m256i)(__v4du){0xBFF0000000000000ULL, 0x4070000000000000ULL, 0xC000000000000000ULL, 0xC008000000000000ULL}), 0xBFF0000000000000ULL, 0x4070000000000000ULL));
__m256d test_mm256_ceil_pd(__m256d x) {
// CHECK-LABEL: test_mm256_ceil_pd
@@ -908,12 +934,16 @@ __m256d test_mm256_cvtepi32_pd(__m128i A) {
return _mm256_cvtepi32_pd(A);
}
+TEST_CONSTEXPR(match_m256d(_mm256_cvtepi32_pd((__m128i)(__v4si){-2, -1, 0, 1}), -2.0, -1.0, 0.0, 1.0));
+
__m256 test_mm256_cvtepi32_ps(__m256i A) {
// CHECK-LABEL: test_mm256_cvtepi32_ps
// CHECK: sitofp <8 x i32> %{{.*}} to <8 x float>
return _mm256_cvtepi32_ps(A);
}
+TEST_CONSTEXPR(match_m256(_mm256_cvtepi32_ps((__m256i)(__v8si){-8, -4, -2, -1, 0, 1, 2, 4}), -8.0f, -4.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 4.0f));
+
__m128i test_mm256_cvtpd_epi32(__m256d A) {
// CHECK-LABEL: test_mm256_cvtpd_epi32
// CHECK: call <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double> %{{.*}})
@@ -938,6 +968,8 @@ __m256d test_mm256_cvtps_pd(__m128 A) {
return _mm256_cvtps_pd(A);
}
+TEST_CONSTEXPR(match_m256d(_mm256_cvtps_pd((__m128){0.25f, 1.75f, -1.75f, 16777216.0f}), 0.25, 1.75, -1.75, 16777216.0));
+
double test_mm256_cvtsd_f64(__m256d __a) {
// CHECK-LABEL: test_mm256_cvtsd_f64
// CHECK: extractelement <4 x double> %{{.*}}, i32 0
@@ -973,12 +1005,14 @@ __m256d test_mm256_div_pd(__m256d A, __m256d B) {
// CHECK: fdiv <4 x double>
return _mm256_div_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d( _mm256_div_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){-1.0, +1.0, -1.0, +1.0}), +4.0, -5.0, -6.0, +7.0));
__m256 test_mm256_div_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_div_ps
// CHECK: fdiv <8 x float>
return _mm256_div_ps(A, B);
}
+TEST_CONSTEXPR(match_m256( _mm256_div_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){-1.0f, +1.0f, -1.0f, +1.0f, +1.0f, -1.0f, +1.0f, -1.0f}), +4.0f, -5.0f, -6.0f, +7.0f, +7.0f, -6.0f, -5.0f, +4.0f));
__m256 test_mm256_dp_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_dp_ps
@@ -1258,18 +1292,21 @@ __m256d test_mm256_movedup_pd(__m256d A) {
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
return _mm256_movedup_pd(A);
}
+TEST_CONSTEXPR(match_m256d(_mm256_movedup_pd((__m256d){+7.0, -7.0, -42.0, +42.0}), +7.0, +7.0, -42.0, -42.0));
__m256 test_mm256_movehdup_ps(__m256 A) {
// CHECK-LABEL: test_mm256_movehdup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
return _mm256_movehdup_ps(A);
}
+TEST_CONSTEXPR(match_m256(_mm256_movehdup_ps((__m256){+1.0f,-1.0f,+2.0f,+4.0f,+8.0f,-8.0f,-3.0f,+3.0f}), -1.0f, -1.0f, +4.0f, +4.0f, -8.0f, -8.0f, +3.0f, +3.0f));
__m256 test_mm256_moveldup_ps(__m256 A) {
// CHECK-LABEL: test_mm256_moveldup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
return _mm256_moveldup_ps(A);
}
+TEST_CONSTEXPR(match_m256(_mm256_moveldup_ps((__m256){+1.0f,-1.0f,+2.0f,+4.0f,+8.0f,-8.0f,-3.0f,+3.0f}), +1.0f, +1.0f, +2.0f, +2.0f, +8.0f, +8.0f, -3.0f, -3.0f));
int test_mm256_movemask_pd(__m256d A) {
// CHECK-LABEL: test_mm256_movemask_pd
@@ -1288,24 +1325,28 @@ __m256d test_mm256_mul_pd(__m256d A, __m256d B) {
// CHECK: fmul <4 x double>
return _mm256_mul_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d( _mm256_mul_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){-4.0, -5.0, +6.0, +7.0}), +16.0, +25.0, +36.0, +49.0));
__m256 test_mm256_mul_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_mul_ps
// CHECK: fmul <8 x float>
return _mm256_mul_ps(A, B);
}
+TEST_CONSTEXPR(match_m256( _mm256_mul_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}), +16.0f, +25.0f, +36.0f, +49.0f, +49.0f, +36.0f, +25.0f, +16.0f));
__m256d test_mm256_or_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_or_pd
// CHECK: or <4 x i64>
return _mm256_or_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_or_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){+0.0, -0.0, -0.0, +7.0}), -4.0, -5.0, -6.0, +7.0));
__m256 test_mm256_or_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_or_ps
// CHECK: or <8 x i32>
return _mm256_or_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_or_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), -4.0f, -5.0f, -6.0f, +7.0f, +7.0f, -6.0f, -5.0f, -4.0f));
__m128d test_mm_permute_pd(__m128d A) {
// CHECK-LABEL: test_mm_permute_pd
@@ -1924,12 +1965,14 @@ __m256d test_mm256_sub_pd(__m256d A, __m256d B) {
// CHECK: fsub <4 x double>
return _mm256_sub_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d( _mm256_sub_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){-0.0, +0.0, +2.0, -1.0}), -4.0, -5.0, 4.0, 8.0));
__m256 test_mm256_sub_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_sub_ps
// CHECK: fsub <8 x float>
return _mm256_sub_ps(A, B);
}
+TEST_CONSTEXPR(match_m256( _mm256_sub_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){-0.0f, +0.0f, +2.0f, -1.0f, -1.0f, +2.0f, +0.0f, -0.0f}), -4.0f, -5.0f, 4.0f, 8.0f, 8.0f, 4.0f, -5.0f, -4.0f));
int test_mm_testc_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_testc_pd
@@ -2053,36 +2096,42 @@ __m256d test_mm256_unpackhi_pd(__m256d A, __m256d B) {
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
return _mm256_unpackhi_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_unpackhi_pd((__m256d){+1.0, +2.0, +3.0, +4.0}, (__m256d){+5.0, +6.0, +7.0, +8.0}), +2.0, +6.0, +4.0, +8.0));
__m256 test_mm256_unpackhi_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_unpackhi_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
return _mm256_unpackhi_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_unpackhi_ps((__m256){+0.0f, +1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f}, (__m256){+10.0f, +11.0f, +12.0f, +13.0f, +14.0f, +15.0f, +16.0f, +17.0f}), +2.0f, +12.0f, +3.0f, +13.0f, +6.0f, +16.0f, +7.0f, +17.0f));
__m256d test_mm256_unpacklo_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_unpacklo_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
return _mm256_unpacklo_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_unpacklo_pd((__m256d){+1.0, +2.0, +3.0, +4.0}, (__m256d){+5.0, +6.0, +7.0, +8.0}), +1.0, +5.0, +3.0, +7.0));
__m256 test_mm256_unpacklo_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_unpacklo_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
return _mm256_unpacklo_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_unpacklo_ps((__m256){+0.0f, +1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f}, (__m256){+10.0f, +11.0f, +12.0f, +13.0f, +14.0f, +15.0f, +16.0f, +17.0f}), +0.0f, +10.0f, +1.0f, +11.0f, +4.0f, +14.0f, +5.0f, +15.0f));
__m256d test_mm256_xor_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_xor_pd
// CHECK: xor <4 x i64>
return _mm256_xor_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_xor_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){+0.0, -0.0, -0.0, +7.0}), -4.0, +5.0, -6.0, +0.0));
__m256 test_mm256_xor_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_xor_ps
// CHECK: xor <8 x i32>
return _mm256_xor_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_xor_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), -4.0f, +5.0f, -6.0f, +0.0f, +0.0f, -6.0f, +5.0f, -4.0f));
void test_mm256_zeroall(void) {
// CHECK-LABEL: test_mm256_zeroall
diff --git a/clang/test/CodeGen/X86/avx2-builtins.c b/clang/test/CodeGen/X86/avx2-builtins.c
index 27da56f..a39ce51 100644
--- a/clang/test/CodeGen/X86/avx2-builtins.c
+++ b/clang/test/CodeGen/X86/avx2-builtins.c
@@ -7,8 +7,17 @@
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx2 -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,X86
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx2 -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,X86
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx2 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X86
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx2 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X86
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx2 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X86
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx2 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X86
#include <immintrin.h>
+#include "builtin_test_helpers.h"
// NOTE: This should match the tests in llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
@@ -17,18 +26,21 @@ __m256i test_mm256_abs_epi8(__m256i a) {
// CHECK: [[ABS:%.*]] = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %{{.*}}, i1 false)
return _mm256_abs_epi8(a);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_abs_epi8((__m256i)(__v32qs){0, +1, +2, +3, +4, +5, +6, +7, +8, +9, +10, +11, +12, +13, +14, +15, +100, +50, -100, +20, +80, -50, +120, -20, -100, -50, +100, -20, -80, +50, -120, +20}), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 100, 50, 100, 20, 80, 50, 120, 20, 100, 50, 100, 20, 80, 50, 120, 20));
__m256i test_mm256_abs_epi16(__m256i a) {
// CHECK-LABEL: test_mm256_abs_epi16
// CHECK: [[ABS:%.*]] = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %{{.*}}, i1 false)
return _mm256_abs_epi16(a);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_abs_epi16((__m256i)(__v16hi){+5, -3, -32767, +32767, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129}), 5, 3, 32767, 32767, 10, 8, 0, 256, 256, 128, 3, 9, 15, 33, 63, 129));
__m256i test_mm256_abs_epi32(__m256i a) {
// CHECK-LABEL: test_mm256_abs_epi32
// CHECK: [[ABS:%.*]] = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %{{.*}}, i1 false)
return _mm256_abs_epi32(a);
}
+TEST_CONSTEXPR(match_v8si(_mm256_abs_epi32((__m256i)(__v8si){+5, -3, -2147483647, +2147483647, 0, -256, +256, +1025}), 5, 3, 2147483647, 2147483647, 0, 256, 256, 1025));
__m256i test_mm256_add_epi8(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_add_epi8
@@ -59,12 +71,14 @@ __m256i test_mm256_adds_epi8(__m256i a, __m256i b) {
// CHECK: call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_adds_epi8(a, b);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_adds_epi8((__m256i)(__v32qs){0, +1, +2, +3, +4, +5, +6, +7, +8, +9, +10, +11, +12, +13, +14, +15, +100, +50, -100, +20, +80, -50, +120, -20, -100, -50, +100, -20, -80, +50, -120, +20}, (__m256i)(__v32qs){0, +1, +2, +3, +4, +5, +6, +7, +8, +9, +10, +11, +12, +13, +14, +15, +50, +80, -50, +110, +60, -30, +20, -10, +50, +80, -50, +110, +60, -30, +20, -10}), 0, +2, +4, +6, +8, +10, +12, +14, +16, +18, +20, +22, +24, +26, +28, +30, +127, +127, -128, +127, +127, -80, +127, -30, -50, +30, +50, +90, -20, +20, -100, +10));
__m256i test_mm256_adds_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_adds_epi16
// CHECK: call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_adds_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_adds_epi16((__m256i)(__v16hi){0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, +32000, -32000, +32000, -32000}, (__m256i)(__v16hi){0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, +800, -800, -800, +800}), 0, -2, -4, -6, -8, -10, -12, -14, -16, -18, -20, -22, +32767, -32768, +31200, -31200));
__m256i test_mm256_adds_epu8(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_adds_epu8
@@ -72,6 +86,7 @@ __m256i test_mm256_adds_epu8(__m256i a, __m256i b) {
// CHECK: call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_adds_epu8(a, b);
}
+TEST_CONSTEXPR(match_v32qu(_mm256_adds_epu8((__m256i)(__v32qu){0, 0, 0, 0, +64, +64, +64, +64, +64, +64, +127, +127, +127, +127, +127, +127, +128, +128, +128, +128, +128, +128, +192, +192, +192, +192, +192, +192, +255, +255, +255, +255}, (__m256i)(__v32qu){0, +127, +128, +255, 0, +64, +127, +128, +192, +255, 0, +64, +127, +128, +192, +255, 0, +64, +127, +128, +192, +255, 0, +64, +127, +128, +192, +255, 0, +127, +128, +255}), 0, +127, +128, +255, +64, +128, +191, +192, +255, +255, +127, +191, +254, +255, +255, +255, +128, +192, +255, +255, +255, +255, +192, +255, +255, +255, +255, +255, +255, +255, +255, +255));
__m256i test_mm256_adds_epu16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_adds_epu16
@@ -79,6 +94,7 @@ __m256i test_mm256_adds_epu16(__m256i a, __m256i b) {
// CHECK: call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_adds_epu16(a, b);
}
+TEST_CONSTEXPR(match_v16hu(_mm256_adds_epu16((__m256i)(__v16hu){0, 0, 0, 0, +32767, +32767, +32767, +32767, +32768, +32768, +32768, +32768, +65535, +65535, +65535, +65535}, (__m256i)(__v16hu){0, +32767, +32768, +65535, 0, +32767, +32768, +65535, 0, +32767, +32768, +65535, 0, +32767, +32768, +65535}), 0, +32767, +32768, +65535, +32767, +65534, +65535, +65535, +32768, +65535, +65535, +65535, +65535, +65535, +65535, +65535));
__m256i test_mm256_alignr_epi8(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_alignr_epi8
@@ -97,6 +113,7 @@ __m256i test_mm256_and_si256(__m256i a, __m256i b) {
// CHECK: and <4 x i64>
return _mm256_and_si256(a, b);
}
+TEST_CONSTEXPR(match_v4di(_mm256_and_si256((__m256i)(__v4di){0, -1, 0, -1}, (__m256i)(__v4di){0, 0, -1, -1}), 0, 0, 0, -1));
__m256i test_mm256_andnot_si256(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_andnot_si256
@@ -104,6 +121,7 @@ __m256i test_mm256_andnot_si256(__m256i a, __m256i b) {
// CHECK: and <4 x i64>
return _mm256_andnot_si256(a, b);
}
+TEST_CONSTEXPR(match_v4di(_mm256_andnot_si256((__m256i)(__v4di){0, -1, 0, -1}, (__m256i)(__v4di){0, 0, -1, -1}), 0, 0, -1, 0));
__m256i test_mm256_avg_epu8(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_avg_epu8
@@ -153,6 +171,7 @@ __m128i test_mm_broadcastb_epi8(__m128i a) {
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> zeroinitializer
return _mm_broadcastb_epi8(a);
}
+TEST_CONSTEXPR(match_v16qi(_mm_broadcastb_epi8((__m128i)(__v16qi){42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42));
__m256i test_mm256_broadcastb_epi8(__m128i a) {
// CHECK-LABEL: test_mm256_broadcastb_epi8
@@ -160,6 +179,7 @@ __m256i test_mm256_broadcastb_epi8(__m128i a) {
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <32 x i32> zeroinitializer
return _mm256_broadcastb_epi8(a);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_broadcastb_epi8((__m128i)(__v16qi){42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42));
__m128i test_mm_broadcastd_epi32(__m128i a) {
// CHECK-LABEL: test_mm_broadcastd_epi32
@@ -167,6 +187,7 @@ __m128i test_mm_broadcastd_epi32(__m128i a) {
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> zeroinitializer
return _mm_broadcastd_epi32(a);
}
+TEST_CONSTEXPR(match_v4si(_mm_broadcastd_epi32((__m128i)(__v4si){-42, 0, 0, 0}), -42, -42, -42, -42));
__m256i test_mm256_broadcastd_epi32(__m128i a) {
// CHECK-LABEL: test_mm256_broadcastd_epi32
@@ -174,6 +195,7 @@ __m256i test_mm256_broadcastd_epi32(__m128i a) {
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> zeroinitializer
return _mm256_broadcastd_epi32(a);
}
+TEST_CONSTEXPR(match_v8si(_mm256_broadcastd_epi32((__m128i)(__v4si){-42, 0, 0, 0}), -42, -42, -42, -42, -42, -42, -42, -42));
__m128i test_mm_broadcastq_epi64(__m128i a) {
// CHECK-LABEL: test_mm_broadcastq_epi64
@@ -181,6 +203,7 @@ __m128i test_mm_broadcastq_epi64(__m128i a) {
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> zeroinitializer
return _mm_broadcastq_epi64(a);
}
+TEST_CONSTEXPR(match_v2di(_mm_broadcastq_epi64((__m128i)(__v2di){-42, 0}), -42, -42));
__m256i test_mm256_broadcastq_epi64(__m128i a) {
// CHECK-LABEL: test_mm256_broadcastq_epi64
@@ -188,12 +211,14 @@ __m256i test_mm256_broadcastq_epi64(__m128i a) {
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> zeroinitializer
return _mm256_broadcastq_epi64(a);
}
+TEST_CONSTEXPR(match_v4di(_mm256_broadcastq_epi64((__m128i)(__v2di){-42, 0}), -42, -42, -42, -42));
__m128d test_mm_broadcastsd_pd(__m128d a) {
// CHECK-LABEL: test_mm_broadcastsd_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> zeroinitializer
return _mm_broadcastsd_pd(a);
}
+TEST_CONSTEXPR(match_m128d(_mm_broadcastsd_pd((__m128d){+7.0, -7.0}), +7.0, +7.0));
__m256d test_mm256_broadcastsd_pd(__m128d a) {
// CHECK-LABEL: test_mm256_broadcastsd_pd
@@ -201,12 +226,14 @@ __m256d test_mm256_broadcastsd_pd(__m128d a) {
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> zeroinitializer
return _mm256_broadcastsd_pd(a);
}
+TEST_CONSTEXPR(match_m256d(_mm256_broadcastsd_pd((__m128d){+7.0, -7.0}), +7.0, +7.0, +7.0, +7.0));
__m256i test_mm256_broadcastsi128_si256(__m128i a) {
// CHECK-LABEL: test_mm256_broadcastsi128_si256
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
return _mm256_broadcastsi128_si256(a);
}
+TEST_CONSTEXPR(match_m256i(_mm256_broadcastsi128_si256((__m128i)(__v2di){3, 45}), 3, 45, 3, 45));
__m256i test_mm_broadcastsi128_si256(__m128i a) {
// CHECK-LABEL: test_mm_broadcastsi128_si256
@@ -220,6 +247,7 @@ __m128 test_mm_broadcastss_ps(__m128 a) {
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> zeroinitializer
return _mm_broadcastss_ps(a);
}
+TEST_CONSTEXPR(match_m128(_mm_broadcastss_ps((__m128){-4.0f, +5.0f, +6.0f, +7.0f}), -4.0f, -4.0f, -4.0f, -4.0f));
__m256 test_mm256_broadcastss_ps(__m128 a) {
// CHECK-LABEL: test_mm256_broadcastss_ps
@@ -227,6 +255,7 @@ __m256 test_mm256_broadcastss_ps(__m128 a) {
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> zeroinitializer
return _mm256_broadcastss_ps(a);
}
+TEST_CONSTEXPR(match_m256(_mm256_broadcastss_ps((__m128){-4.0f, +5.0f, +6.0f, +7.0f}), -4.0f, -4.0f, -4.0f, -4.0f, -4.0f, -4.0f, -4.0f, -4.0f));
__m128i test_mm_broadcastw_epi16(__m128i a) {
// CHECK-LABEL: test_mm_broadcastw_epi16
@@ -234,6 +263,7 @@ __m128i test_mm_broadcastw_epi16(__m128i a) {
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> zeroinitializer
return _mm_broadcastw_epi16(a);
}
+TEST_CONSTEXPR(match_v8hi(_mm_broadcastw_epi16((__m128i)(__v8hi){42, 0, 0, 0, 0, 0, 0, 0}), 42, 42, 42, 42, 42, 42, 42, 42));
__m256i test_mm256_broadcastw_epi16(__m128i a) {
// CHECK-LABEL: test_mm256_broadcastw_epi16
@@ -241,6 +271,7 @@ __m256i test_mm256_broadcastw_epi16(__m128i a) {
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <16 x i32> zeroinitializer
return _mm256_broadcastw_epi16(a);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_broadcastw_epi16((__m128i)(__v8hi){42, 0, 0, 0, 0, 0, 0, 0}), 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42));
__m256i test_mm256_bslli_epi128(__m256i a) {
// CHECK-LABEL: test_mm256_bslli_epi128
@@ -259,54 +290,69 @@ __m256i test_mm256_cmpeq_epi8(__m256i a, __m256i b) {
// CHECK: icmp eq <32 x i8>
return _mm256_cmpeq_epi8(a, b);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_cmpeq_epi8(
+  (__m256i)(__v32qs){1,-2,3,-4,-5,6,-7,8,-9,10,-11,12,-13,14,-15,16,1,-2,3,-4,-5,6,-7,8,-9,10,-11,12,-13,14,-15,16},
+  (__m256i)(__v32qs){10,-2,6,-4,-5,12,-14,8,-9,20,-22,12,-26,14,-30,16,10,-2,6,-4,-5,12,-14,8,-9,20,-22,12,-26,14,-30,16}),
+  0,-1,0,-1,-1,0,0,-1,-1,0,0,-1,0,-1,0,-1,0,-1,0,-1,-1,0,0,-1,-1,0,0,-1,0,-1,0,-1));
__m256i test_mm256_cmpeq_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_cmpeq_epi16
// CHECK: icmp eq <16 x i16>
return _mm256_cmpeq_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_cmpeq_epi16((__m256i)(__v16hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16}, (__m256i)(__v16hi){-10, -2, +6, -4, +5, -12, +14, -8, +9, -20, +22, -12, +26, -14, +30, -16}), 0, -1, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, 0, -1, 0, -1));
__m256i test_mm256_cmpeq_epi32(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_cmpeq_epi32
// CHECK: icmp eq <8 x i32>
return _mm256_cmpeq_epi32(a, b);
}
+TEST_CONSTEXPR(match_v8si(_mm256_cmpeq_epi32((__m256i)(__v8si){+1, -2, +3, -4, +5, -6, +7, -8}, (__m256i)(__v8si){-10, -2, +6, -4, +5, -12, +14, -8}), 0, -1, 0, -1, -1, 0, 0, -1));
__m256i test_mm256_cmpeq_epi64(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_cmpeq_epi64
// CHECK: icmp eq <4 x i64>
return _mm256_cmpeq_epi64(a, b);
}
+TEST_CONSTEXPR(match_v4di(_mm256_cmpeq_epi64((__m256i)(__v4di){+1, -2, +3, -4}, (__m256i)(__v4di){-10, -2, +6, -4}), 0, -1, 0, -1));
__m256i test_mm256_cmpgt_epi8(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_cmpgt_epi8
// CHECK: icmp sgt <32 x i8>
return _mm256_cmpgt_epi8(a, b);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_cmpgt_epi8(
+ (__m256i)(__v32qs){1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16},
+ (__m256i)(__v32qs){10, -2, 6, -5, 30, -7, 8, -1, 20, -3, 12, -8, 25, -10, 9, -2, -10, 2, -6, 5, -30, 7, -8, 1, -20, 3, -12, 8, -25, 10, -9, 2}),
+ 0, 0, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1));
__m256i test_mm256_cmpgt_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_cmpgt_epi16
// CHECK: icmp sgt <16 x i16>
return _mm256_cmpgt_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_cmpgt_epi16((__m256i)(__v16hi){+1, -2, +3, -4, +5, -6, +7, -8, +1, -2, +3, -4, +5, -6, +7, -8}, (__m256i)(__v16hi){-10, -2, +6, -5, +30, -7, +8, -1, -10, -2, +6, -5, +30, -7, +8, -1}), -1, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, -1, 0, -1, 0, 0));
__m256i test_mm256_cmpgt_epi32(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_cmpgt_epi32
// CHECK: icmp sgt <8 x i32>
return _mm256_cmpgt_epi32(a, b);
}
+TEST_CONSTEXPR(match_v8si(_mm256_cmpgt_epi32((__m256i)(__v8si){+1, -2, +3, -4, +5, -6, +7, -8}, (__m256i)(__v8si){-10, -2, +6, -5, +30, -7, +8, -1}), -1, 0, 0, -1, 0, -1, 0, 0));
__m256i test_mm256_cmpgt_epi64(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_cmpgt_epi64
// CHECK: icmp sgt <4 x i64>
return _mm256_cmpgt_epi64(a, b);
}
+TEST_CONSTEXPR(match_v4di(_mm256_cmpgt_epi64((__m256i)(__v4di){+1, -2, +3, -4}, (__m256i)(__v4di){-10, -2, +6, -5}), -1, 0, 0, -1));
__m256i test_mm256_cvtepi8_epi16(__m128i a) {
// CHECK-LABEL: test_mm256_cvtepi8_epi16
// CHECK: sext <16 x i8> %{{.*}} to <16 x i16>
return _mm256_cvtepi8_epi16(a);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_cvtepi8_epi16(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12)), -3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12));
__m256i test_mm256_cvtepi8_epi32(__m128i a) {
// CHECK-LABEL: test_mm256_cvtepi8_epi32
@@ -314,6 +360,7 @@ __m256i test_mm256_cvtepi8_epi32(__m128i a) {
// CHECK: sext <8 x i8> %{{.*}} to <8 x i32>
return _mm256_cvtepi8_epi32(a);
}
+TEST_CONSTEXPR(match_v8si(_mm256_cvtepi8_epi32(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12)), -3, 2, -1, 0, 1, -2, 3, -4));
__m256i test_mm256_cvtepi8_epi64(__m128i a) {
// CHECK-LABEL: test_mm256_cvtepi8_epi64
@@ -321,12 +368,14 @@ __m256i test_mm256_cvtepi8_epi64(__m128i a) {
// CHECK: sext <4 x i8> %{{.*}} to <4 x i64>
return _mm256_cvtepi8_epi64(a);
}
+TEST_CONSTEXPR(match_v4di(_mm256_cvtepi8_epi64(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12)), -3, 2, -1, 0));
__m256i test_mm256_cvtepi16_epi32(__m128i a) {
// CHECK-LABEL: test_mm256_cvtepi16_epi32
// CHECK: sext <8 x i16> %{{.*}} to <8 x i32>
return _mm256_cvtepi16_epi32(a);
}
+TEST_CONSTEXPR(match_v8si(_mm256_cvtepi16_epi32(_mm_setr_epi16(-300, 2, -1, 0, 1, -2, 3, -4)), -300, 2, -1, 0, 1, -2, 3, -4));
__m256i test_mm256_cvtepi16_epi64(__m128i a) {
// CHECK-LABEL: test_mm256_cvtepi16_epi64
@@ -334,18 +383,21 @@ __m256i test_mm256_cvtepi16_epi64(__m128i a) {
// CHECK: sext <4 x i16> %{{.*}} to <4 x i64>
return _mm256_cvtepi16_epi64(a);
}
+TEST_CONSTEXPR(match_v4di(_mm256_cvtepi16_epi64(_mm_setr_epi16(-300, 2, -1, 0, 1, -2, 3, -4)), -300, 2, -1, 0));
__m256i test_mm256_cvtepi32_epi64(__m128i a) {
// CHECK-LABEL: test_mm256_cvtepi32_epi64
// CHECK: sext <4 x i32> %{{.*}} to <4 x i64>
return _mm256_cvtepi32_epi64(a);
}
+TEST_CONSTEXPR(match_v4di(_mm256_cvtepi32_epi64(_mm_setr_epi32(-70000, 2, -1, 0)), -70000, 2, -1, 0));
__m256i test_mm256_cvtepu8_epi16(__m128i a) {
// CHECK-LABEL: test_mm256_cvtepu8_epi16
// CHECK: zext <16 x i8> %{{.*}} to <16 x i16>
return _mm256_cvtepu8_epi16(a);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_cvtepu8_epi16(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12)), 253, 2, 255, 0, 1, 254, 3, 252, 5, 250, 7, 248, 9, 246, 11, 244));
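+// The epu8/epu16/epu32 conversions zero-extend, so negative inputs reappear as
+// their unsigned bit patterns, e.g. (char)-3 becomes 253 and (short)-300 becomes 65236.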
__m256i test_mm256_cvtepu8_epi32(__m128i a) {
// CHECK-LABEL: test_mm256_cvtepu8_epi32
@@ -353,6 +405,7 @@ __m256i test_mm256_cvtepu8_epi32(__m128i a) {
// CHECK: zext <8 x i8> %{{.*}} to <8 x i32>
return _mm256_cvtepu8_epi32(a);
}
+TEST_CONSTEXPR(match_v8si(_mm256_cvtepu8_epi32(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12)), 253, 2, 255, 0, 1, 254, 3, 252));
__m256i test_mm256_cvtepu8_epi64(__m128i a) {
// CHECK-LABEL: test_mm256_cvtepu8_epi64
@@ -360,12 +413,14 @@ __m256i test_mm256_cvtepu8_epi64(__m128i a) {
// CHECK: zext <4 x i8> %{{.*}} to <4 x i64>
return _mm256_cvtepu8_epi64(a);
}
+TEST_CONSTEXPR(match_v4di(_mm256_cvtepu8_epi64(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12)), 253, 2, 255, 0));
__m256i test_mm256_cvtepu16_epi32(__m128i a) {
// CHECK-LABEL: test_mm256_cvtepu16_epi32
// CHECK: zext <8 x i16> {{.*}} to <8 x i32>
return _mm256_cvtepu16_epi32(a);
}
+TEST_CONSTEXPR(match_v8si(_mm256_cvtepu16_epi32(_mm_setr_epi16(-300, 2, -1, 0, 1, -2, 3, -4)), 65236, 2, 65535, 0, 1, 65534, 3, 65532));
__m256i test_mm256_cvtepu16_epi64(__m128i a) {
// CHECK-LABEL: test_mm256_cvtepu16_epi64
@@ -373,12 +428,14 @@ __m256i test_mm256_cvtepu16_epi64(__m128i a) {
// CHECK: zext <4 x i16> %{{.*}} to <4 x i64>
return _mm256_cvtepu16_epi64(a);
}
+TEST_CONSTEXPR(match_v4di(_mm256_cvtepu16_epi64(_mm_setr_epi16(-300, 2, -1, 0, 1, -2, 3, -4)), 65236, 2, 65535, 0));
__m256i test_mm256_cvtepu32_epi64(__m128i a) {
// CHECK-LABEL: test_mm256_cvtepu32_epi64
// CHECK: zext <4 x i32> %{{.*}} to <4 x i64>
return _mm256_cvtepu32_epi64(a);
}
+TEST_CONSTEXPR(match_v4di(_mm256_cvtepu32_epi64(_mm_setr_epi32(-70000, 2, -1, 0)), 4294897296, 2, 4294967295, 0));
__m128i test0_mm256_extracti128_si256_0(__m256i a) {
// CHECK-LABEL: test0_mm256_extracti128_si256
@@ -876,6 +933,7 @@ __m256i test_mm256_mul_epi32(__m256i a, __m256i b) {
// CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
return _mm256_mul_epi32(a, b);
}
+TEST_CONSTEXPR(match_m256i(_mm256_mul_epi32((__m256i)(__v8si){+1, -2, +3, -4, +5, -6, +7, -8}, (__m256i)(__v8si){-16, -14, +12, +10, -8, +6, -4, +2}), -16, 36, -40, -28));
__m256i test_mm256_mul_epu32(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_mul_epu32
@@ -884,18 +942,21 @@ __m256i test_mm256_mul_epu32(__m256i a, __m256i b) {
// CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
return _mm256_mul_epu32(a, b);
}
+TEST_CONSTEXPR(match_m256i(_mm256_mul_epu32((__m256i)(__v8si){+1, -2, +3, -4, +5, -6, +7, -8}, (__m256i)(__v8si){-16, -14, +12, +10, -8, +6, -4, +2}), 4294967280, 36, 21474836440, 30064771044));
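+// _mm256_mul_epi32/_mm256_mul_epu32 multiply only the even-indexed 32-bit lanes
+// after sign- or zero-extension; e.g. the third unsigned product above is
+// 5 * (unsigned)-8 = 5 * 4294967288 = 21474836440.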
__m256i test_mm256_mulhi_epu16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_mulhi_epu16
// CHECK: call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_mulhi_epu16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mulhi_epu16((__m256i)(__v16hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16}, (__m256i)(__v16hi){-32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), 0, -32, 0, 25, 4, -28, 0, 17, 8, -24, 0, 9, 12, 5, 14, 1));
__m256i test_mm256_mulhi_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_mulhi_epi16
// CHECK: call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_mulhi_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mulhi_epi16((__m256i)(__v16hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16}, (__m256i)(__v16hi){-32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, -1, -1, -1));
__m256i test_mm256_mulhrs_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_mulhrs_epi16
@@ -908,6 +969,7 @@ __m256i test_mm256_mullo_epi16(__m256i a, __m256i b) {
// CHECK: mul <16 x i16>
return _mm256_mullo_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mullo_epi16((__m256i)(__v16hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16}, (__m256i)(__v16hi){-32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), -32, 60, 84, -104, -120, 132, 140, -144, -144, 140, 132, -120, -104, -84, -60, -32));
__m256i test_mm256_mullo_epi32(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_mullo_epi32
@@ -920,6 +982,7 @@ __m256i test_mm256_or_si256(__m256i a, __m256i b) {
// CHECK: or <4 x i64>
return _mm256_or_si256(a, b);
}
+TEST_CONSTEXPR(match_v4di(_mm256_or_si256((__m256i)(__v4di){0, -1, 0, -1}, (__m256i)(__v4di){0, 0, -1, -1}), 0, -1, -1, -1));
__m256i test_mm256_packs_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_packs_epi16
@@ -1028,6 +1091,11 @@ __m256i test_mm256_slli_epi16(__m256i a) {
// CHECK: call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %{{.*}}, i32 %{{.*}})
return _mm256_slli_epi16(a, 3);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_slli_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 0), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
+TEST_CONSTEXPR(match_v16hi(_mm256_slli_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e));
+TEST_CONSTEXPR(match_v16hi(_mm256_slli_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 15), 0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000));
+TEST_CONSTEXPR(match_v16hi(_mm256_slli_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 16), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v16hi(_mm256_slli_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 17), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
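+// Immediate shift counts of 16 and 17 are >= the 16-bit element width, so the
+// last two vectors are expected to be all zeros.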
__m256i test_mm256_slli_epi16_2(__m256i a, int b) {
// CHECK-LABEL: test_mm256_slli_epi16_2
@@ -1040,6 +1108,11 @@ __m256i test_mm256_slli_epi32(__m256i a) {
// CHECK: call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %{{.*}}, i32 %{{.*}})
return _mm256_slli_epi32(a, 3);
}
+TEST_CONSTEXPR(match_v8si(_mm256_slli_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, 0), 0, 1, 2, 3, 4, 5, 6, 7));
+TEST_CONSTEXPR(match_v8si(_mm256_slli_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, 1), 0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe));
+TEST_CONSTEXPR(match_v8su(_mm256_slli_epi32((__m256i)(__v8su){0, 1, 2, 3, 4, 5, 6, 7}, 31), 0, 0x80000000, 0x0, 0x80000000, 0x0, 0x80000000, 0x0, 0x80000000));
+TEST_CONSTEXPR(match_v8si(_mm256_slli_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, 32), 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v8si(_mm256_slli_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, 33), 0, 0, 0, 0, 0, 0, 0, 0));
__m256i test_mm256_slli_epi32_2(__m256i a, int b) {
// CHECK-LABEL: test_mm256_slli_epi32_2
@@ -1052,6 +1125,11 @@ __m256i test_mm256_slli_epi64(__m256i a) {
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %{{.*}}, i32 %{{.*}})
return _mm256_slli_epi64(a, 3);
}
+TEST_CONSTEXPR(match_v4di(_mm256_slli_epi64((__m256i)(__v4di){0, 1, 2, 3}, 0), 0, 1, 2, 3));
+TEST_CONSTEXPR(match_v4di(_mm256_slli_epi64((__m256i)(__v4di){0, 1, 2, 3}, 1), 0, 0x2, 0x4, 0x6));
+TEST_CONSTEXPR(match_v4di(_mm256_slli_epi64((__m256i)(__v4di){0, 1, 2, 3}, 33), 0, 0x200000000LL, 0x400000000LL, 0x600000000LL));
+TEST_CONSTEXPR(match_v4di(_mm256_slli_epi64((__m256i)(__v4di){0, 1, 2, 3}, 64), 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v4di(_mm256_slli_epi64((__m256i)(__v4di){0, 1, 2, 3}, 65), 0, 0, 0, 0));
__m256i test_mm256_slli_epi64_2(__m256i a, int b) {
// CHECK-LABEL: test_mm256_slli_epi64_2
@@ -1070,24 +1148,28 @@ __m128i test_mm_sllv_epi32(__m128i a, __m128i b) {
// CHECK: call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_sllv_epi32(a, b);
}
+TEST_CONSTEXPR(match_v4si(_mm_sllv_epi32((__m128i)(__v4si){1, -2, 3, -4}, (__m128i)(__v4si){1, 2, 3, -4}), 2, -8, 24, 0));
__m256i test_mm256_sllv_epi32(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_sllv_epi32
// CHECK: call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_sllv_epi32(a, b);
}
+TEST_CONSTEXPR(match_v8si(_mm256_sllv_epi32((__m256i)(__v8si){1, -2, 3, -4, 5, -6, 7, -8}, (__m256i)(__v8si){1, 2, 3, 4, -17, 31, 33, 29}), 2, -8, 24, -64, 0, 0, 0, 0));
__m128i test_mm_sllv_epi64(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_sllv_epi64
// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_sllv_epi64(a, b);
}
+TEST_CONSTEXPR(match_m128i(_mm_sllv_epi64((__m128i)(__v2di){1, -3}, (__m128i)(__v2di){8, 63}), 256, 0x8000000000000000ULL));
__m256i test_mm256_sllv_epi64(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_sllv_epi64
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_sllv_epi64(a, b);
}
+TEST_CONSTEXPR(match_m256i(_mm256_sllv_epi64((__m256i)(__v4di){1, -2, 3, -4}, (__m256i)(__v4di){1, 2, 3, -4}), 2, -8, 24, 0));
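+// The variable-shift (sllv) forms use a per-lane count, and counts outside
+// [0, width-1] (e.g. the -4 above, taken as a huge unsigned value) zero the
+// lane instead of wrapping.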
__m256i test_mm256_sra_epi16(__m256i a, __m128i b) {
// CHECK-LABEL: test_mm256_sra_epi16
@@ -1106,6 +1188,7 @@ __m256i test_mm256_srai_epi16(__m256i a) {
// CHECK: call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> %{{.*}}, i32 %{{.*}})
return _mm256_srai_epi16(a, 3);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_srai_epi16((__m256i)(__v16hi){-32768, 32767, -3, -2, -1, 0, 1, 2, -32768, 32767, -3, -2, -1, 0, 1, 2}, 1), -16384, 16383, -2, -1, -1, 0, 0, 1, -16384, 16383, -2, -1, -1, 0, 0, 1));
__m256i test_mm256_srai_epi16_2(__m256i a, int b) {
// CHECK-LABEL: test_mm256_srai_epi16_2
@@ -1118,6 +1201,7 @@ __m256i test_mm256_srai_epi32(__m256i a) {
// CHECK: call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> %{{.*}}, i32 %{{.*}})
return _mm256_srai_epi32(a, 3);
}
+TEST_CONSTEXPR(match_v8si(_mm256_srai_epi32((__m256i)(__v8si){-32768, 32767, -3, -2, -1, 0, 1, 2}, 1), -16384, 16383, -2, -1, -1, 0, 0, 1));
__m256i test_mm256_srai_epi32_2(__m256i a, int b) {
// CHECK-LABEL: test_mm256_srai_epi32_2
@@ -1130,12 +1214,14 @@ __m128i test_mm_srav_epi32(__m128i a, __m128i b) {
// CHECK: call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_srav_epi32(a, b);
}
+TEST_CONSTEXPR(match_v4si(_mm_srav_epi32((__m128i)(__v4si){1, -2, 3, -4}, (__m128i)(__v4si){1, 2, 3, -4}), 0, -1, 0, -1));
__m256i test_mm256_srav_epi32(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_srav_epi32
// CHECK: call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_srav_epi32(a, b);
}
+TEST_CONSTEXPR(match_v8si(_mm256_srav_epi32((__m256i)(__v8si){1, -2, 3, -4, 5, -6, 7, -8}, (__m256i)(__v8si){1, 2, 3, 4, -17, 31, 33, 29}), 0, -1, 0, -1, 0, -1, 0, -1));
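+// The arithmetic variants (srav) fill out-of-range lanes with the sign bit, so
+// 5 >> -17 and 7 >> 33 both give 0 here; in-range lanes shift arithmetically,
+// e.g. -8 >> 29 = -1.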
__m256i test_mm256_srl_epi16(__m256i a, __m128i b) {
// CHECK-LABEL: test_mm256_srl_epi16
@@ -1160,6 +1246,7 @@ __m256i test_mm256_srli_epi16(__m256i a) {
// CHECK: call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %{{.*}}, i32 %{{.*}})
return _mm256_srli_epi16(a, 3);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_srli_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0, 0x0, 0x1, 0x1, 0x2, 0x2, 0x3, 0x3, 0x4, 0x4, 0x5, 0x5, 0x6, 0x6, 0x7, 0x7));
__m256i test_mm256_srli_epi16_2(__m256i a, int b) {
// CHECK-LABEL: test_mm256_srli_epi16_2
@@ -1172,6 +1259,7 @@ __m256i test_mm256_srli_epi32(__m256i a) {
// CHECK: call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %{{.*}}, i32 %{{.*}})
return _mm256_srli_epi32(a, 3);
}
+TEST_CONSTEXPR(match_v8si(_mm256_srli_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, 31), 0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0));
__m256i test_mm256_srli_epi32_2(__m256i a, int b) {
// CHECK-LABEL: test_mm256_srli_epi32_2
@@ -1184,6 +1272,7 @@ __m256i test_mm256_srli_epi64(__m256i a) {
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %{{.*}}, i32 %{{.*}})
return _mm256_srli_epi64(a, 3);
}
+TEST_CONSTEXPR(match_v4di(_mm256_srli_epi64((__m256i)(__v4di){0, 1, 2, 3}, 33), 0, 0x0, 0x0, 0x0));
__m256i test_mm256_srli_epi64_2(__m256i a, int b) {
// CHECK-LABEL: test_mm256_srli_epi64_2
@@ -1202,24 +1291,28 @@ __m128i test_mm_srlv_epi32(__m128i a, __m128i b) {
// CHECK: call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_srlv_epi32(a, b);
}
+TEST_CONSTEXPR(match_v4si(_mm_srlv_epi32((__m128i)(__v4si){1, -2, 3, -4}, (__m128i)(__v4si){1, 2, 3, -4}), 0, 1073741823, 0, 0));
__m256i test_mm256_srlv_epi32(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_srlv_epi32
// CHECK: call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_srlv_epi32(a, b);
}
+TEST_CONSTEXPR(match_v8si(_mm256_srlv_epi32((__m256i)(__v8si){1, -2, 3, -4, 5, -6, 7, -8}, (__m256i)(__v8si){1, 2, 3, 4, -17, 31, 33, 29}), 0, 1073741823, 0, 268435455, 0, 1, 0, 7));
__m128i test_mm_srlv_epi64(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_srlv_epi64
// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_srlv_epi64(a, b);
}
+TEST_CONSTEXPR(match_m128i(_mm_srlv_epi64((__m128i)(__v2di){1, -3}, (__m128i)(__v2di){8, 63}), 0, 1));
__m256i test_mm256_srlv_epi64(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_srlv_epi64
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_srlv_epi64(a, b);
}
+TEST_CONSTEXPR(match_m256i(_mm256_srlv_epi64((__m256i)(__v4di){1, -2, 3, -4}, (__m256i)(__v4di){1, 2, 3, -4}), 0, 0x3FFFFFFFFFFFFFFFULL, 0, 0));
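+// The logical forms (srlv) shift in zeros and treat lanes as unsigned, so
+// (unsigned long long)-2 >> 2 is 0x3FFFFFFFFFFFFFFF rather than -1.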
__m256i test_mm256_stream_load_si256(__m256i const *a) {
// CHECK-LABEL: test_mm256_stream_load_si256
@@ -1262,12 +1355,14 @@ __m256i test_mm256_subs_epi8(__m256i a, __m256i b) {
// CHECK: call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_subs_epi8(a, b);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_subs_epi8((__m256i)(__v32qs){0, +1, +2, +3, +4, +5, +6, +7, +8, +9, +10, +11, +12, +13, +14, +15, +100, +50, -100, +20, +80, -50, +120, -20, -100, -50, +100, -20, -80, +50, -120, +20}, (__m256i)(__v32qs){0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -50, -80, +50, -110, -60, +30, -20, +10, -50, -80, +50, -110, -60, +30, -20, +10}), 0, +2, +4, +6, +8, +10, +12, +14, +16, +18, +20, +22, +24, +26, +28, +30, +127, +127, -128, +127, +127, -80, +127, -30, -50, +30, +50, +90, -20, +20, -100, +10));
__m256i test_mm256_subs_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_subs_epi16
// CHECK: call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_subs_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_subs_epi16((__m256i)(__v16hi){0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, +32000, -32000, +32000, -32000}, (__m256i)(__v16hi){0, +1, +2, +3, +4, +5, +6, +7, +8, +9, +10, +11, -800, +800, +800, -800}), 0, -2, -4, -6, -8, -10, -12, -14, -16, -18, -20, -22, +32767, -32768, +31200, -31200));
__m256i test_mm256_subs_epu8(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_subs_epu8
@@ -1275,6 +1370,7 @@ __m256i test_mm256_subs_epu8(__m256i a, __m256i b) {
// CHECK: call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_subs_epu8(a, b);
}
+TEST_CONSTEXPR(match_v32qu(_mm256_subs_epu8((__m256i)(__v32qu){0, 0, 0, 0, +64, +64, +64, +64, +64, +64, +127, +127, +127, +127, +127, +127, +128, +128, +128, +128, +128, +128, +192, +192, +192, +192, +192, +192, +255, +255, +255, +255}, (__m256i)(__v32qu){0, +127, +128, +255, 0, +64, +127, +128, +192, +255, 0, +64, +127, +128, +192, +255, 0, +64, +127, +128, +192, +255, 0, +64, +127, +128, +192, +255, 0, +127, +128, +255}), 0, 0, 0, 0, +64, 0, 0, 0, 0, 0, +127, +63, 0, 0, 0, 0, +128, +64, +1, 0, 0, 0, +192, +128, +65, +64, 0, 0, +255, +128, +127, 0));
__m256i test_mm256_subs_epu16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_subs_epu16
@@ -1282,57 +1378,67 @@ __m256i test_mm256_subs_epu16(__m256i a, __m256i b) {
// CHECK: call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_subs_epu16(a, b);
}
+TEST_CONSTEXPR(match_v16hu(_mm256_subs_epu16((__m256i)(__v16hu){0, 0, 0, 0, +32767, +32767, +32767, +32767, +32768, +32768, +32768, +32768, +65535, +65535, +65535, +65535}, (__m256i)(__v16hu){0, +32767, +32768, +65535, 0, +32767, +32768, +65535, 0, +32767, +32768, +65535, 0, +32767, +32768, +65535}), 0, 0, 0, 0, +32767, 0, 0, 0, +32768, +1, 0, 0, +65535, +32768, +32767, 0));
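+// usub.sat clamps at zero, so any lane where the subtrahend is larger (e.g.
+// 0 - 32767) is expected to be 0 rather than wrapping around.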
__m256i test_mm256_unpackhi_epi8(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_unpackhi_epi8
// CHECK: shufflevector <32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
return _mm256_unpackhi_epi8(a, b);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_unpackhi_epi8((__m256i)(__v32qi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, (__m256i)(__v32qi){32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}), 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47, 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63));
__m256i test_mm256_unpackhi_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_unpackhi_epi16
// CHECK: shufflevector <16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
return _mm256_unpackhi_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_unpackhi_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m256i)(__v16hi){16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}), 4, 20, 5, 21, 6, 22, 7, 23, 12, 28, 13, 29, 14, 30, 15, 31));
__m256i test_mm256_unpackhi_epi32(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_unpackhi_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
return _mm256_unpackhi_epi32(a, b);
}
+TEST_CONSTEXPR(match_v8si(_mm256_unpackhi_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, (__m256i)(__v8si){8, 9, 10, 11, 12, 13, 14, 15}), 2, 10, 3, 11, 6, 14, 7, 15));
__m256i test_mm256_unpackhi_epi64(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_unpackhi_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
return _mm256_unpackhi_epi64(a, b);
}
+TEST_CONSTEXPR(match_v4di(_mm256_unpackhi_epi64((__m256i)(__v4di){0, 1, 2, 3}, (__m256i)(__v4di){4, 5, 6, 7}), 1, 5, 3, 7));
__m256i test_mm256_unpacklo_epi8(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_unpacklo_epi8
// CHECK: shufflevector <32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
return _mm256_unpacklo_epi8(a, b);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_unpacklo_epi8((__m256i)(__v32qi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, (__m256i)(__v32qi){32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}), 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39, 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55));
__m256i test_mm256_unpacklo_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_unpacklo_epi16
// CHECK: shufflevector <16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
return _mm256_unpacklo_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_unpacklo_epi16((__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m256i)(__v16hi){16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}), 0, 16, 1, 17, 2, 18, 3, 19, 8, 24, 9, 25, 10, 26, 11, 27));
__m256i test_mm256_unpacklo_epi32(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_unpacklo_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
return _mm256_unpacklo_epi32(a, b);
}
+TEST_CONSTEXPR(match_v8si(_mm256_unpacklo_epi32((__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, (__m256i)(__v8si){8, 9, 10, 11, 12, 13, 14, 15}), 0, 8, 1, 9, 4, 12, 5, 13));
__m256i test_mm256_unpacklo_epi64(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_unpacklo_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
return _mm256_unpacklo_epi64(a, b);
}
+TEST_CONSTEXPR(match_v4di(_mm256_unpacklo_epi64((__m256i)(__v4di){0, 1, 2, 3}, (__m256i)(__v4di){4, 5, 6, 7}), 0, 4, 2, 6));
__m256i test_mm256_xor_si256(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_xor_si256
// CHECK: xor <4 x i64>
return _mm256_xor_si256(a, b);
}
+TEST_CONSTEXPR(match_v4di(_mm256_xor_si256((__m256i)(__v4di){0, -1, 0, -1}, (__m256i)(__v4di){0, 0, -1, -1}), 0, -1, -1, 0));
diff --git a/clang/test/CodeGen/X86/avx512-reduceIntrin.c b/clang/test/CodeGen/X86/avx512-reduceIntrin.c
index 2ceac3a..598bca4 100644
--- a/clang/test/CodeGen/X86/avx512-reduceIntrin.c
+++ b/clang/test/CodeGen/X86/avx512-reduceIntrin.c
@@ -1,162 +1,180 @@
-// RUN: %clang_cc1 -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=i386-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=i386-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=i386-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=i386-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <immintrin.h>
+#include "builtin_test_helpers.h"
long long test_mm512_reduce_add_epi64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_add_epi64(
-// CHECK: call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_add_epi64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_add_epi64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_add_epi64((__m512i)(__v8di){-4, -3, -2, -1, 0, 1, 2, 3}) == -4);
long long test_mm512_reduce_mul_epi64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_mul_epi64(
-// CHECK: call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_mul_epi64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_mul_epi64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_mul_epi64((__m512i)(__v8di){1, 2, 3, 4, 5, 6, 7, 8}) == 40320);
long long test_mm512_reduce_or_epi64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_or_epi64(
-// CHECK: call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_or_epi64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_or_epi64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_or_epi64((__m512i)(__v8di){0x100, 0x200, 0x400, 0x800, 0, 0, 0, 0}) == 0xF00);
long long test_mm512_reduce_and_epi64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_and_epi64(
-// CHECK: call i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_and_epi64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_and_epi64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_and_epi64((__m512i)(__v8di){0xFFFF, 0xFF00, 0x00FF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFF00, 0x00FF}) == 0x0000);
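+// The AND reduction folds all eight lanes together; 0xFF00 & 0x00FF alone is
+// already 0, hence the expected result of 0.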
long long test_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_add_epi64(
+// CHECK-LABEL: test_mm512_mask_reduce_add_epi64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_add_epi64(__M, __W);
}
long long test_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_mul_epi64(
+// CHECK-LABEL: test_mm512_mask_reduce_mul_epi64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_mul_epi64(__M, __W);
}
long long test_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_and_epi64(
+// CHECK-LABEL: test_mm512_mask_reduce_and_epi64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_and_epi64(__M, __W);
}
long long test_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_or_epi64(
+// CHECK-LABEL: test_mm512_mask_reduce_or_epi64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_or_epi64(__M, __W);
}
int test_mm512_reduce_add_epi32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_add_epi32(
-// CHECK: call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_add_epi32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_add_epi32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_add_epi32((__m512i)(__v16si){-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7}) == -8);
int test_mm512_reduce_mul_epi32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_mul_epi32(
-// CHECK: call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_mul_epi32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_mul_epi32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_mul_epi32((__m512i)(__v16si){1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 1, 1, -3, 1, 1}) == -36);
int test_mm512_reduce_or_epi32(__m512i __W){
-// CHECK: call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_or_epi32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_or_epi32((__m512i)(__v16si){0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0, 0, 0, 0, 0, 0, 0, 0}) == 0xFF);
int test_mm512_reduce_and_epi32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_and_epi32(
-// CHECK: call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_and_epi32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_and_epi32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_and_epi32((__m512i)(__v16si){0xFF, 0xF0, 0x0F, 0xFF, 0xFF, 0xFF, 0xF0, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0xF0, 0x0F, 0x0F}) == 0x00);
int test_mm512_mask_reduce_add_epi32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_add_epi32(
+// CHECK-LABEL: test_mm512_mask_reduce_add_epi32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_add_epi32(__M, __W);
}
int test_mm512_mask_reduce_mul_epi32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_mul_epi32(
+// CHECK-LABEL: test_mm512_mask_reduce_mul_epi32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_mul_epi32(__M, __W);
}
int test_mm512_mask_reduce_and_epi32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_and_epi32(
+// CHECK-LABEL: test_mm512_mask_reduce_and_epi32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_and_epi32(__M, __W);
}
int test_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_or_epi32(
+// CHECK-LABEL: test_mm512_mask_reduce_or_epi32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_or_epi32(__M, __W);
}
double test_mm512_reduce_add_pd(__m512d __W, double ExtraAddOp){
-// CHECK-LABEL: @test_mm512_reduce_add_pd(
+// CHECK-LABEL: test_mm512_reduce_add_pd
// CHECK-NOT: reassoc
-// CHECK: call reassoc double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> %{{.*}})
+// CHECK: call reassoc {{.*}}double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> %{{.*}})
// CHECK-NOT: reassoc
return _mm512_reduce_add_pd(__W) + ExtraAddOp;
}
double test_mm512_reduce_mul_pd(__m512d __W, double ExtraMulOp){
-// CHECK-LABEL: @test_mm512_reduce_mul_pd(
+// CHECK-LABEL: test_mm512_reduce_mul_pd
// CHECK-NOT: reassoc
-// CHECK: call reassoc double @llvm.vector.reduce.fmul.v8f64(double 1.000000e+00, <8 x double> %{{.*}})
+// CHECK: call reassoc {{.*}}double @llvm.vector.reduce.fmul.v8f64(double 1.000000e+00, <8 x double> %{{.*}})
// CHECK-NOT: reassoc
return _mm512_reduce_mul_pd(__W) * ExtraMulOp;
}
float test_mm512_reduce_add_ps(__m512 __W){
-// CHECK-LABEL: @test_mm512_reduce_add_ps(
-// CHECK: call reassoc float @llvm.vector.reduce.fadd.v16f32(float -0.000000e+00, <16 x float> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_add_ps
+// CHECK: call reassoc {{.*}}float @llvm.vector.reduce.fadd.v16f32(float -0.000000e+00, <16 x float> %{{.*}})
return _mm512_reduce_add_ps(__W);
}
float test_mm512_reduce_mul_ps(__m512 __W){
-// CHECK-LABEL: @test_mm512_reduce_mul_ps(
-// CHECK: call reassoc float @llvm.vector.reduce.fmul.v16f32(float 1.000000e+00, <16 x float> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_mul_ps
+// CHECK: call reassoc {{.*}}float @llvm.vector.reduce.fmul.v16f32(float 1.000000e+00, <16 x float> %{{.*}})
return _mm512_reduce_mul_ps(__W);
}
double test_mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_add_pd(
+// CHECK-LABEL: test_mm512_mask_reduce_add_pd
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-// CHECK: call reassoc double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> %{{.*}})
+// CHECK: call reassoc {{.*}}double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> %{{.*}})
return _mm512_mask_reduce_add_pd(__M, __W);
}
double test_mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_mul_pd(
+// CHECK-LABEL: test_mm512_mask_reduce_mul_pd
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-// CHECK: call reassoc double @llvm.vector.reduce.fmul.v8f64(double 1.000000e+00, <8 x double> %{{.*}})
+// CHECK: call reassoc {{.*}}double @llvm.vector.reduce.fmul.v8f64(double 1.000000e+00, <8 x double> %{{.*}})
return _mm512_mask_reduce_mul_pd(__M, __W);
}
float test_mm512_mask_reduce_add_ps(__mmask16 __M, __m512 __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_add_ps(
+// CHECK-LABEL: test_mm512_mask_reduce_add_ps
// CHECK: select <16 x i1> %{{.*}}, <16 x float> {{.*}}, <16 x float> {{.*}}
-// CHECK: call reassoc float @llvm.vector.reduce.fadd.v16f32(float -0.000000e+00, <16 x float> %{{.*}})
+// CHECK: call reassoc {{.*}}float @llvm.vector.reduce.fadd.v16f32(float -0.000000e+00, <16 x float> %{{.*}})
return _mm512_mask_reduce_add_ps(__M, __W);
}
float test_mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_mul_ps(
+// CHECK-LABEL: test_mm512_mask_reduce_mul_ps
// CHECK: select <16 x i1> %{{.*}}, <16 x float> {{.*}}, <16 x float> %{{.*}}
-// CHECK: call reassoc float @llvm.vector.reduce.fmul.v16f32(float 1.000000e+00, <16 x float> %{{.*}})
+// CHECK: call reassoc {{.*}}float @llvm.vector.reduce.fmul.v16f32(float 1.000000e+00, <16 x float> %{{.*}})
return _mm512_mask_reduce_mul_ps(__M, __W);
}
diff --git a/clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c b/clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c
index 3e33ec5..309fc28 100644
--- a/clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c
+++ b/clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c
@@ -1,164 +1,180 @@
-// RUN: %clang_cc1 -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=i386-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=i386-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=i386-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=i386-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
long long test_mm512_reduce_max_epi64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_max_epi64(
-// CHECK: call i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_max_epi64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_max_epi64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_max_epi64((__m512i)(__v8di){-4, -3, -2, -1, 0, 1, 2, 3}) == 3);
unsigned long long test_mm512_reduce_max_epu64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_max_epu64(
-// CHECK: call i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_max_epu64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_max_epu64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_max_epu64((__m512i)(__v8du){0, 1, 2, 3, 4, 5, 6, 7}) == 7);
double test_mm512_reduce_max_pd(__m512d __W, double ExtraAddOp){
-// CHECK-LABEL: @test_mm512_reduce_max_pd(
+// CHECK-LABEL: test_mm512_reduce_max_pd
// CHECK-NOT: nnan
-// CHECK: call nnan double @llvm.vector.reduce.fmax.v8f64(<8 x double> %{{.*}})
+// CHECK: call nnan {{.*}}double @llvm.vector.reduce.fmax.v8f64(<8 x double> %{{.*}})
// CHECK-NOT: nnan
return _mm512_reduce_max_pd(__W) + ExtraAddOp;
}
long long test_mm512_reduce_min_epi64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_min_epi64(
-// CHECK: call i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_min_epi64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_min_epi64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_min_epi64((__m512i)(__v8di){-4, -3, -2, -1, 0, 1, 2, 3}) == -4);
unsigned long long test_mm512_reduce_min_epu64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_min_epu64(
-// CHECK: call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_min_epu64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_min_epu64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_min_epu64((__m512i)(__v8du){0, 1, 2, 3, 4, 5, 6, 7}) == 0);
double test_mm512_reduce_min_pd(__m512d __W, double ExtraMulOp){
-// CHECK-LABEL: @test_mm512_reduce_min_pd(
+// CHECK-LABEL: test_mm512_reduce_min_pd
// CHECK-NOT: nnan
-// CHECK: call nnan double @llvm.vector.reduce.fmin.v8f64(<8 x double> %{{.*}})
+// CHECK: call nnan {{.*}}double @llvm.vector.reduce.fmin.v8f64(<8 x double> %{{.*}})
// CHECK-NOT: nnan
return _mm512_reduce_min_pd(__W) * ExtraMulOp;
}
long long test_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_max_epi64(
+// CHECK-LABEL: test_mm512_mask_reduce_max_epi64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_max_epi64(__M, __W);
}
unsigned long test_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_max_epu64(
+// CHECK-LABEL: test_mm512_mask_reduce_max_epu64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_max_epu64(__M, __W);
}
double test_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_max_pd(
+// CHECK-LABEL: test_mm512_mask_reduce_max_pd
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-// CHECK: call nnan double @llvm.vector.reduce.fmax.v8f64(<8 x double> %{{.*}})
+// CHECK: call nnan {{.*}}double @llvm.vector.reduce.fmax.v8f64(<8 x double> %{{.*}})
return _mm512_mask_reduce_max_pd(__M, __W);
}
long long test_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_min_epi64(
+// CHECK-LABEL: test_mm512_mask_reduce_min_epi64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_min_epi64(__M, __W);
}
unsigned long long test_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_min_epu64(
+// CHECK-LABEL: test_mm512_mask_reduce_min_epu64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_min_epu64(__M, __W);
}
double test_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_min_pd(
+// CHECK-LABEL: test_mm512_mask_reduce_min_pd
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-// CHECK: call nnan double @llvm.vector.reduce.fmin.v8f64(<8 x double> %{{.*}})
+// CHECK: call nnan {{.*}}double @llvm.vector.reduce.fmin.v8f64(<8 x double> %{{.*}})
return _mm512_mask_reduce_min_pd(__M, __W);
}
int test_mm512_reduce_max_epi32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_max_epi32(
-// CHECK: call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_max_epi32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_max_epi32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_max_epi32((__m512i)(__v16si){-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7}) == 7);
unsigned int test_mm512_reduce_max_epu32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_max_epu32(
-// CHECK: call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_max_epu32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_max_epu32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_max_epu32((__m512i)(__v16su){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}) == 15);
float test_mm512_reduce_max_ps(__m512 __W){
-// CHECK-LABEL: @test_mm512_reduce_max_ps(
-// CHECK: call nnan float @llvm.vector.reduce.fmax.v16f32(<16 x float> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_max_ps
+// CHECK: call nnan {{.*}}float @llvm.vector.reduce.fmax.v16f32(<16 x float> %{{.*}})
return _mm512_reduce_max_ps(__W);
}
int test_mm512_reduce_min_epi32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_min_epi32(
-// CHECK: call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_min_epi32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_min_epi32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_min_epi32((__m512i)(__v16si){-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7}) == -8);
unsigned int test_mm512_reduce_min_epu32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_min_epu32(
-// CHECK: call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_min_epu32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_min_epu32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_min_epu32((__m512i)(__v16su){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}) == 0);
float test_mm512_reduce_min_ps(__m512 __W){
-// CHECK-LABEL: @test_mm512_reduce_min_ps(
-// CHECK: call nnan float @llvm.vector.reduce.fmin.v16f32(<16 x float> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_min_ps
+// CHECK: call nnan {{.*}}float @llvm.vector.reduce.fmin.v16f32(<16 x float> %{{.*}})
return _mm512_reduce_min_ps(__W);
}
int test_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_max_epi32(
+// CHECK-LABEL: test_mm512_mask_reduce_max_epi32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_max_epi32(__M, __W);
}
unsigned int test_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_max_epu32(
+// CHECK-LABEL: test_mm512_mask_reduce_max_epu32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_max_epu32(__M, __W);
}
float test_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_max_ps(
+// CHECK-LABEL: test_mm512_mask_reduce_max_ps
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
-// CHECK: call nnan float @llvm.vector.reduce.fmax.v16f32(<16 x float> %{{.*}})
+// CHECK: call nnan {{.*}}float @llvm.vector.reduce.fmax.v16f32(<16 x float> %{{.*}})
return _mm512_mask_reduce_max_ps(__M, __W);
}
int test_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_min_epi32(
+// CHECK-LABEL: test_mm512_mask_reduce_min_epi32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_min_epi32(__M, __W);
}
unsigned int test_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_min_epu32(
+// CHECK-LABEL: test_mm512_mask_reduce_min_epu32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_min_epu32(__M, __W);
}
float test_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_min_ps(
+// CHECK-LABEL: test_mm512_mask_reduce_min_ps
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
-// CHECK: call nnan float @llvm.vector.reduce.fmin.v16f32(<16 x float> %{{.*}})
+// CHECK: call nnan {{.*}}float @llvm.vector.reduce.fmin.v16f32(<16 x float> %{{.*}})
return _mm512_mask_reduce_min_ps(__M, __W);
}
-
diff --git a/clang/test/CodeGen/X86/avx512bf16-builtins.c b/clang/test/CodeGen/X86/avx512bf16-builtins.c
index 8eb93e6..52c20aa 100644
--- a/clang/test/CodeGen/X86/avx512bf16-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bf16-builtins.c
@@ -1,107 +1,96 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin \
-// RUN: -target-feature +avx512bf16 -emit-llvm -o - -Wall -Werror \
-// RUN: | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bf16 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bf16 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
float test_mm_cvtsbh_ss(__bf16 A) {
- // CHECK-LABEL: @test_mm_cvtsbh_ss
+ // CHECK-LABEL: test_mm_cvtsbh_ss
// CHECK: fpext bfloat %{{.*}} to float
// CHECK: ret float %{{.*}}
return _mm_cvtsbh_ss(A);
}
__m512bh test_mm512_cvtne2ps_pbh(__m512 A, __m512 B) {
- // CHECK-LABEL: @test_mm512_cvtne2ps_pbh
- // CHECK: @llvm.x86.avx512bf16.cvtne2ps2bf16.512
- // CHECK: ret <32 x bfloat> %{{.*}}
+ // CHECK-LABEL: test_mm512_cvtne2ps_pbh
+ // CHECK: call {{.*}}<32 x bfloat> @llvm.x86.avx512bf16.cvtne2ps2bf16.512(<16 x float> %{{.*}}, <16 x float> %{{.*}})
return _mm512_cvtne2ps_pbh(A, B);
}
__m512bh test_mm512_maskz_cvtne2ps_pbh(__m512 A, __m512 B, __mmask32 U) {
- // CHECK-LABEL: @test_mm512_maskz_cvtne2ps_pbh
- // CHECK: @llvm.x86.avx512bf16.cvtne2ps2bf16.512
+ // CHECK-LABEL: test_mm512_maskz_cvtne2ps_pbh
+ // CHECK: call {{.*}}<32 x bfloat> @llvm.x86.avx512bf16.cvtne2ps2bf16.512(<16 x float> %{{.*}}, <16 x float> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x bfloat> %{{.*}}, <32 x bfloat> %{{.*}}
- // CHECK: ret <32 x bfloat> %{{.*}}
return _mm512_maskz_cvtne2ps_pbh(U, A, B);
}
__m512bh test_mm512_mask_cvtne2ps_pbh(__m512bh C, __mmask32 U, __m512 A, __m512 B) {
- // CHECK-LABEL: @test_mm512_mask_cvtne2ps_pbh
- // CHECK: @llvm.x86.avx512bf16.cvtne2ps2bf16.512
+ // CHECK-LABEL: test_mm512_mask_cvtne2ps_pbh
+ // CHECK: call {{.*}}<32 x bfloat> @llvm.x86.avx512bf16.cvtne2ps2bf16.512(<16 x float> %{{.*}}, <16 x float> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x bfloat> %{{.*}}, <32 x bfloat> %{{.*}}
- // CHECK: ret <32 x bfloat> %{{.*}}
return _mm512_mask_cvtne2ps_pbh(C, U, A, B);
}
__m256bh test_mm512_cvtneps_pbh(__m512 A) {
- // CHECK-LABEL: @test_mm512_cvtneps_pbh
- // CHECK: @llvm.x86.avx512bf16.cvtneps2bf16.512
- // CHECK: ret <16 x bfloat> %{{.*}}
+ // CHECK-LABEL: test_mm512_cvtneps_pbh
+ // CHECK: call {{.*}}<16 x bfloat> @llvm.x86.avx512bf16.cvtneps2bf16.512(<16 x float> %{{.*}})
return _mm512_cvtneps_pbh(A);
}
__m256bh test_mm512_mask_cvtneps_pbh(__m256bh C, __mmask16 U, __m512 A) {
- // CHECK-LABEL: @test_mm512_mask_cvtneps_pbh
- // CHECK: @llvm.x86.avx512bf16.cvtneps2bf16.512
+ // CHECK-LABEL: test_mm512_mask_cvtneps_pbh
+ // CHECK: call {{.*}}<16 x bfloat> @llvm.x86.avx512bf16.cvtneps2bf16.512(<16 x float> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x bfloat> %{{.*}}, <16 x bfloat> %{{.*}}
- // CHECK: ret <16 x bfloat> %{{.*}}
return _mm512_mask_cvtneps_pbh(C, U, A);
}
__m256bh test_mm512_maskz_cvtneps_pbh(__m512 A, __mmask16 U) {
- // CHECK-LABEL: @test_mm512_maskz_cvtneps_pbh
- // CHECK: @llvm.x86.avx512bf16.cvtneps2bf16.512
+ // CHECK-LABEL: test_mm512_maskz_cvtneps_pbh
+ // CHECK: call {{.*}}<16 x bfloat> @llvm.x86.avx512bf16.cvtneps2bf16.512(<16 x float> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x bfloat> %{{.*}}, <16 x bfloat> %{{.*}}
- // CHECK: ret <16 x bfloat> %{{.*}}
return _mm512_maskz_cvtneps_pbh(U, A);
}
__m512 test_mm512_dpbf16_ps(__m512 D, __m512bh A, __m512bh B) {
- // CHECK-LABEL: @test_mm512_dpbf16_ps
- // CHECK: @llvm.x86.avx512bf16.dpbf16ps.512
- // CHECK: ret <16 x float> %{{.*}}
+ // CHECK-LABEL: test_mm512_dpbf16_ps
+ // CHECK: call {{.*}}<16 x float> @llvm.x86.avx512bf16.dpbf16ps.512(<16 x float> %{{.*}}, <32 x bfloat> %{{.*}}, <32 x bfloat> %{{.*}})
return _mm512_dpbf16_ps(D, A, B);
}
__m512 test_mm512_maskz_dpbf16_ps(__m512 D, __m512bh A, __m512bh B, __mmask16 U) {
- // CHECK-LABEL: @test_mm512_maskz_dpbf16_ps
- // CHECK: @llvm.x86.avx512bf16.dpbf16ps.512
+ // CHECK-LABEL: test_mm512_maskz_dpbf16_ps
+ // CHECK: call {{.*}}<16 x float> @llvm.x86.avx512bf16.dpbf16ps.512(<16 x float> %{{.*}}, <32 x bfloat> %{{.*}}, <32 x bfloat> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
- // CHECK: ret <16 x float> %{{.*}}
return _mm512_maskz_dpbf16_ps(U, D, A, B);
}
__m512 test_mm512_mask_dpbf16_ps(__m512 D, __m512bh A, __m512bh B, __mmask16 U) {
- // CHECK-LABEL: @test_mm512_mask_dpbf16_ps
- // CHECK: @llvm.x86.avx512bf16.dpbf16ps.512
+ // CHECK-LABEL: test_mm512_mask_dpbf16_ps
+ // CHECK: call {{.*}}<16 x float> @llvm.x86.avx512bf16.dpbf16ps.512(<16 x float> %{{.*}}, <32 x bfloat> %{{.*}}, <32 x bfloat> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
- // CHECK: ret <16 x float> %{{.*}}
return _mm512_mask_dpbf16_ps(D, U, A, B);
}
__m512 test_mm512_cvtpbh_ps(__m256bh A) {
- // CHECK-LABEL: @test_mm512_cvtpbh_ps
+ // CHECK-LABEL: test_mm512_cvtpbh_ps
// CHECK: sext <16 x i16> %{{.*}} to <16 x i32>
- // CHECK: @llvm.x86.avx512.pslli.d.512
- // CHECK: ret <16 x float> %{{.*}}
+ // CHECK: call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %{{.*}}, i32 %{{.*}})
return _mm512_cvtpbh_ps(A);
}
__m512 test_mm512_maskz_cvtpbh_ps(__mmask16 M, __m256bh A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtpbh_ps
+ // CHECK-LABEL: test_mm512_maskz_cvtpbh_ps
// CHECK: sext <16 x i16> %{{.*}} to <16 x i32>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
- // CHECK: @llvm.x86.avx512.pslli.d.512
- // CHECK: ret <16 x float> %{{.*}}
+ // CHECK: call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %{{.*}}, i32 %{{.*}})
return _mm512_maskz_cvtpbh_ps(M, A);
}
__m512 test_mm512_mask_cvtpbh_ps(__m512 S, __mmask16 M, __m256bh A) {
- // CHECK-LABEL: @test_mm512_mask_cvtpbh_ps
+ // CHECK-LABEL: test_mm512_mask_cvtpbh_ps
// CHECK: sext <16 x i16> %{{.*}} to <16 x i32>
- // CHECK: @llvm.x86.avx512.pslli.d.512
+ // CHECK: call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %{{.*}}, i32 %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
- // CHECK: ret <16 x float> %{{.*}}
return _mm512_mask_cvtpbh_ps(S, M, A);
}
diff --git a/clang/test/CodeGen/X86/avx512bitalg-builtins.c b/clang/test/CodeGen/X86/avx512bitalg-builtins.c
index c80fb5e..3ac8674 100644
--- a/clang/test/CodeGen/X86/avx512bitalg-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bitalg-builtins.c
@@ -1,54 +1,72 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__m512i test_mm512_popcnt_epi16(__m512i __A) {
- // CHECK-LABEL: @test_mm512_popcnt_epi16
+ // CHECK-LABEL: test_mm512_popcnt_epi16
// CHECK: @llvm.ctpop.v32i16
return _mm512_popcnt_epi16(__A);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_popcnt_epi16((__m512i)(__v32hi){+5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025, +5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025}), 2, 15, 14, 1, 0, 8, 1, 9, 2, 2, 4, 2, 6, 2, 9, 2, 2, 15, 14, 1, 0, 8, 1, 9, 2, 2, 4, 2, 6, 2, 9, 2));
__m512i test_mm512_mask_popcnt_epi16(__m512i __A, __mmask32 __U, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_popcnt_epi16
+ // CHECK-LABEL: test_mm512_mask_popcnt_epi16
// CHECK: @llvm.ctpop.v32i16
// CHECK: select <32 x i1> %{{[0-9]+}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_popcnt_epi16(__A, __U, __B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_popcnt_epi16(_mm512_set1_epi16(-1), 0xF0F0F0F0, (__m512i)(__v32hi){+5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025, +5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025}), -1, -1, -1, -1, 0, 8, 1, 9, -1, -1, -1, -1, 6, 2, 9, 2, -1, -1, -1, -1, 0, 8, 1, 9, -1, -1, -1, -1, 6, 2, 9, 2));
+
__m512i test_mm512_maskz_popcnt_epi16(__mmask32 __U, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_popcnt_epi16
+ // CHECK-LABEL: test_mm512_maskz_popcnt_epi16
// CHECK: @llvm.ctpop.v32i16
// CHECK: select <32 x i1> %{{[0-9]+}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_popcnt_epi16(__U, __B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_popcnt_epi16(0x0F0F0F0F, (__m512i)(__v32hi){+5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025, +5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025}), 2, 15, 14, 1, 0, 0, 0, 0, 2, 2, 4, 2, 0, 0, 0, 0, 2, 15, 14, 1, 0, 0, 0, 0, 2, 2, 4, 2, 0, 0, 0, 0));
__m512i test_mm512_popcnt_epi8(__m512i __A) {
- // CHECK-LABEL: @test_mm512_popcnt_epi8
+ // CHECK-LABEL: test_mm512_popcnt_epi8
// CHECK: @llvm.ctpop.v64i8
return _mm512_popcnt_epi8(__A);
}
+TEST_CONSTEXPR(match_v64qi(_mm512_popcnt_epi8((__m512i)(__v64qi){+5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73}), 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3, 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3, 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3, 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3));
__m512i test_mm512_mask_popcnt_epi8(__m512i __A, __mmask64 __U, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_popcnt_epi8
+ // CHECK-LABEL: test_mm512_mask_popcnt_epi8
// CHECK: @llvm.ctpop.v64i8
// CHECK: select <64 x i1> %{{[0-9]+}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_popcnt_epi8(__A, __U, __B);
}
+TEST_CONSTEXPR(match_v64qi(_mm512_mask_popcnt_epi8(_mm512_set1_epi8(-1), 0xF0F0F0F00F0F0F0FULL, (__m512i)(__v64qi){+5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73}), 2, 7, 6, 1, -1, -1, -1, -1, 2, 2, 4, 2, -1, -1, -1, -1, 2, 7, 6, 1, -1, -1, -1, -1, 2, 2, 4, 2, -1, -1, -1, -1, -1, -1, -1, -1, 0, 4, 1, 4, -1, -1, -1, -1, 6, 2, 4, 3, -1, -1, -1, -1, 0, 4, 1, 4, -1, -1, -1, -1, 6, 2, 4, 3));
+
__m512i test_mm512_maskz_popcnt_epi8(__mmask64 __U, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_popcnt_epi8
+ // CHECK-LABEL: test_mm512_maskz_popcnt_epi8
// CHECK: @llvm.ctpop.v64i8
// CHECK: select <64 x i1> %{{[0-9]+}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_popcnt_epi8(__U, __B);
}
+TEST_CONSTEXPR(match_v64qi(_mm512_maskz_popcnt_epi8(0x0F0F0F0FF0F0F0F0ULL, (__m512i)(__v64qi){+5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73}), 0, 0, 0, 0, 0, 4, 1, 4, 0, 0, 0, 0, 6, 2, 4, 3, 0, 0, 0, 0, 0, 4, 1, 4, 0, 0, 0, 0, 6, 2, 4, 3, 2, 7, 6, 1, 0, 0, 0, 0, 2, 2, 4, 2, 0, 0, 0, 0, 2, 7, 6, 1, 0, 0, 0, 0, 2, 2, 4, 2, 0, 0, 0, 0));
__mmask64 test_mm512_mask_bitshuffle_epi64_mask(__mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_bitshuffle_epi64_mask
+ // CHECK-LABEL: test_mm512_mask_bitshuffle_epi64_mask
// CHECK: @llvm.x86.avx512.vpshufbitqmb.512
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return _mm512_mask_bitshuffle_epi64_mask(__U, __A, __B);
}
__mmask64 test_mm512_bitshuffle_epi64_mask(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_bitshuffle_epi64_mask
+ // CHECK-LABEL: test_mm512_bitshuffle_epi64_mask
// CHECK: @llvm.x86.avx512.vpshufbitqmb.512
return _mm512_bitshuffle_epi64_mask(__A, __B);
}
diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c b/clang/test/CodeGen/X86/avx512bw-builtins.c
index 1d18ca8..264a457 100644
--- a/clang/test/CodeGen/X86/avx512bw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bw-builtins.c
@@ -1,25 +1,33 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -fno-signed-char -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -fno-signed-char -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -fno-signed-char -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -fno-signed-char -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -fno-signed-char -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__mmask32 test_knot_mask32(__mmask32 a) {
- // CHECK-LABEL: @test_knot_mask32
+ // CHECK-LABEL: test_knot_mask32
// CHECK: [[IN:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[NOT:%.*]] = xor <32 x i1> [[IN]], splat (i1 true)
return _knot_mask32(a);
}
__mmask64 test_knot_mask64(__mmask64 a) {
- // CHECK-LABEL: @test_knot_mask64
+ // CHECK-LABEL: test_knot_mask64
// CHECK: [[IN:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[NOT:%.*]] = xor <64 x i1> [[IN]], splat (i1 true)
return _knot_mask64(a);
}
__mmask32 test_kand_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kand_mask32
+ // CHECK-LABEL: test_kand_mask32
// CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RES:%.*]] = and <32 x i1> [[LHS]], [[RHS]]
@@ -29,7 +37,7 @@ __mmask32 test_kand_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
}
__mmask64 test_kand_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kand_mask64
+ // CHECK-LABEL: test_kand_mask64
// CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RES:%.*]] = and <64 x i1> [[LHS]], [[RHS]]
@@ -39,7 +47,7 @@ __mmask64 test_kand_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
}
__mmask32 test_kandn_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kandn_mask32
+ // CHECK-LABEL: test_kandn_mask32
// CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[NOT:%.*]] = xor <32 x i1> [[LHS]], splat (i1 true)
@@ -50,7 +58,7 @@ __mmask32 test_kandn_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D,
}
__mmask64 test_kandn_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kandn_mask64
+ // CHECK-LABEL: test_kandn_mask64
// CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[NOT:%.*]] = xor <64 x i1> [[LHS]], splat (i1 true)
@@ -61,7 +69,7 @@ __mmask64 test_kandn_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D,
}
__mmask32 test_kor_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kor_mask32
+ // CHECK-LABEL: test_kor_mask32
// CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RES:%.*]] = or <32 x i1> [[LHS]], [[RHS]]
@@ -71,7 +79,7 @@ __mmask32 test_kor_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __
}
__mmask64 test_kor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kor_mask64
+ // CHECK-LABEL: test_kor_mask64
// CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RES:%.*]] = or <64 x i1> [[LHS]], [[RHS]]
@@ -81,7 +89,7 @@ __mmask64 test_kor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __
}
__mmask32 test_kxnor_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kxnor_mask32
+ // CHECK-LABEL: test_kxnor_mask32
// CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[NOT:%.*]] = xor <32 x i1> [[LHS]], splat (i1 true)
@@ -92,7 +100,7 @@ __mmask32 test_kxnor_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D,
}
__mmask64 test_kxnor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kxnor_mask64
+ // CHECK-LABEL: test_kxnor_mask64
// CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[NOT:%.*]] = xor <64 x i1> [[LHS]], splat (i1 true)
@@ -103,7 +111,7 @@ __mmask64 test_kxnor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D,
}
__mmask32 test_kxor_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kxor_mask32
+ // CHECK-LABEL: test_kxor_mask32
// CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RES:%.*]] = xor <32 x i1> [[LHS]], [[RHS]]
@@ -113,7 +121,7 @@ __mmask32 test_kxor_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
}
__mmask64 test_kxor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kxor_mask64
+ // CHECK-LABEL: test_kxor_mask64
// CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RES:%.*]] = xor <64 x i1> [[LHS]], [[RHS]]
@@ -123,7 +131,7 @@ __mmask64 test_kxor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
}
unsigned char test_kortestz_mask32_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_kortestz_mask32_u8
+ // CHECK-LABEL: test_kortestz_mask32_u8
// CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[OR:%.*]] = or <32 x i1> [[LHS]], [[RHS]]
@@ -136,7 +144,7 @@ unsigned char test_kortestz_mask32_u8(__m512i __A, __m512i __B, __m512i __C, __m
}
unsigned char test_kortestc_mask32_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_kortestc_mask32_u8
+ // CHECK-LABEL: test_kortestc_mask32_u8
// CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[OR:%.*]] = or <32 x i1> [[LHS]], [[RHS]]
@@ -149,7 +157,7 @@ unsigned char test_kortestc_mask32_u8(__m512i __A, __m512i __B, __m512i __C, __m
}
unsigned char test_kortest_mask32_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, unsigned char *CF) {
- // CHECK-LABEL: @test_kortest_mask32_u8
+ // CHECK-LABEL: test_kortest_mask32_u8
// CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[OR:%.*]] = or <32 x i1> [[LHS]], [[RHS]]
@@ -169,7 +177,7 @@ unsigned char test_kortest_mask32_u8(__m512i __A, __m512i __B, __m512i __C, __m5
}
unsigned char test_kortestz_mask64_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_kortestz_mask64_u8
+ // CHECK-LABEL: test_kortestz_mask64_u8
// CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[OR:%.*]] = or <64 x i1> [[LHS]], [[RHS]]
@@ -182,7 +190,7 @@ unsigned char test_kortestz_mask64_u8(__m512i __A, __m512i __B, __m512i __C, __m
}
unsigned char test_kortestc_mask64_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_kortestc_mask64_u8
+ // CHECK-LABEL: test_kortestc_mask64_u8
// CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[OR:%.*]] = or <64 x i1> [[LHS]], [[RHS]]
@@ -195,7 +203,7 @@ unsigned char test_kortestc_mask64_u8(__m512i __A, __m512i __B, __m512i __C, __m
}
unsigned char test_kortest_mask64_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, unsigned char *CF) {
- // CHECK-LABEL: @test_kortest_mask64_u8
+ // CHECK-LABEL: test_kortest_mask64_u8
// CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[OR:%.*]] = or <64 x i1> [[LHS]], [[RHS]]
@@ -215,7 +223,7 @@ unsigned char test_kortest_mask64_u8(__m512i __A, __m512i __B, __m512i __C, __m5
}
unsigned char test_ktestz_mask32_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_ktestz_mask32_u8
+ // CHECK-LABEL: test_ktestz_mask32_u8
// CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestz.d(<32 x i1> [[LHS]], <32 x i1> [[RHS]])
@@ -225,7 +233,7 @@ unsigned char test_ktestz_mask32_u8(__m512i __A, __m512i __B, __m512i __C, __m51
}
unsigned char test_ktestc_mask32_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_ktestc_mask32_u8
+ // CHECK-LABEL: test_ktestc_mask32_u8
// CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestc.d(<32 x i1> [[LHS]], <32 x i1> [[RHS]])
@@ -235,7 +243,7 @@ unsigned char test_ktestc_mask32_u8(__m512i __A, __m512i __B, __m512i __C, __m51
}
unsigned char test_ktest_mask32_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, unsigned char *CF) {
- // CHECK-LABEL: @test_ktest_mask32_u8
+ // CHECK-LABEL: test_ktest_mask32_u8
// CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestc.d(<32 x i1> [[LHS]], <32 x i1> [[RHS]])
@@ -249,7 +257,7 @@ unsigned char test_ktest_mask32_u8(__m512i __A, __m512i __B, __m512i __C, __m512
}
unsigned char test_ktestz_mask64_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_ktestz_mask64_u8
+ // CHECK-LABEL: test_ktestz_mask64_u8
// CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestz.q(<64 x i1> [[LHS]], <64 x i1> [[RHS]])
@@ -259,7 +267,7 @@ unsigned char test_ktestz_mask64_u8(__m512i __A, __m512i __B, __m512i __C, __m51
}
unsigned char test_ktestc_mask64_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_ktestc_mask64_u8
+ // CHECK-LABEL: test_ktestc_mask64_u8
// CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestc.q(<64 x i1> [[LHS]], <64 x i1> [[RHS]])
@@ -269,7 +277,7 @@ unsigned char test_ktestc_mask64_u8(__m512i __A, __m512i __B, __m512i __C, __m51
}
unsigned char test_ktest_mask64_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, unsigned char *CF) {
- // CHECK-LABEL: @test_ktest_mask64_u8
+ // CHECK-LABEL: test_ktest_mask64_u8
// CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestc.q(<64 x i1> [[LHS]], <64 x i1> [[RHS]])
@@ -283,7 +291,7 @@ unsigned char test_ktest_mask64_u8(__m512i __A, __m512i __B, __m512i __C, __m512
}
__mmask32 test_kadd_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kadd_mask32
+ // CHECK-LABEL: test_kadd_mask32
// CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RES:%.*]] = call <32 x i1> @llvm.x86.avx512.kadd.d(<32 x i1> [[LHS]], <32 x i1> [[RHS]])
@@ -293,7 +301,7 @@ __mmask32 test_kadd_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
}
__mmask64 test_kadd_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kadd_mask64
+ // CHECK-LABEL: test_kadd_mask64
// CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RES:%.*]] = call <64 x i1> @llvm.x86.avx512.kadd.q(<64 x i1> [[LHS]], <64 x i1> [[RHS]])
@@ -303,1592 +311,1654 @@ __mmask64 test_kadd_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
}
__mmask32 test_kshiftli_mask32(__m512i A, __m512i B, __m512i C, __m512i D) {
- // CHECK-LABEL: @test_kshiftli_mask32
+ // CHECK-LABEL: test_kshiftli_mask32
// CHECK: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RES:%.*]] = shufflevector <32 x i1> zeroinitializer, <32 x i1> [[VAL]], <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32>
return _mm512_mask_cmpneq_epu16_mask(_kshiftli_mask32(_mm512_cmpneq_epu16_mask(A, B), 31), C, D);
}
__mmask32 test_kshiftri_mask32(__m512i A, __m512i B, __m512i C, __m512i D) {
- // CHECK-LABEL: @test_kshiftri_mask32
+ // CHECK-LABEL: test_kshiftri_mask32
// CHECK: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RES:%.*]] = shufflevector <32 x i1> [[VAL]], <32 x i1> zeroinitializer, <32 x i32> <i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62>
return _mm512_mask_cmpneq_epu16_mask(_kshiftri_mask32(_mm512_cmpneq_epu16_mask(A, B), 31), C, D);
}
__mmask64 test_kshiftli_mask64(__m512i A, __m512i B, __m512i C, __m512i D) {
- // CHECK-LABEL: @test_kshiftli_mask64
+ // CHECK-LABEL: test_kshiftli_mask64
// CHECK: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RES:%.*]] = shufflevector <64 x i1> zeroinitializer, <64 x i1> [[VAL]], <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95>
return _mm512_mask_cmpneq_epu8_mask(_kshiftli_mask64(_mm512_cmpneq_epu8_mask(A, B), 32), C, D);
}
__mmask64 test_kshiftri_mask64(__m512i A, __m512i B, __m512i C, __m512i D) {
- // CHECK-LABEL: @test_kshiftri_mask64
+ // CHECK-LABEL: test_kshiftri_mask64
// CHECK: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RES:%.*]] = shufflevector <64 x i1> [[VAL]], <64 x i1> zeroinitializer, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95>
return _mm512_mask_cmpneq_epu8_mask(_kshiftri_mask64(_mm512_cmpneq_epu8_mask(A, B), 32), C, D);
}
unsigned int test_cvtmask32_u32(__m512i A, __m512i B) {
- // CHECK-LABEL: @test_cvtmask32_u32
+ // CHECK-LABEL: test_cvtmask32_u32
return _cvtmask32_u32(_mm512_cmpneq_epu16_mask(A, B));
}
unsigned long long test_cvtmask64_u64(__m512i A, __m512i B) {
- // CHECK-LABEL: @test_cvtmask64_u64
+ // CHECK-LABEL: test_cvtmask64_u64
return _cvtmask64_u64(_mm512_cmpneq_epu8_mask(A, B));
}
__mmask32 test_cvtu32_mask32(__m512i A, __m512i B, unsigned int C) {
- // CHECK-LABEL: @test_cvtu32_mask32
+ // CHECK-LABEL: test_cvtu32_mask32
return _mm512_mask_cmpneq_epu16_mask(_cvtu32_mask32(C), A, B);
}
__mmask64 test_cvtu64_mask64(__m512i A, __m512i B, unsigned long long C) {
- // CHECK-LABEL: @test_cvtu64_mask64
+ // CHECK-LABEL: test_cvtu64_mask64
return _mm512_mask_cmpneq_epu8_mask(_cvtu64_mask64(C), A, B);
}
__mmask32 test_load_mask32(__mmask32 *A, __m512i B, __m512i C) {
- // CHECK-LABEL: @test_load_mask32
+ // CHECK-LABEL: test_load_mask32
// CHECK: [[LOAD:%.*]] = load i32, ptr %{{.*}}
return _mm512_mask_cmpneq_epu16_mask(_load_mask32(A), B, C);
}
__mmask64 test_load_mask64(__mmask64 *A, __m512i B, __m512i C) {
- // CHECK-LABEL: @test_load_mask64
+ // CHECK-LABEL: test_load_mask64
// CHECK: [[LOAD:%.*]] = load i64, ptr %{{.*}}
return _mm512_mask_cmpneq_epu8_mask(_load_mask64(A), B, C);
}
void test_store_mask32(__mmask32 *A, __m512i B, __m512i C) {
- // CHECK-LABEL: @test_store_mask32
+ // CHECK-LABEL: test_store_mask32
// CHECK: store i32 %{{.*}}, ptr %{{.*}}
_store_mask32(A, _mm512_cmpneq_epu16_mask(B, C));
}
void test_store_mask64(__mmask64 *A, __m512i B, __m512i C) {
- // CHECK-LABEL: @test_store_mask64
+ // CHECK-LABEL: test_store_mask64
// CHECK: store i64 %{{.*}}, ptr %{{.*}}
_store_mask64(A, _mm512_cmpneq_epu8_mask(B, C));
}
__mmask64 test_mm512_cmpeq_epi8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpeq_epi8_mask
+ // CHECK-LABEL: test_mm512_cmpeq_epi8_mask
// CHECK: icmp eq <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmpeq_epi8_mask(__a, __b);
}
__mmask64 test_mm512_mask_cmpeq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpeq_epi8_mask
+ // CHECK-LABEL: test_mm512_mask_cmpeq_epi8_mask
// CHECK: icmp eq <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmpeq_epi8_mask(__u, __a, __b);
}
__mmask32 test_mm512_cmpeq_epi16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpeq_epi16_mask
+ // CHECK-LABEL: test_mm512_cmpeq_epi16_mask
// CHECK: icmp eq <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmpeq_epi16_mask(__a, __b);
}
__mmask32 test_mm512_mask_cmpeq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpeq_epi16_mask
+ // CHECK-LABEL: test_mm512_mask_cmpeq_epi16_mask
// CHECK: icmp eq <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmpeq_epi16_mask(__u, __a, __b);
}
__mmask64 test_mm512_cmpgt_epi8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpgt_epi8_mask
+ // CHECK-LABEL: test_mm512_cmpgt_epi8_mask
// CHECK: icmp sgt <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmpgt_epi8_mask(__a, __b);
}
__mmask64 test_mm512_mask_cmpgt_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpgt_epi8_mask
+ // CHECK-LABEL: test_mm512_mask_cmpgt_epi8_mask
// CHECK: icmp sgt <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmpgt_epi8_mask(__u, __a, __b);
}
__mmask32 test_mm512_cmpgt_epi16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpgt_epi16_mask
+ // CHECK-LABEL: test_mm512_cmpgt_epi16_mask
// CHECK: icmp sgt <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmpgt_epi16_mask(__a, __b);
}
__mmask32 test_mm512_mask_cmpgt_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpgt_epi16_mask
+ // CHECK-LABEL: test_mm512_mask_cmpgt_epi16_mask
// CHECK: icmp sgt <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmpgt_epi16_mask(__u, __a, __b);
}
__mmask64 test_mm512_cmpeq_epu8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpeq_epu8_mask
+ // CHECK-LABEL: test_mm512_cmpeq_epu8_mask
// CHECK: icmp eq <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmpeq_epu8_mask(__a, __b);
}
__mmask64 test_mm512_mask_cmpeq_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpeq_epu8_mask
+ // CHECK-LABEL: test_mm512_mask_cmpeq_epu8_mask
// CHECK: icmp eq <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmpeq_epu8_mask(__u, __a, __b);
}
__mmask32 test_mm512_cmpeq_epu16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpeq_epu16_mask
+ // CHECK-LABEL: test_mm512_cmpeq_epu16_mask
// CHECK: icmp eq <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmpeq_epu16_mask(__a, __b);
}
__mmask32 test_mm512_mask_cmpeq_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpeq_epu16_mask
+ // CHECK-LABEL: test_mm512_mask_cmpeq_epu16_mask
// CHECK: icmp eq <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmpeq_epu16_mask(__u, __a, __b);
}
__mmask64 test_mm512_cmpgt_epu8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpgt_epu8_mask
+ // CHECK-LABEL: test_mm512_cmpgt_epu8_mask
// CHECK: icmp ugt <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmpgt_epu8_mask(__a, __b);
}
__mmask64 test_mm512_mask_cmpgt_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpgt_epu8_mask
+ // CHECK-LABEL: test_mm512_mask_cmpgt_epu8_mask
// CHECK: icmp ugt <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmpgt_epu8_mask(__u, __a, __b);
}
__mmask32 test_mm512_cmpgt_epu16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpgt_epu16_mask
+ // CHECK-LABEL: test_mm512_cmpgt_epu16_mask
// CHECK: icmp ugt <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmpgt_epu16_mask(__a, __b);
}
__mmask32 test_mm512_mask_cmpgt_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpgt_epu16_mask
+ // CHECK-LABEL: test_mm512_mask_cmpgt_epu16_mask
// CHECK: icmp ugt <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmpgt_epu16_mask(__u, __a, __b);
}
__mmask64 test_mm512_cmpge_epi8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpge_epi8_mask
+ // CHECK-LABEL: test_mm512_cmpge_epi8_mask
// CHECK: icmp sge <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmpge_epi8_mask(__a, __b);
}
__mmask64 test_mm512_mask_cmpge_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpge_epi8_mask
+ // CHECK-LABEL: test_mm512_mask_cmpge_epi8_mask
// CHECK: icmp sge <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmpge_epi8_mask(__u, __a, __b);
}
__mmask64 test_mm512_cmpge_epu8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpge_epu8_mask
+ // CHECK-LABEL: test_mm512_cmpge_epu8_mask
// CHECK: icmp uge <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmpge_epu8_mask(__a, __b);
}
__mmask64 test_mm512_mask_cmpge_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpge_epu8_mask
+ // CHECK-LABEL: test_mm512_mask_cmpge_epu8_mask
// CHECK: icmp uge <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmpge_epu8_mask(__u, __a, __b);
}
__mmask32 test_mm512_cmpge_epi16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpge_epi16_mask
+ // CHECK-LABEL: test_mm512_cmpge_epi16_mask
// CHECK: icmp sge <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmpge_epi16_mask(__a, __b);
}
__mmask32 test_mm512_mask_cmpge_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpge_epi16_mask
+ // CHECK-LABEL: test_mm512_mask_cmpge_epi16_mask
// CHECK: icmp sge <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmpge_epi16_mask(__u, __a, __b);
}
__mmask32 test_mm512_cmpge_epu16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpge_epu16_mask
+ // CHECK-LABEL: test_mm512_cmpge_epu16_mask
// CHECK: icmp uge <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmpge_epu16_mask(__a, __b);
}
__mmask32 test_mm512_mask_cmpge_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpge_epu16_mask
+ // CHECK-LABEL: test_mm512_mask_cmpge_epu16_mask
// CHECK: icmp uge <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmpge_epu16_mask(__u, __a, __b);
}
__mmask64 test_mm512_cmple_epi8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmple_epi8_mask
+ // CHECK-LABEL: test_mm512_cmple_epi8_mask
// CHECK: icmp sle <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmple_epi8_mask(__a, __b);
}
__mmask64 test_mm512_mask_cmple_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmple_epi8_mask
+ // CHECK-LABEL: test_mm512_mask_cmple_epi8_mask
// CHECK: icmp sle <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmple_epi8_mask(__u, __a, __b);
}
__mmask64 test_mm512_cmple_epu8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmple_epu8_mask
+ // CHECK-LABEL: test_mm512_cmple_epu8_mask
// CHECK: icmp ule <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmple_epu8_mask(__a, __b);
}
__mmask64 test_mm512_mask_cmple_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmple_epu8_mask
+ // CHECK-LABEL: test_mm512_mask_cmple_epu8_mask
// CHECK: icmp ule <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmple_epu8_mask(__u, __a, __b);
}
__mmask32 test_mm512_cmple_epi16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmple_epi16_mask
+ // CHECK-LABEL: test_mm512_cmple_epi16_mask
// CHECK: icmp sle <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmple_epi16_mask(__a, __b);
}
__mmask32 test_mm512_mask_cmple_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmple_epi16_mask
+ // CHECK-LABEL: test_mm512_mask_cmple_epi16_mask
// CHECK: icmp sle <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmple_epi16_mask(__u, __a, __b);
}
__mmask32 test_mm512_cmple_epu16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmple_epu16_mask
+ // CHECK-LABEL: test_mm512_cmple_epu16_mask
// CHECK: icmp ule <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmple_epu16_mask(__a, __b);
}
__mmask32 test_mm512_mask_cmple_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmple_epu16_mask
+ // CHECK-LABEL: test_mm512_mask_cmple_epu16_mask
// CHECK: icmp ule <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmple_epu16_mask(__u, __a, __b);
}
__mmask64 test_mm512_cmplt_epi8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmplt_epi8_mask
+ // CHECK-LABEL: test_mm512_cmplt_epi8_mask
// CHECK: icmp slt <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmplt_epi8_mask(__a, __b);
}
__mmask64 test_mm512_mask_cmplt_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmplt_epi8_mask
+ // CHECK-LABEL: test_mm512_mask_cmplt_epi8_mask
// CHECK: icmp slt <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmplt_epi8_mask(__u, __a, __b);
}
__mmask64 test_mm512_cmplt_epu8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmplt_epu8_mask
+ // CHECK-LABEL: test_mm512_cmplt_epu8_mask
// CHECK: icmp ult <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmplt_epu8_mask(__a, __b);
}
__mmask64 test_mm512_mask_cmplt_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmplt_epu8_mask
+ // CHECK-LABEL: test_mm512_mask_cmplt_epu8_mask
// CHECK: icmp ult <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmplt_epu8_mask(__u, __a, __b);
}
__mmask32 test_mm512_cmplt_epi16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmplt_epi16_mask
+ // CHECK-LABEL: test_mm512_cmplt_epi16_mask
// CHECK: icmp slt <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmplt_epi16_mask(__a, __b);
}
__mmask32 test_mm512_mask_cmplt_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmplt_epi16_mask
+ // CHECK-LABEL: test_mm512_mask_cmplt_epi16_mask
// CHECK: icmp slt <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmplt_epi16_mask(__u, __a, __b);
}
__mmask32 test_mm512_cmplt_epu16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmplt_epu16_mask
+ // CHECK-LABEL: test_mm512_cmplt_epu16_mask
// CHECK: icmp ult <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmplt_epu16_mask(__a, __b);
}
__mmask32 test_mm512_mask_cmplt_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmplt_epu16_mask
+ // CHECK-LABEL: test_mm512_mask_cmplt_epu16_mask
// CHECK: icmp ult <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmplt_epu16_mask(__u, __a, __b);
}
__mmask64 test_mm512_cmpneq_epi8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpneq_epi8_mask
+ // CHECK-LABEL: test_mm512_cmpneq_epi8_mask
// CHECK: icmp ne <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmpneq_epi8_mask(__a, __b);
}
__mmask64 test_mm512_mask_cmpneq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpneq_epi8_mask
+ // CHECK-LABEL: test_mm512_mask_cmpneq_epi8_mask
// CHECK: icmp ne <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmpneq_epi8_mask(__u, __a, __b);
}
__mmask64 test_mm512_cmpneq_epu8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpneq_epu8_mask
+ // CHECK-LABEL: test_mm512_cmpneq_epu8_mask
// CHECK: icmp ne <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmpneq_epu8_mask(__a, __b);
}
__mmask64 test_mm512_mask_cmpneq_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpneq_epu8_mask
+ // CHECK-LABEL: test_mm512_mask_cmpneq_epu8_mask
// CHECK: icmp ne <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmpneq_epu8_mask(__u, __a, __b);
}
__mmask32 test_mm512_cmpneq_epi16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpneq_epi16_mask
+ // CHECK-LABEL: test_mm512_cmpneq_epi16_mask
// CHECK: icmp ne <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmpneq_epi16_mask(__a, __b);
}
__mmask32 test_mm512_mask_cmpneq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpneq_epi16_mask
+ // CHECK-LABEL: test_mm512_mask_cmpneq_epi16_mask
// CHECK: icmp ne <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmpneq_epi16_mask(__u, __a, __b);
}
__mmask32 test_mm512_cmpneq_epu16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmpneq_epu16_mask
+ // CHECK-LABEL: test_mm512_cmpneq_epu16_mask
// CHECK: icmp ne <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmpneq_epu16_mask(__a, __b);
}
__mmask32 test_mm512_mask_cmpneq_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmpneq_epu16_mask
+ // CHECK-LABEL: test_mm512_mask_cmpneq_epu16_mask
// CHECK: icmp ne <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmpneq_epu16_mask(__u, __a, __b);
}
__mmask64 test_mm512_cmp_epi8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmp_epi8_mask
+ // CHECK-LABEL: test_mm512_cmp_epi8_mask
// CHECK: icmp eq <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmp_epi8_mask(__a, __b, 0);
}
__mmask64 test_mm512_mask_cmp_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmp_epi8_mask
+ // CHECK-LABEL: test_mm512_mask_cmp_epi8_mask
// CHECK: icmp eq <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmp_epi8_mask(__u, __a, __b, 0);
}
__mmask64 test_mm512_cmp_epu8_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmp_epu8_mask
+ // CHECK-LABEL: test_mm512_cmp_epu8_mask
// CHECK: icmp eq <64 x i8> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_cmp_epu8_mask(__a, __b, 0);
}
__mmask64 test_mm512_mask_cmp_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmp_epu8_mask
+ // CHECK-LABEL: test_mm512_mask_cmp_epu8_mask
// CHECK: icmp eq <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return (__mmask64)_mm512_mask_cmp_epu8_mask(__u, __a, __b, 0);
}
__mmask32 test_mm512_cmp_epi16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmp_epi16_mask
+ // CHECK-LABEL: test_mm512_cmp_epi16_mask
// CHECK: icmp eq <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmp_epi16_mask(__a, __b, 0);
}
__mmask32 test_mm512_mask_cmp_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmp_epi16_mask
+ // CHECK-LABEL: test_mm512_mask_cmp_epi16_mask
// CHECK: icmp eq <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmp_epi16_mask(__u, __a, __b, 0);
}
__mmask32 test_mm512_cmp_epu16_mask(__m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_cmp_epu16_mask
+ // CHECK-LABEL: test_mm512_cmp_epu16_mask
// CHECK: icmp eq <32 x i16> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_cmp_epu16_mask(__a, __b, 0);
}
__mmask32 test_mm512_mask_cmp_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
- // CHECK-LABEL: @test_mm512_mask_cmp_epu16_mask
+ // CHECK-LABEL: test_mm512_mask_cmp_epu16_mask
// CHECK: icmp eq <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm512_mask_cmp_epu16_mask(__u, __a, __b, 0);
}
__m512i test_mm512_add_epi8 (__m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_add_epi8
+ //CHECK-LABEL: test_mm512_add_epi8
//CHECK: add <64 x i8>
return _mm512_add_epi8(__A,__B);
}
__m512i test_mm512_mask_add_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_mask_add_epi8
+ //CHECK-LABEL: test_mm512_mask_add_epi8
//CHECK: add <64 x i8> %{{.*}}, %{{.*}}
//CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_add_epi8(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_add_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_maskz_add_epi8
+ //CHECK-LABEL: test_mm512_maskz_add_epi8
//CHECK: add <64 x i8> %{{.*}}, %{{.*}}
//CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_add_epi8(__U, __A, __B);
}
__m512i test_mm512_sub_epi8 (__m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_sub_epi8
+ //CHECK-LABEL: test_mm512_sub_epi8
//CHECK: sub <64 x i8>
return _mm512_sub_epi8(__A, __B);
}
__m512i test_mm512_mask_sub_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_mask_sub_epi8
+ //CHECK-LABEL: test_mm512_mask_sub_epi8
//CHECK: sub <64 x i8> %{{.*}}, %{{.*}}
//CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_sub_epi8(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_sub_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_maskz_sub_epi8
+ //CHECK-LABEL: test_mm512_maskz_sub_epi8
//CHECK: sub <64 x i8> %{{.*}}, %{{.*}}
//CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_sub_epi8(__U, __A, __B);
}
__m512i test_mm512_add_epi16 (__m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_add_epi16
+ //CHECK-LABEL: test_mm512_add_epi16
//CHECK: add <32 x i16>
return _mm512_add_epi16(__A, __B);
}
__m512i test_mm512_mask_add_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_mask_add_epi16
+ //CHECK-LABEL: test_mm512_mask_add_epi16
//CHECK: add <32 x i16> %{{.*}}, %{{.*}}
//CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_add_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_add_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_maskz_add_epi16
+ //CHECK-LABEL: test_mm512_maskz_add_epi16
//CHECK: add <32 x i16> %{{.*}}, %{{.*}}
//CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_add_epi16(__U, __A, __B);
}
__m512i test_mm512_sub_epi16 (__m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_sub_epi16
+ //CHECK-LABEL: test_mm512_sub_epi16
//CHECK: sub <32 x i16>
return _mm512_sub_epi16(__A, __B);
}
__m512i test_mm512_mask_sub_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_mask_sub_epi16
+ //CHECK-LABEL: test_mm512_mask_sub_epi16
//CHECK: sub <32 x i16> %{{.*}}, %{{.*}}
//CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_sub_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_sub_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_maskz_sub_epi16
+ //CHECK-LABEL: test_mm512_maskz_sub_epi16
//CHECK: sub <32 x i16> %{{.*}}, %{{.*}}
//CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_sub_epi16(__U, __A, __B);
}
__m512i test_mm512_mullo_epi16 (__m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_mullo_epi16
+ //CHECK-LABEL: test_mm512_mullo_epi16
//CHECK: mul <32 x i16>
return _mm512_mullo_epi16(__A, __B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mullo_epi16((__m512i)(__v32hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32}, (__m512i)(__v32hi){-64, -62, +60, +58, -56, -54, +52, +50, -48, -46, +44, +42, -40, -38, +36, +34, -32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), -64, 124, 180, -232, -280, 324, 364, -400, -432, 460, 484, -504, -520, 532, 540, -544, -544, 540, 532, -520, -504, 484, 460, -432, -400, 364, 324, -280, -232, -180, -124, -64));
__m512i test_mm512_mask_mullo_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_mask_mullo_epi16
+ //CHECK-LABEL: test_mm512_mask_mullo_epi16
//CHECK: mul <32 x i16> %{{.*}}, %{{.*}}
//CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_mullo_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_mullo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
- //CHECK-LABEL: @test_mm512_maskz_mullo_epi16
+ //CHECK-LABEL: test_mm512_maskz_mullo_epi16
//CHECK: mul <32 x i16> %{{.*}}, %{{.*}}
//CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_mullo_epi16(__U, __A, __B);
}
__m512i test_mm512_mask_blend_epi8(__mmask64 __U, __m512i __A, __m512i __W) {
- // CHECK-LABEL: @test_mm512_mask_blend_epi8
+ // CHECK-LABEL: test_mm512_mask_blend_epi8
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_blend_epi8(__U,__A,__W);
}
__m512i test_mm512_mask_blend_epi16(__mmask32 __U, __m512i __A, __m512i __W) {
- // CHECK-LABEL: @test_mm512_mask_blend_epi16
+ // CHECK-LABEL: test_mm512_mask_blend_epi16
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_blend_epi16(__U,__A,__W);
}
__m512i test_mm512_abs_epi8(__m512i __A) {
- // CHECK-LABEL: @test_mm512_abs_epi8
+ // CHECK-LABEL: test_mm512_abs_epi8
// CHECK: [[ABS:%.*]] = call <64 x i8> @llvm.abs.v64i8(<64 x i8> %{{.*}}, i1 false)
return _mm512_abs_epi8(__A);
}
+TEST_CONSTEXPR(match_v64qi(_mm512_abs_epi8((__m512i)(__v64qs){0, +1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32, +33, -34, +35, -36, +37, -38, +39, -40, +41, -42, +43, -44, +45, -46, +47, +100, +50, -100, +20, +80, -50, +120, -20, -100, -50, +100, -20, -80, +50, -120, +20}), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 100, 50, 100, 20, 80, 50, 120, 20, 100, 50, 100, 20, 80, 50, 120, 20));
+
__m512i test_mm512_mask_abs_epi8(__m512i __W, __mmask64 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_abs_epi8
+ // CHECK-LABEL: test_mm512_mask_abs_epi8
// CHECK: [[ABS:%.*]] = call <64 x i8> @llvm.abs.v64i8(<64 x i8> %{{.*}}, i1 false)
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> [[ABS]], <64 x i8> %{{.*}}
return _mm512_mask_abs_epi8(__W,__U,__A);
}
+TEST_CONSTEXPR(match_v64qi(_mm512_mask_abs_epi8((__m512i)(__v64qi){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, (__mmask64)0x000000000000001, (__m512i)(__v64qi){(char)-1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}), 1, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99));
+
__m512i test_mm512_maskz_abs_epi8(__mmask64 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_abs_epi8
+ // CHECK-LABEL: test_mm512_maskz_abs_epi8
// CHECK: [[ABS:%.*]] = call <64 x i8> @llvm.abs.v64i8(<64 x i8> %{{.*}}, i1 false)
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> [[ABS]], <64 x i8> %{{.*}}
return _mm512_maskz_abs_epi8(__U,__A);
}
+TEST_CONSTEXPR(match_v64qi(_mm512_maskz_abs_epi8((__mmask64)0x000000000000001, (__m512i)(__v64qi){(char)-1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}), 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+
__m512i test_mm512_abs_epi16(__m512i __A) {
- // CHECK-LABEL: @test_mm512_abs_epi16
+ // CHECK-LABEL: test_mm512_abs_epi16
// CHECK: [[ABS:%.*]] = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %{{.*}}, i1 false)
return _mm512_abs_epi16(__A);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_abs_epi16((__m512i)(__v32hi){0, +1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, +32000, -32000, +32000, -32000}), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 32000, 32000, 32000, 32000));
+
__m512i test_mm512_mask_abs_epi16(__m512i __W, __mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_abs_epi16
+ // CHECK-LABEL: test_mm512_mask_abs_epi16
// CHECK: [[ABS:%.*]] = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %{{.*}}, i1 false)
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> [[ABS]], <32 x i16> %{{.*}}
return _mm512_mask_abs_epi16(__W,__U,__A);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_abs_epi16((__m512i)(__v32hi){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, (__mmask32)0x00000001, (__m512i)(__v32hi){-1000, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}), 1000, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99));
+
__m512i test_mm512_maskz_abs_epi16(__mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_abs_epi16
+ // CHECK-LABEL: test_mm512_maskz_abs_epi16
// CHECK: [[ABS:%.*]] = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %{{.*}}, i1 false)
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> [[ABS]], <32 x i16> %{{.*}}
return _mm512_maskz_abs_epi16(__U,__A);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_abs_epi16((__mmask32)0x00000001, (__m512i)(__v32hi){-1000, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}), 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+
__m512i test_mm512_packs_epi32(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_packs_epi32
+ // CHECK-LABEL: test_mm512_packs_epi32
// CHECK: @llvm.x86.avx512.packssdw.512
return _mm512_packs_epi32(__A,__B);
}
__m512i test_mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_packs_epi32
+ // CHECK-LABEL: test_mm512_maskz_packs_epi32
// CHECK: @llvm.x86.avx512.packssdw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_packs_epi32(__M,__A,__B);
}
__m512i test_mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_packs_epi32
+ // CHECK-LABEL: test_mm512_mask_packs_epi32
// CHECK: @llvm.x86.avx512.packssdw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_packs_epi32(__W,__M,__A,__B);
}
__m512i test_mm512_packs_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_packs_epi16
+ // CHECK-LABEL: test_mm512_packs_epi16
// CHECK: @llvm.x86.avx512.packsswb.512
return _mm512_packs_epi16(__A,__B);
}
__m512i test_mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_packs_epi16
+ // CHECK-LABEL: test_mm512_mask_packs_epi16
// CHECK: @llvm.x86.avx512.packsswb.512
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_packs_epi16(__W,__M,__A,__B);
}
__m512i test_mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_packs_epi16
+ // CHECK-LABEL: test_mm512_maskz_packs_epi16
// CHECK: @llvm.x86.avx512.packsswb.512
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_packs_epi16(__M,__A,__B);
}
__m512i test_mm512_packus_epi32(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_packus_epi32
+ // CHECK-LABEL: test_mm512_packus_epi32
// CHECK: @llvm.x86.avx512.packusdw.512
return _mm512_packus_epi32(__A,__B);
}
__m512i test_mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_packus_epi32
+ // CHECK-LABEL: test_mm512_maskz_packus_epi32
// CHECK: @llvm.x86.avx512.packusdw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_packus_epi32(__M,__A,__B);
}
__m512i test_mm512_mask_packus_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_packus_epi32
+ // CHECK-LABEL: test_mm512_mask_packus_epi32
// CHECK: @llvm.x86.avx512.packusdw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_packus_epi32(__W,__M,__A,__B);
}
__m512i test_mm512_packus_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_packus_epi16
+ // CHECK-LABEL: test_mm512_packus_epi16
// CHECK: @llvm.x86.avx512.packuswb.512
return _mm512_packus_epi16(__A,__B);
}
__m512i test_mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_packus_epi16
+ // CHECK-LABEL: test_mm512_mask_packus_epi16
// CHECK: @llvm.x86.avx512.packuswb.512
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_packus_epi16(__W,__M,__A,__B);
}
__m512i test_mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_packus_epi16
+ // CHECK-LABEL: test_mm512_maskz_packus_epi16
// CHECK: @llvm.x86.avx512.packuswb.512
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_packus_epi16(__M,__A,__B);
}
__m512i test_mm512_adds_epi8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_adds_epi8
+ // CHECK-LABEL: test_mm512_adds_epi8
// CHECK: @llvm.sadd.sat.v64i8
return _mm512_adds_epi8(__A,__B);
}
+TEST_CONSTEXPR(match_v64qi(_mm512_adds_epi8((__m512i)(__v64qs){0, +1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32, +33, -34, +35, -36, +37, -38, +39, -40, +41, -42, +43, -44, +45, -46, +47, +100, +50, -100, +20, +80, -50, +120, -20, -100, -50, +100, -20, -80, +50, -120, +20}, (__m512i)(__v64qs){0, +1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32, +33, -34, +35, -36, +37, -38, +39, -40, +41, -42, +43, -44, +45, -46, +47, +50, +80, -50, +110, +60, -30, +20, -10, +50, +80, -50, +110, +60, -30, +20, -10}), 0, +2, -4, +6, -8, +10, -12, +14, -16, +18, -20, +22, -24, +26, -28, +30, -32, +34, -36, +38, -40, +42, -44, +46, -48, +50, -52, +54, -56, +58, -60, +62, -64, +66, -68, +70, -72, +74, -76, +78, -80, +82, -84, +86, -88, +90, -92, +94, +127, +127, -128, +127, +127, -80, +127, -30, -50, +30, +50, +90, -20, +20, -100, +10));
+
__m512i test_mm512_mask_adds_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_adds_epi8
+ // CHECK-LABEL: test_mm512_mask_adds_epi8
// CHECK: @llvm.sadd.sat.v64i8
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_adds_epi8(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_adds_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_adds_epi8
+ // CHECK-LABEL: test_mm512_maskz_adds_epi8
// CHECK: @llvm.sadd.sat.v64i8
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_adds_epi8(__U,__A,__B);
}
__m512i test_mm512_adds_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_adds_epi16
+ // CHECK-LABEL: test_mm512_adds_epi16
// CHECK: @llvm.sadd.sat.v32i16
return _mm512_adds_epi16(__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_adds_epi16((__m512i)(__v32hi){0, +1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, +32000, -32000, +32000, -32000}, (__m512i)(__v32hi){0, +1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, +800, -800, -800, +800}), 0, +2, -4, +6, -8, +10, -12, +14, -16, +18, -20, +22, -24, +26, -28, +30, -32, +34, -36, +38, -40, +42, -44, +46, -48, +50, -52, +54, +32767, -32768, +31200, -31200));
+
__m512i test_mm512_mask_adds_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_adds_epi16
+ // CHECK-LABEL: test_mm512_mask_adds_epi16
// CHECK: @llvm.sadd.sat.v32i16
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_adds_epi16(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_adds_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_adds_epi16
+ // CHECK-LABEL: test_mm512_maskz_adds_epi16
// CHECK: @llvm.sadd.sat.v32i16
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_adds_epi16(__U,__A,__B);
}
__m512i test_mm512_adds_epu8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_adds_epu8
+ // CHECK-LABEL: test_mm512_adds_epu8
// CHECK-NOT: @llvm.x86.avx512.mask.paddus.b.512
// CHECK: call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_adds_epu8(__A,__B);
}
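+// Unsigned saturation: i8 sums above 255 clamp to 255, which is why the all-255 rows below stay at 255.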
+TEST_CONSTEXPR(match_v64qu(_mm512_adds_epu8((__m512i)(__v64qu){0, 0, 0, 0, 0, 0, 0, 0, +63, +63, +63, +63, +63, +63, +63, +63, +64, +64, +64, +64, +64, +64, +64, +64, +127, +127, +127, +127, +127, +127, +127, +127, +128, +128, +128, +128, +128, +128, +128, +128, +191, +191, +191, +191, +191, +191, +191, +191, +192, +192, +192, +192, +192, +192, +192, +192, +255, +255, +255, +255, +255, +255, +255, +255}, (__m512i)(__v64qu){0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255}), 0, +63, +64, +127, +128, +191, +192, +255, +63, +126, +127, +190, +191, +254, +255, +255, +64, +127, +128, +191, +192, +255, +255, +255, +127, +190, +191, +254, +255, +255, +255, +255, +128, +191, +192, +255, +255, +255, +255, +255, +191, +254, +255, +255, +255, +255, +255, +255, +192, +255, +255, +255, +255, +255, +255, +255, +255, +255, +255, +255, +255, +255, +255, +255));
+
__m512i test_mm512_mask_adds_epu8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_adds_epu8
+ // CHECK-LABEL: test_mm512_mask_adds_epu8
// CHECK-NOT: @llvm.x86.avx512.mask.paddus.b.512
// CHECK: call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_adds_epu8(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_adds_epu8(__mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_adds_epu8
+ // CHECK-LABEL: test_mm512_maskz_adds_epu8
// CHECK-NOT: @llvm.x86.avx512.mask.paddus.b.512
// CHECK: call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_adds_epu8(__U,__A,__B);
}
__m512i test_mm512_adds_epu16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_adds_epu16
+ // CHECK-LABEL: test_mm512_adds_epu16
// CHECK-NOT: @llvm.x86.avx512.mask.paddus.w.512
// CHECK: call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_adds_epu16(__A,__B);
}
+TEST_CONSTEXPR(match_v32hu(_mm512_adds_epu16((__m512i)(__v32hu){0, 0, 0, 0, +16384, +16384, +16384, +16384, +16384, +16384, +32767, +32767, +32767, +32767, +32767, +32767, +32768, +32768, +32768, +32768, +32768, +32768, +49152, +49152, +49152, +49152, +49152, +49152, +65535, +65535, +65535, +65535}, (__m512i)(__v32hu){0, +32767, +32768, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +32767, +32768, +65535}), 0, +32767, +32768, +65535, +16384, +32768, +49151, +49152, +65535, +65535, +32767, +49151, +65534, +65535, +65535, +65535, +32768, +49152, +65535, +65535, +65535, +65535, +49152, +65535, +65535, +65535, +65535, +65535, +65535, +65535, +65535, +65535));
+
__m512i test_mm512_mask_adds_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_adds_epu16
+ // CHECK-LABEL: test_mm512_mask_adds_epu16
// CHECK-NOT: @llvm.x86.avx512.mask.paddus.w.512
// CHECK: call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_adds_epu16(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_adds_epu16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_adds_epu16
+ // CHECK-LABEL: test_mm512_maskz_adds_epu16
// CHECK-NOT: @llvm.x86.avx512.mask.paddus.w.512
// CHECK: call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_adds_epu16(__U,__A,__B);
}
__m512i test_mm512_avg_epu8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_avg_epu8
+ // CHECK-LABEL: test_mm512_avg_epu8
// CHECK: @llvm.x86.avx512.pavg.b.512
return _mm512_avg_epu8(__A,__B);
}
__m512i test_mm512_mask_avg_epu8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_avg_epu8
+ // CHECK-LABEL: test_mm512_mask_avg_epu8
// CHECK: @llvm.x86.avx512.pavg.b.512
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_avg_epu8(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_avg_epu8(__mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_avg_epu8
+ // CHECK-LABEL: test_mm512_maskz_avg_epu8
// CHECK: @llvm.x86.avx512.pavg.b.512
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_avg_epu8(__U,__A,__B);
}
__m512i test_mm512_avg_epu16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_avg_epu16
+ // CHECK-LABEL: test_mm512_avg_epu16
// CHECK: @llvm.x86.avx512.pavg.w.512
return _mm512_avg_epu16(__A,__B);
}
__m512i test_mm512_mask_avg_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_avg_epu16
+ // CHECK-LABEL: test_mm512_mask_avg_epu16
// CHECK: @llvm.x86.avx512.pavg.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_avg_epu16(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_avg_epu16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_avg_epu16
+ // CHECK-LABEL: test_mm512_maskz_avg_epu16
// CHECK: @llvm.x86.avx512.pavg.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_avg_epu16(__U,__A,__B);
}
__m512i test_mm512_max_epi8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_max_epi8
+ // CHECK-LABEL: test_mm512_max_epi8
// CHECK: [[RES:%.*]] = call <64 x i8> @llvm.smax.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_max_epi8(__A,__B);
}
__m512i test_mm512_maskz_max_epi8(__mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_max_epi8
+ // CHECK-LABEL: test_mm512_maskz_max_epi8
// CHECK: [[RES:%.*]] = call <64 x i8> @llvm.smax.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> {{.*}}, <64 x i8> [[RES]], <64 x i8> {{.*}}
return _mm512_maskz_max_epi8(__M,__A,__B);
}
__m512i test_mm512_mask_max_epi8(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_max_epi8
+ // CHECK-LABEL: test_mm512_mask_max_epi8
// CHECK: [[RES:%.*]] = call <64 x i8> @llvm.smax.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> {{.*}}, <64 x i8> [[RES]], <64 x i8> {{.*}}
return _mm512_mask_max_epi8(__W,__M,__A,__B);
}
__m512i test_mm512_max_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_max_epi16
+ // CHECK-LABEL: test_mm512_max_epi16
// CHECK: [[RES:%.*]] = call <32 x i16> @llvm.smax.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_max_epi16(__A,__B);
}
__m512i test_mm512_maskz_max_epi16(__mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_max_epi16
+ // CHECK-LABEL: test_mm512_maskz_max_epi16
// CHECK: [[RES:%.*]] = call <32 x i16> @llvm.smax.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> {{.*}}, <32 x i16> [[RES]], <32 x i16> {{.*}}
return _mm512_maskz_max_epi16(__M,__A,__B);
}
__m512i test_mm512_mask_max_epi16(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_max_epi16
+ // CHECK-LABEL: test_mm512_mask_max_epi16
// CHECK: [[RES:%.*]] = call <32 x i16> @llvm.smax.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> {{.*}}, <32 x i16> [[RES]], <32 x i16> {{.*}}
return _mm512_mask_max_epi16(__W,__M,__A,__B);
}
__m512i test_mm512_max_epu8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_max_epu8
+ // CHECK-LABEL: test_mm512_max_epu8
// CHECK: [[RES:%.*]] = call <64 x i8> @llvm.umax.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_max_epu8(__A,__B);
}
__m512i test_mm512_maskz_max_epu8(__mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_max_epu8
+ // CHECK-LABEL: test_mm512_maskz_max_epu8
// CHECK: [[RES:%.*]] = call <64 x i8> @llvm.umax.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> {{.*}}, <64 x i8> [[RES]], <64 x i8> {{.*}}
return _mm512_maskz_max_epu8(__M,__A,__B);
}
__m512i test_mm512_mask_max_epu8(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_max_epu8
+ // CHECK-LABEL: test_mm512_mask_max_epu8
// CHECK: [[RES:%.*]] = call <64 x i8> @llvm.umax.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> {{.*}}, <64 x i8> [[RES]], <64 x i8> {{.*}}
return _mm512_mask_max_epu8(__W,__M,__A,__B);
}
__m512i test_mm512_max_epu16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_max_epu16
+ // CHECK-LABEL: test_mm512_max_epu16
// CHECK: [[RES:%.*]] = call <32 x i16> @llvm.umax.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_max_epu16(__A,__B);
}
__m512i test_mm512_maskz_max_epu16(__mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_max_epu16
+ // CHECK-LABEL: test_mm512_maskz_max_epu16
// CHECK: [[RES:%.*]] = call <32 x i16> @llvm.umax.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> {{.*}}, <32 x i16> [[RES]], <32 x i16> {{.*}}
return _mm512_maskz_max_epu16(__M,__A,__B);
}
__m512i test_mm512_mask_max_epu16(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_max_epu16
+ // CHECK-LABEL: test_mm512_mask_max_epu16
// CHECK: [[RES:%.*]] = call <32 x i16> @llvm.umax.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> {{.*}}, <32 x i16> [[RES]], <32 x i16> {{.*}}
return _mm512_mask_max_epu16(__W,__M,__A,__B);
}
__m512i test_mm512_min_epi8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_min_epi8
+ // CHECK-LABEL: test_mm512_min_epi8
// CHECK: [[RES:%.*]] = call <64 x i8> @llvm.smin.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_min_epi8(__A,__B);
}
__m512i test_mm512_maskz_min_epi8(__mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_min_epi8
+ // CHECK-LABEL: test_mm512_maskz_min_epi8
// CHECK: [[RES:%.*]] = call <64 x i8> @llvm.smin.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> {{.*}}, <64 x i8> [[RES]], <64 x i8> {{.*}}
return _mm512_maskz_min_epi8(__M,__A,__B);
}
__m512i test_mm512_mask_min_epi8(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_min_epi8
+ // CHECK-LABEL: test_mm512_mask_min_epi8
// CHECK: [[RES:%.*]] = call <64 x i8> @llvm.smin.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> {{.*}}, <64 x i8> [[RES]], <64 x i8> {{.*}}
return _mm512_mask_min_epi8(__W,__M,__A,__B);
}
__m512i test_mm512_min_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_min_epi16
+ // CHECK-LABEL: test_mm512_min_epi16
// CHECK: [[RES:%.*]] = call <32 x i16> @llvm.smin.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_min_epi16(__A,__B);
}
__m512i test_mm512_maskz_min_epi16(__mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_min_epi16
+ // CHECK-LABEL: test_mm512_maskz_min_epi16
// CHECK: [[RES:%.*]] = call <32 x i16> @llvm.smin.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> {{.*}}, <32 x i16> [[RES]], <32 x i16> {{.*}}
return _mm512_maskz_min_epi16(__M,__A,__B);
}
__m512i test_mm512_mask_min_epi16(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_min_epi16
+ // CHECK-LABEL: test_mm512_mask_min_epi16
// CHECK: [[RES:%.*]] = call <32 x i16> @llvm.smin.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> {{.*}}, <32 x i16> [[RES]], <32 x i16> {{.*}}
return _mm512_mask_min_epi16(__W,__M,__A,__B);
}
__m512i test_mm512_min_epu8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_min_epu8
+ // CHECK-LABEL: test_mm512_min_epu8
// CHECK: [[RES:%.*]] = call <64 x i8> @llvm.umin.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_min_epu8(__A,__B);
}
__m512i test_mm512_maskz_min_epu8(__mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_min_epu8
+ // CHECK-LABEL: test_mm512_maskz_min_epu8
// CHECK: [[RES:%.*]] = call <64 x i8> @llvm.umin.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> {{.*}}, <64 x i8> [[RES]], <64 x i8> {{.*}}
return _mm512_maskz_min_epu8(__M,__A,__B);
}
__m512i test_mm512_mask_min_epu8(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_min_epu8
+ // CHECK-LABEL: test_mm512_mask_min_epu8
// CHECK: [[RES:%.*]] = call <64 x i8> @llvm.umin.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> {{.*}}, <64 x i8> [[RES]], <64 x i8> {{.*}}
return _mm512_mask_min_epu8(__W,__M,__A,__B);
}
__m512i test_mm512_min_epu16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_min_epu16
+ // CHECK-LABEL: test_mm512_min_epu16
// CHECK: [[RES:%.*]] = call <32 x i16> @llvm.umin.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_min_epu16(__A,__B);
}
__m512i test_mm512_maskz_min_epu16(__mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_min_epu16
+ // CHECK-LABEL: test_mm512_maskz_min_epu16
// CHECK: [[RES:%.*]] = call <32 x i16> @llvm.umin.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> {{.*}}, <32 x i16> [[RES]], <32 x i16> {{.*}}
return _mm512_maskz_min_epu16(__M,__A,__B);
}
__m512i test_mm512_mask_min_epu16(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_min_epu16
+ // CHECK-LABEL: test_mm512_mask_min_epu16
// CHECK: [[RES:%.*]] = call <32 x i16> @llvm.umin.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> {{.*}}, <32 x i16> [[RES]], <32 x i16> {{.*}}
return _mm512_mask_min_epu16(__W,__M,__A,__B);
}
__m512i test_mm512_shuffle_epi8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_shuffle_epi8
+ // CHECK-LABEL: test_mm512_shuffle_epi8
// CHECK: @llvm.x86.avx512.pshuf.b.512
return _mm512_shuffle_epi8(__A,__B);
}
__m512i test_mm512_mask_shuffle_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_shuffle_epi8
+ // CHECK-LABEL: test_mm512_mask_shuffle_epi8
// CHECK: @llvm.x86.avx512.pshuf.b.512
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_shuffle_epi8(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_shuffle_epi8
+ // CHECK-LABEL: test_mm512_maskz_shuffle_epi8
// CHECK: @llvm.x86.avx512.pshuf.b.512
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_shuffle_epi8(__U,__A,__B);
}
__m512i test_mm512_subs_epi8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_subs_epi8
+ // CHECK-LABEL: test_mm512_subs_epi8
// CHECK: @llvm.ssub.sat.v64i8
return _mm512_subs_epi8(__A,__B);
}
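+// Signed saturating subtract: i8 differences outside [-128, +127] clamp, e.g. +100 - -50 -> +127 and -100 - +50 -> -128.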
+TEST_CONSTEXPR(match_v64qi(_mm512_subs_epi8((__m512i)(__v64qs){0, +1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32, +33, -34, +35, -36, +37, -38, +39, -40, +41, -42, +43, -44, +45, -46, +47, +100, +50, -100, +20, +80, -50, +120, -20, -100, -50, +100, -20, -80, +50, -120, +20}, (__m512i)(__v64qs){0, -1, +2, -3, +4, -5, +6, -7, +8, -9, +10, -11, +12, -13, +14, -15, +16, -17, +18, -19, +20, -21, +22, -23, +24, -25, +26, -27, +28, -29, +30, -31, +32, -33, +34, -35, +36, -37, +38, -39, +40, -41, +42, -43, +44, -45, +46, -47, -50, -80, +50, -110, -60, +30, -20, +10, -50, -80, +50, -110, -60, +30, -20, +10}), 0, +2, -4, +6, -8, +10, -12, +14, -16, +18, -20, +22, -24, +26, -28, +30, -32, +34, -36, +38, -40, +42, -44, +46, -48, +50, -52, +54, -56, +58, -60, +62, -64, +66, -68, +70, -72, +74, -76, +78, -80, +82, -84, +86, -88, +90, -92, +94, +127, +127, -128, +127, +127, -80, +127, -30, -50, +30, +50, +90, -20, +20, -100, +10));
+
__m512i test_mm512_mask_subs_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_subs_epi8
+ // CHECK-LABEL: test_mm512_mask_subs_epi8
// CHECK: @llvm.ssub.sat.v64i8
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_subs_epi8(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_subs_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_subs_epi8
+ // CHECK-LABEL: test_mm512_maskz_subs_epi8
// CHECK: @llvm.ssub.sat.v64i8
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_subs_epi8(__U,__A,__B);
}
__m512i test_mm512_subs_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_subs_epi16
+ // CHECK-LABEL: test_mm512_subs_epi16
// CHECK: @llvm.ssub.sat.v32i16
return _mm512_subs_epi16(__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_subs_epi16((__m512i)(__v32hi){0, +1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, +32000, -32000, +32000, -32000}, (__m512i)(__v32hi){0, -1, +2, -3, +4, -5, +6, -7, +8, -9, +10, -11, +12, -13, +14, -15, +16, -17, +18, -19, +20, -21, +22, -23, +24, -25, +26, -27, -800, +800, +800, -800}), 0, +2, -4, +6, -8, +10, -12, +14, -16, +18, -20, +22, -24, +26, -28, +30, -32, +34, -36, +38, -40, +42, -44, +46, -48, +50, -52, +54, +32767, -32768, +31200, -31200));
__m512i test_mm512_mask_subs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_subs_epi16
+ // CHECK-LABEL: test_mm512_mask_subs_epi16
// CHECK: @llvm.ssub.sat.v32i16
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_subs_epi16(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_subs_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_subs_epi16
+ // CHECK-LABEL: test_mm512_maskz_subs_epi16
// CHECK: @llvm.ssub.sat.v32i16
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_subs_epi16(__U,__A,__B);
}
__m512i test_mm512_subs_epu8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_subs_epu8
+ // CHECK-LABEL: test_mm512_subs_epu8
// CHECK-NOT: @llvm.x86.avx512.mask.psubus.b.512
// CHECK: call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_subs_epu8(__A,__B);
}
__m512i test_mm512_mask_subs_epu8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_subs_epu8
+ // CHECK-LABEL: test_mm512_mask_subs_epu8
// CHECK-NOT: @llvm.x86.avx512.mask.psubus.b.512
// CHECK: call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_subs_epu8(__W,__U,__A,__B);
}
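+// Unsigned saturating subtract floors at 0: lanes where the subtrahend exceeds the minuend yield 0 instead of wrapping.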
+TEST_CONSTEXPR(match_v64qu(_mm512_subs_epu8((__m512i)(__v64qu){0, 0, 0, 0, 0, 0, 0, 0, +63, +63, +63, +63, +63, +63, +63, +63, +64, +64, +64, +64, +64, +64, +64, +64, +127, +127, +127, +127, +127, +127, +127, +127, +128, +128, +128, +128, +128, +128, +128, +128, +191, +191, +191, +191, +191, +191, +191, +191, +192, +192, +192, +192, +192, +192, +192, +192, +255, +255, +255, +255, +255, +255, +255, +255}, (__m512i)(__v64qu){0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255}), 0, 0, 0, 0, 0, 0, 0, 0, +63, 0, 0, 0, 0, 0, 0, 0, +64, +1, 0, 0, 0, 0, 0, 0, +127, +64, +63, 0, 0, 0, 0, 0, +128, +65, +64, +1, 0, 0, 0, 0, +191, +128, +127, +64, +63, 0, 0, 0, +192, +129, +128, +65, +64, +1, 0, 0, +255, +192, +191, +128, +127, +64, +63, +0));
+
__m512i test_mm512_maskz_subs_epu8(__mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_subs_epu8
+ // CHECK-LABEL: test_mm512_maskz_subs_epu8
// CHECK-NOT: @llvm.x86.avx512.mask.psubus.b.512
// CHECK: call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_subs_epu8(__U,__A,__B);
}
__m512i test_mm512_subs_epu16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_subs_epu16
+ // CHECK-LABEL: test_mm512_subs_epu16
// CHECK-NOT: @llvm.x86.avx512.mask.psubus.w.512
// CHECK: call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_subs_epu16(__A,__B);
}
+TEST_CONSTEXPR(match_v32hu(_mm512_subs_epu16((__m512i)(__v32hu){0, 0, 0, 0, +16384, +16384, +16384, +16384, +16384, +16384, +32767, +32767, +32767, +32767, +32767, +32767, +32768, +32768, +32768, +32768, +32768, +32768, +49152, +49152, +49152, +49152, +49152, +49152, +65535, +65535, +65535, +65535}, (__m512i)(__v32hu){0, +32767, +32768, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +32767, +32768, +65535}), 0, 0, 0, 0, +16384, 0, 0, 0, 0, 0, +32767, +16383, 0, 0, 0, 0, +32768, +16384, +1, 0, 0, 0, +49152, +32768, +16385, +16384, 0, 0, +65535, +32768, +32767, 0));
+
__m512i test_mm512_mask_subs_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_subs_epu16
+ // CHECK-LABEL: test_mm512_mask_subs_epu16
// CHECK-NOT: @llvm.x86.avx512.mask.psubus.w.512
// CHECK: call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_subs_epu16(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_subs_epu16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_subs_epu16
+ // CHECK-LABEL: test_mm512_maskz_subs_epu16
// CHECK-NOT: @llvm.x86.avx512.mask.psubus.w.512
// CHECK: call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_subs_epu16(__U,__A,__B);
}
__m512i test_mm512_mask2_permutex2var_epi16(__m512i __A, __m512i __I, __mmask32 __U, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask2_permutex2var_epi16
+ // CHECK-LABEL: test_mm512_mask2_permutex2var_epi16
// CHECK: @llvm.x86.avx512.vpermi2var.hi.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask2_permutex2var_epi16(__A,__I,__U,__B);
}
__m512i test_mm512_permutex2var_epi16(__m512i __A, __m512i __I, __m512i __B) {
- // CHECK-LABEL: @test_mm512_permutex2var_epi16
+ // CHECK-LABEL: test_mm512_permutex2var_epi16
// CHECK: @llvm.x86.avx512.vpermi2var.hi.512
return _mm512_permutex2var_epi16(__A,__I,__B);
}
__m512i test_mm512_mask_permutex2var_epi16(__m512i __A, __mmask32 __U, __m512i __I, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_permutex2var_epi16
+ // CHECK-LABEL: test_mm512_mask_permutex2var_epi16
// CHECK: @llvm.x86.avx512.vpermi2var.hi.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_permutex2var_epi16(__A,__U,__I,__B);
}
__m512i test_mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_permutex2var_epi16
+ // CHECK-LABEL: test_mm512_maskz_permutex2var_epi16
// CHECK: @llvm.x86.avx512.vpermi2var.hi.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_permutex2var_epi16(__U,__A,__I,__B);
}
__m512i test_mm512_mulhrs_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mulhrs_epi16
+ // CHECK-LABEL: test_mm512_mulhrs_epi16
// CHECK: @llvm.x86.avx512.pmul.hr.sw.512
return _mm512_mulhrs_epi16(__A,__B);
}
__m512i test_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_mulhrs_epi16
+ // CHECK-LABEL: test_mm512_mask_mulhrs_epi16
// CHECK: @llvm.x86.avx512.pmul.hr.sw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_mulhrs_epi16(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_mulhrs_epi16
+ // CHECK-LABEL: test_mm512_maskz_mulhrs_epi16
// CHECK: @llvm.x86.avx512.pmul.hr.sw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_mulhrs_epi16(__U,__A,__B);
}
__m512i test_mm512_mulhi_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mulhi_epi16
+ // CHECK-LABEL: test_mm512_mulhi_epi16
// CHECK: @llvm.x86.avx512.pmulh.w.512
return _mm512_mulhi_epi16(__A,__B);
}
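+// pmulhw returns the high 16 bits of each signed 32-bit product, so the small products below collapse to 0 (non-negative) or -1 (negative).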
+TEST_CONSTEXPR(match_v32hi(_mm512_mulhi_epi16((__m512i)(__v32hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32}, (__m512i)(__v32hi){-64, -62, +60, +58, -56, -54, +52, +50, -48, -46, +44, +42, -40, -38, +36, +34, -32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, -1, -1, -1));
+
__m512i test_mm512_mask_mulhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_mulhi_epi16
+ // CHECK-LABEL: test_mm512_mask_mulhi_epi16
// CHECK: @llvm.x86.avx512.pmulh.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_mulhi_epi16(__W,__U,__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_mulhi_epi16(_mm512_set1_epi16(1), 0xF00FF00F, (__m512i)(__v32hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32}, (__m512i)(__v32hi){-64, -62, +60, +58, -56, -54, +52, +50, -48, -46, +44, +42, -40, -38, +36, +34, -32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), -1, 0, 0, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 0, 0, -1, -1, 0, 0, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1));
+
__m512i test_mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_mulhi_epi16
+ // CHECK-LABEL: test_mm512_maskz_mulhi_epi16
// CHECK: @llvm.x86.avx512.pmulh.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_mulhi_epi16(__U,__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_mulhi_epi16(0x0FF00FF0, (__m512i)(__v32hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32}, (__m512i)(__v32hi){-64, -62, +60, +58, -56, -54, +52, +50, -48, -46, +44, +42, -40, -38, +36, +34, -32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), 0, 0, 0, 0, -1, 0, 0, -1, -1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, -1, -1, 0, 0, -1, 0, 0, 0, 0));
+
__m512i test_mm512_mulhi_epu16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mulhi_epu16
+ // CHECK-LABEL: test_mm512_mulhi_epu16
// CHECK: @llvm.x86.avx512.pmulhu.w.512
return _mm512_mulhi_epu16(__A,__B);
}
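+// pmulhuw multiplies the operands as unsigned before taking the high 16 bits, so lanes with the sign bit set act as large values (e.g. -2 * -62 -> high half -64).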
+TEST_CONSTEXPR(match_v32hi(_mm512_mulhi_epu16((__m512i)(__v32hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32}, (__m512i)(__v32hi){-64, -62, +60, +58, -56, -54, +52, +50, -48, -46, +44, +42, -40, -38, +36, +34, -32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), 0, -64, 0, 57, 4, -60, 0, 49, 8, -56, 0, 41, 12, -52, 0, 33, 16, -48, 0, 25, 20, -44, 0, 17, 24, -40, 0, 9, 28, 5, 30, 1));
+
__m512i test_mm512_mask_mulhi_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_mulhi_epu16
+ // CHECK-LABEL: test_mm512_mask_mulhi_epu16
// CHECK: @llvm.x86.avx512.pmulhu.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_mulhi_epu16(__W,__U,__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_mulhi_epu16(_mm512_set1_epi16(1), 0x0FF00FF0, (__m512i)(__v32hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32}, (__m512i)(__v32hi){-64, -62, +60, +58, -56, -54, +52, +50, -48, -46, +44, +42, -40, -38, +36, +34, -32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), 1, 1, 1, 1, 4, -60, 0, 49, 8, -56, 0, 41, 1, 1, 1, 1, 1, 1, 1, 1, 20, -44, 0, 17, 24, -40, 0, 9, 1, 1, 1, 1));
+
__m512i test_mm512_maskz_mulhi_epu16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_mulhi_epu16
+ // CHECK-LABEL: test_mm512_maskz_mulhi_epu16
// CHECK: @llvm.x86.avx512.pmulhu.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_mulhi_epu16(__U,__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_mulhi_epu16(0xF00FF00F, (__m512i)(__v32hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32}, (__m512i)(__v32hi){-64, -62, +60, +58, -56, -54, +52, +50, -48, -46, +44, +42, -40, -38, +36, +34, -32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), 0, -64, 0, 57, 0, 0, 0, 0, 0, 0, 0, 0, 12, -52, 0, 33, 16, -48, 0, 25, 0, 0, 0, 0, 0, 0, 0, 0, 28, 5, 30, 1));
__m512i test_mm512_maddubs_epi16(__m512i __X, __m512i __Y) {
- // CHECK-LABEL: @test_mm512_maddubs_epi16
+ // CHECK-LABEL: test_mm512_maddubs_epi16
// CHECK: @llvm.x86.avx512.pmaddubs.w.512
return _mm512_maddubs_epi16(__X,__Y);
}
__m512i test_mm512_mask_maddubs_epi16(__m512i __W, __mmask32 __U, __m512i __X, __m512i __Y) {
- // CHECK-LABEL: @test_mm512_mask_maddubs_epi16
+ // CHECK-LABEL: test_mm512_mask_maddubs_epi16
// CHECK: @llvm.x86.avx512.pmaddubs.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_maddubs_epi16(__W,__U,__X,__Y);
}
__m512i test_mm512_maskz_maddubs_epi16(__mmask32 __U, __m512i __X, __m512i __Y) {
- // CHECK-LABEL: @test_mm512_maskz_maddubs_epi16
+ // CHECK-LABEL: test_mm512_maskz_maddubs_epi16
// CHECK: @llvm.x86.avx512.pmaddubs.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_maddubs_epi16(__U,__X,__Y);
}
__m512i test_mm512_madd_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_madd_epi16
+ // CHECK-LABEL: test_mm512_madd_epi16
// CHECK: @llvm.x86.avx512.pmaddw.d.512
return _mm512_madd_epi16(__A,__B);
}
__m512i test_mm512_mask_madd_epi16(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_madd_epi16
+ // CHECK-LABEL: test_mm512_mask_madd_epi16
// CHECK: @llvm.x86.avx512.pmaddw.d.512
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_madd_epi16(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_madd_epi16(__mmask16 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_madd_epi16
+ // CHECK-LABEL: test_mm512_maskz_madd_epi16
// CHECK: @llvm.x86.avx512.pmaddw.d.512
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_madd_epi16(__U,__A,__B);
}
__m256i test_mm512_cvtsepi16_epi8(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvtsepi16_epi8
+ // CHECK-LABEL: test_mm512_cvtsepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.wb.512
return _mm512_cvtsepi16_epi8(__A);
}
__m256i test_mm512_mask_cvtsepi16_epi8(__m256i __O, __mmask32 __M, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtsepi16_epi8
+ // CHECK-LABEL: test_mm512_mask_cvtsepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.wb.512
return _mm512_mask_cvtsepi16_epi8(__O, __M, __A);
}
__m256i test_mm512_maskz_cvtsepi16_epi8(__mmask32 __M, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtsepi16_epi8
+ // CHECK-LABEL: test_mm512_maskz_cvtsepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.wb.512
return _mm512_maskz_cvtsepi16_epi8(__M, __A);
}
__m256i test_mm512_cvtusepi16_epi8(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvtusepi16_epi8
+ // CHECK-LABEL: test_mm512_cvtusepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.wb.512
return _mm512_cvtusepi16_epi8(__A);
}
__m256i test_mm512_mask_cvtusepi16_epi8(__m256i __O, __mmask32 __M, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtusepi16_epi8
+ // CHECK-LABEL: test_mm512_mask_cvtusepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.wb.512
return _mm512_mask_cvtusepi16_epi8(__O, __M, __A);
}
__m256i test_mm512_maskz_cvtusepi16_epi8(__mmask32 __M, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtusepi16_epi8
+ // CHECK-LABEL: test_mm512_maskz_cvtusepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.wb.512
return _mm512_maskz_cvtusepi16_epi8(__M, __A);
}
__m256i test_mm512_cvtepi16_epi8(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvtepi16_epi8
+ // CHECK-LABEL: test_mm512_cvtepi16_epi8
// CHECK: trunc <32 x i16> %{{.*}} to <32 x i8>
return _mm512_cvtepi16_epi8(__A);
}
__m256i test_mm512_mask_cvtepi16_epi8(__m256i __O, __mmask32 __M, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtepi16_epi8
+ // CHECK-LABEL: test_mm512_mask_cvtepi16_epi8
// CHECK: trunc <32 x i16> %{{.*}} to <32 x i8>
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm512_mask_cvtepi16_epi8(__O, __M, __A);
}
__m256i test_mm512_maskz_cvtepi16_epi8(__mmask32 __M, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtepi16_epi8
+ // CHECK-LABEL: test_mm512_maskz_cvtepi16_epi8
// CHECK: trunc <32 x i16> %{{.*}} to <32 x i8>
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm512_maskz_cvtepi16_epi8(__M, __A);
}
__m512i test_mm512_unpackhi_epi8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_unpackhi_epi8
+ // CHECK-LABEL: test_mm512_unpackhi_epi8
// CHECK: shufflevector <64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i32> <i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
return _mm512_unpackhi_epi8(__A, __B);
}
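+// unpackhi interleaves the high eight bytes of each 128-bit lane of A and B, matching the shufflevector indices above.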
+TEST_CONSTEXPR(match_v64qi(_mm512_unpackhi_epi8((__m512i)(__v64qi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}, (__m512i)(__v64qi){64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127}), 8, 72, 9, 73, 10, 74, 11, 75, 12, 76, 13, 77, 14, 78, 15, 79, 24, 88, 25, 89, 26, 90, 27, 91, 28, 92, 29, 93, 30, 94, 31, 95, 40, 104, 41, 105, 42, 106, 43, 107, 44, 108, 45, 109, 46, 110, 47, 111, 56, 120, 57, 121, 58, 122, 59, 123, 60, 124, 61, 125, 62, 126, 63, 127));
__m512i test_mm512_mask_unpackhi_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_unpackhi_epi8
+ // CHECK-LABEL: test_mm512_mask_unpackhi_epi8
// CHECK: shufflevector <64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i32> <i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_unpackhi_epi8(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_unpackhi_epi8
+ // CHECK-LABEL: test_mm512_maskz_unpackhi_epi8
// CHECK: shufflevector <64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i32> <i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_unpackhi_epi8(__U, __A, __B);
}
__m512i test_mm512_unpackhi_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_unpackhi_epi16
+ // CHECK-LABEL: test_mm512_unpackhi_epi16
// CHECK: shufflevector <32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i32> <i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
return _mm512_unpackhi_epi16(__A, __B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_unpackhi_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, (__m512i)(__v32hi){32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}), 4, 36, 5, 37, 6, 38, 7, 39, 12, 44, 13, 45, 14, 46, 15, 47, 20, 52, 21, 53, 22, 54, 23, 55, 28, 60, 29, 61, 30, 62, 31, 63));
+
__m512i test_mm512_mask_unpackhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_unpackhi_epi16
+ // CHECK-LABEL: test_mm512_mask_unpackhi_epi16
// CHECK: shufflevector <32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i32> <i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_unpackhi_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_unpackhi_epi16
+ // CHECK-LABEL: test_mm512_maskz_unpackhi_epi16
// CHECK: shufflevector <32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i32> <i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_unpackhi_epi16(__U, __A, __B);
}
__m512i test_mm512_unpacklo_epi8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_unpacklo_epi8
+ // CHECK-LABEL: test_mm512_unpacklo_epi8
// CHECK: shufflevector <64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119>
return _mm512_unpacklo_epi8(__A, __B);
}
+TEST_CONSTEXPR(match_v64qi(_mm512_unpacklo_epi8((__m512i)(__v64qi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}, (__m512i)(__v64qi){64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127}), 0, 64, 1, 65, 2, 66, 3, 67, 4, 68, 5, 69, 6, 70, 7, 71, 16, 80, 17, 81, 18, 82, 19, 83, 20, 84, 21, 85, 22, 86, 23, 87, 32, 96, 33, 97, 34, 98, 35, 99, 36, 100, 37, 101, 38, 102, 39, 103, 48, 112, 49, 113, 50, 114, 51, 115, 52, 116, 53, 117, 54, 118, 55, 119));
__m512i test_mm512_mask_unpacklo_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_unpacklo_epi8
+ // CHECK-LABEL: test_mm512_mask_unpacklo_epi8
// CHECK: shufflevector <64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119>
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_unpacklo_epi8(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_unpacklo_epi8
+ // CHECK-LABEL: test_mm512_maskz_unpacklo_epi8
// CHECK: shufflevector <64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119>
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_unpacklo_epi8(__U, __A, __B);
}
__m512i test_mm512_unpacklo_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_unpacklo_epi16
+ // CHECK-LABEL: test_mm512_unpacklo_epi16
// CHECK: shufflevector <32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59>
return _mm512_unpacklo_epi16(__A, __B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_unpacklo_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, (__m512i)(__v32hi){32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}), 0, 32, 1, 33, 2, 34, 3, 35, 8, 40, 9, 41, 10, 42, 11, 43, 16, 48, 17, 49, 18, 50, 19, 51, 24, 56, 25, 57, 26, 58, 27, 59));
__m512i test_mm512_mask_unpacklo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_unpacklo_epi16
+ // CHECK-LABEL: test_mm512_mask_unpacklo_epi16
// CHECK: shufflevector <32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59>
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_unpacklo_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_unpacklo_epi16
+ // CHECK-LABEL: test_mm512_maskz_unpacklo_epi16
// CHECK: shufflevector <32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59>
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_unpacklo_epi16(__U, __A, __B);
}
__m512i test_mm512_cvtepi8_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm512_cvtepi8_epi16
+ // CHECK-LABEL: test_mm512_cvtepi8_epi16
// CHECK: sext <32 x i8> %{{.*}} to <32 x i16>
return _mm512_cvtepi8_epi16(__A);
}
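+// sext is value-preserving: every i8 lane widens to the identical i16 value.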
+TEST_CONSTEXPR(match_v32hi(_mm512_cvtepi8_epi16((__m256i)(__v32qs){-3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20, 21, -22, 23, -24, 25, -26, 27, -28}), -3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20, 21, -22, 23, -24, 25, -26, 27, -28));
__m512i test_mm512_mask_cvtepi8_epi16(__m512i __W, __mmask32 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtepi8_epi16
+ // CHECK-LABEL: test_mm512_mask_cvtepi8_epi16
// CHECK: sext <32 x i8> %{{.*}} to <32 x i16>
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_cvtepi8_epi16(__W, __U, __A);
}
__m512i test_mm512_maskz_cvtepi8_epi16(__mmask32 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtepi8_epi16
+ // CHECK-LABEL: test_mm512_maskz_cvtepi8_epi16
// CHECK: sext <32 x i8> %{{.*}} to <32 x i16>
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_cvtepi8_epi16(__U, __A);
}
__m512i test_mm512_cvtepu8_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm512_cvtepu8_epi16
+ // CHECK-LABEL: test_mm512_cvtepu8_epi16
// CHECK: zext <32 x i8> %{{.*}} to <32 x i16>
return _mm512_cvtepu8_epi16(__A);
}
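+// zext widens the raw byte pattern, so negative i8 inputs reappear as 256 + x: -3 (0xFD) -> 253, -28 (0xE4) -> 228.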
+TEST_CONSTEXPR(match_v32hi(_mm512_cvtepu8_epi16((__m256i)(__v32qs){-3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20, 21, -22, 23, -24, 25, -26, 27, -28}), 253, 2, 255, 0, 1, 254, 3, 252, 5, 250, 7, 248, 9, 246, 11, 244, 13, 242, 15, 240, 17, 238, 19, 236, 21, 234, 23, 232, 25, 230, 27, 228));
__m512i test_mm512_mask_cvtepu8_epi16(__m512i __W, __mmask32 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtepu8_epi16
+ // CHECK-LABEL: test_mm512_mask_cvtepu8_epi16
// CHECK: zext <32 x i8> %{{.*}} to <32 x i16>
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_cvtepu8_epi16(__W, __U, __A);
}
__m512i test_mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtepu8_epi16
+ // CHECK-LABEL: test_mm512_maskz_cvtepu8_epi16
// CHECK: zext <32 x i8> %{{.*}} to <32 x i16>
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_cvtepu8_epi16(__U, __A);
}
__m512i test_mm512_shufflehi_epi16(__m512i __A) {
- // CHECK-LABEL: @test_mm512_shufflehi_epi16
+ // CHECK-LABEL: test_mm512_shufflehi_epi16
// CHECK: shufflevector <32 x i16> %{{.*}}, <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 5, i32 4, i32 4, i32 8, i32 9, i32 10, i32 11, i32 13, i32 13, i32 12, i32 12, i32 16, i32 17, i32 18, i32 19, i32 21, i32 21, i32 20, i32 20, i32 24, i32 25, i32 26, i32 27, i32 29, i32 29, i32 28, i32 28>
return _mm512_shufflehi_epi16(__A, 5);
}
__m512i test_mm512_mask_shufflehi_epi16(__m512i __W, __mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_shufflehi_epi16
+ // CHECK-LABEL: test_mm512_mask_shufflehi_epi16
// CHECK: shufflevector <32 x i16> %{{.*}}, <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 5, i32 4, i32 4, i32 8, i32 9, i32 10, i32 11, i32 13, i32 13, i32 12, i32 12, i32 16, i32 17, i32 18, i32 19, i32 21, i32 21, i32 20, i32 20, i32 24, i32 25, i32 26, i32 27, i32 29, i32 29, i32 28, i32 28>
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_shufflehi_epi16(__W, __U, __A, 5);
}
__m512i test_mm512_maskz_shufflehi_epi16(__mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_shufflehi_epi16
+ // CHECK-LABEL: test_mm512_maskz_shufflehi_epi16
// CHECK: shufflevector <32 x i16> %{{.*}}, <32 x i16> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 5, i32 4, i32 4, i32 8, i32 9, i32 10, i32 11, i32 13, i32 13, i32 12, i32 12, i32 16, i32 17, i32 18, i32 19, i32 21, i32 21, i32 20, i32 20, i32 24, i32 25, i32 26, i32 27, i32 29, i32 29, i32 28, i32 28>
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_shufflehi_epi16(__U, __A, 5);
}
__m512i test_mm512_shufflelo_epi16(__m512i __A) {
- // CHECK-LABEL: @test_mm512_shufflelo_epi16
+ // CHECK-LABEL: test_mm512_shufflelo_epi16
// CHECK: shufflevector <32 x i16> %{{.*}}, <32 x i16> poison, <32 x i32> <i32 1, i32 1, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7, i32 9, i32 9, i32 8, i32 8, i32 12, i32 13, i32 14, i32 15, i32 17, i32 17, i32 16, i32 16, i32 20, i32 21, i32 22, i32 23, i32 25, i32 25, i32 24, i32 24, i32 28, i32 29, i32 30, i32 31>
return _mm512_shufflelo_epi16(__A, 5);
}
__m512i test_mm512_mask_shufflelo_epi16(__m512i __W, __mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_shufflelo_epi16
+ // CHECK-LABEL: test_mm512_mask_shufflelo_epi16
// CHECK: shufflevector <32 x i16> %{{.*}}, <32 x i16> poison, <32 x i32> <i32 1, i32 1, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7, i32 9, i32 9, i32 8, i32 8, i32 12, i32 13, i32 14, i32 15, i32 17, i32 17, i32 16, i32 16, i32 20, i32 21, i32 22, i32 23, i32 25, i32 25, i32 24, i32 24, i32 28, i32 29, i32 30, i32 31>
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_shufflelo_epi16(__W, __U, __A, 5);
}
__m512i test_mm512_maskz_shufflelo_epi16(__mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_shufflelo_epi16
+ // CHECK-LABEL: test_mm512_maskz_shufflelo_epi16
// CHECK: shufflevector <32 x i16> %{{.*}}, <32 x i16> poison, <32 x i32> <i32 1, i32 1, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7, i32 9, i32 9, i32 8, i32 8, i32 12, i32 13, i32 14, i32 15, i32 17, i32 17, i32 16, i32 16, i32 20, i32 21, i32 22, i32 23, i32 25, i32 25, i32 24, i32 24, i32 28, i32 29, i32 30, i32 31>
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_shufflelo_epi16(__U, __A, 5);
}
__m512i test_mm512_sllv_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_sllv_epi16
+ // CHECK-LABEL: test_mm512_sllv_epi16
// CHECK: @llvm.x86.avx512.psllv.w.512(
return _mm512_sllv_epi16(__A, __B);
}
__m512i test_mm512_mask_sllv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_sllv_epi16
+ // CHECK-LABEL: test_mm512_mask_sllv_epi16
// CHECK: @llvm.x86.avx512.psllv.w.512(
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_sllv_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_sllv_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_sllv_epi16
+ // CHECK-LABEL: test_mm512_maskz_sllv_epi16
// CHECK: @llvm.x86.avx512.psllv.w.512(
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_sllv_epi16(__U, __A, __B);
}
__m512i test_mm512_sll_epi16(__m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_sll_epi16
+ // CHECK-LABEL: test_mm512_sll_epi16
// CHECK: @llvm.x86.avx512.psll.w.512
return _mm512_sll_epi16(__A, __B);
}
__m512i test_mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_mask_sll_epi16
+ // CHECK-LABEL: test_mm512_mask_sll_epi16
// CHECK: @llvm.x86.avx512.psll.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_sll_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_maskz_sll_epi16
+ // CHECK-LABEL: test_mm512_maskz_sll_epi16
// CHECK: @llvm.x86.avx512.psll.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_sll_epi16(__U, __A, __B);
}
__m512i test_mm512_slli_epi16(__m512i __A) {
- // CHECK-LABEL: @test_mm512_slli_epi16
+ // CHECK-LABEL: test_mm512_slli_epi16
// CHECK: @llvm.x86.avx512.pslli.w.512
return _mm512_slli_epi16(__A, 5);
}
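+// Shift counts of 16 or more move every bit out of an i16 lane, so the count-16 and count-17 vectors below are all zeros.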
+TEST_CONSTEXPR(match_v32hi(_mm512_slli_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, 0), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
+TEST_CONSTEXPR(match_v32hi(_mm512_slli_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, 1), 0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e));
+TEST_CONSTEXPR(match_v32hi(_mm512_slli_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, 15), 0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000, 0x0, 0x8000));
+TEST_CONSTEXPR(match_v32hi(_mm512_slli_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, 16), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v32hi(_mm512_slli_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, 17), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
__m512i test_mm512_slli_epi16_2(__m512i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm512_slli_epi16_2
+ // CHECK-LABEL: test_mm512_slli_epi16_2
// CHECK: @llvm.x86.avx512.pslli.w.512
return _mm512_slli_epi16(__A, __B);
}
__m512i test_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_slli_epi16
+ // CHECK-LABEL: test_mm512_mask_slli_epi16
// CHECK: @llvm.x86.avx512.pslli.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_slli_epi16(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_slli_epi16((__m512i)(__v32hi){100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}, (__mmask32)~(__mmask32)0, (__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, 1), 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e));
__m512i test_mm512_mask_slli_epi16_2(__m512i __W, __mmask32 __U, __m512i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm512_mask_slli_epi16_2
+ // CHECK-LABEL: test_mm512_mask_slli_epi16_2
// CHECK: @llvm.x86.avx512.pslli.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_slli_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_slli_epi16
+ // CHECK-LABEL: test_mm512_maskz_slli_epi16
// CHECK: @llvm.x86.avx512.pslli.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_slli_epi16(__U, __A, 5);
}
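+// maskz semantics: lanes whose mask bit is clear are zeroed instead of merged,
+// so a zero mask yields an all-zero result regardless of the shift count.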
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_slli_epi16((__mmask32)0x00ffcc71, (__m512i)(__v32hi){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 16), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_slli_epi16((__mmask32)0, (__m512i)(__v32hi){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 16), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_slli_epi16((__mmask32)0xffffffff, (__m512i)(__v32hi){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0x1fe, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x1fe, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e));
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_slli_epi16((__mmask32)0x7fffffff, (__m512i)(__v32hi){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0x1fe, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x1fe, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0));
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_slli_epi16((__mmask32)0x71ccff00, (__m512i)(__v32hi){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0, 0, 0, 0, 0, 0, 0, 0, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0, 0, 0x4, 0x6, 0, 0, 0xc, 0xe, 0x10, 0, 0, 0, 0x18, 0x1a, 0x1c, 0));
__m512i test_mm512_maskz_slli_epi16_2(__mmask32 __U, __m512i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm512_maskz_slli_epi16_2
+ // CHECK-LABEL: test_mm512_maskz_slli_epi16_2
// CHECK: @llvm.x86.avx512.pslli.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_slli_epi16(__U, __A, __B);
}
__m512i test_mm512_bslli_epi128(__m512i __A) {
- // CHECK-LABEL: @test_mm512_bslli_epi128
+ // CHECK-LABEL: test_mm512_bslli_epi128
// CHECK: shufflevector <64 x i8> zeroinitializer, <64 x i8> %{{.*}}, <64 x i32> <i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122>
return _mm512_bslli_epi128(__A, 5);
}
__m512i test_mm512_srlv_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_srlv_epi16
+ // CHECK-LABEL: test_mm512_srlv_epi16
// CHECK: @llvm.x86.avx512.psrlv.w.512(
return _mm512_srlv_epi16(__A, __B);
}
__m512i test_mm512_mask_srlv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_srlv_epi16
+ // CHECK-LABEL: test_mm512_mask_srlv_epi16
// CHECK: @llvm.x86.avx512.psrlv.w.512(
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_srlv_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_srlv_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_srlv_epi16
+ // CHECK-LABEL: test_mm512_maskz_srlv_epi16
// CHECK: @llvm.x86.avx512.psrlv.w.512(
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_srlv_epi16(__U, __A, __B);
}
__m512i test_mm512_srav_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_srav_epi16
+ // CHECK-LABEL: test_mm512_srav_epi16
// CHECK: @llvm.x86.avx512.psrav.w.512(
return _mm512_srav_epi16(__A, __B);
}
__m512i test_mm512_mask_srav_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_srav_epi16
+ // CHECK-LABEL: test_mm512_mask_srav_epi16
// CHECK: @llvm.x86.avx512.psrav.w.512(
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_srav_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_srav_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_srav_epi16
+ // CHECK-LABEL: test_mm512_maskz_srav_epi16
// CHECK: @llvm.x86.avx512.psrav.w.512(
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_srav_epi16(__U, __A, __B);
}
__m512i test_mm512_sra_epi16(__m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_sra_epi16
+ // CHECK-LABEL: test_mm512_sra_epi16
// CHECK: @llvm.x86.avx512.psra.w.512
return _mm512_sra_epi16(__A, __B);
}
__m512i test_mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_mask_sra_epi16
+ // CHECK-LABEL: test_mm512_mask_sra_epi16
// CHECK: @llvm.x86.avx512.psra.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_sra_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_maskz_sra_epi16
+ // CHECK-LABEL: test_mm512_maskz_sra_epi16
// CHECK: @llvm.x86.avx512.psra.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_sra_epi16(__U, __A, __B);
}
__m512i test_mm512_srai_epi16(__m512i __A) {
- // CHECK-LABEL: @test_mm512_srai_epi16
+ // CHECK-LABEL: test_mm512_srai_epi16
// CHECK: @llvm.x86.avx512.psrai.w.512
return _mm512_srai_epi16(__A, 5);
}
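+// srai is an arithmetic right shift that replicates the sign bit; the small
+// positive inputs below all collapse to zero when shifted by 10.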
+TEST_CONSTEXPR(match_v32hi(_mm512_srai_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, 10), 0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0));
__m512i test_mm512_srai_epi16_2(__m512i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm512_srai_epi16_2
+ // CHECK-LABEL: test_mm512_srai_epi16_2
// CHECK: @llvm.x86.avx512.psrai.w.512
return _mm512_srai_epi16(__A, __B);
}
__m512i test_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_srai_epi16
+ // CHECK-LABEL: test_mm512_mask_srai_epi16
// CHECK: @llvm.x86.avx512.psrai.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_srai_epi16(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_srai_epi16((__m512i)(__v32hi){100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}, (__mmask32)~(__mmask32)0, (__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, 1), 0x0, 0x0, 0x1, 0x1, 0x2, 0x2, 0x3, 0x3, 0x4, 0x4, 0x5, 0x5, 0x6, 0x6, 0x7, 0x7, 0x8, 0x8, 0x9, 0x9, 0xa, 0xa, 0xb, 0xb, 0xc, 0xc, 0xd, 0xd, 0xe, 0xe, 0xf, 0xf));
__m512i test_mm512_mask_srai_epi16_2(__m512i __W, __mmask32 __U, __m512i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm512_mask_srai_epi16_2
+ // CHECK-LABEL: test_mm512_mask_srai_epi16_2
// CHECK: @llvm.x86.avx512.psrai.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_srai_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_srai_epi16
+ // CHECK-LABEL: test_mm512_maskz_srai_epi16
// CHECK: @llvm.x86.avx512.psrai.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_srai_epi16(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_srai_epi16((__mmask32)0xAAAAAAAA, (__m512i)(__v32hi){-32768, 32767, -3, -2, -1, 0, 1, 2, -1234, 1234, -32767, 32766, -5, 5, -256, 256, -42, 42, -7, 7, -30000, 30000, -1, -1, 0, -2, 2, -32768, 32767, -32768, -123, 123 }, 5), 0, 1023, 0, -1, 0, 0, 0, 0, 0, 38, 0, 1023, 0, 0, 0, 8, 0, 1, 0, 0, 0, 937, 0, -1, 0, -1, 0, -1024, 0, -1024, 0, 3 ));
__m512i test_mm512_maskz_srai_epi16_2(__mmask32 __U, __m512i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm512_maskz_srai_epi16_2
+ // CHECK-LABEL: test_mm512_maskz_srai_epi16_2
// CHECK: @llvm.x86.avx512.psrai.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_srai_epi16(__U, __A, __B);
}
__m512i test_mm512_srl_epi16(__m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_srl_epi16
+ // CHECK-LABEL: test_mm512_srl_epi16
// CHECK: @llvm.x86.avx512.psrl.w.512
return _mm512_srl_epi16(__A, __B);
}
__m512i test_mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_mask_srl_epi16
+ // CHECK-LABEL: test_mm512_mask_srl_epi16
// CHECK: @llvm.x86.avx512.psrl.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_srl_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_maskz_srl_epi16
+ // CHECK-LABEL: test_mm512_maskz_srl_epi16
// CHECK: @llvm.x86.avx512.psrl.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_srl_epi16(__U, __A, __B);
}
__m512i test_mm512_srli_epi16(__m512i __A) {
- // CHECK-LABEL: @test_mm512_srli_epi16
+ // CHECK-LABEL: test_mm512_srli_epi16
// CHECK: @llvm.x86.avx512.psrli.w.512
return _mm512_srli_epi16(__A, 5);
}
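+// srli is a logical right shift that fills with zeros; every input below is
+// smaller than 1 << 15, so shifting by 15 leaves nothing.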
+TEST_CONSTEXPR(match_v32hi(_mm512_srli_epi16((__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, 15), 0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0));
__m512i test_mm512_srli_epi16_2(__m512i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm512_srli_epi16_2
+ // CHECK-LABEL: test_mm512_srli_epi16_2
// CHECK: @llvm.x86.avx512.psrli.w.512
return _mm512_srli_epi16(__A, __B);
}
__m512i test_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_srli_epi16
+ // CHECK-LABEL: test_mm512_mask_srli_epi16
// CHECK: @llvm.x86.avx512.psrli.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_srli_epi16(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_srli_epi16((__m512i)(__v32hi){100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}, (__mmask32)~(__mmask32)0, (__m512i)(__v32hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, 1), 0x0, 0x0, 0x1, 0x1, 0x2, 0x2, 0x3, 0x3, 0x4, 0x4, 0x5, 0x5, 0x6, 0x6, 0x7, 0x7, 0x8, 0x8, 0x9, 0x9, 0xa, 0xa, 0xb, 0xb, 0xc, 0xc, 0xd, 0xd, 0xe, 0xe, 0xf, 0xf));
__m512i test_mm512_mask_srli_epi16_2(__m512i __W, __mmask32 __U, __m512i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm512_mask_srli_epi16_2
+ // CHECK-LABEL: test_mm512_mask_srli_epi16_2
// CHECK: @llvm.x86.avx512.psrli.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_srli_epi16(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_srli_epi16
+ // CHECK-LABEL: test_mm512_maskz_srli_epi16
// CHECK: @llvm.x86.avx512.psrli.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_srli_epi16(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_srli_epi16((__mmask32)0x71ccff00, (__m512i)(__v32hi){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0, 0, 0, 0, 0, 0, 0, 0, 0x4, 0x4, 0x5, 0x5, 0x6, 0x6, 0x7, 0x7, 0, 0, 0x1, 0x1, 0, 0, 0x3, 0x3, 0x4, 0, 0, 0, 0x6, 0x6, 0x7, 0 ));
__m512i test_mm512_maskz_srli_epi16_2(__mmask32 __U, __m512i __A, int __B) {
- // CHECK-LABEL: @test_mm512_maskz_srli_epi16_2
+ // CHECK-LABEL: test_mm512_maskz_srli_epi16_2
// CHECK: @llvm.x86.avx512.psrli.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_srli_epi16(__U, __A, __B);
}
__m512i test_mm512_bsrli_epi128(__m512i __A) {
- // CHECK-LABEL: @test_mm512_bsrli_epi128
+ // CHECK-LABEL: test_mm512_bsrli_epi128
// CHECK: shufflevector <64 x i8> %{{.*}}, <64 x i8> zeroinitializer, <64 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 65, i32 66, i32 67, i32 68, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 81, i32 82, i32 83, i32 84, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 97, i32 98, i32 99, i32 100, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112, i32 113, i32 114, i32 115, i32 116>
return _mm512_bsrli_epi128(__A, 5);
}
__m512i test_mm512_mask_mov_epi16(__m512i __W, __mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_mov_epi16
+ // CHECK-LABEL: test_mm512_mask_mov_epi16
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_mov_epi16(__W, __U, __A);
}
__m512i test_mm512_maskz_mov_epi16(__mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_mov_epi16
+ // CHECK-LABEL: test_mm512_maskz_mov_epi16
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_mov_epi16(__U, __A);
}
__m512i test_mm512_mask_mov_epi8(__m512i __W, __mmask64 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_mov_epi8
+ // CHECK-LABEL: test_mm512_mask_mov_epi8
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_mov_epi8(__W, __U, __A);
}
__m512i test_mm512_maskz_mov_epi8(__mmask64 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_mov_epi8
+ // CHECK-LABEL: test_mm512_maskz_mov_epi8
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_mov_epi8(__U, __A);
}
__m512i test_mm512_mask_set1_epi8(__m512i __O, __mmask64 __M, char __A) {
- // CHECK-LABEL: @test_mm512_mask_set1_epi8
+ // CHECK-LABEL: test_mm512_mask_set1_epi8
// CHECK: insertelement <64 x i8> poison, i8 %{{.*}}, i32 0
// CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 1
// CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 2
@@ -1956,7 +2026,7 @@ __m512i test_mm512_mask_set1_epi8(__m512i __O, __mmask64 __M, char __A) {
}
__m512i test_mm512_maskz_set1_epi8(__mmask64 __M, char __A) {
- // CHECK-LABEL: @test_mm512_maskz_set1_epi8
+ // CHECK-LABEL: test_mm512_maskz_set1_epi8
// CHECK: insertelement <64 x i8> poison, i8 %{{.*}}, i32 0
// CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 1
// CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 2
@@ -2026,7 +2096,7 @@ __m512i test_mm512_maskz_set1_epi8(__mmask64 __M, char __A) {
}
__mmask64 test_mm512_kunpackd(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_mm512_kunpackd
+ // CHECK-LABEL: test_mm512_kunpackd
// CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: [[LHS2:%.*]] = shufflevector <64 x i1> [[LHS]], <64 x i1> [[LHS]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -2036,7 +2106,7 @@ __mmask64 test_mm512_kunpackd(__m512i __A, __m512i __B, __m512i __C, __m512i __D
}
__mmask32 test_mm512_kunpackw(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_mm512_kunpackw
+ // CHECK-LABEL: test_mm512_kunpackw
// CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: [[LHS2:%.*]] = shufflevector <32 x i1> [[LHS]], <32 x i1> [[LHS]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -2047,74 +2117,74 @@ __mmask32 test_mm512_kunpackw(__m512i __A, __m512i __B, __m512i __C, __m512i __D
__m512i test_mm512_loadu_epi16 (void *__P)
{
- // CHECK-LABEL: @test_mm512_loadu_epi16
+ // CHECK-LABEL: test_mm512_loadu_epi16
// CHECK: load <8 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm512_loadu_epi16 (__P);
}
__m512i test_mm512_mask_loadu_epi16(__m512i __W, __mmask32 __U, void const *__P) {
- // CHECK-LABEL: @test_mm512_mask_loadu_epi16
+ // CHECK-LABEL: test_mm512_mask_loadu_epi16
// CHECK: @llvm.masked.load.v32i16.p0(ptr %{{.*}}, i32 1, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_mask_loadu_epi16(__W, __U, __P);
}
__m512i test_mm512_maskz_loadu_epi16(__mmask32 __U, void const *__P) {
- // CHECK-LABEL: @test_mm512_maskz_loadu_epi16
+ // CHECK-LABEL: test_mm512_maskz_loadu_epi16
// CHECK: @llvm.masked.load.v32i16.p0(ptr %{{.*}}, i32 1, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_maskz_loadu_epi16(__U, __P);
}
__m512i test_mm512_loadu_epi8 (void *__P)
{
- // CHECK-LABEL: @test_mm512_loadu_epi8
+ // CHECK-LABEL: test_mm512_loadu_epi8
// CHECK: load <8 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm512_loadu_epi8 (__P);
}
__m512i test_mm512_mask_loadu_epi8(__m512i __W, __mmask64 __U, void const *__P) {
- // CHECK-LABEL: @test_mm512_mask_loadu_epi8
+ // CHECK-LABEL: test_mm512_mask_loadu_epi8
// CHECK: @llvm.masked.load.v64i8.p0(ptr %{{.*}}, i32 1, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_mask_loadu_epi8(__W, __U, __P);
}
__m512i test_mm512_maskz_loadu_epi8(__mmask64 __U, void const *__P) {
- // CHECK-LABEL: @test_mm512_maskz_loadu_epi8
+ // CHECK-LABEL: test_mm512_maskz_loadu_epi8
// CHECK: @llvm.masked.load.v64i8.p0(ptr %{{.*}}, i32 1, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_maskz_loadu_epi8(__U, __P);
}
void test_mm512_storeu_epi16(void *__P, __m512i __A) {
- // CHECK-LABEL: @test_mm512_storeu_epi16
+ // CHECK-LABEL: test_mm512_storeu_epi16
// CHECK: store <8 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm512_storeu_epi16(__P, __A);
}
void test_mm512_mask_storeu_epi16(void *__P, __mmask32 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_storeu_epi16
+ // CHECK-LABEL: test_mm512_mask_storeu_epi16
// CHECK: @llvm.masked.store.v32i16.p0(<32 x i16> %{{.*}}, ptr %{{.*}}, i32 1, <32 x i1> %{{.*}})
return _mm512_mask_storeu_epi16(__P, __U, __A);
}
__mmask64 test_mm512_test_epi8_mask(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_test_epi8_mask
+ // CHECK-LABEL: test_mm512_test_epi8_mask
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
// CHECK: icmp ne <64 x i8> %{{.*}}, %{{.*}}
return _mm512_test_epi8_mask(__A, __B);
}
void test_mm512_storeu_epi8(void *__P, __m512i __A) {
- // CHECK-LABEL: @test_mm512_storeu_epi8
+ // CHECK-LABEL: test_mm512_storeu_epi8
// CHECK: store <8 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm512_storeu_epi8(__P, __A);
}
void test_mm512_mask_storeu_epi8(void *__P, __mmask64 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_storeu_epi8
+ // CHECK-LABEL: test_mm512_mask_storeu_epi8
// CHECK: @llvm.masked.store.v64i8.p0(<64 x i8> %{{.*}}, ptr %{{.*}}, i32 1, <64 x i1> %{{.*}})
return _mm512_mask_storeu_epi8(__P, __U, __A);
}
__mmask64 test_mm512_mask_test_epi8_mask(__mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_test_epi8_mask
+ // CHECK-LABEL: test_mm512_mask_test_epi8_mask
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
// CHECK: icmp ne <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
@@ -2122,14 +2192,14 @@ __mmask64 test_mm512_mask_test_epi8_mask(__mmask64 __U, __m512i __A, __m512i __B
}
__mmask32 test_mm512_test_epi16_mask(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_test_epi16_mask
+ // CHECK-LABEL: test_mm512_test_epi16_mask
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
// CHECK: icmp ne <32 x i16> %{{.*}}, %{{.*}}
return _mm512_test_epi16_mask(__A, __B);
}
__mmask32 test_mm512_mask_test_epi16_mask(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_test_epi16_mask
+ // CHECK-LABEL: test_mm512_mask_test_epi16_mask
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
// CHECK: icmp ne <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
@@ -2137,14 +2207,14 @@ __mmask32 test_mm512_mask_test_epi16_mask(__mmask32 __U, __m512i __A, __m512i __
}
__mmask64 test_mm512_testn_epi8_mask(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_testn_epi8_mask
+ // CHECK-LABEL: test_mm512_testn_epi8_mask
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
// CHECK: icmp eq <64 x i8> %{{.*}}, %{{.*}}
return _mm512_testn_epi8_mask(__A, __B);
}
__mmask64 test_mm512_mask_testn_epi8_mask(__mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_testn_epi8_mask
+ // CHECK-LABEL: test_mm512_mask_testn_epi8_mask
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
// CHECK: icmp eq <64 x i8> %{{.*}}, %{{.*}}
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
@@ -2152,14 +2222,14 @@ __mmask64 test_mm512_mask_testn_epi8_mask(__mmask64 __U, __m512i __A, __m512i __
}
__mmask32 test_mm512_testn_epi16_mask(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_testn_epi16_mask
+ // CHECK-LABEL: test_mm512_testn_epi16_mask
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
// CHECK: icmp eq <32 x i16> %{{.*}}, %{{.*}}
return _mm512_testn_epi16_mask(__A, __B);
}
__mmask32 test_mm512_mask_testn_epi16_mask(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_testn_epi16_mask
+ // CHECK-LABEL: test_mm512_mask_testn_epi16_mask
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
// CHECK: icmp eq <32 x i16> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
@@ -2167,67 +2237,69 @@ __mmask32 test_mm512_mask_testn_epi16_mask(__mmask32 __U, __m512i __A, __m512i _
}
__mmask64 test_mm512_movepi8_mask(__m512i __A) {
- // CHECK-LABEL: @test_mm512_movepi8_mask
+ // CHECK-LABEL: test_mm512_movepi8_mask
// CHECK: [[CMP:%.*]] = icmp slt <64 x i8> %{{.*}}, zeroinitializer
return _mm512_movepi8_mask(__A);
}
__m512i test_mm512_movm_epi8(__mmask64 __A) {
- // CHECK-LABEL: @test_mm512_movm_epi8
+ // CHECK-LABEL: test_mm512_movm_epi8
// CHECK: %{{.*}} = bitcast i64 %{{.*}} to <64 x i1>
// CHECK: %vpmovm2.i = sext <64 x i1> %{{.*}} to <64 x i8>
return _mm512_movm_epi8(__A);
}
__m512i test_mm512_movm_epi16(__mmask32 __A) {
- // CHECK-LABEL: @test_mm512_movm_epi16
+ // CHECK-LABEL: test_mm512_movm_epi16
// CHECK: %{{.*}} = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: %vpmovm2.i = sext <32 x i1> %{{.*}} to <32 x i16>
return _mm512_movm_epi16(__A);
}
__m512i test_mm512_broadcastb_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm512_broadcastb_epi8
+ // CHECK-LABEL: test_mm512_broadcastb_epi8
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <64 x i32> zeroinitializer
return _mm512_broadcastb_epi8(__A);
}
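+// broadcastb replicates byte 0 of the 128-bit source across all 64 lanes, so
+// only the first initializer element below is significant.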
+TEST_CONSTEXPR(match_v64qi(_mm512_broadcastb_epi8((__m128i)(__v16qi){42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42));
__m512i test_mm512_mask_broadcastb_epi8(__m512i __O, __mmask64 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcastb_epi8
+ // CHECK-LABEL: test_mm512_mask_broadcastb_epi8
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <64 x i32> zeroinitializer
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_broadcastb_epi8(__O, __M, __A);
}
__m512i test_mm512_maskz_broadcastb_epi8(__mmask64 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcastb_epi8
+ // CHECK-LABEL: test_mm512_maskz_broadcastb_epi8
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <64 x i32> zeroinitializer
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_broadcastb_epi8(__M, __A);
}
__m512i test_mm512_broadcastw_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm512_broadcastw_epi16
+ // CHECK-LABEL: test_mm512_broadcastw_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <32 x i32> zeroinitializer
return _mm512_broadcastw_epi16(__A);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_broadcastw_epi16((__m128i)(__v8hi){42, 3, 10, 8, 0, 256, 256, 128}), 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42));
__m512i test_mm512_mask_broadcastw_epi16(__m512i __O, __mmask32 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcastw_epi16
+ // CHECK-LABEL: test_mm512_mask_broadcastw_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <32 x i32> zeroinitializer
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_broadcastw_epi16(__O, __M, __A);
}
__m512i test_mm512_maskz_broadcastw_epi16(__mmask32 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcastw_epi16
+ // CHECK-LABEL: test_mm512_maskz_broadcastw_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <32 x i32> zeroinitializer
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_broadcastw_epi16(__M, __A);
}
__m512i test_mm512_mask_set1_epi16(__m512i __O, __mmask32 __M, short __A) {
- // CHECK-LABEL: @test_mm512_mask_set1_epi16
+ // CHECK-LABEL: test_mm512_mask_set1_epi16
// CHECK: insertelement <32 x i16> poison, i16 %{{.*}}, i32 0
// CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 1
// CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 2
@@ -2265,7 +2337,7 @@ __m512i test_mm512_mask_set1_epi16(__m512i __O, __mmask32 __M, short __A) {
}
__m512i test_mm512_maskz_set1_epi16(__mmask32 __M, short __A) {
- // CHECK-LABEL: @test_mm512_maskz_set1_epi16
+ // CHECK-LABEL: test_mm512_maskz_set1_epi16
// CHECK: insertelement <32 x i16> poison, i16 %{{.*}}, i32 0
// CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 1
// CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 2
@@ -2302,39 +2374,39 @@ __m512i test_mm512_maskz_set1_epi16(__mmask32 __M, short __A) {
return _mm512_maskz_set1_epi16(__M, __A);
}
__m512i test_mm512_permutexvar_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_permutexvar_epi16
+ // CHECK-LABEL: test_mm512_permutexvar_epi16
// CHECK: @llvm.x86.avx512.permvar.hi.512
return _mm512_permutexvar_epi16(__A, __B);
}
__m512i test_mm512_maskz_permutexvar_epi16(__mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_permutexvar_epi16
+ // CHECK-LABEL: test_mm512_maskz_permutexvar_epi16
// CHECK: @llvm.x86.avx512.permvar.hi.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_permutexvar_epi16(__M, __A, __B);
}
__m512i test_mm512_mask_permutexvar_epi16(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_permutexvar_epi16
+ // CHECK-LABEL: test_mm512_mask_permutexvar_epi16
// CHECK: @llvm.x86.avx512.permvar.hi.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_permutexvar_epi16(__W, __M, __A, __B);
}
__m512i test_mm512_alignr_epi8(__m512i __A,__m512i __B){
- // CHECK-LABEL: @test_mm512_alignr_epi8
+ // CHECK-LABEL: test_mm512_alignr_epi8
// CHECK: shufflevector <64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 65, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 81, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 97, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112, i32 113>
return _mm512_alignr_epi8(__A, __B, 2);
}
__m512i test_mm512_mask_alignr_epi8(__m512i __W, __mmask64 __U, __m512i __A,__m512i __B){
- // CHECK-LABEL: @test_mm512_mask_alignr_epi8
+ // CHECK-LABEL: test_mm512_mask_alignr_epi8
// CHECK: shufflevector <64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 65, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 81, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 97, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112, i32 113>
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_alignr_epi8(__W, __U, __A, __B, 2);
}
__m512i test_mm512_maskz_alignr_epi8(__mmask64 __U, __m512i __A,__m512i __B){
- // CHECK-LABEL: @test_mm512_maskz_alignr_epi8
+ // CHECK-LABEL: test_mm512_maskz_alignr_epi8
// CHECK: shufflevector <64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 65, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 81, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 97, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112, i32 113>
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_alignr_epi8(__U, __A, __B, 2);
@@ -2343,54 +2415,54 @@ __m512i test_mm512_maskz_alignr_epi8(__mmask64 __U, __m512i __A,__m512i __B){
__m512i test_mm512_mm_dbsad_epu8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mm_dbsad_epu8
+ // CHECK-LABEL: test_mm512_mm_dbsad_epu8
// CHECK: @llvm.x86.avx512.dbpsadbw.512
return _mm512_dbsad_epu8(__A, __B, 170);
}
__m512i test_mm512_mm_mask_dbsad_epu8(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mm_mask_dbsad_epu8
+ // CHECK-LABEL: test_mm512_mm_mask_dbsad_epu8
// CHECK: @llvm.x86.avx512.dbpsadbw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_dbsad_epu8(__W, __U, __A, __B, 170);
}
__m512i test_mm512_mm_maskz_dbsad_epu8(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mm_maskz_dbsad_epu8
+ // CHECK-LABEL: test_mm512_mm_maskz_dbsad_epu8
// CHECK: @llvm.x86.avx512.dbpsadbw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_dbsad_epu8(__U, __A, __B, 170);
}
__m512i test_mm512_sad_epu8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_sad_epu8
+ // CHECK-LABEL: test_mm512_sad_epu8
// CHECK: @llvm.x86.avx512.psad.bw.512
return _mm512_sad_epu8(__A, __B);
}
__mmask32 test_mm512_movepi16_mask(__m512i __A) {
- // CHECK-LABEL: @test_mm512_movepi16_mask
+ // CHECK-LABEL: test_mm512_movepi16_mask
// CHECK: [[CMP:%.*]] = icmp slt <32 x i16> %{{.*}}, zeroinitializer
return _mm512_movepi16_mask(__A);
}
void test_mm512_mask_cvtepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
{
- // CHECK-LABEL: @test_mm512_mask_cvtepi16_storeu_epi8
+ // CHECK-LABEL: test_mm512_mask_cvtepi16_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.wb.mem.512
_mm512_mask_cvtepi16_storeu_epi8 ( __P, __M, __A);
}
void test_mm512_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
{
- // CHECK-LABEL: @test_mm512_mask_cvtsepi16_storeu_epi8
+ // CHECK-LABEL: test_mm512_mask_cvtsepi16_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.wb.mem.512
_mm512_mask_cvtsepi16_storeu_epi8 ( __P, __M, __A);
}
void test_mm512_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
{
- // CHECK-LABEL: @test_mm512_mask_cvtusepi16_storeu_epi8
+ // CHECK-LABEL: test_mm512_mask_cvtusepi16_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.wb.mem.512
_mm512_mask_cvtusepi16_storeu_epi8 ( __P, __M, __A);
}
diff --git a/clang/test/CodeGen/X86/avx512cd-builtins.c b/clang/test/CodeGen/X86/avx512cd-builtins.c
index 3c1415c..b9d42b7 100644
--- a/clang/test/CodeGen/X86/avx512cd-builtins.c
+++ b/clang/test/CodeGen/X86/avx512cd-builtins.c
@@ -1,79 +1,118 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512cd -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512cd -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512cd -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512cd -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512cd -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512cd -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512cd -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512cd -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512cd -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__m512i test_mm512_conflict_epi64(__m512i __A) {
- // CHECK-LABEL: @test_mm512_conflict_epi64
- // CHECK: @llvm.x86.avx512.conflict.q.512
+ // CHECK-LABEL: test_mm512_conflict_epi64
+ // CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.conflict.q.512(<8 x i64> %{{.*}})
return _mm512_conflict_epi64(__A);
}
__m512i test_mm512_mask_conflict_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_conflict_epi64
- // CHECK: @llvm.x86.avx512.conflict.q.512
+ // CHECK-LABEL: test_mm512_mask_conflict_epi64
+ // CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.conflict.q.512(<8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_conflict_epi64(__W,__U,__A);
}
__m512i test_mm512_maskz_conflict_epi64(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_conflict_epi64
- // CHECK: @llvm.x86.avx512.conflict.q.512
+ // CHECK-LABEL: test_mm512_maskz_conflict_epi64
+ // CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.conflict.q.512(<8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_conflict_epi64(__U,__A);
}
__m512i test_mm512_conflict_epi32(__m512i __A) {
- // CHECK-LABEL: @test_mm512_conflict_epi32
- // CHECK: @llvm.x86.avx512.conflict.d.512
+ // CHECK-LABEL: test_mm512_conflict_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32> %{{.*}})
return _mm512_conflict_epi32(__A);
}
__m512i test_mm512_mask_conflict_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_conflict_epi32
- // CHECK: @llvm.x86.avx512.conflict.d.512
+ // CHECK-LABEL: test_mm512_mask_conflict_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_conflict_epi32(__W,__U,__A);
}
__m512i test_mm512_maskz_conflict_epi32(__mmask16 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_conflict_epi32
- // CHECK: @llvm.x86.avx512.conflict.d.512
+ // CHECK-LABEL: test_mm512_maskz_conflict_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_conflict_epi32(__U,__A);
}
__m512i test_mm512_lzcnt_epi32(__m512i __A) {
- // CHECK-LABEL: @test_mm512_lzcnt_epi32
- // CHECK: call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm512_lzcnt_epi32
+ // CHECK: call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <16 x i32> %{{.*}}, zeroinitializer
+ // CHECK: select <16 x i1> [[ISZERO]], <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_lzcnt_epi32(__A);
}
+
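+// lzcnt counts leading zero bits: a power of two 1 << n maps to 31 - n in a
+// 32-bit lane, and an input of 0 yields the full lane width, 32.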
+TEST_CONSTEXPR(match_v16si(_mm512_lzcnt_epi32((__m512i)(__v16si){1, 2, 4, 8, 16, 32, 64, 128, 3, 5, 6, 7, 9, 10, 11, 12}), 31, 30, 29, 28, 27, 26, 25, 24, 30, 29, 29, 29, 28, 28, 28, 28));
+TEST_CONSTEXPR(match_v16si(_mm512_lzcnt_epi32((__m512i)(__v16si){0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32));
+
__m512i test_mm512_mask_lzcnt_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_lzcnt_epi32
- // CHECK: call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm512_mask_lzcnt_epi32
+ // CHECK: call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <16 x i32> %{{.*}}, zeroinitializer
+ // CHECK: select <16 x i1> [[ISZERO]], <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_lzcnt_epi32(__W,__U,__A);
}
+
+TEST_CONSTEXPR(match_v16si(_mm512_mask_lzcnt_epi32(_mm512_set1_epi32(32), /*1010 1100 1010 1101=*/0xacad, (__m512i)(__v16si){1, 2, 4, 8, 16, 32, 64, 128, 3, 5, 6, 7, 9, 10, 11, 12}), 31, 32, 29, 28, 32, 26, 32, 24, 32, 32, 29, 29, 32, 28, 32, 28));
+
__m512i test_mm512_maskz_lzcnt_epi32(__mmask16 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_lzcnt_epi32
- // CHECK: call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm512_maskz_lzcnt_epi32
+ // CHECK: call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <16 x i32> %{{.*}}, zeroinitializer
+ // CHECK: select <16 x i1> [[ISZERO]], <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_lzcnt_epi32(__U,__A);
}
+
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_lzcnt_epi32(/*1010 1100 1010 1101=*/0xacad, (__m512i)(__v16si){1, 2, 4, 8, 16, 32, 64, 128, 3, 5, 6, 7, 9, 10, 11, 12}), 31, 0, 29, 28, 0, 26, 0, 24, 0, 0, 29, 29, 0, 28, 0, 28));
+
__m512i test_mm512_lzcnt_epi64(__m512i __A) {
- // CHECK-LABEL: @test_mm512_lzcnt_epi64
- // CHECK: call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm512_lzcnt_epi64
+ // CHECK: call {{.*}}<8 x i64> @llvm.ctlz.v8i64(<8 x i64> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <8 x i64> %{{.*}}, zeroinitializer
+ // CHECK: select <8 x i1> [[ISZERO]], <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_lzcnt_epi64(__A);
}
+
+TEST_CONSTEXPR(match_v8di(_mm512_lzcnt_epi64((__m512i)(__v8di){1, 2, 4, 8, 16, 32, 64, 128}), 63, 62, 61, 60, 59, 58, 57, 56));
+TEST_CONSTEXPR(match_v8di(_mm512_lzcnt_epi64((__m512i)(__v8di){0, 0, 0, 0, 0, 0, 0, 0}), 64, 64, 64, 64, 64, 64, 64, 64));
+
__m512i test_mm512_mask_lzcnt_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_lzcnt_epi64
- // CHECK: call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm512_mask_lzcnt_epi64
+ // CHECK: call {{.*}}<8 x i64> @llvm.ctlz.v8i64(<8 x i64> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <8 x i64> %{{.*}}, zeroinitializer
+ // CHECK: select <8 x i1> [[ISZERO]], <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_lzcnt_epi64(__W,__U,__A);
}
+
+TEST_CONSTEXPR(match_v8di(_mm512_mask_lzcnt_epi64(_mm512_set1_epi64((long long) 64), /*0101 0111=*/0x57, (__m512i)(__v8di){1, 2, 4, 8, 16, 32, 64, 128}), 63, 62, 61, 64, 59, 64, 57, 64));
+
__m512i test_mm512_maskz_lzcnt_epi64(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_lzcnt_epi64
- // CHECK: call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm512_maskz_lzcnt_epi64
+ // CHECK: call {{.*}}<8 x i64> @llvm.ctlz.v8i64(<8 x i64> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <8 x i64> %{{.*}}, zeroinitializer
+ // CHECK: select <8 x i1> [[ISZERO]], <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_lzcnt_epi64(__U,__A);
}
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_lzcnt_epi64(/*0101 0111=*/0x57, (__m512i)(__v8di){1, 2, 4, 8, 16, 32, 64, 128}), 63, 62, 61, 0, 59, 0, 57, 0));
+
__m512i test_mm512_broadcastmb_epi64(__m512i a, __m512i b) {
- // CHECK-LABEL: @test_mm512_broadcastmb_epi64
+ // CHECK-LABEL: test_mm512_broadcastmb_epi64
// CHECK: icmp eq <8 x i64> %{{.*}}, %{{.*}}
// CHECK: zext i8 %{{.*}} to i64
// CHECK: insertelement <8 x i64> poison, i64 %{{.*}}, i32 0
@@ -88,7 +127,7 @@ __m512i test_mm512_broadcastmb_epi64(__m512i a, __m512i b) {
}
__m512i test_mm512_broadcastmw_epi32(__m512i a, __m512i b) {
- // CHECK-LABEL: @test_mm512_broadcastmw_epi32
+ // CHECK-LABEL: test_mm512_broadcastmw_epi32
// CHECK: icmp eq <16 x i32> %{{.*}}, %{{.*}}
// CHECK: zext i16 %{{.*}} to i32
// CHECK: insertelement <16 x i32> poison, i32 %{{.*}}
diff --git a/clang/test/CodeGen/X86/avx512dq-builtins.c b/clang/test/CodeGen/X86/avx512dq-builtins.c
index 1ebd369..d2bd780 100644
--- a/clang/test/CodeGen/X86/avx512dq-builtins.c
+++ b/clang/test/CodeGen/X86/avx512dq-builtins.c
@@ -1,17 +1,26 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__mmask8 test_knot_mask8(__mmask8 a) {
- // CHECK-LABEL: @test_knot_mask8
+ // CHECK-LABEL: test_knot_mask8
// CHECK: [[IN:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[NOT:%.*]] = xor <8 x i1> [[IN]], splat (i1 true)
return _knot_mask8(a);
}
__mmask8 test_kand_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kand_mask8
+ // CHECK-LABEL: test_kand_mask8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = and <8 x i1> [[LHS]], [[RHS]]
@@ -21,7 +30,7 @@ __mmask8 test_kand_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m
}
__mmask8 test_kandn_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kandn_mask8
+ // CHECK-LABEL: test_kandn_mask8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[NOT:%.*]] = xor <8 x i1> [[LHS]], splat (i1 true)
@@ -32,7 +41,7 @@ __mmask8 test_kandn_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __
}
__mmask8 test_kor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kor_mask8
+ // CHECK-LABEL: test_kor_mask8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = or <8 x i1> [[LHS]], [[RHS]]
@@ -42,7 +51,7 @@ __mmask8 test_kor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m5
}
__mmask8 test_kxnor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kxnor_mask8
+ // CHECK-LABEL: test_kxnor_mask8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[NOT:%.*]] = xor <8 x i1> [[LHS]], splat (i1 true)
@@ -53,7 +62,7 @@ __mmask8 test_kxnor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __
}
__mmask8 test_kxor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kxor_mask8
+ // CHECK-LABEL: test_kxor_mask8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = xor <8 x i1> [[LHS]], [[RHS]]
@@ -63,7 +72,7 @@ __mmask8 test_kxor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m
}
unsigned char test_kortestz_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_kortestz_mask8_u8
+ // CHECK-LABEL: test_kortestz_mask8_u8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[OR:%.*]] = or <8 x i1> [[LHS]], [[RHS]]
@@ -76,7 +85,7 @@ unsigned char test_kortestz_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m5
}
unsigned char test_kortestc_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_kortestc_mask8_u8
+ // CHECK-LABEL: test_kortestc_mask8_u8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[OR:%.*]] = or <8 x i1> [[LHS]], [[RHS]]
@@ -89,7 +98,7 @@ unsigned char test_kortestc_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m5
}
unsigned char test_kortest_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, unsigned char *CF) {
- // CHECK-LABEL: @test_kortest_mask8_u8
+ // CHECK-LABEL: test_kortest_mask8_u8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[OR:%.*]] = or <8 x i1> [[LHS]], [[RHS]]
@@ -109,7 +118,7 @@ unsigned char test_kortest_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m51
}
unsigned char test_ktestz_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_ktestz_mask8_u8
+ // CHECK-LABEL: test_ktestz_mask8_u8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestz.b(<8 x i1> [[LHS]], <8 x i1> [[RHS]])
@@ -119,7 +128,7 @@ unsigned char test_ktestz_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512
}
unsigned char test_ktestc_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_ktestc_mask8_u8
+ // CHECK-LABEL: test_ktestc_mask8_u8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestc.b(<8 x i1> [[LHS]], <8 x i1> [[RHS]])
@@ -129,7 +138,7 @@ unsigned char test_ktestc_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512
}
unsigned char test_ktest_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, unsigned char *CF) {
- // CHECK-LABEL: @test_ktest_mask8_u8
+ // CHECK-LABEL: test_ktest_mask8_u8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestc.b(<8 x i1> [[LHS]], <8 x i1> [[RHS]])
@@ -143,7 +152,7 @@ unsigned char test_ktest_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i
}
unsigned char test_ktestz_mask16_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_ktestz_mask16_u8
+ // CHECK-LABEL: test_ktestz_mask16_u8
// CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestz.w(<16 x i1> [[LHS]], <16 x i1> [[RHS]])
@@ -153,7 +162,7 @@ unsigned char test_ktestz_mask16_u8(__m512i __A, __m512i __B, __m512i __C, __m51
}
unsigned char test_ktestc_mask16_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_ktestc_mask16_u8
+ // CHECK-LABEL: test_ktestc_mask16_u8
// CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestc.w(<16 x i1> [[LHS]], <16 x i1> [[RHS]])
@@ -163,7 +172,7 @@ unsigned char test_ktestc_mask16_u8(__m512i __A, __m512i __B, __m512i __C, __m51
}
unsigned char test_ktest_mask16_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, unsigned char *CF) {
- // CHECK-LABEL: @test_ktest_mask16_u8
+ // CHECK-LABEL: test_ktest_mask16_u8
// CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestc.w(<16 x i1> [[LHS]], <16 x i1> [[RHS]])
@@ -177,7 +186,7 @@ unsigned char test_ktest_mask16_u8(__m512i __A, __m512i __B, __m512i __C, __m512
}
__mmask8 test_kadd_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kadd_mask8
+ // CHECK-LABEL: test_kadd_mask8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = call <8 x i1> @llvm.x86.avx512.kadd.b(<8 x i1> [[LHS]], <8 x i1> [[RHS]])
@@ -187,7 +196,7 @@ __mmask8 test_kadd_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m
}
__mmask16 test_kadd_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kadd_mask16
+ // CHECK-LABEL: test_kadd_mask16
// CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RES:%.*]] = call <16 x i1> @llvm.x86.avx512.kadd.w(<16 x i1> [[LHS]], <16 x i1> [[RHS]])
@@ -197,79 +206,80 @@ __mmask16 test_kadd_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
}
__mmask8 test_kshiftli_mask8(__m512i A, __m512i B, __m512i C, __m512i D) {
- // CHECK-LABEL: @test_kshiftli_mask8
+ // CHECK-LABEL: test_kshiftli_mask8
// CHECK: [[VAL:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = shufflevector <8 x i1> zeroinitializer, <8 x i1> [[VAL]], <8 x i32> <i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13>
return _mm512_mask_cmpneq_epu64_mask(_kshiftli_mask8(_mm512_cmpneq_epu64_mask(A, B), 2), C, D);
}
__mmask8 test_kshiftri_mask8(__m512i A, __m512i B, __m512i C, __m512i D) {
- // CHECK-LABEL: @test_kshiftri_mask8
+ // CHECK-LABEL: test_kshiftri_mask8
// CHECK: [[VAL:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = shufflevector <8 x i1> [[VAL]], <8 x i1> zeroinitializer, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
return _mm512_mask_cmpneq_epu64_mask(_kshiftri_mask8(_mm512_cmpneq_epu64_mask(A, B), 2), C, D);
}
unsigned int test_cvtmask8_u32(__m512i A, __m512i B) {
- // CHECK-LABEL: @test_cvtmask8_u32
+ // CHECK-LABEL: test_cvtmask8_u32
// CHECK: zext i8 %{{.*}} to i32
return _cvtmask8_u32(_mm512_cmpneq_epu64_mask(A, B));
}
__mmask8 test_cvtu32_mask8(__m512i A, __m512i B, unsigned int C) {
- // CHECK-LABEL: @test_cvtu32_mask8
+ // CHECK-LABEL: test_cvtu32_mask8
// CHECK: trunc i32 %{{.*}} to i8
return _mm512_mask_cmpneq_epu64_mask(_cvtu32_mask8(C), A, B);
}
__mmask8 test_load_mask8(__mmask8 *A, __m512i B, __m512i C) {
- // CHECK-LABEL: @test_load_mask8
+ // CHECK-LABEL: test_load_mask8
// CHECK: [[LOAD:%.*]] = load i8, ptr %{{.*}}
return _mm512_mask_cmpneq_epu64_mask(_load_mask8(A), B, C);
}
void test_store_mask8(__mmask8 *A, __m512i B, __m512i C) {
- // CHECK-LABEL: @test_store_mask8
+ // CHECK-LABEL: test_store_mask8
// CHECK: store i8 %{{.*}}, ptr %{{.*}}
_store_mask8(A, _mm512_cmpneq_epu64_mask(B, C));
}
__m512i test_mm512_mullo_epi64 (__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mullo_epi64
+ // CHECK-LABEL: test_mm512_mullo_epi64
// CHECK: mul <8 x i64>
return (__m512i) _mm512_mullo_epi64(__A, __B);
}
__m512i test_mm512_mask_mullo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_mullo_epi64
+ // CHECK-LABEL: test_mm512_mask_mullo_epi64
// CHECK: mul <8 x i64> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return (__m512i) _mm512_mask_mullo_epi64(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_mullo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_mullo_epi64
+ // CHECK-LABEL: test_mm512_maskz_mullo_epi64
// CHECK: mul <8 x i64> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return (__m512i) _mm512_maskz_mullo_epi64(__U, __A, __B);
}
__m512d test_mm512_xor_pd (__m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_xor_pd
+ // CHECK-LABEL: test_mm512_xor_pd
// CHECK: xor <8 x i64>
return (__m512d) _mm512_xor_pd(__A, __B);
}
__m512d test_mm512_mask_xor_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_mask_xor_pd
+ // CHECK-LABEL: test_mm512_mask_xor_pd
// CHECK: xor <8 x i64>
// CHECK: %[[MASK:.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: select <8 x i1> %[[MASK]], <8 x double> %{{.*}}, <8 x double> %{{.*}}
return (__m512d) _mm512_mask_xor_pd(__W, __U, __A, __B);
}
+TEST_CONSTEXPR(match_m512d(_mm512_xor_pd((__m512d){-4.0, -5.0, +6.0, +7.0, +7.0, +6.0, -5.0, -4.0}, (__m512d){+0.0, -0.0, -0.0, +7.0, +7.0, -0.0, -0.0, +0.0}), -4.0, +5.0, -6.0, +0.0, +0.0, -6.0, +5.0, -4.0));
__m512d test_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_maskz_xor_pd
+ // CHECK-LABEL: test_mm512_maskz_xor_pd
// CHECK: xor <8 x i64>
// CHECK: %[[MASK:.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: select <8 x i1> %[[MASK]], <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -277,13 +287,14 @@ __m512d test_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B) {
}
__m512 test_mm512_xor_ps (__m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_xor_ps
+ // CHECK-LABEL: test_mm512_xor_ps
// CHECK: xor <16 x i32>
return (__m512) _mm512_xor_ps(__A, __B);
}
+TEST_CONSTEXPR(match_m512(_mm512_xor_ps((__m512){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f, -4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m512){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f, +0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), -4.0f, +5.0f, -6.0f, +0.0f, +0.0f, -6.0f, +5.0f, -4.0f, -4.0f, +5.0f, -6.0f, +0.0f, +0.0f, -6.0f, +5.0f, -4.0f));
__m512 test_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_mask_xor_ps
+ // CHECK-LABEL: test_mm512_mask_xor_ps
// CHECK: xor <16 x i32>
// CHECK: %[[MASK:.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: select <16 x i1> %[[MASK]], <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -291,7 +302,7 @@ __m512 test_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B
}
__m512 test_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_maskz_xor_ps
+ // CHECK-LABEL: test_mm512_maskz_xor_ps
// CHECK: xor <16 x i32>
// CHECK: %[[MASK:.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: select <16 x i1> %[[MASK]], <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -299,13 +310,14 @@ __m512 test_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) {
}
__m512d test_mm512_or_pd (__m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_or_pd
+ // CHECK-LABEL: test_mm512_or_pd
// CHECK: or <8 x i64>
return (__m512d) _mm512_or_pd(__A, __B);
}
+TEST_CONSTEXPR(match_m512d(_mm512_or_pd((__m512d){-4.0, -5.0, +6.0, +7.0, +7.0, +6.0, -5.0, -4.0}, (__m512d){+0.0, -0.0, -0.0, +7.0, +7.0, -0.0, -0.0, +0.0}), -4.0, -5.0, -6.0, +7.0, +7.0, -6.0, -5.0, -4.0));
__m512d test_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_mask_or_pd
+ // CHECK-LABEL: test_mm512_mask_or_pd
// CHECK: or <8 x i64>
// CHECK: %[[MASK:.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: select <8 x i1> %[[MASK]], <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -313,7 +325,7 @@ __m512d test_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d _
}
__m512d test_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_maskz_or_pd
+ // CHECK-LABEL: test_mm512_maskz_or_pd
// CHECK: or <8 x i64>
// CHECK: %[[MASK:.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: select <8 x i1> %[[MASK]], <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -321,13 +333,14 @@ __m512d test_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B) {
}
__m512 test_mm512_or_ps (__m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_or_ps
+ // CHECK-LABEL: test_mm512_or_ps
// CHECK: or <16 x i32>
return (__m512) _mm512_or_ps(__A, __B);
}
+TEST_CONSTEXPR(match_m512(_mm512_or_ps((__m512){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f, -4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m512){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f, +0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), -4.0f, -5.0f, -6.0f, +7.0f, +7.0f, -6.0f, -5.0f, -4.0f, -4.0f, -5.0f, -6.0f, +7.0f, +7.0f, -6.0f, -5.0f, -4.0f));
__m512 test_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_mask_or_ps
+ // CHECK-LABEL: test_mm512_mask_or_ps
// CHECK: or <16 x i32>
// CHECK: %[[MASK:.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: select <16 x i1> %[[MASK]], <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -335,7 +348,7 @@ __m512 test_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
}
__m512 test_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_maskz_or_ps
+ // CHECK-LABEL: test_mm512_maskz_or_ps
// CHECK: or <16 x i32>
// CHECK: %[[MASK:.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: select <16 x i1> %[[MASK]], <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -343,13 +356,14 @@ __m512 test_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) {
}
__m512d test_mm512_and_pd (__m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_and_pd
+ // CHECK-LABEL: test_mm512_and_pd
// CHECK: and <8 x i64>
return (__m512d) _mm512_and_pd(__A, __B);
}
+TEST_CONSTEXPR(match_m512d(_mm512_and_pd((__m512d){-4.0, -5.0, +6.0, +7.0, +7.0, +6.0, -5.0, -4.0}, (__m512d){+0.0, -0.0, -0.0, +7.0, +7.0, -0.0, -0.0, +0.0}), +0.0, -0.0, +0.0, +7.0, +7.0, +0.0, -0.0, +0.0));
__m512d test_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_mask_and_pd
+ // CHECK-LABEL: test_mm512_mask_and_pd
// CHECK: and <8 x i64>
// CHECK: %[[MASK:.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: select <8 x i1> %[[MASK]], <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -357,7 +371,7 @@ __m512d test_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d
}
__m512d test_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_maskz_and_pd
+ // CHECK-LABEL: test_mm512_maskz_and_pd
// CHECK: and <8 x i64>
// CHECK: %[[MASK:.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: select <8 x i1> %[[MASK]], <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -365,13 +379,14 @@ __m512d test_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B) {
}
__m512 test_mm512_and_ps (__m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_and_ps
+ // CHECK-LABEL: test_mm512_and_ps
// CHECK: and <16 x i32>
return (__m512) _mm512_and_ps(__A, __B);
}
+TEST_CONSTEXPR(match_m512(_mm512_and_ps((__m512){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f, -4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m512){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f, +0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), +0.0f, -0.0f, +0.0f, +7.0f, +7.0f, +0.0f, -0.0f, +0.0f, +0.0f, -0.0f, +0.0f, +7.0f, +7.0f, +0.0f, -0.0f, +0.0f));
__m512 test_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_mask_and_ps
+ // CHECK-LABEL: test_mm512_mask_and_ps
// CHECK: and <16 x i32>
// CHECK: %[[MASK:.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: select <16 x i1> %[[MASK]], <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -379,7 +394,7 @@ __m512 test_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B
}
__m512 test_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_maskz_and_ps
+ // CHECK-LABEL: test_mm512_maskz_and_ps
// CHECK: and <16 x i32>
// CHECK: %[[MASK:.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: select <16 x i1> %[[MASK]], <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -387,14 +402,15 @@ __m512 test_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B) {
}
__m512d test_mm512_andnot_pd (__m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_andnot_pd
+ // CHECK-LABEL: test_mm512_andnot_pd
// CHECK: xor <8 x i64> %{{.*}}, splat (i64 -1)
// CHECK: and <8 x i64>
return (__m512d) _mm512_andnot_pd(__A, __B);
}
+TEST_CONSTEXPR(match_m512d(_mm512_andnot_pd((__m512d){-4.0, -5.0, +6.0, +7.0, +7.0, +6.0, -5.0, -4.0}, (__m512d){+0.0, -0.0, -0.0, +7.0, +7.0, -0.0, -0.0, +0.0}), +0.0, +0.0, -0.0, +0.0, +0.0, -0.0, +0.0, +0.0));
__m512d test_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_mask_andnot_pd
+ // CHECK-LABEL: test_mm512_mask_andnot_pd
// CHECK: xor <8 x i64> %{{.*}}, splat (i64 -1)
// CHECK: and <8 x i64> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -402,7 +418,7 @@ __m512d test_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A, __m51
}
__m512d test_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_maskz_andnot_pd
+ // CHECK-LABEL: test_mm512_maskz_andnot_pd
// CHECK: xor <8 x i64> %{{.*}}, splat (i64 -1)
// CHECK: and <8 x i64> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -410,14 +426,15 @@ __m512d test_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B) {
}
__m512 test_mm512_andnot_ps (__m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_andnot_ps
+ // CHECK-LABEL: test_mm512_andnot_ps
// CHECK: xor <16 x i32> %{{.*}}, splat (i32 -1)
// CHECK: and <16 x i32>
return (__m512) _mm512_andnot_ps(__A, __B);
}
+TEST_CONSTEXPR(match_m512(_mm512_andnot_ps((__m512){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f, -4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m512){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f, +0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), +0.0f, +0.0f, -0.0f, +0.0f, +0.0f, -0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f, +0.0f, -0.0f, +0.0f, +0.0f));
__m512 test_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_mask_andnot_ps
+ // CHECK-LABEL: test_mm512_mask_andnot_ps
// CHECK: xor <16 x i32> %{{.*}}, splat (i32 -1)
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -425,7 +442,7 @@ __m512 test_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512
}
__m512 test_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_maskz_andnot_ps
+ // CHECK-LABEL: test_mm512_maskz_andnot_ps
// CHECK: xor <16 x i32> %{{.*}}, splat (i32 -1)
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -433,491 +450,491 @@ __m512 test_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) {
}
__m512i test_mm512_cvtpd_epi64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvtpd_epi64
+ // CHECK-LABEL: test_mm512_cvtpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.512
return _mm512_cvtpd_epi64(__A);
}
__m512i test_mm512_mask_cvtpd_epi64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtpd_epi64
+ // CHECK-LABEL: test_mm512_mask_cvtpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.512
return _mm512_mask_cvtpd_epi64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvtpd_epi64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtpd_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvtpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.512
return _mm512_maskz_cvtpd_epi64(__U, __A);
}
__m512i test_mm512_cvt_roundpd_epi64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundpd_epi64
+ // CHECK-LABEL: test_mm512_cvt_roundpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.512
return _mm512_cvt_roundpd_epi64(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvt_roundpd_epi64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundpd_epi64
+ // CHECK-LABEL: test_mm512_mask_cvt_roundpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.512
return _mm512_mask_cvt_roundpd_epi64(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvt_roundpd_epi64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundpd_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.512
return _mm512_maskz_cvt_roundpd_epi64(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvtpd_epu64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvtpd_epu64
+ // CHECK-LABEL: test_mm512_cvtpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.512
return _mm512_cvtpd_epu64(__A);
}
__m512i test_mm512_mask_cvtpd_epu64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtpd_epu64
+ // CHECK-LABEL: test_mm512_mask_cvtpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.512
return _mm512_mask_cvtpd_epu64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvtpd_epu64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtpd_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvtpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.512
return _mm512_maskz_cvtpd_epu64(__U, __A);
}
__m512i test_mm512_cvt_roundpd_epu64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundpd_epu64
+ // CHECK-LABEL: test_mm512_cvt_roundpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.512
return _mm512_cvt_roundpd_epu64(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvt_roundpd_epu64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundpd_epu64
+ // CHECK-LABEL: test_mm512_mask_cvt_roundpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.512
return _mm512_mask_cvt_roundpd_epu64(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvt_roundpd_epu64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundpd_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.512
return _mm512_maskz_cvt_roundpd_epu64(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvtps_epi64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvtps_epi64
+ // CHECK-LABEL: test_mm512_cvtps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.512
return _mm512_cvtps_epi64(__A);
}
__m512i test_mm512_mask_cvtps_epi64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtps_epi64
+ // CHECK-LABEL: test_mm512_mask_cvtps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.512
return _mm512_mask_cvtps_epi64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvtps_epi64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtps_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvtps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.512
return _mm512_maskz_cvtps_epi64(__U, __A);
}
__m512i test_mm512_cvt_roundps_epi64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundps_epi64
+ // CHECK-LABEL: test_mm512_cvt_roundps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.512
return _mm512_cvt_roundps_epi64(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvt_roundps_epi64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundps_epi64
+ // CHECK-LABEL: test_mm512_mask_cvt_roundps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.512
return _mm512_mask_cvt_roundps_epi64(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvt_roundps_epi64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundps_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.512
return _mm512_maskz_cvt_roundps_epi64(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvtps_epu64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvtps_epu64
+ // CHECK-LABEL: test_mm512_cvtps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.512
return _mm512_cvtps_epu64(__A);
}
__m512i test_mm512_mask_cvtps_epu64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtps_epu64
+ // CHECK-LABEL: test_mm512_mask_cvtps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.512
return _mm512_mask_cvtps_epu64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvtps_epu64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtps_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvtps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.512
return _mm512_maskz_cvtps_epu64(__U, __A);
}
__m512i test_mm512_cvt_roundps_epu64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundps_epu64
+ // CHECK-LABEL: test_mm512_cvt_roundps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.512
return _mm512_cvt_roundps_epu64(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvt_roundps_epu64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundps_epu64
+ // CHECK-LABEL: test_mm512_mask_cvt_roundps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.512
return _mm512_mask_cvt_roundps_epu64(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvt_roundps_epu64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundps_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.512
return _mm512_maskz_cvt_roundps_epu64(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_cvtepi64_pd(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvtepi64_pd
+ // CHECK-LABEL: test_mm512_cvtepi64_pd
// CHECK: sitofp <8 x i64> %{{.*}} to <8 x double>
return _mm512_cvtepi64_pd(__A);
}
__m512d test_mm512_mask_cvtepi64_pd(__m512d __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtepi64_pd
+ // CHECK-LABEL: test_mm512_mask_cvtepi64_pd
// CHECK: sitofp <8 x i64> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_cvtepi64_pd(__W, __U, __A);
}
__m512d test_mm512_maskz_cvtepi64_pd(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtepi64_pd
+ // CHECK-LABEL: test_mm512_maskz_cvtepi64_pd
// CHECK: sitofp <8 x i64> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_cvtepi64_pd(__U, __A);
}
__m512d test_mm512_cvt_roundepi64_pd(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundepi64_pd
+ // CHECK-LABEL: test_mm512_cvt_roundepi64_pd
// CHECK: @llvm.x86.avx512.sitofp.round.v8f64.v8i64
return _mm512_cvt_roundepi64_pd(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_mask_cvt_roundepi64_pd(__m512d __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundepi64_pd
+ // CHECK-LABEL: test_mm512_mask_cvt_roundepi64_pd
// CHECK: @llvm.x86.avx512.sitofp.round.v8f64.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_cvt_roundepi64_pd(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_maskz_cvt_roundepi64_pd(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundepi64_pd
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundepi64_pd
// CHECK: @llvm.x86.avx512.sitofp.round.v8f64.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_cvt_roundepi64_pd(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m256 test_mm512_cvtepi64_ps(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvtepi64_ps
+ // CHECK-LABEL: test_mm512_cvtepi64_ps
// CHECK: sitofp <8 x i64> %{{.*}} to <8 x float>
return _mm512_cvtepi64_ps(__A);
}
__m256 test_mm512_mask_cvtepi64_ps(__m256 __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtepi64_ps
+ // CHECK-LABEL: test_mm512_mask_cvtepi64_ps
// CHECK: sitofp <8 x i64> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_mask_cvtepi64_ps(__W, __U, __A);
}
__m256 test_mm512_maskz_cvtepi64_ps(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtepi64_ps
+ // CHECK-LABEL: test_mm512_maskz_cvtepi64_ps
// CHECK: sitofp <8 x i64> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_maskz_cvtepi64_ps(__U, __A);
}
__m256 test_mm512_cvt_roundepi64_ps(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundepi64_ps
+ // CHECK-LABEL: test_mm512_cvt_roundepi64_ps
// CHECK: @llvm.x86.avx512.sitofp.round.v8f32.v8i64
return _mm512_cvt_roundepi64_ps(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m256 test_mm512_mask_cvt_roundepi64_ps(__m256 __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundepi64_ps
+ // CHECK-LABEL: test_mm512_mask_cvt_roundepi64_ps
// CHECK: @llvm.x86.avx512.sitofp.round.v8f32.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_mask_cvt_roundepi64_ps(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m256 test_mm512_maskz_cvt_roundepi64_ps(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundepi64_ps
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundepi64_ps
// CHECK: @llvm.x86.avx512.sitofp.round.v8f32.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_maskz_cvt_roundepi64_ps(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvttpd_epi64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvttpd_epi64
+ // CHECK-LABEL: test_mm512_cvttpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.512
return _mm512_cvttpd_epi64(__A);
}
__m512i test_mm512_mask_cvttpd_epi64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvttpd_epi64
+ // CHECK-LABEL: test_mm512_mask_cvttpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.512
return _mm512_mask_cvttpd_epi64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvttpd_epi64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvttpd_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvttpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.512
return _mm512_maskz_cvttpd_epi64(__U, __A);
}
__m512i test_mm512_cvtt_roundpd_epi64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvtt_roundpd_epi64
+ // CHECK-LABEL: test_mm512_cvtt_roundpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.512
return _mm512_cvtt_roundpd_epi64(__A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvtt_roundpd_epi64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtt_roundpd_epi64
+ // CHECK-LABEL: test_mm512_mask_cvtt_roundpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.512
return _mm512_mask_cvtt_roundpd_epi64(__W, __U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvtt_roundpd_epi64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtt_roundpd_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvtt_roundpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.512
return _mm512_maskz_cvtt_roundpd_epi64(__U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvttpd_epu64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvttpd_epu64
+ // CHECK-LABEL: test_mm512_cvttpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.512
return _mm512_cvttpd_epu64(__A);
}
__m512i test_mm512_mask_cvttpd_epu64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvttpd_epu64
+ // CHECK-LABEL: test_mm512_mask_cvttpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.512
return _mm512_mask_cvttpd_epu64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvttpd_epu64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvttpd_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvttpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.512
return _mm512_maskz_cvttpd_epu64(__U, __A);
}
__m512i test_mm512_cvtt_roundpd_epu64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvtt_roundpd_epu64
+ // CHECK-LABEL: test_mm512_cvtt_roundpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.512
return _mm512_cvtt_roundpd_epu64(__A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvtt_roundpd_epu64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtt_roundpd_epu64
+ // CHECK-LABEL: test_mm512_mask_cvtt_roundpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.512
return _mm512_mask_cvtt_roundpd_epu64(__W, __U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvtt_roundpd_epu64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtt_roundpd_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvtt_roundpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.512
return _mm512_maskz_cvtt_roundpd_epu64(__U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvttps_epi64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvttps_epi64
+ // CHECK-LABEL: test_mm512_cvttps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.512
return _mm512_cvttps_epi64(__A);
}
__m512i test_mm512_mask_cvttps_epi64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvttps_epi64
+ // CHECK-LABEL: test_mm512_mask_cvttps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.512
return _mm512_mask_cvttps_epi64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvttps_epi64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvttps_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvttps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.512
return _mm512_maskz_cvttps_epi64(__U, __A);
}
__m512i test_mm512_cvtt_roundps_epi64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvtt_roundps_epi64
+ // CHECK-LABEL: test_mm512_cvtt_roundps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.512
return _mm512_cvtt_roundps_epi64(__A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvtt_roundps_epi64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtt_roundps_epi64
+ // CHECK-LABEL: test_mm512_mask_cvtt_roundps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.512
return _mm512_mask_cvtt_roundps_epi64(__W, __U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvtt_roundps_epi64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtt_roundps_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvtt_roundps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.512
return _mm512_maskz_cvtt_roundps_epi64(__U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvttps_epu64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvttps_epu64
+ // CHECK-LABEL: test_mm512_cvttps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.512
return _mm512_cvttps_epu64(__A);
}
__m512i test_mm512_mask_cvttps_epu64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvttps_epu64
+ // CHECK-LABEL: test_mm512_mask_cvttps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.512
return _mm512_mask_cvttps_epu64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvttps_epu64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvttps_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvttps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.512
return _mm512_maskz_cvttps_epu64(__U, __A);
}
__m512i test_mm512_cvtt_roundps_epu64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvtt_roundps_epu64
+ // CHECK-LABEL: test_mm512_cvtt_roundps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.512
return _mm512_cvtt_roundps_epu64(__A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvtt_roundps_epu64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtt_roundps_epu64
+ // CHECK-LABEL: test_mm512_mask_cvtt_roundps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.512
return _mm512_mask_cvtt_roundps_epu64(__W, __U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvtt_roundps_epu64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtt_roundps_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvtt_roundps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.512
return _mm512_maskz_cvtt_roundps_epu64(__U, __A, _MM_FROUND_NO_EXC);
}
__m512d test_mm512_cvtepu64_pd(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvtepu64_pd
+ // CHECK-LABEL: test_mm512_cvtepu64_pd
// CHECK: uitofp <8 x i64> %{{.*}} to <8 x double>
return _mm512_cvtepu64_pd(__A);
}
__m512d test_mm512_mask_cvtepu64_pd(__m512d __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtepu64_pd
+ // CHECK-LABEL: test_mm512_mask_cvtepu64_pd
// CHECK: uitofp <8 x i64> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_cvtepu64_pd(__W, __U, __A);
}
__m512d test_mm512_maskz_cvtepu64_pd(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtepu64_pd
+ // CHECK-LABEL: test_mm512_maskz_cvtepu64_pd
// CHECK: uitofp <8 x i64> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_cvtepu64_pd(__U, __A);
}
__m512d test_mm512_cvt_roundepu64_pd(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundepu64_pd
+ // CHECK-LABEL: test_mm512_cvt_roundepu64_pd
// CHECK: @llvm.x86.avx512.uitofp.round.v8f64.v8i64
return _mm512_cvt_roundepu64_pd(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_mask_cvt_roundepu64_pd(__m512d __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundepu64_pd
+ // CHECK-LABEL: test_mm512_mask_cvt_roundepu64_pd
// CHECK: @llvm.x86.avx512.uitofp.round.v8f64.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_cvt_roundepu64_pd(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_maskz_cvt_roundepu64_pd(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundepu64_pd
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundepu64_pd
// CHECK: @llvm.x86.avx512.uitofp.round.v8f64.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_cvt_roundepu64_pd(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m256 test_mm512_cvtepu64_ps(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvtepu64_ps
+ // CHECK-LABEL: test_mm512_cvtepu64_ps
// CHECK: uitofp <8 x i64> %{{.*}} to <8 x float>
return _mm512_cvtepu64_ps(__A);
}
__m256 test_mm512_mask_cvtepu64_ps(__m256 __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtepu64_ps
+ // CHECK-LABEL: test_mm512_mask_cvtepu64_ps
// CHECK: uitofp <8 x i64> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_mask_cvtepu64_ps(__W, __U, __A);
}
__m256 test_mm512_maskz_cvtepu64_ps(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtepu64_ps
+ // CHECK-LABEL: test_mm512_maskz_cvtepu64_ps
// CHECK: uitofp <8 x i64> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_maskz_cvtepu64_ps(__U, __A);
}
__m256 test_mm512_cvt_roundepu64_ps(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundepu64_ps
+ // CHECK-LABEL: test_mm512_cvt_roundepu64_ps
// CHECK: @llvm.x86.avx512.uitofp.round.v8f32.v8i64
return _mm512_cvt_roundepu64_ps(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m256 test_mm512_mask_cvt_roundepu64_ps(__m256 __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundepu64_ps
+ // CHECK-LABEL: test_mm512_mask_cvt_roundepu64_ps
// CHECK: @llvm.x86.avx512.uitofp.round.v8f32.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_mask_cvt_roundepu64_ps(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m256 test_mm512_maskz_cvt_roundepu64_ps(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundepu64_ps
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundepu64_ps
// CHECK: @llvm.x86.avx512.uitofp.round.v8f32.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_maskz_cvt_roundepu64_ps(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_range_pd(__m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_range_pd
+ // CHECK-LABEL: test_mm512_range_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.512
return _mm512_range_pd(__A, __B, 4);
}
__m512d test_mm512_mask_range_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_mask_range_pd
+ // CHECK-LABEL: test_mm512_mask_range_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.512
return _mm512_mask_range_pd(__W, __U, __A, __B, 4);
}
__m512d test_mm512_maskz_range_pd(__mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_maskz_range_pd
+ // CHECK-LABEL: test_mm512_maskz_range_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.512
return _mm512_maskz_range_pd(__U, __A, __B, 4);
}
__m512d test_mm512_range_round_pd(__m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_range_round_pd
+ // CHECK-LABEL: test_mm512_range_round_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.512
return _mm512_range_round_pd(__A, __B, 4, 8);
}
__m512d test_mm512_mask_range_round_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_mask_range_round_pd
+ // CHECK-LABEL: test_mm512_mask_range_round_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.512
return _mm512_mask_range_round_pd(__W, __U, __A, __B, 4, 8);
}
__m512d test_mm512_maskz_range_round_pd(__mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_maskz_range_round_pd
+ // CHECK-LABEL: test_mm512_maskz_range_round_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.512
return _mm512_maskz_range_round_pd(__U, __A, __B, 4, 8);
}
__m128d test_mm512_range_round_sd(__m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm512_range_round_sd
+ // CHECK-LABEL: test_mm512_range_round_sd
// CHECK: @llvm.x86.avx512.mask.range.sd
return _mm_range_round_sd(__A, __B, 4, 8);
}
@@ -929,31 +946,31 @@ __m128d test_mm512_mask_range_round_sd(__m128d __W, __mmask8 __U, __m128d __A, _
}
__m128d test_mm512_maskz_range_round_sd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm512_maskz_range_round_sd
+ // CHECK-LABEL: test_mm512_maskz_range_round_sd
// CHECK: @llvm.x86.avx512.mask.range.sd
return _mm_maskz_range_round_sd(__U, __A, __B, 4, 8);
}
__m128 test_mm512_range_round_ss(__m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm512_range_round_ss
+ // CHECK-LABEL: test_mm512_range_round_ss
// CHECK: @llvm.x86.avx512.mask.range.ss
return _mm_range_round_ss(__A, __B, 4, 8);
}
__m128 test_mm512_mask_range_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm512_mask_range_round_ss
+ // CHECK-LABEL: test_mm512_mask_range_round_ss
// CHECK: @llvm.x86.avx512.mask.range.ss
return _mm_mask_range_round_ss(__W, __U, __A, __B, 4, 8);
}
__m128 test_mm512_maskz_range_round_ss(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm512_maskz_range_round_ss
+ // CHECK-LABEL: test_mm512_maskz_range_round_ss
// CHECK: @llvm.x86.avx512.mask.range.ss
return _mm_maskz_range_round_ss(__U, __A, __B, 4, 8);
}
__m128d test_mm_range_sd(__m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_range_sd
+ // CHECK-LABEL: test_mm_range_sd
// CHECK: @llvm.x86.avx512.mask.range.sd
return _mm_range_sd(__A, __B, 4);
}
@@ -965,558 +982,564 @@ __m128d test_mm_mask_range_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __
}
__m128d test_mm_maskz_range_sd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_range_sd
+ // CHECK-LABEL: test_mm_maskz_range_sd
// CHECK: @llvm.x86.avx512.mask.range.sd
return _mm_maskz_range_sd(__U, __A, __B, 4);
}
__m128 test_mm_range_ss(__m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_range_ss
+ // CHECK-LABEL: test_mm_range_ss
// CHECK: @llvm.x86.avx512.mask.range.ss
return _mm_range_ss(__A, __B, 4);
}
__m128 test_mm_mask_range_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_range_ss
+ // CHECK-LABEL: test_mm_mask_range_ss
// CHECK: @llvm.x86.avx512.mask.range.ss
return _mm_mask_range_ss(__W, __U, __A, __B, 4);
}
__m128 test_mm_maskz_range_ss(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_range_ss
+ // CHECK-LABEL: test_mm_maskz_range_ss
// CHECK: @llvm.x86.avx512.mask.range.ss
return _mm_maskz_range_ss(__U, __A, __B, 4);
}
__m512 test_mm512_range_ps(__m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_range_ps
+ // CHECK-LABEL: test_mm512_range_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.512
return _mm512_range_ps(__A, __B, 4);
}
__m512 test_mm512_mask_range_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_mask_range_ps
+ // CHECK-LABEL: test_mm512_mask_range_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.512
return _mm512_mask_range_ps(__W, __U, __A, __B, 4);
}
__m512 test_mm512_maskz_range_ps(__mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_maskz_range_ps
+ // CHECK-LABEL: test_mm512_maskz_range_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.512
return _mm512_maskz_range_ps(__U, __A, __B, 4);
}
__m512 test_mm512_range_round_ps(__m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_range_round_ps
+ // CHECK-LABEL: test_mm512_range_round_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.512
return _mm512_range_round_ps(__A, __B, 4, 8);
}
__m512 test_mm512_mask_range_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_mask_range_round_ps
+ // CHECK-LABEL: test_mm512_mask_range_round_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.512
return _mm512_mask_range_round_ps(__W, __U, __A, __B, 4, 8);
}
__m512 test_mm512_maskz_range_round_ps(__mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_maskz_range_round_ps
+ // CHECK-LABEL: test_mm512_maskz_range_round_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.512
return _mm512_maskz_range_round_ps(__U, __A, __B, 4, 8);
}
__m512d test_mm512_reduce_pd(__m512d __A) {
- // CHECK-LABEL: @test_mm512_reduce_pd
+ // CHECK-LABEL: test_mm512_reduce_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.512
return _mm512_reduce_pd(__A, 4);
}
__m512d test_mm512_mask_reduce_pd(__m512d __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_reduce_pd
+ // CHECK-LABEL: test_mm512_mask_reduce_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.512
return _mm512_mask_reduce_pd(__W, __U, __A, 4);
}
__m512d test_mm512_maskz_reduce_pd(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_reduce_pd
+ // CHECK-LABEL: test_mm512_maskz_reduce_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.512
return _mm512_maskz_reduce_pd(__U, __A, 4);
}
__m512 test_mm512_reduce_ps(__m512 __A) {
- // CHECK-LABEL: @test_mm512_reduce_ps
+ // CHECK-LABEL: test_mm512_reduce_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.512
return _mm512_reduce_ps(__A, 4);
}
__m512 test_mm512_mask_reduce_ps(__m512 __W, __mmask16 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_mask_reduce_ps
+ // CHECK-LABEL: test_mm512_mask_reduce_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.512
return _mm512_mask_reduce_ps(__W, __U, __A, 4);
}
__m512 test_mm512_maskz_reduce_ps(__mmask16 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_maskz_reduce_ps
+ // CHECK-LABEL: test_mm512_maskz_reduce_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.512
return _mm512_maskz_reduce_ps(__U, __A, 4);
}
__m512d test_mm512_reduce_round_pd(__m512d __A) {
- // CHECK-LABEL: @test_mm512_reduce_round_pd
+ // CHECK-LABEL: test_mm512_reduce_round_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.512
return _mm512_reduce_round_pd(__A, 4, 8);
}
__m512d test_mm512_mask_reduce_round_pd(__m512d __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_reduce_round_pd
+ // CHECK-LABEL: test_mm512_mask_reduce_round_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.512
return _mm512_mask_reduce_round_pd(__W, __U, __A, 4, 8);
}
__m512d test_mm512_maskz_reduce_round_pd(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_reduce_round_pd
+ // CHECK-LABEL: test_mm512_maskz_reduce_round_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.512
return _mm512_maskz_reduce_round_pd(__U, __A, 4, 8);
}
__m512 test_mm512_reduce_round_ps(__m512 __A) {
- // CHECK-LABEL: @test_mm512_reduce_round_ps
+ // CHECK-LABEL: test_mm512_reduce_round_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.512
return _mm512_reduce_round_ps(__A, 4, 8);
}
__m512 test_mm512_mask_reduce_round_ps(__m512 __W, __mmask16 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_mask_reduce_round_ps
+ // CHECK-LABEL: test_mm512_mask_reduce_round_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.512
return _mm512_mask_reduce_round_ps(__W, __U, __A, 4, 8);
}
__m512 test_mm512_maskz_reduce_round_ps(__mmask16 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_maskz_reduce_round_ps
+ // CHECK-LABEL: test_mm512_maskz_reduce_round_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.512
return _mm512_maskz_reduce_round_ps(__U, __A, 4, 8);
}
__m128 test_mm_reduce_ss(__m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_reduce_ss
+ // CHECK-LABEL: test_mm_reduce_ss
// CHECK: @llvm.x86.avx512.mask.reduce.ss
return _mm_reduce_ss(__A, __B, 4);
}
__m128 test_mm_mask_reduce_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_reduce_ss
+ // CHECK-LABEL: test_mm_mask_reduce_ss
// CHECK: @llvm.x86.avx512.mask.reduce.ss
return _mm_mask_reduce_ss(__W, __U, __A, __B, 4);
}
__m128 test_mm_maskz_reduce_ss(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_reduce_ss
+ // CHECK-LABEL: test_mm_maskz_reduce_ss
// CHECK: @llvm.x86.avx512.mask.reduce.ss
return _mm_maskz_reduce_ss(__U, __A, __B, 4);
}
__m128 test_mm_reduce_round_ss(__m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_reduce_round_ss
+ // CHECK-LABEL: test_mm_reduce_round_ss
// CHECK: @llvm.x86.avx512.mask.reduce.ss
return _mm_reduce_round_ss(__A, __B, 4, 8);
}
__m128 test_mm_mask_reduce_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_reduce_round_ss
+ // CHECK-LABEL: test_mm_mask_reduce_round_ss
// CHECK: @llvm.x86.avx512.mask.reduce.ss
return _mm_mask_reduce_round_ss(__W, __U, __A, __B, 4, 8);
}
__m128 test_mm_maskz_reduce_round_ss(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_reduce_round_ss
+ // CHECK-LABEL: test_mm_maskz_reduce_round_ss
// CHECK: @llvm.x86.avx512.mask.reduce.ss
return _mm_maskz_reduce_round_ss(__U, __A, __B, 4, 8);
}
__m128d test_mm_reduce_sd(__m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_reduce_sd
+ // CHECK-LABEL: test_mm_reduce_sd
// CHECK: @llvm.x86.avx512.mask.reduce.sd
return _mm_reduce_sd(__A, __B, 4);
}
__m128d test_mm_mask_reduce_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_reduce_sd
+ // CHECK-LABEL: test_mm_mask_reduce_sd
// CHECK: @llvm.x86.avx512.mask.reduce.sd
return _mm_mask_reduce_sd(__W, __U, __A, __B, 4);
}
__m128d test_mm_maskz_reduce_sd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_reduce_sd
+ // CHECK-LABEL: test_mm_maskz_reduce_sd
// CHECK: @llvm.x86.avx512.mask.reduce.sd
return _mm_maskz_reduce_sd(__U, __A, __B, 4);
}
__m128d test_mm_reduce_round_sd(__m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_reduce_round_sd
+ // CHECK-LABEL: test_mm_reduce_round_sd
// CHECK: @llvm.x86.avx512.mask.reduce.sd
return _mm_reduce_round_sd(__A, __B, 4, 8);
}
__m128d test_mm_mask_reduce_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_reduce_round_sd
+ // CHECK-LABEL: test_mm_mask_reduce_round_sd
// CHECK: @llvm.x86.avx512.mask.reduce.sd
return _mm_mask_reduce_round_sd(__W, __U, __A, __B, 4, 8);
}
__m128d test_mm_maskz_reduce_round_sd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_reduce_round_sd
+ // CHECK-LABEL: test_mm_maskz_reduce_round_sd
// CHECK: @llvm.x86.avx512.mask.reduce.sd
return _mm_maskz_reduce_round_sd(__U, __A, __B, 4, 8);
}
__mmask16 test_mm512_movepi32_mask(__m512i __A) {
- // CHECK-LABEL: @test_mm512_movepi32_mask
+ // CHECK-LABEL: test_mm512_movepi32_mask
// CHECK: [[CMP:%.*]] = icmp slt <16 x i32> %{{.*}}, zeroinitializer
return _mm512_movepi32_mask(__A);
}
__m512i test_mm512_movm_epi32(__mmask16 __A) {
- // CHECK-LABEL: @test_mm512_movm_epi32
+ // CHECK-LABEL: test_mm512_movm_epi32
// CHECK: %{{.*}} = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: %vpmovm2.i = sext <16 x i1> %{{.*}} to <16 x i32>
return _mm512_movm_epi32(__A);
}
__m512i test_mm512_movm_epi64(__mmask8 __A) {
- // CHECK-LABEL: @test_mm512_movm_epi64
+ // CHECK-LABEL: test_mm512_movm_epi64
// CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: %vpmovm2.i = sext <8 x i1> %{{.*}} to <8 x i64>
return _mm512_movm_epi64(__A);
}
__mmask8 test_mm512_movepi64_mask(__m512i __A) {
- // CHECK-LABEL: @test_mm512_movepi64_mask
+ // CHECK-LABEL: test_mm512_movepi64_mask
// CHECK: [[CMP:%.*]] = icmp slt <8 x i64> %{{.*}}, zeroinitializer
return _mm512_movepi64_mask(__A);
}
__m512 test_mm512_broadcast_f32x2(__m128 __A) {
- // CHECK-LABEL: @test_mm512_broadcast_f32x2
+ // CHECK-LABEL: test_mm512_broadcast_f32x2
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm512_broadcast_f32x2(__A);
}
+TEST_CONSTEXPR(match_m512(_mm512_broadcast_f32x2((__m128)(__v4sf){1.0, -2.0, 3.0, -4.0}), 1.0, -2.0, 1.0, -2.0, 1.0, -2.0, 1.0, -2.0, 1.0, -2.0, 1.0, -2.0, 1.0, -2.0, 1.0, -2.0));
__m512 test_mm512_mask_broadcast_f32x2(__m512 __O, __mmask16 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcast_f32x2
+ // CHECK-LABEL: test_mm512_mask_broadcast_f32x2
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_broadcast_f32x2(__O, __M, __A);
}
__m512 test_mm512_maskz_broadcast_f32x2(__mmask16 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcast_f32x2
+ // CHECK-LABEL: test_mm512_maskz_broadcast_f32x2
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_broadcast_f32x2(__M, __A);
}
__m512 test_mm512_broadcast_f32x8(float const* __A) {
- // CHECK-LABEL: @test_mm512_broadcast_f32x8
+ // CHECK-LABEL: test_mm512_broadcast_f32x8
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
return _mm512_broadcast_f32x8(_mm256_loadu_ps(__A));
}
+TEST_CONSTEXPR(match_m512(_mm512_broadcast_f32x8((__m256)(__v8sf){1.0f, 2.0f, 3.0f, 4.0f, -5.0f, -6.0f, -7.0f, -8.0f}), 1.0f, 2.0f, 3.0f, 4.0f, -5.0f, -6.0f, -7.0f, -8.0f, 1.0f, 2.0f, 3.0f, 4.0f, -5.0f, -6.0f, -7.0f, -8.0f));
__m512 test_mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, float const* __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcast_f32x8
+ // CHECK-LABEL: test_mm512_mask_broadcast_f32x8
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_broadcast_f32x8(__O, __M, _mm256_loadu_ps(__A));
}
__m512 test_mm512_maskz_broadcast_f32x8(__mmask16 __M, float const* __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcast_f32x8
+ // CHECK-LABEL: test_mm512_maskz_broadcast_f32x8
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_broadcast_f32x8(__M, _mm256_loadu_ps(__A));
}
__m512d test_mm512_broadcast_f64x2(double const* __A) {
- // CHECK-LABEL: @test_mm512_broadcast_f64x2
+ // CHECK-LABEL: test_mm512_broadcast_f64x2
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm512_broadcast_f64x2(_mm_loadu_pd(__A));
}
+TEST_CONSTEXPR(match_m512d(_mm512_broadcast_f64x2((__m128d)(__v2df){1.0, -2.0}), 1.0, -2.0, 1.0, -2.0, 1.0, -2.0, 1.0, -2.0));
__m512d test_mm512_mask_broadcast_f64x2(__m512d __O, __mmask8 __M, double const* __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcast_f64x2
+ // CHECK-LABEL: test_mm512_mask_broadcast_f64x2
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_broadcast_f64x2(__O, __M, _mm_loadu_pd(__A));
}
__m512d test_mm512_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcast_f64x2
+ // CHECK-LABEL: test_mm512_maskz_broadcast_f64x2
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_broadcast_f64x2(__M, _mm_loadu_pd(__A));
}
__m512i test_mm512_broadcast_i32x2(__m128i __A) {
- // CHECK-LABEL: @test_mm512_broadcast_i32x2
+ // CHECK-LABEL: test_mm512_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm512_broadcast_i32x2(__A);
}
+TEST_CONSTEXPR(match_v16si(_mm512_broadcast_i32x2((__m128i)(__v4si){1, -2, 3, -4}), 1, -2, 1, -2, 1, -2, 1, -2, 1, -2, 1, -2, 1, -2, 1, -2));
__m512i test_mm512_mask_broadcast_i32x2(__m512i __O, __mmask16 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcast_i32x2
+ // CHECK-LABEL: test_mm512_mask_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_broadcast_i32x2(__O, __M, __A);
}
__m512i test_mm512_maskz_broadcast_i32x2(__mmask16 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcast_i32x2
+ // CHECK-LABEL: test_mm512_maskz_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_broadcast_i32x2(__M, __A);
}
__m512i test_mm512_broadcast_i32x8(__m256i const* __A) {
- // CHECK-LABEL: @test_mm512_broadcast_i32x8
+ // CHECK-LABEL: test_mm512_broadcast_i32x8
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
return _mm512_broadcast_i32x8(_mm256_loadu_si256(__A));
}
+TEST_CONSTEXPR(match_v16si(_mm512_broadcast_i32x8((__m256i)(__v8si){1, 2, 3, 4, -5, -6, -7, -8}), 1, 2, 3, 4, -5, -6, -7, -8, 1, 2, 3, 4, -5, -6, -7, -8));
__m512i test_mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i const* __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcast_i32x8
+ // CHECK-LABEL: test_mm512_mask_broadcast_i32x8
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_broadcast_i32x8(__O, __M, _mm256_loadu_si256(__A));
}
__m512i test_mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i const* __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcast_i32x8
+ // CHECK-LABEL: test_mm512_maskz_broadcast_i32x8
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_broadcast_i32x8(__M, _mm256_loadu_si256(__A));
}
__m512i test_mm512_broadcast_i64x2(__m128i const* __A) {
- // CHECK-LABEL: @test_mm512_broadcast_i64x2
+ // CHECK-LABEL: test_mm512_broadcast_i64x2
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm512_broadcast_i64x2(_mm_loadu_si128(__A));
}
+TEST_CONSTEXPR(match_v8di(_mm512_broadcast_i64x2((__m128i)(__v2di){1, -2}), 1, -2, 1, -2, 1, -2, 1, -2));
__m512i test_mm512_mask_broadcast_i64x2(__m512i __O, __mmask8 __M, __m128i const* __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcast_i64x2
+ // CHECK-LABEL: test_mm512_mask_broadcast_i64x2
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_broadcast_i64x2(__O, __M, _mm_loadu_si128(__A));
}
__m512i test_mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i const* __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcast_i64x2
+ // CHECK-LABEL: test_mm512_maskz_broadcast_i64x2
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_broadcast_i64x2(__M, _mm_loadu_si128(__A));
}
__m256 test_mm512_extractf32x8_ps(__m512 __A) {
- // CHECK-LABEL: @test_mm512_extractf32x8_ps
+ // CHECK-LABEL: test_mm512_extractf32x8_ps
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
return _mm512_extractf32x8_ps(__A, 1);
}
__m256 test_mm512_mask_extractf32x8_ps(__m256 __W, __mmask8 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_mask_extractf32x8_ps
+ // CHECK-LABEL: test_mm512_mask_extractf32x8_ps
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_mask_extractf32x8_ps(__W, __U, __A, 1);
}
__m256 test_mm512_maskz_extractf32x8_ps(__mmask8 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_maskz_extractf32x8_ps
+ // CHECK-LABEL: test_mm512_maskz_extractf32x8_ps
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_maskz_extractf32x8_ps(__U, __A, 1);
}
__m128d test_mm512_extractf64x2_pd(__m512d __A) {
- // CHECK-LABEL: @test_mm512_extractf64x2_pd
+ // CHECK-LABEL: test_mm512_extractf64x2_pd
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> poison, <2 x i32> <i32 6, i32 7>
return _mm512_extractf64x2_pd(__A, 3);
}
__m128d test_mm512_mask_extractf64x2_pd(__m128d __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_extractf64x2_pd
+ // CHECK-LABEL: test_mm512_mask_extractf64x2_pd
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> poison, <2 x i32> <i32 6, i32 7>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm512_mask_extractf64x2_pd(__W, __U, __A, 3);
}
__m128d test_mm512_maskz_extractf64x2_pd(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_extractf64x2_pd
+ // CHECK-LABEL: test_mm512_maskz_extractf64x2_pd
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> poison, <2 x i32> <i32 6, i32 7>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm512_maskz_extractf64x2_pd(__U, __A, 3);
}
__m256i test_mm512_extracti32x8_epi32(__m512i __A) {
- // CHECK-LABEL: @test_mm512_extracti32x8_epi32
+ // CHECK-LABEL: test_mm512_extracti32x8_epi32
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
return _mm512_extracti32x8_epi32(__A, 1);
}
__m256i test_mm512_mask_extracti32x8_epi32(__m256i __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_extracti32x8_epi32
+ // CHECK-LABEL: test_mm512_mask_extracti32x8_epi32
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm512_mask_extracti32x8_epi32(__W, __U, __A, 1);
}
__m256i test_mm512_maskz_extracti32x8_epi32(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_extracti32x8_epi32
+ // CHECK-LABEL: test_mm512_maskz_extracti32x8_epi32
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm512_maskz_extracti32x8_epi32(__U, __A, 1);
}
__m128i test_mm512_extracti64x2_epi64(__m512i __A) {
- // CHECK-LABEL: @test_mm512_extracti64x2_epi64
+ // CHECK-LABEL: test_mm512_extracti64x2_epi64
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> poison, <2 x i32> <i32 6, i32 7>
return _mm512_extracti64x2_epi64(__A, 3);
}
__m128i test_mm512_mask_extracti64x2_epi64(__m128i __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_extracti64x2_epi64
+ // CHECK-LABEL: test_mm512_mask_extracti64x2_epi64
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> poison, <2 x i32> <i32 6, i32 7>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm512_mask_extracti64x2_epi64(__W, __U, __A, 3);
}
__m128i test_mm512_maskz_extracti64x2_epi64(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_extracti64x2_epi64
+ // CHECK-LABEL: test_mm512_maskz_extracti64x2_epi64
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> poison, <2 x i32> <i32 6, i32 7>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm512_maskz_extracti64x2_epi64(__U, __A, 3);
}
__m512 test_mm512_insertf32x8(__m512 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm512_insertf32x8
+ // CHECK-LABEL: test_mm512_insertf32x8
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
return _mm512_insertf32x8(__A, __B, 1);
}
__m512 test_mm512_mask_insertf32x8(__m512 __W, __mmask16 __U, __m512 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm512_mask_insertf32x8
+ // CHECK-LABEL: test_mm512_mask_insertf32x8
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_insertf32x8(__W, __U, __A, __B, 1);
}
__m512 test_mm512_maskz_insertf32x8(__mmask16 __U, __m512 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm512_maskz_insertf32x8
+ // CHECK-LABEL: test_mm512_maskz_insertf32x8
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_insertf32x8(__U, __A, __B, 1);
}
__m512d test_mm512_insertf64x2(__m512d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm512_insertf64x2
+ // CHECK-LABEL: test_mm512_insertf64x2
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
return _mm512_insertf64x2(__A, __B, 3);
}
__m512d test_mm512_mask_insertf64x2(__m512d __W, __mmask8 __U, __m512d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm512_mask_insertf64x2
+ // CHECK-LABEL: test_mm512_mask_insertf64x2
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_insertf64x2(__W, __U, __A, __B, 3);
}
__m512d test_mm512_maskz_insertf64x2(__mmask8 __U, __m512d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm512_maskz_insertf64x2
+ // CHECK-LABEL: test_mm512_maskz_insertf64x2
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_insertf64x2(__U, __A, __B, 3);
}
__m512i test_mm512_inserti32x8(__m512i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm512_inserti32x8
+ // CHECK-LABEL: test_mm512_inserti32x8
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
return _mm512_inserti32x8(__A, __B, 1);
}
__m512i test_mm512_mask_inserti32x8(__m512i __W, __mmask16 __U, __m512i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm512_mask_inserti32x8
+ // CHECK-LABEL: test_mm512_mask_inserti32x8
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_inserti32x8(__W, __U, __A, __B, 1);
}
__m512i test_mm512_maskz_inserti32x8(__mmask16 __U, __m512i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm512_maskz_inserti32x8
+ // CHECK-LABEL: test_mm512_maskz_inserti32x8
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_inserti32x8(__U, __A, __B, 1);
}
__m512i test_mm512_inserti64x2(__m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_inserti64x2
+ // CHECK-LABEL: test_mm512_inserti64x2
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 6, i32 7>
return _mm512_inserti64x2(__A, __B, 1);
}
__m512i test_mm512_mask_inserti64x2(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_mask_inserti64x2
+ // CHECK-LABEL: test_mm512_mask_inserti64x2
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_inserti64x2(__W, __U, __A, __B, 1);
}
__m512i test_mm512_maskz_inserti64x2(__mmask8 __U, __m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_maskz_inserti64x2
+ // CHECK-LABEL: test_mm512_maskz_inserti64x2
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_inserti64x2(__U, __A, __B, 1);
}
__mmask8 test_mm512_mask_fpclass_pd_mask(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_fpclass_pd_mask
+ // CHECK-LABEL: test_mm512_mask_fpclass_pd_mask
// CHECK: @llvm.x86.avx512.fpclass.pd.512
return _mm512_mask_fpclass_pd_mask(__U, __A, 4);
}
__mmask8 test_mm512_fpclass_pd_mask(__m512d __A) {
- // CHECK-LABEL: @test_mm512_fpclass_pd_mask
+ // CHECK-LABEL: test_mm512_fpclass_pd_mask
// CHECK: @llvm.x86.avx512.fpclass.pd.512
return _mm512_fpclass_pd_mask(__A, 4);
}
__mmask16 test_mm512_mask_fpclass_ps_mask(__mmask16 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_mask_fpclass_ps_mask
+ // CHECK-LABEL: test_mm512_mask_fpclass_ps_mask
// CHECK: @llvm.x86.avx512.fpclass.ps.512
return _mm512_mask_fpclass_ps_mask(__U, __A, 4);
}
__mmask16 test_mm512_fpclass_ps_mask(__m512 __A) {
- // CHECK-LABEL: @test_mm512_fpclass_ps_mask
+ // CHECK-LABEL: test_mm512_fpclass_ps_mask
// CHECK: @llvm.x86.avx512.fpclass.ps.512
return _mm512_fpclass_ps_mask(__A, 4);
}
__mmask8 test_mm_fpclass_sd_mask(__m128d __A) {
- // CHECK-LABEL: @test_mm_fpclass_sd_mask
+ // CHECK-LABEL: test_mm_fpclass_sd_mask
// CHECK: @llvm.x86.avx512.mask.fpclass.sd
return _mm_fpclass_sd_mask (__A, 2);
}
__mmask8 test_mm_mask_fpclass_sd_mask(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_fpclass_sd_mask
+ // CHECK-LABEL: test_mm_mask_fpclass_sd_mask
// CHECK: @llvm.x86.avx512.mask.fpclass.sd
return _mm_mask_fpclass_sd_mask (__U, __A, 2);
}
__mmask8 test_mm_fpclass_ss_mask(__m128 __A) {
- // CHECK-LABEL: @test_mm_fpclass_ss_mask
+ // CHECK-LABEL: test_mm_fpclass_ss_mask
// CHECK: @llvm.x86.avx512.mask.fpclass.ss
return _mm_fpclass_ss_mask ( __A, 2);
}
__mmask8 test_mm_mask_fpclass_ss_mask(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_fpclass_ss_mask
+ // CHECK-LABEL: test_mm_mask_fpclass_ss_mask
// CHECK: @llvm.x86.avx512.mask.fpclass.ss
return _mm_mask_fpclass_ss_mask (__U, __A, 2);
}
diff --git a/clang/test/CodeGen/X86/avx512f-builtins.c b/clang/test/CodeGen/X86/avx512f-builtins.c
index 84e700c..123b8de 100644
--- a/clang/test/CodeGen/X86/avx512f-builtins.c
+++ b/clang/test/CodeGen/X86/avx512f-builtins.c
@@ -3,6 +3,11 @@
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
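+// The -fexperimental-new-constant-interpreter RUN lines re-run the file under Clang's experimental constant interpreter, so the TEST_CONSTEXPR checks are exercised by both constant evaluators.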
+
#include <immintrin.h>
#include "builtin_test_helpers.h"
@@ -154,6 +159,7 @@ __m512 test_mm512_add_ps(__m512 a, __m512 b)
// CHECK: fadd <16 x float>
return _mm512_add_ps(a, b);
}
+TEST_CONSTEXPR(match_m512(_mm512_add_ps((__m512){-1.0f, -2.0f, -3.0f, -4.0f, -5.0f, -6.0f, -7.0f, -8.0f, +1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f, +8.0f}, (__m512){-1.0f, -2.0f, -3.0f, -4.0f, -5.0f, -6.0f, -7.0f, -8.0f, +1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f, +8.0f}), -2.0f, -4.0f, -6.0f, -8.0f, -10.0f, -12.0f, -14.0f, -16.0f, +2.0f, +4.0f, +6.0f, +8.0f, +10.0f, +12.0f, +14.0f, +16.0f));
__m512d test_mm512_add_pd(__m512d a, __m512d b)
{
@@ -161,6 +167,7 @@ __m512d test_mm512_add_pd(__m512d a, __m512d b)
// CHECK: fadd <8 x double>
return _mm512_add_pd(a, b);
}
+TEST_CONSTEXPR(match_m512d(_mm512_add_pd((__m512d){-1.0, -2.0, -3.0, -4.0, +1.0, +2.0, +3.0, +4.0}, (__m512d){-1.0, -2.0, -3.0, -4.0, +1.0, +2.0, +3.0, +4.0}), -2.0, -4.0, -6.0, -8.0, +2.0, +4.0, +6.0, +8.0));
__m512 test_mm512_mul_ps(__m512 a, __m512 b)
{
@@ -168,6 +175,7 @@ __m512 test_mm512_mul_ps(__m512 a, __m512 b)
// CHECK: fmul <16 x float>
return _mm512_mul_ps(a, b);
}
+TEST_CONSTEXPR(match_m512(_mm512_mul_ps((__m512){-1.0f, -2.0f, -3.0f, -4.0f, -5.0f, -6.0f, -7.0f, -8.0f, +1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f, +8.0f}, (__m512){-1.0f, -2.0f, -3.0f, -4.0f, -5.0f, -6.0f, -7.0f, -8.0f, +1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f, +8.0f}), +1.0f, +4.0f, +9.0f, +16.0f, +25.0f, +36.0f, +49.0f, +64.0f, +1.0f, +4.0f, +9.0f, +16.0f, +25.0f, +36.0f, +49.0f, +64.0f));
__m512d test_mm512_mul_pd(__m512d a, __m512d b)
{
@@ -175,6 +183,7 @@ __m512d test_mm512_mul_pd(__m512d a, __m512d b)
// CHECK: fmul <8 x double>
return _mm512_mul_pd(a, b);
}
+TEST_CONSTEXPR(match_m512d(_mm512_mul_pd((__m512d){-1.0, -2.0, -3.0, -4.0, +1.0, +2.0, +3.0, +4.0}, (__m512d){-1.0, -2.0, -3.0, -4.0, +1.0, +2.0, +3.0, +4.0}), +1.0, +4.0, +9.0, +16.0, +1.0, +4.0, +9.0, +16.0));
void test_mm512_storeu_si512 (void *__P, __m512i __A)
{
@@ -435,6 +444,22 @@ __m512d test_mm512_set1_pd(double d)
// CHECK: insertelement <8 x double> {{.*}}, i32 7
return _mm512_set1_pd(d);
}
+TEST_CONSTEXPR(match_m512d(_mm512_set1_pd(-100.0), -100.0, -100.0, -100.0, -100.0, -100.0, -100.0, -100.0, -100.0));
+
+__m512 test_mm512_set1_ps(float d)
+{
+ // CHECK-LABEL: test_mm512_set1_ps
+ // CHECK: insertelement <16 x float> {{.*}}, i32 0
+ // CHECK: insertelement <16 x float> {{.*}}, i32 1
+ // CHECK: insertelement <16 x float> {{.*}}, i32 2
+ // CHECK: insertelement <16 x float> {{.*}}, i32 3
+ // CHECK: insertelement <16 x float> {{.*}}, i32 4
+ // CHECK: insertelement <16 x float> {{.*}}, i32 5
+ // CHECK: insertelement <16 x float> {{.*}}, i32 6
+ // CHECK: insertelement <16 x float> {{.*}}, i32 15
+ return _mm512_set1_ps(d);
+}
+TEST_CONSTEXPR(match_m512(_mm512_set1_ps(-55.0f), -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f));
__mmask16 test_mm512_knot(__mmask16 a)
{
@@ -758,6 +783,8 @@ __m512 test_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C) {
__m512 test_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) {
// CHECK-LABEL: test_mm512_mask_fmadd_ps
// CHECK: call {{.*}}<16 x float> @llvm.fma.v16f32(<16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}})
+ // CHECK: bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_fmadd_ps(__A, __U, __B, __C);
}
__m512 test_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) {
@@ -1261,6 +1288,7 @@ __m512d test_mm512_unpackhi_pd(__m512d a, __m512d b)
// CHECK: shufflevector <8 x double> {{.*}} <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
return _mm512_unpackhi_pd(a, b);
}
+TEST_CONSTEXPR(match_m512d(_mm512_unpackhi_pd((__m512d){1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}, (__m512d){9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0}), +2.0, +10.0, +4.0, +12.0, +6.0, +14.0, +8.0, +16.0));
__m512d test_mm512_unpacklo_pd(__m512d a, __m512d b)
{
@@ -1268,6 +1296,7 @@ __m512d test_mm512_unpacklo_pd(__m512d a, __m512d b)
// CHECK: shufflevector <8 x double> {{.*}} <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
return _mm512_unpacklo_pd(a, b);
}
+TEST_CONSTEXPR(match_m512d(_mm512_unpacklo_pd((__m512d){1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}, (__m512d){9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0}), +1.0, +9.0, +3.0, +11.0, +5.0, +13.0, +7.0, +15.0));
__m512 test_mm512_unpackhi_ps(__m512 a, __m512 b)
{
@@ -1275,6 +1304,7 @@ __m512 test_mm512_unpackhi_ps(__m512 a, __m512 b)
// CHECK: shufflevector <16 x float> {{.*}} <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
return _mm512_unpackhi_ps(a, b);
}
+TEST_CONSTEXPR(match_m512(_mm512_unpackhi_ps((__m512){0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f}, (__m512){16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f}), +2.0f, +18.0f, +3.0f, +19.0f, +6.0f, +22.0f, +7.0f, +23.0f, +10.0f, +26.0f, +11.0f, +27.0f, +14.0f, +30.0f, +15.0f, +31.0f));
__m512 test_mm512_unpacklo_ps(__m512 a, __m512 b)
{
@@ -1282,6 +1312,7 @@ __m512 test_mm512_unpacklo_ps(__m512 a, __m512 b)
// CHECK: shufflevector <16 x float> {{.*}} <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
return _mm512_unpacklo_ps(a, b);
}
+TEST_CONSTEXPR(match_m512(_mm512_unpacklo_ps((__m512){0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f}, (__m512){16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f}), +0.0f, +16.0f, +1.0f, +17.0f, +4.0f, +20.0f, +5.0f, +21.0f, +8.0f, +24.0f, +9.0f, +25.0f, +12.0f, +28.0f, +13.0f, +29.0f));
__mmask16 test_mm512_cmp_round_ps_mask(__m512 a, __m512 b) {
// CHECK-LABEL: test_mm512_cmp_round_ps_mask
@@ -2717,6 +2748,7 @@ __m512i test_mm512_mask_and_epi32(__m512i __src,__mmask16 __k, __m512i __a, __m5
// CHECK: select <16 x i1> %[[MASK]], <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_and_epi32(__src, __k,__a, __b);
}
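+// Masked form: lane i takes (a & b) when mask bit i is set, otherwise the passthrough value from __src.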
+TEST_CONSTEXPR(match_v16si(_mm512_mask_and_epi32((__m512i)(__v16si){1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000}, (__mmask16)0x0001, (__m512i)(__v16si){7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}, (__m512i)(__v16si){3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}), 3, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000));
__m512i test_mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b) {
// CHECK-LABEL: test_mm512_maskz_and_epi32
@@ -2733,6 +2765,7 @@ __m512i test_mm512_mask_and_epi64(__m512i __src,__mmask8 __k, __m512i __a, __m51
// CHECK: select <8 x i1> %[[MASK]], <8 x i64> %[[AND_RES]], <8 x i64> %{{.*}}
return _mm512_mask_and_epi64(__src, __k,__a, __b);
}
+TEST_CONSTEXPR(match_m512i(_mm512_mask_and_epi64((__m512i){1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000}, (__mmask8)0x01, (__m512i){7, 7, 7, 7, 7, 7, 7, 7}, (__m512i){3, 3, 3, 3, 3, 3, 3, 3}), 3, 1000, 1000, 1000, 1000, 1000, 1000, 1000));
__m512i test_mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b) {
// CHECK-LABEL: test_mm512_maskz_and_epi64
@@ -2811,36 +2844,42 @@ __m512i test_mm512_and_epi32(__m512i __src,__mmask16 __k, __m512i __a, __m512i _
// CHECK: and <16 x i32>
return _mm512_and_epi32(__a, __b);
}
+TEST_CONSTEXPR(match_v16si(_mm512_and_epi32((__m512i)(__v16si){0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v16si){0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1}), 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1));
__m512i test_mm512_and_epi64(__m512i __src,__mmask8 __k, __m512i __a, __m512i __b) {
// CHECK-LABEL: test_mm512_and_epi64
// CHECK: and <8 x i64>
return _mm512_and_epi64(__a, __b);
}
+TEST_CONSTEXPR(match_v8di(_mm512_and_epi64((__m512i)(__v8di){0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v8di){0, 0, -1, -1, 0, -1, 0, -1}), 0, 0, 0, -1, 0, 0, 0, -1));
__m512i test_mm512_or_epi32(__m512i __src,__mmask16 __k, __m512i __a, __m512i __b) {
// CHECK-LABEL: test_mm512_or_epi32
// CHECK: or <16 x i32>
return _mm512_or_epi32(__a, __b);
}
+TEST_CONSTEXPR(match_v16si(_mm512_or_epi32((__m512i)(__v16si){0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v16si){0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1}), 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1, -1));
__m512i test_mm512_or_epi64(__m512i __src,__mmask8 __k, __m512i __a, __m512i __b) {
// CHECK-LABEL: test_mm512_or_epi64
// CHECK: or <8 x i64>
return _mm512_or_epi64(__a, __b);
}
+TEST_CONSTEXPR(match_v8di(_mm512_or_epi64((__m512i)(__v8di){0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v8di){0, 0, -1, -1, 0, -1, 0, -1}), 0, -1, -1, -1, 0, -1, -1, -1));
__m512i test_mm512_xor_epi32(__m512i __src,__mmask16 __k, __m512i __a, __m512i __b) {
// CHECK-LABEL: test_mm512_xor_epi32
// CHECK: xor <16 x i32>
return _mm512_xor_epi32(__a, __b);
}
+TEST_CONSTEXPR(match_v16si(_mm512_xor_epi32((__m512i)(__v16si){0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v16si){0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1}), 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0));
__m512i test_mm512_xor_epi64(__m512i __src,__mmask8 __k, __m512i __a, __m512i __b) {
// CHECK-LABEL: test_mm512_xor_epi64
// CHECK: xor <8 x i64>
return _mm512_xor_epi64(__a, __b);
}
+TEST_CONSTEXPR(match_v8di(_mm512_xor_epi64((__m512i)(__v8di){0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v8di){0, 0, -1, -1, 0, -1, 0, -1}), 0, -1, -1, 0, 0, -1, -1, 0));
__m512i test_mm512_maskz_andnot_epi32 (__mmask16 __k,__m512i __A, __m512i __B){
// CHECK-LABEL: test_mm512_maskz_andnot_epi32
@@ -2869,6 +2908,7 @@ __m512i test_mm512_andnot_si512(__m512i __A, __m512i __B)
return _mm512_andnot_si512(__A, __B);
}
+TEST_CONSTEXPR(match_v8di(_mm512_andnot_si512((__m512i)(__v8di){0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v8di){0, 0, -1, -1, 0, -1, 0, -1}), 0, 0, -1, 0, 0, -1, 0, 0));
__m512i test_mm512_andnot_epi32(__m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_andnot_epi32
@@ -2876,6 +2916,7 @@ __m512i test_mm512_andnot_epi32(__m512i __A, __m512i __B) {
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
return _mm512_andnot_epi32(__A,__B);
}
+TEST_CONSTEXPR(match_v16si(_mm512_andnot_epi32((__m512i)(__v16si){0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v16si){0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1}), 0, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, -1, 0, 0));
__m512i test_mm512_maskz_andnot_epi64 (__mmask8 __k,__m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_maskz_andnot_epi64
@@ -2900,6 +2941,7 @@ __m512i test_mm512_andnot_epi64(__m512i __A, __m512i __B) {
// CHECK: and <8 x i64> %{{.*}}, %{{.*}}
return _mm512_andnot_epi64(__A,__B);
}
+TEST_CONSTEXPR(match_v8di(_mm512_andnot_epi64((__m512i)(__v8di){0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v8di){0, 0, -1, -1, 0, -1, 0, -1}), 0, 0, -1, 0, 0, -1, 0, 0));
__m512i test_mm512_maskz_sub_epi32 (__mmask16 __k,__m512i __A, __m512i __B) {
//CHECK-LABEL: test_mm512_maskz_sub_epi32
@@ -2994,6 +3036,7 @@ __m512i test_mm512_mul_epi32(__m512i __A, __m512i __B) {
//CHECK: mul <8 x i64> %{{.*}}, %{{.*}}
return _mm512_mul_epi32(__A,__B);
}
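+// mul_epi32 sign-extends the even (low) 32-bit lane of each 64-bit pair before multiplying, e.g. 1 * -32 == -32.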
+TEST_CONSTEXPR(match_v8di(_mm512_mul_epi32((__m512i)(__v16si){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16}, (__m512i)(__v16si){-32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), -32, 84, -120, 140, -144, 132, -104, -60));
__m512i test_mm512_maskz_mul_epi32 (__mmask8 __k,__m512i __A, __m512i __B) {
//CHECK-LABEL: test_mm512_maskz_mul_epi32
@@ -3024,6 +3067,7 @@ __m512i test_mm512_mul_epu32 (__m512i __A, __m512i __B) {
//CHECK: mul <8 x i64> %{{.*}}, %{{.*}}
return _mm512_mul_epu32(__A,__B);
}
+TEST_CONSTEXPR(match_m512i(_mm512_mul_epu32((__m512i)(__v16si){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16}, (__m512i)(__v16si){-32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), 4294967264, 84, 21474836360, 140, 38654705520, 132, 55834574744, 64424509380));
__m512i test_mm512_maskz_mul_epu32 (__mmask8 __k,__m512i __A, __m512i __B) {
//CHECK-LABEL: test_mm512_maskz_mul_epu32
@@ -3542,6 +3586,7 @@ __m512d test_mm512_div_pd(__m512d __a, __m512d __b) {
// CHECK: fdiv <8 x double>
return _mm512_div_pd(__a,__b);
}
+TEST_CONSTEXPR(match_m512d(_mm512_div_pd((__m512d){+8.0, +6.0, +4.0, +2.0, -8.0, -6.0, -4.0, -2.0}, (__m512d){+2.0, +2.0, +2.0, +2.0, -2.0, -2.0, -2.0, -2.0}), +4.0, +3.0, +2.0, +1.0, +4.0, +3.0, +2.0, +1.0));
__m512d test_mm512_mask_div_pd(__m512d __w, __mmask8 __u, __m512d __a, __m512d __b) {
// CHECK-LABEL: test_mm512_mask_div_pd
// CHECK: fdiv <8 x double> %{{.*}}, %{{.*}}
@@ -3576,6 +3621,7 @@ __m512 test_mm512_div_ps(__m512 __A, __m512 __B) {
// CHECK: fdiv <16 x float>
return _mm512_div_ps(__A,__B);
}
+TEST_CONSTEXPR(match_m512(_mm512_div_ps((__m512){+16.0f, +14.0f, +12.0f, +10.0f, +8.0f, +6.0f, +4.0f, +2.0f, -16.0f, -14.0f, -12.0f, -10.0f, -8.0f, -6.0f, -4.0f, -2.0f}, (__m512){+2.0f, +2.0f, +2.0f, +2.0f, +2.0f, +2.0f, +2.0f, +2.0f, -2.0f, -2.0f, -2.0f, -2.0f, -2.0f, -2.0f, -2.0f, -2.0f}), +8.0f, +7.0f, +6.0f, +5.0f, +4.0f, +3.0f, +2.0f, +1.0f, +8.0f, +7.0f, +6.0f, +5.0f, +4.0f, +3.0f, +2.0f, +1.0f));
__m512 test_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: test_mm512_mask_div_ps
// CHECK: fdiv <16 x float> %{{.*}}, %{{.*}}
@@ -3805,6 +3851,8 @@ __m512i test_mm512_cvtepi8_epi32(__m128i __A) {
return _mm512_cvtepi8_epi32(__A);
}
+TEST_CONSTEXPR(match_v16si(_mm512_cvtepi8_epi32(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12)), -3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12));
+
__m512i test_mm512_mask_cvtepi8_epi32(__m512i __W, __mmask16 __U, __m128i __A) {
// CHECK-LABEL: test_mm512_mask_cvtepi8_epi32
// CHECK: sext <16 x i8> %{{.*}} to <16 x i32>
@@ -3825,6 +3873,8 @@ __m512i test_mm512_cvtepi8_epi64(__m128i __A) {
return _mm512_cvtepi8_epi64(__A);
}
+TEST_CONSTEXPR(match_v8di(_mm512_cvtepi8_epi64(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 0, 0, 0, 0, 0, 0, 0, 0)), -3, 2, -1, 0, 1, -2, 3, -4));
+
__m512i test_mm512_mask_cvtepi8_epi64(__m512i __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm512_mask_cvtepi8_epi64
// CHECK: sext <8 x i8> %{{.*}} to <8 x i64>
@@ -3845,6 +3895,8 @@ __m512i test_mm512_cvtepi32_epi64(__m256i __X) {
return _mm512_cvtepi32_epi64(__X);
}
+TEST_CONSTEXPR(match_v8di(_mm512_cvtepi32_epi64(_mm256_setr_epi32(-70000, 2, -1, 0, 1, -2, 3, -4)), -70000, 2, -1, 0, 1, -2, 3, -4));
+
__m512i test_mm512_mask_cvtepi32_epi64(__m512i __W, __mmask8 __U, __m256i __X) {
// CHECK-LABEL: test_mm512_mask_cvtepi32_epi64
// CHECK: sext <8 x i32> %{{.*}} to <8 x i64>
@@ -3865,6 +3917,8 @@ __m512i test_mm512_cvtepi16_epi32(__m256i __A) {
return _mm512_cvtepi16_epi32(__A);
}
+TEST_CONSTEXPR(match_v16si(_mm512_cvtepi16_epi32(_mm256_setr_epi16(-300, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12)), -300, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12));
+
__m512i test_mm512_mask_cvtepi16_epi32(__m512i __W, __mmask16 __U, __m256i __A) {
// CHECK-LABEL: test_mm512_mask_cvtepi16_epi32
// CHECK: sext <16 x i16> %{{.*}} to <16 x i32>
@@ -3885,6 +3939,8 @@ __m512i test_mm512_cvtepi16_epi64(__m128i __A) {
return _mm512_cvtepi16_epi64(__A);
}
+TEST_CONSTEXPR(match_v8di(_mm512_cvtepi16_epi64(_mm_setr_epi16(-300, 2, -1, 0, 1, -2, 3, -4)), -300, 2, -1, 0, 1, -2, 3, -4));
+
__m512i test_mm512_mask_cvtepi16_epi64(__m512i __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm512_mask_cvtepi16_epi64
// CHECK: sext <8 x i16> %{{.*}} to <8 x i64>
@@ -3905,6 +3961,8 @@ __m512i test_mm512_cvtepu8_epi32(__m128i __A) {
return _mm512_cvtepu8_epi32(__A);
}
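+// Zero extension treats the input bytes as unsigned, so -3 becomes 253, -2 becomes 254, and so on.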
+TEST_CONSTEXPR(match_v16si(_mm512_cvtepu8_epi32(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12)), 253, 2, 255, 0, 1, 254, 3, 252, 5, 250, 7, 248, 9, 246, 11, 244));
+
__m512i test_mm512_mask_cvtepu8_epi32(__m512i __W, __mmask16 __U, __m128i __A) {
// CHECK-LABEL: test_mm512_mask_cvtepu8_epi32
// CHECK: zext <16 x i8> %{{.*}} to <16 x i32>
@@ -3925,6 +3983,8 @@ __m512i test_mm512_cvtepu8_epi64(__m128i __A) {
return _mm512_cvtepu8_epi64(__A);
}
+TEST_CONSTEXPR(match_v8di(_mm512_cvtepu8_epi64(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 0, 0, 0, 0, 0, 0, 0, 0)), 253, 2, 255, 0, 1, 254, 3, 252));
+
__m512i test_mm512_mask_cvtepu8_epi64(__m512i __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm512_mask_cvtepu8_epi64
// CHECK: zext <8 x i8> %{{.*}} to <8 x i64>
@@ -3944,6 +4004,7 @@ __m512i test_mm512_cvtepu32_epi64(__m256i __X) {
// CHECK: zext <8 x i32> %{{.*}} to <8 x i64>
return _mm512_cvtepu32_epi64(__X);
}
+TEST_CONSTEXPR(match_v8di(_mm512_cvtepu32_epi64(_mm256_setr_epi32(-70000, 2, -1, 0, 1, -2, 3, -4)), 4294897296, 2, 4294967295, 0, 1, 4294967294, 3, 4294967292));
__m512i test_mm512_mask_cvtepu32_epi64(__m512i __W, __mmask8 __U, __m256i __X) {
// CHECK-LABEL: test_mm512_mask_cvtepu32_epi64
@@ -3964,6 +4025,7 @@ __m512i test_mm512_cvtepu16_epi32(__m256i __A) {
// CHECK: zext <16 x i16> %{{.*}} to <16 x i32>
return _mm512_cvtepu16_epi32(__A);
}
+TEST_CONSTEXPR(match_v16si(_mm512_cvtepu16_epi32(_mm256_setr_epi16(-300, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12)), 65236, 2, 65535, 0, 1, 65534, 3, 65532, 5, 65530, 7, 65528, 9, 65526, 11, 65524));
__m512i test_mm512_mask_cvtepu16_epi32(__m512i __W, __mmask16 __U, __m256i __A) {
// CHECK-LABEL: test_mm512_mask_cvtepu16_epi32
@@ -3984,6 +4046,7 @@ __m512i test_mm512_cvtepu16_epi64(__m128i __A) {
// CHECK: zext <8 x i16> %{{.*}} to <8 x i64>
return _mm512_cvtepu16_epi64(__A);
}
+TEST_CONSTEXPR(match_v8di(_mm512_cvtepu16_epi64(_mm_setr_epi16(-300, 2, -1, 0, 1, -2, 3, -4)), 65236, 2, 65535, 0, 1, 65534, 3, 65532));
__m512i test_mm512_mask_cvtepu16_epi64(__m512i __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm512_mask_cvtepu16_epi64
@@ -3999,12 +4062,12 @@ __m512i test_mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) {
return _mm512_maskz_cvtepu16_epi64(__U, __A);
}
-
__m512i test_mm512_rol_epi32(__m512i __A) {
// CHECK-LABEL: test_mm512_rol_epi32
// CHECK: @llvm.fshl.v16i32
return _mm512_rol_epi32(__A, 5);
}
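+// Rotates lower to the llvm.fshl funnel-shift intrinsic; 1 rotated left by 5 is 32.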
+TEST_CONSTEXPR(match_v16si(_mm512_rol_epi32(((__m512i)(__v16si){1, -2, 3, -4, -5, 6, -7, 8, 9, -10, 11, -12, 13, -14, 15, -16}), 5), 32, -33, 96, -97, -129, 192, -193, 256, 288, -289, 352, -353, 416, -417, 480, -481));
__m512i test_mm512_mask_rol_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_rol_epi32
@@ -4012,6 +4075,7 @@ __m512i test_mm512_mask_rol_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_rol_epi32(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v16si(_mm512_mask_rol_epi32(((__m512i)(__v16si){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}), 0xC873, ((__m512i)(__v16si){1, -2, 3, -4, -5, 6, -7, 8, 9, -10, 11, -12, 13, -14, 15, -16}), 5), 32, -33, 99, 99, -129, 192, -193, 99, 99, 99, 99, -353, 99, 99, 480, -481));
__m512i test_mm512_maskz_rol_epi32(__mmask16 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_maskz_rol_epi32
@@ -4019,12 +4083,14 @@ __m512i test_mm512_maskz_rol_epi32(__mmask16 __U, __m512i __A) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_rol_epi32(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_rol_epi32(0x378C, ((__m512i)(__v16si){1, -2, 3, -4, -5, 6, -7, 8, 9, -10, 11, -12, 13, -14, 15, -16}), 5), 0, 0, 96, -97, 0, 0, 0, 256, 288, -289, 352, 0, 416, -417, 0, 0));
__m512i test_mm512_rol_epi64(__m512i __A) {
// CHECK-LABEL: test_mm512_rol_epi64
// CHECK: @llvm.fshl.v8i64
return _mm512_rol_epi64(__A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_rol_epi64(((__m512i)(__v8di){1, -2, 3, -4, -5, 6, -7, 8}), 5), 32, -33, 96, -97, -129, 192, -193, 256));
__m512i test_mm512_mask_rol_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_rol_epi64
@@ -4032,6 +4098,7 @@ __m512i test_mm512_mask_rol_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_rol_epi64(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_mask_rol_epi64(((__m512i)(__v8di){99, 99, 99, 99, 99, 99, 99, 99}), 0x73, ((__m512i)(__v8di){1, -2, 3, -4, -5, 6, -7, 8}), 5), 32, -33, 99, 99, -129, 192, -193, 99));
__m512i test_mm512_maskz_rol_epi64(__mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_maskz_rol_epi64
@@ -4039,6 +4106,7 @@ __m512i test_mm512_maskz_rol_epi64(__mmask8 __U, __m512i __A) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_rol_epi64(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_rol_epi64(0x37, ((__m512i)(__v8di){1, -2, 3, -4, -5, 6, -7, 8}), 5), 32, -33, 96, 0, -129, 192, 0, 0));
__m512i test_mm512_rolv_epi32(__m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_rolv_epi32
@@ -4085,6 +4153,7 @@ __m512i test_mm512_ror_epi32(__m512i __A) {
// CHECK: @llvm.fshr.v16i32
return _mm512_ror_epi32(__A, 5);
}
+TEST_CONSTEXPR(match_v16si(_mm512_ror_epi32(((__m512i)(__v16si){1, -2, 3, -4, -5, 6, -7, 8, 9, -10, 11, -12, 13, -14, 15, -16}), 5), 134217728, -134217729, 402653184, -402653185, -536870913, 805306368, -805306369, 1073741824, 1207959552, -1207959553, 1476395008, -1476395009, 1744830464, -1744830465, 2013265920, -2013265921));
__m512i test_mm512_mask_ror_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_ror_epi32
@@ -4092,6 +4161,7 @@ __m512i test_mm512_mask_ror_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_ror_epi32(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v16si(_mm512_mask_ror_epi32(((__m512i)(__v16si){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}), 0xC873, ((__m512i)(__v16si){1, -2, 3, -4, -5, 6, -7, 8, 9, -10, 11, -12, 13, -14, 15, -16}), 5), 134217728, -134217729, 99, 99, -536870913, 805306368, -805306369, 99, 99, 99, 99, -1476395009, 99, 99, 2013265920, -2013265921));
__m512i test_mm512_maskz_ror_epi32(__mmask16 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_maskz_ror_epi32
@@ -4099,12 +4169,14 @@ __m512i test_mm512_maskz_ror_epi32(__mmask16 __U, __m512i __A) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_ror_epi32(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_ror_epi32(0x378C, ((__m512i)(__v16si){1, -2, 3, -4, -5, 6, -7, 8, 9, -10, 11, -12, 13, -14, 15, -16}), 5), 0, 0, 402653184, -402653185, 0, 0, 0, 1073741824, 1207959552, -1207959553, 1476395008, 0, 1744830464, -1744830465, 0, 0));
__m512i test_mm512_ror_epi64(__m512i __A) {
// CHECK-LABEL: test_mm512_ror_epi64
// CHECK: @llvm.fshr.v8i64
return _mm512_ror_epi64(__A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_ror_epi64(((__m512i)(__v8di){1, -2, 3, -4, -5, 6, -7, 8}), 5), 576460752303423488LL, -576460752303423489LL, 1729382256910270464LL, -1729382256910270465LL, -2305843009213693953LL, 3458764513820540928LL, -3458764513820540929LL, 4611686018427387904LL));
__m512i test_mm512_mask_ror_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_ror_epi64
@@ -4112,6 +4184,7 @@ __m512i test_mm512_mask_ror_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_ror_epi64(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_mask_ror_epi64(((__m512i)(__v8di){99, 99, 99, 99, 99, 99, 99, 99}), 0x73, ((__m512i)(__v8di){1, -2, 3, -4, -5, 6, -7, 8}), 5), 576460752303423488LL, -576460752303423489LL, 99, 99, -2305843009213693953LL, 3458764513820540928LL, -3458764513820540929LL, 99));
__m512i test_mm512_maskz_ror_epi64(__mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_maskz_ror_epi64
@@ -4119,7 +4192,7 @@ __m512i test_mm512_maskz_ror_epi64(__mmask8 __U, __m512i __A) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_ror_epi64(__U, __A, 5);
}
-
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_ror_epi64(0x37, ((__m512i)(__v8di){1, -2, 3, -4, -5, 6, -7, 8}), 5), 576460752303423488LL, -576460752303423489LL, 1729382256910270464LL, 0, -2305843009213693953LL, 3458764513820540928LL, 0, 0));
__m512i test_mm512_rorv_epi32(__m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_rorv_epi32
@@ -4166,6 +4239,11 @@ __m512i test_mm512_slli_epi32(__m512i __A) {
// CHECK: @llvm.x86.avx512.pslli.d.512
return _mm512_slli_epi32(__A, 5);
}
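+// Shift counts of 32 or more are out of range for 32-bit lanes, so every element becomes zero.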
+TEST_CONSTEXPR(match_v16si(_mm512_slli_epi32((__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 0), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
+TEST_CONSTEXPR(match_v16si(_mm512_slli_epi32((__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e));
+TEST_CONSTEXPR(match_v16si(_mm512_slli_epi32((__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 10), 0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800, 0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00));
+TEST_CONSTEXPR(match_v16si(_mm512_slli_epi32((__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 32), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v16si(_mm512_slli_epi32((__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 33), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
__m512i test_mm512_slli_epi32_2(__m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_slli_epi32_2
@@ -4179,6 +4257,7 @@ __m512i test_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_slli_epi32(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v16si(_mm512_mask_slli_epi32((__m512i)(__v16si){100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}, (__mmask16)0x5555, (__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0x0, 101, 0x4, 103, 0x8, 105, 0xc, 107, 0x10, 109, 0x14, 111, 0x18, 113, 0x1c, 115));
__m512i test_mm512_mask_slli_epi32_2(__m512i __W, __mmask16 __U, __m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_mask_slli_epi32_2
@@ -4193,6 +4272,11 @@ __m512i test_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_slli_epi32(__U, __A, 5);
}
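+// A mask literal wider than 16 bits is truncated by the __mmask16 cast; lanes whose mask bit is clear are zeroed.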
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_slli_epi32((__mmask16)0x00ffcc71, (__m512i)(__v16si){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 32), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_slli_epi32((__mmask16)0, (__m512i)(__v16si){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 16), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_slli_epi32((__mmask16)0xffff, (__m512i)(__v16si){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0x1fe, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e));
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_slli_epi32((__mmask16)0x7fff, (__m512i)(__v16si){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0x1fe, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0));
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_slli_epi32((__mmask16)0x71cc, (__m512i)(__v16si){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0, 0, 0x4, 0x6, 0, 0, 0xc, 0xe, 0x10, 0, 0, 0, 0x18, 0x1a, 0x1c, 0));
__m512i test_mm512_maskz_slli_epi32_2(__mmask16 __U, __m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_maskz_slli_epi32_2
@@ -4206,6 +4290,7 @@ __m512i test_mm512_slli_epi64(__m512i __A) {
// CHECK: @llvm.x86.avx512.pslli.q.512
return _mm512_slli_epi64(__A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_slli_epi64((__m512i)(__v8di){0, 1, 2, 3, 4, 5, 6, 7}, 1), 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe));
__m512i test_mm512_slli_epi64_2(__m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_slli_epi64_2
@@ -4219,6 +4304,7 @@ __m512i test_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_slli_epi64(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_mask_slli_epi64((__m512i)(__v8di){100, 101, 102, 103, 104, 105, 106, 107}, (__mmask8)0x0F, (__m512i)(__v8di){0, 1, 2, 3, 4, 5, 6, 7}, 2), 0x0, 0x4, 0x8, 0xc, 104, 105, 106, 107));
__m512i test_mm512_mask_slli_epi64_2(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_mask_slli_epi64_2
@@ -4233,6 +4319,11 @@ __m512i test_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_slli_epi64(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_slli_epi64((__mmask8)0x00ffcc71, (__m512i)(__v8di){0xff, 1, 2, 3, 4, 5, 6, 7}, 64), 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_slli_epi64((__mmask8)0, (__m512i)(__v8di){0xff, 1, 2, 3, 4, 5, 6, 7}, 16), 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_slli_epi64((__mmask8)0xff, (__m512i)(__v8di){0xff, 1, 2, 3, 4, 5, 6, 7}, 1), 0x1fe, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe));
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_slli_epi64((__mmask8)0x7f, (__m512i)(__v8di){0xff, 1, 2, 3, 4, 5, 6, 7}, 1), 0x1fe, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0));
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_slli_epi64((__mmask8)0x71, (__m512i)(__v8di){0xff, 1, 2, 3, 4, 5, 6, 7}, 1), 0x1fe, 0, 0, 0, 0x8, 0xa, 0xc, 0));
__m512i test_mm512_maskz_slli_epi64_2(__mmask8 __U, __m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_maskz_slli_epi64_2
@@ -4246,6 +4337,7 @@ __m512i test_mm512_srli_epi32(__m512i __A) {
// CHECK: @llvm.x86.avx512.psrli.d.512
return _mm512_srli_epi32(__A, 5);
}
+TEST_CONSTEXPR(match_v16si(_mm512_srli_epi32((__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 10), 0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0));
__m512i test_mm512_srli_epi32_2(__m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_srli_epi32_2
@@ -4259,6 +4351,7 @@ __m512i test_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_srli_epi32(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v16si(_mm512_mask_srli_epi32((__m512i)(__v16si){100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}, (__mmask16)0x5555, (__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0x0, 101, 0x1, 103, 0x2, 105, 0x3, 107, 0x4, 109, 0x5, 111, 0x6, 113, 0x7, 115));
__m512i test_mm512_mask_srli_epi32_2(__m512i __W, __mmask16 __U, __m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_mask_srli_epi32_2
@@ -4273,6 +4366,7 @@ __m512i test_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_srli_epi32(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_srli_epi32((__mmask16)0x71cc, (__m512i)(__v16si){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0, 0, 0x1, 0x1, 0, 0, 0x3, 0x3, 0x4, 0, 0, 0, 0x6, 0x6, 0x7, 0));
__m512i test_mm512_maskz_srli_epi32_2(__mmask16 __U, __m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_maskz_srli_epi32_2
@@ -4286,6 +4380,7 @@ __m512i test_mm512_srli_epi64(__m512i __A) {
// CHECK: @llvm.x86.avx512.psrli.q.512
return _mm512_srli_epi64(__A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_srli_epi64((__m512i)(__v8di){0, 1, 2, 3, 4, 5, 6, 7}, 1), 0x0, 0x0, 0x1, 0x1, 0x2, 0x2, 0x3, 0x3));
__m512i test_mm512_srli_epi64_2(__m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_srli_epi64_2
@@ -4299,6 +4394,7 @@ __m512i test_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_srli_epi64(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_mask_srli_epi64((__m512i)(__v8di){100, 101, 102, 103, 104, 105, 106, 107}, (__mmask8)0x0F, (__m512i)(__v8di){0, 1, 2, 3, 4, 5, 6, 7}, 2), 0x0, 0x0, 0x0, 0x0, 104, 105, 106, 107));
__m512i test_mm512_mask_srli_epi64_2(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_mask_srli_epi64_2
@@ -4313,6 +4409,7 @@ __m512i test_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_srli_epi64(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_srli_epi64((__mmask8)0x71, (__m512i)(__v8di){0xff, 1, 2, 3, 4, 5, 6, 7}, 1), 0x7f, 0, 0, 0, 0x2, 0x2, 0x3, 0));
__m512i test_mm512_maskz_srli_epi64_2(__mmask8 __U, __m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_maskz_srli_epi64_2
@@ -4386,6 +4483,7 @@ __m512d test_mm512_movedup_pd(__m512d __A) {
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
return _mm512_movedup_pd(__A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_movedup_pd((__m512d){-1.0, +2.0, +3.0, +4.0, -5.0, -6.0, +7.0, +8.0}), -1.0, -1.0, +3.0, +3.0, -5.0, -5.0, +7.0, +7.0));
__m512d test_mm512_mask_movedup_pd(__m512d __W, __mmask8 __U, __m512d __A) {
// CHECK-LABEL: test_mm512_mask_movedup_pd
@@ -4635,6 +4733,7 @@ __m512i test_mm512_unpackhi_epi32(__m512i __A, __m512i __B) {
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
return _mm512_unpackhi_epi32(__A, __B);
}
+TEST_CONSTEXPR(match_v16si(_mm512_unpackhi_epi32((__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m512i)(__v16si){16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}), 2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31));
__m512d test_mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B) {
// CHECK-LABEL: test_mm512_maskz_unpackhi_pd
@@ -5421,6 +5520,7 @@ __m512i test_mm512_unpackhi_epi64(__m512i __A, __m512i __B) {
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
return _mm512_unpackhi_epi64(__A, __B);
}
+TEST_CONSTEXPR(match_m512i(_mm512_unpackhi_epi64((__m512i){0, 1, 2, 3, 4, 5, 6, 7}, (__m512i){8, 9, 10, 11, 12, 13, 14, 15}), 1, 9, 3, 11, 5, 13, 7, 15));
__m512i test_mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_mask_unpackhi_epi64
@@ -5441,6 +5541,7 @@ __m512i test_mm512_unpacklo_epi32(__m512i __A, __m512i __B) {
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
return _mm512_unpacklo_epi32(__A, __B);
}
+TEST_CONSTEXPR(match_v16si(_mm512_unpacklo_epi32((__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m512i)(__v16si){16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}), 0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29));
__m512i test_mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_mask_unpacklo_epi32
@@ -5461,6 +5562,7 @@ __m512i test_mm512_unpacklo_epi64(__m512i __A, __m512i __B) {
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
return _mm512_unpacklo_epi64(__A, __B);
}
+TEST_CONSTEXPR(match_m512i(_mm512_unpacklo_epi64((__m512i){0, 1, 2, 3, 4, 5, 6, 7}, (__m512i){8, 9, 10, 11, 12, 13, 14, 15}), 0, 8, 2, 10, 4, 12, 6, 14));
__m512i test_mm512_mask_unpacklo_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_mask_unpacklo_epi64
@@ -5687,6 +5789,7 @@ __m512i test_mm512_srai_epi32(__m512i __A) {
// CHECK: @llvm.x86.avx512.psrai.d.512
return _mm512_srai_epi32(__A, 5);
}
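+// Arithmetic shifts replicate the sign bit: -2 >> 10 stays -1 while the non-negative lanes go to 0.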
+TEST_CONSTEXPR(match_v16si(_mm512_srai_epi32((__m512i)(__v16si){0, -2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 10), 0, -1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0));
__m512i test_mm512_srai_epi32_2(__m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_srai_epi32_2
@@ -5700,6 +5803,7 @@ __m512i test_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_srai_epi32(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v16si(_mm512_mask_srai_epi32((__m512i)(__v16si){100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}, (__mmask16)0x5555, (__m512i)(__v16si){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0x0, 101, 0x1, 103, 0x2, 105, 0x3, 107, 0x4, 109, 0x5, 111, 0x6, 113, 0x7, 115));
__m512i test_mm512_mask_srai_epi32_2(__m512i __W, __mmask16 __U, __m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_mask_srai_epi32_2
@@ -5714,6 +5818,7 @@ __m512i test_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_srai_epi32(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_srai_epi32((__mmask16)0x71cc, (__m512i)(__v16si){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0, 0, 0x1, 0x1, 0, 0, 0x3, 0x3, 0x4, 0, 0, 0, 0x6, 0x6, 0x7, 0));
__m512i test_mm512_maskz_srai_epi32_2(__mmask16 __U, __m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_maskz_srai_epi32_2
@@ -5727,6 +5832,7 @@ __m512i test_mm512_srai_epi64(__m512i __A) {
// CHECK: @llvm.x86.avx512.psrai.q.512
return _mm512_srai_epi64(__A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_srai_epi64((__m512i)(__v8di){0, 1, 2, 3, 4, 5, 6, 7}, 1), 0x0, 0x0, 0x1, 0x1, 0x2, 0x2, 0x3, 0x3));
__m512i test_mm512_srai_epi64_2(__m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_srai_epi64_2
@@ -5740,6 +5846,7 @@ __m512i test_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_srai_epi64(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_mask_srai_epi64((__m512i)(__v8di){100, 101, 102, 103, 104, 105, 106, 107}, (__mmask8)0x0F, (__m512i)(__v8di){0, 1, 2, 3, 4, 5, 6, 7}, 2), 0x0, 0x0, 0x0, 0x0, 104, 105, 106, 107));
__m512i test_mm512_mask_srai_epi64_2(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_mask_srai_epi64_2
@@ -5754,6 +5861,7 @@ __m512i test_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_srai_epi64(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_srai_epi64((__mmask8)0x71, (__m512i)(__v8di){0xff, 1, 2, 3, 4, 5, 6, 7}, 1), 0x7f, 0, 0, 0, 0x2, 0x2, 0x3, 0));
__m512i test_mm512_maskz_srai_epi64_2(__mmask8 __U, __m512i __A, unsigned int __B) {
// CHECK-LABEL: test_mm512_maskz_srai_epi64_2
@@ -6251,6 +6359,7 @@ __m512 test_mm512_broadcast_f32x4(float const* __A) {
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
return _mm512_broadcast_f32x4(_mm_loadu_ps(__A));
}
+TEST_CONSTEXPR(match_m512(_mm512_broadcast_f32x4((__m128)(__v4sf){1.0f, 2.0f, -3.0f, -4.0f}), 1.0f, 2.0f, -3.0f, -4.0f, 1.0f, 2.0f, -3.0f, -4.0f, 1.0f, 2.0f, -3.0f, -4.0f, 1.0f, 2.0f, -3.0f, -4.0f));
__m512 test_mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, float const* __A) {
// CHECK-LABEL: test_mm512_mask_broadcast_f32x4
@@ -6271,6 +6380,7 @@ __m512d test_mm512_broadcast_f64x4(double const* __A) {
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
return _mm512_broadcast_f64x4(_mm256_loadu_pd(__A));
}
+TEST_CONSTEXPR(match_m512d(_mm512_broadcast_f64x4((__m256d)(__v4df){1.0, 2.0, -3.0, -4.0}), 1.0, 2.0, -3.0, -4.0, 1.0, 2.0, -3.0, -4.0));
__m512d test_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, double const* __A) {
// CHECK-LABEL: test_mm512_mask_broadcast_f64x4
@@ -6291,6 +6401,7 @@ __m512i test_mm512_broadcast_i32x4(__m128i const* __A) {
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
return _mm512_broadcast_i32x4(_mm_loadu_si128(__A));
}
+TEST_CONSTEXPR(match_v16si(_mm512_broadcast_i32x4((__m128i)(__v4si){1, 2, -3, -4}), 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4, 1, 2, -3, -4));
__m512i test_mm512_mask_broadcast_i32x4(__m512i __O, __mmask16 __M, __m128i const* __A) {
// CHECK-LABEL: test_mm512_mask_broadcast_i32x4
@@ -6311,6 +6422,7 @@ __m512i test_mm512_broadcast_i64x4(__m256i const* __A) {
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
return _mm512_broadcast_i64x4(_mm256_loadu_si256(__A));
}
+TEST_CONSTEXPR(match_v8di(_mm512_broadcast_i64x4((__m256i)(__v4di){1, 2, -3, -4}), 1, 2, -3, -4, 1, 2, -3, -4));
__m512i test_mm512_mask_broadcast_i64x4(__m512i __O, __mmask8 __M, __m256i const* __A) {
// CHECK-LABEL: test_mm512_mask_broadcast_i64x4
@@ -6331,6 +6443,7 @@ __m512d test_mm512_broadcastsd_pd(__m128d __A) {
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <8 x i32> zeroinitializer
return _mm512_broadcastsd_pd(__A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_broadcastsd_pd((__m128d)(__v2df){1.0, 2.0}), 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0));
__m512d test_mm512_mask_broadcastsd_pd(__m512d __O, __mmask8 __M, __m128d __A) {
// CHECK-LABEL: test_mm512_mask_broadcastsd_pd
@@ -6351,6 +6464,7 @@ __m512 test_mm512_broadcastss_ps(__m128 __A) {
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <16 x i32> zeroinitializer
return _mm512_broadcastss_ps(__A);
}
+TEST_CONSTEXPR(match_m512(_mm512_broadcastss_ps((__m128)(__v4sf){1.0f, 2.0f, -3.0f, -4.0f}), 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f));
__m512 test_mm512_mask_broadcastss_ps(__m512 __O, __mmask16 __M, __m128 __A) {
// CHECK-LABEL: test_mm512_mask_broadcastss_ps
@@ -6371,6 +6485,7 @@ __m512i test_mm512_broadcastd_epi32(__m128i __A) {
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> zeroinitializer
return _mm512_broadcastd_epi32(__A);
}
+TEST_CONSTEXPR(match_v16si(_mm512_broadcastd_epi32((__m128i)(__v4si){-42, 0, 0, 0}), -42, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42));
__m512i test_mm512_mask_broadcastd_epi32(__m512i __O, __mmask16 __M, __m128i __A) {
// CHECK-LABEL: test_mm512_mask_broadcastd_epi32
@@ -6391,6 +6506,7 @@ __m512i test_mm512_broadcastq_epi64(__m128i __A) {
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <8 x i32> zeroinitializer
return _mm512_broadcastq_epi64(__A);
}
+TEST_CONSTEXPR(match_v8di(_mm512_broadcastq_epi64((__m128i)(__v2di){-42, 0}), -42, -42, -42, -42, -42, -42, -42, -42));
__m512i test_mm512_mask_broadcastq_epi64(__m512i __O, __mmask8 __M, __m128i __A) {
// CHECK-LABEL: test_mm512_mask_broadcastq_epi64
@@ -8682,6 +8798,7 @@ __m512 test_mm512_movehdup_ps(__m512 __A) {
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
return _mm512_movehdup_ps(__A);
}
+TEST_CONSTEXPR(match_m512(_mm512_movehdup_ps((__m512){+1.0f,-1.0f,+2.0f,-2.0f,+3.0f,-3.0f,+4.0f,-4.0f,+5.0f,-5.0f,+6.0f,-6.0f,+7.0f,-7.0f,+8.0f,-8.0f}), -1.0f, -1.0f, -2.0f, -2.0f, -3.0f, -3.0f, -4.0f, -4.0f, -5.0f, -5.0f, -6.0f, -6.0f, -7.0f, -7.0f, -8.0f, -8.0f));
__m512 test_mm512_mask_movehdup_ps(__m512 __W, __mmask16 __U, __m512 __A) {
// CHECK-LABEL: test_mm512_mask_movehdup_ps
@@ -8702,6 +8819,7 @@ __m512 test_mm512_moveldup_ps(__m512 __A) {
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
return _mm512_moveldup_ps(__A);
}
+TEST_CONSTEXPR(match_m512(_mm512_moveldup_ps((__m512){+1.0f,-1.0f,+2.0f,-2.0f,+3.0f,-3.0f,+4.0f,-4.0f,+5.0f,-5.0f,+6.0f,-6.0f,+7.0f,-7.0f,+8.0f,-8.0f}), +1.0f, +1.0f, +2.0f, +2.0f, +3.0f, +3.0f, +4.0f, +4.0f, +5.0f, +5.0f, +6.0f, +6.0f, +7.0f, +7.0f, +8.0f, +8.0f));
__m512 test_mm512_mask_moveldup_ps(__m512 __W, __mmask16 __U, __m512 __A) {
// CHECK-LABEL: test_mm512_mask_moveldup_ps
@@ -8855,6 +8973,8 @@ __m512d test_mm512_cvtps_pd(__m256 __A) {
return _mm512_cvtps_pd(__A);
}
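+// float-to-double extension is exact, so each lane converts to the same numeric value.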
+TEST_CONSTEXPR(match_m512d(_mm512_cvtps_pd((__m256){0.0f, 1.0f, 2.0f, 4.0f, -8.0f, -16.0f, -32.0f, -64.0f}), 0.0, 1.0, 2.0, 4.0, -8.0, -16.0, -32.0, -64.0));
+
__m512d test_mm512_cvtpslo_pd(__m512 __A) {
// CHECK-LABEL: test_mm512_cvtpslo_pd
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -8862,6 +8982,8 @@ __m512d test_mm512_cvtpslo_pd(__m512 __A) {
return _mm512_cvtpslo_pd(__A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_cvtpslo_pd((__m512){0.0f, 1.0f, 2.0f, 4.0f, -8.0f, -16.0f, -32.0f, -64.0f, -128.0f, -256.0f, -512.0f, -1024.0f, -2048.0f, -4096.0f, -8192.0f, -16384.0f}), 0.0, 1.0, 2.0, 4.0, -8.0, -16.0, -32.0, -64.0));
+
__m512d test_mm512_mask_cvtps_pd(__m512d __W, __mmask8 __U, __m256 __A) {
// CHECK-LABEL: test_mm512_mask_cvtps_pd
// CHECK: fpext <8 x float> %{{.*}} to <8 x double>
@@ -8869,6 +8991,8 @@ __m512d test_mm512_mask_cvtps_pd(__m512d __W, __mmask8 __U, __m256 __A) {
return _mm512_mask_cvtps_pd(__W, __U, __A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_mask_cvtps_pd(_mm512_set1_pd(-777.0), /*1010 1101=*/0xad, (__m256){0.0f, 1.0f, 2.0f, 4.0f, -8.0f, -16.0f, -32.0f, -64.0f}), 0.0, -777.0, 2.0, 4.0, -777.0, -16.0, -777.0, -64.0));
+
__m512d test_mm512_mask_cvtpslo_pd(__m512d __W, __mmask8 __U, __m512 __A) {
// CHECK-LABEL: test_mm512_mask_cvtpslo_pd
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -8877,35 +9001,45 @@ __m512d test_mm512_mask_cvtpslo_pd(__m512d __W, __mmask8 __U, __m512 __A) {
return _mm512_mask_cvtpslo_pd(__W, __U, __A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_mask_cvtpslo_pd(_mm512_set1_pd(-777.0), /*1010 1101=*/0xad, (__m512){0.0f, 1.0f, 2.0f, 4.0f, -8.0f, -16.0f, -32.0f, -64.0f, -128.0f, -256.0f, -512.0f, -1024.0f, -2048.0f, -4096.0f, -8192.0f, -16384.0f}), 0.0, -777.0, 2.0, 4.0, -777.0, -16.0, -777.0, -64.0));
+
+
__m512d test_mm512_maskz_cvtps_pd(__mmask8 __U, __m256 __A) {
// CHECK-LABEL: test_mm512_maskz_cvtps_pd
// CHECK: fpext <8 x float> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_cvtps_pd(__U, __A);
}
+
+TEST_CONSTEXPR(match_m512d(_mm512_maskz_cvtps_pd(/*1010 1101=*/0xad, (__m256){0.0f, 1.0f, 2.0f, 4.0f, -8.0f, -16.0f, -32.0f, -64.0f}), 0.0, 0.0, 2.0, 4.0, 0.0, -16.0, 0.0, -64.0));
+
__m512d test_mm512_mask_mov_pd(__m512d __W, __mmask8 __U, __m512d __A) {
// CHECK-LABEL: test_mm512_mask_mov_pd
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_mov_pd(__W, __U, __A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_mask_mov_pd((__m512d){-8.0, -7.0, -6.0, -5.0, -4.0, -3.0, -2.0, -1.0}, 0xC3, (__m512d){+1.0, +2.0, +3.0, +4.0, +5.0, +6.0, +7.0, +8.0}), +1.0, +2.0, -6.0, -5.0, -4.0, -3.0, +7.0, +8.0));
__m512d test_mm512_maskz_mov_pd(__mmask8 __U, __m512d __A) {
// CHECK-LABEL: test_mm512_maskz_mov_pd
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_mov_pd(__U, __A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_maskz_mov_pd(0xC3, (__m512d){+1.0, +2.0, +3.0, +4.0, +5.0, +6.0, +7.0, +8.0}), +1.0, +2.0, +0.0, +0.0, +0.0, +0.0, +7.0, +8.0));
__m512 test_mm512_mask_mov_ps(__m512 __W, __mmask16 __U, __m512 __A) {
// CHECK-LABEL: test_mm512_mask_mov_ps
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_mov_ps(__W, __U, __A);
}
+TEST_CONSTEXPR(match_m512(_mm512_mask_mov_ps((__m512){-16.0f, -15.0f, -14.0f, -13.0f, -12.0f, -11.0f, -10.0f, -9.0f, -8.0f, -7.0f, -6.0f, -5.0f, -4.0f, -3.0f, -2.0f, -1.0f}, 0x0FF0, (__m512){+1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f, +8.0f, +9.0f, +10.0f, +11.0f, +12.0f, +13.0f, +14.0f, +15.0f, +16.0f}), -16.0f, -15.0f, -14.0f, -13.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, -4.0f, -3.0f, -2.0f, -1.0f));
__m512 test_mm512_maskz_mov_ps(__mmask16 __U, __m512 __A) {
// CHECK-LABEL: test_mm512_maskz_mov_ps
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_mov_ps(__U, __A);
}
+TEST_CONSTEXPR(match_m512(_mm512_maskz_mov_ps(0xF3F3, (__m512){+1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f, +8.0f, +9.0f, +10.0f, +11.0f, +12.0f, +13.0f, +14.0f, +15.0f, +16.0f}), +1.0f, +2.0f, 0.0f, 0.0f, +5.0f, +6.0f, +7.0f, +8.0f, +9.0f, +10.0f, 0.0f, 0.0f, +13.0f, +14.0f, +15.0f, +16.0f));
void test_mm512_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m512d __A) {
// CHECK-LABEL: test_mm512_mask_compressstoreu_pd
@@ -9024,6 +9158,7 @@ __m512i test_mm512_set1_epi8(char d)
// CHECK: insertelement <64 x i8> {{.*}}, i32 63
return _mm512_set1_epi8(d);
}
+TEST_CONSTEXPR(match_v64qi(_mm512_set1_epi8(127), 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127));
__m512i test_mm512_set1_epi16(short d)
{
@@ -9039,6 +9174,37 @@ __m512i test_mm512_set1_epi16(short d)
// CHECK: insertelement <32 x i16> {{.*}}, i32 31
return _mm512_set1_epi16(d);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_set1_epi16(-511), -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511));
+
+__m512i test_mm512_set1_epi32(int d)
+{
+ // CHECK-LABEL: test_mm512_set1_epi32
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 0
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 1
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 2
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 3
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 4
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 5
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 6
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 15
+ return _mm512_set1_epi32(d);
+}
+TEST_CONSTEXPR(match_v16si(_mm512_set1_epi32(99), 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99));
+
+__m512i test_mm512_set1_epi64(long long d)
+{
+ // CHECK-LABEL: test_mm512_set1_epi64
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 0
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 1
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 2
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 3
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 4
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 5
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 6
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 7
+ return _mm512_set1_epi64(d);
+}
+TEST_CONSTEXPR(match_v8di(_mm512_set1_epi64(-42), -42, -42, -42, -42, -42, -42, -42, -42));
__m512i test_mm512_set4_epi32 (int __A, int __B, int __C, int __D)
{
@@ -9046,6 +9212,7 @@ __m512i test_mm512_set4_epi32 (int __A, int __B, int __C, int __D)
// CHECK: insertelement <16 x i32> {{.*}}, i32 15
return _mm512_set4_epi32 (__A,__B,__C,__D);
}
+TEST_CONSTEXPR(match_v16si(_mm512_set4_epi32(10, 20, 30, 40), 40, 30, 20, 10, 40, 30, 20, 10, 40, 30, 20, 10, 40, 30, 20, 10));
__m512i test_mm512_set4_epi64 (long long __A, long long __B, long long __C, long long __D)
{
@@ -9053,6 +9220,7 @@ __m512i test_mm512_set4_epi64 (long long __A, long long __B, long long __C, long
// CHECK: insertelement <8 x i64> {{.*}}, i32 7
return _mm512_set4_epi64 (__A,__B,__C,__D);
}
+TEST_CONSTEXPR(match_v8di(_mm512_set4_epi64(1, -3, 5, -7), -7, 5, -3, 1, -7, 5, -3, 1));
__m512d test_mm512_set4_pd (double __A, double __B, double __C, double __D)
{
@@ -9060,6 +9228,7 @@ __m512d test_mm512_set4_pd (double __A, double __B, double __C, double __D)
// CHECK: insertelement <8 x double> {{.*}}, i32 7
return _mm512_set4_pd (__A,__B,__C,__D);
}
+TEST_CONSTEXPR(match_m512d(_mm512_set4_pd(10.0, 20.0, 30.0, 40.0), 40.0, 30.0, 20.0, 10.0, 40.0, 30.0, 20.0, 10.0));
__m512 test_mm512_set4_ps (float __A, float __B, float __C, float __D)
{
@@ -9067,6 +9236,7 @@ __m512 test_mm512_set4_ps (float __A, float __B, float __C, float __D)
// CHECK: insertelement <16 x float> {{.*}}, i32 15
return _mm512_set4_ps (__A,__B,__C,__D);
}
+TEST_CONSTEXPR(match_m512(_mm512_set4_ps(1.1f, 2.2f, 3.3f, 4.4f), 4.4f, 3.3f, 2.2f, 1.1f, 4.4f, 3.3f, 2.2f, 1.1f, 4.4f, 3.3f, 2.2f, 1.1f, 4.4f, 3.3f, 2.2f, 1.1f));
__m512i test_mm512_setr4_epi32(int e0, int e1, int e2, int e3)
{
@@ -9118,6 +9288,8 @@ __m256 test_mm512_castps512_ps256 (__m512 __A)
return _mm512_castps512_ps256 (__A);
}
+TEST_CONSTEXPR(match_m256(_mm512_castps512_ps256((__m512){0.0f, 1.0f, 2.0f, 4.0f, -8.0f, -16.0f, -32.0f, -64.0f, -128.0f, -256.0f, -512.0f, -1024.0f, -2048.0f, -4096.0f, -8192.0f, -16384.0f}), 0.0f, 1.0f, 2.0f, 4.0f, -8.0f, -16.0f, -32.0f, -64.0f));
+
__m512i test_mm512_castps_si512 (__m512 __A)
{
// CHECK-LABEL: test_mm512_castps_si512
@@ -9170,6 +9342,8 @@ __m256i test_mm512_castsi512_si256 (__m512i __A)
return _mm512_castsi512_si256 (__A);
}
+TEST_CONSTEXPR(match_v8si(_mm512_castsi512_si256((__m512i)(__v16si){0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384}), 0, 1, 2, 4, 8, 16, 32, 64));
+
__m128 test_mm_cvt_roundsd_ss(__m128 __A, __m128d __B) {
// CHECK-LABEL: test_mm_cvt_roundsd_ss
// CHECK: @llvm.x86.avx512.mask.cvtsd2ss.round
@@ -9317,6 +9491,8 @@ __m512 test_mm512_cvtepu32_ps (__m512i __A)
return _mm512_cvtepu32_ps (__A);
}
+TEST_CONSTEXPR(match_m512(_mm512_cvtepu32_ps((__m512i)(__v16su){0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384}), 0.0f, 1.0f, 2.0f, 4.0f, 8.0f, 16.0f, 32.0f, 64.0f, 128.0f, 256.0f, 512.0f, 1024.0f, 2048.0f, 4096.0f, 8192.0f, 16384.0f));
+
__m512 test_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A)
{
// CHECK-LABEL: test_mm512_mask_cvtepu32_ps
@@ -9325,6 +9501,8 @@ __m512 test_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A)
return _mm512_mask_cvtepu32_ps (__W,__U,__A);
}
+TEST_CONSTEXPR(match_m512(_mm512_mask_cvtepu32_ps(_mm512_set1_ps(-777.0f), /*1010 1100 1010 1101=*/0xacad, (__m512i)(__v16su){0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384}), 0.0f, -777.0f, 2.0f, 4.0f, -777.0f, 16.0f, -777.0f, 64.0f, -777.0f, -777.0f, 512.0f, 1024.0f, -777.0f, 4096.0f, -777.0f, 16384.0f));
+
__m512 test_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A)
{
// CHECK-LABEL: test_mm512_maskz_cvtepu32_ps
@@ -9333,6 +9511,8 @@ __m512 test_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A)
return _mm512_maskz_cvtepu32_ps (__U,__A);
}
+TEST_CONSTEXPR(match_m512(_mm512_maskz_cvtepu32_ps(/*1010 1100 1010 1101=*/0xacad, (__m512i)(__v16su){0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384}), 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 16.0f, 0.0f, 64.0f, 0.0f, 0.0f, 512.0f, 1024.0f, 0.0f, 4096.0f, 0.0f, 16384.0f));
+
__m512d test_mm512_cvtepi32_pd (__m256i __A)
{
// CHECK-LABEL: test_mm512_cvtepi32_pd
@@ -9340,6 +9520,8 @@ __m512d test_mm512_cvtepi32_pd (__m256i __A)
return _mm512_cvtepi32_pd (__A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_cvtepi32_pd((__m256i)(__v8si){-8, -4, -2, -1, 0, 1, 2, 4}), -8.0, -4.0, -2.0, -1.0, 0.0, 1.0, 2.0, 4.0));
+
__m512d test_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
{
// CHECK-LABEL: test_mm512_mask_cvtepi32_pd
@@ -9348,6 +9530,8 @@ __m512d test_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
return _mm512_mask_cvtepi32_pd (__W,__U,__A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_mask_cvtepi32_pd(_mm512_set1_pd(-777.0), /*0101 1100=*/0x5c, (__m256i)(__v8si){-8, -4, -2, -1, 0, 1, 2, 4}), -777.0, -777.0, -2.0, -1.0, 0.0, -777.0, 2.0, -777.0));
+
__m512d test_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
{
// CHECK-LABEL: test_mm512_maskz_cvtepi32_pd
@@ -9356,6 +9540,8 @@ __m512d test_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
return _mm512_maskz_cvtepi32_pd (__U,__A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_maskz_cvtepi32_pd(/*0101 1100=*/0x5c, (__m256i)(__v8si){-8, -4, -2, -1, 0, 1, 2, 4}), 0.0, 0.0, -2.0, -1.0, 0.0, 0.0, 2.0, 0.0));
+
__m512d test_mm512_cvtepi32lo_pd (__m512i __A)
{
// CHECK-LABEL: test_mm512_cvtepi32lo_pd
@@ -9364,6 +9550,8 @@ __m512d test_mm512_cvtepi32lo_pd (__m512i __A)
return _mm512_cvtepi32lo_pd (__A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_cvtepi32lo_pd((__m512i)(__v16si){-128, -64, -32, -16, -8, -4, -2, -1, 0, 1, 2, 4, 8, 16, 32, 64}), -128.0, -64.0, -32.0, -16.0, -8.0, -4.0, -2.0, -1.0));
+
__m512d test_mm512_mask_cvtepi32lo_pd (__m512d __W, __mmask8 __U, __m512i __A)
{
// CHECK-LABEL: test_mm512_mask_cvtepi32lo_pd
@@ -9373,6 +9561,8 @@ __m512d test_mm512_mask_cvtepi32lo_pd (__m512d __W, __mmask8 __U, __m512i __A)
return _mm512_mask_cvtepi32lo_pd (__W, __U, __A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_mask_cvtepi32lo_pd(_mm512_set1_pd(-777.0), /*1010 1101=*/0xad, (__m512i)(__v16si){-128, -64, -32, -16, -8, -4, -2, -1, 0, 1, 2, 4, 8, 16, 32, 64}), -128.0, -777.0, -32.0, -16.0, -777.0, -4.0, -777.0, -1.0));
+
__m512 test_mm512_cvtepi32_ps (__m512i __A)
{
// CHECK-LABEL: test_mm512_cvtepi32_ps
@@ -9380,6 +9570,8 @@ __m512 test_mm512_cvtepi32_ps (__m512i __A)
return _mm512_cvtepi32_ps (__A);
}
+TEST_CONSTEXPR(match_m512(_mm512_cvtepi32_ps((__m512i)(__v16si){-128, -64, -32, -16, -8, -4, -2, -1, 0, 1, 2, 4, 8, 16, 32, 64}), -128.0f, -64.0f, -32.0f, -16.0f, -8.0f, -4.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 4.0f, 8.0f, 16.0f, 32.0f, 64.0f));
+
__m512 test_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A)
{
// CHECK-LABEL: test_mm512_mask_cvtepi32_ps
@@ -9388,6 +9580,8 @@ __m512 test_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A)
return _mm512_mask_cvtepi32_ps (__W,__U,__A);
}
+TEST_CONSTEXPR(match_m512(_mm512_mask_cvtepi32_ps(_mm512_set1_ps(-777.0f), /*1010 1100 1010 1101=*/0xacad, (__m512i)(__v16si){-128, -64, -32, -16, -8, -4, -2, -1, 0, 1, 2, 4, 8, 16, 32, 64}), -128.0f, -777.0f, -32.0f, -16.0f, -777.0f, -4.0f, -777.0f, -1.0f, -777.0f, -777.0f, 2.0f, 4.0f, -777.0f, 16.0f, -777.0f, 64.0f));
+
__m512 test_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A)
{
// CHECK-LABEL: test_mm512_maskz_cvtepi32_ps
@@ -9396,6 +9590,8 @@ __m512 test_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A)
return _mm512_maskz_cvtepi32_ps (__U,__A);
}
+TEST_CONSTEXPR(match_m512(_mm512_maskz_cvtepi32_ps(/*1010 1100 1010 1101=*/0xacad, (__m512i)(__v16si){-128, -64, -32, -16, -8, -4, -2, -1, 0, 1, 2, 4, 8, 16, 32, 64}), -128.0f, 0.0f, -32.0f, -16.0f, 0.0f, -4.0f, 0.0f, -1.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 16.0f, 0.0f, 64.0f));
+
__m512d test_mm512_cvtepu32_pd(__m256i __A)
{
// CHECK-LABEL: test_mm512_cvtepu32_pd
@@ -9403,6 +9599,8 @@ __m512d test_mm512_cvtepu32_pd(__m256i __A)
return _mm512_cvtepu32_pd(__A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_cvtepu32_pd((__m256i)(__v8su){0, 1, 2, 4, 8, 16, 32, 64}), 0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0));
+
__m512d test_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
{
// CHECK-LABEL: test_mm512_mask_cvtepu32_pd
@@ -9411,6 +9609,8 @@ __m512d test_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
return _mm512_mask_cvtepu32_pd (__W,__U,__A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_mask_cvtepu32_pd(_mm512_set1_pd(-777.0), /*0101 1100=*/0x5c, (__m256i)(__v8su){0, 1, 2, 4, 8, 16, 32, 64}), -777.0, -777.0, 2.0, 4.0, 8.0, -777.0, 32.0, -777.0));
+
__m512d test_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
{
// CHECK-LABEL: test_mm512_maskz_cvtepu32_pd
@@ -9419,6 +9619,8 @@ __m512d test_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
return _mm512_maskz_cvtepu32_pd (__U,__A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_maskz_cvtepu32_pd(/*0101 1100=*/0x5c, (__m256i)(__v8su){0, 1, 2, 4, 8, 16, 32, 64}), 0.0, 0.0, 2.0, 4.0, 8.0, 0.0, 32.0, 0.0));
+
__m512d test_mm512_cvtepu32lo_pd (__m512i __A)
{
// CHECK-LABEL: test_mm512_cvtepu32lo_pd
@@ -9427,6 +9629,9 @@ __m512d test_mm512_cvtepu32lo_pd (__m512i __A)
return _mm512_cvtepu32lo_pd (__A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_cvtepu32lo_pd((__m512i)(__v16su){0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384}), 0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0));
+
+
__m512d test_mm512_mask_cvtepu32lo_pd (__m512d __W, __mmask8 __U, __m512i __A)
{
// CHECK-LABEL: test_mm512_mask_cvtepu32lo_pd
@@ -9436,6 +9641,8 @@ __m512d test_mm512_mask_cvtepu32lo_pd (__m512d __W, __mmask8 __U, __m512i __A)
return _mm512_mask_cvtepu32lo_pd (__W, __U, __A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_mask_cvtepu32lo_pd(_mm512_set1_pd(-777.0), /*1010 1101=*/0xad, (__m512i)(__v16su){0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384}), 0.0, -777.0, 2.0, 4.0, -777.0, 16.0, -777.0, 64.0));
+
__m256 test_mm512_cvtpd_ps (__m512d __A)
{
// CHECK-LABEL: test_mm512_cvtpd_ps
@@ -10206,6 +10413,11 @@ __m512i test_mm512_set_epi8(char e63, char e62, char e61, char e60, char e59,
e25, e24, e23, e22, e21, e20, e19, e18, e17, e16, e15, e14, e13, e12,
e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0);
}
+TEST_CONSTEXPR(match_v64qi(_mm512_set_epi8(63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48,
+ 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32,
+ 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63));
__m512i test_mm512_set_epi16(short e31, short e30, short e29, short e28,
short e27, short e26, short e25, short e24, short e23, short e22,
@@ -10249,8 +10461,9 @@ __m512i test_mm512_set_epi16(short e31, short e30, short e29, short e28,
return _mm512_set_epi16(e31, e30, e29, e28, e27, e26, e25, e24, e23, e22,
e21, e20, e19, e18, e17, e16, e15, e14, e13, e12, e11, e10, e9, e8, e7,
e6, e5, e4, e3, e2, e1, e0);
-
}
+TEST_CONSTEXPR(match_v32hi(_mm512_set_epi16(-31, 30, -29, 28, -27, 26, -25, 24, -23, 22, -21, 20, -19, 18, -17, 16, -15, 14, -13, 12, -11, 10, -9, 8, -7, 6, -5, 4, -3, 2, -1, 0), 0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17, 18, -19, 20, -21, 22, -23, 24, -25, 26, -27, 28, -29, 30, -31));
+
__m512i test_mm512_set_epi32 (int __A, int __B, int __C, int __D,
int __E, int __F, int __G, int __H,
int __I, int __J, int __K, int __L,
@@ -10276,6 +10489,7 @@ __m512i test_mm512_set_epi32 (int __A, int __B, int __C, int __D,
return _mm512_set_epi32( __A, __B, __C, __D,__E, __F, __G, __H,
__I, __J, __K, __L,__M, __N, __O, __P);
}
+TEST_CONSTEXPR(match_v16si(_mm512_set_epi32(-15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0), 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15));
__m512i test_mm512_setr_epi32 (int __A, int __B, int __C, int __D,
int __E, int __F, int __G, int __H,
@@ -10318,6 +10532,7 @@ __m512i test_mm512_setr_epi32 (int __A, int __B, int __C, int __D,
return _mm512_setr_epi32( __A, __B, __C, __D,__E, __F, __G, __H,
__I, __J, __K, __L,__M, __N, __O, __P);
}
+TEST_CONSTEXPR(match_v16si(_mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
__m512i test_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
{
@@ -10365,6 +10580,7 @@ __m512i test_mm512_set_epi64 (long long __A, long long __B, long long __C,
//CHECK: insertelement{{.*}}i32 7
return _mm512_set_epi64(__A, __B, __C, __D, __E, __F, __G, __H );
}
+TEST_CONSTEXPR(match_v8di(_mm512_set_epi64(-15, 13, -11, 9, -7, 5, -3, 1), 1, -3, 5, -7, 9, -11, 13, -15));
__m512i test_mm512_setr_epi64 (long long __A, long long __B, long long __C,
long long __D, long long __E, long long __F,
@@ -10389,6 +10605,7 @@ __m512i test_mm512_setr_epi64 (long long __A, long long __B, long long __C,
//CHECK: insertelement{{.*}}i32 7
return _mm512_setr_epi64(__A, __B, __C, __D, __E, __F, __G, __H );
}
+TEST_CONSTEXPR(match_v8di(_mm512_setr_epi64(-1, 3, -5, 7, -9, 11, -13, 15), -1, 3, -5, 7, -9, 11, -13, 15));
__m512d test_mm512_set_pd (double __A, double __B, double __C, double __D,
double __E, double __F, double __G, double __H)
@@ -10404,6 +10621,7 @@ __m512d test_mm512_set_pd (double __A, double __B, double __C, double __D,
//CHECK: insertelement{{.*}}i32 7
return _mm512_set_pd( __A, __B, __C, __D, __E, __F, __G, __H);
}
+TEST_CONSTEXPR(match_m512d(_mm512_set_pd(20.0, 40.0, 60.0, 80.0, 100.0, 120.0, 140.0, 160.0), 160.0, 140.0, 120.0, 100.0, 80.0, 60.0, 40.0, 20.0));
__m512d test_mm512_setr_pd (double __A, double __B, double __C, double __D,
double __E, double __F, double __G, double __H)
@@ -10427,6 +10645,7 @@ __m512d test_mm512_setr_pd (double __A, double __B, double __C, double __D,
//CHECK: insertelement{{.*}}i32 7
return _mm512_setr_pd( __A, __B, __C, __D, __E, __F, __G, __H);
}
+TEST_CONSTEXPR(match_m512d(_mm512_setr_pd(-20.0, 40.0, -60.0, 80.0, -100.0, 120.0, -140.0, 160.0), -20.0, 40.0, -60.0, 80.0, -100.0, 120.0, -140.0, 160.0));
__m512 test_mm512_set_ps (float __A, float __B, float __C, float __D,
float __E, float __F, float __G, float __H,
@@ -10453,6 +10672,9 @@ __m512 test_mm512_set_ps (float __A, float __B, float __C, float __D,
return _mm512_set_ps( __A, __B, __C, __D, __E, __F, __G, __H,
__I, __J, __K, __L, __M, __N, __O, __P);
}
+TEST_CONSTEXPR(match_m512(_mm512_set_ps(-16.0f, 15.0f, -14.0f, 13.0f, -12.0f, 11.0f, -10.0f, 9.0f, -8.0f, 7.0f, -6.0f, 5.0f, -4.0f, 3.0f, -2.0f, 1.0f), 1.0f, -2.0f, 3.0f, -4.0f, 5.0f, -6.0f, 7.0f, -8.0f, 9.0f, -10.0f, 11.0f, -12.0f, 13.0f, -14.0f, 15.0f, -16.0f));
+
+TEST_CONSTEXPR(match_v8di(_mm512_abs_epi64((__m512i)(__v8di){-1, 2, 2, 2, 2, 2, 2, 2}), 1, 2, 2, 2, 2, 2, 2, 2));
__m512i test_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
{
@@ -10461,6 +10683,7 @@ __m512i test_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> [[ABS]], <8 x i64> %{{.*}}
return _mm512_mask_abs_epi64 (__W,__U,__A);
}
+TEST_CONSTEXPR(match_v8di(_mm512_mask_abs_epi64((__m512i)(__v8di){99, 99, 99, 99, 99, 99, 99, 99}, (__mmask8)0x01, (__m512i)(__v8di){-1, 2, 2, 2, 2, 2, 2, 2}), 1, 99, 99, 99, 99, 99, 99, 99));
__m512i test_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
{
@@ -10469,6 +10692,9 @@ __m512i test_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> [[ABS]], <8 x i64> %{{.*}}
return _mm512_maskz_abs_epi64 (__U,__A);
}
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_abs_epi64((__mmask8)0x01, (__m512i)(__v8di){-1, 2, 2, 2, 2, 2, 2, 2}), 1, 0, 0, 0, 0, 0, 0, 0));
+
+TEST_CONSTEXPR(match_v16si(_mm512_abs_epi32((__m512i)(__v16si){-1, 2, 2, 2, 2, 2, 2, 2, -1, 2, 2, 2, 2, 2, 2, 2}), 1, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2));
__m512i test_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
{
@@ -10479,6 +10705,7 @@ __m512i test_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> [[ABS]], <16 x i32> %{{.*}}
return _mm512_mask_abs_epi32 (__W,__U,__A);
}
+TEST_CONSTEXPR(match_v16si(_mm512_mask_abs_epi32((__m512i)(__v16si){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, (__mmask16)0x0001, (__m512i)(__v16si){-1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}), 1, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99));
__m512i test_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A)
{
@@ -10489,6 +10716,7 @@ __m512i test_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A)
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> [[ABS]], <16 x i32> %{{.*}}
return _mm512_maskz_abs_epi32 (__U,__A);
}
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_abs_epi32((__mmask16)0x0001, (__m512i)(__v16si){-1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}), 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
__m512 test_mm512_setr_ps (float __A, float __B, float __C, float __D,
float __E, float __F, float __G, float __H,
@@ -10531,6 +10759,7 @@ __m512 test_mm512_setr_ps (float __A, float __B, float __C, float __D,
return _mm512_setr_ps( __A, __B, __C, __D, __E, __F, __G, __H,
__I, __J, __K, __L, __M, __N, __O, __P);
}
+TEST_CONSTEXPR(match_m512(_mm512_setr_ps(-1.0f, 2.0f, -3.0f, 4.0f, -5.0f, 6.0f, -7.0f, 8.0f, -9.0f, 10.0f, -11.0f, 12.0f, -13.0f, 14.0f, -15.0f, 16.0f), -1.0f, 2.0f, -3.0f, 4.0f, -5.0f, 6.0f, -7.0f, 8.0f, -9.0f, 10.0f, -11.0f, 12.0f, -13.0f, 14.0f, -15.0f, 16.0f));
int test_mm_cvtss_i32(__m128 A) {
// CHECK-LABEL: test_mm_cvtss_i32
@@ -10773,6 +11002,7 @@ __m512d test_mm512_abs_pd(__m512d a){
// CHECK: and <8 x i64>
return _mm512_abs_pd(a);
}
+TEST_CONSTEXPR(match_m512d(_mm512_abs_pd((__m512d){-1.0, 2.0, -3.0, 4.0, -5.0, 6.0, -7.0, 8.0}), 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0));
__m512d test_mm512_mask_abs_pd (__m512d __W, __mmask8 __U, __m512d __A){
// CHECK-LABEL: test_mm512_mask_abs_pd
@@ -10781,12 +11011,14 @@ __m512d test_mm512_mask_abs_pd (__m512d __W, __mmask8 __U, __m512d __A){
// CHECK: select <8 x i1> %[[MASK]], <8 x i64> %[[AND_RES]], <8 x i64> %{{.*}}
return _mm512_mask_abs_pd (__W,__U,__A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_mask_abs_pd((__m512d){99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0}, (__mmask8)0x01, (__m512d){-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}), 1.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0));
__m512 test_mm512_abs_ps(__m512 a){
// CHECK-LABEL: test_mm512_abs_ps
// CHECK: and <16 x i32>
return _mm512_abs_ps(a);
}
+TEST_CONSTEXPR(match_m512(_mm512_abs_ps((__m512){-1.0f, 2.0f, -3.0f, 4.0f, -5.0f, 6.0f, -7.0f, 8.0f, -9.0f, 10.0f, -11.0f, 12.0f, -13.0f, 14.0f, -15.0f, -16.0f}), 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f));
__m512 test_mm512_mask_abs_ps(__m512 __W, __mmask16 __U, __m512 __A){
// CHECK-LABEL: test_mm512_mask_abs_ps
@@ -10795,6 +11027,7 @@ __m512 test_mm512_mask_abs_ps(__m512 __W, __mmask16 __U, __m512 __A){
// CHECK: select <16 x i1> %[[MASK]], <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_abs_ps( __W, __U, __A);
}
+TEST_CONSTEXPR(match_m512(_mm512_mask_abs_ps((__m512){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, (__mmask16)0x0001, (__m512){-1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}), 1, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99));
__m512d test_mm512_zextpd128_pd512(__m128d A) {
// CHECK-LABEL: test_mm512_zextpd128_pd512
diff --git a/clang/test/CodeGen/X86/avx512fp16-builtins.c b/clang/test/CodeGen/X86/avx512fp16-builtins.c
index d277d05..1dbbbe2 100644
--- a/clang/test/CodeGen/X86/avx512fp16-builtins.c
+++ b/clang/test/CodeGen/X86/avx512fp16-builtins.c
@@ -1,51 +1,64 @@
-// RUN: %clang_cc1 -ffreestanding -flax-vector-conversions=none %s -triple=x86_64-unknown-unknown -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding -flax-vector-conversions=none %s -triple=x86_64-unknown-unknown -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -ffreestanding -flax-vector-conversions=none %s -triple=i686-unknown-unknown -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_cc1 -x c++ -ffreestanding -flax-vector-conversions=none %s -triple=x86_64-unknown-unknown -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -ffreestanding -flax-vector-conversions=none %s -triple=i686-unknown-unknown -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK
+
+// RUN: %clang_cc1 -x c -ffreestanding -flax-vector-conversions=none %s -triple=x86_64-unknown-unknown -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -ffreestanding -flax-vector-conversions=none %s -triple=i686-unknown-unknown -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_cc1 -x c++ -ffreestanding -flax-vector-conversions=none %s -triple=x86_64-unknown-unknown -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -ffreestanding -flax-vector-conversions=none %s -triple=i686-unknown-unknown -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK
+
#include <immintrin.h>
+#include "builtin_test_helpers.h"
_Float16 test_mm512_cvtsh_h(__m512h __A) {
- // CHECK-LABEL: @test_mm512_cvtsh_h
+ // CHECK-LABEL: test_mm512_cvtsh_h
// CHECK: extractelement <32 x half> %{{.*}}, i32 0
return _mm512_cvtsh_h(__A);
}
__m128h test_mm_setzero_ph(void) {
- // CHECK-LABEL: @test_mm_setzero_ph
+ // CHECK-LABEL: test_mm_setzero_ph
// CHECK: zeroinitializer
return _mm_setzero_ph();
}
+TEST_CONSTEXPR(match_m128h(_mm_setzero_ph(), +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f));
__m256h test_mm256_setzero_ph(void) {
- // CHECK-LABEL: @test_mm256_setzero_ph
+ // CHECK-LABEL: test_mm256_setzero_ph
// CHECK: zeroinitializer
return _mm256_setzero_ph();
}
+TEST_CONSTEXPR(match_m256h(_mm256_setzero_ph(), +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f));
__m256h test_mm256_undefined_ph(void) {
- // CHECK-LABEL: @test_mm256_undefined_ph
+ // CHECK-LABEL: test_mm256_undefined_ph
// CHECK: ret <16 x half> zeroinitializer
return _mm256_undefined_ph();
}
__m512h test_mm512_setzero_ph(void) {
- // CHECK-LABEL: @test_mm512_setzero_ph
+ // CHECK-LABEL: test_mm512_setzero_ph
// CHECK: zeroinitializer
return _mm512_setzero_ph();
}
+TEST_CONSTEXPR(match_m512h(_mm512_setzero_ph(), +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f));
__m128h test_mm_undefined_ph(void) {
- // CHECK-LABEL: @test_mm_undefined_ph
+ // CHECK-LABEL: test_mm_undefined_ph
// CHECK: ret <8 x half> zeroinitializer
return _mm_undefined_ph();
}
__m512h test_mm512_undefined_ph(void) {
- // CHECK-LABEL: @test_mm512_undefined_ph
+ // CHECK-LABEL: test_mm512_undefined_ph
// CHECK: ret <32 x half> zeroinitializer
return _mm512_undefined_ph();
}
__m512h test_mm512_set1_ph(_Float16 h) {
- // CHECK-LABEL: @test_mm512_set1_ph
+ // CHECK-LABEL: test_mm512_set1_ph
// CHECK: insertelement <32 x half> {{.*}}, i32 0
// CHECK: insertelement <32 x half> {{.*}}, i32 1
// CHECK: insertelement <32 x half> {{.*}}, i32 2
@@ -80,9 +93,10 @@ __m512h test_mm512_set1_ph(_Float16 h) {
// CHECK: insertelement <32 x half> {{.*}}, i32 31
return _mm512_set1_ph(h);
}
+TEST_CONSTEXPR(match_m512h(_mm512_set1_ph(-101.0f), -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f, -101.0f));
__m512h test_mm512_set1_pch(_Float16 _Complex h) {
- // CHECK-LABEL: @test_mm512_set1_pch
+ // CHECK-LABEL: test_mm512_set1_pch
// CHECK: insertelement <16 x float> {{.*}}, i32 0
// CHECK: insertelement <16 x float> {{.*}}, i32 1
// CHECK: insertelement <16 x float> {{.*}}, i32 2
@@ -111,7 +125,7 @@ __m512h test_mm512_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16
_Float16 __h21, _Float16 __h22, _Float16 __h23, _Float16 __h24,
_Float16 __h25, _Float16 __h26, _Float16 __h27, _Float16 __h28,
_Float16 __h29, _Float16 __h30, _Float16 __h31, _Float16 __h32) {
- // CHECK-LABEL: @test_mm512_set_ph
+ // CHECK-LABEL: test_mm512_set_ph
// CHECK: insertelement <32 x half> {{.*}}, i32 0
// CHECK: insertelement <32 x half> {{.*}}, i32 1
// CHECK: insertelement <32 x half> {{.*}}, i32 2
@@ -149,6 +163,13 @@ __m512h test_mm512_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16
__h17, __h18, __h19, __h20, __h21, __h22, __h23, __h24,
__h25, __h26, __h27, __h28, __h29, __h30, __h31, __h32);
}
+TEST_CONSTEXPR(match_m512h(_mm512_set_ph(32.0f, -31.0f, 30.0f, -29.0f, 28.0f, -27.0f, 26.0f, -25.0f,
+ 24.0f, -23.0f, 22.0f, -21.0f, 20.0f, -19.0f, 18.0f, -17.0f,
+ 16.0f, -15.0f, 14.0f, -13.0f, 12.0f, -11.0f, 10.0f, -9.0f,
+ 8.0f, -7.0f, 6.0f, -5.0f, 4.0f, -3.0f, 2.0f, -1.0f), -1.0f, 2.0f, -3.0f, 4.0f, -5.0f, 6.0f, -7.0f, 8.0f,
+ -9.0f, 10.0f, -11.0f, 12.0f, -13.0f, 14.0f, -15.0f, 16.0f,
+ -17.0f, 18.0f, -19.0f, 20.0f, -21.0f, 22.0f, -23.0f, 24.0f,
+ -25.0f, 26.0f, -27.0f, 28.0f, -29.0f, 30.0f, -31.0f, 32.0f));
__m512h test_mm512_setr_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4,
_Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8,
@@ -158,7 +179,7 @@ __m512h test_mm512_setr_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16
_Float16 __h21, _Float16 __h22, _Float16 __h23, _Float16 __h24,
_Float16 __h25, _Float16 __h26, _Float16 __h27, _Float16 __h28,
_Float16 __h29, _Float16 __h30, _Float16 __h31, _Float16 __h32) {
- // CHECK-LABEL: @test_mm512_setr_ph
+ // CHECK-LABEL: test_mm512_setr_ph
// CHECK: insertelement <32 x half> {{.*}}, i32 0
// CHECK: insertelement <32 x half> {{.*}}, i32 1
// CHECK: insertelement <32 x half> {{.*}}, i32 2
@@ -196,6 +217,14 @@ __m512h test_mm512_setr_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16
__h17, __h18, __h19, __h20, __h21, __h22, __h23, __h24,
__h25, __h26, __h27, __h28, __h29, __h30, __h31, __h32);
}
+TEST_CONSTEXPR(match_m512h(_mm512_setr_ph(1.0f, -3.0f, 5.0f, -7.0f, 9.0f, -11.0f, 13.0f, -15.0f,
+ 17.0f, -19.0f, 21.0f, -23.0f, 25.0f, -27.0f, 29.0f, -31.0f,
+ 33.0f, -35.0f, 37.0f, -39.0f, 41.0f, -43.0f, 45.0f, -47.0f,
+ 49.0f, -51.0f, 53.0f, -55.0f, 57.0f, -59.0f, 61.0f, -63.0f),
+ 1.0f, -3.0f, 5.0f, -7.0f, 9.0f, -11.0f, 13.0f, -15.0f,
+ 17.0f, -19.0f, 21.0f, -23.0f, 25.0f, -27.0f, 29.0f, -31.0f,
+ 33.0f, -35.0f, 37.0f, -39.0f, 41.0f, -43.0f, 45.0f, -47.0f,
+ 49.0f, -51.0f, 53.0f, -55.0f, 57.0f, -59.0f, 61.0f, -63.0f));
__m128 test_mm_castph_ps(__m128h A) {
// CHECK-LABEL: test_mm_castph_ps
@@ -368,327 +397,328 @@ __m512h test_mm512_zextph256_ph512(__m256h __a) {
int test_mm_comi_round_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_comi_round_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 0, i32 8)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 0, i32 8)
return _mm_comi_round_sh(__A, __B, 0, _MM_FROUND_NO_EXC);
}
int test_mm_comi_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_comi_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 0, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 0, i32 4)
return _mm_comi_sh(__A, __B, 0);
}
int test_mm_comieq_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_comieq_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 16, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 16, i32 4)
return _mm_comieq_sh(__A, __B);
}
int test_mm_comilt_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_comilt_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 1, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 1, i32 4)
return _mm_comilt_sh(__A, __B);
}
int test_mm_comile_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_comile_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 2, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 2, i32 4)
return _mm_comile_sh(__A, __B);
}
int test_mm_comigt_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_comigt_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 14, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 14, i32 4)
return _mm_comigt_sh(__A, __B);
}
int test_mm_comige_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_comige_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 13, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 13, i32 4)
return _mm_comige_sh(__A, __B);
}
int test_mm_comineq_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_comineq_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 20, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 20, i32 4)
return _mm_comineq_sh(__A, __B);
}
int test_mm_ucomieq_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_ucomieq_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 0, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 0, i32 4)
return _mm_ucomieq_sh(__A, __B);
}
int test_mm_ucomilt_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_ucomilt_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 17, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 17, i32 4)
return _mm_ucomilt_sh(__A, __B);
}
int test_mm_ucomile_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_ucomile_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 18, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 18, i32 4)
return _mm_ucomile_sh(__A, __B);
}
int test_mm_ucomigt_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_ucomigt_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 30, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 30, i32 4)
return _mm_ucomigt_sh(__A, __B);
}
int test_mm_ucomige_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_ucomige_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 29, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 29, i32 4)
return _mm_ucomige_sh(__A, __B);
}
int test_mm_ucomineq_sh(__m128h __A, __m128h __B) {
// CHECK-LABEL: test_mm_ucomineq_sh
- // CHECK: %{{.}} = call i32 @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 4, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vcomi.sh(<8 x half> %{{.}}, <8 x half> %{{.}}, i32 4, i32 4)
return _mm_ucomineq_sh(__A, __B);
}
__m512h test_mm512_add_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_add_ph
+ // CHECK-LABEL: test_mm512_add_ph
// CHECK: %{{.*}} = fadd <32 x half> %{{.*}}, %{{.*}}
return _mm512_add_ph(__A, __B);
}
__m512h test_mm512_mask_add_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_add_ph
+ // CHECK-LABEL: test_mm512_mask_add_ph
// CHECK: %{{.*}} = fadd <32 x half> %{{.*}}, %{{.*}}
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return (__m512h)_mm512_mask_add_ph(__W, __U, __A, __B);
}
__m512h test_mm512_maskz_add_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_add_ph
+ // CHECK-LABEL: test_mm512_maskz_add_ph
// CHECK: %{{.*}} = fadd <32 x half> %{{.*}}, %{{.*}}
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_maskz_add_ph(__U, __A, __B);
}
__m512h test_mm512_add_round_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_add_round_ph
+ // CHECK-LABEL: test_mm512_add_round_ph
// CHECK: @llvm.x86.avx512fp16.add.ph.512
return _mm512_add_round_ph(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_add_round_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_add_round_ph
+ // CHECK-LABEL: test_mm512_mask_add_round_ph
// CHECK: @llvm.x86.avx512fp16.add.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_add_round_ph(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_add_round_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_add_round_ph
+ // CHECK-LABEL: test_mm512_maskz_add_round_ph
// CHECK: @llvm.x86.avx512fp16.add.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_maskz_add_round_ph(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_sub_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_sub_ph
+ // CHECK-LABEL: test_mm512_sub_ph
// CHECK: %{{.*}} = fsub <32 x half> %{{.*}}, %{{.*}}
return _mm512_sub_ph(__A, __B);
}
__m512h test_mm512_mask_sub_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_sub_ph
+ // CHECK-LABEL: test_mm512_mask_sub_ph
// CHECK: %{{.*}} = fsub <32 x half> %{{.*}}, %{{.*}}
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return (__m512h)_mm512_mask_sub_ph(__W, __U, __A, __B);
}
__m512h test_mm512_maskz_sub_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_sub_ph
+ // CHECK-LABEL: test_mm512_maskz_sub_ph
// CHECK: %{{.*}} = fsub <32 x half> %{{.*}}, %{{.*}}
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_maskz_sub_ph(__U, __A, __B);
}
__m512h test_mm512_sub_round_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_sub_round_ph
+ // CHECK-LABEL: test_mm512_sub_round_ph
// CHECK: @llvm.x86.avx512fp16.sub.ph.512
return _mm512_sub_round_ph(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_sub_round_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_sub_round_ph
+ // CHECK-LABEL: test_mm512_mask_sub_round_ph
// CHECK: @llvm.x86.avx512fp16.sub.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_sub_round_ph(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_sub_round_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_sub_round_ph
+ // CHECK-LABEL: test_mm512_maskz_sub_round_ph
// CHECK: @llvm.x86.avx512fp16.sub.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_maskz_sub_round_ph(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mul_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mul_ph
+ // CHECK-LABEL: test_mm512_mul_ph
// CHECK: %{{.*}} = fmul <32 x half> %{{.*}}, %{{.*}}
return _mm512_mul_ph(__A, __B);
}
__m512h test_mm512_mask_mul_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_mul_ph
+ // CHECK-LABEL: test_mm512_mask_mul_ph
// CHECK: %{{.*}} = fmul <32 x half> %{{.*}}, %{{.*}}
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return (__m512h)_mm512_mask_mul_ph(__W, __U, __A, __B);
}
__m512h test_mm512_maskz_mul_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_mul_ph
+ // CHECK-LABEL: test_mm512_maskz_mul_ph
// CHECK: %{{.*}} = fmul <32 x half> %{{.*}}, %{{.*}}
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_maskz_mul_ph(__U, __A, __B);
}
__m512h test_mm512_mul_round_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mul_round_ph
+ // CHECK-LABEL: test_mm512_mul_round_ph
// CHECK: @llvm.x86.avx512fp16.mul.ph.512
return _mm512_mul_round_ph(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_mul_round_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_mul_round_ph
+ // CHECK-LABEL: test_mm512_mask_mul_round_ph
// CHECK: @llvm.x86.avx512fp16.mul.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_mul_round_ph(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_mul_round_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_mul_round_ph
+ // CHECK-LABEL: test_mm512_maskz_mul_round_ph
// CHECK: @llvm.x86.avx512fp16.mul.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_maskz_mul_round_ph(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_div_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_div_ph
+ // CHECK-LABEL: test_mm512_div_ph
// CHECK: %{{.*}} = fdiv <32 x half> %{{.*}}, %{{.*}}
return _mm512_div_ph(__A, __B);
}
__m512h test_mm512_mask_div_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_div_ph
+ // CHECK-LABEL: test_mm512_mask_div_ph
// CHECK: %{{.*}} = fdiv <32 x half> %{{.*}}, %{{.*}}
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return (__m512h)_mm512_mask_div_ph(__W, __U, __A, __B);
}
__m512h test_mm512_maskz_div_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_div_ph
+ // CHECK-LABEL: test_mm512_maskz_div_ph
// CHECK: %{{.*}} = fdiv <32 x half> %{{.*}}, %{{.*}}
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_maskz_div_ph(__U, __A, __B);
}
__m512h test_mm512_div_round_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_div_round_ph
+ // CHECK-LABEL: test_mm512_div_round_ph
// CHECK: @llvm.x86.avx512fp16.div.ph.512
return _mm512_div_round_ph(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_div_round_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_div_round_ph
+ // CHECK-LABEL: test_mm512_mask_div_round_ph
// CHECK: @llvm.x86.avx512fp16.div.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_div_round_ph(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_div_round_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_div_round_ph
+ // CHECK-LABEL: test_mm512_maskz_div_round_ph
// CHECK: @llvm.x86.avx512fp16.div.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_maskz_div_round_ph(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_min_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_min_ph
+ // CHECK-LABEL: test_mm512_min_ph
// CHECK: @llvm.x86.avx512fp16.min.ph.512
return _mm512_min_ph(__A, __B);
}
__m512h test_mm512_mask_min_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_min_ph
+ // CHECK-LABEL: test_mm512_mask_min_ph
// CHECK: @llvm.x86.avx512fp16.min.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return (__m512h)_mm512_mask_min_ph(__W, __U, __A, __B);
}
__m512h test_mm512_maskz_min_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_min_ph
+ // CHECK-LABEL: test_mm512_maskz_min_ph
// CHECK: @llvm.x86.avx512fp16.min.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_maskz_min_ph(__U, __A, __B);
}
__m512h test_mm512_min_round_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_min_round_ph
+ // CHECK-LABEL: test_mm512_min_round_ph
// CHECK: @llvm.x86.avx512fp16.min.ph.512
return _mm512_min_round_ph(__A, __B, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_min_round_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_min_round_ph
+ // CHECK-LABEL: test_mm512_mask_min_round_ph
// CHECK: @llvm.x86.avx512fp16.min.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_min_round_ph(__W, __U, __A, __B, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_min_round_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_min_round_ph
+ // CHECK-LABEL: test_mm512_maskz_min_round_ph
// CHECK: @llvm.x86.avx512fp16.min.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_maskz_min_round_ph(__U, __A, __B, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_max_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_max_ph
+ // CHECK-LABEL: test_mm512_max_ph
// CHECK: @llvm.x86.avx512fp16.max.ph.512
return _mm512_max_ph(__A, __B);
}
__m512h test_mm512_mask_max_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_max_ph
+ // CHECK-LABEL: test_mm512_mask_max_ph
// CHECK: @llvm.x86.avx512fp16.max.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return (__m512h)_mm512_mask_max_ph(__W, __U, __A, __B);
}
__m512h test_mm512_maskz_max_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_max_ph
+ // CHECK-LABEL: test_mm512_maskz_max_ph
// CHECK: @llvm.x86.avx512fp16.max.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_maskz_max_ph(__U, __A, __B);
}
__m512h test_mm512_max_round_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_max_round_ph
+ // CHECK-LABEL: test_mm512_max_round_ph
// CHECK: @llvm.x86.avx512fp16.max.ph.512
return _mm512_max_round_ph(__A, __B, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_max_round_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_max_round_ph
+ // CHECK-LABEL: test_mm512_mask_max_round_ph
// CHECK: @llvm.x86.avx512fp16.max.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_max_round_ph(__W, __U, __A, __B, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_max_round_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_max_round_ph
+ // CHECK-LABEL: test_mm512_maskz_max_round_ph
// CHECK: @llvm.x86.avx512fp16.max.ph.512
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_maskz_max_round_ph(__U, __A, __B, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_abs_ph(__m512h a) {
- // CHECK-LABEL: @test_mm512_abs_ph
+ // CHECK-LABEL: test_mm512_abs_ph
// CHECK: and <16 x i32>
return _mm512_abs_ph(a);
}
+TEST_CONSTEXPR(match_m512h(_mm512_abs_ph((__m512h){-1.0, 2.0, -3.0, 4.0, -5.0, 6.0, -7.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0, 14.0, -15.0, -16.0, -17.0, 18.0, -19.0, 20.0, -21.0, 22.0, -23.0, 24.0, -25.0, 26.0, -27.0, 28.0, -29.0, 30.0, -31.0, 32.0}), 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0));
__m512h test_mm512_conj_pch(__m512h __A) {
- // CHECK-LABEL: @test_mm512_conj_pch
+ // CHECK-LABEL: test_mm512_conj_pch
// CHECK: %{{.*}} = bitcast <32 x half> %{{.*}} to <8 x i64>
// CHECK: %{{.*}} = bitcast <8 x i64> %{{.*}} to <16 x i32>
// CHECK: %{{.*}} = bitcast <8 x i64> %{{.*}} to <16 x i32>
@@ -699,7 +729,7 @@ __m512h test_mm512_conj_pch(__m512h __A) {
}
__m512h test_mm512_mask_conj_pch(__m512h __W, __mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_mask_conj_pch
+ // CHECK-LABEL: test_mm512_mask_conj_pch
// CHECK: %{{.*}} = trunc i32 %{{.*}} to i16
// CHECK: %{{.*}} = bitcast <32 x half> %{{.*}} to <8 x i64>
// CHECK: %{{.*}} = bitcast <8 x i64> %{{.*}} to <16 x i32>
@@ -715,7 +745,7 @@ __m512h test_mm512_mask_conj_pch(__m512h __W, __mmask32 __U, __m512h __A) {
}
__m512h test_mm512_maskz_conj_pch(__mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_maskz_conj_pch
+ // CHECK-LABEL: test_mm512_maskz_conj_pch
// CHECK: %{{.*}} = trunc i32 %{{.*}} to i16
// CHECK: %{{.*}} = bitcast <32 x half> %{{.*}} to <8 x i64>
// CHECK: %{{.*}} = bitcast <8 x i64> %{{.*}} to <16 x i32>
@@ -730,22 +760,22 @@ __m512h test_mm512_maskz_conj_pch(__mmask32 __U, __m512h __A) {
}
__m128h test_mm_add_round_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_add_round_sh
+ // CHECK-LABEL: test_mm_add_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.add.sh.round
return _mm_add_round_sh(__A, __B, _MM_FROUND_NO_EXC | _MM_FROUND_TO_ZERO);
}
__m128h test_mm_mask_add_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_add_round_sh
+ // CHECK-LABEL: test_mm_mask_add_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.add.sh.round
return _mm_mask_add_round_sh(__W, __U, __A, __B, _MM_FROUND_NO_EXC | _MM_FROUND_TO_ZERO);
}
__m128h test_mm_maskz_add_round_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_add_round_sh
+ // CHECK-LABEL: test_mm_maskz_add_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.add.sh.round
return _mm_maskz_add_round_sh(__U, __A, __B, _MM_FROUND_NO_EXC | _MM_FROUND_TO_ZERO);
}
__m128h test_mm_mask_add_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_add_sh
+ // CHECK-LABEL: test_mm_mask_add_sh
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = fadd half %{{.*}}, %{{.*}}
@@ -759,7 +789,7 @@ __m128h test_mm_mask_add_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B)
return _mm_mask_add_sh(__W, __U, __A, __B);
}
__m128h test_mm_maskz_add_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_add_sh
+ // CHECK-LABEL: test_mm_maskz_add_sh
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = fadd half %{{.*}}, %{{.*}}
@@ -774,7 +804,7 @@ __m128h test_mm_maskz_add_sh(__mmask8 __U, __m128h __A, __m128h __B) {
}
__m128h test_mm_add_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_add_sh
+ // CHECK-LABEL: test_mm_add_sh
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = fadd half %{{.*}}, %{{.*}}
@@ -783,22 +813,22 @@ __m128h test_mm_add_sh(__m128h __A, __m128h __B) {
}
__m128h test_mm_sub_round_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_sub_round_sh
+ // CHECK-LABEL: test_mm_sub_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.sub.sh.round
return _mm_sub_round_sh(__A, __B, _MM_FROUND_NO_EXC | _MM_FROUND_TO_ZERO);
}
__m128h test_mm_mask_sub_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_sub_round_sh
+ // CHECK-LABEL: test_mm_mask_sub_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.sub.sh.round
return _mm_mask_sub_round_sh(__W, __U, __A, __B, _MM_FROUND_NO_EXC | _MM_FROUND_TO_ZERO);
}
__m128h test_mm_maskz_sub_round_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_sub_round_sh
+ // CHECK-LABEL: test_mm_maskz_sub_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.sub.sh.round
return _mm_maskz_sub_round_sh(__U, __A, __B, _MM_FROUND_NO_EXC | _MM_FROUND_TO_ZERO);
}
__m128h test_mm_mask_sub_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_sub_sh
+ // CHECK-LABEL: test_mm_mask_sub_sh
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = fsub half %{{.*}}, %{{.*}}
@@ -812,7 +842,7 @@ __m128h test_mm_mask_sub_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B)
return _mm_mask_sub_sh(__W, __U, __A, __B);
}
__m128h test_mm_maskz_sub_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_sub_sh
+ // CHECK-LABEL: test_mm_maskz_sub_sh
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = fsub half %{{.*}}, %{{.*}}
@@ -827,7 +857,7 @@ __m128h test_mm_maskz_sub_sh(__mmask8 __U, __m128h __A, __m128h __B) {
}
__m128h test_mm_sub_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_sub_sh
+ // CHECK-LABEL: test_mm_sub_sh
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = fsub half %{{.*}}, %{{.*}}
@@ -836,22 +866,22 @@ __m128h test_mm_sub_sh(__m128h __A, __m128h __B) {
}
__m128h test_mm_mul_round_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mul_round_sh
+ // CHECK-LABEL: test_mm_mul_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.mul.sh.round
return _mm_mul_round_sh(__A, __B, _MM_FROUND_NO_EXC | _MM_FROUND_TO_ZERO);
}
__m128h test_mm_mask_mul_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_mul_round_sh
+ // CHECK-LABEL: test_mm_mask_mul_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.mul.sh.round
return _mm_mask_mul_round_sh(__W, __U, __A, __B, _MM_FROUND_NO_EXC | _MM_FROUND_TO_ZERO);
}
__m128h test_mm_maskz_mul_round_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_mul_round_sh
+ // CHECK-LABEL: test_mm_maskz_mul_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.mul.sh.round
return _mm_maskz_mul_round_sh(__U, __A, __B, _MM_FROUND_NO_EXC | _MM_FROUND_TO_ZERO);
}
__m128h test_mm_mask_mul_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_mul_sh
+ // CHECK-LABEL: test_mm_mask_mul_sh
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = fmul half %{{.*}}, %{{.*}}
@@ -865,7 +895,7 @@ __m128h test_mm_mask_mul_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B)
return _mm_mask_mul_sh(__W, __U, __A, __B);
}
__m128h test_mm_maskz_mul_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_mul_sh
+ // CHECK-LABEL: test_mm_maskz_mul_sh
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = fmul half %{{.*}}, %{{.*}}
@@ -880,7 +910,7 @@ __m128h test_mm_maskz_mul_sh(__mmask8 __U, __m128h __A, __m128h __B) {
}
__m128h test_mm_mul_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mul_sh
+ // CHECK-LABEL: test_mm_mul_sh
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = fmul half %{{.*}}, %{{.*}}
@@ -889,22 +919,22 @@ __m128h test_mm_mul_sh(__m128h __A, __m128h __B) {
}
__m128h test_mm_div_round_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_div_round_sh
+ // CHECK-LABEL: test_mm_div_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.div.sh.round
return _mm_div_round_sh(__A, __B, _MM_FROUND_NO_EXC | _MM_FROUND_TO_ZERO);
}
__m128h test_mm_mask_div_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_div_round_sh
+ // CHECK-LABEL: test_mm_mask_div_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.div.sh.round
return _mm_mask_div_round_sh(__W, __U, __A, __B, _MM_FROUND_NO_EXC | _MM_FROUND_TO_ZERO);
}
__m128h test_mm_maskz_div_round_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_div_round_sh
+ // CHECK-LABEL: test_mm_maskz_div_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.div.sh.round
return _mm_maskz_div_round_sh(__U, __A, __B, _MM_FROUND_NO_EXC | _MM_FROUND_TO_ZERO);
}
__m128h test_mm_mask_div_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_div_sh
+ // CHECK-LABEL: test_mm_mask_div_sh
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = fdiv half %{{.*}}, %{{.*}}
@@ -918,7 +948,7 @@ __m128h test_mm_mask_div_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B)
return _mm_mask_div_sh(__W, __U, __A, __B);
}
__m128h test_mm_maskz_div_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_div_sh
+ // CHECK-LABEL: test_mm_maskz_div_sh
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = fdiv half %{{.*}}, %{{.*}}
@@ -933,7 +963,7 @@ __m128h test_mm_maskz_div_sh(__mmask8 __U, __m128h __A, __m128h __B) {
}
__m128h test_mm_div_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_div_sh
+ // CHECK-LABEL: test_mm_div_sh
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: %{{.*}} = fdiv half %{{.*}}, %{{.*}}
@@ -942,83 +972,83 @@ __m128h test_mm_div_sh(__m128h __A, __m128h __B) {
}
__m128h test_mm_min_round_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_min_round_sh
+ // CHECK-LABEL: test_mm_min_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.min.sh.round
return _mm_min_round_sh(__A, __B, 0x08);
}
__m128h test_mm_mask_min_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_min_round_sh
+ // CHECK-LABEL: test_mm_mask_min_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.min.sh.round
return _mm_mask_min_round_sh(__W, __U, __A, __B, 0x08);
}
__m128h test_mm_maskz_min_round_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_min_round_sh
+ // CHECK-LABEL: test_mm_maskz_min_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.min.sh.round
return _mm_maskz_min_round_sh(__U, __A, __B, 0x08);
}
__m128h test_mm_mask_min_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_min_sh
+ // CHECK-LABEL: test_mm_mask_min_sh
// CHECK: @llvm.x86.avx512fp16.mask.min.sh.round
return _mm_mask_min_sh(__W, __U, __A, __B);
}
__m128h test_mm_maskz_min_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_min_sh
+ // CHECK-LABEL: test_mm_maskz_min_sh
// CHECK: @llvm.x86.avx512fp16.mask.min.sh.round
return _mm_maskz_min_sh(__U, __A, __B);
}
__m128h test_mm_min_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_min_sh
+ // CHECK-LABEL: test_mm_min_sh
// CHECK: @llvm.x86.avx512fp16.mask.min.sh.round
return _mm_min_sh(__A, __B);
}
__m128h test_mm_max_round_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_max_round_sh
+ // CHECK-LABEL: test_mm_max_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.max.sh.round
return _mm_max_round_sh(__A, __B, 0x08);
}
__m128h test_mm_mask_max_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_max_round_sh
+ // CHECK-LABEL: test_mm_mask_max_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.max.sh.round
return _mm_mask_max_round_sh(__W, __U, __A, __B, 0x08);
}
__m128h test_mm_maskz_max_round_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_max_round_sh
+ // CHECK-LABEL: test_mm_maskz_max_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.max.sh.round
return _mm_maskz_max_round_sh(__U, __A, __B, 0x08);
}
__m128h test_mm_mask_max_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_max_sh
+ // CHECK-LABEL: test_mm_mask_max_sh
// CHECK: @llvm.x86.avx512fp16.mask.max.sh.round
return _mm_mask_max_sh(__W, __U, __A, __B);
}
__m128h test_mm_maskz_max_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_max_sh
+ // CHECK-LABEL: test_mm_maskz_max_sh
// CHECK: @llvm.x86.avx512fp16.mask.max.sh.round
return _mm_maskz_max_sh(__U, __A, __B);
}
__m128h test_mm_max_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_max_sh
+ // CHECK-LABEL: test_mm_max_sh
// CHECK: @llvm.x86.avx512fp16.mask.max.sh.round
return _mm_max_sh(__A, __B);
}
__mmask32 test_mm512_cmp_round_ph_mask(__m512h a, __m512h b) {
- // CHECK-LABEL: @test_mm512_cmp_round_ph_mask
+ // CHECK-LABEL: test_mm512_cmp_round_ph_mask
// CHECK: fcmp oeq <32 x half> %{{.*}}, %{{.*}}
return _mm512_cmp_round_ph_mask(a, b, 0, _MM_FROUND_NO_EXC);
}
__mmask32 test_mm512_mask_cmp_round_ph_mask(__mmask32 m, __m512h a, __m512h b) {
- // CHECK-LABEL: @test_mm512_mask_cmp_round_ph_mask
+ // CHECK-LABEL: test_mm512_mask_cmp_round_ph_mask
// CHECK: [[CMP:%.*]] = fcmp oeq <32 x half> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> [[CMP]], {{.*}}
return _mm512_mask_cmp_round_ph_mask(m, a, b, 0, _MM_FROUND_NO_EXC);
}
__mmask32 test_mm512_cmp_ph_mask_eq_oq(__m512h a, __m512h b) {
- // CHECK-LABEL: @test_mm512_cmp_ph_mask_eq_oq
+ // CHECK-LABEL: test_mm512_cmp_ph_mask_eq_oq
// CHECK: fcmp oeq <32 x half> %{{.*}}, %{{.*}}
return _mm512_cmp_ph_mask(a, b, _CMP_EQ_OQ);
}
@@ -1210,7 +1240,7 @@ __mmask32 test_mm512_cmp_ph_mask_true_us(__m512h a, __m512h b) {
}
__mmask32 test_mm512_mask_cmp_ph_mask_eq_oq(__mmask32 m, __m512h a, __m512h b) {
- // CHECK-LABEL: @test_mm512_mask_cmp_ph_mask_eq_oq
+ // CHECK-LABEL: test_mm512_mask_cmp_ph_mask_eq_oq
// CHECK: [[CMP:%.*]] = fcmp oeq <32 x half> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> [[CMP]], {{.*}}
return _mm512_mask_cmp_ph_mask(m, a, b, _CMP_EQ_OQ);
@@ -1434,25 +1464,25 @@ __mmask32 test_mm512_mask_cmp_ph_mask_true_us(__mmask32 m, __m512h a, __m512h b)
}
__mmask8 test_mm_cmp_round_sh_mask(__m128h __X, __m128h __Y) {
- // CHECK-LABEL: @test_mm_cmp_round_sh_mask
+ // CHECK-LABEL: test_mm_cmp_round_sh_mask
// CHECK: @llvm.x86.avx512fp16.mask.cmp.sh
return _mm_cmp_round_sh_mask(__X, __Y, _CMP_NLT_US, _MM_FROUND_NO_EXC);
}
__mmask8 test_mm_mask_cmp_round_sh_mask(__mmask8 __M, __m128h __X, __m128h __Y) {
- // CHECK-LABEL: @test_mm_mask_cmp_round_sh_mask
+ // CHECK-LABEL: test_mm_mask_cmp_round_sh_mask
// CHECK: @llvm.x86.avx512fp16.mask.cmp.sh
return _mm_mask_cmp_round_sh_mask(__M, __X, __Y, _CMP_NLT_US, _MM_FROUND_NO_EXC);
}
__mmask8 test_mm_cmp_sh_mask(__m128h __X, __m128h __Y) {
- // CHECK-LABEL: @test_mm_cmp_sh_mask
+ // CHECK-LABEL: test_mm_cmp_sh_mask
// CHECK: @llvm.x86.avx512fp16.mask.cmp.sh
return _mm_cmp_sh_mask(__X, __Y, _CMP_NLT_US);
}
__mmask8 test_mm_mask_cmp_sh_mask(__mmask8 __M, __m128h __X, __m128h __Y) {
- // CHECK-LABEL: @test_mm_mask_cmp_sh_mask
+ // CHECK-LABEL: test_mm_mask_cmp_sh_mask
// CHECK: @llvm.x86.avx512fp16.mask.cmp.sh
return _mm_mask_cmp_sh_mask(__M, __X, __Y, _CMP_NLT_US);
}
@@ -1466,49 +1496,49 @@ __m128h test_mm_load_sh(void const *A) {
}
__m128h test_mm_mask_load_sh(__m128h __A, __mmask8 __U, const void *__W) {
- // CHECK-LABEL: @test_mm_mask_load_sh
- // CHECK: %{{.*}} = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_load_sh
+ // CHECK: @llvm.masked.load.v8f16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x half> %{{.*}})
return _mm_mask_load_sh(__A, __U, __W);
}
__m128h test_mm_maskz_load_sh(__mmask8 __U, const void *__W) {
- // CHECK-LABEL: @test_mm_maskz_load_sh
- // CHECK: %{{.*}} = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_load_sh
+ // CHECK: @llvm.masked.load.v8f16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x half> %{{.*}})
return _mm_maskz_load_sh(__U, __W);
}
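// What the two masked-load tests above pin down, as a standalone hedged
// sketch (assuming an AVX512-FP16 toolchain; the wrapper name is
// illustrative): the masked scalar load reads lane 0 from memory when bit 0
// of the mask is set and otherwise keeps the passthrough lane, so it lowers
// to a single unaligned @llvm.masked.load.v8f16.p0 rather than a plain load.
#include <immintrin.h>
__m128h load_lane0_or_keep(__m128h passthru, __mmask8 k, const void *p) {
  // Alignment 1 in the intrinsic call matches the "i32 1" checked above.
  return _mm_mask_load_sh(passthru, k, p);
}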
__m512h test_mm512_load_ph(void *p) {
- // CHECK-LABEL: @test_mm512_load_ph
+ // CHECK-LABEL: test_mm512_load_ph
// CHECK: load <32 x half>, ptr %{{.*}}, align 64{{$}}
return _mm512_load_ph(p);
}
__m256h test_mm256_load_ph(void *p) {
- // CHECK-LABEL: @test_mm256_load_ph
+ // CHECK-LABEL: test_mm256_load_ph
// CHECK: load <16 x half>, ptr %{{.*}}, align 32{{$}}
return _mm256_load_ph(p);
}
__m128h test_mm_load_ph(void *p) {
- // CHECK-LABEL: @test_mm_load_ph
+ // CHECK-LABEL: test_mm_load_ph
// CHECK: load <8 x half>, ptr %{{.*}}, align 16{{$}}
return _mm_load_ph(p);
}
__m512h test_mm512_loadu_ph(void *p) {
- // CHECK-LABEL: @test_mm512_loadu_ph
+ // CHECK-LABEL: test_mm512_loadu_ph
// CHECK: load <32 x half>, ptr {{.*}}, align 1{{$}}
return _mm512_loadu_ph(p);
}
__m256h test_mm256_loadu_ph(void *p) {
- // CHECK-LABEL: @test_mm256_loadu_ph
+ // CHECK-LABEL: test_mm256_loadu_ph
// CHECK: load <16 x half>, ptr {{.*}}, align 1{{$}}
return _mm256_loadu_ph(p);
}
__m128h test_mm_loadu_ph(void *p) {
- // CHECK-LABEL: @test_mm_loadu_ph
+ // CHECK-LABEL: test_mm_loadu_ph
// CHECK: load <8 x half>, ptr {{.*}}, align 1{{$}}
return _mm_loadu_ph(p);
}
@@ -1521,45 +1551,45 @@ void test_mm_store_sh(void *A, __m128h B) {
}
void test_mm_mask_store_sh(void *__P, __mmask8 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_mask_store_sh
+ // CHECK-LABEL: test_mm_mask_store_sh
// CHECK: call void @llvm.masked.store.v8f16.p0(<8 x half> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
_mm_mask_store_sh(__P, __U, __A);
}
void test_mm512_store_ph(void *p, __m512h a) {
- // CHECK-LABEL: @test_mm512_store_ph
+ // CHECK-LABEL: test_mm512_store_ph
// CHECK: store <32 x half> %{{.*}}, ptr %{{.*}}, align 64
_mm512_store_ph(p, a);
}
void test_mm256_store_ph(void *p, __m256h a) {
- // CHECK-LABEL: @test_mm256_store_ph
+ // CHECK-LABEL: test_mm256_store_ph
// CHECK: store <16 x half> %{{.*}}, ptr %{{.*}}, align 32
_mm256_store_ph(p, a);
}
void test_mm_store_ph(void *p, __m128h a) {
- // CHECK-LABEL: @test_mm_store_ph
+ // CHECK-LABEL: test_mm_store_ph
// CHECK: store <8 x half> %{{.*}}, ptr %{{.*}}, align 16
_mm_store_ph(p, a);
}
void test_mm512_storeu_ph(void *p, __m512h a) {
- // CHECK-LABEL: @test_mm512_storeu_ph
+ // CHECK-LABEL: test_mm512_storeu_ph
// CHECK: store <32 x half> %{{.*}}, ptr %{{.*}}, align 1{{$}}
// CHECK-NEXT: ret void
_mm512_storeu_ph(p, a);
}
void test_mm256_storeu_ph(void *p, __m256h a) {
- // CHECK-LABEL: @test_mm256_storeu_ph
+ // CHECK-LABEL: test_mm256_storeu_ph
// CHECK: store <16 x half> %{{.*}}, ptr %{{.*}}, align 1{{$}}
// CHECK-NEXT: ret void
_mm256_storeu_ph(p, a);
}
void test_mm_storeu_ph(void *p, __m128h a) {
- // CHECK-LABEL: @test_mm_storeu_ph
+ // CHECK-LABEL: test_mm_storeu_ph
// CHECK: store <8 x half> %{{.*}}, ptr %{{.*}}, align 1{{$}}
// CHECK-NEXT: ret void
_mm_storeu_ph(p, a);
@@ -1573,7 +1603,7 @@ __m128h test_mm_move_sh(__m128h A, __m128h B) {
}
__m128h test_mm_mask_move_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_move_sh
+ // CHECK-LABEL: test_mm_mask_move_sh
// CHECK: [[EXT:%.*]] = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: insertelement <8 x half> %{{.*}}, half [[EXT]], i32 0
// CHECK: [[A:%.*]] = extractelement <8 x half> [[VEC:%.*]], i64 0
@@ -1586,7 +1616,7 @@ __m128h test_mm_mask_move_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B
}
__m128h test_mm_maskz_move_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_move_sh
+ // CHECK-LABEL: test_mm_maskz_move_sh
// CHECK: [[EXT:%.*]] = extractelement <8 x half> %{{.*}}, i32 0
// CHECK: insertelement <8 x half> %{{.*}}, half [[EXT]], i32 0
// CHECK: [[A:%.*]] = extractelement <8 x half> [[VEC:%.*]], i64 0
@@ -1614,532 +1644,532 @@ __m128i test_mm_cvtsi16_si128(short A) {
}
__m512h test_mm512_rcp_ph(__m512h __A) {
- // CHECK-LABEL: @test_mm512_rcp_ph
+ // CHECK-LABEL: test_mm512_rcp_ph
// CHECK: @llvm.x86.avx512fp16.mask.rcp.ph.512
return _mm512_rcp_ph(__A);
}
__m512h test_mm512_mask_rcp_ph(__m512h __W, __mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_mask_rcp_ph
+ // CHECK-LABEL: test_mm512_mask_rcp_ph
// CHECK: @llvm.x86.avx512fp16.mask.rcp.ph.512
return (__m512h)_mm512_mask_rcp_ph(__W, __U, __A);
}
__m512h test_mm512_maskz_rcp_ph(__mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_maskz_rcp_ph
+ // CHECK-LABEL: test_mm512_maskz_rcp_ph
// CHECK: @llvm.x86.avx512fp16.mask.rcp.ph.512
return _mm512_maskz_rcp_ph(__U, __A);
}
__m512h test_mm512_rsqrt_ph(__m512h __A) {
- // CHECK-LABEL: @test_mm512_rsqrt_ph
+ // CHECK-LABEL: test_mm512_rsqrt_ph
// CHECK: @llvm.x86.avx512fp16.mask.rsqrt.ph.512
return _mm512_rsqrt_ph(__A);
}
__m512h test_mm512_mask_rsqrt_ph(__m512h __W, __mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_mask_rsqrt_ph
+ // CHECK-LABEL: test_mm512_mask_rsqrt_ph
// CHECK: @llvm.x86.avx512fp16.mask.rsqrt.ph.512
return (__m512h)_mm512_mask_rsqrt_ph(__W, __U, __A);
}
__m512h test_mm512_maskz_rsqrt_ph(__mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_maskz_rsqrt_ph
+ // CHECK-LABEL: test_mm512_maskz_rsqrt_ph
// CHECK: @llvm.x86.avx512fp16.mask.rsqrt.ph.512
return _mm512_maskz_rsqrt_ph(__U, __A);
}
__m512h test_mm512_getmant_round_ph(__m512h __A) {
- // CHECK-LABEL: @test_mm512_getmant_round_ph
+ // CHECK-LABEL: test_mm512_getmant_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.getmant.ph.512
return _mm512_getmant_round_ph(__A, _MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_getmant_round_ph(__m512h __W, __mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_mask_getmant_round_ph
+ // CHECK-LABEL: test_mm512_mask_getmant_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.getmant.ph.512
return _mm512_mask_getmant_round_ph(__W, __U, __A, _MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_getmant_round_ph(__mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_maskz_getmant_round_ph
+ // CHECK-LABEL: test_mm512_maskz_getmant_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.getmant.ph.512
return _mm512_maskz_getmant_round_ph(__U, __A, _MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_getmant_ph(__m512h __A) {
- // CHECK-LABEL: @test_mm512_getmant_ph
+ // CHECK-LABEL: test_mm512_getmant_ph
// CHECK: @llvm.x86.avx512fp16.mask.getmant.ph.512
return _mm512_getmant_ph(__A, _MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m512h test_mm512_mask_getmant_ph(__m512h __W, __mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_mask_getmant_ph
+ // CHECK-LABEL: test_mm512_mask_getmant_ph
// CHECK: @llvm.x86.avx512fp16.mask.getmant.ph.512
return _mm512_mask_getmant_ph(__W, __U, __A, _MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m512h test_mm512_maskz_getmant_ph(__mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_maskz_getmant_ph
+ // CHECK-LABEL: test_mm512_maskz_getmant_ph
// CHECK: @llvm.x86.avx512fp16.mask.getmant.ph.512
return _mm512_maskz_getmant_ph(__U, __A, _MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m512h test_mm512_scalef_round_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_scalef_round_ph
+ // CHECK-LABEL: test_mm512_scalef_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.scalef.ph.512
return _mm512_scalef_round_ph(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_scalef_round_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_scalef_round_ph
+ // CHECK-LABEL: test_mm512_mask_scalef_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.scalef.ph.512
return _mm512_mask_scalef_round_ph(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_scalef_round_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_scalef_round_ph
+ // CHECK-LABEL: test_mm512_maskz_scalef_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.scalef.ph.512
return _mm512_maskz_scalef_round_ph(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_scalef_ph(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_scalef_ph
+ // CHECK-LABEL: test_mm512_scalef_ph
// CHECK: @llvm.x86.avx512fp16.mask.scalef.ph.512
return _mm512_scalef_ph(__A, __B);
}
__m512h test_mm512_mask_scalef_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_scalef_ph
+ // CHECK-LABEL: test_mm512_mask_scalef_ph
// CHECK: @llvm.x86.avx512fp16.mask.scalef.ph.512
return _mm512_mask_scalef_ph(__W, __U, __A, __B);
}
__m512h test_mm512_maskz_scalef_ph(__mmask32 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_scalef_ph
+ // CHECK-LABEL: test_mm512_maskz_scalef_ph
// CHECK: @llvm.x86.avx512fp16.mask.scalef.ph.512
return _mm512_maskz_scalef_ph(__U, __A, __B);
}
__m512h test_mm512_mask_roundscale_ph(__m512h __W, __mmask16 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_mask_roundscale_ph
+ // CHECK-LABEL: test_mm512_mask_roundscale_ph
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.ph.512
return _mm512_mask_roundscale_ph(__W, __U, __A, 1);
}
__m512h test_mm512_maskz_roundscale_ph(__mmask16 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_maskz_roundscale_ph
+ // CHECK-LABEL: test_mm512_maskz_roundscale_ph
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.ph.512
return _mm512_maskz_roundscale_ph(__U, __A, 1);
}
__m512h test_mm512_mask_roundscale_round_ph(__m512h __A, __mmask16 __U, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_roundscale_round_ph
+ // CHECK-LABEL: test_mm512_mask_roundscale_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.ph.512
return _mm512_mask_roundscale_round_ph(__A, __U, __C, 3, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_roundscale_round_ph(__m512h __A, __mmask16 __U) {
- // CHECK-LABEL: @test_mm512_maskz_roundscale_round_ph
+ // CHECK-LABEL: test_mm512_maskz_roundscale_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.ph.512
return _mm512_maskz_roundscale_round_ph(__U, __A, 3, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_roundscale_round_ph(__m512h __A) {
- // CHECK-LABEL: @test_mm512_roundscale_round_ph
+ // CHECK-LABEL: test_mm512_roundscale_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.ph.512
return _mm512_roundscale_round_ph(__A, 3, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_roundscale_ph(__m512h __A) {
- // CHECK-LABEL: @test_mm512_roundscale_ph
+ // CHECK-LABEL: test_mm512_roundscale_ph
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.ph.512
return _mm512_roundscale_ph(__A, 3);
}
__m512h test_mm512_getexp_round_ph(__m512h __A) {
- // CHECK-LABEL: @test_mm512_getexp_round_ph
+ // CHECK-LABEL: test_mm512_getexp_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.getexp.ph.512
return _mm512_getexp_round_ph(__A, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_getexp_round_ph(__m512h __W, __mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_mask_getexp_round_ph
+ // CHECK-LABEL: test_mm512_mask_getexp_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.getexp.ph.512
return _mm512_mask_getexp_round_ph(__W, __U, __A, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_getexp_round_ph(__mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_maskz_getexp_round_ph
+ // CHECK-LABEL: test_mm512_maskz_getexp_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.getexp.ph.512
return _mm512_maskz_getexp_round_ph(__U, __A, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_getexp_ph(__m512h __A) {
- // CHECK-LABEL: @test_mm512_getexp_ph
+ // CHECK-LABEL: test_mm512_getexp_ph
// CHECK: @llvm.x86.avx512fp16.mask.getexp.ph.512
return _mm512_getexp_ph(__A);
}
__m512h test_mm512_mask_getexp_ph(__m512h __W, __mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_mask_getexp_ph
+ // CHECK-LABEL: test_mm512_mask_getexp_ph
// CHECK: @llvm.x86.avx512fp16.mask.getexp.ph.512
return _mm512_mask_getexp_ph(__W, __U, __A);
}
__m512h test_mm512_maskz_getexp_ph(__mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_maskz_getexp_ph
+ // CHECK-LABEL: test_mm512_maskz_getexp_ph
// CHECK: @llvm.x86.avx512fp16.mask.getexp.ph.512
return _mm512_maskz_getexp_ph(__U, __A);
}
__m512h test_mm512_mask_reduce_ph(__m512h __W, __mmask16 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_mask_reduce_ph
+ // CHECK-LABEL: test_mm512_mask_reduce_ph
// CHECK: @llvm.x86.avx512fp16.mask.reduce.ph.512
return _mm512_mask_reduce_ph(__W, __U, __A, 1);
}
__m512h test_mm512_maskz_reduce_ph(__mmask16 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_maskz_reduce_ph
+ // CHECK-LABEL: test_mm512_maskz_reduce_ph
// CHECK: @llvm.x86.avx512fp16.mask.reduce.ph.512
return _mm512_maskz_reduce_ph(__U, __A, 1);
}
__m512h test_mm512_mask_reduce_round_ph(__m512h __A, __mmask16 __U, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_reduce_round_ph
+ // CHECK-LABEL: test_mm512_mask_reduce_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.reduce.ph.512
return _mm512_mask_reduce_round_ph(__A, __U, __C, 3, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_reduce_round_ph(__m512h __A, __mmask16 __U) {
- // CHECK-LABEL: @test_mm512_maskz_reduce_round_ph
+ // CHECK-LABEL: test_mm512_maskz_reduce_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.reduce.ph.512
return _mm512_maskz_reduce_round_ph(__U, __A, 3, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_reduce_round_ph(__m512h __A) {
- // CHECK-LABEL: @test_mm512_reduce_round_ph
+ // CHECK-LABEL: test_mm512_reduce_round_ph
// CHECK: @llvm.x86.avx512fp16.mask.reduce.ph.512
return _mm512_reduce_round_ph(__A, 3, _MM_FROUND_NO_EXC);
}
__m512h test_mm512_reduce_ph(__m512h __A) {
- // CHECK-LABEL: @test_mm512_reduce_ph
+ // CHECK-LABEL: test_mm512_reduce_ph
// CHECK: @llvm.x86.avx512fp16.mask.reduce.ph.512
return _mm512_reduce_ph(__A, 3);
}
__m128h test_mm_rcp_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_rcp_sh
+ // CHECK-LABEL: test_mm_rcp_sh
// CHECK: @llvm.x86.avx512fp16.mask.rcp.sh
return _mm_rcp_sh(__A, __B);
}
__m128h test_mm_mask_rcp_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_rcp_sh
+ // CHECK-LABEL: test_mm_mask_rcp_sh
// CHECK: @llvm.x86.avx512fp16.mask.rcp.sh
return _mm_mask_rcp_sh(__W, __U, __A, __B);
}
__m128h test_mm_maskz_rcp_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_rcp_sh
+ // CHECK-LABEL: test_mm_maskz_rcp_sh
// CHECK: @llvm.x86.avx512fp16.mask.rcp.sh
return _mm_maskz_rcp_sh(__U, __A, __B);
}
__m128h test_mm_rsqrt_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_rsqrt_sh
+ // CHECK-LABEL: test_mm_rsqrt_sh
// CHECK: @llvm.x86.avx512fp16.mask.rsqrt.sh
return _mm_rsqrt_sh(__A, __B);
}
__m128h test_mm_mask_rsqrt_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_rsqrt_sh
+ // CHECK-LABEL: test_mm_mask_rsqrt_sh
// CHECK: @llvm.x86.avx512fp16.mask.rsqrt.sh
return _mm_mask_rsqrt_sh(__W, __U, __A, __B);
}
__m128h test_mm_maskz_rsqrt_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_rsqrt_sh
+ // CHECK-LABEL: test_mm_maskz_rsqrt_sh
// CHECK: @llvm.x86.avx512fp16.mask.rsqrt.sh
return _mm_maskz_rsqrt_sh(__U, __A, __B);
}
__m128h test_mm_getmant_round_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_getmant_round_sh
+ // CHECK-LABEL: test_mm_getmant_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.getmant.sh
return _mm_getmant_round_sh(__A, __B, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src, 8);
}
__m128h test_mm_getmant_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_getmant_sh
+ // CHECK-LABEL: test_mm_getmant_sh
// CHECK: @llvm.x86.avx512fp16.mask.getmant.sh
return _mm_getmant_sh(__A, __B, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
}
__m128h test_mm_mask_getmant_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_getmant_sh
+ // CHECK-LABEL: test_mm_mask_getmant_sh
// CHECK: @llvm.x86.avx512fp16.mask.getmant.sh
return _mm_mask_getmant_sh(__W, __U, __A, __B, 1, 2);
}
__m128h test_mm_mask_getmant_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_getmant_round_sh
+ // CHECK-LABEL: test_mm_mask_getmant_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.getmant.sh
return _mm_mask_getmant_round_sh(__W, __U, __A, __B, 1, 2, _MM_FROUND_NO_EXC);
}
__m128h test_mm_maskz_getmant_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_getmant_sh
+ // CHECK-LABEL: test_mm_maskz_getmant_sh
// CHECK: @llvm.x86.avx512fp16.mask.getmant.sh
return _mm_maskz_getmant_sh(__U, __A, __B, 1, 2);
}
__m128h test_mm_maskz_getmant_round_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_getmant_round_sh
+ // CHECK-LABEL: test_mm_maskz_getmant_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.getmant.sh
return _mm_maskz_getmant_round_sh(__U, __A, __B, 1, 2, _MM_FROUND_NO_EXC);
}
__m128h test_mm_getexp_round_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_getexp_round_sh
+ // CHECK-LABEL: test_mm_getexp_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.getexp.sh
return _mm_getexp_round_sh(__A, __B, 8);
}
__m128h test_mm_getexp_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_getexp_sh
+ // CHECK-LABEL: test_mm_getexp_sh
// CHECK: @llvm.x86.avx512fp16.mask.getexp.sh
return _mm_getexp_sh(__A, __B);
}
__m128h test_mm_mask_getexp_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_getexp_sh
+ // CHECK-LABEL: test_mm_mask_getexp_sh
// CHECK: @llvm.x86.avx512fp16.mask.getexp.sh
return _mm_mask_getexp_sh(__W, __U, __A, __B);
}
__m128h test_mm_mask_getexp_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_getexp_round_sh
+ // CHECK-LABEL: test_mm_mask_getexp_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.getexp.sh
return _mm_mask_getexp_round_sh(__W, __U, __A, __B, _MM_FROUND_NO_EXC);
}
__m128h test_mm_maskz_getexp_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_getexp_sh
+ // CHECK-LABEL: test_mm_maskz_getexp_sh
// CHECK: @llvm.x86.avx512fp16.mask.getexp.sh
return _mm_maskz_getexp_sh(__U, __A, __B);
}
__m128h test_mm_maskz_getexp_round_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_getexp_round_sh
+ // CHECK-LABEL: test_mm_maskz_getexp_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.getexp.sh
return _mm_maskz_getexp_round_sh(__U, __A, __B, _MM_FROUND_NO_EXC);
}
__m128h test_mm_scalef_round_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_scalef_round_sh
+ // CHECK-LABEL: test_mm_scalef_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.scalef.sh(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}, i8 -1, i32 11)
return _mm_scalef_round_sh(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_scalef_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_scalef_sh
+ // CHECK-LABEL: test_mm_scalef_sh
// CHECK: @llvm.x86.avx512fp16.mask.scalef.sh
return _mm_scalef_sh(__A, __B);
}
__m128h test_mm_mask_scalef_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_scalef_sh
+ // CHECK-LABEL: test_mm_mask_scalef_sh
// CHECK: @llvm.x86.avx512fp16.mask.scalef.sh
return _mm_mask_scalef_sh(__W, __U, __A, __B);
}
__m128h test_mm_mask_scalef_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_scalef_round_sh
+ // CHECK-LABEL: test_mm_mask_scalef_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.scalef.sh(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}, i8 %{{.*}}, i32 11)
return _mm_mask_scalef_round_sh(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_maskz_scalef_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_scalef_sh
+ // CHECK-LABEL: test_mm_maskz_scalef_sh
// CHECK: @llvm.x86.avx512fp16.mask.scalef.sh
return _mm_maskz_scalef_sh(__U, __A, __B);
}
__m128h test_mm_maskz_scalef_round_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_scalef_round_sh
+ // CHECK-LABEL: test_mm_maskz_scalef_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.scalef.sh(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}, i8 %{{.*}}, i32 11)
return _mm_maskz_scalef_round_sh(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_roundscale_round_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_roundscale_round_sh
+ // CHECK-LABEL: test_mm_roundscale_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.sh
return _mm_roundscale_round_sh(__A, __B, 3, _MM_FROUND_NO_EXC);
}
__m128h test_mm_roundscale_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_roundscale_sh
+ // CHECK-LABEL: test_mm_roundscale_sh
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.sh
return _mm_roundscale_sh(__A, __B, 3);
}
__m128h test_mm_mask_roundscale_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_roundscale_sh
+ // CHECK-LABEL: test_mm_mask_roundscale_sh
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.sh
return _mm_mask_roundscale_sh(__W, __U, __A, __B, 3);
}
__m128h test_mm_mask_roundscale_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_roundscale_round_sh
+ // CHECK-LABEL: test_mm_mask_roundscale_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.sh
return _mm_mask_roundscale_round_sh(__W, __U, __A, __B, 3, _MM_FROUND_NO_EXC);
}
__m128h test_mm_maskz_roundscale_round_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_roundscale_round_sh
+ // CHECK-LABEL: test_mm_maskz_roundscale_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.sh
return _mm_maskz_roundscale_round_sh(__U, __A, __B, 3, _MM_FROUND_NO_EXC);
}
__m128h test_mm_maskz_roundscale_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_roundscale_sh
+ // CHECK-LABEL: test_mm_maskz_roundscale_sh
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.sh
return _mm_maskz_roundscale_sh(__U, __A, __B, 3);
}
__m128h test_mm_reduce_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_reduce_sh
+ // CHECK-LABEL: test_mm_reduce_sh
// CHECK: @llvm.x86.avx512fp16.mask.reduce.sh
return _mm_reduce_sh(__A, __B, 4);
}
__m128h test_mm_mask_reduce_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_reduce_sh
+ // CHECK-LABEL: test_mm_mask_reduce_sh
// CHECK: @llvm.x86.avx512fp16.mask.reduce.sh
return _mm_mask_reduce_sh(__W, __U, __A, __B, 4);
}
__m128h test_mm_maskz_reduce_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_reduce_sh
+ // CHECK-LABEL: test_mm_maskz_reduce_sh
// CHECK: @llvm.x86.avx512fp16.mask.reduce.sh
return _mm_maskz_reduce_sh(__U, __A, __B, 4);
}
__m128h test_mm_reduce_round_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_reduce_round_sh
+ // CHECK-LABEL: test_mm_reduce_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.reduce.sh
return _mm_reduce_round_sh(__A, __B, 4, 8);
}
__m128h test_mm_mask_reduce_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_reduce_round_sh
+ // CHECK-LABEL: test_mm_mask_reduce_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.reduce.sh
return _mm_mask_reduce_round_sh(__W, __U, __A, __B, 4, 8);
}
__m128h test_mm_maskz_reduce_round_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_reduce_round_sh
+ // CHECK-LABEL: test_mm_maskz_reduce_round_sh
// CHECK: @llvm.x86.avx512fp16.mask.reduce.sh
return _mm_maskz_reduce_round_sh(__U, __A, __B, 4, 8);
}
__m512h test_mm512_sqrt_round_ph(__m512h __A) {
- // CHECK-LABEL: @test_mm512_sqrt_round_ph
- // CHECK: call <32 x half> @llvm.x86.avx512fp16.sqrt.ph.512(<32 x half> %{{.*}}, i32 11)
+ // CHECK-LABEL: test_mm512_sqrt_round_ph
+ // CHECK: @llvm.x86.avx512fp16.sqrt.ph.512(<32 x half> %{{.*}}, i32 11)
return _mm512_sqrt_round_ph(__A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_sqrt_round_ph(__m512h __W, __mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_mask_sqrt_round_ph
- // CHECK: call <32 x half> @llvm.x86.avx512fp16.sqrt.ph.512(<32 x half> %{{.*}}, i32 11)
+ // CHECK-LABEL: test_mm512_mask_sqrt_round_ph
+ // CHECK: @llvm.x86.avx512fp16.sqrt.ph.512(<32 x half> %{{.*}}, i32 11)
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_sqrt_round_ph(__W, __U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_sqrt_round_ph(__mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_maskz_sqrt_round_ph
- // CHECK: call <32 x half> @llvm.x86.avx512fp16.sqrt.ph.512(<32 x half> %{{.*}}, i32 11)
+ // CHECK-LABEL: test_mm512_maskz_sqrt_round_ph
+ // CHECK: @llvm.x86.avx512fp16.sqrt.ph.512(<32 x half> %{{.*}}, i32 11)
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> {{.*}}
return _mm512_maskz_sqrt_round_ph(__U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_sqrt_ph(__m512h __A) {
- // CHECK-LABEL: @test_mm512_sqrt_ph
- // CHECK: %{{.*}} = call <32 x half> @llvm.sqrt.v32f16(<32 x half> %{{.*}})
+ // CHECK-LABEL: test_mm512_sqrt_ph
+ // CHECK: @llvm.sqrt.v32f16(<32 x half> %{{.*}})
return _mm512_sqrt_ph(__A);
}
__m512h test_mm512_mask_sqrt_ph(__m512h __W, __mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_mask_sqrt_ph
- // CHECK: %{{.*}} = call <32 x half> @llvm.sqrt.v32f16(<32 x half> %{{.*}})
+ // CHECK-LABEL: test_mm512_mask_sqrt_ph
+ // CHECK: @llvm.sqrt.v32f16(<32 x half> %{{.*}})
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_sqrt_ph(__W, __U, __A);
}
__m512h test_mm512_maskz_sqrt_ph(__mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_maskz_sqrt_ph
- // CHECK: %{{.*}} = call <32 x half> @llvm.sqrt.v32f16(<32 x half> %{{.*}})
+ // CHECK-LABEL: test_mm512_maskz_sqrt_ph
+ // CHECK: @llvm.sqrt.v32f16(<32 x half> %{{.*}})
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> {{.*}}
return _mm512_maskz_sqrt_ph(__U, __A);
}
__m128h test_mm_sqrt_round_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_sqrt_round_sh
+ // CHECK-LABEL: test_mm_sqrt_round_sh
// CHECK: call <8 x half> @llvm.x86.avx512fp16.mask.sqrt.sh(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}, i8 -1, i32 11)
return _mm_sqrt_round_sh(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_mask_sqrt_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_sqrt_round_sh
+ // CHECK-LABEL: test_mm_mask_sqrt_round_sh
// CHECK: call <8 x half> @llvm.x86.avx512fp16.mask.sqrt.sh(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}, i8 {{.*}}, i32 11)
return _mm_mask_sqrt_round_sh(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_maskz_sqrt_round_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_sqrt_round_sh
+ // CHECK-LABEL: test_mm_maskz_sqrt_round_sh
// CHECK: call <8 x half> @llvm.x86.avx512fp16.mask.sqrt.sh(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}, i8 {{.*}}, i32 11)
return _mm_maskz_sqrt_round_sh(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_sqrt_sh(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_sqrt_sh
+ // CHECK-LABEL: test_mm_sqrt_sh
// CHECK: %{{.*}} = call half @llvm.sqrt.f16(half %{{.*}})
return _mm_sqrt_sh(__A, __B);
}
__m128h test_mm_mask_sqrt_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_sqrt_sh
+ // CHECK-LABEL: test_mm_mask_sqrt_sh
// CHECK: %{{.*}} = call half @llvm.sqrt.f16(half %{{.*}})
return _mm_mask_sqrt_sh(__W, __U, __A, __B);
}
__m128h test_mm_maskz_sqrt_sh(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_sqrt_sh
+ // CHECK-LABEL: test_mm_maskz_sqrt_sh
// CHECK: %{{.*}} = call half @llvm.sqrt.f16(half %{{.*}})
return _mm_maskz_sqrt_sh(__U, __A, __B);
}
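// The pattern above is worth calling out: without an explicit rounding mode
// the scalar sqrt lowers to the generic @llvm.sqrt.f16, while the *_round_*
// forms go through the target-specific @llvm.x86.avx512fp16.mask.sqrt.sh.
// A hedged side-by-side sketch (assuming -mavx512fp16; names illustrative):
#include <immintrin.h>
__m128h sqrt_lane0_both_ways(__m128h a, __m128h b) {
  __m128h generic = _mm_sqrt_sh(a, b);     // -> @llvm.sqrt.f16 on lane 0
  __m128h toward_zero = _mm_sqrt_round_sh( // -> @llvm.x86.avx512fp16.mask.sqrt.sh, i32 11
      a, b, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
  return _mm_add_sh(generic, toward_zero);
}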
__mmask32 test_mm512_mask_fpclass_ph_mask(__mmask32 __U, __m512h __A) {
- // CHECK-LABEL: @test_mm512_mask_fpclass_ph_mask
+ // CHECK-LABEL: test_mm512_mask_fpclass_ph_mask
// CHECK: @llvm.x86.avx512fp16.fpclass.ph.512
return _mm512_mask_fpclass_ph_mask(__U, __A, 4);
}
__mmask32 test_mm512_fpclass_ph_mask(__m512h __A) {
- // CHECK-LABEL: @test_mm512_fpclass_ph_mask
+ // CHECK-LABEL: test_mm512_fpclass_ph_mask
// CHECK: @llvm.x86.avx512fp16.fpclass.ph.512
return _mm512_fpclass_ph_mask(__A, 4);
}
__mmask8 test_mm_fpclass_sh_mask(__m128h __A) {
- // CHECK-LABEL: @test_mm_fpclass_sh_mask
+ // CHECK-LABEL: test_mm_fpclass_sh_mask
// CHECK: @llvm.x86.avx512fp16.mask.fpclass.sh
return _mm_fpclass_sh_mask(__A, 2);
}
__mmask8 test_mm_mask_fpclass_sh_mask(__mmask8 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_mask_fpclass_sh_mask
+ // CHECK-LABEL: test_mm_mask_fpclass_sh_mask
// CHECK: @llvm.x86.avx512fp16.mask.fpclass.sh
return _mm_mask_fpclass_sh_mask(__U, __A, 2);
}
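// The fpclass immediate is a category bit mask (per the VFPCLASS encoding:
// 0x01 quiet NaN, 0x02 positive zero, 0x04 negative zero, 0x08 +inf,
// 0x10 -inf, 0x20 denormal, 0x40 finite negative, 0x80 signaling NaN).
// A hedged sketch of a typical use (wrapper name illustrative):
#include <immintrin.h>
__mmask8 lane0_is_nan(__m128h x) {
  // QNaN or SNaN in lane 0; lowers to @llvm.x86.avx512fp16.mask.fpclass.sh.
  return _mm_fpclass_sh_mask(x, 0x01 | 0x80);
}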
@@ -3034,26 +3064,26 @@ unsigned int test_mm_cvtsh_u32(__m128h A) {
#ifdef __x86_64__
long long test_mm_cvt_roundsh_i64(__m128h A) {
- // CHECK-LABEL: test_mm_cvt_roundsh_i64
- // CHECK: @llvm.x86.avx512fp16.vcvtsh2si64
+ // X64-LABEL: test_mm_cvt_roundsh_i64
+ // X64: @llvm.x86.avx512fp16.vcvtsh2si64
return _mm_cvt_roundsh_i64(A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
long long test_mm_cvtsh_i64(__m128h A) {
- // CHECK-LABEL: test_mm_cvtsh_i64
- // CHECK: @llvm.x86.avx512fp16.vcvtsh2si64
+ // X64-LABEL: test_mm_cvtsh_i64
+ // X64: @llvm.x86.avx512fp16.vcvtsh2si64
return _mm_cvtsh_i64(A);
}
unsigned long long test_mm_cvt_roundsh_u64(__m128h A) {
- // CHECK-LABEL: test_mm_cvt_roundsh_u64
- // CHECK: @llvm.x86.avx512fp16.vcvtsh2usi64
+ // X64-LABEL: test_mm_cvt_roundsh_u64
+ // X64: @llvm.x86.avx512fp16.vcvtsh2usi64
return _mm_cvt_roundsh_u64(A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
unsigned long long test_mm_cvtsh_u64(__m128h A) {
- // CHECK-LABEL: test_mm_cvtsh_u64
- // CHECK: @llvm.x86.avx512fp16.vcvtsh2usi64
+ // X64-LABEL: test_mm_cvtsh_u64
+ // X64: @llvm.x86.avx512fp16.vcvtsh2usi64
return _mm_cvtsh_u64(A);
}
#endif
@@ -3072,14 +3102,14 @@ __m128h test_mm_cvtu32_sh(__m128h A, unsigned int B) {
#ifdef __x86_64__
__m128h test_mm_cvt_roundu64_sh(__m128h A, unsigned long long B) {
- // CHECK-LABEL: test_mm_cvt_roundu64_sh
- // CHECK: @llvm.x86.avx512fp16.vcvtusi642sh
+ // X64-LABEL: test_mm_cvt_roundu64_sh
+ // X64: @llvm.x86.avx512fp16.vcvtusi642sh
return _mm_cvt_roundu64_sh(A, B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_cvtu64_sh(__m128h A, unsigned long long B) {
- // CHECK-LABEL: test_mm_cvtu64_sh
- // CHECK: %{{.*}} = uitofp i64 %{{.*}} to half
+ // X64-LABEL: test_mm_cvtu64_sh
+ // X64: %{{.*}} = uitofp i64 %{{.*}} to half
return _mm_cvtu64_sh(A, B);
}
#endif
@@ -3098,14 +3128,14 @@ __m128h test_mm_cvti32_sh(__m128h A, int B) {
#ifdef __x86_64__
__m128h test_mm_cvt_roundi64_sh(__m128h A, long long B) {
- // CHECK-LABEL: test_mm_cvt_roundi64_sh
- // CHECK: @llvm.x86.avx512fp16.vcvtsi642sh
+ // X64-LABEL: test_mm_cvt_roundi64_sh
+ // X64: @llvm.x86.avx512fp16.vcvtsi642sh
return _mm_cvt_roundi64_sh(A, B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_cvti64_sh(__m128h A, long long B) {
- // CHECK-LABEL: test_mm_cvti64_sh
- // CHECK: %{{.*}} = sitofp i64 %{{.*}} to half
+ // X64-LABEL: test_mm_cvti64_sh
+ // X64: %{{.*}} = sitofp i64 %{{.*}} to half
return _mm_cvti64_sh(A, B);
}
#endif
@@ -3124,14 +3154,14 @@ int test_mm_cvttsh_i32(__m128h A) {
#ifdef __x86_64__
long long test_mm_cvtt_roundsh_i64(__m128h A) {
- // CHECK-LABEL: test_mm_cvtt_roundsh_i64
- // CHECK: @llvm.x86.avx512fp16.vcvttsh2si64
+ // X64-LABEL: test_mm_cvtt_roundsh_i64
+ // X64: @llvm.x86.avx512fp16.vcvttsh2si64
return _mm_cvtt_roundsh_i64(A, _MM_FROUND_NO_EXC);
}
long long test_mm_cvttsh_i64(__m128h A) {
- // CHECK-LABEL: test_mm_cvttsh_i64
- // CHECK: @llvm.x86.avx512fp16.vcvttsh2si64
+ // X64-LABEL: test_mm_cvttsh_i64
+ // X64: @llvm.x86.avx512fp16.vcvttsh2si64
return _mm_cvttsh_i64(A);
}
#endif
@@ -3150,14 +3180,14 @@ unsigned int test_mm_cvttsh_u32(__m128h A) {
#ifdef __x86_64__
unsigned long long test_mm_cvtt_roundsh_u64(__m128h A) {
- // CHECK-LABEL: test_mm_cvtt_roundsh_u64
- // CHECK: @llvm.x86.avx512fp16.vcvttsh2usi64
+ // X64-LABEL: test_mm_cvtt_roundsh_u64
+ // X64: @llvm.x86.avx512fp16.vcvttsh2usi64
return _mm_cvtt_roundsh_u64(A, _MM_FROUND_NO_EXC);
}
unsigned long long test_mm_cvttsh_u64(__m128h A) {
- // CHECK-LABEL: test_mm_cvttsh_u64
- // CHECK: @llvm.x86.avx512fp16.vcvttsh2usi64
+ // X64-LABEL: test_mm_cvttsh_u64
+ // X64: @llvm.x86.avx512fp16.vcvttsh2usi64
return _mm_cvttsh_u64(A);
}
#endif
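// The scalar <-> 64-bit integer conversions exist only in 64-bit mode, so
// these tests sit under __x86_64__ and now check an X64 prefix that the
// 32-bit run line presumably does not enable. A hedged sketch of the guard
// (function name illustrative):
#include <immintrin.h>
#ifdef __x86_64__
long long truncate_lane0_to_i64(__m128h a) {
  return _mm_cvttsh_i64(a); // -> @llvm.x86.avx512fp16.vcvttsh2si64
}
#endif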
@@ -3235,13 +3265,13 @@ __m256h test_mm512_maskz_cvtxps_ph(__mmask16 A, __m512 B) {
}
__m512h test_mm512_fmadd_round_ph(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fmadd_round_ph
+ // CHECK-LABEL: test_mm512_fmadd_round_ph
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
return _mm512_fmadd_round_ph(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_fmadd_round_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fmadd_round_ph
+ // CHECK-LABEL: test_mm512_mask_fmadd_round_ph
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
@@ -3249,7 +3279,7 @@ __m512h test_mm512_mask_fmadd_round_ph(__m512h __A, __mmask32 __U, __m512h __B,
}
__m512h test_mm512_mask3_fmadd_round_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fmadd_round_ph
+ // CHECK-LABEL: test_mm512_mask3_fmadd_round_ph
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
@@ -3257,7 +3287,7 @@ __m512h test_mm512_mask3_fmadd_round_ph(__m512h __A, __m512h __B, __m512h __C, _
}
__m512h test_mm512_maskz_fmadd_round_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fmadd_round_ph
+ // CHECK-LABEL: test_mm512_maskz_fmadd_round_ph
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer
@@ -3265,14 +3295,14 @@ __m512h test_mm512_maskz_fmadd_round_ph(__mmask32 __U, __m512h __A, __m512h __B,
}
__m512h test_mm512_fmsub_round_ph(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fmsub_round_ph
+ // CHECK-LABEL: test_mm512_fmsub_round_ph
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
return _mm512_fmsub_round_ph(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_fmsub_round_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fmsub_round_ph
+ // CHECK-LABEL: test_mm512_mask_fmsub_round_ph
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
@@ -3281,7 +3311,7 @@ __m512h test_mm512_mask_fmsub_round_ph(__m512h __A, __mmask32 __U, __m512h __B,
}
__m512h test_mm512_maskz_fmsub_round_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fmsub_round_ph
+ // CHECK-LABEL: test_mm512_maskz_fmsub_round_ph
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
@@ -3290,14 +3320,14 @@ __m512h test_mm512_maskz_fmsub_round_ph(__mmask32 __U, __m512h __A, __m512h __B,
}
__m512h test_mm512_fnmadd_round_ph(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fnmadd_round_ph
+ // CHECK-LABEL: test_mm512_fnmadd_round_ph
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
return _mm512_fnmadd_round_ph(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask3_fnmadd_round_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fnmadd_round_ph
+ // CHECK-LABEL: test_mm512_mask3_fnmadd_round_ph
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
@@ -3306,7 +3336,7 @@ __m512h test_mm512_mask3_fnmadd_round_ph(__m512h __A, __m512h __B, __m512h __C,
}
__m512h test_mm512_maskz_fnmadd_round_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fnmadd_round_ph
+ // CHECK-LABEL: test_mm512_maskz_fnmadd_round_ph
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
@@ -3315,7 +3345,7 @@ __m512h test_mm512_maskz_fnmadd_round_ph(__mmask32 __U, __m512h __A, __m512h __B
}
__m512h test_mm512_fnmsub_round_ph(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fnmsub_round_ph
+ // CHECK-LABEL: test_mm512_fnmsub_round_ph
// CHECK: fneg
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
@@ -3323,7 +3353,7 @@ __m512h test_mm512_fnmsub_round_ph(__m512h __A, __m512h __B, __m512h __C) {
}
__m512h test_mm512_maskz_fnmsub_round_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fnmsub_round_ph
+ // CHECK-LABEL: test_mm512_maskz_fnmsub_round_ph
// CHECK: fneg
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
@@ -3333,109 +3363,109 @@ __m512h test_mm512_maskz_fnmsub_round_ph(__mmask32 __U, __m512h __A, __m512h __B
}
__m512h test_mm512_fmadd_ph(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fmadd_ph
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK-LABEL: test_mm512_fmadd_ph
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
return _mm512_fmadd_ph(__A, __B, __C);
}
__m512h test_mm512_mask_fmadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fmadd_ph
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK-LABEL: test_mm512_mask_fmadd_ph
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
return _mm512_mask_fmadd_ph(__A, __U, __B, __C);
}
__m512h test_mm512_mask3_fmadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fmadd_ph
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK-LABEL: test_mm512_mask3_fmadd_ph
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask3_fmadd_ph(__A, __B, __C, __U);
}
__m512h test_mm512_maskz_fmadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fmadd_ph
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK-LABEL: test_mm512_maskz_fmadd_ph
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer
return _mm512_maskz_fmadd_ph(__U, __A, __B, __C);
}
__m512h test_mm512_fmsub_ph(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fmsub_ph
+ // CHECK-LABEL: test_mm512_fmsub_ph
// CHECK: fneg
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
return _mm512_fmsub_ph(__A, __B, __C);
}
__m512h test_mm512_mask_fmsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fmsub_ph
+ // CHECK-LABEL: test_mm512_mask_fmsub_ph
// CHECK: fneg
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_fmsub_ph(__A, __U, __B, __C);
}
__m512h test_mm512_maskz_fmsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fmsub_ph
+ // CHECK-LABEL: test_mm512_maskz_fmsub_ph
// CHECK: fneg
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer
return _mm512_maskz_fmsub_ph(__U, __A, __B, __C);
}
__m512h test_mm512_fnmadd_ph(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fnmadd_ph
+ // CHECK-LABEL: test_mm512_fnmadd_ph
// CHECK: fneg
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
return _mm512_fnmadd_ph(__A, __B, __C);
}
__m512h test_mm512_mask3_fnmadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fnmadd_ph
+ // CHECK-LABEL: test_mm512_mask3_fnmadd_ph
// CHECK: fneg
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask3_fnmadd_ph(__A, __B, __C, __U);
}
__m512h test_mm512_maskz_fnmadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fnmadd_ph
+ // CHECK-LABEL: test_mm512_maskz_fnmadd_ph
// CHECK: fneg
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer
return _mm512_maskz_fnmadd_ph(__U, __A, __B, __C);
}
__m512h test_mm512_fnmsub_ph(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fnmsub_ph
+ // CHECK-LABEL: test_mm512_fnmsub_ph
// CHECK: fneg
// CHECK: fneg
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
return _mm512_fnmsub_ph(__A, __B, __C);
}
__m512h test_mm512_maskz_fnmsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fnmsub_ph
+ // CHECK-LABEL: test_mm512_maskz_fnmsub_ph
// CHECK: fneg
// CHECK: fneg
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer
return _mm512_maskz_fnmsub_ph(__U, __A, __B, __C);
}
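// As the double fneg checks show, fnmsub is not a dedicated IR operation
// here: the front end negates a multiplicand and the addend and reuses
// @llvm.fma.v32f16, since -(a*b) - c == fma(-a, b, -c). A hedged C
// equivalent of that rewrite (illustrative only):
#include <immintrin.h>
__m512h fnmsub_by_hand(__m512h a, __m512h b, __m512h c) {
  __m512h zero = _mm512_setzero_ph();
  // Mirrors the fneg + fneg + @llvm.fma.v32f16 sequence checked above.
  return _mm512_fmadd_ph(_mm512_sub_ph(zero, a), b, _mm512_sub_ph(zero, c));
}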
__m512h test_mm512_fmaddsub_round_ph(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fmaddsub_round_ph
+ // CHECK-LABEL: test_mm512_fmaddsub_round_ph
// CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512
return _mm512_fmaddsub_round_ph(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_fmaddsub_round_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fmaddsub_round_ph
+ // CHECK-LABEL: test_mm512_mask_fmaddsub_round_ph
// CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
@@ -3443,7 +3473,7 @@ __m512h test_mm512_mask_fmaddsub_round_ph(__m512h __A, __mmask32 __U, __m512h __
}
__m512h test_mm512_mask3_fmaddsub_round_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fmaddsub_round_ph
+ // CHECK-LABEL: test_mm512_mask3_fmaddsub_round_ph
// CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
@@ -3451,7 +3481,7 @@ __m512h test_mm512_mask3_fmaddsub_round_ph(__m512h __A, __m512h __B, __m512h __C
}
__m512h test_mm512_maskz_fmaddsub_round_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fmaddsub_round_ph
+ // CHECK-LABEL: test_mm512_maskz_fmaddsub_round_ph
// CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer
@@ -3459,14 +3489,14 @@ __m512h test_mm512_maskz_fmaddsub_round_ph(__mmask32 __U, __m512h __A, __m512h _
}
__m512h test_mm512_fmsubadd_round_ph(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fmsubadd_round_ph
+ // CHECK-LABEL: test_mm512_fmsubadd_round_ph
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512
return _mm512_fmsubadd_round_ph(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_fmsubadd_round_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fmsubadd_round_ph
+ // CHECK-LABEL: test_mm512_mask_fmsubadd_round_ph
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
@@ -3475,7 +3505,7 @@ __m512h test_mm512_mask_fmsubadd_round_ph(__m512h __A, __mmask32 __U, __m512h __
}
__m512h test_mm512_maskz_fmsubadd_round_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fmsubadd_round_ph
+ // CHECK-LABEL: test_mm512_maskz_fmsubadd_round_ph
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
@@ -3484,66 +3514,66 @@ __m512h test_mm512_maskz_fmsubadd_round_ph(__mmask32 __U, __m512h __A, __m512h _
}
__m512h test_mm512_fmaddsub_ph(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fmaddsub_ph
+ // CHECK-LABEL: test_mm512_fmaddsub_ph
// CHECK-NOT: fneg
- // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}, i32 4)
return _mm512_fmaddsub_ph(__A, __B, __C);
}
__m512h test_mm512_mask_fmaddsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fmaddsub_ph
+ // CHECK-LABEL: test_mm512_mask_fmaddsub_ph
// CHECK-NOT: fneg
- // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}, i32 4)
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_fmaddsub_ph(__A, __U, __B, __C);
}
__m512h test_mm512_mask3_fmaddsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fmaddsub_ph
+ // CHECK-LABEL: test_mm512_mask3_fmaddsub_ph
// CHECK-NOT: fneg
- // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}, i32 4)
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask3_fmaddsub_ph(__A, __B, __C, __U);
}
__m512h test_mm512_maskz_fmaddsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fmaddsub_ph
+ // CHECK-LABEL: test_mm512_maskz_fmaddsub_ph
// CHECK-NOT: fneg
- // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}, i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}, i32 4)
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer
return _mm512_maskz_fmaddsub_ph(__U, __A, __B, __C);
}
__m512h test_mm512_fmsubadd_ph(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fmsubadd_ph
+ // CHECK-LABEL: test_mm512_fmsubadd_ph
// CHECK: [[NEG:%.+]] = fneg
- // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> [[NEG]], i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> [[NEG]], i32 4)
return _mm512_fmsubadd_ph(__A, __B, __C);
}
__m512h test_mm512_mask_fmsubadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fmsubadd_ph
+ // CHECK-LABEL: test_mm512_mask_fmsubadd_ph
// CHECK: [[NEG:%.+]] = fneg
- // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> [[NEG]], i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> [[NEG]], i32 4)
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_fmsubadd_ph(__A, __U, __B, __C);
}
__m512h test_mm512_maskz_fmsubadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fmsubadd_ph
+ // CHECK-LABEL: test_mm512_maskz_fmsubadd_ph
// CHECK: [[NEG:%.+]] = fneg
- // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> [[NEG]], i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> [[NEG]], i32 4)
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer
return _mm512_maskz_fmsubadd_ph(__U, __A, __B, __C);
}
__m512h test_mm512_mask3_fmsub_round_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fmsub_round_ph
+ // CHECK-LABEL: test_mm512_mask3_fmsub_round_ph
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
@@ -3552,16 +3582,16 @@ __m512h test_mm512_mask3_fmsub_round_ph(__m512h __A, __m512h __B, __m512h __C, _
}
__m512h test_mm512_mask3_fmsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fmsub_ph
+ // CHECK-LABEL: test_mm512_mask3_fmsub_ph
// CHECK: fneg
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask3_fmsub_ph(__A, __B, __C, __U);
}
__m512h test_mm512_mask3_fmsubadd_round_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fmsubadd_round_ph
+ // CHECK-LABEL: test_mm512_mask3_fmsubadd_round_ph
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
@@ -3570,16 +3600,16 @@ __m512h test_mm512_mask3_fmsubadd_round_ph(__m512h __A, __m512h __B, __m512h __C
}
__m512h test_mm512_mask3_fmsubadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fmsubadd_ph
+ // CHECK-LABEL: test_mm512_mask3_fmsubadd_ph
// CHECK: [[NEG:%.+]] = fneg
- // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> [[NEG]], i32 4)
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> [[NEG]], i32 4)
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask3_fmsubadd_ph(__A, __B, __C, __U);
}
__m512h test_mm512_mask_fnmadd_round_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fnmadd_round_ph
+ // CHECK-LABEL: test_mm512_mask_fnmadd_round_ph
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
@@ -3588,16 +3618,16 @@ __m512h test_mm512_mask_fnmadd_round_ph(__m512h __A, __mmask32 __U, __m512h __B,
}
__m512h test_mm512_mask_fnmadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fnmadd_ph
+ // CHECK-LABEL: test_mm512_mask_fnmadd_ph
// CHECK: fneg
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_fnmadd_ph(__A, __U, __B, __C);
}
__m512h test_mm512_mask_fnmsub_round_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fnmsub_round_ph
+ // CHECK-LABEL: test_mm512_mask_fnmsub_round_ph
// CHECK: fneg
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
@@ -3607,7 +3637,7 @@ __m512h test_mm512_mask_fnmsub_round_ph(__m512h __A, __mmask32 __U, __m512h __B,
}
__m512h test_mm512_mask3_fnmsub_round_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fnmsub_round_ph
+ // CHECK-LABEL: test_mm512_mask3_fnmsub_round_ph
// CHECK: fneg
// CHECK: fneg
// CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512
@@ -3617,27 +3647,27 @@ __m512h test_mm512_mask3_fnmsub_round_ph(__m512h __A, __m512h __B, __m512h __C,
}
__m512h test_mm512_mask_fnmsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fnmsub_ph
+ // CHECK-LABEL: test_mm512_mask_fnmsub_ph
// CHECK: fneg
// CHECK: fneg
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_fnmsub_ph(__A, __U, __B, __C);
}
__m512h test_mm512_mask3_fnmsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fnmsub_ph
+ // CHECK-LABEL: test_mm512_mask3_fnmsub_ph
// CHECK: fneg
// CHECK: fneg
- // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
+ // CHECK: @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}})
// CHECK: bitcast i32 %{{.*}} to <32 x i1>
// CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask3_fnmsub_ph(__A, __B, __C, __U);
}
__m128h test_mm_fmadd_sh(__m128h __W, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_fmadd_sh
+ // CHECK-LABEL: test_mm_fmadd_sh
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3646,7 +3676,7 @@ __m128h test_mm_fmadd_sh(__m128h __W, __m128h __A, __m128h __B) {
}
__m128h test_mm_mask_fmadd_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fmadd_sh
+ // CHECK-LABEL: test_mm_mask_fmadd_sh
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3659,7 +3689,7 @@ __m128h test_mm_mask_fmadd_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __
}
__m128h test_mm_fmadd_round_sh(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fmadd_round_sh
+ // CHECK-LABEL: test_mm_fmadd_round_sh
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3669,7 +3699,7 @@ __m128h test_mm_fmadd_round_sh(__m128h __A, __m128h __B, __m128h __C) {
}
__m128h test_mm_mask_fmadd_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fmadd_round_sh
+ // CHECK-LABEL: test_mm_mask_fmadd_round_sh
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3682,7 +3712,7 @@ __m128h test_mm_mask_fmadd_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m1
}
__m128h test_mm_maskz_fmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fmadd_sh
+ // CHECK-LABEL: test_mm_maskz_fmadd_sh
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3695,7 +3725,7 @@ __m128h test_mm_maskz_fmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h _
}
__m128h test_mm_maskz_fmadd_round_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fmadd_round_sh
+ // CHECK-LABEL: test_mm_maskz_fmadd_round_sh
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3708,7 +3738,7 @@ __m128h test_mm_maskz_fmadd_round_sh(__mmask8 __U, __m128h __A, __m128h __B, __m
}
__m128h test_mm_mask3_fmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmadd_sh
+ // CHECK-LABEL: test_mm_mask3_fmadd_sh
// CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> [[ORIGC:%.+]], i64 0
@@ -3721,7 +3751,7 @@ __m128h test_mm_mask3_fmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 _
}
__m128h test_mm_mask3_fmadd_round_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmadd_round_sh
+ // CHECK-LABEL: test_mm_mask3_fmadd_round_sh
// CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> [[ORIGC:%.+]], i64 0
@@ -3734,7 +3764,7 @@ __m128h test_mm_mask3_fmadd_round_sh(__m128h __W, __m128h __X, __m128h __Y, __mm
}
__m128h test_mm_fmsub_sh(__m128h __W, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_fmsub_sh
+ // CHECK-LABEL: test_mm_fmsub_sh
// CHECK: %{{.*}} = fneg <8 x half> %{{.*}}
// CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0
@@ -3746,7 +3776,7 @@ __m128h test_mm_fmsub_sh(__m128h __W, __m128h __A, __m128h __B) {
}
__m128h test_mm_mask_fmsub_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fmsub_sh
+ // CHECK-LABEL: test_mm_mask_fmsub_sh
// CHECK: %{{.*}} = fneg <8 x half> %{{.*}}
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0
@@ -3761,7 +3791,7 @@ __m128h test_mm_mask_fmsub_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __
}
__m128h test_mm_fmsub_round_sh(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fmsub_round_sh
+ // CHECK-LABEL: test_mm_fmsub_round_sh
// CHECK: %{{.*}} = fneg <8 x half> %{{.*}}
// CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0
@@ -3773,7 +3803,7 @@ __m128h test_mm_fmsub_round_sh(__m128h __A, __m128h __B, __m128h __C) {
}
__m128h test_mm_mask_fmsub_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fmsub_round_sh
+ // CHECK-LABEL: test_mm_mask_fmsub_round_sh
// CHECK: %{{.*}} = fneg <8 x half> %{{.*}}
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0
@@ -3788,7 +3818,7 @@ __m128h test_mm_mask_fmsub_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m1
}
__m128h test_mm_maskz_fmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fmsub_sh
+ // CHECK-LABEL: test_mm_maskz_fmsub_sh
// CHECK: %{{.*}} = fneg <8 x half> %{{.*}}
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0
@@ -3803,7 +3833,7 @@ __m128h test_mm_maskz_fmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h _
}
__m128h test_mm_maskz_fmsub_round_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fmsub_round_sh
+ // CHECK-LABEL: test_mm_maskz_fmsub_round_sh
// CHECK: %{{.*}} = fneg <8 x half> %{{.*}}
// CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0
@@ -3818,7 +3848,7 @@ __m128h test_mm_maskz_fmsub_round_sh(__mmask8 __U, __m128h __A, __m128h __B, __m
}
__m128h test_mm_mask3_fmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmsub_sh
+ // CHECK-LABEL: test_mm_mask3_fmsub_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3833,7 +3863,7 @@ __m128h test_mm_mask3_fmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 _
}
__m128h test_mm_mask3_fmsub_round_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmsub_round_sh
+ // CHECK-LABEL: test_mm_mask3_fmsub_round_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3848,7 +3878,7 @@ __m128h test_mm_mask3_fmsub_round_sh(__m128h __W, __m128h __X, __m128h __Y, __mm
}
__m128h test_mm_fnmadd_sh(__m128h __W, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_fnmadd_sh
+ // CHECK-LABEL: test_mm_fnmadd_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3858,7 +3888,7 @@ __m128h test_mm_fnmadd_sh(__m128h __W, __m128h __A, __m128h __B) {
}
__m128h test_mm_mask_fnmadd_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fnmadd_sh
+ // CHECK-LABEL: test_mm_mask_fnmadd_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3872,7 +3902,7 @@ __m128h test_mm_mask_fnmadd_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h _
}
__m128h test_mm_fnmadd_round_sh(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fnmadd_round_sh
+ // CHECK-LABEL: test_mm_fnmadd_round_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3883,7 +3913,7 @@ __m128h test_mm_fnmadd_round_sh(__m128h __A, __m128h __B, __m128h __C) {
}
__m128h test_mm_mask_fnmadd_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fnmadd_round_sh
+ // CHECK-LABEL: test_mm_mask_fnmadd_round_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3897,7 +3927,7 @@ __m128h test_mm_mask_fnmadd_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m
}
__m128h test_mm_maskz_fnmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmadd_sh
+ // CHECK-LABEL: test_mm_maskz_fnmadd_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3911,7 +3941,7 @@ __m128h test_mm_maskz_fnmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h
}
__m128h test_mm_maskz_fnmadd_round_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmadd_round_sh
+ // CHECK-LABEL: test_mm_maskz_fnmadd_round_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3925,7 +3955,7 @@ __m128h test_mm_maskz_fnmadd_round_sh(__mmask8 __U, __m128h __A, __m128h __B, __
}
__m128h test_mm_mask3_fnmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmadd_sh
+ // CHECK-LABEL: test_mm_mask3_fnmadd_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3939,7 +3969,7 @@ __m128h test_mm_mask3_fnmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8
}
__m128h test_mm_mask3_fnmadd_round_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmadd_round_sh
+ // CHECK-LABEL: test_mm_mask3_fnmadd_round_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
// CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -3953,7 +3983,7 @@ __m128h test_mm_mask3_fnmadd_round_sh(__m128h __W, __m128h __X, __m128h __Y, __m
}
__m128h test_mm_fnmsub_sh(__m128h __W, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_fnmsub_sh
+ // CHECK-LABEL: test_mm_fnmsub_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[NEG2:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
@@ -3964,7 +3994,7 @@ __m128h test_mm_fnmsub_sh(__m128h __W, __m128h __A, __m128h __B) {
}
__m128h test_mm_mask_fnmsub_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fnmsub_sh
+ // CHECK-LABEL: test_mm_mask_fnmsub_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[NEG2:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
@@ -3979,7 +4009,7 @@ __m128h test_mm_mask_fnmsub_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h _
}
__m128h test_mm_fnmsub_round_sh(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fnmsub_round_sh
+ // CHECK-LABEL: test_mm_fnmsub_round_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[NEG2:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
@@ -3991,7 +4021,7 @@ __m128h test_mm_fnmsub_round_sh(__m128h __A, __m128h __B, __m128h __C) {
}
__m128h test_mm_mask_fnmsub_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fnmsub_round_sh
+ // CHECK-LABEL: test_mm_mask_fnmsub_round_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[NEG2:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
@@ -4006,7 +4036,7 @@ __m128h test_mm_mask_fnmsub_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m
}
__m128h test_mm_maskz_fnmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmsub_sh
+ // CHECK-LABEL: test_mm_maskz_fnmsub_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[NEG2:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
@@ -4021,7 +4051,7 @@ __m128h test_mm_maskz_fnmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h
}
__m128h test_mm_maskz_fnmsub_round_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmsub_round_sh
+ // CHECK-LABEL: test_mm_maskz_fnmsub_round_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[NEG2:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0
@@ -4036,7 +4066,7 @@ __m128h test_mm_maskz_fnmsub_round_sh(__mmask8 __U, __m128h __A, __m128h __B, __
}
__m128h test_mm_mask3_fnmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmsub_sh
+ // CHECK-LABEL: test_mm_mask3_fnmsub_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[NEG2:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -4052,7 +4082,7 @@ __m128h test_mm_mask3_fnmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8
}
__m128h test_mm_mask3_fnmsub_round_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmsub_round_sh
+ // CHECK-LABEL: test_mm_mask3_fnmsub_round_sh
// CHECK: [[NEG:%.+]] = fneg
// CHECK: [[NEG2:%.+]] = fneg
// CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0
@@ -4068,13 +4098,13 @@ __m128h test_mm_mask3_fnmsub_round_sh(__m128h __W, __m128h __X, __m128h __Y, __m
}
__m128h test_mm_fcmadd_sch(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fcmadd_sch
+ // CHECK-LABEL: test_mm_fcmadd_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.csh
return _mm_fcmadd_sch(__A, __B, __C);
}
__m128h test_mm_mask_fcmadd_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_mask_fcmadd_sch
+ // CHECK-LABEL: test_mm_mask_fcmadd_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.csh
// CHECK: %{{.*}} = shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: %{{.*}} = select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -4082,13 +4112,13 @@ __m128h test_mm_mask_fcmadd_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h
}
__m128h test_mm_maskz_fcmadd_sch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fcmadd_sch
+ // CHECK-LABEL: test_mm_maskz_fcmadd_sch
// CHECK: @llvm.x86.avx512fp16.maskz.vfcmadd.csh
return _mm_maskz_fcmadd_sch(__U, __A, __B, __C);
}
__m128h test_mm_mask3_fcmadd_sch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fcmadd_sch
+ // CHECK-LABEL: test_mm_mask3_fcmadd_sch
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
@@ -4099,13 +4129,13 @@ __m128h test_mm_mask3_fcmadd_sch(__m128h __A, __m128h __B, __m128h __C, __mmask8
}
__m128h test_mm_fcmadd_round_sch(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fcmadd_round_sch
+ // CHECK-LABEL: test_mm_fcmadd_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.csh
return _mm_fcmadd_round_sch(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_mask_fcmadd_round_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_mask_fcmadd_round_sch
+ // CHECK-LABEL: test_mm_mask_fcmadd_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.csh
// CHECK: %{{.*}} = shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: %{{.*}} = select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -4113,13 +4143,13 @@ __m128h test_mm_mask_fcmadd_round_sch(__m128h __A, __mmask8 __U, __m128h __B, __
}
__m128h test_mm_maskz_fcmadd_round_sch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fcmadd_round_sch
+ // CHECK-LABEL: test_mm_maskz_fcmadd_round_sch
// CHECK: @llvm.x86.avx512fp16.maskz.vfcmadd.csh
return _mm_maskz_fcmadd_round_sch(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_mask3_fcmadd_round_sch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fcmadd_round_sch
+ // CHECK-LABEL: test_mm_mask3_fcmadd_round_sch
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
@@ -4130,13 +4160,13 @@ __m128h test_mm_mask3_fcmadd_round_sch(__m128h __A, __m128h __B, __m128h __C, __
}
__m128h test_mm_fmadd_sch(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fmadd_sch
+ // CHECK-LABEL: test_mm_fmadd_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.csh
return _mm_fmadd_sch(__A, __B, __C);
}
__m128h test_mm_mask_fmadd_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_mask_fmadd_sch
+ // CHECK-LABEL: test_mm_mask_fmadd_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.csh
// CHECK: %{{.*}} = shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: %{{.*}} = select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -4144,13 +4174,13 @@ __m128h test_mm_mask_fmadd_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h _
}
__m128h test_mm_maskz_fmadd_sch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fmadd_sch
+ // CHECK-LABEL: test_mm_maskz_fmadd_sch
// CHECK: @llvm.x86.avx512fp16.maskz.vfmadd.csh
return _mm_maskz_fmadd_sch(__U, __A, __B, __C);
}
__m128h test_mm_mask3_fmadd_sch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmadd_sch
+ // CHECK-LABEL: test_mm_mask3_fmadd_sch
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
@@ -4161,13 +4191,13 @@ __m128h test_mm_mask3_fmadd_sch(__m128h __A, __m128h __B, __m128h __C, __mmask8
}
__m128h test_mm_fmadd_round_sch(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fmadd_round_sch
+ // CHECK-LABEL: test_mm_fmadd_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.csh
return _mm_fmadd_round_sch(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_mask_fmadd_round_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_mask_fmadd_round_sch
+ // CHECK-LABEL: test_mm_mask_fmadd_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.csh
// CHECK: %{{.*}} = shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: %{{.*}} = select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -4175,13 +4205,13 @@ __m128h test_mm_mask_fmadd_round_sch(__m128h __A, __mmask8 __U, __m128h __B, __m
}
__m128h test_mm_maskz_fmadd_round_sch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fmadd_round_sch
+ // CHECK-LABEL: test_mm_maskz_fmadd_round_sch
// CHECK: @llvm.x86.avx512fp16.maskz.vfmadd.csh
return _mm_maskz_fmadd_round_sch(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_mask3_fmadd_round_sch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmadd_round_sch
+ // CHECK-LABEL: test_mm_mask3_fmadd_round_sch
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
@@ -4192,286 +4222,286 @@ __m128h test_mm_mask3_fmadd_round_sch(__m128h __A, __m128h __B, __m128h __C, __m
}
__m128h test_mm_fcmul_sch(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_fcmul_sch
+ // CHECK-LABEL: test_mm_fcmul_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.csh
return _mm_fcmul_sch(__A, __B);
}
__m128h test_mm_mask_fcmul_sch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fcmul_sch
+ // CHECK-LABEL: test_mm_mask_fcmul_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.csh
return _mm_mask_fcmul_sch(__W, __U, __A, __B);
}
__m128h test_mm_maskz_fcmul_sch(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_fcmul_sch
+ // CHECK-LABEL: test_mm_maskz_fcmul_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.csh
return _mm_maskz_fcmul_sch(__U, __A, __B);
}
__m128h test_mm_fcmul_round_sch(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_fcmul_round_sch
+ // CHECK-LABEL: test_mm_fcmul_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.csh
return _mm_fcmul_round_sch(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_mask_fcmul_round_sch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fcmul_round_sch
+ // CHECK-LABEL: test_mm_mask_fcmul_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.csh
return _mm_mask_fcmul_round_sch(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_maskz_fcmul_round_sch(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_fcmul_round_sch
+ // CHECK-LABEL: test_mm_maskz_fcmul_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.csh
return _mm_maskz_fcmul_round_sch(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_fcmul_pch(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_fcmul_pch
+ // CHECK-LABEL: test_mm512_fcmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.512
return _mm512_fcmul_pch(__A, __B);
}
__m512h test_mm512_mask_fcmul_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_fcmul_pch
+ // CHECK-LABEL: test_mm512_mask_fcmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.512
return _mm512_mask_fcmul_pch(__W, __U, __A, __B);
}
__m512h test_mm512_maskz_fcmul_pch(__mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_fcmul_pch
+ // CHECK-LABEL: test_mm512_maskz_fcmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.512
return _mm512_maskz_fcmul_pch(__U, __A, __B);
}
__m512h test_mm512_fcmul_round_pch(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_fcmul_round_pch
+ // CHECK-LABEL: test_mm512_fcmul_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.512
return _mm512_fcmul_round_pch(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_fcmul_round_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_fcmul_round_pch
+ // CHECK-LABEL: test_mm512_mask_fcmul_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.512
return _mm512_mask_fcmul_round_pch(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_fcmul_round_pch(__mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_fcmul_round_pch
+ // CHECK-LABEL: test_mm512_maskz_fcmul_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.512
return _mm512_maskz_fcmul_round_pch(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_fcmadd_pch(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fcmadd_pch
+ // CHECK-LABEL: test_mm512_fcmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.cph.512
return _mm512_fcmadd_pch(__A, __B, __C);
}
__m512h test_mm512_mask_fcmadd_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fcmadd_pch
+ // CHECK-LABEL: test_mm512_mask_fcmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.cph.512
// CHECK: %{{.*}} = select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_fcmadd_pch(__A, __U, __B, __C);
}
__m512h test_mm512_mask3_fcmadd_pch(__m512h __A, __m512h __B, __m512h __C, __mmask16 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fcmadd_pch
+ // CHECK-LABEL: test_mm512_mask3_fcmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.cph.512
// CHECK-NOT: %{{.*}} = select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask3_fcmadd_pch(__A, __B, __C, __U);
}
__m512h test_mm512_maskz_fcmadd_pch(__mmask16 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fcmadd_pch
+ // CHECK-LABEL: test_mm512_maskz_fcmadd_pch
// CHECK: @llvm.x86.avx512fp16.maskz.vfcmadd.cph.512
return _mm512_maskz_fcmadd_pch(__U, __A, __B, __C);
}
__m512h test_mm512_fcmadd_round_pch(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fcmadd_round_pch
+ // CHECK-LABEL: test_mm512_fcmadd_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.cph.512
return _mm512_fcmadd_round_pch(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_fcmadd_round_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fcmadd_round_pch
+ // CHECK-LABEL: test_mm512_mask_fcmadd_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.cph.512
// CHECK: %{{.*}} = select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_fcmadd_round_pch(__A, __U, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask3_fcmadd_round_pch(__m512h __A, __m512h __B, __m512h __C, __mmask16 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fcmadd_round_pch
+ // CHECK-LABEL: test_mm512_mask3_fcmadd_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.cph.512
// CHECK-NOT: %{{.*}} = select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask3_fcmadd_round_pch(__A, __B, __C, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_fcmadd_round_pch(__mmask16 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fcmadd_round_pch
+ // CHECK-LABEL: test_mm512_maskz_fcmadd_round_pch
// CHECK: @llvm.x86.avx512fp16.maskz.vfcmadd.cph.512
return _mm512_maskz_fcmadd_round_pch(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_fmul_pch(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_fmul_pch
+ // CHECK-LABEL: test_mm512_fmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.512
return _mm512_fmul_pch(__A, __B);
}
__m512h test_mm512_mask_fmul_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_fmul_pch
+ // CHECK-LABEL: test_mm512_mask_fmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.512
return _mm512_mask_fmul_pch(__W, __U, __A, __B);
}
__m512h test_mm512_maskz_fmul_pch(__mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_fmul_pch
+ // CHECK-LABEL: test_mm512_maskz_fmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.512
return _mm512_maskz_fmul_pch(__U, __A, __B);
}
__m512h test_mm512_fmul_round_pch(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_fmul_round_pch
+ // CHECK-LABEL: test_mm512_fmul_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.512
return _mm512_fmul_round_pch(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_fmul_round_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_fmul_round_pch
+ // CHECK-LABEL: test_mm512_mask_fmul_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.512
return _mm512_mask_fmul_round_pch(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_fmul_round_pch(__mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_fmul_round_pch
+ // CHECK-LABEL: test_mm512_maskz_fmul_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.512
return _mm512_maskz_fmul_round_pch(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_fmadd_pch(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fmadd_pch
+ // CHECK-LABEL: test_mm512_fmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.cph.512
return _mm512_fmadd_pch(__A, __B, __C);
}
__m512h test_mm512_mask_fmadd_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fmadd_pch
+ // CHECK-LABEL: test_mm512_mask_fmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.cph.512
// CHECK: %{{.*}} = select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_fmadd_pch(__A, __U, __B, __C);
}
__m512h test_mm512_mask3_fmadd_pch(__m512h __A, __m512h __B, __m512h __C, __mmask16 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fmadd_pch
+ // CHECK-LABEL: test_mm512_mask3_fmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.cph.512
// CHECK-NOT: %{{.*}} = select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask3_fmadd_pch(__A, __B, __C, __U);
}
__m512h test_mm512_maskz_fmadd_pch(__mmask16 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fmadd_pch
+ // CHECK-LABEL: test_mm512_maskz_fmadd_pch
// CHECK: @llvm.x86.avx512fp16.maskz.vfmadd.cph.512
return _mm512_maskz_fmadd_pch(__U, __A, __B, __C);
}
__m512h test_mm512_fmadd_round_pch(__m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_fmadd_round_pch
+ // CHECK-LABEL: test_mm512_fmadd_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.cph.512
return _mm512_fmadd_round_pch(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_fmadd_round_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_mask_fmadd_round_pch
+ // CHECK-LABEL: test_mm512_mask_fmadd_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.cph.512
// CHECK: %{{.*}} = select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_fmadd_round_pch(__A, __U, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask3_fmadd_round_pch(__m512h __A, __m512h __B, __m512h __C, __mmask16 __U) {
- // CHECK-LABEL: @test_mm512_mask3_fmadd_round_pch
+ // CHECK-LABEL: test_mm512_mask3_fmadd_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.cph.512
// CHECK-NOT: %{{.*}} = select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask3_fmadd_round_pch(__A, __B, __C, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_fmadd_round_pch(__mmask16 __U, __m512h __A, __m512h __B, __m512h __C) {
- // CHECK-LABEL: @test_mm512_maskz_fmadd_round_pch
+ // CHECK-LABEL: test_mm512_maskz_fmadd_round_pch
// CHECK: @llvm.x86.avx512fp16.maskz.vfmadd.cph.512
return _mm512_maskz_fmadd_round_pch(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_fmul_sch(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_fmul_sch
+ // CHECK-LABEL: test_mm_fmul_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.csh
return _mm_fmul_sch(__A, __B);
}
__m128h test_mm_mask_fmul_sch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fmul_sch
+ // CHECK-LABEL: test_mm_mask_fmul_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.csh
return _mm_mask_fmul_sch(__W, __U, __A, __B);
}
__m128h test_mm_maskz_fmul_sch(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_fmul_sch
+ // CHECK-LABEL: test_mm_maskz_fmul_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.csh
return _mm_maskz_fmul_sch(__U, __A, __B);
}
__m128h test_mm_fmul_round_sch(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_fmul_round_sch
+ // CHECK-LABEL: test_mm_fmul_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.csh
return _mm_fmul_round_sch(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_mask_fmul_round_sch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fmul_round_sch
+ // CHECK-LABEL: test_mm_mask_fmul_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.csh
return _mm_mask_fmul_round_sch(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_maskz_fmul_round_sch(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_fmul_round_sch
+ // CHECK-LABEL: test_mm_maskz_fmul_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.csh
return _mm_maskz_fmul_round_sch(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
_Float16 test_mm512_reduce_add_ph(__m512h __W) {
- // CHECK-LABEL: @test_mm512_reduce_add_ph
- // CHECK: call reassoc half @llvm.vector.reduce.fadd.v32f16(half 0xH8000, <32 x half> %{{.*}})
+ // CHECK-LABEL: test_mm512_reduce_add_ph
+ // CHECK: @llvm.vector.reduce.fadd.v32f16(half 0xH8000, <32 x half> %{{.*}})
return _mm512_reduce_add_ph(__W);
}
_Float16 test_mm512_reduce_mul_ph(__m512h __W) {
- // CHECK-LABEL: @test_mm512_reduce_mul_ph
- // CHECK: call reassoc half @llvm.vector.reduce.fmul.v32f16(half 0xH3C00, <32 x half> %{{.*}})
+ // CHECK-LABEL: test_mm512_reduce_mul_ph
+ // CHECK: @llvm.vector.reduce.fmul.v32f16(half 0xH3C00, <32 x half> %{{.*}})
return _mm512_reduce_mul_ph(__W);
}
_Float16 test_mm512_reduce_max_ph(__m512h __W) {
- // CHECK-LABEL: @test_mm512_reduce_max_ph
- // CHECK: call nnan half @llvm.vector.reduce.fmax.v32f16(<32 x half> %{{.*}})
+ // CHECK-LABEL: test_mm512_reduce_max_ph
+ // CHECK: @llvm.vector.reduce.fmax.v32f16(<32 x half> %{{.*}})
return _mm512_reduce_max_ph(__W);
}
_Float16 test_mm512_reduce_min_ph(__m512h __W) {
- // CHECK-LABEL: @test_mm512_reduce_min_ph
- // CHECK: call nnan half @llvm.vector.reduce.fmin.v32f16(<32 x half> %{{.*}})
+ // CHECK-LABEL: test_mm512_reduce_min_ph
+ // CHECK: @llvm.vector.reduce.fmin.v32f16(<32 x half> %{{.*}})
return _mm512_reduce_min_ph(__W);
}
__m512h test_mm512_mask_blend_ph(__mmask32 __U, __m512h __A, __m512h __W) {
- // CHECK-LABEL: @test_mm512_mask_blend_ph
+ // CHECK-LABEL: test_mm512_mask_blend_ph
// CHECK: %{{.*}} = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: %{{.*}} = select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}
return _mm512_mask_blend_ph(__U, __A, __W);
}
__m512h test_mm512_permutex2var_ph(__m512h __A, __m512i __I, __m512h __B) {
- // CHECK-LABEL: @test_mm512_permutex2var_ph
+ // CHECK-LABEL: test_mm512_permutex2var_ph
// CHECK: %{{.*}} = bitcast <32 x half> %{{.*}} to <32 x i16>
// CHECK: %{{.*}} = bitcast <8 x i64> %{{.*}} to <32 x i16>
// CHECK: %{{.*}} = bitcast <32 x half> %{{.*}} to <32 x i16>
@@ -4481,7 +4511,7 @@ __m512h test_mm512_permutex2var_ph(__m512h __A, __m512i __I, __m512h __B) {
}
__m512h test_mm512_permutexvar_epi16(__m512i __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_permutexvar_epi16
+ // CHECK-LABEL: test_mm512_permutexvar_epi16
// CHECK: %{{.*}} = bitcast <32 x half> %{{.*}} to <32 x i16>
// CHECK: %{{.*}} = bitcast <8 x i64> %{{.*}} to <32 x i16>
// CHECK: %{{.*}} = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
@@ -4491,144 +4521,144 @@ __m512h test_mm512_permutexvar_epi16(__m512i __A, __m512h __B) {
// tests below are for alias intrinsics.
__m512h test_mm512_mul_pch(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mul_pch
+ // CHECK-LABEL: test_mm512_mul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.512
return _mm512_mul_pch(__A, __B);
}
__m512h test_mm512_mask_mul_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_mul_pch
+ // CHECK-LABEL: test_mm512_mask_mul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.512
return _mm512_mask_mul_pch(__W, __U, __A, __B);
}
__m512h test_mm512_maskz_mul_pch(__mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_mul_pch
+ // CHECK-LABEL: test_mm512_maskz_mul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.512
return _mm512_maskz_mul_pch(__U, __A, __B);
}
__m512h test_mm512_cmul_pch(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_cmul_pch
+ // CHECK-LABEL: test_mm512_cmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.512
return _mm512_cmul_pch(__A, __B);
}
__m512h test_mm512_mask_cmul_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_cmul_pch
+ // CHECK-LABEL: test_mm512_mask_cmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.512
return _mm512_mask_cmul_pch(__W, __U, __A, __B);
}
__m512h test_mm512_maskz_cmul_pch(__mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_cmul_pch
+ // CHECK-LABEL: test_mm512_maskz_cmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.512
return _mm512_maskz_cmul_pch(__U, __A, __B);
}
__m128h test_mm_mul_sch(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mul_sch
+ // CHECK-LABEL: test_mm_mul_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.csh
return _mm_mul_sch(__A, __B);
}
__m128h test_mm_mask_mul_sch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_mul_sch
+ // CHECK-LABEL: test_mm_mask_mul_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.csh
return _mm_mask_mul_sch(__W, __U, __A, __B);
}
__m128h test_mm_maskz_mul_sch(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_mul_sch
+ // CHECK-LABEL: test_mm_maskz_mul_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.csh
return _mm_maskz_mul_sch(__U, __A, __B);
}
__m128h test_mm_mul_round_sch(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mul_round_sch
+ // CHECK-LABEL: test_mm_mul_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.csh
return _mm_mul_round_sch(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_mask_mul_round_sch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_mul_round_sch
+ // CHECK-LABEL: test_mm_mask_mul_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.csh
return _mm_mask_mul_round_sch(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_maskz_mul_round_sch(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_mul_round_sch
+ // CHECK-LABEL: test_mm_maskz_mul_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.csh
return _mm_maskz_mul_round_sch(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mul_round_pch(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mul_round_pch
+ // CHECK-LABEL: test_mm512_mul_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.512
return _mm512_mul_round_pch(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_mul_round_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_mul_round_pch
+ // CHECK-LABEL: test_mm512_mask_mul_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.512
return _mm512_mask_mul_round_pch(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_mul_round_pch(__mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_mul_round_pch
+ // CHECK-LABEL: test_mm512_maskz_mul_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.512
return _mm512_maskz_mul_round_pch(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_cmul_round_pch(__m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_cmul_round_pch
+ // CHECK-LABEL: test_mm512_cmul_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.512
return _mm512_cmul_round_pch(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_mask_cmul_round_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_mask_cmul_round_pch
+ // CHECK-LABEL: test_mm512_mask_cmul_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.512
return _mm512_mask_cmul_round_pch(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m512h test_mm512_maskz_cmul_round_pch(__mmask16 __U, __m512h __A, __m512h __B) {
- // CHECK-LABEL: @test_mm512_maskz_cmul_round_pch
+ // CHECK-LABEL: test_mm512_maskz_cmul_round_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.512
return _mm512_maskz_cmul_round_pch(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_cmul_sch(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_cmul_sch
+ // CHECK-LABEL: test_mm_cmul_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.csh
return _mm_cmul_sch(__A, __B);
}
__m128h test_mm_mask_cmul_sch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_cmul_sch
+ // CHECK-LABEL: test_mm_mask_cmul_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.csh
return _mm_mask_cmul_sch(__W, __U, __A, __B);
}
__m128h test_mm_maskz_cmul_sch(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_cmul_sch
+ // CHECK-LABEL: test_mm_maskz_cmul_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.csh
return _mm_maskz_cmul_sch(__U, __A, __B);
}
__m128h test_mm_cmul_round_sch(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_cmul_round_sch
+ // CHECK-LABEL: test_mm_cmul_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.csh
return _mm_cmul_round_sch(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_mask_cmul_round_sch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_cmul_round_sch
+ // CHECK-LABEL: test_mm_mask_cmul_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.csh
return _mm_mask_cmul_round_sch(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128h test_mm_maskz_cmul_round_sch(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_cmul_round_sch
+ // CHECK-LABEL: test_mm_maskz_cmul_round_sch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.csh
return _mm_maskz_cmul_round_sch(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
diff --git a/clang/test/CodeGen/X86/avx512ifma-builtins.c b/clang/test/CodeGen/X86/avx512ifma-builtins.c
index e2077b5..7c7c492 100644
--- a/clang/test/CodeGen/X86/avx512ifma-builtins.c
+++ b/clang/test/CodeGen/X86/avx512ifma-builtins.c
@@ -1,44 +1,47 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
__m512i test_mm512_madd52hi_epu64(__m512i __X, __m512i __Y, __m512i __Z) {
- // CHECK-LABEL: @test_mm512_madd52hi_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52h.uq.512
+ // CHECK-LABEL: test_mm512_madd52hi_epu64
+ // CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.vpmadd52h.uq.512(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
return _mm512_madd52hi_epu64(__X, __Y, __Z);
}
__m512i test_mm512_mask_madd52hi_epu64(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) {
- // CHECK-LABEL: @test_mm512_mask_madd52hi_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52h.uq.512
+ // CHECK-LABEL: test_mm512_mask_madd52hi_epu64
+ // CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.vpmadd52h.uq.512(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_madd52hi_epu64(__W, __M, __X, __Y);
}
__m512i test_mm512_maskz_madd52hi_epu64(__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) {
- // CHECK-LABEL: @test_mm512_maskz_madd52hi_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52h.uq.512
+ // CHECK-LABEL: test_mm512_maskz_madd52hi_epu64
+ // CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.vpmadd52h.uq.512(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_madd52hi_epu64(__M, __X, __Y, __Z);
}
__m512i test_mm512_madd52lo_epu64(__m512i __X, __m512i __Y, __m512i __Z) {
- // CHECK-LABEL: @test_mm512_madd52lo_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52l.uq.512
+ // CHECK-LABEL: test_mm512_madd52lo_epu64
+ // CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.vpmadd52l.uq.512(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
return _mm512_madd52lo_epu64(__X, __Y, __Z);
}
__m512i test_mm512_mask_madd52lo_epu64(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) {
- // CHECK-LABEL: @test_mm512_mask_madd52lo_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52l.uq.512
+ // CHECK-LABEL: test_mm512_mask_madd52lo_epu64
+ // CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.vpmadd52l.uq.512(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_madd52lo_epu64(__W, __M, __X, __Y);
}
__m512i test_mm512_maskz_madd52lo_epu64(__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) {
- // CHECK-LABEL: @test_mm512_maskz_madd52lo_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52l.uq.512
+ // CHECK-LABEL: test_mm512_maskz_madd52lo_epu64
+ // CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.vpmadd52l.uq.512(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_madd52lo_epu64(__M, __X, __Y, __Z);
}
diff --git a/clang/test/CodeGen/X86/avx512ifmavl-builtins.c b/clang/test/CodeGen/X86/avx512ifmavl-builtins.c
index 3b74478..c115b60 100644
--- a/clang/test/CodeGen/X86/avx512ifmavl-builtins.c
+++ b/clang/test/CodeGen/X86/avx512ifmavl-builtins.c
@@ -1,83 +1,86 @@
-// RUN: %clang_cc1 %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c %s -flax-vector-conversions=none -ffreestanding -triple=i386-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=i386-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
__m128i test_mm_madd52hi_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
- // CHECK-LABEL: @test_mm_madd52hi_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52h.uq.128
+ // CHECK-LABEL: test_mm_madd52hi_epu64
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_madd52hi_epu64(__X, __Y, __Z);
}
__m128i test_mm_mask_madd52hi_epu64(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_madd52hi_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52h.uq.128
+ // CHECK-LABEL: test_mm_mask_madd52hi_epu64
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_madd52hi_epu64(__W, __M, __X, __Y);
}
__m128i test_mm_maskz_madd52hi_epu64(__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) {
- // CHECK-LABEL: @test_mm_maskz_madd52hi_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52h.uq.128
+ // CHECK-LABEL: test_mm_maskz_madd52hi_epu64
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_madd52hi_epu64(__M, __X, __Y, __Z);
}
__m256i test_mm256_madd52hi_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
- // CHECK-LABEL: @test_mm256_madd52hi_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52h.uq.256
+ // CHECK-LABEL: test_mm256_madd52hi_epu64
+ // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_madd52hi_epu64(__X, __Y, __Z);
}
__m256i test_mm256_mask_madd52hi_epu64(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_madd52hi_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52h.uq.256
+ // CHECK-LABEL: test_mm256_mask_madd52hi_epu64
+ // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_madd52hi_epu64(__W, __M, __X, __Y);
}
__m256i test_mm256_maskz_madd52hi_epu64(__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) {
- // CHECK-LABEL: @test_mm256_maskz_madd52hi_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52h.uq.256
+ // CHECK-LABEL: test_mm256_maskz_madd52hi_epu64
+ // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_madd52hi_epu64(__M, __X, __Y, __Z);
}
__m128i test_mm_madd52lo_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
- // CHECK-LABEL: @test_mm_madd52lo_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52l.uq.128
+ // CHECK-LABEL: test_mm_madd52lo_epu64
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_madd52lo_epu64(__X, __Y, __Z);
}
__m128i test_mm_mask_madd52lo_epu64(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_madd52lo_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52l.uq.128
+ // CHECK-LABEL: test_mm_mask_madd52lo_epu64
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_madd52lo_epu64(__W, __M, __X, __Y);
}
__m128i test_mm_maskz_madd52lo_epu64(__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) {
- // CHECK-LABEL: @test_mm_maskz_madd52lo_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52l.uq.128
+ // CHECK-LABEL: test_mm_maskz_madd52lo_epu64
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_madd52lo_epu64(__M, __X, __Y, __Z);
}
__m256i test_mm256_madd52lo_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
- // CHECK-LABEL: @test_mm256_madd52lo_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52l.uq.256
+ // CHECK-LABEL: test_mm256_madd52lo_epu64
+ // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_madd52lo_epu64(__X, __Y, __Z);
}
__m256i test_mm256_mask_madd52lo_epu64(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_madd52lo_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52l.uq.256
+ // CHECK-LABEL: test_mm256_mask_madd52lo_epu64
+ // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_madd52lo_epu64(__W, __M, __X, __Y);
}
__m256i test_mm256_maskz_madd52lo_epu64(__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) {
- // CHECK-LABEL: @test_mm256_maskz_madd52lo_epu64
- // CHECK: @llvm.x86.avx512.vpmadd52l.uq.256
+ // CHECK-LABEL: test_mm256_maskz_madd52lo_epu64
+ // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_madd52lo_epu64(__M, __X, __Y, __Z);
}
diff --git a/clang/test/CodeGen/X86/avx512vbmi-builtins.c b/clang/test/CodeGen/X86/avx512vbmi-builtins.c
index aefc39f..c3b6298 100644
--- a/clang/test/CodeGen/X86/avx512vbmi-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vbmi-builtins.c
@@ -1,71 +1,74 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vbmi -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vbmi -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vbmi -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vbmi -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vbmi -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
__m512i test_mm512_mask2_permutex2var_epi8(__m512i __A, __m512i __I, __mmask64 __U, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask2_permutex2var_epi8
- // CHECK: @llvm.x86.avx512.vpermi2var.qi.512
+ // CHECK-LABEL: test_mm512_mask2_permutex2var_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask2_permutex2var_epi8(__A, __I, __U, __B);
}
__m512i test_mm512_permutex2var_epi8(__m512i __A, __m512i __I, __m512i __B) {
- // CHECK-LABEL: @test_mm512_permutex2var_epi8
- // CHECK: @llvm.x86.avx512.vpermi2var.qi.512
+ // CHECK-LABEL: test_mm512_permutex2var_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_permutex2var_epi8(__A, __I, __B);
}
__m512i test_mm512_mask_permutex2var_epi8(__m512i __A, __mmask64 __U, __m512i __I, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_permutex2var_epi8
- // CHECK: @llvm.x86.avx512.vpermi2var.qi.512
+ // CHECK-LABEL: test_mm512_mask_permutex2var_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_permutex2var_epi8(__A, __U, __I, __B);
}
__m512i test_mm512_maskz_permutex2var_epi8(__mmask64 __U, __m512i __A, __m512i __I, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_permutex2var_epi8
- // CHECK: @llvm.x86.avx512.vpermi2var.qi.512
+ // CHECK-LABEL: test_mm512_maskz_permutex2var_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_permutex2var_epi8(__U, __A, __I, __B);
}
__m512i test_mm512_permutexvar_epi8(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_permutexvar_epi8
- // CHECK: @llvm.x86.avx512.permvar.qi.512
+ // CHECK-LABEL: test_mm512_permutexvar_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.permvar.qi.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_permutexvar_epi8(__A, __B);
}
__m512i test_mm512_maskz_permutexvar_epi8(__mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_permutexvar_epi8
- // CHECK: @llvm.x86.avx512.permvar.qi.512
+ // CHECK-LABEL: test_mm512_maskz_permutexvar_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.permvar.qi.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_permutexvar_epi8(__M, __A, __B);
}
__m512i test_mm512_mask_permutexvar_epi8(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_permutexvar_epi8
- // CHECK: @llvm.x86.avx512.permvar.qi.512
+ // CHECK-LABEL: test_mm512_mask_permutexvar_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.permvar.qi.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_permutexvar_epi8(__W, __M, __A, __B);
}
__m512i test_mm512_mask_multishift_epi64_epi8(__m512i __W, __mmask64 __M, __m512i __X, __m512i __Y) {
- // CHECK-LABEL: @test_mm512_mask_multishift_epi64_epi8
- // CHECK: @llvm.x86.avx512.pmultishift.qb.512
+ // CHECK-LABEL: test_mm512_mask_multishift_epi64_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_multishift_epi64_epi8(__W, __M, __X, __Y);
}
__m512i test_mm512_maskz_multishift_epi64_epi8(__mmask64 __M, __m512i __X, __m512i __Y) {
- // CHECK-LABEL: @test_mm512_maskz_multishift_epi64_epi8
- // CHECK: @llvm.x86.avx512.pmultishift.qb.512
+ // CHECK-LABEL: test_mm512_maskz_multishift_epi64_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_multishift_epi64_epi8(__M, __X, __Y);
}
__m512i test_mm512_multishift_epi64_epi8(__m512i __X, __m512i __Y) {
- // CHECK-LABEL: @test_mm512_multishift_epi64_epi8
- // CHECK: @llvm.x86.avx512.pmultishift.qb.512
+ // CHECK-LABEL: test_mm512_multishift_epi64_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.pmultishift.qb.512(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_multishift_epi64_epi8(__X, __Y);
}
diff --git a/clang/test/CodeGen/X86/avx512vbmi2-builtins.c b/clang/test/CodeGen/X86/avx512vbmi2-builtins.c
index 1e804c3..4f6139b 100644
--- a/clang/test/CodeGen/X86/avx512vbmi2-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vbmi2-builtins.c
@@ -1,328 +1,331 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vbmi2 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vbmi2 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vbmi2 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vbmi2 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vbmi2 -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
__m512i test_mm512_mask_compress_epi16(__m512i __S, __mmask32 __U, __m512i __D) {
- // CHECK-LABEL: @test_mm512_mask_compress_epi16
- // CHECK: @llvm.x86.avx512.mask.compress
+ // CHECK-LABEL: test_mm512_mask_compress_epi16
+ // CHECK: call <32 x i16> @llvm.x86.avx512.mask.compress.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i1> %{{.*}})
return _mm512_mask_compress_epi16(__S, __U, __D);
}
__m512i test_mm512_maskz_compress_epi16(__mmask32 __U, __m512i __D) {
- // CHECK-LABEL: @test_mm512_maskz_compress_epi16
- // CHECK: @llvm.x86.avx512.mask.compress
+ // CHECK-LABEL: test_mm512_maskz_compress_epi16
+ // CHECK: call <32 x i16> @llvm.x86.avx512.mask.compress.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i1> %{{.*}})
return _mm512_maskz_compress_epi16(__U, __D);
}
__m512i test_mm512_mask_compress_epi8(__m512i __S, __mmask64 __U, __m512i __D) {
- // CHECK-LABEL: @test_mm512_mask_compress_epi8
- // CHECK: @llvm.x86.avx512.mask.compress
+ // CHECK-LABEL: test_mm512_mask_compress_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.mask.compress.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i1> %{{.*}})
return _mm512_mask_compress_epi8(__S, __U, __D);
}
__m512i test_mm512_maskz_compress_epi8(__mmask64 __U, __m512i __D) {
- // CHECK-LABEL: @test_mm512_maskz_compress_epi8
- // CHECK: @llvm.x86.avx512.mask.compress
+ // CHECK-LABEL: test_mm512_maskz_compress_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.mask.compress.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i1> %{{.*}})
return _mm512_maskz_compress_epi8(__U, __D);
}
void test_mm512_mask_compressstoreu_epi16(void *__P, __mmask32 __U, __m512i __D) {
- // CHECK-LABEL: @test_mm512_mask_compressstoreu_epi16
- // CHECK: @llvm.masked.compressstore.v32i16(<32 x i16> %{{.*}}, ptr %{{.*}}, <32 x i1> %{{.*}})
+ // CHECK-LABEL: test_mm512_mask_compressstoreu_epi16
+ // CHECK: call void @llvm.masked.compressstore.v32i16(<32 x i16> %{{.*}}, ptr %{{.*}}, <32 x i1> %{{.*}})
_mm512_mask_compressstoreu_epi16(__P, __U, __D);
}
void test_mm512_mask_compressstoreu_epi8(void *__P, __mmask64 __U, __m512i __D) {
- // CHECK-LABEL: @test_mm512_mask_compressstoreu_epi8
- // CHECK: @llvm.masked.compressstore.v64i8(<64 x i8> %{{.*}}, ptr %{{.*}}, <64 x i1> %{{.*}})
+ // CHECK-LABEL: test_mm512_mask_compressstoreu_epi8
+ // CHECK: call void @llvm.masked.compressstore.v64i8(<64 x i8> %{{.*}}, ptr %{{.*}}, <64 x i1> %{{.*}})
_mm512_mask_compressstoreu_epi8(__P, __U, __D);
}
__m512i test_mm512_mask_expand_epi16(__m512i __S, __mmask32 __U, __m512i __D) {
- // CHECK-LABEL: @test_mm512_mask_expand_epi16
- // CHECK: @llvm.x86.avx512.mask.expand
+ // CHECK-LABEL: test_mm512_mask_expand_epi16
+ // CHECK: call <32 x i16> @llvm.x86.avx512.mask.expand.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i1> %{{.*}})
return _mm512_mask_expand_epi16(__S, __U, __D);
}
__m512i test_mm512_maskz_expand_epi16(__mmask32 __U, __m512i __D) {
- // CHECK-LABEL: @test_mm512_maskz_expand_epi16
- // CHECK: @llvm.x86.avx512.mask.expand
+ // CHECK-LABEL: test_mm512_maskz_expand_epi16
+ // CHECK: call <32 x i16> @llvm.x86.avx512.mask.expand.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i1> %{{.*}})
return _mm512_maskz_expand_epi16(__U, __D);
}
__m512i test_mm512_mask_expand_epi8(__m512i __S, __mmask64 __U, __m512i __D) {
- // CHECK-LABEL: @test_mm512_mask_expand_epi8
- // CHECK: @llvm.x86.avx512.mask.expand
+ // CHECK-LABEL: test_mm512_mask_expand_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.mask.expand.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i1> %{{.*}})
return _mm512_mask_expand_epi8(__S, __U, __D);
}
__m512i test_mm512_maskz_expand_epi8(__mmask64 __U, __m512i __D) {
- // CHECK-LABEL: @test_mm512_maskz_expand_epi8
- // CHECK: @llvm.x86.avx512.mask.expand
+ // CHECK-LABEL: test_mm512_maskz_expand_epi8
+ // CHECK: call <64 x i8> @llvm.x86.avx512.mask.expand.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i1> %{{.*}})
return _mm512_maskz_expand_epi8(__U, __D);
}
__m512i test_mm512_mask_expandloadu_epi16(__m512i __S, __mmask32 __U, void const* __P) {
- // CHECK-LABEL: @test_mm512_mask_expandloadu_epi16
- // CHECK: @llvm.masked.expandload.v32i16(ptr %{{.*}}, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm512_mask_expandloadu_epi16
+ // CHECK: call <32 x i16> @llvm.masked.expandload.v32i16(ptr %{{.*}}, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_mask_expandloadu_epi16(__S, __U, __P);
}
__m512i test_mm512_maskz_expandloadu_epi16(__mmask32 __U, void const* __P) {
- // CHECK-LABEL: @test_mm512_maskz_expandloadu_epi16
- // CHECK: @llvm.masked.expandload.v32i16(ptr %{{.*}}, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm512_maskz_expandloadu_epi16
+ // CHECK: call <32 x i16> @llvm.masked.expandload.v32i16(ptr %{{.*}}, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_maskz_expandloadu_epi16(__U, __P);
}
__m512i test_mm512_mask_expandloadu_epi8(__m512i __S, __mmask64 __U, void const* __P) {
- // CHECK-LABEL: @test_mm512_mask_expandloadu_epi8
- // CHECK: @llvm.masked.expandload.v64i8(ptr %{{.*}}, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
+ // CHECK-LABEL: test_mm512_mask_expandloadu_epi8
+ // CHECK: call <64 x i8> @llvm.masked.expandload.v64i8(ptr %{{.*}}, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_mask_expandloadu_epi8(__S, __U, __P);
}
__m512i test_mm512_maskz_expandloadu_epi8(__mmask64 __U, void const* __P) {
- // CHECK-LABEL: @test_mm512_maskz_expandloadu_epi8
- // CHECK: @llvm.masked.expandload.v64i8(ptr %{{.*}}, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
+ // CHECK-LABEL: test_mm512_maskz_expandloadu_epi8
+ // CHECK: call <64 x i8> @llvm.masked.expandload.v64i8(ptr %{{.*}}, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_maskz_expandloadu_epi8(__U, __P);
}
__m512i test_mm512_mask_shldi_epi64(__m512i __S, __mmask8 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_shldi_epi64
- // CHECK: @llvm.fshl.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> splat (i64 47))
+ // CHECK-LABEL: test_mm512_mask_shldi_epi64
+ // CHECK: call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> splat (i64 47))
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_shldi_epi64(__S, __U, __A, __B, 47);
}
__m512i test_mm512_maskz_shldi_epi64(__mmask8 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_shldi_epi64
- // CHECK: @llvm.fshl.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> splat (i64 63))
+ // CHECK-LABEL: test_mm512_maskz_shldi_epi64
+ // CHECK: call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> splat (i64 63))
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_shldi_epi64(__U, __A, __B, 63);
}
__m512i test_mm512_shldi_epi64(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_shldi_epi64
- // CHECK: @llvm.fshl.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> splat (i64 31))
+ // CHECK-LABEL: test_mm512_shldi_epi64
+ // CHECK: call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> splat (i64 31))
return _mm512_shldi_epi64(__A, __B, 31);
}
__m512i test_mm512_mask_shldi_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_shldi_epi32
- // CHECK: @llvm.fshl.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> splat (i32 7))
+ // CHECK-LABEL: test_mm512_mask_shldi_epi32
+ // CHECK: call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> splat (i32 7))
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_shldi_epi32(__S, __U, __A, __B, 7);
}
__m512i test_mm512_maskz_shldi_epi32(__mmask16 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_shldi_epi32
- // CHECK: @llvm.fshl.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> splat (i32 15))
+ // CHECK-LABEL: test_mm512_maskz_shldi_epi32
+ // CHECK: call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> splat (i32 15))
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_shldi_epi32(__U, __A, __B, 15);
}
__m512i test_mm512_shldi_epi32(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_shldi_epi32
- // CHECK: @llvm.fshl.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> splat (i32 31))
+ // CHECK-LABEL: test_mm512_shldi_epi32
+ // CHECK: call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> splat (i32 31))
return _mm512_shldi_epi32(__A, __B, 31);
}
__m512i test_mm512_mask_shldi_epi16(__m512i __S, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_shldi_epi16
- // CHECK: @llvm.fshl.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> splat (i16 3))
+ // CHECK-LABEL: test_mm512_mask_shldi_epi16
+ // CHECK: call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> splat (i16 3))
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_shldi_epi16(__S, __U, __A, __B, 3);
}
__m512i test_mm512_maskz_shldi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_shldi_epi16
- // CHECK: @llvm.fshl.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> splat (i16 7))
+ // CHECK-LABEL: test_mm512_maskz_shldi_epi16
+ // CHECK: call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> splat (i16 7))
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_shldi_epi16(__U, __A, __B, 7);
}
__m512i test_mm512_shldi_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_shldi_epi16
- // CHECK: @llvm.fshl.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> splat (i16 15))
+ // CHECK-LABEL: test_mm512_shldi_epi16
+ // CHECK: call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> splat (i16 15))
return _mm512_shldi_epi16(__A, __B, 15);
}
__m512i test_mm512_mask_shrdi_epi64(__m512i __S, __mmask8 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_shrdi_epi64
- // CHECK: @llvm.fshr.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> splat (i64 47))
+ // CHECK-LABEL: test_mm512_mask_shrdi_epi64
+ // CHECK: call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> splat (i64 47))
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_shrdi_epi64(__S, __U, __A, __B, 47);
}
__m512i test_mm512_maskz_shrdi_epi64(__mmask8 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_shrdi_epi64
- // CHECK: @llvm.fshr.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> splat (i64 63))
+ // CHECK-LABEL: test_mm512_maskz_shrdi_epi64
+ // CHECK: call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> splat (i64 63))
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_shrdi_epi64(__U, __A, __B, 63);
}
__m512i test_mm512_shrdi_epi64(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_shrdi_epi64
- // CHECK: @llvm.fshr.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> splat (i64 31))
+ // CHECK-LABEL: test_mm512_shrdi_epi64
+ // CHECK: call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> splat (i64 31))
return _mm512_shrdi_epi64(__A, __B, 31);
}
__m512i test_mm512_mask_shrdi_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_shrdi_epi32
- // CHECK: @llvm.fshr.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> splat (i32 7))
+ // CHECK-LABEL: test_mm512_mask_shrdi_epi32
+ // CHECK: call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> splat (i32 7))
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_shrdi_epi32(__S, __U, __A, __B, 7);
}
__m512i test_mm512_maskz_shrdi_epi32(__mmask16 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_shrdi_epi32
- // CHECK: @llvm.fshr.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> splat (i32 15))
+ // CHECK-LABEL: test_mm512_maskz_shrdi_epi32
+ // CHECK: call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> splat (i32 15))
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_shrdi_epi32(__U, __A, __B, 15);
}
__m512i test_mm512_shrdi_epi32(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_shrdi_epi32
- // CHECK: @llvm.fshr.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> splat (i32 31))
+ // CHECK-LABEL: test_mm512_shrdi_epi32
+ // CHECK: call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> splat (i32 31))
return _mm512_shrdi_epi32(__A, __B, 31);
}
__m512i test_mm512_mask_shrdi_epi16(__m512i __S, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_shrdi_epi16
- // CHECK: @llvm.fshr.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> splat (i16 3))
+ // CHECK-LABEL: test_mm512_mask_shrdi_epi16
+ // CHECK: call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> splat (i16 3))
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_shrdi_epi16(__S, __U, __A, __B, 3);
}
__m512i test_mm512_maskz_shrdi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_shrdi_epi16
- // CHECK: @llvm.fshr.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> splat (i16 15))
+ // CHECK-LABEL: test_mm512_maskz_shrdi_epi16
+ // CHECK: call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> splat (i16 15))
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_shrdi_epi16(__U, __A, __B, 15);
}
__m512i test_mm512_shrdi_epi16(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_shrdi_epi16
- // CHECK: @llvm.fshr.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> splat (i16 31))
+ // CHECK-LABEL: test_mm512_shrdi_epi16
+ // CHECK: call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> splat (i16 31))
return _mm512_shrdi_epi16(__A, __B, 31);
}
__m512i test_mm512_mask_shldv_epi64(__m512i __S, __mmask8 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_shldv_epi64
- // CHECK: @llvm.fshl.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm512_mask_shldv_epi64
+ // CHECK: call {{.*}}<8 x i64> @llvm.fshl.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_shldv_epi64(__S, __U, __A, __B);
}
__m512i test_mm512_maskz_shldv_epi64(__mmask8 __U, __m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_shldv_epi64
- // CHECK: @llvm.fshl.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm512_maskz_shldv_epi64
+ // CHECK: call {{.*}}<8 x i64> @llvm.fshl.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_shldv_epi64(__U, __S, __A, __B);
}
__m512i test_mm512_shldv_epi64(__m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_shldv_epi64
- // CHECK: @llvm.fshl.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm512_shldv_epi64
+ // CHECK: call {{.*}}<8 x i64> @llvm.fshl.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
return _mm512_shldv_epi64(__S, __A, __B);
}
__m512i test_mm512_mask_shldv_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_shldv_epi32
- // CHECK: @llvm.fshl.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm512_mask_shldv_epi32
+ // CHECK: call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_shldv_epi32(__S, __U, __A, __B);
}
__m512i test_mm512_maskz_shldv_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_shldv_epi32
- // CHECK: @llvm.fshl.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm512_maskz_shldv_epi32
+ // CHECK: call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_shldv_epi32(__U, __S, __A, __B);
}
__m512i test_mm512_shldv_epi32(__m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_shldv_epi32
- // CHECK: @llvm.fshl.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm512_shldv_epi32
+ // CHECK: call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_shldv_epi32(__S, __A, __B);
}
__m512i test_mm512_mask_shldv_epi16(__m512i __S, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_shldv_epi16
- // CHECK: @llvm.fshl.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm512_mask_shldv_epi16
+ // CHECK: call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_shldv_epi16(__S, __U, __A, __B);
}
__m512i test_mm512_maskz_shldv_epi16(__mmask32 __U, __m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_shldv_epi16
- // CHECK: @llvm.fshl.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm512_maskz_shldv_epi16
+ // CHECK: call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_shldv_epi16(__U, __S, __A, __B);
}
__m512i test_mm512_shldv_epi16(__m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_shldv_epi16
- // CHECK: @llvm.fshl.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm512_shldv_epi16
+ // CHECK: call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_shldv_epi16(__S, __A, __B);
}
__m512i test_mm512_mask_shrdv_epi64(__m512i __S, __mmask8 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_shrdv_epi64
- // CHECK: @llvm.fshr.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm512_mask_shrdv_epi64
+ // CHECK: call {{.*}}<8 x i64> @llvm.fshr.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_shrdv_epi64(__S, __U, __A, __B);
}
__m512i test_mm512_maskz_shrdv_epi64(__mmask8 __U, __m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_shrdv_epi64
- // CHECK: @llvm.fshr.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm512_maskz_shrdv_epi64
+ // CHECK: call {{.*}}<8 x i64> @llvm.fshr.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_shrdv_epi64(__U, __S, __A, __B);
}
__m512i test_mm512_shrdv_epi64(__m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_shrdv_epi64
- // CHECK: @llvm.fshr.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm512_shrdv_epi64
+ // CHECK: call {{.*}}<8 x i64> @llvm.fshr.v8i64(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
return _mm512_shrdv_epi64(__S, __A, __B);
}
__m512i test_mm512_mask_shrdv_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_shrdv_epi32
- // CHECK: @llvm.fshr.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm512_mask_shrdv_epi32
+ // CHECK: call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_shrdv_epi32(__S, __U, __A, __B);
}
__m512i test_mm512_maskz_shrdv_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_shrdv_epi32
- // CHECK: @llvm.fshr.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm512_maskz_shrdv_epi32
+ // CHECK: call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_shrdv_epi32(__U, __S, __A, __B);
}
__m512i test_mm512_shrdv_epi32(__m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_shrdv_epi32
- // CHECK: @llvm.fshr.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm512_shrdv_epi32
+ // CHECK: call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_shrdv_epi32(__S, __A, __B);
}
__m512i test_mm512_mask_shrdv_epi16(__m512i __S, __mmask32 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_shrdv_epi16
- // CHECK: @llvm.fshr.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm512_mask_shrdv_epi16
+ // CHECK: call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_shrdv_epi16(__S, __U, __A, __B);
}
__m512i test_mm512_maskz_shrdv_epi16(__mmask32 __U, __m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_shrdv_epi16
- // CHECK: @llvm.fshr.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm512_maskz_shrdv_epi16
+ // CHECK: call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_shrdv_epi16(__U, __S, __A, __B);
}
__m512i test_mm512_shrdv_epi16(__m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_shrdv_epi16
- // CHECK: @llvm.fshr.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm512_shrdv_epi16
+ // CHECK: call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_shrdv_epi16(__S, __A, __B);
}
diff --git a/clang/test/CodeGen/X86/avx512vbmivl-builtin.c b/clang/test/CodeGen/X86/avx512vbmivl-builtin.c
index 2562da26..c4d5fc8 100644
--- a/clang/test/CodeGen/X86/avx512vbmivl-builtin.c
+++ b/clang/test/CodeGen/X86/avx512vbmivl-builtin.c
@@ -1,139 +1,142 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vbmi -target-feature +avx512vl -target-feature +avx512bw -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vbmi -target-feature +avx512vl -target-feature +avx512bw -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vbmi -target-feature +avx512vl -target-feature +avx512bw -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vbmi -target-feature +avx512vl -target-feature +avx512bw -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vbmi -target-feature +avx512vl -target-feature +avx512bw -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
__m128i test_mm_permutexvar_epi8(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_permutexvar_epi8
- // CHECK: @llvm.x86.avx512.permvar.qi.128
+ // CHECK-LABEL: test_mm_permutexvar_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.permvar.qi.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
return _mm_permutexvar_epi8(__A, __B);
}
__m128i test_mm_maskz_permutexvar_epi8(__mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_permutexvar_epi8
- // CHECK: @llvm.x86.avx512.permvar.qi.128
+ // CHECK-LABEL: test_mm_maskz_permutexvar_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.permvar.qi.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_permutexvar_epi8(__M, __A, __B);
}
__m128i test_mm_mask_permutexvar_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_permutexvar_epi8
- // CHECK: @llvm.x86.avx512.permvar.qi.128
+ // CHECK-LABEL: test_mm_mask_permutexvar_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.permvar.qi.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_permutexvar_epi8(__W, __M, __A, __B);
}
__m256i test_mm256_permutexvar_epi8(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_permutexvar_epi8
- // CHECK: @llvm.x86.avx512.permvar.qi.256
+ // CHECK-LABEL: test_mm256_permutexvar_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.permvar.qi.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_permutexvar_epi8(__A, __B);
}
__m256i test_mm256_maskz_permutexvar_epi8(__mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_permutexvar_epi8
- // CHECK: @llvm.x86.avx512.permvar.qi.256
+ // CHECK-LABEL: test_mm256_maskz_permutexvar_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.permvar.qi.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_permutexvar_epi8(__M, __A, __B);
}
__m256i test_mm256_mask_permutexvar_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_permutexvar_epi8
- // CHECK: @llvm.x86.avx512.permvar.qi.256
+ // CHECK-LABEL: test_mm256_mask_permutexvar_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.permvar.qi.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_permutexvar_epi8(__W, __M, __A, __B);
}
__m128i test_mm_mask2_permutex2var_epi8(__m128i __A, __m128i __I, __mmask16 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask2_permutex2var_epi8
- // CHECK: @llvm.x86.avx512.vpermi2var.qi.128
+ // CHECK-LABEL: test_mm_mask2_permutex2var_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask2_permutex2var_epi8(__A, __I, __U, __B);
}
__m256i test_mm256_mask2_permutex2var_epi8(__m256i __A, __m256i __I, __mmask32 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask2_permutex2var_epi8
- // CHECK: @llvm.x86.avx512.vpermi2var.qi.256
+ // CHECK-LABEL: test_mm256_mask2_permutex2var_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask2_permutex2var_epi8(__A, __I, __U, __B);
}
__m128i test_mm_permutex2var_epi8(__m128i __A, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_permutex2var_epi8
- // CHECK: @llvm.x86.avx512.vpermi2var.qi.128
+ // CHECK-LABEL: test_mm_permutex2var_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
return _mm_permutex2var_epi8(__A, __I, __B);
}
__m128i test_mm_mask_permutex2var_epi8(__m128i __A, __mmask16 __U, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_permutex2var_epi8
- // CHECK: @llvm.x86.avx512.vpermi2var.qi.128
+ // CHECK-LABEL: test_mm_mask_permutex2var_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_permutex2var_epi8(__A, __U, __I, __B);
}
__m128i test_mm_maskz_permutex2var_epi8(__mmask16 __U, __m128i __A, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_permutex2var_epi8
- // CHECK: @llvm.x86.avx512.vpermi2var.qi.128
+ // CHECK-LABEL: test_mm_maskz_permutex2var_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_permutex2var_epi8(__U, __A, __I, __B);
}
__m256i test_mm256_permutex2var_epi8(__m256i __A, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_permutex2var_epi8
- // CHECK: @llvm.x86.avx512.vpermi2var.qi.256
+ // CHECK-LABEL: test_mm256_permutex2var_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_permutex2var_epi8(__A, __I, __B);
}
__m256i test_mm256_mask_permutex2var_epi8(__m256i __A, __mmask32 __U, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_permutex2var_epi8
- // CHECK: @llvm.x86.avx512.vpermi2var.qi.256
+ // CHECK-LABEL: test_mm256_mask_permutex2var_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_permutex2var_epi8(__A, __U, __I, __B);
}
__m256i test_mm256_maskz_permutex2var_epi8(__mmask32 __U, __m256i __A, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_permutex2var_epi8
- // CHECK: @llvm.x86.avx512.vpermi2var.qi.256
+ // CHECK-LABEL: test_mm256_maskz_permutex2var_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_permutex2var_epi8(__U, __A, __I, __B);
}
__m128i test_mm_mask_multishift_epi64_epi8(__m128i __W, __mmask16 __M, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_multishift_epi64_epi8
- // CHECK: @llvm.x86.avx512.pmultishift.qb.128
+ // CHECK-LABEL: test_mm_mask_multishift_epi64_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_multishift_epi64_epi8(__W, __M, __X, __Y);
}
__m128i test_mm_maskz_multishift_epi64_epi8(__mmask16 __M, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_multishift_epi64_epi8
- // CHECK: @llvm.x86.avx512.pmultishift.qb.128
+ // CHECK-LABEL: test_mm_maskz_multishift_epi64_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_multishift_epi64_epi8(__M, __X, __Y);
}
__m128i test_mm_multishift_epi64_epi8(__m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_multishift_epi64_epi8
- // CHECK: @llvm.x86.avx512.pmultishift.qb.128
+ // CHECK-LABEL: test_mm_multishift_epi64_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.pmultishift.qb.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
return _mm_multishift_epi64_epi8(__X, __Y);
}
__m256i test_mm256_mask_multishift_epi64_epi8(__m256i __W, __mmask32 __M, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_multishift_epi64_epi8
- // CHECK: @llvm.x86.avx512.pmultishift.qb.256
+ // CHECK-LABEL: test_mm256_mask_multishift_epi64_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_multishift_epi64_epi8(__W, __M, __X, __Y);
}
__m256i test_mm256_maskz_multishift_epi64_epi8(__mmask32 __M, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_multishift_epi64_epi8
- // CHECK: @llvm.x86.avx512.pmultishift.qb.256
+ // CHECK-LABEL: test_mm256_maskz_multishift_epi64_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_multishift_epi64_epi8(__M, __X, __Y);
}
__m256i test_mm256_multishift_epi64_epi8(__m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_multishift_epi64_epi8
- // CHECK: @llvm.x86.avx512.pmultishift.qb.256
+ // CHECK-LABEL: test_mm256_multishift_epi64_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.pmultishift.qb.256(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_multishift_epi64_epi8(__X, __Y);
}
diff --git a/clang/test/CodeGen/X86/avx512vl-builtins.c b/clang/test/CodeGen/X86/avx512vl-builtins.c
index 1c2d467..78e01b9 100644
--- a/clang/test/CodeGen/X86/avx512vl-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vl-builtins.c
@@ -1,602 +1,607 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__mmask8 test_mm_cmpeq_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpeq_epu32_mask
+ // CHECK-LABEL: test_mm_cmpeq_epu32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
// CHECK: shufflevector <4 x i1> %{{.*}}, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
return (__mmask8)_mm_cmpeq_epu32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpeq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpeq_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmpeq_epu32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpeq_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpeq_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpeq_epu64_mask
+ // CHECK-LABEL: test_mm_cmpeq_epu64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
// CHECK: shufflevector <2 x i1> %{{.*}}, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
return (__mmask8)_mm_cmpeq_epu64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpeq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpeq_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmpeq_epu64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpeq_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpge_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpge_epi32_mask
+ // CHECK-LABEL: test_mm_cmpge_epi32_mask
// CHECK: icmp sge <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpge_epi32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpge_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpge_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmpge_epi32_mask
// CHECK: icmp sge <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpge_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpge_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpge_epi64_mask
+ // CHECK-LABEL: test_mm_cmpge_epi64_mask
// CHECK: icmp sge <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpge_epi64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpge_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpge_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmpge_epi64_mask
// CHECK: icmp sge <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpge_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpge_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpge_epi32_mask
+ // CHECK-LABEL: test_mm256_cmpge_epi32_mask
// CHECK: icmp sge <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpge_epi32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpge_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpge_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpge_epi32_mask
// CHECK: icmp sge <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpge_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpge_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpge_epi64_mask
+ // CHECK-LABEL: test_mm256_cmpge_epi64_mask
// CHECK: icmp sge <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpge_epi64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpge_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpge_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpge_epi64_mask
// CHECK: icmp sge <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpge_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpge_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpge_epu32_mask
+ // CHECK-LABEL: test_mm_cmpge_epu32_mask
// CHECK: icmp uge <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpge_epu32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpge_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpge_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmpge_epu32_mask
// CHECK: icmp uge <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpge_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpge_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpge_epu64_mask
+ // CHECK-LABEL: test_mm_cmpge_epu64_mask
// CHECK: icmp uge <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpge_epu64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpge_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpge_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmpge_epu64_mask
// CHECK: icmp uge <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpge_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpge_epu32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpge_epu32_mask
+ // CHECK-LABEL: test_mm256_cmpge_epu32_mask
// CHECK: icmp uge <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpge_epu32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpge_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpge_epu32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpge_epu32_mask
// CHECK: icmp uge <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpge_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpge_epu64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpge_epu64_mask
+ // CHECK-LABEL: test_mm256_cmpge_epu64_mask
// CHECK: icmp uge <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpge_epu64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpge_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpge_epu64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpge_epu64_mask
// CHECK: icmp uge <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpge_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpgt_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpgt_epu32_mask
+ // CHECK-LABEL: test_mm_cmpgt_epu32_mask
// CHECK: icmp ugt <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpgt_epu32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpgt_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpgt_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmpgt_epu32_mask
// CHECK: icmp ugt <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpgt_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpgt_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpgt_epu64_mask
+ // CHECK-LABEL: test_mm_cmpgt_epu64_mask
// CHECK: icmp ugt <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpgt_epu64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpgt_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpgt_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmpgt_epu64_mask
// CHECK: icmp ugt <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpgt_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpgt_epu32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpgt_epu32_mask
+ // CHECK-LABEL: test_mm256_cmpgt_epu32_mask
// CHECK: icmp ugt <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpgt_epu32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpgt_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpgt_epu32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpgt_epu32_mask
// CHECK: icmp ugt <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpgt_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpgt_epu64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpgt_epu64_mask
+ // CHECK-LABEL: test_mm256_cmpgt_epu64_mask
// CHECK: icmp ugt <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpgt_epu64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpgt_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpgt_epu64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpgt_epu64_mask
// CHECK: icmp ugt <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpgt_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmple_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmple_epi32_mask
+ // CHECK-LABEL: test_mm_cmple_epi32_mask
// CHECK: icmp sle <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmple_epi32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmple_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmple_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmple_epi32_mask
// CHECK: icmp sle <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmple_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmple_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmple_epi64_mask
+ // CHECK-LABEL: test_mm_cmple_epi64_mask
// CHECK: icmp sle <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmple_epi64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmple_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmple_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmple_epi64_mask
// CHECK: icmp sle <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmple_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmple_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmple_epi32_mask
+ // CHECK-LABEL: test_mm256_cmple_epi32_mask
// CHECK: icmp sle <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmple_epi32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmple_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmple_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmple_epi32_mask
// CHECK: icmp sle <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmple_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmple_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmple_epi64_mask
+ // CHECK-LABEL: test_mm256_cmple_epi64_mask
// CHECK: icmp sle <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmple_epi64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmple_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmple_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmple_epi64_mask
// CHECK: icmp sle <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmple_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmple_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmple_epu32_mask
+ // CHECK-LABEL: test_mm_cmple_epu32_mask
// CHECK: icmp ule <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmple_epu32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmple_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmple_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmple_epu32_mask
// CHECK: icmp ule <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmple_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmple_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmple_epu64_mask
+ // CHECK-LABEL: test_mm_cmple_epu64_mask
// CHECK: icmp ule <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmple_epu64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmple_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmple_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmple_epu64_mask
// CHECK: icmp ule <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmple_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmple_epu32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmple_epu32_mask
+ // CHECK-LABEL: test_mm256_cmple_epu32_mask
// CHECK: icmp ule <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmple_epu32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmple_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmple_epu32_mask
+ // CHECK-LABEL: test_mm256_mask_cmple_epu32_mask
// CHECK: icmp ule <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmple_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmple_epu64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmple_epu64_mask
+ // CHECK-LABEL: test_mm256_cmple_epu64_mask
// CHECK: icmp ule <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmple_epu64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmple_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmple_epu64_mask
+ // CHECK-LABEL: test_mm256_mask_cmple_epu64_mask
// CHECK: icmp ule <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmple_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmplt_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmplt_epi32_mask
+ // CHECK-LABEL: test_mm_cmplt_epi32_mask
// CHECK: icmp slt <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmplt_epi32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmplt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmplt_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmplt_epi32_mask
// CHECK: icmp slt <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmplt_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmplt_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmplt_epi64_mask
+ // CHECK-LABEL: test_mm_cmplt_epi64_mask
// CHECK: icmp slt <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmplt_epi64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmplt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmplt_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmplt_epi64_mask
// CHECK: icmp slt <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmplt_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmplt_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmplt_epi32_mask
+ // CHECK-LABEL: test_mm256_cmplt_epi32_mask
// CHECK: icmp slt <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmplt_epi32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmplt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmplt_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmplt_epi32_mask
// CHECK: icmp slt <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmplt_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmplt_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmplt_epi64_mask
+ // CHECK-LABEL: test_mm256_cmplt_epi64_mask
// CHECK: icmp slt <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmplt_epi64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmplt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmplt_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmplt_epi64_mask
// CHECK: icmp slt <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmplt_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmplt_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmplt_epu32_mask
+ // CHECK-LABEL: test_mm_cmplt_epu32_mask
// CHECK: icmp ult <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmplt_epu32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmplt_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmplt_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmplt_epu32_mask
// CHECK: icmp ult <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmplt_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmplt_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmplt_epu64_mask
+ // CHECK-LABEL: test_mm_cmplt_epu64_mask
// CHECK: icmp ult <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmplt_epu64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmplt_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmplt_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmplt_epu64_mask
// CHECK: icmp ult <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmplt_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmplt_epu32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmplt_epu32_mask
+ // CHECK-LABEL: test_mm256_cmplt_epu32_mask
// CHECK: icmp ult <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmplt_epu32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmplt_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmplt_epu32_mask
+ // CHECK-LABEL: test_mm256_mask_cmplt_epu32_mask
// CHECK: icmp ult <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmplt_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmplt_epu64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmplt_epu64_mask
+ // CHECK-LABEL: test_mm256_cmplt_epu64_mask
// CHECK: icmp ult <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmplt_epu64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmplt_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmplt_epu64_mask
+ // CHECK-LABEL: test_mm256_mask_cmplt_epu64_mask
// CHECK: icmp ult <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmplt_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpneq_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpneq_epi32_mask
+ // CHECK-LABEL: test_mm_cmpneq_epi32_mask
// CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpneq_epi32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpneq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpneq_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmpneq_epi32_mask
// CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpneq_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpneq_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpneq_epi64_mask
+ // CHECK-LABEL: test_mm_cmpneq_epi64_mask
// CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpneq_epi64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpneq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpneq_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmpneq_epi64_mask
// CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpneq_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpneq_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpneq_epi32_mask
+ // CHECK-LABEL: test_mm256_cmpneq_epi32_mask
// CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpneq_epi32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpneq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpneq_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpneq_epi32_mask
// CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpneq_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpneq_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpneq_epi64_mask
+ // CHECK-LABEL: test_mm256_cmpneq_epi64_mask
// CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpneq_epi64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpneq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpneq_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpneq_epi64_mask
// CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpneq_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpneq_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpneq_epu32_mask
+ // CHECK-LABEL: test_mm_cmpneq_epu32_mask
// CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpneq_epu32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpneq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpneq_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmpneq_epu32_mask
// CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpneq_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpneq_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpneq_epu64_mask
+ // CHECK-LABEL: test_mm_cmpneq_epu64_mask
// CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpneq_epu64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpneq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpneq_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmpneq_epu64_mask
// CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpneq_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpneq_epu32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpneq_epu32_mask
+ // CHECK-LABEL: test_mm256_cmpneq_epu32_mask
// CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpneq_epu32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpneq_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpneq_epu32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpneq_epu32_mask
// CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpneq_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpneq_epu64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpneq_epu64_mask
+ // CHECK-LABEL: test_mm256_cmpneq_epu64_mask
// CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpneq_epu64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpneq_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpneq_epu64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpneq_epu64_mask
// CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpneq_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmp_eq_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmp_eq_epi32_mask
+ // CHECK-LABEL: test_mm_cmp_eq_epi32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmp_epi32_mask(__a, __b, _MM_CMPINT_EQ);
}
__mmask8 test_mm_mask_cmp_lt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmp_lt_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmp_lt_epi32_mask
// CHECK: icmp slt <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmp_epi32_mask(__u, __a, __b, _MM_CMPINT_LT);
}
__mmask8 test_mm_cmp_lt_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmp_lt_epi64_mask
+ // CHECK-LABEL: test_mm_cmp_lt_epi64_mask
// CHECK: icmp slt <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmp_epi64_mask(__a, __b, _MM_CMPINT_LT);
}
__mmask8 test_mm_mask_cmp_eq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmp_eq_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmp_eq_epi64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmp_epi64_mask(__u, __a, __b, _MM_CMPINT_EQ);
}
__mmask8 test_mm256_cmp_eq_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmp_eq_epi32_mask
+ // CHECK-LABEL: test_mm256_cmp_eq_epi32_mask
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmp_epi32_mask(__a, __b, _MM_CMPINT_EQ);
}
__mmask8 test_mm256_mask_cmp_le_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_le_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmp_le_epi32_mask
// CHECK: icmp sle <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmp_epi32_mask(__u, __a, __b, _MM_CMPINT_LE);
}
__mmask8 test_mm256_cmp_eq_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmp_eq_epi64_mask
+ // CHECK-LABEL: test_mm256_cmp_eq_epi64_mask
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmp_epi64_mask(__a, __b, _MM_CMPINT_EQ);
}
__mmask8 test_mm256_mask_cmp_eq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_eq_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmp_eq_epi64_mask
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmp_epi64_mask(__u, __a, __b, _MM_CMPINT_EQ);
}
__mmask8 test_mm_cmp_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmp_epu32_mask
+ // CHECK-LABEL: test_mm_cmp_epu32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmp_epu32_mask(__a, __b, 0);
}
__mmask8 test_mm_mask_cmp_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmp_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmp_epu32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmp_epu32_mask(__u, __a, __b, 0);
}
__mmask8 test_mm_cmp_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmp_epu64_mask
+ // CHECK-LABEL: test_mm_cmp_epu64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmp_epu64_mask(__a, __b, 0);
}
__mmask8 test_mm_mask_cmp_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmp_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmp_epu64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmp_epu64_mask(__u, __a, __b, 0);
}
__mmask8 test_mm256_cmp_epu32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmp_epu32_mask
+ // CHECK-LABEL: test_mm256_cmp_epu32_mask
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmp_epu32_mask(__a, __b, 0);
}
__mmask8 test_mm256_mask_cmp_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_epu32_mask
+ // CHECK-LABEL: test_mm256_mask_cmp_epu32_mask
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmp_epu32_mask(__u, __a, __b, 0);
}
__mmask8 test_mm256_cmp_epu64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmp_epu64_mask
+ // CHECK-LABEL: test_mm256_cmp_epu64_mask
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmp_epu64_mask(__a, __b, 0);
}
__mmask8 test_mm256_mask_cmp_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_epu64_mask
+ // CHECK-LABEL: test_mm256_mask_cmp_epu64_mask
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmp_epu64_mask(__u, __a, __b, 0);
@@ -604,14 +609,14 @@ __mmask8 test_mm256_mask_cmp_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b)
__m256i test_mm256_mask_add_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
__m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_add_epi32
+ //CHECK-LABEL: test_mm256_mask_add_epi32
//CHECK: add <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_add_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_add_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_add_epi32
+ //CHECK-LABEL: test_mm256_maskz_add_epi32
//CHECK: add <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_add_epi32(__U, __A, __B);
@@ -619,14 +624,14 @@ __m256i test_mm256_maskz_add_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
__m256i test_mm256_mask_add_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
__m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_add_epi64
+ //CHECK-LABEL: test_mm256_mask_add_epi64
//CHECK: add <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_add_epi64(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_add_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_add_epi64
+ //CHECK-LABEL: test_mm256_maskz_add_epi64
//CHECK: add <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_add_epi64 (__U,__A,__B);
@@ -634,14 +639,14 @@ __m256i test_mm256_maskz_add_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
__m256i test_mm256_mask_sub_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
__m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_sub_epi32
+ //CHECK-LABEL: test_mm256_mask_sub_epi32
//CHECK: sub <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_sub_epi32 (__W,__U,__A,__B);
}
__m256i test_mm256_maskz_sub_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_sub_epi32
+ //CHECK-LABEL: test_mm256_maskz_sub_epi32
//CHECK: sub <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_sub_epi32 (__U,__A,__B);
@@ -649,14 +654,14 @@ __m256i test_mm256_maskz_sub_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
__m256i test_mm256_mask_sub_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
__m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_sub_epi64
+ //CHECK-LABEL: test_mm256_mask_sub_epi64
//CHECK: sub <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_sub_epi64 (__W,__U,__A,__B);
}
__m256i test_mm256_maskz_sub_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_sub_epi64
+ //CHECK-LABEL: test_mm256_maskz_sub_epi64
//CHECK: sub <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_sub_epi64 (__U,__A,__B);
@@ -664,7 +669,7 @@ __m256i test_mm256_maskz_sub_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
__m128i test_mm_mask_add_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
__m128i __B) {
- //CHECK-LABEL: @test_mm_mask_add_epi32
+ //CHECK-LABEL: test_mm_mask_add_epi32
//CHECK: add <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_add_epi32(__W,__U,__A,__B);
@@ -672,7 +677,7 @@ __m128i test_mm_mask_add_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
__m128i test_mm_maskz_add_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_add_epi32
+ //CHECK-LABEL: test_mm_maskz_add_epi32
//CHECK: add <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_add_epi32 (__U,__A,__B);
@@ -680,14 +685,14 @@ __m128i test_mm_maskz_add_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
__m128i test_mm_mask_add_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
__m128i __B) {
- //CHECK-LABEL: @test_mm_mask_add_epi64
+ //CHECK-LABEL: test_mm_mask_add_epi64
//CHECK: add <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_add_epi64 (__W,__U,__A,__B);
}
__m128i test_mm_maskz_add_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_add_epi64
+ //CHECK-LABEL: test_mm_maskz_add_epi64
//CHECK: add <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_add_epi64 (__U,__A,__B);
@@ -695,14 +700,14 @@ __m128i test_mm_maskz_add_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
__m128i test_mm_mask_sub_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
__m128i __B) {
- //CHECK-LABEL: @test_mm_mask_sub_epi32
+ //CHECK-LABEL: test_mm_mask_sub_epi32
//CHECK: sub <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_sub_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_sub_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_sub_epi32
+ //CHECK-LABEL: test_mm_maskz_sub_epi32
//CHECK: sub <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_sub_epi32(__U, __A, __B);
@@ -710,14 +715,14 @@ __m128i test_mm_maskz_sub_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
__m128i test_mm_mask_sub_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
__m128i __B) {
- //CHECK-LABEL: @test_mm_mask_sub_epi64
+ //CHECK-LABEL: test_mm_mask_sub_epi64
//CHECK: sub <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_sub_epi64 (__W, __U, __A, __B);
}
__m128i test_mm_maskz_sub_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_sub_epi64
+ //CHECK-LABEL: test_mm_maskz_sub_epi64
//CHECK: sub <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_sub_epi64 (__U, __A, __B);
@@ -725,7 +730,7 @@ __m128i test_mm_maskz_sub_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
__m256i test_mm256_mask_mul_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
__m256i __Y) {
- //CHECK-LABEL: @test_mm256_mask_mul_epi32
+ //CHECK-LABEL: test_mm256_mask_mul_epi32
//CHECK: shl <4 x i64> %{{.*}}, splat (i64 32)
//CHECK: ashr <4 x i64> %{{.*}}, splat (i64 32)
//CHECK: shl <4 x i64> %{{.*}}, splat (i64 32)
@@ -736,7 +741,7 @@ __m256i test_mm256_mask_mul_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
}
__m256i test_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y) {
- //CHECK-LABEL: @test_mm256_maskz_mul_epi32
+ //CHECK-LABEL: test_mm256_maskz_mul_epi32
//CHECK: shl <4 x i64> %{{.*}}, splat (i64 32)
//CHECK: ashr <4 x i64> %{{.*}}, splat (i64 32)
//CHECK: shl <4 x i64> %{{.*}}, splat (i64 32)
@@ -749,7 +754,7 @@ __m256i test_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y) {
__m128i test_mm_mask_mul_epi32 (__m128i __W, __mmask8 __M, __m128i __X,
__m128i __Y) {
- //CHECK-LABEL: @test_mm_mask_mul_epi32
+ //CHECK-LABEL: test_mm_mask_mul_epi32
//CHECK: shl <2 x i64> %{{.*}}, splat (i64 32)
//CHECK: ashr <2 x i64> %{{.*}}, splat (i64 32)
//CHECK: shl <2 x i64> %{{.*}}, splat (i64 32)
@@ -760,7 +765,7 @@ __m128i test_mm_mask_mul_epi32 (__m128i __W, __mmask8 __M, __m128i __X,
}
__m128i test_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y) {
- //CHECK-LABEL: @test_mm_maskz_mul_epi32
+ //CHECK-LABEL: test_mm_maskz_mul_epi32
//CHECK: shl <2 x i64> %{{.*}}, splat (i64 32)
//CHECK: ashr <2 x i64> %{{.*}}, splat (i64 32)
//CHECK: shl <2 x i64> %{{.*}}, splat (i64 32)
@@ -772,7 +777,7 @@ __m128i test_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y) {
__m256i test_mm256_mask_mul_epu32 (__m256i __W, __mmask8 __M, __m256i __X,
__m256i __Y) {
- //CHECK-LABEL: @test_mm256_mask_mul_epu32
+ //CHECK-LABEL: test_mm256_mask_mul_epu32
//CHECK: and <4 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: and <4 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
@@ -781,7 +786,7 @@ __m256i test_mm256_mask_mul_epu32 (__m256i __W, __mmask8 __M, __m256i __X,
}
__m256i test_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y) {
- //CHECK-LABEL: @test_mm256_maskz_mul_epu32
+ //CHECK-LABEL: test_mm256_maskz_mul_epu32
//CHECK: and <4 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: and <4 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
@@ -791,7 +796,7 @@ __m256i test_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y) {
__m128i test_mm_mask_mul_epu32 (__m128i __W, __mmask8 __M, __m128i __X,
__m128i __Y) {
- //CHECK-LABEL: @test_mm_mask_mul_epu32
+ //CHECK-LABEL: test_mm_mask_mul_epu32
//CHECK: and <2 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: and <2 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
@@ -800,7 +805,7 @@ __m128i test_mm_mask_mul_epu32 (__m128i __W, __mmask8 __M, __m128i __X,
}
__m128i test_mm_maskz_mul_epu32 (__mmask8 __M, __m128i __X, __m128i __Y) {
- //CHECK-LABEL: @test_mm_maskz_mul_epu32
+ //CHECK-LABEL: test_mm_maskz_mul_epu32
//CHECK: and <2 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: and <2 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
@@ -809,7 +814,7 @@ __m128i test_mm_maskz_mul_epu32 (__mmask8 __M, __m128i __X, __m128i __Y) {
}
__m128i test_mm_maskz_mullo_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_mullo_epi32
+ //CHECK-LABEL: test_mm_maskz_mullo_epi32
//CHECK: mul <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_mullo_epi32(__M, __A, __B);
@@ -817,14 +822,14 @@ __m128i test_mm_maskz_mullo_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
__m128i test_mm_mask_mullo_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
__m128i __B) {
- //CHECK-LABEL: @test_mm_mask_mullo_epi32
+ //CHECK-LABEL: test_mm_mask_mullo_epi32
//CHECK: mul <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_mullo_epi32(__W, __M, __A, __B);
}
__m256i test_mm256_maskz_mullo_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_mullo_epi32
+ //CHECK-LABEL: test_mm256_maskz_mullo_epi32
//CHECK: mul <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_mullo_epi32(__M, __A, __B);
@@ -832,61 +837,63 @@ __m256i test_mm256_maskz_mullo_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
__m256i test_mm256_mask_mullo_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
__m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_mullo_epi32
+ //CHECK-LABEL: test_mm256_mask_mullo_epi32
//CHECK: mul <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_mullo_epi32(__W, __M, __A, __B);
}
__m256i test_mm256_and_epi32 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_and_epi32
+ //CHECK-LABEL: test_mm256_and_epi32
//CHECK: and <8 x i32> %{{.*}}, %{{.*}}
return _mm256_and_epi32(__A, __B);
}
+TEST_CONSTEXPR(match_v4di(_mm256_and_epi32((__m256i)(__v4di){7, 7, 7, 7}, (__m256i)(__v4di){3, 3, 3, 3}), 3, 3, 3, 3));
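+// The TEST_CONSTEXPR/match_v4di helpers are assumed to come from this suite's
+// shared test header; the line above checks that the intrinsic also folds in a
+// constant expression, where 7 & 3 yields 3 in every 64-bit lane.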
__m256i test_mm256_mask_and_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_and_epi32
+ //CHECK-LABEL: test_mm256_mask_and_epi32
//CHECK: and <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_and_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_and_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_and_epi32
+ //CHECK-LABEL: test_mm256_maskz_and_epi32
//CHECK: and <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_and_epi32(__U, __A, __B);
}
__m128i test_mm_and_epi32 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_and_epi32
+ //CHECK-LABEL: test_mm_and_epi32
//CHECK: and <4 x i32> %{{.*}}, %{{.*}}
return _mm_and_epi32(__A, __B);
}
+TEST_CONSTEXPR(match_v2di(_mm_and_epi32((__m128i)(__v2di){7, 7}, (__m128i)(__v2di){3, 3}), 3, 3));
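+// Same assumed helpers for the 128-bit form: match_v2di verifies that the
+// compile-time result of 7 & 3 is 3 in both 64-bit lanes.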
__m128i test_mm_mask_and_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_and_epi32
+ //CHECK-LABEL: test_mm_mask_and_epi32
//CHECK: and <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_and_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_and_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_and_epi32
+ //CHECK-LABEL: test_mm_maskz_and_epi32
//CHECK: and <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_and_epi32(__U, __A, __B);
}
__m256i test_mm256_andnot_epi32 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_andnot_epi32
+ //CHECK-LABEL: test_mm256_andnot_epi32
//CHECK: xor <8 x i32> %{{.*}}, splat (i32 -1)
//CHECK: and <8 x i32> %{{.*}}, %{{.*}}
return _mm256_andnot_epi32(__A, __B);
}
__m256i test_mm256_mask_andnot_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_andnot_epi32
+ //CHECK-LABEL: test_mm256_mask_andnot_epi32
//CHECK: xor <8 x i32> %{{.*}}, splat (i32 -1)
//CHECK: and <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
@@ -894,7 +901,7 @@ __m256i test_mm256_mask_andnot_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __
}
__m256i test_mm256_maskz_andnot_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_andnot_epi32
+ //CHECK-LABEL: test_mm256_maskz_andnot_epi32
//CHECK: xor <8 x i32> %{{.*}}, splat (i32 -1)
//CHECK: and <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
@@ -902,14 +909,14 @@ __m256i test_mm256_maskz_andnot_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
}
__m128i test_mm_andnot_epi32 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_andnot_epi32
+ //CHECK-LABEL: test_mm_andnot_epi32
//CHECK: xor <4 x i32> %{{.*}}, splat (i32 -1)
//CHECK: and <4 x i32> %{{.*}}, %{{.*}}
return _mm_andnot_epi32(__A, __B);
}
__m128i test_mm_mask_andnot_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_andnot_epi32
+ //CHECK-LABEL: test_mm_mask_andnot_epi32
//CHECK: xor <4 x i32> %{{.*}}, splat (i32 -1)
//CHECK: and <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
@@ -917,7 +924,7 @@ __m128i test_mm_mask_andnot_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m12
}
__m128i test_mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_andnot_epi32
+ //CHECK-LABEL: test_mm_maskz_andnot_epi32
//CHECK: xor <4 x i32> %{{.*}}, splat (i32 -1)
//CHECK: and <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
@@ -925,134 +932,134 @@ __m128i test_mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
}
__m256i test_mm256_or_epi32 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_or_epi32
+ //CHECK-LABEL: test_mm256_or_epi32
//CHECK: or <8 x i32> %{{.*}}, %{{.*}}
return _mm256_or_epi32(__A, __B);
}
__m256i test_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_or_epi32
+ //CHECK-LABEL: test_mm256_mask_or_epi32
//CHECK: or <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_or_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_or_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_or_epi32
+ //CHECK-LABEL: test_mm256_maskz_or_epi32
//CHECK: or <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_or_epi32(__U, __A, __B);
}
__m128i test_mm_or_epi32 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_or_epi32
+ //CHECK-LABEL: test_mm_or_epi32
//CHECK: or <4 x i32> %{{.*}}, %{{.*}}
return _mm_or_epi32(__A, __B);
}
__m128i test_mm_mask_or_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_or_epi32
+ //CHECK-LABEL: test_mm_mask_or_epi32
//CHECK: or <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_or_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_or_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_or_epi32
+ //CHECK-LABEL: test_mm_maskz_or_epi32
//CHECK: or <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_or_epi32(__U, __A, __B);
}
__m256i test_mm256_xor_epi32 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_xor_epi32
+ //CHECK-LABEL: test_mm256_xor_epi32
//CHECK: xor <8 x i32> %{{.*}}, %{{.*}}
return _mm256_xor_epi32(__A, __B);
}
__m256i test_mm256_mask_xor_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_xor_epi32
+ //CHECK-LABEL: test_mm256_mask_xor_epi32
//CHECK: xor <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_xor_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_xor_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_xor_epi32
+ //CHECK-LABEL: test_mm256_maskz_xor_epi32
//CHECK: xor <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_xor_epi32(__U, __A, __B);
}
__m128i test_mm_xor_epi32 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_xor_epi32
+ //CHECK-LABEL: test_mm_xor_epi32
//CHECK: xor <4 x i32> %{{.*}}, %{{.*}}
return _mm_xor_epi32(__A, __B);
}
__m128i test_mm_mask_xor_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_xor_epi32
+ //CHECK-LABEL: test_mm_mask_xor_epi32
//CHECK: xor <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_xor_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_xor_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_xor_epi32
+ //CHECK-LABEL: test_mm_maskz_xor_epi32
//CHECK: xor <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_xor_epi32(__U, __A, __B);
}
__m256i test_mm256_and_epi64 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_and_epi64
+ //CHECK-LABEL: test_mm256_and_epi64
//CHECK: and <4 x i64> %{{.*}}, %{{.*}}
return _mm256_and_epi64(__A, __B);
}
__m256i test_mm256_mask_and_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_and_epi64
+ //CHECK-LABEL: test_mm256_mask_and_epi64
//CHECK: and <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_and_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_and_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_and_epi64
+ //CHECK-LABEL: test_mm256_maskz_and_epi64
//CHECK: and <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_and_epi64(__U, __A, __B);
}
__m128i test_mm_and_epi64 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_and_epi64
+ //CHECK-LABEL: test_mm_and_epi64
//CHECK: and <2 x i64> %{{.*}}, %{{.*}}
return _mm_and_epi64(__A, __B);
}
__m128i test_mm_mask_and_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_and_epi64
+ //CHECK-LABEL: test_mm_mask_and_epi64
//CHECK: and <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_and_epi64(__W,__U, __A, __B);
}
__m128i test_mm_maskz_and_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_and_epi64
+ //CHECK-LABEL: test_mm_maskz_and_epi64
//CHECK: and <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_and_epi64(__U, __A, __B);
}
__m256i test_mm256_andnot_epi64 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_andnot_epi64
+ //CHECK-LABEL: test_mm256_andnot_epi64
//CHECK: xor <4 x i64> %{{.*}}, splat (i64 -1)
//CHECK: and <4 x i64> %{{.*}}, %{{.*}}
return _mm256_andnot_epi64(__A, __B);
}
__m256i test_mm256_mask_andnot_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_andnot_epi64
+ //CHECK-LABEL: test_mm256_mask_andnot_epi64
//CHECK: xor <4 x i64> %{{.*}}, splat (i64 -1)
//CHECK: and <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
@@ -1060,7 +1067,7 @@ __m256i test_mm256_mask_andnot_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __
}
__m256i test_mm256_maskz_andnot_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_andnot_epi64
+ //CHECK-LABEL: test_mm256_maskz_andnot_epi64
//CHECK: xor <4 x i64> %{{.*}}, splat (i64 -1)
//CHECK: and <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
@@ -1068,14 +1075,14 @@ __m256i test_mm256_maskz_andnot_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
}
__m128i test_mm_andnot_epi64 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_andnot_epi64
+ //CHECK-LABEL: test_mm_andnot_epi64
//CHECK: xor <2 x i64> %{{.*}}, splat (i64 -1)
//CHECK: and <2 x i64> %{{.*}}, %{{.*}}
return _mm_andnot_epi64(__A, __B);
}
__m128i test_mm_mask_andnot_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_andnot_epi64
+ //CHECK-LABEL: test_mm_mask_andnot_epi64
//CHECK: xor <2 x i64> %{{.*}}, splat (i64 -1)
//CHECK: and <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
@@ -1083,7 +1090,7 @@ __m128i test_mm_mask_andnot_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m12
}
__m128i test_mm_maskz_andnot_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_andnot_epi64
+ //CHECK-LABEL: test_mm_maskz_andnot_epi64
//CHECK: xor <2 x i64> %{{.*}}, splat (i64 -1)
//CHECK: and <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
@@ -1091,87 +1098,87 @@ __m128i test_mm_maskz_andnot_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
}
__m256i test_mm256_or_epi64 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_or_epi64
+ //CHECK-LABEL: test_mm256_or_epi64
//CHECK: or <4 x i64> %{{.*}}, %{{.*}}
return _mm256_or_epi64(__A, __B);
}
__m256i test_mm256_mask_or_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_or_epi64
+ //CHECK-LABEL: test_mm256_mask_or_epi64
//CHECK: or <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_or_epi64(__W,__U, __A, __B);
}
__m256i test_mm256_maskz_or_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_or_epi64
+ //CHECK-LABEL: test_mm256_maskz_or_epi64
//CHECK: or <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_or_epi64(__U, __A, __B);
}
__m128i test_mm_or_epi64 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_or_epi64
+ //CHECK-LABEL: test_mm_or_epi64
//CHECK: or <2 x i64> %{{.*}}, %{{.*}}
return _mm_or_epi64(__A, __B);
}
__m128i test_mm_mask_or_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_or_epi64
+ //CHECK-LABEL: test_mm_mask_or_epi64
//CHECK: or <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_or_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_or_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_or_epi64
+ //CHECK-LABEL: test_mm_maskz_or_epi64
//CHECK: or <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_or_epi64( __U, __A, __B);
}
__m256i test_mm256_xor_epi64 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_xor_epi64
+ //CHECK-LABEL: test_mm256_xor_epi64
//CHECK: xor <4 x i64> %{{.*}}, %{{.*}}
return _mm256_xor_epi64(__A, __B);
}
__m256i test_mm256_mask_xor_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_xor_epi64
+ //CHECK-LABEL: test_mm256_mask_xor_epi64
//CHECK: xor <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_xor_epi64(__W,__U, __A, __B);
}
__m256i test_mm256_maskz_xor_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_xor_epi64
+ //CHECK-LABEL: test_mm256_maskz_xor_epi64
//CHECK: xor <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_xor_epi64(__U, __A, __B);
}
__m128i test_mm_xor_epi64 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_xor_epi64
+ //CHECK-LABEL: test_mm_xor_epi64
//CHECK: xor <2 x i64> %{{.*}}, %{{.*}}
return _mm_xor_epi64(__A, __B);
}
__m128i test_mm_mask_xor_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_xor_epi64
+ //CHECK-LABEL: test_mm_mask_xor_epi64
//CHECK: xor <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_xor_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_xor_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_xor_epi64
+ //CHECK-LABEL: test_mm_maskz_xor_epi64
//CHECK: xor <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_xor_epi64( __U, __A, __B);
}
__mmask8 test_mm256_cmp_ps_mask_eq_oq(__m256 a, __m256 b) {
- // CHECK-LABEL: @test_mm256_cmp_ps_mask_eq_oq
+ // CHECK-LABEL: test_mm256_cmp_ps_mask_eq_oq
// CHECK: fcmp oeq <8 x float> %{{.*}}, %{{.*}}
return _mm256_cmp_ps_mask(a, b, _CMP_EQ_OQ);
}
@@ -1363,7 +1370,7 @@ __mmask8 test_mm256_cmp_ps_mask_true_us(__m256 a, __m256 b) {
}
__mmask8 test_mm256_mask_cmp_ps_mask_eq_oq(__mmask8 m, __m256 a, __m256 b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_ps_mask_eq_oq
+ // CHECK-LABEL: test_mm256_mask_cmp_ps_mask_eq_oq
// CHECK: [[CMP:%.*]] = fcmp oeq <8 x float> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> [[CMP]], {{.*}}
return _mm256_mask_cmp_ps_mask(m, a, b, _CMP_EQ_OQ);
@@ -1587,7 +1594,7 @@ __mmask8 test_mm256_mask_cmp_ps_mask_true_us(__mmask8 m, __m256 a, __m256 b) {
}
__mmask8 test_mm256_cmp_pd_mask_eq_oq(__m256d a, __m256d b) {
- // CHECK-LABEL: @test_mm256_cmp_pd_mask_eq_oq
+ // CHECK-LABEL: test_mm256_cmp_pd_mask_eq_oq
// CHECK: fcmp oeq <4 x double> %{{.*}}, %{{.*}}
return _mm256_cmp_pd_mask(a, b, _CMP_EQ_OQ);
}
@@ -1779,7 +1786,7 @@ __mmask8 test_mm256_cmp_pd_mask_true_us(__m256d a, __m256d b) {
}
__mmask8 test_mm256_mask_cmp_pd_mask_eq_oq(__mmask8 m, __m256d a, __m256d b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_pd_mask_eq_oq
+ // CHECK-LABEL: test_mm256_mask_cmp_pd_mask_eq_oq
// CHECK: [[CMP:%.*]] = fcmp oeq <4 x double> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> [[CMP]], {{.*}}
return _mm256_mask_cmp_pd_mask(m, a, b, _CMP_EQ_OQ);
@@ -2003,7 +2010,7 @@ __mmask8 test_mm256_mask_cmp_pd_mask_true_us(__mmask8 m, __m256d a, __m256d b) {
}
__mmask8 test_mm_cmp_ps_mask_eq_oq(__m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_cmp_ps_mask_eq_oq
+ // CHECK-LABEL: test_mm_cmp_ps_mask_eq_oq
// CHECK: fcmp oeq <4 x float> %{{.*}}, %{{.*}}
return _mm_cmp_ps_mask(a, b, _CMP_EQ_OQ);
}
@@ -2195,7 +2202,7 @@ __mmask8 test_mm_cmp_ps_mask_true_us(__m128 a, __m128 b) {
}
__mmask8 test_mm_mask_cmp_ps_mask_eq_oq(__mmask8 m, __m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_mask_cmp_ps_mask_eq_oq
+ // CHECK-LABEL: test_mm_mask_cmp_ps_mask_eq_oq
// CHECK: [[CMP:%.*]] = fcmp oeq <4 x float> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> [[CMP]], {{.*}}
return _mm_mask_cmp_ps_mask(m, a, b, _CMP_EQ_OQ);
@@ -2419,7 +2426,7 @@ __mmask8 test_mm_mask_cmp_ps_mask_true_us(__mmask8 m, __m128 a, __m128 b) {
}
__mmask8 test_mm_cmp_pd_mask_eq_oq(__m128d a, __m128d b) {
- // CHECK-LABEL: @test_mm_cmp_pd_mask_eq_oq
+ // CHECK-LABEL: test_mm_cmp_pd_mask_eq_oq
// CHECK: fcmp oeq <2 x double> %{{.*}}, %{{.*}}
return _mm_cmp_pd_mask(a, b, _CMP_EQ_OQ);
}
@@ -2611,7 +2618,7 @@ __mmask8 test_mm_cmp_pd_mask_true_us(__m128d a, __m128d b) {
}
__mmask8 test_mm_mask_cmp_pd_mask_eq_oq(__mmask8 m, __m128d a, __m128d b) {
- // CHECK-LABEL: @test_mm_mask_cmp_pd_mask_eq_oq
+ // CHECK-LABEL: test_mm_mask_cmp_pd_mask_eq_oq
// CHECK: [[CMP:%.*]] = fcmp oeq <2 x double> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> [[CMP]], {{.*}}
return _mm_mask_cmp_pd_mask(m, a, b, _CMP_EQ_OQ);
@@ -2835,7 +2842,7 @@ __mmask8 test_mm_mask_cmp_pd_mask_true_us(__mmask8 m, __m128d a, __m128d b) {
}
__m128d test_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_mask_fmadd_pd
+ // CHECK-LABEL: test_mm_mask_fmadd_pd
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
@@ -2843,7 +2850,7 @@ __m128d test_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __
}
__m128d test_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_mask_fmsub_pd
+ // CHECK-LABEL: test_mm_mask_fmsub_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -2852,7 +2859,7 @@ __m128d test_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __
}
__m128d test_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmadd_pd
+ // CHECK-LABEL: test_mm_mask3_fmadd_pd
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
@@ -2860,7 +2867,7 @@ __m128d test_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 _
}
__m128d test_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmadd_pd
+ // CHECK-LABEL: test_mm_mask3_fnmadd_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -2869,7 +2876,7 @@ __m128d test_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8
}
__m128d test_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_maskz_fmadd_pd
+ // CHECK-LABEL: test_mm_maskz_fmadd_pd
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
@@ -2877,7 +2884,7 @@ __m128d test_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d _
}
__m128d test_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_maskz_fmsub_pd
+ // CHECK-LABEL: test_mm_maskz_fmsub_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -2886,7 +2893,7 @@ __m128d test_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d _
}
__m128d test_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmadd_pd
+ // CHECK-LABEL: test_mm_maskz_fnmadd_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -2895,7 +2902,7 @@ __m128d test_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d
}
__m128d test_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmsub_pd
+ // CHECK-LABEL: test_mm_maskz_fnmsub_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
@@ -2905,7 +2912,7 @@ __m128d test_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d
}
__m256d test_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_mask_fmadd_pd
+ // CHECK-LABEL: test_mm256_mask_fmadd_pd
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
@@ -2913,7 +2920,7 @@ __m256d test_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d
}
__m256d test_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_mask_fmsub_pd
+ // CHECK-LABEL: test_mm256_mask_fmsub_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2922,7 +2929,7 @@ __m256d test_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d
}
__m256d test_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmadd_pd
+ // CHECK-LABEL: test_mm256_mask3_fmadd_pd
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
@@ -2930,7 +2937,7 @@ __m256d test_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask
}
__m256d test_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fnmadd_pd
+ // CHECK-LABEL: test_mm256_mask3_fnmadd_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2939,7 +2946,7 @@ __m256d test_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmas
}
__m256d test_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmadd_pd
+ // CHECK-LABEL: test_mm256_maskz_fmadd_pd
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
@@ -2947,7 +2954,7 @@ __m256d test_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256
}
__m256d test_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmsub_pd
+ // CHECK-LABEL: test_mm256_maskz_fmsub_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2956,7 +2963,7 @@ __m256d test_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256
}
__m256d test_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_maskz_fnmadd_pd
+ // CHECK-LABEL: test_mm256_maskz_fnmadd_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2965,7 +2972,7 @@ __m256d test_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m25
}
__m256d test_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_maskz_fnmsub_pd
+ // CHECK-LABEL: test_mm256_maskz_fnmsub_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
@@ -2975,7 +2982,7 @@ __m256d test_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m25
}
__m128 test_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_mask_fmadd_ps
+ // CHECK-LABEL: test_mm_mask_fmadd_ps
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -2983,7 +2990,7 @@ __m128 test_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
}
__m128 test_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_mask_fmsub_ps
+ // CHECK-LABEL: test_mm_mask_fmsub_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2992,7 +2999,7 @@ __m128 test_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
}
__m128 test_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmadd_ps
+ // CHECK-LABEL: test_mm_mask3_fmadd_ps
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -3000,7 +3007,7 @@ __m128 test_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
}
__m128 test_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmadd_ps
+ // CHECK-LABEL: test_mm_mask3_fnmadd_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3009,7 +3016,7 @@ __m128 test_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
}
__m128 test_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_maskz_fmadd_ps
+ // CHECK-LABEL: test_mm_maskz_fmadd_ps
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -3017,7 +3024,7 @@ __m128 test_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
__m128 test_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_maskz_fmsub_ps
+ // CHECK-LABEL: test_mm_maskz_fmsub_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3026,7 +3033,7 @@ __m128 test_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
__m128 test_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmadd_ps
+ // CHECK-LABEL: test_mm_maskz_fnmadd_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3035,7 +3042,7 @@ __m128 test_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
__m128 test_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmsub_ps
+ // CHECK-LABEL: test_mm_maskz_fnmsub_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
@@ -3045,14 +3052,14 @@ __m128 test_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
__m256 test_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_mask_fmadd_ps
+ // CHECK-LABEL: test_mm256_mask_fmadd_ps
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_fmadd_ps(__A, __U, __B, __C);
}
__m256 test_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_mask_fmsub_ps
+ // CHECK-LABEL: test_mm256_mask_fmsub_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3060,14 +3067,14 @@ __m256 test_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C
}
__m256 test_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmadd_ps
+ // CHECK-LABEL: test_mm256_mask3_fmadd_ps
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask3_fmadd_ps(__A, __B, __C, __U);
}
__m256 test_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fnmadd_ps
+ // CHECK-LABEL: test_mm256_mask3_fnmadd_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3075,14 +3082,14 @@ __m256 test_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 _
}
__m256 test_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmadd_ps
+ // CHECK-LABEL: test_mm256_maskz_fmadd_ps
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_fmadd_ps(__U, __A, __B, __C);
}
__m256 test_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmsub_ps
+ // CHECK-LABEL: test_mm256_maskz_fmsub_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3090,7 +3097,7 @@ __m256 test_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __
}
__m256 test_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_maskz_fnmadd_ps
+ // CHECK-LABEL: test_mm256_maskz_fnmadd_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3098,7 +3105,7 @@ __m256 test_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 _
}
__m256 test_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_maskz_fnmsub_ps
+ // CHECK-LABEL: test_mm256_maskz_fnmsub_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
@@ -3107,7 +3114,7 @@ __m256 test_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 _
}
__m128d test_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_mask_fmaddsub_pd
+ // CHECK-LABEL: test_mm_mask_fmaddsub_pd
// CHECK-NOT: fneg
// CHECK: call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3116,7 +3123,7 @@ __m128d test_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d
}
__m128d test_mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_mask_fmsubadd_pd
+ // CHECK-LABEL: test_mm_mask_fmsubadd_pd
// CHECK: [[NEG:%.+]] = fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3125,7 +3132,7 @@ __m128d test_mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d
}
__m128d test_mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmaddsub_pd
+ // CHECK-LABEL: test_mm_mask3_fmaddsub_pd
// CHECK-NOT: fneg
// CHECK: call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3134,7 +3141,7 @@ __m128d test_mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask
}
__m128d test_mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_maskz_fmaddsub_pd
+ // CHECK-LABEL: test_mm_maskz_fmaddsub_pd
// CHECK-NOT: fneg
// CHECK: call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3143,7 +3150,7 @@ __m128d test_mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128
}
__m128d test_mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_maskz_fmsubadd_pd
+ // CHECK-LABEL: test_mm_maskz_fmsubadd_pd
// CHECK: [[NEG:%.+]] = fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3152,7 +3159,7 @@ __m128d test_mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128
}
__m256d test_mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_mask_fmaddsub_pd
+ // CHECK-LABEL: test_mm256_mask_fmaddsub_pd
// CHECK-NOT: fneg
// CHECK: call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3161,7 +3168,7 @@ __m256d test_mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m2
}
__m256d test_mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_mask_fmsubadd_pd
+ // CHECK-LABEL: test_mm256_mask_fmsubadd_pd
// CHECK: [[NEG:%.+]] = fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3170,7 +3177,7 @@ __m256d test_mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m2
}
__m256d test_mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmaddsub_pd
+ // CHECK-LABEL: test_mm256_mask3_fmaddsub_pd
// CHECK-NOT: fneg
// CHECK: call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3179,7 +3186,7 @@ __m256d test_mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mm
}
__m256d test_mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmaddsub_pd
+ // CHECK-LABEL: test_mm256_maskz_fmaddsub_pd
// CHECK-NOT: fneg
// CHECK: call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3188,7 +3195,7 @@ __m256d test_mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m
}
__m256d test_mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmsubadd_pd
+ // CHECK-LABEL: test_mm256_maskz_fmsubadd_pd
// CHECK: [[NEG:%.+]] = fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3197,7 +3204,7 @@ __m256d test_mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m
}
__m128 test_mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_mask_fmaddsub_ps
+ // CHECK-LABEL: test_mm_mask_fmaddsub_ps
// CHECK-NOT: fneg
// CHECK: call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3206,7 +3213,7 @@ __m128 test_mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C
}
__m128 test_mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_mask_fmsubadd_ps
+ // CHECK-LABEL: test_mm_mask_fmsubadd_ps
// CHECK: [[NEG:%.+]] = fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3215,7 +3222,7 @@ __m128 test_mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C
}
__m128 test_mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmaddsub_ps
+ // CHECK-LABEL: test_mm_mask3_fmaddsub_ps
// CHECK-NOT: fneg
// CHECK: call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3224,7 +3231,7 @@ __m128 test_mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __
}
__m128 test_mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_maskz_fmaddsub_ps
+ // CHECK-LABEL: test_mm_maskz_fmaddsub_ps
// CHECK-NOT: fneg
// CHECK: call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3233,7 +3240,7 @@ __m128 test_mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __
}
__m128 test_mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_maskz_fmsubadd_ps
+ // CHECK-LABEL: test_mm_maskz_fmsubadd_ps
// CHECK: [[NEG:%.+]] = fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3242,7 +3249,7 @@ __m128 test_mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __
}
__m256 test_mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_mask_fmaddsub_ps
+ // CHECK-LABEL: test_mm256_mask_fmaddsub_ps
// CHECK-NOT: fneg
// CHECK: call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3250,7 +3257,7 @@ __m256 test_mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256
}
__m256 test_mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_mask_fmsubadd_ps
+ // CHECK-LABEL: test_mm256_mask_fmsubadd_ps
// CHECK: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]])
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3258,7 +3265,7 @@ __m256 test_mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256
}
__m256 test_mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmaddsub_ps
+ // CHECK-LABEL: test_mm256_mask3_fmaddsub_ps
// CHECK-NOT: fneg
// CHECK: call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3266,7 +3273,7 @@ __m256 test_mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8
}
__m256 test_mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmaddsub_ps
+ // CHECK-LABEL: test_mm256_maskz_fmaddsub_ps
// CHECK-NOT: fneg
// CHECK: call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3274,7 +3281,7 @@ __m256 test_mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256
}
__m256 test_mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmsubadd_ps
+ // CHECK-LABEL: test_mm256_maskz_fmsubadd_ps
// CHECK: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]])
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3282,7 +3289,7 @@ __m256 test_mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256
}
__m128d test_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmsub_pd
+ // CHECK-LABEL: test_mm_mask3_fmsub_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3291,7 +3298,7 @@ __m128d test_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 _
}
__m256d test_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmsub_pd
+ // CHECK-LABEL: test_mm256_mask3_fmsub_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3300,7 +3307,7 @@ __m256d test_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask
}
__m128 test_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmsub_ps
+ // CHECK-LABEL: test_mm_mask3_fmsub_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3309,7 +3316,7 @@ __m128 test_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
}
__m256 test_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmsub_ps
+ // CHECK-LABEL: test_mm256_mask3_fmsub_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3317,7 +3324,7 @@ __m256 test_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __
}
__m128d test_mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmsubadd_pd
+ // CHECK-LABEL: test_mm_mask3_fmsubadd_pd
// CHECK: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
// CHECK: call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3326,7 +3333,7 @@ __m128d test_mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask
}
__m256d test_mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmsubadd_pd
+ // CHECK-LABEL: test_mm256_mask3_fmsubadd_pd
// CHECK: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
// CHECK: call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3335,7 +3342,7 @@ __m256d test_mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mm
}
__m128 test_mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmsubadd_ps
+ // CHECK-LABEL: test_mm_mask3_fmsubadd_ps
// CHECK: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
// CHECK: call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3344,7 +3351,7 @@ __m128 test_mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __
}
__m256 test_mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmsubadd_ps
+ // CHECK-LABEL: test_mm256_mask3_fmsubadd_ps
// CHECK: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]])
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3352,7 +3359,7 @@ __m256 test_mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8
}
__m128d test_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_mask_fnmadd_pd
+ // CHECK-LABEL: test_mm_mask_fnmadd_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3361,7 +3368,7 @@ __m128d test_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d _
}
__m256d test_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_mask_fnmadd_pd
+ // CHECK-LABEL: test_mm256_mask_fnmadd_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3370,7 +3377,7 @@ __m256d test_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256
}
__m128 test_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_mask_fnmadd_ps
+ // CHECK-LABEL: test_mm_mask_fnmadd_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3379,7 +3386,7 @@ __m128 test_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
}
__m256 test_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_mask_fnmadd_ps
+ // CHECK-LABEL: test_mm256_mask_fnmadd_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3387,7 +3394,7 @@ __m256 test_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __
}
__m128d test_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_mask_fnmsub_pd
+ // CHECK-LABEL: test_mm_mask_fnmsub_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
@@ -3397,7 +3404,7 @@ __m128d test_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d _
}
__m128d test_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmsub_pd
+ // CHECK-LABEL: test_mm_mask3_fnmsub_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
@@ -3407,7 +3414,7 @@ __m128d test_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8
}
__m256d test_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_mask_fnmsub_pd
+ // CHECK-LABEL: test_mm256_mask_fnmsub_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
@@ -3417,7 +3424,7 @@ __m256d test_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256
}
__m256d test_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fnmsub_pd
+ // CHECK-LABEL: test_mm256_mask3_fnmsub_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
@@ -3427,7 +3434,7 @@ __m256d test_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmas
}
__m128 test_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_mask_fnmsub_ps
+ // CHECK-LABEL: test_mm_mask_fnmsub_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
@@ -3437,7 +3444,7 @@ __m128 test_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
}
__m128 test_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmsub_ps
+ // CHECK-LABEL: test_mm_mask3_fnmsub_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
@@ -3447,7 +3454,7 @@ __m128 test_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
}
__m256 test_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_mask_fnmsub_ps
+ // CHECK-LABEL: test_mm256_mask_fnmsub_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
@@ -3456,7 +3463,7 @@ __m256 test_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __
}
__m256 test_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fnmsub_ps
+ // CHECK-LABEL: test_mm256_mask3_fnmsub_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
@@ -3465,1072 +3472,1092 @@ __m256 test_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 _
}
__m128d test_mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_add_pd
+ // CHECK-LABEL: test_mm_mask_add_pd
// CHECK: fadd <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_add_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_add_pd
+ // CHECK-LABEL: test_mm_maskz_add_pd
// CHECK: fadd <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_add_pd(__U,__A,__B);
}
__m256d test_mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_add_pd
+ // CHECK-LABEL: test_mm256_mask_add_pd
// CHECK: fadd <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_add_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_add_pd
+ // CHECK-LABEL: test_mm256_maskz_add_pd
// CHECK: fadd <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_add_pd(__U,__A,__B);
}
__m128 test_mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_add_ps
+ // CHECK-LABEL: test_mm_mask_add_ps
// CHECK: fadd <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_add_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_add_ps
+ // CHECK-LABEL: test_mm_maskz_add_ps
// CHECK: fadd <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_add_ps(__U,__A,__B);
}
__m256 test_mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_add_ps
+ // CHECK-LABEL: test_mm256_mask_add_ps
// CHECK: fadd <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_add_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_add_ps
+ // CHECK-LABEL: test_mm256_maskz_add_ps
// CHECK: fadd <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_add_ps(__U,__A,__B);
}
__m128i test_mm_mask_blend_epi32(__mmask8 __U, __m128i __A, __m128i __W) {
- // CHECK-LABEL: @test_mm_mask_blend_epi32
+ // CHECK-LABEL: test_mm_mask_blend_epi32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_blend_epi32(__U,__A,__W);
}
__m256i test_mm256_mask_blend_epi32(__mmask8 __U, __m256i __A, __m256i __W) {
- // CHECK-LABEL: @test_mm256_mask_blend_epi32
+ // CHECK-LABEL: test_mm256_mask_blend_epi32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_blend_epi32(__U,__A,__W);
}
__m128d test_mm_mask_blend_pd(__mmask8 __U, __m128d __A, __m128d __W) {
- // CHECK-LABEL: @test_mm_mask_blend_pd
+ // CHECK-LABEL: test_mm_mask_blend_pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_blend_pd(__U,__A,__W);
}
__m256d test_mm256_mask_blend_pd(__mmask8 __U, __m256d __A, __m256d __W) {
- // CHECK-LABEL: @test_mm256_mask_blend_pd
+ // CHECK-LABEL: test_mm256_mask_blend_pd
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_blend_pd(__U,__A,__W);
}
__m128 test_mm_mask_blend_ps(__mmask8 __U, __m128 __A, __m128 __W) {
- // CHECK-LABEL: @test_mm_mask_blend_ps
+ // CHECK-LABEL: test_mm_mask_blend_ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_blend_ps(__U,__A,__W);
}
__m256 test_mm256_mask_blend_ps(__mmask8 __U, __m256 __A, __m256 __W) {
- // CHECK-LABEL: @test_mm256_mask_blend_ps
+ // CHECK-LABEL: test_mm256_mask_blend_ps
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_blend_ps(__U,__A,__W);
}
__m128i test_mm_mask_blend_epi64(__mmask8 __U, __m128i __A, __m128i __W) {
- // CHECK-LABEL: @test_mm_mask_blend_epi64
+ // CHECK-LABEL: test_mm_mask_blend_epi64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_blend_epi64(__U,__A,__W);
}
__m256i test_mm256_mask_blend_epi64(__mmask8 __U, __m256i __A, __m256i __W) {
- // CHECK-LABEL: @test_mm256_mask_blend_epi64
+ // CHECK-LABEL: test_mm256_mask_blend_epi64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_blend_epi64(__U,__A,__W);
}
__m128d test_mm_mask_compress_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_compress_pd
+ // CHECK-LABEL: test_mm_mask_compress_pd
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_mask_compress_pd(__W,__U,__A);
}
__m128d test_mm_maskz_compress_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_compress_pd
+ // CHECK-LABEL: test_mm_maskz_compress_pd
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_maskz_compress_pd(__U,__A);
}
__m256d test_mm256_mask_compress_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_compress_pd
+ // CHECK-LABEL: test_mm256_mask_compress_pd
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_mask_compress_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_compress_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_compress_pd
+ // CHECK-LABEL: test_mm256_maskz_compress_pd
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_maskz_compress_pd(__U,__A);
}
__m128i test_mm_mask_compress_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_compress_epi64
+ // CHECK-LABEL: test_mm_mask_compress_epi64
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_mask_compress_epi64(__W,__U,__A);
}
__m128i test_mm_maskz_compress_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_compress_epi64
+ // CHECK-LABEL: test_mm_maskz_compress_epi64
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_maskz_compress_epi64(__U,__A);
}
__m256i test_mm256_mask_compress_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_compress_epi64
+ // CHECK-LABEL: test_mm256_mask_compress_epi64
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_mask_compress_epi64(__W,__U,__A);
}
__m256i test_mm256_maskz_compress_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_compress_epi64
+ // CHECK-LABEL: test_mm256_maskz_compress_epi64
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_maskz_compress_epi64(__U,__A);
}
__m128 test_mm_mask_compress_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_compress_ps
+ // CHECK-LABEL: test_mm_mask_compress_ps
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_mask_compress_ps(__W,__U,__A);
}
__m128 test_mm_maskz_compress_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_compress_ps
+ // CHECK-LABEL: test_mm_maskz_compress_ps
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_maskz_compress_ps(__U,__A);
}
__m256 test_mm256_mask_compress_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_compress_ps
+ // CHECK-LABEL: test_mm256_mask_compress_ps
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_mask_compress_ps(__W,__U,__A);
}
__m256 test_mm256_maskz_compress_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_compress_ps
+ // CHECK-LABEL: test_mm256_maskz_compress_ps
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_maskz_compress_ps(__U,__A);
}
__m128i test_mm_mask_compress_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_compress_epi32
+ // CHECK-LABEL: test_mm_mask_compress_epi32
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_mask_compress_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_compress_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_compress_epi32
+ // CHECK-LABEL: test_mm_maskz_compress_epi32
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_maskz_compress_epi32(__U,__A);
}
__m256i test_mm256_mask_compress_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_compress_epi32
+ // CHECK-LABEL: test_mm256_mask_compress_epi32
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_mask_compress_epi32(__W,__U,__A);
}
__m256i test_mm256_maskz_compress_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_compress_epi32
+ // CHECK-LABEL: test_mm256_maskz_compress_epi32
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_maskz_compress_epi32(__U,__A);
}
void test_mm_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_compressstoreu_pd
+ // CHECK-LABEL: test_mm_mask_compressstoreu_pd
// CHECK: @llvm.masked.compressstore.v2f64(<2 x double> %{{.*}}, ptr %{{.*}}, <2 x i1> %{{.*}})
return _mm_mask_compressstoreu_pd(__P,__U,__A);
}
void test_mm256_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_compressstoreu_pd
+ // CHECK-LABEL: test_mm256_mask_compressstoreu_pd
// CHECK: @llvm.masked.compressstore.v4f64(<4 x double> %{{.*}}, ptr %{{.*}}, <4 x i1> %{{.*}})
return _mm256_mask_compressstoreu_pd(__P,__U,__A);
}
void test_mm_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_compressstoreu_epi64
+ // CHECK-LABEL: test_mm_mask_compressstoreu_epi64
// CHECK: @llvm.masked.compressstore.v2i64(<2 x i64> %{{.*}}, ptr %{{.*}}, <2 x i1> %{{.*}})
return _mm_mask_compressstoreu_epi64(__P,__U,__A);
}
void test_mm256_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_compressstoreu_epi64
+ // CHECK-LABEL: test_mm256_mask_compressstoreu_epi64
// CHECK: @llvm.masked.compressstore.v4i64(<4 x i64> %{{.*}}, ptr %{{.*}}, <4 x i1> %{{.*}})
return _mm256_mask_compressstoreu_epi64(__P,__U,__A);
}
void test_mm_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_compressstoreu_ps
+ // CHECK-LABEL: test_mm_mask_compressstoreu_ps
// CHECK: @llvm.masked.compressstore.v4f32(<4 x float> %{{.*}}, ptr %{{.*}}, <4 x i1> %{{.*}})
return _mm_mask_compressstoreu_ps(__P,__U,__A);
}
void test_mm256_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_compressstoreu_ps
+ // CHECK-LABEL: test_mm256_mask_compressstoreu_ps
// CHECK: @llvm.masked.compressstore.v8f32(<8 x float> %{{.*}}, ptr %{{.*}}, <8 x i1> %{{.*}})
return _mm256_mask_compressstoreu_ps(__P,__U,__A);
}
void test_mm_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_compressstoreu_epi32
+ // CHECK-LABEL: test_mm_mask_compressstoreu_epi32
// CHECK: @llvm.masked.compressstore.v4i32(<4 x i32> %{{.*}}, ptr %{{.*}}, <4 x i1> %{{.*}})
return _mm_mask_compressstoreu_epi32(__P,__U,__A);
}
void test_mm256_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_compressstoreu_epi32
+ // CHECK-LABEL: test_mm256_mask_compressstoreu_epi32
// CHECK: @llvm.masked.compressstore.v8i32(<8 x i32> %{{.*}}, ptr %{{.*}}, <8 x i1> %{{.*}})
return _mm256_mask_compressstoreu_epi32(__P,__U,__A);
}
__m128d test_mm_mask_cvtepi32_pd(__m128d __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_pd
+ // CHECK-LABEL: test_mm_mask_cvtepi32_pd
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: sitofp <2 x i32> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_mask_cvtepi32_pd(__W,__U,__A);
}
__m128d test_mm_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi32_pd
+ // CHECK-LABEL: test_mm_maskz_cvtepi32_pd
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: sitofp <2 x i32> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_maskz_cvtepi32_pd(__U,__A);
}
__m256d test_mm256_mask_cvtepi32_pd(__m256d __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_pd
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_pd
// CHECK: sitofp <4 x i32> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_mask_cvtepi32_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi32_pd
+ // CHECK-LABEL: test_mm256_maskz_cvtepi32_pd
// CHECK: sitofp <4 x i32> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_maskz_cvtepi32_pd(__U,__A);
}
__m128 test_mm_mask_cvtepi32_ps(__m128 __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_ps
+ // CHECK-LABEL: test_mm_mask_cvtepi32_ps
// CHECK: sitofp <4 x i32> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> {{.*}}, <4 x float> {{.*}}, <4 x float> {{.*}}
return _mm_mask_cvtepi32_ps(__W,__U,__A);
}
__m128 test_mm_maskz_cvtepi32_ps(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi32_ps
+ // CHECK-LABEL: test_mm_maskz_cvtepi32_ps
// CHECK: sitofp <4 x i32> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> {{.*}}, <4 x float> {{.*}}, <4 x float> {{.*}}
return _mm_maskz_cvtepi32_ps(__U,__A);
}
__m256 test_mm256_mask_cvtepi32_ps(__m256 __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_ps
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_ps
// CHECK: sitofp <8 x i32> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> {{.*}}, <8 x float> {{.*}}, <8 x float> {{.*}}
return _mm256_mask_cvtepi32_ps(__W,__U,__A);
}
__m256 test_mm256_maskz_cvtepi32_ps(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi32_ps
+ // CHECK-LABEL: test_mm256_maskz_cvtepi32_ps
// CHECK: sitofp <8 x i32> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> {{.*}}, <8 x float> {{.*}}, <8 x float> {{.*}}
return _mm256_maskz_cvtepi32_ps(__U,__A);
}
__m128i test_mm_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvtpd_epi32
+ // CHECK-LABEL: test_mm_mask_cvtpd_epi32
// CHECK: @llvm.x86.avx512.mask.cvtpd2dq.128
return _mm_mask_cvtpd_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_cvtpd_epi32(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtpd_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtpd_epi32
// CHECK: @llvm.x86.avx512.mask.cvtpd2dq.128
return _mm_maskz_cvtpd_epi32(__U,__A);
}
__m128i test_mm256_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtpd_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtpd_epi32
// CHECK: @llvm.x86.avx.cvt.pd2dq.256
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm256_mask_cvtpd_epi32(__W,__U,__A);
}
__m128i test_mm256_maskz_cvtpd_epi32(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtpd_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtpd_epi32
// CHECK: @llvm.x86.avx.cvt.pd2dq.256
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm256_maskz_cvtpd_epi32(__U,__A);
}
__m128 test_mm_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvtpd_ps
+ // CHECK-LABEL: test_mm_mask_cvtpd_ps
// CHECK: @llvm.x86.avx512.mask.cvtpd2ps
return _mm_mask_cvtpd_ps(__W,__U,__A);
}
__m128 test_mm_maskz_cvtpd_ps(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtpd_ps
+ // CHECK-LABEL: test_mm_maskz_cvtpd_ps
// CHECK: @llvm.x86.avx512.mask.cvtpd2ps
return _mm_maskz_cvtpd_ps(__U,__A);
}
__m128 test_mm256_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtpd_ps
+ // CHECK-LABEL: test_mm256_mask_cvtpd_ps
// CHECK: @llvm.x86.avx.cvt.pd2.ps.256
// CHECK: select <4 x i1> {{.*}}, <4 x float> {{.*}}, <4 x float> {{.*}}
return _mm256_mask_cvtpd_ps(__W,__U,__A);
}
__m128 test_mm256_maskz_cvtpd_ps(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtpd_ps
+ // CHECK-LABEL: test_mm256_maskz_cvtpd_ps
// CHECK: @llvm.x86.avx.cvt.pd2.ps.256
// CHECK: select <4 x i1> {{.*}}, <4 x float> {{.*}}, <4 x float> {{.*}}
return _mm256_maskz_cvtpd_ps(__U,__A);
}
__m128i test_mm_cvtpd_epu32(__m128d __A) {
- // CHECK-LABEL: @test_mm_cvtpd_epu32
+ // CHECK-LABEL: test_mm_cvtpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvtpd2udq.128
return _mm_cvtpd_epu32(__A);
}
__m128i test_mm_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvtpd_epu32
+ // CHECK-LABEL: test_mm_mask_cvtpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvtpd2udq.128
return _mm_mask_cvtpd_epu32(__W,__U,__A);
}
__m128i test_mm_maskz_cvtpd_epu32(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtpd_epu32
+ // CHECK-LABEL: test_mm_maskz_cvtpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvtpd2udq.128
return _mm_maskz_cvtpd_epu32(__U,__A);
}
__m128i test_mm256_cvtpd_epu32(__m256d __A) {
- // CHECK-LABEL: @test_mm256_cvtpd_epu32
+ // CHECK-LABEL: test_mm256_cvtpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvtpd2udq.256
return _mm256_cvtpd_epu32(__A);
}
__m128i test_mm256_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtpd_epu32
+ // CHECK-LABEL: test_mm256_mask_cvtpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvtpd2udq.256
return _mm256_mask_cvtpd_epu32(__W,__U,__A);
}
__m128i test_mm256_maskz_cvtpd_epu32(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtpd_epu32
+ // CHECK-LABEL: test_mm256_maskz_cvtpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvtpd2udq.256
return _mm256_maskz_cvtpd_epu32(__U,__A);
}
__m128i test_mm_mask_cvtps_epi32(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvtps_epi32
+ // CHECK-LABEL: test_mm_mask_cvtps_epi32
// CHECK: @llvm.x86.sse2.cvtps2dq
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm_mask_cvtps_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_cvtps_epi32(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtps_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtps_epi32
// CHECK: @llvm.x86.sse2.cvtps2dq
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm_maskz_cvtps_epi32(__U,__A);
}
__m256i test_mm256_mask_cvtps_epi32(__m256i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtps_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtps_epi32
// CHECK: @llvm.x86.avx.cvt.ps2dq.256
// CHECK: select <8 x i1> {{.*}}, <8 x i32> {{.*}}, <8 x i32> {{.*}}
return _mm256_mask_cvtps_epi32(__W,__U,__A);
}
__m256i test_mm256_maskz_cvtps_epi32(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtps_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtps_epi32
// CHECK: @llvm.x86.avx.cvt.ps2dq.256
// CHECK: select <8 x i1> {{.*}}, <8 x i32> {{.*}}, <8 x i32> {{.*}}
return _mm256_maskz_cvtps_epi32(__U,__A);
}
__m128d test_mm_mask_cvtps_pd(__m128d __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvtps_pd
+ // CHECK-LABEL: test_mm_mask_cvtps_pd
// CHECK: fpext <2 x float> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_mask_cvtps_pd(__W,__U,__A);
}
__m128d test_mm_maskz_cvtps_pd(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtps_pd
+ // CHECK-LABEL: test_mm_maskz_cvtps_pd
// CHECK: fpext <2 x float> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_maskz_cvtps_pd(__U,__A);
}
__m256d test_mm256_mask_cvtps_pd(__m256d __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtps_pd
+ // CHECK-LABEL: test_mm256_mask_cvtps_pd
// CHECK: fpext <4 x float> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_mask_cvtps_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_cvtps_pd(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtps_pd
+ // CHECK-LABEL: test_mm256_maskz_cvtps_pd
// CHECK: fpext <4 x float> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_maskz_cvtps_pd(__U,__A);
}
__m128i test_mm_cvtps_epu32(__m128 __A) {
- // CHECK-LABEL: @test_mm_cvtps_epu32
+ // CHECK-LABEL: test_mm_cvtps_epu32
// CHECK: @llvm.x86.avx512.mask.cvtps2udq.128
return _mm_cvtps_epu32(__A);
}
__m128i test_mm_mask_cvtps_epu32(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvtps_epu32
+ // CHECK-LABEL: test_mm_mask_cvtps_epu32
// CHECK: @llvm.x86.avx512.mask.cvtps2udq.128
return _mm_mask_cvtps_epu32(__W,__U,__A);
}
__m128i test_mm_maskz_cvtps_epu32(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtps_epu32
+ // CHECK-LABEL: test_mm_maskz_cvtps_epu32
// CHECK: @llvm.x86.avx512.mask.cvtps2udq.128
return _mm_maskz_cvtps_epu32(__U,__A);
}
__m256i test_mm256_cvtps_epu32(__m256 __A) {
- // CHECK-LABEL: @test_mm256_cvtps_epu32
+ // CHECK-LABEL: test_mm256_cvtps_epu32
// CHECK: @llvm.x86.avx512.mask.cvtps2udq.256
return _mm256_cvtps_epu32(__A);
}
__m256i test_mm256_mask_cvtps_epu32(__m256i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtps_epu32
+ // CHECK-LABEL: test_mm256_mask_cvtps_epu32
// CHECK: @llvm.x86.avx512.mask.cvtps2udq.256
return _mm256_mask_cvtps_epu32(__W,__U,__A);
}
__m256i test_mm256_maskz_cvtps_epu32(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtps_epu32
+ // CHECK-LABEL: test_mm256_maskz_cvtps_epu32
// CHECK: @llvm.x86.avx512.mask.cvtps2udq.256
return _mm256_maskz_cvtps_epu32(__U,__A);
}
__m128i test_mm_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvttpd_epi32
+ // CHECK-LABEL: test_mm_mask_cvttpd_epi32
// CHECK: @llvm.x86.avx512.mask.cvttpd2dq.128
return _mm_mask_cvttpd_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_cvttpd_epi32(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvttpd_epi32
+ // CHECK-LABEL: test_mm_maskz_cvttpd_epi32
// CHECK: @llvm.x86.avx512.mask.cvttpd2dq.128
return _mm_maskz_cvttpd_epi32(__U,__A);
}
__m128i test_mm256_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvttpd_epi32
+ // CHECK-LABEL: test_mm256_mask_cvttpd_epi32
// CHECK: @llvm.x86.avx.cvtt.pd2dq.256
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm256_mask_cvttpd_epi32(__W,__U,__A);
}
__m128i test_mm256_maskz_cvttpd_epi32(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvttpd_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvttpd_epi32
// CHECK: @llvm.x86.avx.cvtt.pd2dq.256
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm256_maskz_cvttpd_epi32(__U,__A);
}
__m128i test_mm_cvttpd_epu32(__m128d __A) {
- // CHECK-LABEL: @test_mm_cvttpd_epu32
+ // CHECK-LABEL: test_mm_cvttpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvttpd2udq.128
return _mm_cvttpd_epu32(__A);
}
__m128i test_mm_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvttpd_epu32
+ // CHECK-LABEL: test_mm_mask_cvttpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvttpd2udq.128
return _mm_mask_cvttpd_epu32(__W,__U,__A);
}
__m128i test_mm_maskz_cvttpd_epu32(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvttpd_epu32
+ // CHECK-LABEL: test_mm_maskz_cvttpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvttpd2udq.128
return _mm_maskz_cvttpd_epu32(__U,__A);
}
__m128i test_mm256_cvttpd_epu32(__m256d __A) {
- // CHECK-LABEL: @test_mm256_cvttpd_epu32
+ // CHECK-LABEL: test_mm256_cvttpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvttpd2udq.256
return _mm256_cvttpd_epu32(__A);
}
__m128i test_mm256_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvttpd_epu32
+ // CHECK-LABEL: test_mm256_mask_cvttpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvttpd2udq.256
return _mm256_mask_cvttpd_epu32(__W,__U,__A);
}
__m128i test_mm256_maskz_cvttpd_epu32(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvttpd_epu32
+ // CHECK-LABEL: test_mm256_maskz_cvttpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvttpd2udq.256
return _mm256_maskz_cvttpd_epu32(__U,__A);
}
__m128i test_mm_mask_cvttps_epi32(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvttps_epi32
+ // CHECK-LABEL: test_mm_mask_cvttps_epi32
// CHECK: @llvm.x86.sse2.cvttps2dq
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm_mask_cvttps_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_cvttps_epi32(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvttps_epi32
+ // CHECK-LABEL: test_mm_maskz_cvttps_epi32
// CHECK: @llvm.x86.sse2.cvttps2dq
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm_maskz_cvttps_epi32(__U,__A);
}
__m256i test_mm256_mask_cvttps_epi32(__m256i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvttps_epi32
+ // CHECK-LABEL: test_mm256_mask_cvttps_epi32
// CHECK: @llvm.x86.avx.cvtt.ps2dq.256
// CHECK: select <8 x i1> {{.*}}, <8 x i32> {{.*}}, <8 x i32> {{.*}}
return _mm256_mask_cvttps_epi32(__W,__U,__A);
}
__m256i test_mm256_maskz_cvttps_epi32(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvttps_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvttps_epi32
// CHECK: @llvm.x86.avx.cvtt.ps2dq.256
// CHECK: select <8 x i1> {{.*}}, <8 x i32> {{.*}}, <8 x i32> {{.*}}
return _mm256_maskz_cvttps_epi32(__U,__A);
}
__m128i test_mm_cvttps_epu32(__m128 __A) {
- // CHECK-LABEL: @test_mm_cvttps_epu32
+ // CHECK-LABEL: test_mm_cvttps_epu32
// CHECK: @llvm.x86.avx512.mask.cvttps2udq.128
return _mm_cvttps_epu32(__A);
}
__m128i test_mm_mask_cvttps_epu32(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvttps_epu32
+ // CHECK-LABEL: test_mm_mask_cvttps_epu32
// CHECK: @llvm.x86.avx512.mask.cvttps2udq.128
return _mm_mask_cvttps_epu32(__W,__U,__A);
}
__m128i test_mm_maskz_cvttps_epu32(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvttps_epu32
+ // CHECK-LABEL: test_mm_maskz_cvttps_epu32
// CHECK: @llvm.x86.avx512.mask.cvttps2udq.128
return _mm_maskz_cvttps_epu32(__U,__A);
}
__m256i test_mm256_cvttps_epu32(__m256 __A) {
- // CHECK-LABEL: @test_mm256_cvttps_epu32
+ // CHECK-LABEL: test_mm256_cvttps_epu32
// CHECK: @llvm.x86.avx512.mask.cvttps2udq.256
return _mm256_cvttps_epu32(__A);
}
__m256i test_mm256_mask_cvttps_epu32(__m256i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvttps_epu32
+ // CHECK-LABEL: test_mm256_mask_cvttps_epu32
// CHECK: @llvm.x86.avx512.mask.cvttps2udq.256
return _mm256_mask_cvttps_epu32(__W,__U,__A);
}
__m256i test_mm256_maskz_cvttps_epu32(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvttps_epu32
+ // CHECK-LABEL: test_mm256_maskz_cvttps_epu32
// CHECK: @llvm.x86.avx512.mask.cvttps2udq.256
return _mm256_maskz_cvttps_epu32(__U,__A);
}
__m128d test_mm_cvtepu32_pd(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepu32_pd
+ // CHECK-LABEL: test_mm_cvtepu32_pd
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
return _mm_cvtepu32_pd(__A);
}
__m128d test_mm_mask_cvtepu32_pd(__m128d __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu32_pd
+ // CHECK-LABEL: test_mm_mask_cvtepu32_pd
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_mask_cvtepu32_pd(__W,__U,__A);
}
__m128d test_mm_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu32_pd
+ // CHECK-LABEL: test_mm_maskz_cvtepu32_pd
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_maskz_cvtepu32_pd(__U,__A);
}
__m256d test_mm256_cvtepu32_pd(__m128i __A) {
- // CHECK-LABEL: @test_mm256_cvtepu32_pd
+ // CHECK-LABEL: test_mm256_cvtepu32_pd
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
return _mm256_cvtepu32_pd(__A);
}
__m256d test_mm256_mask_cvtepu32_pd(__m256d __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu32_pd
+ // CHECK-LABEL: test_mm256_mask_cvtepu32_pd
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_mask_cvtepu32_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu32_pd
+ // CHECK-LABEL: test_mm256_maskz_cvtepu32_pd
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_maskz_cvtepu32_pd(__U,__A);
}
__m128 test_mm_cvtepu32_ps(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepu32_ps
+ // CHECK-LABEL: test_mm_cvtepu32_ps
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x float>
return _mm_cvtepu32_ps(__A);
}
__m128 test_mm_mask_cvtepu32_ps(__m128 __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu32_ps
+ // CHECK-LABEL: test_mm_mask_cvtepu32_ps
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_cvtepu32_ps(__W,__U,__A);
}
__m128 test_mm_maskz_cvtepu32_ps(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu32_ps
+ // CHECK-LABEL: test_mm_maskz_cvtepu32_ps
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_cvtepu32_ps(__U,__A);
}
__m256 test_mm256_cvtepu32_ps(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepu32_ps
+ // CHECK-LABEL: test_mm256_cvtepu32_ps
// CHECK: uitofp <8 x i32> %{{.*}} to <8 x float>
return _mm256_cvtepu32_ps(__A);
}
__m256 test_mm256_mask_cvtepu32_ps(__m256 __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu32_ps
+ // CHECK-LABEL: test_mm256_mask_cvtepu32_ps
// CHECK: uitofp <8 x i32> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_cvtepu32_ps(__W,__U,__A);
}
__m256 test_mm256_maskz_cvtepu32_ps(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu32_ps
+ // CHECK-LABEL: test_mm256_maskz_cvtepu32_ps
// CHECK: uitofp <8 x i32> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_cvtepu32_ps(__U,__A);
}
__m128d test_mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_div_pd
+ // CHECK-LABEL: test_mm_mask_div_pd
// CHECK: fdiv <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_div_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_div_pd
+ // CHECK-LABEL: test_mm_maskz_div_pd
// CHECK: fdiv <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_div_pd(__U,__A,__B);
}
__m256d test_mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_div_pd
+ // CHECK-LABEL: test_mm256_mask_div_pd
// CHECK: fdiv <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_div_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_div_pd
+ // CHECK-LABEL: test_mm256_maskz_div_pd
// CHECK: fdiv <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_div_pd(__U,__A,__B);
}
__m128 test_mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_div_ps
+ // CHECK-LABEL: test_mm_mask_div_ps
// CHECK: fdiv <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_div_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_div_ps
+ // CHECK-LABEL: test_mm_maskz_div_ps
// CHECK: fdiv <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_div_ps(__U,__A,__B);
}
__m256 test_mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_div_ps
+ // CHECK-LABEL: test_mm256_mask_div_ps
// CHECK: fdiv <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_div_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_div_ps
+ // CHECK-LABEL: test_mm256_maskz_div_ps
// CHECK: fdiv <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_div_ps(__U,__A,__B);
}
__m128d test_mm_mask_expand_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_expand_pd
+ // CHECK-LABEL: test_mm_mask_expand_pd
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_mask_expand_pd(__W,__U,__A);
}
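// NOTE: expand (and, below, getexp/roundscale/scalef) has no generic IR
// equivalent, so these tests only pin the target-specific callee name
// (e.g. @llvm.x86.avx512.mask.expand) rather than a portable IR pattern.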
__m128d test_mm_maskz_expand_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_expand_pd
+ // CHECK-LABEL: test_mm_maskz_expand_pd
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_maskz_expand_pd(__U,__A);
}
__m256d test_mm256_mask_expand_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_expand_pd
+ // CHECK-LABEL: test_mm256_mask_expand_pd
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_mask_expand_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_expand_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_expand_pd
+ // CHECK-LABEL: test_mm256_maskz_expand_pd
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_maskz_expand_pd(__U,__A);
}
__m128i test_mm_mask_expand_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_expand_epi64
+ // CHECK-LABEL: test_mm_mask_expand_epi64
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_mask_expand_epi64(__W,__U,__A);
}
__m128i test_mm_maskz_expand_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_expand_epi64
+ // CHECK-LABEL: test_mm_maskz_expand_epi64
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_maskz_expand_epi64(__U,__A);
}
__m256i test_mm256_mask_expand_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_expand_epi64
+ // CHECK-LABEL: test_mm256_mask_expand_epi64
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_mask_expand_epi64(__W,__U,__A);
}
__m256i test_mm256_maskz_expand_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_expand_epi64
+ // CHECK-LABEL: test_mm256_maskz_expand_epi64
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_maskz_expand_epi64(__U,__A);
}
__m128d test_mm_mask_expandloadu_pd(__m128d __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_expandloadu_pd
+ // CHECK-LABEL: test_mm_mask_expandloadu_pd
// CHECK: @llvm.masked.expandload.v2f64(ptr %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_mask_expandloadu_pd(__W,__U,__P);
}
__m128d test_mm_maskz_expandloadu_pd(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_expandloadu_pd
+ // CHECK-LABEL: test_mm_maskz_expandloadu_pd
// CHECK: @llvm.masked.expandload.v2f64(ptr %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_maskz_expandloadu_pd(__U,__P);
}
__m256d test_mm256_mask_expandloadu_pd(__m256d __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_expandloadu_pd
+ // CHECK-LABEL: test_mm256_mask_expandloadu_pd
// CHECK: @llvm.masked.expandload.v4f64(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_mask_expandloadu_pd(__W,__U,__P);
}
__m256d test_mm256_maskz_expandloadu_pd(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_expandloadu_pd
+ // CHECK-LABEL: test_mm256_maskz_expandloadu_pd
// CHECK: @llvm.masked.expandload.v4f64(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_maskz_expandloadu_pd(__U,__P);
}
__m128i test_mm_mask_expandloadu_epi64(__m128i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_expandloadu_epi64
+ // CHECK-LABEL: test_mm_mask_expandloadu_epi64
// CHECK: @llvm.masked.expandload.v2i64(ptr %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_mask_expandloadu_epi64(__W,__U,__P);
}
__m128i test_mm_maskz_expandloadu_epi64(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_expandloadu_epi64
+ // CHECK-LABEL: test_mm_maskz_expandloadu_epi64
// CHECK: @llvm.masked.expandload.v2i64(ptr %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_maskz_expandloadu_epi64(__U,__P);
}
__m256i test_mm256_mask_expandloadu_epi64(__m256i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_expandloadu_epi64
+ // CHECK-LABEL: test_mm256_mask_expandloadu_epi64
// CHECK: @llvm.masked.expandload.v4i64(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_mask_expandloadu_epi64(__W,__U,__P);
}
__m256i test_mm256_maskz_expandloadu_epi64(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_expandloadu_epi64
+ // CHECK-LABEL: test_mm256_maskz_expandloadu_epi64
// CHECK: @llvm.masked.expandload.v4i64(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_maskz_expandloadu_epi64(__U,__P);
}
__m128 test_mm_mask_expandloadu_ps(__m128 __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_expandloadu_ps
+ // CHECK-LABEL: test_mm_mask_expandloadu_ps
// CHECK: @llvm.masked.expandload.v4f32(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_mask_expandloadu_ps(__W,__U,__P);
}
__m128 test_mm_maskz_expandloadu_ps(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_expandloadu_ps
+ // CHECK-LABEL: test_mm_maskz_expandloadu_ps
// CHECK: @llvm.masked.expandload.v4f32(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_maskz_expandloadu_ps(__U,__P);
}
__m256 test_mm256_mask_expandloadu_ps(__m256 __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_expandloadu_ps
+ // CHECK-LABEL: test_mm256_mask_expandloadu_ps
// CHECK: @llvm.masked.expandload.v8f32(ptr %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_mask_expandloadu_ps(__W,__U,__P);
}
__m256 test_mm256_maskz_expandloadu_ps(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_expandloadu_ps
+ // CHECK-LABEL: test_mm256_maskz_expandloadu_ps
// CHECK: @llvm.masked.expandload.v8f32(ptr %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_maskz_expandloadu_ps(__U,__P);
}
__m128i test_mm_mask_expandloadu_epi32(__m128i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_expandloadu_epi32
+ // CHECK-LABEL: test_mm_mask_expandloadu_epi32
// CHECK: @llvm.masked.expandload.v4i32(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_mask_expandloadu_epi32(__W,__U,__P);
}
__m128i test_mm_maskz_expandloadu_epi32(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_expandloadu_epi32
+ // CHECK-LABEL: test_mm_maskz_expandloadu_epi32
// CHECK: @llvm.masked.expandload.v4i32(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_maskz_expandloadu_epi32(__U,__P);
}
__m256i test_mm256_mask_expandloadu_epi32(__m256i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_expandloadu_epi32
+ // CHECK-LABEL: test_mm256_mask_expandloadu_epi32
// CHECK: @llvm.masked.expandload.v8i32(ptr %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_mask_expandloadu_epi32(__W,__U,__P);
}
__m256i test_mm256_maskz_expandloadu_epi32(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_expandloadu_epi32
+ // CHECK-LABEL: test_mm256_maskz_expandloadu_epi32
// CHECK: @llvm.masked.expandload.v8i32(ptr %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_maskz_expandloadu_epi32(__U,__P);
}
__m128 test_mm_mask_expand_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_expand_ps
+ // CHECK-LABEL: test_mm_mask_expand_ps
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_mask_expand_ps(__W,__U,__A);
}
__m128 test_mm_maskz_expand_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_expand_ps
+ // CHECK-LABEL: test_mm_maskz_expand_ps
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_maskz_expand_ps(__U,__A);
}
__m256 test_mm256_mask_expand_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_expand_ps
+ // CHECK-LABEL: test_mm256_mask_expand_ps
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_mask_expand_ps(__W,__U,__A);
}
__m256 test_mm256_maskz_expand_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_expand_ps
+ // CHECK-LABEL: test_mm256_maskz_expand_ps
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_maskz_expand_ps(__U,__A);
}
__m128i test_mm_mask_expand_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_expand_epi32
+ // CHECK-LABEL: test_mm_mask_expand_epi32
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_mask_expand_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_expand_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_expand_epi32
+ // CHECK-LABEL: test_mm_maskz_expand_epi32
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_maskz_expand_epi32(__U,__A);
}
__m256i test_mm256_mask_expand_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_expand_epi32
+ // CHECK-LABEL: test_mm256_mask_expand_epi32
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_mask_expand_epi32(__W,__U,__A);
}
__m256i test_mm256_maskz_expand_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_expand_epi32
+ // CHECK-LABEL: test_mm256_maskz_expand_epi32
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_maskz_expand_epi32(__U,__A);
}
__m128d test_mm_getexp_pd(__m128d __A) {
- // CHECK-LABEL: @test_mm_getexp_pd
+ // CHECK-LABEL: test_mm_getexp_pd
// CHECK: @llvm.x86.avx512.mask.getexp.pd.128
return _mm_getexp_pd(__A);
}
__m128d test_mm_mask_getexp_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_getexp_pd
+ // CHECK-LABEL: test_mm_mask_getexp_pd
// CHECK: @llvm.x86.avx512.mask.getexp.pd.128
return _mm_mask_getexp_pd(__W,__U,__A);
}
__m128d test_mm_maskz_getexp_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_getexp_pd
+ // CHECK-LABEL: test_mm_maskz_getexp_pd
// CHECK: @llvm.x86.avx512.mask.getexp.pd.128
return _mm_maskz_getexp_pd(__U,__A);
}
__m256d test_mm256_getexp_pd(__m256d __A) {
- // CHECK-LABEL: @test_mm256_getexp_pd
+ // CHECK-LABEL: test_mm256_getexp_pd
// CHECK: @llvm.x86.avx512.mask.getexp.pd.256
return _mm256_getexp_pd(__A);
}
__m256d test_mm256_mask_getexp_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_getexp_pd
+ // CHECK-LABEL: test_mm256_mask_getexp_pd
// CHECK: @llvm.x86.avx512.mask.getexp.pd.256
return _mm256_mask_getexp_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_getexp_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_getexp_pd
+ // CHECK-LABEL: test_mm256_maskz_getexp_pd
// CHECK: @llvm.x86.avx512.mask.getexp.pd.256
return _mm256_maskz_getexp_pd(__U,__A);
}
__m128 test_mm_getexp_ps(__m128 __A) {
- // CHECK-LABEL: @test_mm_getexp_ps
+ // CHECK-LABEL: test_mm_getexp_ps
// CHECK: @llvm.x86.avx512.mask.getexp.ps.128
return _mm_getexp_ps(__A);
}
__m128 test_mm_mask_getexp_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_getexp_ps
+ // CHECK-LABEL: test_mm_mask_getexp_ps
// CHECK: @llvm.x86.avx512.mask.getexp.ps.128
return _mm_mask_getexp_ps(__W,__U,__A);
}
__m128 test_mm_maskz_getexp_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_getexp_ps
+ // CHECK-LABEL: test_mm_maskz_getexp_ps
// CHECK: @llvm.x86.avx512.mask.getexp.ps.128
return _mm_maskz_getexp_ps(__U,__A);
}
__m256 test_mm256_getexp_ps(__m256 __A) {
- // CHECK-LABEL: @test_mm256_getexp_ps
+ // CHECK-LABEL: test_mm256_getexp_ps
// CHECK: @llvm.x86.avx512.mask.getexp.ps.256
return _mm256_getexp_ps(__A);
}
__m256 test_mm256_mask_getexp_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_getexp_ps
+ // CHECK-LABEL: test_mm256_mask_getexp_ps
// CHECK: @llvm.x86.avx512.mask.getexp.ps.256
return _mm256_mask_getexp_ps(__W,__U,__A);
}
__m256 test_mm256_maskz_getexp_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_getexp_ps
+ // CHECK-LABEL: test_mm256_maskz_getexp_ps
// CHECK: @llvm.x86.avx512.mask.getexp.ps.256
return _mm256_maskz_getexp_ps(__U,__A);
}
__m128d test_mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_max_pd
+ // CHECK-LABEL: test_mm_mask_max_pd
// CHECK: @llvm.x86.sse2.max.pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_max_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_max_pd
+ // CHECK-LABEL: test_mm_maskz_max_pd
// CHECK: @llvm.x86.sse2.max.pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_max_pd(__U,__A,__B);
}
__m256d test_mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_max_pd
+ // CHECK-LABEL: test_mm256_mask_max_pd
// CHECK: @llvm.x86.avx.max.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_max_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_pd
+ // CHECK-LABEL: test_mm256_maskz_max_pd
// CHECK: @llvm.x86.avx.max.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_max_pd(__U,__A,__B);
}
__m128 test_mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_max_ps
+ // CHECK-LABEL: test_mm_mask_max_ps
// CHECK: @llvm.x86.sse.max.ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_max_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_max_ps
+ // CHECK-LABEL: test_mm_maskz_max_ps
// CHECK: @llvm.x86.sse.max.ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_max_ps(__U,__A,__B);
}
__m256 test_mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_max_ps
+ // CHECK-LABEL: test_mm256_mask_max_ps
// CHECK: @llvm.x86.avx.max.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_max_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_ps
+ // CHECK-LABEL: test_mm256_maskz_max_ps
// CHECK: @llvm.x86.avx.max.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_max_ps(__U,__A,__B);
}
__m128d test_mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_min_pd
+ // CHECK-LABEL: test_mm_mask_min_pd
// CHECK: @llvm.x86.sse2.min.pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_min_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_min_pd
+ // CHECK-LABEL: test_mm_maskz_min_pd
// CHECK: @llvm.x86.sse2.min.pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_min_pd(__U,__A,__B);
}
__m256d test_mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_min_pd
+ // CHECK-LABEL: test_mm256_mask_min_pd
// CHECK: @llvm.x86.avx.min.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_min_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_pd
+ // CHECK-LABEL: test_mm256_maskz_min_pd
// CHECK: @llvm.x86.avx.min.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_min_pd(__U,__A,__B);
}
__m128 test_mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_min_ps
+ // CHECK-LABEL: test_mm_mask_min_ps
// CHECK: @llvm.x86.sse.min.ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_min_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_min_ps
+ // CHECK-LABEL: test_mm_maskz_min_ps
// CHECK: @llvm.x86.sse.min.ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_min_ps(__U,__A,__B);
}
__m256 test_mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_min_ps
+ // CHECK-LABEL: test_mm256_mask_min_ps
// CHECK: @llvm.x86.avx.min.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_min_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_ps
+ // CHECK-LABEL: test_mm256_maskz_min_ps
// CHECK: @llvm.x86.avx.min.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_min_ps(__U,__A,__B);
}
__m128d test_mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_mul_pd
+ // CHECK-LABEL: test_mm_mask_mul_pd
// CHECK: fmul <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_mul_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_mul_pd
+ // CHECK-LABEL: test_mm_maskz_mul_pd
// CHECK: fmul <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_mul_pd(__U,__A,__B);
}
__m256d test_mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_mul_pd
+ // CHECK-LABEL: test_mm256_mask_mul_pd
// CHECK: fmul <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_mul_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_mul_pd
+ // CHECK-LABEL: test_mm256_maskz_mul_pd
// CHECK: fmul <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_mul_pd(__U,__A,__B);
}
__m128 test_mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_mul_ps
+ // CHECK-LABEL: test_mm_mask_mul_ps
// CHECK: fmul <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_mul_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_mul_ps
+ // CHECK-LABEL: test_mm_maskz_mul_ps
// CHECK: fmul <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_mul_ps(__U,__A,__B);
}
__m256 test_mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_mul_ps
+ // CHECK-LABEL: test_mm256_mask_mul_ps
// CHECK: fmul <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_mul_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_mul_ps
+ // CHECK-LABEL: test_mm256_maskz_mul_ps
// CHECK: fmul <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_mul_ps(__U,__A,__B);
}
__m128i test_mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_abs_epi32
+ // CHECK-LABEL: test_mm_mask_abs_epi32
// CHECK: [[ABS:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[ABS]] to <2 x i64>
// CHECK: [[ABS:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> [[ABS]], <4 x i32> %{{.*}}
return _mm_mask_abs_epi32(__W,__U,__A);
}
+TEST_CONSTEXPR(match_v4si(_mm_mask_abs_epi32((__m128i)(__v4si){99, 99, 99, 99}, (__mmask8)0x01, (__m128i)(__v4si){-1, 2, 2, 2}), 1, 99, 99, 99));
+
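// NOTE: TEST_CONSTEXPR comes from the shared builtin-test helper header; in
// C++ mode it turns the match_*() element comparison into a compile-time
// check that the intrinsic folds in a constant expression. A minimal sketch
// of the assumed helper shape (the real header may differ):
//
//   #if defined(__cplusplus) && __cplusplus >= 201103L
//   #define TEST_CONSTEXPR(...) static_assert(__VA_ARGS__, #__VA_ARGS__)
//   #else
//   #define TEST_CONSTEXPR(...)
//   #endif
//
// In the line above, mask 0x01 keeps only lane 0, so abs(-1) = 1 lands
// there and lanes 1-3 fall back to the 99 passthrough values.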
__m128i test_mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_abs_epi32
+ // CHECK-LABEL: test_mm_maskz_abs_epi32
// CHECK: [[ABS:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[ABS]] to <2 x i64>
// CHECK: [[ABS:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> [[ABS]], <4 x i32> %{{.*}}
return _mm_maskz_abs_epi32(__U,__A);
}
+TEST_CONSTEXPR(match_v4si(_mm_maskz_abs_epi32((__mmask8)0x05, (__m128i)(__v4si){-1, 2, -3, 4}), 1, 0, 3, 0));
+
__m256i test_mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_abs_epi32
+ // CHECK-LABEL: test_mm256_mask_abs_epi32
// CHECK: [[ABS:%.*]] = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[ABS]] to <4 x i64>
// CHECK: [[ABS:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> [[ABS]], <8 x i32> %{{.*}}
return _mm256_mask_abs_epi32(__W,__U,__A);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_abs_epi32((__m256i)(__v8si){99, 99, 99, 99, 99, 99, 99, 99}, (__mmask8)0x01, (__m256i)(__v8si){-1, 2, 2, 2, 2, 2, 2, 2}), 1, 99, 99, 99, 99, 99, 99, 99));
+
__m256i test_mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_abs_epi32
+ // CHECK-LABEL: test_mm256_maskz_abs_epi32
// CHECK: [[ABS:%.*]] = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[ABS]] to <4 x i64>
// CHECK: [[ABS:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> [[ABS]], <8 x i32> %{{.*}}
return _mm256_maskz_abs_epi32(__U,__A);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_abs_epi32((__mmask8)0x01, (__m256i)(__v8si){-1, 2, 2, 2, 2, 2, 2, 2}), 1, 0, 0, 0, 0, 0, 0, 0));
+
__m128i test_mm_abs_epi64(__m128i __A) {
- // CHECK-LABEL: @test_mm_abs_epi64
- // CHECK: [[ABS:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm_abs_epi64
+ // CHECK: [[ABS:%.*]] = call {{.*}}<2 x i64> @llvm.abs.v2i64(<2 x i64> %{{.*}}, i1 false)
return _mm_abs_epi64(__A);
}
+TEST_CONSTEXPR(match_v2di(_mm_abs_epi64((__m128i)(__v2di){+5, -3}), 5, 3));
+
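// NOTE: the '{{.*}}' spliced between 'call' and the result type is a
// FileCheck regex block; it tolerates anything that may precede the type
// in other RUN configurations (e.g. fast-math flags or return attributes
// such as 'noundef') without pinning the test to one exact spelling.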
__m128i test_mm_mask_abs_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_abs_epi64
- // CHECK: [[ABS:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm_mask_abs_epi64
+ // CHECK: [[ABS:%.*]] = call {{.*}}<2 x i64> @llvm.abs.v2i64(<2 x i64> %{{.*}}, i1 false)
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> [[ABS]], <2 x i64> %{{.*}}
return _mm_mask_abs_epi64(__W,__U,__A);
}
+TEST_CONSTEXPR(match_v2di(_mm_mask_abs_epi64((__m128i)(__v2di){99, 99}, (__mmask8)0x01, (__m128i)(__v2di){-1, 2}), 1, 99));
+
__m128i test_mm_maskz_abs_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_abs_epi64
- // CHECK: [[ABS:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm_maskz_abs_epi64
+ // CHECK: [[ABS:%.*]] = call {{.*}}<2 x i64> @llvm.abs.v2i64(<2 x i64> %{{.*}}, i1 false)
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> [[ABS]], <2 x i64> %{{.*}}
return _mm_maskz_abs_epi64(__U,__A);
}
+TEST_CONSTEXPR(match_v2di(_mm_maskz_abs_epi64((__mmask8)0x01, (__m128i)(__v2di){-1, 2}), 1, 0));
+
__m256i test_mm256_abs_epi64(__m256i __A) {
- // CHECK-LABEL: @test_mm256_abs_epi64
- // CHECK: [[ABS:%.*]] = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm256_abs_epi64
+ // CHECK: [[ABS:%.*]] = call {{.*}}<4 x i64> @llvm.abs.v4i64(<4 x i64> %{{.*}}, i1 false)
return _mm256_abs_epi64(__A);
}
+TEST_CONSTEXPR(match_v4di(_mm256_abs_epi64((__m256i)(__v4di){+5, -3, -1000, 1000}), 5, 3, 1000, 1000));
+
__m256i test_mm256_mask_abs_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_abs_epi64
- // CHECK: [[ABS:%.*]] = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm256_mask_abs_epi64
+ // CHECK: [[ABS:%.*]] = call {{.*}}<4 x i64> @llvm.abs.v4i64(<4 x i64> %{{.*}}, i1 false)
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> [[ABS]], <4 x i64> %{{.*}}
return _mm256_mask_abs_epi64(__W,__U,__A);
}
+TEST_CONSTEXPR(match_v4di(_mm256_mask_abs_epi64((__m256i)(__v4di){99, 99, 99, 99}, (__mmask8)0x01, (__m256i)(__v4di){-1, 2, 2, 2}), 1, 99, 99, 99));
+
__m256i test_mm256_maskz_abs_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_abs_epi64
- // CHECK: [[ABS:%.*]] = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm256_maskz_abs_epi64
+ // CHECK: [[ABS:%.*]] = call {{.*}}<4 x i64> @llvm.abs.v4i64(<4 x i64> %{{.*}}, i1 false)
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> [[ABS]], <4 x i64> %{{.*}}
return _mm256_maskz_abs_epi64(__U,__A);
}
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_abs_epi64((__mmask8)0x01, (__m256i)(__v4di){-1, 2, 2, 2}), 1, 0, 0, 0));
+
__m128i test_mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_max_epi32
+ // CHECK-LABEL: test_mm_maskz_max_epi32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4538,7 +4565,7 @@ __m128i test_mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
return _mm_maskz_max_epi32(__M,__A,__B);
}
__m128i test_mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_max_epi32
+ // CHECK-LABEL: test_mm_mask_max_epi32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4546,7 +4573,7 @@ __m128i test_mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i _
return _mm_mask_max_epi32(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_epi32
+ // CHECK-LABEL: test_mm256_maskz_max_epi32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4554,7 +4581,7 @@ __m256i test_mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_max_epi32(__M,__A,__B);
}
__m256i test_mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_max_epi32
+ // CHECK-LABEL: test_mm256_mask_max_epi32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4562,41 +4589,41 @@ __m256i test_mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256
return _mm256_mask_max_epi32(__W,__M,__A,__B);
}
__m128i test_mm_maskz_max_epi64(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_max_epi64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_max_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.smax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_maskz_max_epi64(__M,__A,__B);
}
__m128i test_mm_mask_max_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_max_epi64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_max_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.smax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_mask_max_epi64(__W,__M,__A,__B);
}
__m128i test_mm_max_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_max_epi64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_max_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.smax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_max_epi64(__A,__B);
}
__m256i test_mm256_maskz_max_epi64(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_epi64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_max_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.smax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_maskz_max_epi64(__M,__A,__B);
}
__m256i test_mm256_mask_max_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_max_epi64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_max_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.smax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_mask_max_epi64(__W,__M,__A,__B);
}
__m256i test_mm256_max_epi64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_max_epi64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_max_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.smax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_max_epi64(__A,__B);
}
__m128i test_mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_max_epu32
+ // CHECK-LABEL: test_mm_maskz_max_epu32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.umax.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4604,7 +4631,7 @@ __m128i test_mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
return _mm_maskz_max_epu32(__M,__A,__B);
}
__m128i test_mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_max_epu32
+ // CHECK-LABEL: test_mm_mask_max_epu32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.umax.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4612,7 +4639,7 @@ __m128i test_mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i _
return _mm_mask_max_epu32(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_epu32
+ // CHECK-LABEL: test_mm256_maskz_max_epu32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.umax.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4620,7 +4647,7 @@ __m256i test_mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_max_epu32(__M,__A,__B);
}
__m256i test_mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_max_epu32
+ // CHECK-LABEL: test_mm256_mask_max_epu32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.umax.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4628,41 +4655,41 @@ __m256i test_mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256
return _mm256_mask_max_epu32(__W,__M,__A,__B);
}
__m128i test_mm_maskz_max_epu64(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_max_epu64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_max_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.umax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_maskz_max_epu64(__M,__A,__B);
}
__m128i test_mm_max_epu64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_max_epu64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_max_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.umax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_max_epu64(__A,__B);
}
__m128i test_mm_mask_max_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_max_epu64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_max_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.umax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_mask_max_epu64(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_max_epu64(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_epu64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_max_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.umax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_maskz_max_epu64(__M,__A,__B);
}
__m256i test_mm256_max_epu64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_max_epu64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_max_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.umax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_max_epu64(__A,__B);
}
__m256i test_mm256_mask_max_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_max_epu64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_max_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.umax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_mask_max_epu64(__W,__M,__A,__B);
}
__m128i test_mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_min_epi32
+ // CHECK-LABEL: test_mm_maskz_min_epi32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4670,7 +4697,7 @@ __m128i test_mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
return _mm_maskz_min_epi32(__M,__A,__B);
}
__m128i test_mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_min_epi32
+ // CHECK-LABEL: test_mm_mask_min_epi32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4678,7 +4705,7 @@ __m128i test_mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i _
return _mm_mask_min_epi32(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_epi32
+ // CHECK-LABEL: test_mm256_maskz_min_epi32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4686,7 +4713,7 @@ __m256i test_mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_min_epi32(__M,__A,__B);
}
__m256i test_mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_min_epi32
+ // CHECK-LABEL: test_mm256_mask_min_epi32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4694,41 +4721,41 @@ __m256i test_mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256
return _mm256_mask_min_epi32(__W,__M,__A,__B);
}
__m128i test_mm_min_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_min_epi64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_min_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.smin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_min_epi64(__A,__B);
}
__m128i test_mm_mask_min_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_min_epi64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_min_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.smin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_mask_min_epi64(__W,__M,__A,__B);
}
__m128i test_mm_maskz_min_epi64(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_min_epi64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_min_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.smin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_maskz_min_epi64(__M,__A,__B);
}
__m256i test_mm256_min_epi64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_min_epi64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_min_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.smin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_min_epi64(__A,__B);
}
__m256i test_mm256_mask_min_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_min_epi64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_min_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.smin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_mask_min_epi64(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_min_epi64(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_epi64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_min_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.smin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_maskz_min_epi64(__M,__A,__B);
}
__m128i test_mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_min_epu32
+ // CHECK-LABEL: test_mm_maskz_min_epu32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4736,7 +4763,7 @@ __m128i test_mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
return _mm_maskz_min_epu32(__M,__A,__B);
}
__m128i test_mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_min_epu32
+ // CHECK-LABEL: test_mm_mask_min_epu32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4744,7 +4771,7 @@ __m128i test_mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i _
return _mm_mask_min_epu32(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_epu32
+ // CHECK-LABEL: test_mm256_maskz_min_epu32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4752,7 +4779,7 @@ __m256i test_mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_min_epu32(__M,__A,__B);
}
__m256i test_mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_min_epu32
+ // CHECK-LABEL: test_mm256_mask_min_epu32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4760,1914 +4787,1973 @@ __m256i test_mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256
return _mm256_mask_min_epu32(__W,__M,__A,__B);
}
__m128i test_mm_min_epu64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_min_epu64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_min_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_min_epu64(__A,__B);
}
__m128i test_mm_mask_min_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_min_epu64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_min_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_mask_min_epu64(__W,__M,__A,__B);
}
__m128i test_mm_maskz_min_epu64(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_min_epu64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_min_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_maskz_min_epu64(__M,__A,__B);
}
__m256i test_mm256_min_epu64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_min_epu64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_min_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.umin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_min_epu64(__A,__B);
}
__m256i test_mm256_mask_min_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_min_epu64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_min_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.umin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_mask_min_epu64(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_min_epu64(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_epu64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_min_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.umin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_maskz_min_epu64(__M,__A,__B);
}
__m128d test_mm_roundscale_pd(__m128d __A) {
- // CHECK-LABEL: @test_mm_roundscale_pd
+ // CHECK-LABEL: test_mm_roundscale_pd
// CHECK: @llvm.x86.avx512.mask.rndscale.pd.128
return _mm_roundscale_pd(__A,4);
}
__m128d test_mm_mask_roundscale_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_roundscale_pd
+ // CHECK-LABEL: test_mm_mask_roundscale_pd
// CHECK: @llvm.x86.avx512.mask.rndscale.pd.128
return _mm_mask_roundscale_pd(__W,__U,__A,4);
}
__m128d test_mm_maskz_roundscale_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_roundscale_pd
+ // CHECK-LABEL: test_mm_maskz_roundscale_pd
// CHECK: @llvm.x86.avx512.mask.rndscale.pd.128
return _mm_maskz_roundscale_pd(__U,__A,4);
}
__m256d test_mm256_roundscale_pd(__m256d __A) {
- // CHECK-LABEL: @test_mm256_roundscale_pd
+ // CHECK-LABEL: test_mm256_roundscale_pd
// CHECK: @llvm.x86.avx512.mask.rndscale.pd.256
return _mm256_roundscale_pd(__A,4);
}
__m256d test_mm256_mask_roundscale_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_roundscale_pd
+ // CHECK-LABEL: test_mm256_mask_roundscale_pd
// CHECK: @llvm.x86.avx512.mask.rndscale.pd.256
return _mm256_mask_roundscale_pd(__W,__U,__A,4);
}
__m256d test_mm256_maskz_roundscale_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_roundscale_pd
+ // CHECK-LABEL: test_mm256_maskz_roundscale_pd
// CHECK: @llvm.x86.avx512.mask.rndscale.pd.256
return _mm256_maskz_roundscale_pd(__U,__A,4);
}
__m128 test_mm_roundscale_ps(__m128 __A) {
- // CHECK-LABEL: @test_mm_roundscale_ps
+ // CHECK-LABEL: test_mm_roundscale_ps
// CHECK: @llvm.x86.avx512.mask.rndscale.ps.128
return _mm_roundscale_ps(__A,4);
}
__m128 test_mm_mask_roundscale_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_roundscale_ps
+ // CHECK-LABEL: test_mm_mask_roundscale_ps
// CHECK: @llvm.x86.avx512.mask.rndscale.ps.128
return _mm_mask_roundscale_ps(__W,__U,__A,4);
}
__m128 test_mm_maskz_roundscale_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_roundscale_ps
+ // CHECK-LABEL: test_mm_maskz_roundscale_ps
// CHECK: @llvm.x86.avx512.mask.rndscale.ps.128
return _mm_maskz_roundscale_ps(__U,__A, 4);
}
__m256 test_mm256_roundscale_ps(__m256 __A) {
- // CHECK-LABEL: @test_mm256_roundscale_ps
+ // CHECK-LABEL: test_mm256_roundscale_ps
// CHECK: @llvm.x86.avx512.mask.rndscale.ps.256
return _mm256_roundscale_ps(__A,4);
}
__m256 test_mm256_mask_roundscale_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_roundscale_ps
+ // CHECK-LABEL: test_mm256_mask_roundscale_ps
// CHECK: @llvm.x86.avx512.mask.rndscale.ps.256
return _mm256_mask_roundscale_ps(__W,__U,__A,4);
}
__m256 test_mm256_maskz_roundscale_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_roundscale_ps
+ // CHECK-LABEL: test_mm256_maskz_roundscale_ps
// CHECK: @llvm.x86.avx512.mask.rndscale.ps.256
return _mm256_maskz_roundscale_ps(__U,__A,4);
}
__m128d test_mm_scalef_pd(__m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_scalef_pd
+ // CHECK-LABEL: test_mm_scalef_pd
// CHECK: @llvm.x86.avx512.mask.scalef.pd.128
return _mm_scalef_pd(__A,__B);
}
__m128d test_mm_mask_scalef_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_scalef_pd
+ // CHECK-LABEL: test_mm_mask_scalef_pd
// CHECK: @llvm.x86.avx512.mask.scalef.pd.128
return _mm_mask_scalef_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_scalef_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_scalef_pd
+ // CHECK-LABEL: test_mm_maskz_scalef_pd
// CHECK: @llvm.x86.avx512.mask.scalef.pd.128
return _mm_maskz_scalef_pd(__U,__A,__B);
}
__m256d test_mm256_scalef_pd(__m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_scalef_pd
+ // CHECK-LABEL: test_mm256_scalef_pd
// CHECK: @llvm.x86.avx512.mask.scalef.pd.256
return _mm256_scalef_pd(__A,__B);
}
__m256d test_mm256_mask_scalef_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_scalef_pd
+ // CHECK-LABEL: test_mm256_mask_scalef_pd
// CHECK: @llvm.x86.avx512.mask.scalef.pd.256
return _mm256_mask_scalef_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_scalef_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_scalef_pd
+ // CHECK-LABEL: test_mm256_maskz_scalef_pd
// CHECK: @llvm.x86.avx512.mask.scalef.pd.256
return _mm256_maskz_scalef_pd(__U,__A,__B);
}
__m128 test_mm_scalef_ps(__m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_scalef_ps
+ // CHECK-LABEL: test_mm_scalef_ps
// CHECK: @llvm.x86.avx512.mask.scalef.ps.128
return _mm_scalef_ps(__A,__B);
}
__m128 test_mm_mask_scalef_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_scalef_ps
+ // CHECK-LABEL: test_mm_mask_scalef_ps
// CHECK: @llvm.x86.avx512.mask.scalef.ps.128
return _mm_mask_scalef_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_scalef_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_scalef_ps
+ // CHECK-LABEL: test_mm_maskz_scalef_ps
// CHECK: @llvm.x86.avx512.mask.scalef.ps.128
return _mm_maskz_scalef_ps(__U,__A,__B);
}
__m256 test_mm256_scalef_ps(__m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_scalef_ps
+ // CHECK-LABEL: test_mm256_scalef_ps
// CHECK: @llvm.x86.avx512.mask.scalef.ps.256
return _mm256_scalef_ps(__A,__B);
}
__m256 test_mm256_mask_scalef_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_scalef_ps
+ // CHECK-LABEL: test_mm256_mask_scalef_ps
// CHECK: @llvm.x86.avx512.mask.scalef.ps.256
return _mm256_mask_scalef_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_scalef_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_scalef_ps
+ // CHECK-LABEL: test_mm256_maskz_scalef_ps
// CHECK: @llvm.x86.avx512.mask.scalef.ps.256
return _mm256_maskz_scalef_ps(__U,__A,__B);
}
void test_mm_i64scatter_pd(double *__addr, __m128i __index, __m128d __v1) {
- // CHECK-LABEL: @test_mm_i64scatter_pd
+ // CHECK-LABEL: test_mm_i64scatter_pd
// CHECK: @llvm.x86.avx512.mask.scatterdiv2.df
return _mm_i64scatter_pd(__addr,__index,__v1,2);
}
void test_mm_mask_i64scatter_pd(double *__addr, __mmask8 __mask, __m128i __index, __m128d __v1) {
- // CHECK-LABEL: @test_mm_mask_i64scatter_pd
+ // CHECK-LABEL: test_mm_mask_i64scatter_pd
// CHECK: @llvm.x86.avx512.mask.scatterdiv2.df
return _mm_mask_i64scatter_pd(__addr,__mask,__index,__v1,2);
}
void test_mm_i64scatter_epi64(long long *__addr, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_i64scatter_epi64
+ // CHECK-LABEL: test_mm_i64scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scatterdiv2.di
return _mm_i64scatter_epi64(__addr,__index,__v1,2);
}
void test_mm_mask_i64scatter_epi64(long long *__addr, __mmask8 __mask, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_mask_i64scatter_epi64
+ // CHECK-LABEL: test_mm_mask_i64scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scatterdiv2.di
return _mm_mask_i64scatter_epi64(__addr,__mask,__index,__v1,2);
}
void test_mm256_i64scatter_pd(double *__addr, __m256i __index, __m256d __v1) {
- // CHECK-LABEL: @test_mm256_i64scatter_pd
+ // CHECK-LABEL: test_mm256_i64scatter_pd
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.df
return _mm256_i64scatter_pd(__addr,__index,__v1,2);
}
void test_mm256_mask_i64scatter_pd(double *__addr, __mmask8 __mask, __m256i __index, __m256d __v1) {
- // CHECK-LABEL: @test_mm256_mask_i64scatter_pd
+ // CHECK-LABEL: test_mm256_mask_i64scatter_pd
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.df
return _mm256_mask_i64scatter_pd(__addr,__mask,__index,__v1,2);
}
void test_mm256_i64scatter_epi64(long long *__addr, __m256i __index, __m256i __v1) {
- // CHECK-LABEL: @test_mm256_i64scatter_epi64
+ // CHECK-LABEL: test_mm256_i64scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.di
return _mm256_i64scatter_epi64(__addr,__index,__v1,2);
}
void test_mm256_mask_i64scatter_epi64(long long *__addr, __mmask8 __mask, __m256i __index, __m256i __v1) {
- // CHECK-LABEL: @test_mm256_mask_i64scatter_epi64
+ // CHECK-LABEL: test_mm256_mask_i64scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.di
return _mm256_mask_i64scatter_epi64(__addr,__mask,__index,__v1,2);
}
void test_mm_i64scatter_ps(float *__addr, __m128i __index, __m128 __v1) {
- // CHECK-LABEL: @test_mm_i64scatter_ps
+ // CHECK-LABEL: test_mm_i64scatter_ps
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.sf
return _mm_i64scatter_ps(__addr,__index,__v1,2);
}
void test_mm_mask_i64scatter_ps(float *__addr, __mmask8 __mask, __m128i __index, __m128 __v1) {
- // CHECK-LABEL: @test_mm_mask_i64scatter_ps
+ // CHECK-LABEL: test_mm_mask_i64scatter_ps
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.sf
return _mm_mask_i64scatter_ps(__addr,__mask,__index,__v1,2);
}
void test_mm_i64scatter_epi32(int *__addr, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_i64scatter_epi32
+ // CHECK-LABEL: test_mm_i64scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.si
return _mm_i64scatter_epi32(__addr,__index,__v1,2);
}
void test_mm_mask_i64scatter_epi32(int *__addr, __mmask8 __mask, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_mask_i64scatter_epi32
+ // CHECK-LABEL: test_mm_mask_i64scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.si
return _mm_mask_i64scatter_epi32(__addr,__mask,__index,__v1,2);
}
void test_mm256_i64scatter_ps(float *__addr, __m256i __index, __m128 __v1) {
- // CHECK-LABEL: @test_mm256_i64scatter_ps
+ // CHECK-LABEL: test_mm256_i64scatter_ps
// CHECK: @llvm.x86.avx512.mask.scatterdiv8.sf
return _mm256_i64scatter_ps(__addr,__index,__v1,2);
}
void test_mm256_mask_i64scatter_ps(float *__addr, __mmask8 __mask, __m256i __index, __m128 __v1) {
- // CHECK-LABEL: @test_mm256_mask_i64scatter_ps
+ // CHECK-LABEL: test_mm256_mask_i64scatter_ps
// CHECK: @llvm.x86.avx512.mask.scatterdiv8.sf
return _mm256_mask_i64scatter_ps(__addr,__mask,__index,__v1,2);
}
void test_mm256_i64scatter_epi32(int *__addr, __m256i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm256_i64scatter_epi32
+ // CHECK-LABEL: test_mm256_i64scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scatterdiv8.si
return _mm256_i64scatter_epi32(__addr,__index,__v1,2);
}
void test_mm256_mask_i64scatter_epi32(int *__addr, __mmask8 __mask, __m256i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm256_mask_i64scatter_epi32
+ // CHECK-LABEL: test_mm256_mask_i64scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scatterdiv8.si
return _mm256_mask_i64scatter_epi32(__addr,__mask,__index,__v1,2);
}
void test_mm_i32scatter_pd(double *__addr, __m128i __index, __m128d __v1) {
- // CHECK-LABEL: @test_mm_i32scatter_pd
+ // CHECK-LABEL: test_mm_i32scatter_pd
// CHECK: @llvm.x86.avx512.mask.scattersiv2.df
return _mm_i32scatter_pd(__addr,__index,__v1,2);
}
void test_mm_mask_i32scatter_pd(double *__addr, __mmask8 __mask, __m128i __index, __m128d __v1) {
- // CHECK-LABEL: @test_mm_mask_i32scatter_pd
+ // CHECK-LABEL: test_mm_mask_i32scatter_pd
// CHECK: @llvm.x86.avx512.mask.scattersiv2.df
return _mm_mask_i32scatter_pd(__addr,__mask,__index,__v1,2);
}
void test_mm_i32scatter_epi64(long long *__addr, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_i32scatter_epi64
+ // CHECK-LABEL: test_mm_i32scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scattersiv2.di
return _mm_i32scatter_epi64(__addr,__index,__v1,2);
}
void test_mm_mask_i32scatter_epi64(long long *__addr, __mmask8 __mask, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_mask_i32scatter_epi64
+ // CHECK-LABEL: test_mm_mask_i32scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scattersiv2.di
return _mm_mask_i32scatter_epi64(__addr,__mask,__index,__v1,2);
}
void test_mm256_i32scatter_pd(double *__addr, __m128i __index, __m256d __v1) {
- // CHECK-LABEL: @test_mm256_i32scatter_pd
+ // CHECK-LABEL: test_mm256_i32scatter_pd
// CHECK: @llvm.x86.avx512.mask.scattersiv4.df
return _mm256_i32scatter_pd(__addr,__index,__v1,2);
}
void test_mm256_mask_i32scatter_pd(double *__addr, __mmask8 __mask, __m128i __index, __m256d __v1) {
- // CHECK-LABEL: @test_mm256_mask_i32scatter_pd
+ // CHECK-LABEL: test_mm256_mask_i32scatter_pd
// CHECK: @llvm.x86.avx512.mask.scattersiv4.df
return _mm256_mask_i32scatter_pd(__addr,__mask,__index,__v1,2);
}
void test_mm256_i32scatter_epi64(long long *__addr, __m128i __index, __m256i __v1) {
- // CHECK-LABEL: @test_mm256_i32scatter_epi64
+ // CHECK-LABEL: test_mm256_i32scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scattersiv4.di
return _mm256_i32scatter_epi64(__addr,__index,__v1,2);
}
void test_mm256_mask_i32scatter_epi64(long long *__addr, __mmask8 __mask, __m128i __index, __m256i __v1) {
- // CHECK-LABEL: @test_mm256_mask_i32scatter_epi64
+ // CHECK-LABEL: test_mm256_mask_i32scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scattersiv4.di
return _mm256_mask_i32scatter_epi64(__addr,__mask,__index,__v1,2);
}
void test_mm_i32scatter_ps(float *__addr, __m128i __index, __m128 __v1) {
- // CHECK-LABEL: @test_mm_i32scatter_ps
+ // CHECK-LABEL: test_mm_i32scatter_ps
// CHECK: @llvm.x86.avx512.mask.scattersiv4.sf
return _mm_i32scatter_ps(__addr,__index,__v1,2);
}
void test_mm_mask_i32scatter_ps(float *__addr, __mmask8 __mask, __m128i __index, __m128 __v1) {
- // CHECK-LABEL: @test_mm_mask_i32scatter_ps
+ // CHECK-LABEL: test_mm_mask_i32scatter_ps
// CHECK: @llvm.x86.avx512.mask.scattersiv4.sf
return _mm_mask_i32scatter_ps(__addr,__mask,__index,__v1,2);
}
void test_mm_i32scatter_epi32(int *__addr, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_i32scatter_epi32
+ // CHECK-LABEL: test_mm_i32scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scattersiv4.si
return _mm_i32scatter_epi32(__addr,__index,__v1,2);
}
void test_mm_mask_i32scatter_epi32(int *__addr, __mmask8 __mask, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_mask_i32scatter_epi32
+ // CHECK-LABEL: test_mm_mask_i32scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scattersiv4.si
return _mm_mask_i32scatter_epi32(__addr,__mask,__index,__v1,2);
}
void test_mm256_i32scatter_ps(float *__addr, __m256i __index, __m256 __v1) {
- // CHECK-LABEL: @test_mm256_i32scatter_ps
+ // CHECK-LABEL: test_mm256_i32scatter_ps
// CHECK: @llvm.x86.avx512.mask.scattersiv8.sf
return _mm256_i32scatter_ps(__addr,__index,__v1,2);
}
void test_mm256_mask_i32scatter_ps(float *__addr, __mmask8 __mask, __m256i __index, __m256 __v1) {
- // CHECK-LABEL: @test_mm256_mask_i32scatter_ps
+ // CHECK-LABEL: test_mm256_mask_i32scatter_ps
// CHECK: @llvm.x86.avx512.mask.scattersiv8.sf
return _mm256_mask_i32scatter_ps(__addr,__mask,__index,__v1,2);
}
void test_mm256_i32scatter_epi32(int *__addr, __m256i __index, __m256i __v1) {
- // CHECK-LABEL: @test_mm256_i32scatter_epi32
+ // CHECK-LABEL: test_mm256_i32scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scattersiv8.si
return _mm256_i32scatter_epi32(__addr,__index,__v1,2);
}
void test_mm256_mask_i32scatter_epi32(int *__addr, __mmask8 __mask, __m256i __index, __m256i __v1) {
- // CHECK-LABEL: @test_mm256_mask_i32scatter_epi32
+ // CHECK-LABEL: test_mm256_mask_i32scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scattersiv8.si
return _mm256_mask_i32scatter_epi32(__addr,__mask,__index,__v1,2);
}
__m128d test_mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_sqrt_pd
+ // CHECK-LABEL: test_mm_mask_sqrt_pd
// CHECK: @llvm.sqrt.v2f64
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_sqrt_pd(__W,__U,__A);
}
__m128d test_mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_sqrt_pd
+ // CHECK-LABEL: test_mm_maskz_sqrt_pd
// CHECK: @llvm.sqrt.v2f64
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_sqrt_pd(__U,__A);
}
__m256d test_mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_sqrt_pd
+ // CHECK-LABEL: test_mm256_mask_sqrt_pd
// CHECK: @llvm.sqrt.v4f64
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_sqrt_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_sqrt_pd
+ // CHECK-LABEL: test_mm256_maskz_sqrt_pd
// CHECK: @llvm.sqrt.v4f64
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_sqrt_pd(__U,__A);
}
__m128 test_mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_sqrt_ps
+ // CHECK-LABEL: test_mm_mask_sqrt_ps
// CHECK: @llvm.sqrt.v4f32
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_sqrt_ps(__W,__U,__A);
}
__m128 test_mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_sqrt_ps
+ // CHECK-LABEL: test_mm_maskz_sqrt_ps
// CHECK: @llvm.sqrt.v4f32
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_sqrt_ps(__U,__A);
}
__m256 test_mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_sqrt_ps
+ // CHECK-LABEL: test_mm256_mask_sqrt_ps
// CHECK: @llvm.sqrt.v8f32
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_sqrt_ps(__W,__U,__A);
}
__m256 test_mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_sqrt_ps
+ // CHECK-LABEL: test_mm256_maskz_sqrt_ps
// CHECK: @llvm.sqrt.v8f32
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_sqrt_ps(__U,__A);
}
__m128d test_mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_sub_pd
+ // CHECK-LABEL: test_mm_mask_sub_pd
// CHECK: fsub <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_sub_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_sub_pd
+ // CHECK-LABEL: test_mm_maskz_sub_pd
// CHECK: fsub <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_sub_pd(__U,__A,__B);
}
__m256d test_mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_sub_pd
+ // CHECK-LABEL: test_mm256_mask_sub_pd
// CHECK: fsub <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_sub_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_sub_pd
+ // CHECK-LABEL: test_mm256_maskz_sub_pd
// CHECK: fsub <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_sub_pd(__U,__A,__B);
}
__m128 test_mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_sub_ps
+ // CHECK-LABEL: test_mm_mask_sub_ps
// CHECK: fsub <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_sub_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_sub_ps
+ // CHECK-LABEL: test_mm_maskz_sub_ps
// CHECK: fsub <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_sub_ps(__U,__A,__B);
}
__m256 test_mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_sub_ps
+ // CHECK-LABEL: test_mm256_mask_sub_ps
// CHECK: fsub <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_sub_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_sub_ps
+ // CHECK-LABEL: test_mm256_maskz_sub_ps
// CHECK: fsub <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_sub_ps(__U,__A,__B);
}
__m128i test_mm_mask2_permutex2var_epi32(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask2_permutex2var_epi32
+ // CHECK-LABEL: test_mm_mask2_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.128
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask2_permutex2var_epi32(__A,__I,__U,__B);
}
__m256i test_mm256_mask2_permutex2var_epi32(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask2_permutex2var_epi32
+ // CHECK-LABEL: test_mm256_mask2_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask2_permutex2var_epi32(__A,__I,__U,__B);
}
__m128d test_mm_mask2_permutex2var_pd(__m128d __A, __m128i __I, __mmask8 __U, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask2_permutex2var_pd
+ // CHECK-LABEL: test_mm_mask2_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.128
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask2_permutex2var_pd(__A,__I,__U,__B);
}
__m256d test_mm256_mask2_permutex2var_pd(__m256d __A, __m256i __I, __mmask8 __U, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask2_permutex2var_pd
+ // CHECK-LABEL: test_mm256_mask2_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask2_permutex2var_pd(__A,__I,__U,__B);
}
__m128 test_mm_mask2_permutex2var_ps(__m128 __A, __m128i __I, __mmask8 __U, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask2_permutex2var_ps
+ // CHECK-LABEL: test_mm_mask2_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.128
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask2_permutex2var_ps(__A,__I,__U,__B);
}
__m256 test_mm256_mask2_permutex2var_ps(__m256 __A, __m256i __I, __mmask8 __U, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask2_permutex2var_ps
+ // CHECK-LABEL: test_mm256_mask2_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask2_permutex2var_ps(__A,__I,__U,__B);
}
__m128i test_mm_mask2_permutex2var_epi64(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask2_permutex2var_epi64
+ // CHECK-LABEL: test_mm_mask2_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask2_permutex2var_epi64(__A,__I,__U,__B);
}
__m256i test_mm256_mask2_permutex2var_epi64(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask2_permutex2var_epi64
+ // CHECK-LABEL: test_mm256_mask2_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask2_permutex2var_epi64(__A,__I,__U,__B);
}
__m128i test_mm_permutex2var_epi32(__m128i __A, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_permutex2var_epi32
+ // CHECK-LABEL: test_mm_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.128
return _mm_permutex2var_epi32(__A,__I,__B);
}
__m128i test_mm_mask_permutex2var_epi32(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_permutex2var_epi32
+ // CHECK-LABEL: test_mm_mask_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.128
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_permutex2var_epi32(__A,__U,__I,__B);
}
__m128i test_mm_maskz_permutex2var_epi32(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_permutex2var_epi32
+ // CHECK-LABEL: test_mm_maskz_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.128
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_permutex2var_epi32(__U,__A,__I,__B);
}
__m256i test_mm256_permutex2var_epi32(__m256i __A, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_permutex2var_epi32
+ // CHECK-LABEL: test_mm256_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.256
return _mm256_permutex2var_epi32(__A,__I,__B);
}
__m256i test_mm256_mask_permutex2var_epi32(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_permutex2var_epi32
+ // CHECK-LABEL: test_mm256_mask_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_permutex2var_epi32(__A,__U,__I,__B);
}
__m256i test_mm256_maskz_permutex2var_epi32(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_permutex2var_epi32
+ // CHECK-LABEL: test_mm256_maskz_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_permutex2var_epi32(__U,__A,__I,__B);
}
__m128d test_mm_permutex2var_pd(__m128d __A, __m128i __I, __m128d __B) {
- // CHECK-LABEL: @test_mm_permutex2var_pd
+ // CHECK-LABEL: test_mm_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.128
return _mm_permutex2var_pd(__A,__I,__B);
}
__m128d test_mm_mask_permutex2var_pd(__m128d __A, __mmask8 __U, __m128i __I, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_permutex2var_pd
+ // CHECK-LABEL: test_mm_mask_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.128
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_permutex2var_pd(__A,__U,__I,__B);
}
__m128d test_mm_maskz_permutex2var_pd(__mmask8 __U, __m128d __A, __m128i __I, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_permutex2var_pd
+ // CHECK-LABEL: test_mm_maskz_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.128
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_permutex2var_pd(__U,__A,__I,__B);
}
__m256d test_mm256_permutex2var_pd(__m256d __A, __m256i __I, __m256d __B) {
- // CHECK-LABEL: @test_mm256_permutex2var_pd
+ // CHECK-LABEL: test_mm256_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.256
return _mm256_permutex2var_pd(__A,__I,__B);
}
__m256d test_mm256_mask_permutex2var_pd(__m256d __A, __mmask8 __U, __m256i __I, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_permutex2var_pd
+ // CHECK-LABEL: test_mm256_mask_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_permutex2var_pd(__A,__U,__I,__B);
}
__m256d test_mm256_maskz_permutex2var_pd(__mmask8 __U, __m256d __A, __m256i __I, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_permutex2var_pd
+ // CHECK-LABEL: test_mm256_maskz_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_permutex2var_pd(__U,__A,__I,__B);
}
__m128 test_mm_permutex2var_ps(__m128 __A, __m128i __I, __m128 __B) {
- // CHECK-LABEL: @test_mm_permutex2var_ps
+ // CHECK-LABEL: test_mm_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.128
return _mm_permutex2var_ps(__A,__I,__B);
}
__m128 test_mm_mask_permutex2var_ps(__m128 __A, __mmask8 __U, __m128i __I, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_permutex2var_ps
+ // CHECK-LABEL: test_mm_mask_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.128
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_permutex2var_ps(__A,__U,__I,__B);
}
__m128 test_mm_maskz_permutex2var_ps(__mmask8 __U, __m128 __A, __m128i __I, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_permutex2var_ps
+ // CHECK-LABEL: test_mm_maskz_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.128
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_permutex2var_ps(__U,__A,__I,__B);
}
__m256 test_mm256_permutex2var_ps(__m256 __A, __m256i __I, __m256 __B) {
- // CHECK-LABEL: @test_mm256_permutex2var_ps
+ // CHECK-LABEL: test_mm256_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.256
return _mm256_permutex2var_ps(__A,__I,__B);
}
__m256 test_mm256_mask_permutex2var_ps(__m256 __A, __mmask8 __U, __m256i __I, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_permutex2var_ps
+ // CHECK-LABEL: test_mm256_mask_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_permutex2var_ps(__A,__U,__I,__B);
}
__m256 test_mm256_maskz_permutex2var_ps(__mmask8 __U, __m256 __A, __m256i __I, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_permutex2var_ps
+ // CHECK-LABEL: test_mm256_maskz_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_permutex2var_ps(__U,__A,__I,__B);
}
__m128i test_mm_permutex2var_epi64(__m128i __A, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_permutex2var_epi64
+ // CHECK-LABEL: test_mm_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.128
return _mm_permutex2var_epi64(__A,__I,__B);
}
__m128i test_mm_mask_permutex2var_epi64(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_permutex2var_epi64
+ // CHECK-LABEL: test_mm_mask_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_permutex2var_epi64(__A,__U,__I,__B);
}
__m128i test_mm_maskz_permutex2var_epi64(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_permutex2var_epi64
+ // CHECK-LABEL: test_mm_maskz_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_permutex2var_epi64(__U,__A,__I,__B);
}
__m256i test_mm256_permutex2var_epi64(__m256i __A, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_permutex2var_epi64
+ // CHECK-LABEL: test_mm256_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.256
return _mm256_permutex2var_epi64(__A,__I,__B);
}
__m256i test_mm256_mask_permutex2var_epi64(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_permutex2var_epi64
+ // CHECK-LABEL: test_mm256_mask_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_permutex2var_epi64(__A,__U,__I,__B);
}
__m256i test_mm256_maskz_permutex2var_epi64(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_permutex2var_epi64
+ // CHECK-LABEL: test_mm256_maskz_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_permutex2var_epi64(__U,__A,__I,__B);
}
__m128i test_mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi8_epi32
+ // CHECK-LABEL: test_mm_mask_cvtepi8_epi32
// CHECK: sext <4 x i8> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_cvtepi8_epi32(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi8_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtepi8_epi32
// CHECK: sext <4 x i8> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_cvtepi8_epi32(__U, __A);
}
__m256i test_mm256_mask_cvtepi8_epi32(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi8_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtepi8_epi32
// CHECK: sext <8 x i8> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_cvtepi8_epi32(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi8_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtepi8_epi32
// CHECK: sext <8 x i8> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_cvtepi8_epi32(__U, __A);
}
__m128i test_mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi8_epi64
+ // CHECK-LABEL: test_mm_mask_cvtepi8_epi64
// CHECK: sext <2 x i8> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_cvtepi8_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi8_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtepi8_epi64
// CHECK: sext <2 x i8> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_cvtepi8_epi64(__U, __A);
}
__m256i test_mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi8_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtepi8_epi64
// CHECK: sext <4 x i8> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_cvtepi8_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi8_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtepi8_epi64
// CHECK: sext <4 x i8> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_cvtepi8_epi64(__U, __A);
}
__m128i test_mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_epi64
+ // CHECK-LABEL: test_mm_mask_cvtepi32_epi64
// CHECK: sext <2 x i32> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_cvtepi32_epi64(__W, __U, __X);
}
__m128i test_mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi32_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtepi32_epi64
// CHECK: sext <2 x i32> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_cvtepi32_epi64(__U, __X);
}
__m256i test_mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_epi64
// CHECK: sext <4 x i32> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_cvtepi32_epi64(__W, __U, __X);
}
__m256i test_mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi32_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtepi32_epi64
// CHECK: sext <4 x i32> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_cvtepi32_epi64(__U, __X);
}
__m128i test_mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi16_epi32
+ // CHECK-LABEL: test_mm_mask_cvtepi16_epi32
// CHECK: sext <4 x i16> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_cvtepi16_epi32(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi16_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtepi16_epi32
// CHECK: sext <4 x i16> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_cvtepi16_epi32(__U, __A);
}
__m256i test_mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi16_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtepi16_epi32
// CHECK: sext <8 x i16> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_cvtepi16_epi32(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi16_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtepi16_epi32
// CHECK: sext <8 x i16> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_cvtepi16_epi32(__U, __A);
}
__m128i test_mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi16_epi64
+ // CHECK-LABEL: test_mm_mask_cvtepi16_epi64
// CHECK: sext <2 x i16> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_cvtepi16_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi16_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtepi16_epi64
// CHECK: sext <2 x i16> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_cvtepi16_epi64(__U, __A);
}
__m256i test_mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi16_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtepi16_epi64
// CHECK: sext <4 x i16> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_cvtepi16_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi16_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtepi16_epi64
// CHECK: sext <4 x i16> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_cvtepi16_epi64(__U, __A);
}
__m128i test_mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu8_epi32
+ // CHECK-LABEL: test_mm_mask_cvtepu8_epi32
// CHECK: zext <4 x i8> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_cvtepu8_epi32(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu8_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtepu8_epi32
// CHECK: zext <4 x i8> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_cvtepu8_epi32(__U, __A);
}
__m256i test_mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu8_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtepu8_epi32
// CHECK: zext <8 x i8> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_cvtepu8_epi32(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu8_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtepu8_epi32
// CHECK: zext <8 x i8> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_cvtepu8_epi32(__U, __A);
}
__m128i test_mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu8_epi64
+ // CHECK-LABEL: test_mm_mask_cvtepu8_epi64
// CHECK: zext <2 x i8> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_cvtepu8_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu8_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtepu8_epi64
// CHECK: zext <2 x i8> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_cvtepu8_epi64(__U, __A);
}
__m256i test_mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu8_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtepu8_epi64
// CHECK: zext <4 x i8> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_cvtepu8_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu8_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtepu8_epi64
// CHECK: zext <4 x i8> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_cvtepu8_epi64(__U, __A);
}
__m128i test_mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm_mask_cvtepu32_epi64
+ // CHECK-LABEL: test_mm_mask_cvtepu32_epi64
// CHECK: zext <2 x i32> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_cvtepu32_epi64(__W, __U, __X);
}
__m128i test_mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu32_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtepu32_epi64
// CHECK: zext <2 x i32> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_cvtepu32_epi64(__U, __X);
}
__m256i test_mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu32_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtepu32_epi64
// CHECK: zext <4 x i32> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_cvtepu32_epi64(__W, __U, __X);
}
__m256i test_mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu32_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtepu32_epi64
// CHECK: zext <4 x i32> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_cvtepu32_epi64(__U, __X);
}
__m128i test_mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu16_epi32
+ // CHECK-LABEL: test_mm_mask_cvtepu16_epi32
// CHECK: zext <4 x i16> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_cvtepu16_epi32(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu16_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtepu16_epi32
// CHECK: zext <4 x i16> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_cvtepu16_epi32(__U, __A);
}
__m256i test_mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu16_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtepu16_epi32
// CHECK: zext <8 x i16> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_cvtepu16_epi32(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu16_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtepu16_epi32
// CHECK: zext <8 x i16> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_cvtepu16_epi32(__U, __A);
}
__m128i test_mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu16_epi64
+ // CHECK-LABEL: test_mm_mask_cvtepu16_epi64
// CHECK: zext <2 x i16> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_cvtepu16_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu16_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtepu16_epi64
// CHECK: zext <2 x i16> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_cvtepu16_epi64(__U, __A);
}
__m256i test_mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu16_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtepu16_epi64
// CHECK: zext <4 x i16> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_cvtepu16_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu16_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtepu16_epi64
// CHECK: zext <4 x i16> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_cvtepu16_epi64(__U, __A);
}
__m128i test_mm_rol_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_rol_epi32
+ // CHECK-LABEL: test_mm_rol_epi32
// CHECK: @llvm.fshl.v4i32
return _mm_rol_epi32(__A, 5);
}
+TEST_CONSTEXPR(match_v4si(_mm_rol_epi32(((__m128i)(__v4si){1, -2, 3, -4}), 5), 32, -33, 96, -97));
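+// For reference, each 32-bit lane is rotated left by the immediate:
+// rol32(x, n) == (x << n) | ((uint32_t)x >> (32 - n)), so rol32(1, 5) == 32
+// and rol32(-2, 5) == ~rol32(1, 5) == -33.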
__m128i test_mm_mask_rol_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_rol_epi32
+ // CHECK-LABEL: test_mm_mask_rol_epi32
// CHECK: @llvm.fshl.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_rol_epi32(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v4si(_mm_mask_rol_epi32(((__m128i)(__v4si){99, 99, 99, 99}), 0x3, ((__m128i)(__v4si){1, -2, 3, -4}), 5), 32, -33, 99, 99));
__m128i test_mm_maskz_rol_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_rol_epi32
+ // CHECK-LABEL: test_mm_maskz_rol_epi32
// CHECK: @llvm.fshl.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_rol_epi32(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v4si(_mm_maskz_rol_epi32(0x9, ((__m128i)(__v4si){1, -2, 3, -4}), 5), 32, 0, 0, -97));
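+// Mask semantics: bit i of the mask selects rotated lane i; mask_* keeps the
+// corresponding __W lane (99 above) and maskz_* zeroes it. 0x9 == 0b1001
+// keeps rotated lanes 0 and 3 and zeroes lanes 1 and 2.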
__m256i test_mm256_rol_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_rol_epi32
+ // CHECK-LABEL: test_mm256_rol_epi32
// CHECK: @llvm.fshl.v8i32
return _mm256_rol_epi32(__A, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_rol_epi32(((__m256i)(__v8si){1, -2, 3, -4, -5, 6, -7, 8}), 5), 32, -33, 96, -97, -129, 192, -193, 256));
__m256i test_mm256_mask_rol_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_rol_epi32
+ // CHECK-LABEL: test_mm256_mask_rol_epi32
// CHECK: @llvm.fshl.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_rol_epi32(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_rol_epi32(((__m256i)(__v8si){99, 99, 99, 99, 99, 99, 99, 99}), 0x73, ((__m256i)(__v8si){1, -2, 3, -4, -5, 6, -7, 8}), 5), 32, -33, 99, 99, -129, 192, -193, 99));
__m256i test_mm256_maskz_rol_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_rol_epi32
+ // CHECK-LABEL: test_mm256_maskz_rol_epi32
// CHECK: @llvm.fshl.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_rol_epi32(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_rol_epi32(0x37, ((__m256i)(__v8si){1, -2, 3, -4, -5, 6, -7, 8}), 5), 32, -33, 96, 0, -129, 192, 0, 0));
__m128i test_mm_rol_epi64(__m128i __A) {
- // CHECK-LABEL: @test_mm_rol_epi64
+ // CHECK-LABEL: test_mm_rol_epi64
// CHECK: @llvm.fshl.v2i64
return _mm_rol_epi64(__A, 5);
}
+TEST_CONSTEXPR(match_v2di(_mm_rol_epi64(((__m128i)(__v2di){10, -11}), 19), 5242880, -5242881));
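+// Sanity check: rol64(10, 19) == 10 << 19 == 5242880; rotating the
+// complement -11 == ~10 yields the complement ~5242880 == -5242881.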
__m128i test_mm_mask_rol_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_rol_epi64
+ // CHECK-LABEL: test_mm_mask_rol_epi64
// CHECK: @llvm.fshl.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_rol_epi64(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v2di(_mm_mask_rol_epi64(((__m128i)(__v2di){99, 99}), 0x1, ((__m128i)(__v2di){10, -11}), 19), 5242880, 99));
__m128i test_mm_maskz_rol_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_rol_epi64
+ // CHECK-LABEL: test_mm_maskz_rol_epi64
// CHECK: @llvm.fshl.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_rol_epi64(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v2di(_mm_maskz_rol_epi64(0x2, ((__m128i)(__v2di){10, -11}), 19), 0, -5242881));
__m256i test_mm256_rol_epi64(__m256i __A) {
- // CHECK-LABEL: @test_mm256_rol_epi64
+ // CHECK-LABEL: test_mm256_rol_epi64
// CHECK: @llvm.fshl.v4i64
return _mm256_rol_epi64(__A, 5);
}
+TEST_CONSTEXPR(match_v4di(_mm256_rol_epi64(((__m256i)(__v4di){10, -11, -12, 13}), 19), 5242880, -5242881, -5767169, 6815744));
__m256i test_mm256_mask_rol_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_rol_epi64
+ // CHECK-LABEL: test_mm256_mask_rol_epi64
// CHECK: @llvm.fshl.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_rol_epi64(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v4di(_mm256_mask_rol_epi64(((__m256i)(__v4di){99, 99, 99, 99}), 0x9, ((__m256i)(__v4di){10, -11, -12, 13}), 19), 5242880, 99, 99, 6815744));
__m256i test_mm256_maskz_rol_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_rol_epi64
+ // CHECK-LABEL: test_mm256_maskz_rol_epi64
// CHECK: @llvm.fshl.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_rol_epi64(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_rol_epi64(0xC, ((__m256i)(__v4di){10, -11, -12, 13}), 19), 0, 0, -5767169, 6815744));
__m128i test_mm_rolv_epi32(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_rolv_epi32
+ // CHECK-LABEL: test_mm_rolv_epi32
// CHECK: llvm.fshl.v4i32
return _mm_rolv_epi32(__A, __B);
}
__m128i test_mm_mask_rolv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_rolv_epi32
+ // CHECK-LABEL: test_mm_mask_rolv_epi32
// CHECK: llvm.fshl.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_rolv_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_rolv_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_rolv_epi32
+ // CHECK-LABEL: test_mm_maskz_rolv_epi32
// CHECK: llvm.fshl.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_rolv_epi32(__U, __A, __B);
}
__m256i test_mm256_rolv_epi32(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_rolv_epi32
+ // CHECK-LABEL: test_mm256_rolv_epi32
// CHECK: @llvm.fshl.v8i32
return _mm256_rolv_epi32(__A, __B);
}
__m256i test_mm256_mask_rolv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_rolv_epi32
+ // CHECK-LABEL: test_mm256_mask_rolv_epi32
// CHECK: @llvm.fshl.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_rolv_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_rolv_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_rolv_epi32
+ // CHECK-LABEL: test_mm256_maskz_rolv_epi32
// CHECK: @llvm.fshl.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_rolv_epi32(__U, __A, __B);
}
__m128i test_mm_rolv_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_rolv_epi64
+ // CHECK-LABEL: test_mm_rolv_epi64
// CHECK: @llvm.fshl.v2i64
return _mm_rolv_epi64(__A, __B);
}
__m128i test_mm_mask_rolv_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_rolv_epi64
+ // CHECK-LABEL: test_mm_mask_rolv_epi64
// CHECK: @llvm.fshl.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_rolv_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_rolv_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_rolv_epi64
+ // CHECK-LABEL: test_mm_maskz_rolv_epi64
// CHECK: @llvm.fshl.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_rolv_epi64(__U, __A, __B);
}
__m256i test_mm256_rolv_epi64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_rolv_epi64
+ // CHECK-LABEL: test_mm256_rolv_epi64
// CHECK: @llvm.fshl.v4i64
return _mm256_rolv_epi64(__A, __B);
}
__m256i test_mm256_mask_rolv_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_rolv_epi64
+ // CHECK-LABEL: test_mm256_mask_rolv_epi64
// CHECK: @llvm.fshl.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_rolv_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_rolv_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_rolv_epi64
+ // CHECK-LABEL: test_mm256_maskz_rolv_epi64
// CHECK: @llvm.fshl.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_rolv_epi64(__U, __A, __B);
}
__m128i test_mm_ror_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_ror_epi32
+ // CHECK-LABEL: test_mm_ror_epi32
// CHECK: @llvm.fshr.v4i32
return _mm_ror_epi32(__A, 5);
}
+TEST_CONSTEXPR(match_v4si(_mm_ror_epi32(((__m128i)(__v4si){1, -2, 3, -4}), 5), 134217728, -134217729, 402653184, -402653185));
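+// ror is the mirror rotate: ror32(x, 5) == rol32(x, 27), so ror32(1, 5) ==
+// 1 << 27 == 134217728 and ror32(-2, 5) == ~134217728 == -134217729.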
__m128i test_mm_mask_ror_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_ror_epi32
+ // CHECK-LABEL: test_mm_mask_ror_epi32
// CHECK: @llvm.fshr.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_ror_epi32(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v4si(_mm_mask_ror_epi32(((__m128i)(__v4si){99, 99, 99, 99}), 0x3, ((__m128i)(__v4si){1, -2, 3, -4}), 5), 134217728, -134217729, 99, 99));
__m128i test_mm_maskz_ror_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_ror_epi32
+ // CHECK-LABEL: test_mm_maskz_ror_epi32
// CHECK: @llvm.fshr.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_ror_epi32(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v4si(_mm_maskz_ror_epi32(0x9, ((__m128i)(__v4si){1, -2, 3, -4}), 5), 134217728, 0, 0, -402653185));
__m256i test_mm256_ror_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_ror_epi32
+ // CHECK-LABEL: test_mm256_ror_epi32
// CHECK: @llvm.fshr.v8i32
return _mm256_ror_epi32(__A, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_ror_epi32(((__m256i)(__v8si){1, -2, 3, -4, -5, 6, -7, 8}), 5), 134217728, -134217729, 402653184, -402653185, -536870913, 805306368, -805306369, 1073741824));
__m256i test_mm256_mask_ror_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_ror_epi32
+ // CHECK-LABEL: test_mm256_mask_ror_epi32
// CHECK: @llvm.fshr.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_ror_epi32(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_ror_epi32(((__m256i)(__v8si){99, 99, 99, 99, 99, 99, 99, 99}), 0x73, ((__m256i)(__v8si){1, -2, 3, -4, -5, 6, -7, 8}), 5), 134217728, -134217729, 99, 99, -536870913, 805306368, -805306369, 99));
__m256i test_mm256_maskz_ror_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_ror_epi32
+ // CHECK-LABEL: test_mm256_maskz_ror_epi32
// CHECK: @llvm.fshr.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_ror_epi32(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_ror_epi32(0x37, ((__m256i)(__v8si){1, -2, 3, -4, -5, 6, -7, 8}), 5), 134217728, -134217729, 402653184, 0, -536870913, 805306368, 0, 0));
__m128i test_mm_ror_epi64(__m128i __A) {
- // CHECK-LABEL: @test_mm_ror_epi64
+ // CHECK-LABEL: test_mm_ror_epi64
// CHECK: @llvm.fshr.v2i64
return _mm_ror_epi64(__A, 5);
}
+TEST_CONSTEXPR(match_v2di(_mm_ror_epi64(((__m128i)(__v2di){10, -11}), 19), 351843720888320LL, -351843720888321LL));
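+// Sanity check: ror64(10, 19) == rol64(10, 45) == 10 * 2^45 ==
+// 351843720888320; the complement lane -11 again mirrors it exactly.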
__m128i test_mm_mask_ror_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_ror_epi64
+ // CHECK-LABEL: test_mm_mask_ror_epi64
// CHECK: @llvm.fshr.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_ror_epi64(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v2di(_mm_mask_ror_epi64(((__m128i)(__v2di){99, 99}), 0x1, ((__m128i)(__v2di){10, -11}), 19), 351843720888320LL, 99));
__m128i test_mm_maskz_ror_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_ror_epi64
+ // CHECK-LABEL: test_mm_maskz_ror_epi64
// CHECK: @llvm.fshr.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_ror_epi64(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v2di(_mm_maskz_ror_epi64(0x2, ((__m128i)(__v2di){10, -11}), 19), 0, -351843720888321LL));
__m256i test_mm256_ror_epi64(__m256i __A) {
- // CHECK-LABEL: @test_mm256_ror_epi64
+ // CHECK-LABEL: test_mm256_ror_epi64
// CHECK: @llvm.fshr.v4i64
return _mm256_ror_epi64(__A, 5);
}
+TEST_CONSTEXPR(match_v4di(_mm256_ror_epi64(((__m256i)(__v4di){10, -11, -12, 13}), 19), 351843720888320LL, -351843720888321LL, -387028092977153LL, 457396837154816LL));
__m256i test_mm256_mask_ror_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_ror_epi64
+ // CHECK-LABEL: test_mm256_mask_ror_epi64
// CHECK: @llvm.fshr.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_ror_epi64(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v4di(_mm256_mask_ror_epi64(((__m256i)(__v4di){99, 99, 99, 99}), 0x9, ((__m256i)(__v4di){10, -11, -12, 13}), 19), 351843720888320LL, 99, 99, 457396837154816LL));
__m256i test_mm256_maskz_ror_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_ror_epi64
+ // CHECK-LABEL: test_mm256_maskz_ror_epi64
// CHECK: @llvm.fshr.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_ror_epi64(__U, __A, 5);
}
-
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_ror_epi64(0xC, ((__m256i)(__v4di){10, -11, -12, 13}), 19), 0, 0, -387028092977153LL, 457396837154816LL));
__m128i test_mm_rorv_epi32(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_rorv_epi32
+ // CHECK-LABEL: test_mm_rorv_epi32
// CHECK: @llvm.fshr.v4i32
return _mm_rorv_epi32(__A, __B);
}
__m128i test_mm_mask_rorv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_rorv_epi32
+ // CHECK-LABEL: test_mm_mask_rorv_epi32
// CHECK: @llvm.fshr.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_rorv_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_rorv_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_rorv_epi32
+ // CHECK-LABEL: test_mm_maskz_rorv_epi32
// CHECK: @llvm.fshr.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_rorv_epi32(__U, __A, __B);
}
__m256i test_mm256_rorv_epi32(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_rorv_epi32
+ // CHECK-LABEL: test_mm256_rorv_epi32
// CHECK: @llvm.fshr.v8i32
return _mm256_rorv_epi32(__A, __B);
}
__m256i test_mm256_mask_rorv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_rorv_epi32
+ // CHECK-LABEL: test_mm256_mask_rorv_epi32
// CHECK: @llvm.fshr.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_rorv_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_rorv_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_rorv_epi32
+ // CHECK-LABEL: test_mm256_maskz_rorv_epi32
// CHECK: @llvm.fshr.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_rorv_epi32(__U, __A, __B);
}
__m128i test_mm_rorv_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_rorv_epi64
+ // CHECK-LABEL: test_mm_rorv_epi64
// CHECK: @llvm.fshr.v2i64
return _mm_rorv_epi64(__A, __B);
}
__m128i test_mm_mask_rorv_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_rorv_epi64
+ // CHECK-LABEL: test_mm_mask_rorv_epi64
// CHECK: @llvm.fshr.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_rorv_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_rorv_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_rorv_epi64
+ // CHECK-LABEL: test_mm_maskz_rorv_epi64
// CHECK: @llvm.fshr.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_rorv_epi64(__U, __A, __B);
}
__m256i test_mm256_rorv_epi64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_rorv_epi64
+ // CHECK-LABEL: test_mm256_rorv_epi64
// CHECK: @llvm.fshr.v4i64
return _mm256_rorv_epi64(__A, __B);
}
__m256i test_mm256_mask_rorv_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_rorv_epi64
+ // CHECK-LABEL: test_mm256_mask_rorv_epi64
// CHECK: @llvm.fshr.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_rorv_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_rorv_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_rorv_epi64
+ // CHECK-LABEL: test_mm256_maskz_rorv_epi64
// CHECK: @llvm.fshr.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_rorv_epi64(__U, __A, __B);
}
__m128i test_mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_sllv_epi64
+ // CHECK-LABEL: test_mm_mask_sllv_epi64
// CHECK: @llvm.x86.avx2.psllv.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_sllv_epi64(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v2di(_mm_mask_sllv_epi64((__m128i)(__v2di){99, 99}, (__mmask8)0x1, (__m128i)(__v2di){1, -3}, (__m128i)(__v2di){8, 63}), 256, 99));
__m128i test_mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_sllv_epi64
+ // CHECK-LABEL: test_mm_maskz_sllv_epi64
// CHECK: @llvm.x86.avx2.psllv.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_sllv_epi64(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_m128i(_mm_maskz_sllv_epi64((__mmask8)0x2, (__m128i)(__v2di){1, -3}, (__m128i)(__v2di){8, 63}), 0, 0x8000000000000000ULL));
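+// Lane 1: shifting -3 left by 63 keeps only its low bit, giving
+// 0x8000000000000000; lane 0 is zeroed because mask bit 0 is clear.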
__m256i test_mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_sllv_epi64
+ // CHECK-LABEL: test_mm256_mask_sllv_epi64
// CHECK: @llvm.x86.avx2.psllv.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_sllv_epi64(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v4di(_mm256_mask_sllv_epi64((__m256i)(__v4di){99, 99, 99, 99}, (__mmask8)0x6, (__m256i)(__v4di){1, -2, 3, -4}, (__m256i)(__v4di){1, 2, 3, -4}), 99, -8, 24, 99));
__m256i test_mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_sllv_epi64
+ // CHECK-LABEL: test_mm256_maskz_sllv_epi64
// CHECK: @llvm.x86.avx2.psllv.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_sllv_epi64(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_sllv_epi64((__mmask8)0x9, (__m256i)(__v4di){1, -2, 3, -4}, (__m256i)(__v4di){1, 2, 3, -4}), 2, 0, 0, 0));
__m128i test_mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_sllv_epi32
+ // CHECK-LABEL: test_mm_mask_sllv_epi32
// CHECK: @llvm.x86.avx2.psllv.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_sllv_epi32(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v4si(_mm_mask_sllv_epi32((__m128i)(__v4si){99, 99, 99, 99}, (__mmask8)0x0E, (__m128i)(__v4si){1, -2, 3, -4}, (__m128i)(__v4si){1, 2, 3, -4}), 99, -8, 24, 0));
__m128i test_mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_sllv_epi32
+ // CHECK-LABEL: test_mm_maskz_sllv_epi32
// CHECK: @llvm.x86.avx2.psllv.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_sllv_epi32(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_v4si(_mm_maskz_sllv_epi32((__mmask8)0xC, (__m128i)(__v4si){1, -2, 3, -4}, (__m128i)(__v4si){1, 2, 3, -4}), 0, 0, 24, 0));
__m256i test_mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_sllv_epi32
+ // CHECK-LABEL: test_mm256_mask_sllv_epi32
// CHECK: @llvm.x86.avx2.psllv.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_sllv_epi32(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_sllv_epi32((__m256i)(__v8si){99, 99, 99, 99, 99, 99, 99, 99}, (__mmask8)0x3C, (__m256i)(__v8si){1, -2, 3, -4, 5, -6, 7, -8}, (__m256i)(__v8si){1, 2, 3, 4, -17, 31, 33, 29}), 99, 99, 24, -64, 0, 0, 99, 99));
__m256i test_mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_sllv_epi32
+ // CHECK-LABEL: test_mm256_maskz_sllv_epi32
// CHECK: @llvm.x86.avx2.psllv.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_sllv_epi32(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_sllv_epi32((__mmask8)0xFE, (__m256i)(__v8si){1, -2, 3, -4, 5, -6, 7, -8}, (__m256i)(__v8si){1, 2, 3, 4, -17, 31, 33, 29}), 0, -8, 24, -64, 0, 0, 0, 0));
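+// Variable shifts zero out-of-range counts: any count outside 0..31 (here
+// -17 and 33, both huge as unsigned) produces 0, per the VPSLLVD semantics.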
__m128i test_mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_srlv_epi64
+ // CHECK-LABEL: test_mm_mask_srlv_epi64
// CHECK: @llvm.x86.avx2.psrlv.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srlv_epi64(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_m128i(_mm_mask_srlv_epi64((__m128i)(__v2di){99, 99}, (__mmask8)0x1, (__m128i)(__v2di){1, -3}, (__m128i)(__v2di){8, 63}), 0, 99));
__m128i test_mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_srlv_epi64
+ // CHECK-LABEL: test_mm_maskz_srlv_epi64
// CHECK: @llvm.x86.avx2.psrlv.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srlv_epi64(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_m128i(_mm_maskz_srlv_epi64((__mmask8)0x2, (__m128i)(__v2di){1, -3}, (__m128i)(__v2di){8, 63}), 0, 1));
__m256i test_mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_srlv_epi64
+ // CHECK-LABEL: test_mm256_mask_srlv_epi64
// CHECK: @llvm.x86.avx2.psrlv.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srlv_epi64(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_m256i(_mm256_mask_srlv_epi64((__m256i)(__v4di){99, 99, 99, 99}, (__mmask8)0x6, (__m256i)(__v4di){1, -2, 3, -4}, (__m256i)(__v4di){1, 2, 3, -4}), 99, 0x3FFFFFFFFFFFFFFFULL, 0, 99));
__m256i test_mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_srlv_epi64
+ // CHECK-LABEL: test_mm256_maskz_srlv_epi64
// CHECK: @llvm.x86.avx2.psrlv.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srlv_epi64(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_m256i(_mm256_maskz_srlv_epi64((__mmask8)0x1, (__m256i)(__v4di){1, -2, 3, -4}, (__m256i)(__v4di){1, 2, 3, -4}), 0, 0, 0, 0));
__m128i test_mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_srlv_epi32
+ // CHECK-LABEL: test_mm_mask_srlv_epi32
// CHECK: @llvm.x86.avx2.psrlv.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srlv_epi32(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v4si(_mm_mask_srlv_epi32((__m128i)(__v4si){99, 99, 99, 99}, (__mmask8)0x0E, (__m128i)(__v4si){1, -2, 3, -4}, (__m128i)(__v4si){1, 2, 3, -4}), 99, 1073741823, 0, 0));
__m128i test_mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_srlv_epi32
+ // CHECK-LABEL: test_mm_maskz_srlv_epi32
// CHECK: @llvm.x86.avx2.psrlv.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srlv_epi32(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_v4si(_mm_maskz_srlv_epi32((__mmask8)0xC, (__m128i)(__v4si){1, -2, 3, -4}, (__m128i)(__v4si){1, 2, 3, -4}), 0, 0, 0, 0));
__m256i test_mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_srlv_epi32
+ // CHECK-LABEL: test_mm256_mask_srlv_epi32
// CHECK: @llvm.x86.avx2.psrlv.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srlv_epi32(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_srlv_epi32((__m256i)(__v8si){99, 99, 99, 99, 99, 99, 99, 99}, (__mmask8)0x3C, (__m256i)(__v8si){1, -2, 3, -4, 5, -6, 7, -8}, (__m256i)(__v8si){1, 2, 3, 4, -17, 31, 33, 29}), 99, 99, 0, 268435455, 0, 1, 99, 99));
__m256i test_mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_srlv_epi32
+ // CHECK-LABEL: test_mm256_maskz_srlv_epi32
// CHECK: @llvm.x86.avx2.psrlv.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srlv_epi32(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_srlv_epi32((__mmask8)0x9E, (__m256i)(__v8si){1, -2, 3, -4, 5, -6, 7, -8}, (__m256i)(__v8si){1, 2, 3, 4, -17, 31, 33, 29}), 0, 1073741823, 0, 268435455, 0, 0, 0, 7));
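+// srlv is a logical right shift, so negative lanes are treated as unsigned:
+// lane 1 is 0xFFFFFFFE >> 2 == 1073741823 and lane 7 is 0xFFFFFFF8 >> 29 == 7.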
__m128i test_mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_srl_epi32
+ // CHECK-LABEL: test_mm_mask_srl_epi32
// CHECK: @llvm.x86.sse2.psrl.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srl_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_srl_epi32
+ // CHECK-LABEL: test_mm_maskz_srl_epi32
// CHECK: @llvm.x86.sse2.psrl.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srl_epi32(__U, __A, __B);
}
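+// Sketch (hypothetical addition, not in this commit): assuming _mm_maskz_srl_epi32
+// is constexpr-enabled like the srlv forms above; the low 64 bits of the count
+// vector shift every lane, and mask 0x5 keeps lanes 0 and 2.
+TEST_CONSTEXPR(match_v4si(_mm_maskz_srl_epi32((__mmask8)0x5, (__m128i)(__v4si){16, -32, 64, 128}, (__m128i)(__v2di){2, 0}), 4, 0, 16, 0));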
__m256i test_mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_srl_epi32
+ // CHECK-LABEL: test_mm256_mask_srl_epi32
// CHECK: @llvm.x86.avx2.psrl.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srl_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_srl_epi32
+ // CHECK-LABEL: test_mm256_maskz_srl_epi32
// CHECK: @llvm.x86.avx2.psrl.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srl_epi32(__U, __A, __B);
}
__m128i test_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_srli_epi32
+ // CHECK-LABEL: test_mm_mask_srli_epi32
// CHECK: @llvm.x86.sse2.psrli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srli_epi32(__W, __U, __A, 5);
}
__m128i test_mm_mask_srli_epi32_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_srli_epi32_2
+ // CHECK-LABEL: test_mm_mask_srli_epi32_2
// CHECK: @llvm.x86.sse2.psrli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srli_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_srli_epi32
+ // CHECK-LABEL: test_mm_maskz_srli_epi32
// CHECK: @llvm.x86.sse2.psrli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srli_epi32(__U, __A, 5);
}
__m128i test_mm_maskz_srli_epi32_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_srli_epi32_2
+ // CHECK-LABEL: test_mm_maskz_srli_epi32_2
// CHECK: @llvm.x86.sse2.psrli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srli_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_srli_epi32
+ // CHECK-LABEL: test_mm256_mask_srli_epi32
// CHECK: @llvm.x86.avx2.psrli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srli_epi32(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_srli_epi32((__m256i)(__v8si){100, 101, 102, 103, 104, 105, 106, 107}, (__mmask8)0xff, (__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, 3), 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0));
__m256i test_mm256_mask_srli_epi32_2(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_srli_epi32_2
+ // CHECK-LABEL: test_mm256_mask_srli_epi32_2
// CHECK: @llvm.x86.avx2.psrli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srli_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_srli_epi32
+ // CHECK-LABEL: test_mm256_maskz_srli_epi32
// CHECK: @llvm.x86.avx2.psrli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srli_epi32(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_srli_epi32((__mmask8)0x71, (__m256i)(__v8si){0xff, 1, 2, 3, 4, 5, 6, 7}, 1), 0x7f, 0, 0, 0, 0x2, 0x2, 0x3, 0));
__m256i test_mm256_maskz_srli_epi32_2(__mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_srli_epi32_2
+ // CHECK-LABEL: test_mm256_maskz_srli_epi32_2
// CHECK: @llvm.x86.avx2.psrli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srli_epi32(__U, __A, __B);
}
__m128i test_mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_srl_epi64
+ // CHECK-LABEL: test_mm_mask_srl_epi64
// CHECK: @llvm.x86.sse2.psrl.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srl_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_srl_epi64
+ // CHECK-LABEL: test_mm_maskz_srl_epi64
// CHECK: @llvm.x86.sse2.psrl.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srl_epi64(__U, __A, __B);
}
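+// Sketch (hypothetical addition): assuming _mm_maskz_srl_epi64 is constexpr-enabled;
+// lane 0 is zeroed by the mask, lane 1 is 256 >> 3.
+TEST_CONSTEXPR(match_m128i(_mm_maskz_srl_epi64((__mmask8)0x2, (__m128i)(__v2di){-8, 256}, (__m128i)(__v2di){3, 0}), 0, 32));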
__m256i test_mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_srl_epi64
+ // CHECK-LABEL: test_mm256_mask_srl_epi64
// CHECK: @llvm.x86.avx2.psrl.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srl_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_srl_epi64
+ // CHECK-LABEL: test_mm256_maskz_srl_epi64
// CHECK: @llvm.x86.avx2.psrl.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srl_epi64(__U, __A, __B);
}
__m128i test_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_srli_epi64
+ // CHECK-LABEL: test_mm_mask_srli_epi64
// CHECK: @llvm.x86.sse2.psrli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srli_epi64(__W, __U, __A, 5);
}
__m128i test_mm_mask_srli_epi64_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_srli_epi64_2
+ // CHECK-LABEL: test_mm_mask_srli_epi64_2
// CHECK: @llvm.x86.sse2.psrli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srli_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_srli_epi64
+ // CHECK-LABEL: test_mm_maskz_srli_epi64
// CHECK: @llvm.x86.sse2.psrli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srli_epi64(__U, __A, 5);
}
__m128i test_mm_maskz_srli_epi64_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_srli_epi64_2
+ // CHECK-LABEL: test_mm_maskz_srli_epi64_2
// CHECK: @llvm.x86.sse2.psrli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srli_epi64(__U, __A, __B);
}
__m256i test_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_srli_epi64
+ // CHECK-LABEL: test_mm256_mask_srli_epi64
// CHECK: @llvm.x86.avx2.psrli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srli_epi64(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v4di(_mm256_mask_srli_epi64((__m256i)(__v4di){100, 101, 102, 103}, (__mmask8)0b1010, (__m256i)(__v4di){0, 0xff80, 2, 3}, 1), 100, 0x7fc0, 102, 0x1));
__m256i test_mm256_mask_srli_epi64_2(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_srli_epi64_2
+ // CHECK-LABEL: test_mm256_mask_srli_epi64_2
// CHECK: @llvm.x86.avx2.psrli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srli_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_srli_epi64
+ // CHECK-LABEL: test_mm256_maskz_srli_epi64
// CHECK: @llvm.x86.avx2.psrli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srli_epi64(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_srli_epi64((__mmask8)0x71, (__m256i)(__v4di){0xff, 1, 2, 3}, 1), 0x7f, 0, 0, 0));
-__m256i test_mm256_maskz_srli_epi64_2(__mmask8 __U,__m256i __A, unsigned int __B) {
+__m256i test_mm256_maskz_srli_epi64_2(__mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_srli_epi64_2
+ // CHECK-LABEL: test_mm256_maskz_srli_epi64_2
// CHECK: @llvm.x86.avx2.psrli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srli_epi64(__U, __A, __B);
}
__m128i test_mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_sll_epi32
+ // CHECK-LABEL: test_mm_mask_sll_epi32
// CHECK: @llvm.x86.sse2.psll.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_sll_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_sll_epi32
+ // CHECK-LABEL: test_mm_maskz_sll_epi32
// CHECK: @llvm.x86.sse2.psll.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_sll_epi32(__U, __A, __B);
}
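+// Sketch (hypothetical addition): assuming _mm_maskz_sll_epi32 is constexpr-enabled;
+// mask 0x9 keeps lanes 0 and 3 of the left-shifted input.
+TEST_CONSTEXPR(match_v4si(_mm_maskz_sll_epi32((__mmask8)0x9, (__m128i)(__v4si){1, 2, 3, 4}, (__m128i)(__v2di){4, 0}), 16, 0, 0, 64));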
__m256i test_mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_sll_epi32
+ // CHECK-LABEL: test_mm256_mask_sll_epi32
// CHECK: @llvm.x86.avx2.psll.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_sll_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_sll_epi32
+ // CHECK-LABEL: test_mm256_maskz_sll_epi32
// CHECK: @llvm.x86.avx2.psll.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_sll_epi32(__U, __A, __B);
}
__m128i test_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_slli_epi32
+ // CHECK-LABEL: test_mm_mask_slli_epi32
// CHECK: @llvm.x86.sse2.pslli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_slli_epi32(__W, __U, __A, 5);
}
__m128i test_mm_mask_slli_epi32_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_slli_epi32_2
+ // CHECK-LABEL: test_mm_mask_slli_epi32_2
// CHECK: @llvm.x86.sse2.pslli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_slli_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_slli_epi32
+ // CHECK-LABEL: test_mm_maskz_slli_epi32
// CHECK: @llvm.x86.sse2.pslli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_slli_epi32(__U, __A, 5);
}
__m128i test_mm_maskz_slli_epi32_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_slli_epi32_2
+ // CHECK-LABEL: test_mm_maskz_slli_epi32_2
// CHECK: @llvm.x86.sse2.pslli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_slli_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_slli_epi32
+ // CHECK-LABEL: test_mm256_mask_slli_epi32
// CHECK: @llvm.x86.avx2.pslli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_slli_epi32(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_slli_epi32((__m256i)(__v8si){100, 101, 102, 103, 104, 105, 106, 107}, (__mmask8)0xff, (__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, 3), 0x0, 0x8, 0x10, 0x18, 0x20, 0x28, 0x30, 0x38));
__m256i test_mm256_mask_slli_epi32_2(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_slli_epi32_2
+ // CHECK-LABEL: test_mm256_mask_slli_epi32_2
// CHECK: @llvm.x86.avx2.pslli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_slli_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_slli_epi32
+ // CHECK-LABEL: test_mm256_maskz_slli_epi32
// CHECK: @llvm.x86.avx2.pslli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_slli_epi32(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_slli_epi32((__mmask8)0x00ffcc71, (__m256i)(__v8si){0xff, 1, 2, 3, 4, 5, 6, 7}, 32), 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_slli_epi32((__mmask8)0, (__m256i)(__v8si){0xff, 1, 2, 3, 4, 5, 6, 7}, 16), 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_slli_epi32((__mmask8)0xff, (__m256i)(__v8si){0xff, 1, 2, 3, 4, 5, 6, 7}, 1), 0x1fe, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe));
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_slli_epi32((__mmask8)0x7, (__m256i)(__v8si){0xff, 1, 2, 3, 4, 5, 6, 7}, 1), 0x1fe, 0x2, 0x4, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_slli_epi32((__mmask8)0x71, (__m256i)(__v8si){0xff, 1, 2, 3, 4, 5, 6, 7}, 1), 0x1fe, 0, 0, 0, 0x8, 0xa, 0xc, 0));
__m256i test_mm256_maskz_slli_epi32_2(__mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_slli_epi32_2
+ // CHECK-LABEL: test_mm256_maskz_slli_epi32_2
// CHECK: @llvm.x86.avx2.pslli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_slli_epi32(__U, __A, __B);
}
__m128i test_mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_sll_epi64
+ // CHECK-LABEL: test_mm_mask_sll_epi64
// CHECK: @llvm.x86.sse2.psll.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_sll_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_sll_epi64
+ // CHECK-LABEL: test_mm_maskz_sll_epi64
// CHECK: @llvm.x86.sse2.psll.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_sll_epi64(__U, __A, __B);
}
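+// Sketch (hypothetical addition): assuming _mm_maskz_sll_epi64 is constexpr-enabled;
+// only lane 0 (3 << 2) survives the mask.
+TEST_CONSTEXPR(match_m128i(_mm_maskz_sll_epi64((__mmask8)0x1, (__m128i)(__v2di){3, 5}, (__m128i)(__v2di){2, 0}), 12, 0));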
__m256i test_mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_sll_epi64
+ // CHECK-LABEL: test_mm256_mask_sll_epi64
// CHECK: @llvm.x86.avx2.psll.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_sll_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_sll_epi64
+ // CHECK-LABEL: test_mm256_maskz_sll_epi64
// CHECK: @llvm.x86.avx2.psll.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_sll_epi64(__U, __A, __B);
}
__m128i test_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_slli_epi64
+ // CHECK-LABEL: test_mm_mask_slli_epi64
// CHECK: @llvm.x86.sse2.pslli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_slli_epi64(__W, __U, __A, 5);
}
__m128i test_mm_mask_slli_epi64_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_slli_epi64_2
+ // CHECK-LABEL: test_mm_mask_slli_epi64_2
// CHECK: @llvm.x86.sse2.pslli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_slli_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_slli_epi64
+ // CHECK-LABEL: test_mm_maskz_slli_epi64
// CHECK: @llvm.x86.sse2.pslli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_slli_epi64(__U, __A, 5);
}
__m128i test_mm_maskz_slli_epi64_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_slli_epi64_2
+ // CHECK-LABEL: test_mm_maskz_slli_epi64_2
// CHECK: @llvm.x86.sse2.pslli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_slli_epi64(__U, __A, __B);
}
__m256i test_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_slli_epi64
+ // CHECK-LABEL: test_mm256_mask_slli_epi64
// CHECK: @llvm.x86.avx2.pslli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_slli_epi64(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v4di(_mm256_mask_slli_epi64((__m256i)(__v4di){100, 101, 102, 103}, (__mmask8)0b1010, (__m256i)(__v4di){0, 1, 2, 3}, 4), 100, 0x10, 102, 0x30));
__m256i test_mm256_mask_slli_epi64_2(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_slli_epi64_2
+ // CHECK-LABEL: test_mm256_mask_slli_epi64_2
// CHECK: @llvm.x86.avx2.pslli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_slli_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_slli_epi64
+ // CHECK-LABEL: test_mm256_maskz_slli_epi64
// CHECK: @llvm.x86.avx2.pslli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_slli_epi64(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_slli_epi64((__mmask8)0x00ffcc71, (__m256i)(__v4di){0xff, 1, 2, 3}, 64), 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_slli_epi64((__mmask8)0, (__m256i)(__v4di){0xff, 1, 2, 3}, 16), 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_slli_epi64((__mmask8)0xff, (__m256i)(__v4di){0xff, 1, 2, 3}, 1), 0x1fe, 0x2, 0x4, 0x6));
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_slli_epi64((__mmask8)0x7, (__m256i)(__v4di){0xff, 1, 2, 3}, 1), 0x1fe, 0x2, 0x4, 0));
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_slli_epi64((__mmask8)0x71, (__m256i)(__v4di){0xff, 1, 2, 3}, 1), 0x1fe, 0, 0, 0));
__m256i test_mm256_maskz_slli_epi64_2(__mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_slli_epi64_2
+ // CHECK-LABEL: test_mm256_maskz_slli_epi64_2
// CHECK: @llvm.x86.avx2.pslli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_slli_epi64(__U, __A, __B);
}
__m128i test_mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_srav_epi32
+ // CHECK-LABEL: test_mm_mask_srav_epi32
// CHECK: @llvm.x86.avx2.psrav.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srav_epi32(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v4si(_mm_mask_srav_epi32((__m128i)(__v4si){99, 99, 99, 99}, (__mmask8)0x0E, (__m128i)(__v4si){1, -2, 3, -4}, (__m128i)(__v4si){1, 2, 3, -4}), 99, -1, 0, -1));
__m128i test_mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_srav_epi32
+ // CHECK-LABEL: test_mm_maskz_srav_epi32
// CHECK: @llvm.x86.avx2.psrav.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srav_epi32(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_v4si(_mm_maskz_srav_epi32((__mmask8)0xC, (__m128i)(__v4si){1, -2, 3, -4}, (__m128i)(__v4si){1, 2, 3, -4}), 0, 0, 0, -1));
__m256i test_mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_srav_epi32
+ // CHECK-LABEL: test_mm256_mask_srav_epi32
// CHECK: @llvm.x86.avx2.psrav.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srav_epi32(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_srav_epi32((__m256i)(__v8si){99, 99, 99, 99, 99, 99, 99, 99}, (__mmask8)0x3C, (__m256i)(__v8si){1, -2, 3, -4, 5, -6, 7, -8}, (__m256i)(__v8si){1, 2, 3, 4, -17, 31, 33, 29}), 99, 99, 0, -1, 0, -1, 99, 99));
__m256i test_mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_srav_epi32
+ // CHECK-LABEL: test_mm256_maskz_srav_epi32
// CHECK: @llvm.x86.avx2.psrav.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srav_epi32(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_srav_epi32((__mmask8)0x9E, (__m256i)(__v8si){1, -2, 3, -4, 5, -6, 7, -8}, (__m256i)(__v8si){1, 2, 3, 4, -17, 31, 33, 29}), 0, -1, 0, -1, 0, 0, 0, -1));
__m128i test_mm_srav_epi64(__m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_srav_epi64
+ // CHECK-LABEL: test_mm_srav_epi64
// CHECK: @llvm.x86.avx512.psrav.q.128
return _mm_srav_epi64(__X, __Y);
}
__m128i test_mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_srav_epi64
+ // CHECK-LABEL: test_mm_mask_srav_epi64
// CHECK: @llvm.x86.avx512.psrav.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srav_epi64(__W, __U, __X, __Y);
}
__m128i test_mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_srav_epi64
+ // CHECK-LABEL: test_mm_maskz_srav_epi64
// CHECK: @llvm.x86.avx512.psrav.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srav_epi64(__U, __X, __Y);
}
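+// Sketch (hypothetical addition): assuming _mm_maskz_srav_epi64 is constexpr-enabled;
+// lane 0 is masked off, lane 1 is 64 >> 3 arithmetic.
+TEST_CONSTEXPR(match_m128i(_mm_maskz_srav_epi64((__mmask8)0x2, (__m128i)(__v2di){-16, 64}, (__m128i)(__v2di){2, 3}), 0, 8));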
__m256i test_mm256_srav_epi64(__m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_srav_epi64
+ // CHECK-LABEL: test_mm256_srav_epi64
// CHECK: @llvm.x86.avx512.psrav.q.256
return _mm256_srav_epi64(__X, __Y);
}
__m256i test_mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_srav_epi64
+ // CHECK-LABEL: test_mm256_mask_srav_epi64
// CHECK: @llvm.x86.avx512.psrav.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srav_epi64(__W, __U, __X, __Y);
}
__m256i test_mm256_maskz_srav_epi64(__mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_srav_epi64
+ // CHECK-LABEL: test_mm256_maskz_srav_epi64
// CHECK: @llvm.x86.avx512.psrav.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srav_epi64(__U, __X, __Y);
}
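+// Sketch (hypothetical addition): assuming _mm256_maskz_srav_epi64 is constexpr-enabled;
+// an out-of-range count (70) sign-fills, so the positive lane 2 becomes 0.
+TEST_CONSTEXPR(match_m256i(_mm256_maskz_srav_epi64((__mmask8)0x5, (__m256i)(__v4di){32, 8, 64, 100}, (__m256i)(__v4di){3, 1, 70, 2}), 4, 0, 0, 0));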
void test_mm_store_epi32(void *__P, __m128i __A) {
- // CHECK-LABEL: @test_mm_store_epi32
+ // CHECK-LABEL: test_mm_store_epi32
// CHECK: store <2 x i64> %{{.*}}, ptr %{{.*}}
return _mm_store_epi32(__P, __A);
}
void test_mm_mask_store_epi32(void *__P, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_store_epi32
+ // CHECK-LABEL: test_mm_mask_store_epi32
- // CHECK: @llvm.masked.store.v4i32.p0(<4 x i32> %{{.*}}, ptr %{{.}}, i32 16, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4i32.p0(<4 x i32> %{{.*}}, ptr %{{.*}}, i32 16, <4 x i1> %{{.*}})
return _mm_mask_store_epi32(__P, __U, __A);
}
void test_mm256_store_epi32(void *__P, __m256i __A) {
- // CHECK-LABEL: @test_mm256_store_epi32
+ // CHECK-LABEL: test_mm256_store_epi32
// CHECK: store <4 x i64> %{{.*}}, ptr %{{.*}}
return _mm256_store_epi32(__P, __A);
}
void test_mm256_mask_store_epi32(void *__P, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_store_epi32
+ // CHECK-LABEL: test_mm256_mask_store_epi32
- // CHECK: @llvm.masked.store.v8i32.p0(<8 x i32> %{{.*}}, ptr %{{.}}, i32 32, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8i32.p0(<8 x i32> %{{.*}}, ptr %{{.*}}, i32 32, <8 x i1> %{{.*}})
return _mm256_mask_store_epi32(__P, __U, __A);
}
__m128i test_mm_mask_mov_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_mov_epi32
+ // CHECK-LABEL: test_mm_mask_mov_epi32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_mov_epi32(__W, __U, __A);
}
__m128i test_mm_maskz_mov_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_mov_epi32
+ // CHECK-LABEL: test_mm_maskz_mov_epi32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_mov_epi32(__U, __A);
}
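+// Sketch (hypothetical addition): assuming _mm_maskz_mov_epi32 is constexpr-enabled
+// (it lowers to a plain select); mask 0x6 keeps lanes 1 and 2.
+TEST_CONSTEXPR(match_v4si(_mm_maskz_mov_epi32((__mmask8)0x6, (__m128i)(__v4si){10, 20, 30, 40}), 0, 20, 30, 0));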
__m256i test_mm256_mask_mov_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_mov_epi32
+ // CHECK-LABEL: test_mm256_mask_mov_epi32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_mov_epi32(__W, __U, __A);
}
__m256i test_mm256_maskz_mov_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_mov_epi32
+ // CHECK-LABEL: test_mm256_maskz_mov_epi32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_mov_epi32(__U, __A);
}
__m128i test_mm_mask_mov_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_mov_epi64
+ // CHECK-LABEL: test_mm_mask_mov_epi64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_mov_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_mov_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_mov_epi64
+ // CHECK-LABEL: test_mm_maskz_mov_epi64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_mov_epi64(__U, __A);
}
__m256i test_mm256_mask_mov_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_mov_epi64
+ // CHECK-LABEL: test_mm256_mask_mov_epi64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_mov_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_mov_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_mov_epi64
+ // CHECK-LABEL: test_mm256_maskz_mov_epi64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_mov_epi64(__U, __A);
}
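+// Sketch (hypothetical addition): assuming _mm256_maskz_mov_epi64 is constexpr-enabled;
+// mask 0xA keeps lanes 1 and 3.
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_mov_epi64((__mmask8)0xA, (__m256i)(__v4di){1, 2, 3, 4}), 0, 2, 0, 4));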
__m128i test_mm_load_epi32(void const *__P) {
- // CHECK-LABEL: @test_mm_load_epi32
+ // CHECK-LABEL: test_mm_load_epi32
// CHECK: load <2 x i64>, ptr %{{.*}}
return _mm_load_epi32(__P);
}
__m128i test_mm_mask_load_epi32(__m128i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_load_epi32
+ // CHECK-LABEL: test_mm_mask_load_epi32
// CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_mask_load_epi32(__W, __U, __P);
}
__m128i test_mm_maskz_load_epi32(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_load_epi32
+ // CHECK-LABEL: test_mm_maskz_load_epi32
// CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_maskz_load_epi32(__U, __P);
}
__m256i test_mm256_load_epi32(void const *__P) {
- // CHECK-LABEL: @test_mm256_load_epi32
+ // CHECK-LABEL: test_mm256_load_epi32
// CHECK: load <4 x i64>, ptr %{{.*}}
return _mm256_load_epi32(__P);
}
__m256i test_mm256_mask_load_epi32(__m256i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_load_epi32
+ // CHECK-LABEL: test_mm256_mask_load_epi32
// CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_mask_load_epi32(__W, __U, __P);
}
__m256i test_mm256_maskz_load_epi32(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_load_epi32
+ // CHECK-LABEL: test_mm256_maskz_load_epi32
// CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_maskz_load_epi32(__U, __P);
}
__m128i test_mm_load_epi64(void const *__P) {
- // CHECK-LABEL: @test_mm_load_epi64
+ // CHECK-LABEL: test_mm_load_epi64
// CHECK: load <2 x i64>, ptr %{{.*}}
return _mm_load_epi64(__P);
}
__m128i test_mm_mask_load_epi64(__m128i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_load_epi64
+ // CHECK-LABEL: test_mm_mask_load_epi64
// CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_mask_load_epi64(__W, __U, __P);
}
__m128i test_mm_maskz_load_epi64(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_load_epi64
+ // CHECK-LABEL: test_mm_maskz_load_epi64
// CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_maskz_load_epi64(__U, __P);
}
__m256i test_mm256_load_epi64(void const *__P) {
- // CHECK-LABEL: @test_mm256_load_epi64
+ // CHECK-LABEL: test_mm256_load_epi64
// CHECK: load <4 x i64>, ptr %{{.*}}
return _mm256_load_epi64(__P);
}
__m256i test_mm256_mask_load_epi64(__m256i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_load_epi64
+ // CHECK-LABEL: test_mm256_mask_load_epi64
// CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_mask_load_epi64(__W, __U, __P);
}
__m256i test_mm256_maskz_load_epi64(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_load_epi64
+ // CHECK-LABEL: test_mm256_maskz_load_epi64
// CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_maskz_load_epi64(__U, __P);
}
void test_mm_store_epi64(void *__P, __m128i __A) {
- // CHECK-LABEL: @test_mm_store_epi64
+ // CHECK-LABEL: test_mm_store_epi64
// CHECK: store <2 x i64> %{{.*}}, ptr %{{.*}}
return _mm_store_epi64(__P, __A);
}
void test_mm_mask_store_epi64(void *__P, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_store_epi64
+ // CHECK-LABEL: test_mm_mask_store_epi64
// CHECK: @llvm.masked.store.v2i64.p0(<2 x i64> %{{.*}}, ptr %{{.*}}, i32 16, <2 x i1> %{{.*}})
return _mm_mask_store_epi64(__P, __U, __A);
}
void test_mm256_store_epi64(void *__P, __m256i __A) {
- // CHECK-LABEL: @test_mm256_store_epi64
+ // CHECK-LABEL: test_mm256_store_epi64
// CHECK: store <4 x i64> %{{.*}}, ptr %{{.*}}
return _mm256_store_epi64(__P, __A);
}
void test_mm256_mask_store_epi64(void *__P, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_store_epi64
+ // CHECK-LABEL: test_mm256_mask_store_epi64
// CHECK: @llvm.masked.store.v4i64.p0(<4 x i64> %{{.*}}, ptr %{{.*}}, i32 32, <4 x i1> %{{.*}})
return _mm256_mask_store_epi64(__P, __U, __A);
}
__m128d test_mm_mask_movedup_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_movedup_pd
+ // CHECK-LABEL: test_mm_mask_movedup_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> zeroinitializer
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_movedup_pd(__W, __U, __A);
}
__m128d test_mm_maskz_movedup_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_movedup_pd
+ // CHECK-LABEL: test_mm_maskz_movedup_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> zeroinitializer
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_movedup_pd(__U, __A);
}
__m256d test_mm256_mask_movedup_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_movedup_pd
+ // CHECK-LABEL: test_mm256_mask_movedup_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_movedup_pd(__W, __U, __A);
}
__m256d test_mm256_maskz_movedup_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_movedup_pd
+ // CHECK-LABEL: test_mm256_maskz_movedup_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_movedup_pd(__U, __A);
}
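+// Sketch (hypothetical addition): assuming _mm256_maskz_movedup_pd and a match_m256d
+// helper are available in constexpr form; movedup duplicates the even lanes before masking.
+TEST_CONSTEXPR(match_m256d(_mm256_maskz_movedup_pd((__mmask8)0x5, (__m256d){1.0, 2.0, 3.0, 4.0}), 1.0, 0.0, 3.0, 0.0));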
__m128i test_mm_mask_set1_epi32(__m128i __O, __mmask8 __M) {
- // CHECK-LABEL: @test_mm_mask_set1_epi32
+ // CHECK-LABEL: test_mm_mask_set1_epi32
// CHECK: insertelement <4 x i32> poison, i32 %{{.*}}, i32 0
- // CHECK: insertelement <4 x i32> %{{.*}}32 1
- // CHECK: insertelement <4 x i32> %{{.*}}32 2
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 1
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 2
@@ -6678,7 +6764,7 @@ __m128i test_mm_mask_set1_epi32(__m128i __O, __mmask8 __M) {
}
__m128i test_mm_maskz_set1_epi32(__mmask8 __M) {
- // CHECK-LABEL: @test_mm_maskz_set1_epi32
+ // CHECK-LABEL: test_mm_maskz_set1_epi32
// CHECK: insertelement <4 x i32> poison, i32 %{{.*}}, i32 0
- // CHECK: insertelement <4 x i32> %{{.*}}32 1
- // CHECK: insertelement <4 x i32> %{{.*}}32 2
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 1
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 2
@@ -6689,7 +6775,7 @@ __m128i test_mm_maskz_set1_epi32(__mmask8 __M) {
}
__m256i test_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M) {
- // CHECK-LABEL: @test_mm256_mask_set1_epi32
+ // CHECK-LABEL: test_mm256_mask_set1_epi32
// CHECK: insertelement <8 x i32> poison, i32 %{{.*}}, i32 0
// CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 1
// CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 2
@@ -6703,7 +6789,7 @@ __m256i test_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M) {
}
__m256i test_mm256_maskz_set1_epi32(__mmask8 __M) {
- // CHECK-LABEL: @test_mm256_maskz_set1_epi32
+ // CHECK-LABEL: test_mm256_maskz_set1_epi32
// CHECK: insertelement <8 x i32> poison, i32 %{{.*}}, i32 0
// CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 1
// CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 2
@@ -6717,7 +6803,7 @@ __m256i test_mm256_maskz_set1_epi32(__mmask8 __M) {
}
__m128i test_mm_mask_set1_epi64(__m128i __O, __mmask8 __M, long long __A) {
- // CHECK-LABEL: @test_mm_mask_set1_epi64
+ // CHECK-LABEL: test_mm_mask_set1_epi64
// CHECK: insertelement <2 x i64> poison, i64 %{{.*}}, i32 0
// CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 1
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -6726,7 +6812,7 @@ __m128i test_mm_mask_set1_epi64(__m128i __O, __mmask8 __M, long long __A) {
}
__m128i test_mm_maskz_set1_epi64(__mmask8 __M, long long __A) {
- // CHECK-LABEL: @test_mm_maskz_set1_epi64
+ // CHECK-LABEL: test_mm_maskz_set1_epi64
// CHECK: insertelement <2 x i64> poison, i64 %{{.*}}, i32 0
// CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 1
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -6735,7 +6821,7 @@ __m128i test_mm_maskz_set1_epi64(__mmask8 __M, long long __A) {
}
__m256i test_mm256_mask_set1_epi64(__m256i __O, __mmask8 __M, long long __A) {
- // CHECK-LABEL: @test_mm256_mask_set1_epi64
+ // CHECK-LABEL: test_mm256_mask_set1_epi64
// CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i32 0
// CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 1
// CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 2
@@ -6746,7 +6832,7 @@ __m256i test_mm256_mask_set1_epi64(__m256i __O, __mmask8 __M, long long __A) {
}
__m256i test_mm256_maskz_set1_epi64(__mmask8 __M, long long __A) {
- // CHECK-LABEL: @test_mm256_maskz_set1_epi64
+ // CHECK-LABEL: test_mm256_maskz_set1_epi64
// CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i32 0
// CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 1
// CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 2
@@ -6757,646 +6843,646 @@ __m256i test_mm256_maskz_set1_epi64(__mmask8 __M, long long __A) {
}
__m128d test_mm_fixupimm_pd(__m128d __A, __m128d __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_fixupimm_pd
+ // CHECK-LABEL: test_mm_fixupimm_pd
// CHECK: @llvm.x86.avx512.mask.fixupimm.pd.128
return _mm_fixupimm_pd(__A, __B, __C, 5);
}
__m128d test_mm_mask_fixupimm_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_mask_fixupimm_pd
+ // CHECK-LABEL: test_mm_mask_fixupimm_pd
// CHECK: @llvm.x86.avx512.mask.fixupimm.pd.128
return _mm_mask_fixupimm_pd(__A, __U, __B, __C, 5);
}
__m128d test_mm_maskz_fixupimm_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_maskz_fixupimm_pd
+ // CHECK-LABEL: test_mm_maskz_fixupimm_pd
// CHECK: @llvm.x86.avx512.maskz.fixupimm.pd.128
return _mm_maskz_fixupimm_pd(__U, __A, __B, __C, 5);
}
__m256d test_mm256_fixupimm_pd(__m256d __A, __m256d __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_fixupimm_pd
+ // CHECK-LABEL: test_mm256_fixupimm_pd
// CHECK: @llvm.x86.avx512.mask.fixupimm.pd.256
return _mm256_fixupimm_pd(__A, __B, __C, 5);
}
__m256d test_mm256_mask_fixupimm_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_mask_fixupimm_pd
+ // CHECK-LABEL: test_mm256_mask_fixupimm_pd
// CHECK: @llvm.x86.avx512.mask.fixupimm.pd.256
return _mm256_mask_fixupimm_pd(__A, __U, __B, __C, 5);
}
__m256d test_mm256_maskz_fixupimm_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_maskz_fixupimm_pd
+ // CHECK-LABEL: test_mm256_maskz_fixupimm_pd
// CHECK: @llvm.x86.avx512.maskz.fixupimm.pd.256
return _mm256_maskz_fixupimm_pd(__U, __A, __B, __C, 5);
}
__m128 test_mm_fixupimm_ps(__m128 __A, __m128 __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_fixupimm_ps
+ // CHECK-LABEL: test_mm_fixupimm_ps
// CHECK: @llvm.x86.avx512.mask.fixupimm.ps.128
return _mm_fixupimm_ps(__A, __B, __C, 5);
}
__m128 test_mm_mask_fixupimm_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_mask_fixupimm_ps
+ // CHECK-LABEL: test_mm_mask_fixupimm_ps
// CHECK: @llvm.x86.avx512.mask.fixupimm.ps.128
return _mm_mask_fixupimm_ps(__A, __U, __B, __C, 5);
}
__m128 test_mm_maskz_fixupimm_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_maskz_fixupimm_ps
+ // CHECK-LABEL: test_mm_maskz_fixupimm_ps
// CHECK: @llvm.x86.avx512.maskz.fixupimm.ps.128
return _mm_maskz_fixupimm_ps(__U, __A, __B, __C, 5);
}
__m256 test_mm256_fixupimm_ps(__m256 __A, __m256 __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_fixupimm_ps
+ // CHECK-LABEL: test_mm256_fixupimm_ps
// CHECK: @llvm.x86.avx512.mask.fixupimm.ps.256
return _mm256_fixupimm_ps(__A, __B, __C, 5);
}
__m256 test_mm256_mask_fixupimm_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_mask_fixupimm_ps
+ // CHECK-LABEL: test_mm256_mask_fixupimm_ps
// CHECK: @llvm.x86.avx512.mask.fixupimm.ps.256
return _mm256_mask_fixupimm_ps(__A, __U, __B, __C, 5);
}
__m256 test_mm256_maskz_fixupimm_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_maskz_fixupimm_ps
+ // CHECK-LABEL: test_mm256_maskz_fixupimm_ps
// CHECK: @llvm.x86.avx512.maskz.fixupimm.ps.256
return _mm256_maskz_fixupimm_ps(__U, __A, __B, __C, 5);
}
__m128d test_mm_mask_load_pd(__m128d __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_load_pd
+ // CHECK-LABEL: test_mm_mask_load_pd
// CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_mask_load_pd(__W, __U, __P);
}
__m128d test_mm_maskz_load_pd(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_load_pd
+ // CHECK-LABEL: test_mm_maskz_load_pd
// CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_maskz_load_pd(__U, __P);
}
__m256d test_mm256_mask_load_pd(__m256d __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_load_pd
+ // CHECK-LABEL: test_mm256_mask_load_pd
// CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_mask_load_pd(__W, __U, __P);
}
__m256d test_mm256_maskz_load_pd(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_load_pd
+ // CHECK-LABEL: test_mm256_maskz_load_pd
// CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_maskz_load_pd(__U, __P);
}
__m128 test_mm_mask_load_ps(__m128 __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_load_ps
+ // CHECK-LABEL: test_mm_mask_load_ps
// CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_mask_load_ps(__W, __U, __P);
}
__m128 test_mm_maskz_load_ps(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_load_ps
+ // CHECK-LABEL: test_mm_maskz_load_ps
// CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_maskz_load_ps(__U, __P);
}
__m256 test_mm256_mask_load_ps(__m256 __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_load_ps
+ // CHECK-LABEL: test_mm256_mask_load_ps
// CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_mask_load_ps(__W, __U, __P);
}
__m256 test_mm256_maskz_load_ps(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_load_ps
+ // CHECK-LABEL: test_mm256_maskz_load_ps
// CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_maskz_load_ps(__U, __P);
}
__m128i test_mm_loadu_epi64(void const *__P) {
- // CHECK-LABEL: @test_mm_loadu_epi64
+ // CHECK-LABEL: test_mm_loadu_epi64
// CHECK: load <2 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm_loadu_epi64(__P);
}
__m128i test_mm_mask_loadu_epi64(__m128i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_loadu_epi64
+ // CHECK-LABEL: test_mm_mask_loadu_epi64
// CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_mask_loadu_epi64(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi64(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_loadu_epi64
+ // CHECK-LABEL: test_mm_maskz_loadu_epi64
// CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_maskz_loadu_epi64(__U, __P);
}
__m256i test_mm256_loadu_epi64(void const *__P) {
- // CHECK-LABEL: @test_mm256_loadu_epi64
+ // CHECK-LABEL: test_mm256_loadu_epi64
// CHECK: load <4 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm256_loadu_epi64(__P);
}
__m256i test_mm256_mask_loadu_epi64(__m256i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_loadu_epi64
+ // CHECK-LABEL: test_mm256_mask_loadu_epi64
// CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_mask_loadu_epi64(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi64(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_loadu_epi64
+ // CHECK-LABEL: test_mm256_maskz_loadu_epi64
// CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_maskz_loadu_epi64(__U, __P);
}
__m128i test_mm_loadu_epi32(void const *__P) {
- // CHECK-LABEL: @test_mm_loadu_epi32
+ // CHECK-LABEL: test_mm_loadu_epi32
// CHECK: load <2 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm_loadu_epi32(__P);
}
__m128i test_mm_mask_loadu_epi32(__m128i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_loadu_epi32
+ // CHECK-LABEL: test_mm_mask_loadu_epi32
// CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_mask_loadu_epi32(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi32(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_loadu_epi32
+ // CHECK-LABEL: test_mm_maskz_loadu_epi32
// CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_maskz_loadu_epi32(__U, __P);
}
__m256i test_mm256_loadu_epi32(void const *__P) {
- // CHECK-LABEL: @test_mm256_loadu_epi32
+ // CHECK-LABEL: test_mm256_loadu_epi32
// CHECK: load <4 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm256_loadu_epi32(__P);
}
__m256i test_mm256_mask_loadu_epi32(__m256i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_loadu_epi32
+ // CHECK-LABEL: test_mm256_mask_loadu_epi32
// CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_mask_loadu_epi32(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi32(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_loadu_epi32
+ // CHECK-LABEL: test_mm256_maskz_loadu_epi32
// CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_maskz_loadu_epi32(__U, __P);
}
__m128d test_mm_mask_loadu_pd(__m128d __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_loadu_pd
+ // CHECK-LABEL: test_mm_mask_loadu_pd
// CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_mask_loadu_pd(__W, __U, __P);
}
__m128d test_mm_maskz_loadu_pd(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_loadu_pd
+ // CHECK-LABEL: test_mm_maskz_loadu_pd
// CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_maskz_loadu_pd(__U, __P);
}
__m256d test_mm256_mask_loadu_pd(__m256d __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_loadu_pd
+ // CHECK-LABEL: test_mm256_mask_loadu_pd
// CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_mask_loadu_pd(__W, __U, __P);
}
__m256d test_mm256_maskz_loadu_pd(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_loadu_pd
+ // CHECK-LABEL: test_mm256_maskz_loadu_pd
// CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_maskz_loadu_pd(__U, __P);
}
__m128 test_mm_mask_loadu_ps(__m128 __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_loadu_ps
+ // CHECK-LABEL: test_mm_mask_loadu_ps
// CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_mask_loadu_ps(__W, __U, __P);
}
__m128 test_mm_maskz_loadu_ps(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_loadu_ps
+ // CHECK-LABEL: test_mm_maskz_loadu_ps
// CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_maskz_loadu_ps(__U, __P);
}
__m256 test_mm256_mask_loadu_ps(__m256 __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_loadu_ps
+ // CHECK-LABEL: test_mm256_mask_loadu_ps
// CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_mask_loadu_ps(__W, __U, __P);
}
__m256 test_mm256_maskz_loadu_ps(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_loadu_ps
+ // CHECK-LABEL: test_mm256_maskz_loadu_ps
// CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_maskz_loadu_ps(__U, __P);
}
void test_mm_mask_store_pd(void *__P, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_store_pd
+ // CHECK-LABEL: test_mm_mask_store_pd
// CHECK: @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr %{{.*}}, i32 16, <2 x i1> %{{.*}})
return _mm_mask_store_pd(__P, __U, __A);
}
void test_mm256_mask_store_pd(void *__P, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_store_pd
+ // CHECK-LABEL: test_mm256_mask_store_pd
// CHECK: @llvm.masked.store.v4f64.p0(<4 x double> %{{.*}}, ptr %{{.*}}, i32 32, <4 x i1> %{{.*}})
return _mm256_mask_store_pd(__P, __U, __A);
}
void test_mm_mask_store_ps(void *__P, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_store_ps
+ // CHECK-LABEL: test_mm_mask_store_ps
// CHECK: @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr %{{.*}}, i32 16, <4 x i1> %{{.*}})
return _mm_mask_store_ps(__P, __U, __A);
}
void test_mm256_mask_store_ps(void *__P, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_store_ps
+ // CHECK-LABEL: test_mm256_mask_store_ps
// CHECK: @llvm.masked.store.v8f32.p0(<8 x float> %{{.*}}, ptr %{{.*}}, i32 32, <8 x i1> %{{.*}})
return _mm256_mask_store_ps(__P, __U, __A);
}
void test_mm_storeu_epi64(void *__p, __m128i __a) {
- // check-label: @test_mm_storeu_epi64
+ // CHECK-LABEL: test_mm_storeu_epi64
- // check: store <2 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
+ // CHECK: store <2 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm_storeu_epi64(__p, __a);
}
void test_mm_mask_storeu_epi64(void *__P, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_storeu_epi64
+ // CHECK-LABEL: test_mm_mask_storeu_epi64
// CHECK: @llvm.masked.store.v2i64.p0(<2 x i64> %{{.*}}, ptr %{{.*}}, i32 1, <2 x i1> %{{.*}})
return _mm_mask_storeu_epi64(__P, __U, __A);
}
void test_mm256_storeu_epi64(void *__P, __m256i __A) {
- // CHECK-LABEL: @test_mm256_storeu_epi64
+ // CHECK-LABEL: test_mm256_storeu_epi64
// CHECK: store <4 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm256_storeu_epi64(__P, __A);
}
void test_mm256_mask_storeu_epi64(void *__P, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_storeu_epi64
+ // CHECK-LABEL: test_mm256_mask_storeu_epi64
// CHECK: @llvm.masked.store.v4i64.p0(<4 x i64> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
return _mm256_mask_storeu_epi64(__P, __U, __A);
}
void test_mm_storeu_epi32(void *__P, __m128i __A) {
- // CHECK-LABEL: @test_mm_storeu_epi32
+ // CHECK-LABEL: test_mm_storeu_epi32
// CHECK: store <2 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm_storeu_epi32(__P, __A);
}
void test_mm_mask_storeu_epi32(void *__P, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_storeu_epi32
+ // CHECK-LABEL: test_mm_mask_storeu_epi32
// CHECK: @llvm.masked.store.v4i32.p0(<4 x i32> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
return _mm_mask_storeu_epi32(__P, __U, __A);
}
void test_mm256_storeu_epi32(void *__P, __m256i __A) {
- // CHECK-LABEL: @test_mm256_storeu_epi32
+ // CHECK-LABEL: test_mm256_storeu_epi32
// CHECK: store <4 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm256_storeu_epi32(__P, __A);
}
void test_mm256_mask_storeu_epi32(void *__P, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_storeu_epi32
+ // CHECK-LABEL: test_mm256_mask_storeu_epi32
// CHECK: @llvm.masked.store.v8i32.p0(<8 x i32> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
return _mm256_mask_storeu_epi32(__P, __U, __A);
}
void test_mm_mask_storeu_pd(void *__P, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_storeu_pd
+ // CHECK-LABEL: test_mm_mask_storeu_pd
// CHECK: @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr %{{.*}}, i32 1, <2 x i1> %{{.*}})
return _mm_mask_storeu_pd(__P, __U, __A);
}
void test_mm256_mask_storeu_pd(void *__P, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_storeu_pd
+ // CHECK-LABEL: test_mm256_mask_storeu_pd
// CHECK: @llvm.masked.store.v4f64.p0(<4 x double> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
return _mm256_mask_storeu_pd(__P, __U, __A);
}
void test_mm_mask_storeu_ps(void *__P, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_storeu_ps
+ // CHECK-LABEL: test_mm_mask_storeu_ps
// CHECK: @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
return _mm_mask_storeu_ps(__P, __U, __A);
}
void test_mm256_mask_storeu_ps(void *__P, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_storeu_ps
+ // CHECK-LABEL: test_mm256_mask_storeu_ps
// CHECK: @llvm.masked.store.v8f32.p0(<8 x float> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
return _mm256_mask_storeu_ps(__P, __U, __A);
}
__m128d test_mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_unpackhi_pd
+ // CHECK-LABEL: test_mm_mask_unpackhi_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_unpackhi_pd(__W, __U, __A, __B);
}
__m128d test_mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_unpackhi_pd
+ // CHECK-LABEL: test_mm_maskz_unpackhi_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_unpackhi_pd(__U, __A, __B);
}
__m256d test_mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_unpackhi_pd
+ // CHECK-LABEL: test_mm256_mask_unpackhi_pd
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- // CHECK: select <4 x i1> %{{.*}} <4 x double> %{{.*}}, <4 x double> %{{.*}}
+ // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_unpackhi_pd(__W, __U, __A, __B);
}
__m256d test_mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpackhi_pd
+ // CHECK-LABEL: test_mm256_maskz_unpackhi_pd
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- // CHECK: select <4 x i1> %{{.*}} <4 x double> %{{.*}}, <4 x double> %{{.*}}
+ // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_unpackhi_pd(__U, __A, __B);
}
__m128 test_mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_unpackhi_ps
+ // CHECK-LABEL: test_mm_mask_unpackhi_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
- // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}} <4 x float> %{{.*}}
+ // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_unpackhi_ps(__W, __U, __A, __B);
}
__m128 test_mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_unpackhi_ps
+ // CHECK-LABEL: test_mm_maskz_unpackhi_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
- // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}} <4 x float> %{{.*}}
+ // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_unpackhi_ps(__U, __A, __B);
}
__m256 test_mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_unpackhi_ps
+ // CHECK-LABEL: test_mm256_mask_unpackhi_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_unpackhi_ps(__W, __U, __A, __B);
}
__m256 test_mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpackhi_ps
+ // CHECK-LABEL: test_mm256_maskz_unpackhi_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_unpackhi_ps(__U, __A, __B);
}
__m128d test_mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_unpacklo_pd
+ // CHECK-LABEL: test_mm_mask_unpacklo_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_unpacklo_pd(__W, __U, __A, __B);
}
__m128d test_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_unpacklo_pd
+ // CHECK-LABEL: test_mm_maskz_unpacklo_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_unpacklo_pd(__U, __A, __B);
}
__m256d test_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_unpacklo_pd
+ // CHECK-LABEL: test_mm256_mask_unpacklo_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
- // CHECK: select <4 x i1> %{{.*}} <4 x double> %{{.*}}, <4 x double> %{{.*}}
+ // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_unpacklo_pd(__W, __U, __A, __B);
}
__m256d test_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpacklo_pd
+ // CHECK-LABEL: test_mm256_maskz_unpacklo_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
- // CHECK: select <4 x i1> %{{.*}} <4 x double> %{{.*}}, <4 x double> %{{.*}}
+ // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_unpacklo_pd(__U, __A, __B);
}
__m128 test_mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_unpacklo_ps
+ // CHECK-LABEL: test_mm_mask_unpacklo_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
- // CHECK: select <4 x i1> %{{.*}} <4 x float> %{{.*}}, <4 x float> %{{.*}}
+ // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_unpacklo_ps(__W, __U, __A, __B);
}
__m128 test_mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_unpacklo_ps
+ // CHECK-LABEL: test_mm_maskz_unpacklo_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
- // CHECK: select <4 x i1> %{{.*}} <4 x float> %{{.*}}, <4 x float> %{{.*}}
+ // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_unpacklo_ps(__U, __A, __B);
}
__m256 test_mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_unpacklo_ps
+ // CHECK-LABEL: test_mm256_mask_unpacklo_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_unpacklo_ps(__W, __U, __A, __B);
}
__m256 test_mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpacklo_ps
+ // CHECK-LABEL: test_mm256_maskz_unpacklo_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_unpacklo_ps(__U, __A, __B);
}
__m128d test_mm_rcp14_pd(__m128d __A) {
- // CHECK-LABEL: @test_mm_rcp14_pd
+ // CHECK-LABEL: test_mm_rcp14_pd
// CHECK: @llvm.x86.avx512.rcp14.pd.128
return _mm_rcp14_pd(__A);
}
__m128d test_mm_mask_rcp14_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_rcp14_pd
+ // CHECK-LABEL: test_mm_mask_rcp14_pd
// CHECK: @llvm.x86.avx512.rcp14.pd.128
return _mm_mask_rcp14_pd(__W, __U, __A);
}
__m128d test_mm_maskz_rcp14_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_rcp14_pd
+ // CHECK-LABEL: test_mm_maskz_rcp14_pd
// CHECK: @llvm.x86.avx512.rcp14.pd.128
return _mm_maskz_rcp14_pd(__U, __A);
}
__m256d test_mm256_rcp14_pd(__m256d __A) {
- // CHECK-LABEL: @test_mm256_rcp14_pd
+ // CHECK-LABEL: test_mm256_rcp14_pd
// CHECK: @llvm.x86.avx512.rcp14.pd.256
return _mm256_rcp14_pd(__A);
}
__m256d test_mm256_mask_rcp14_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_rcp14_pd
+ // CHECK-LABEL: test_mm256_mask_rcp14_pd
// CHECK: @llvm.x86.avx512.rcp14.pd.256
return _mm256_mask_rcp14_pd(__W, __U, __A);
}
__m256d test_mm256_maskz_rcp14_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_rcp14_pd
+ // CHECK-LABEL: test_mm256_maskz_rcp14_pd
// CHECK: @llvm.x86.avx512.rcp14.pd.256
return _mm256_maskz_rcp14_pd(__U, __A);
}
__m128 test_mm_rcp14_ps(__m128 __A) {
- // CHECK-LABEL: @test_mm_rcp14_ps
+ // CHECK-LABEL: test_mm_rcp14_ps
// CHECK: @llvm.x86.avx512.rcp14.ps.128
return _mm_rcp14_ps(__A);
}
__m128 test_mm_mask_rcp14_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_rcp14_ps
+ // CHECK-LABEL: test_mm_mask_rcp14_ps
// CHECK: @llvm.x86.avx512.rcp14.ps.128
return _mm_mask_rcp14_ps(__W, __U, __A);
}
__m128 test_mm_maskz_rcp14_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_rcp14_ps
+ // CHECK-LABEL: test_mm_maskz_rcp14_ps
// CHECK: @llvm.x86.avx512.rcp14.ps.128
return _mm_maskz_rcp14_ps(__U, __A);
}
__m256 test_mm256_rcp14_ps(__m256 __A) {
- // CHECK-LABEL: @test_mm256_rcp14_ps
+ // CHECK-LABEL: test_mm256_rcp14_ps
// CHECK: @llvm.x86.avx512.rcp14.ps.256
return _mm256_rcp14_ps(__A);
}
__m256 test_mm256_mask_rcp14_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_rcp14_ps
+ // CHECK-LABEL: test_mm256_mask_rcp14_ps
// CHECK: @llvm.x86.avx512.rcp14.ps.256
return _mm256_mask_rcp14_ps(__W, __U, __A);
}
__m256 test_mm256_maskz_rcp14_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_rcp14_ps
+ // CHECK-LABEL: test_mm256_maskz_rcp14_ps
// CHECK: @llvm.x86.avx512.rcp14.ps.256
return _mm256_maskz_rcp14_ps(__U, __A);
}
__m128d test_mm_mask_permute_pd(__m128d __W, __mmask8 __U, __m128d __X) {
- // CHECK-LABEL: @test_mm_mask_permute_pd
+ // CHECK-LABEL: test_mm_mask_permute_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> poison, <2 x i32> <i32 1, i32 0>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_permute_pd(__W, __U, __X, 1);
}
__m128d test_mm_maskz_permute_pd(__mmask8 __U, __m128d __X) {
- // CHECK-LABEL: @test_mm_maskz_permute_pd
+ // CHECK-LABEL: test_mm_maskz_permute_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> poison, <2 x i32> <i32 1, i32 0>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_permute_pd(__U, __X, 1);
}
__m256d test_mm256_mask_permute_pd(__m256d __W, __mmask8 __U, __m256d __X) {
- // CHECK-LABEL: @test_mm256_mask_permute_pd
+ // CHECK-LABEL: test_mm256_mask_permute_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_permute_pd(__W, __U, __X, 5);
}
__m256d test_mm256_maskz_permute_pd(__mmask8 __U, __m256d __X) {
- // CHECK-LABEL: @test_mm256_maskz_permute_pd
+ // CHECK-LABEL: test_mm256_maskz_permute_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_permute_pd(__U, __X, 5);
}
__m128 test_mm_mask_permute_ps(__m128 __W, __mmask8 __U, __m128 __X) {
- // CHECK-LABEL: @test_mm_mask_permute_ps
+ // CHECK-LABEL: test_mm_mask_permute_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_permute_ps(__W, __U, __X, 0x1b);
}
__m128 test_mm_maskz_permute_ps(__mmask8 __U, __m128 __X) {
- // CHECK-LABEL: @test_mm_maskz_permute_ps
+ // CHECK-LABEL: test_mm_maskz_permute_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_permute_ps(__U, __X, 0x1b);
}
__m256 test_mm256_mask_permute_ps(__m256 __W, __mmask8 __U, __m256 __X) {
- // CHECK-LABEL: @test_mm256_mask_permute_ps
+ // CHECK-LABEL: test_mm256_mask_permute_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> poison, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_permute_ps(__W, __U, __X, 0x1b);
}
__m256 test_mm256_maskz_permute_ps(__mmask8 __U, __m256 __X) {
- // CHECK-LABEL: @test_mm256_maskz_permute_ps
+ // CHECK-LABEL: test_mm256_maskz_permute_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> poison, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_permute_ps(__U, __X, 0x1b);
}
__m128d test_mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C) {
- // CHECK-LABEL: @test_mm_mask_permutevar_pd
+ // CHECK-LABEL: test_mm_mask_permutevar_pd
// CHECK: @llvm.x86.avx.vpermilvar.pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_permutevar_pd(__W, __U, __A, __C);
}
__m128d test_mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C) {
- // CHECK-LABEL: @test_mm_maskz_permutevar_pd
+ // CHECK-LABEL: test_mm_maskz_permutevar_pd
// CHECK: @llvm.x86.avx.vpermilvar.pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_permutevar_pd(__U, __A, __C);
}
__m256d test_mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C) {
- // CHECK-LABEL: @test_mm256_mask_permutevar_pd
+ // CHECK-LABEL: test_mm256_mask_permutevar_pd
// CHECK: @llvm.x86.avx.vpermilvar.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_permutevar_pd(__W, __U, __A, __C);
}
__m256d test_mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C) {
- // CHECK-LABEL: @test_mm256_maskz_permutevar_pd
+ // CHECK-LABEL: test_mm256_maskz_permutevar_pd
// CHECK: @llvm.x86.avx.vpermilvar.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_permutevar_pd(__U, __A, __C);
}
__m128 test_mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C) {
- // CHECK-LABEL: @test_mm_mask_permutevar_ps
+ // CHECK-LABEL: test_mm_mask_permutevar_ps
// CHECK: @llvm.x86.avx.vpermilvar.ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_permutevar_ps(__W, __U, __A, __C);
}
__m128 test_mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C) {
- // CHECK-LABEL: @test_mm_maskz_permutevar_ps
+ // CHECK-LABEL: test_mm_maskz_permutevar_ps
// CHECK: @llvm.x86.avx.vpermilvar.ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_permutevar_ps(__U, __A, __C);
}
__m256 test_mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C) {
- // CHECK-LABEL: @test_mm256_mask_permutevar_ps
+ // CHECK-LABEL: test_mm256_mask_permutevar_ps
// CHECK: @llvm.x86.avx.vpermilvar.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_permutevar_ps(__W, __U, __A, __C);
}
__m256 test_mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C) {
- // CHECK-LABEL: @test_mm256_maskz_permutevar_ps
+ // CHECK-LABEL: test_mm256_maskz_permutevar_ps
// CHECK: @llvm.x86.avx.vpermilvar.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_permutevar_ps(__U, __A, __C);
}
__mmask8 test_mm_test_epi32_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_test_epi32_mask
+ // CHECK-LABEL: test_mm_test_epi32_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
return _mm_test_epi32_mask(__A, __B);
}
__mmask8 test_mm_mask_test_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_test_epi32_mask
+ // CHECK-LABEL: test_mm_mask_test_epi32_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
@@ -7404,14 +7490,14 @@ __mmask8 test_mm_mask_test_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) {
}
__mmask8 test_mm256_test_epi32_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_test_epi32_mask
+ // CHECK-LABEL: test_mm256_test_epi32_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
return _mm256_test_epi32_mask(__A, __B);
}
__mmask8 test_mm256_mask_test_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_test_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_test_epi32_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
@@ -7419,14 +7505,14 @@ __mmask8 test_mm256_mask_test_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B)
}
__mmask8 test_mm_test_epi64_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_test_epi64_mask
+ // CHECK-LABEL: test_mm_test_epi64_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
return _mm_test_epi64_mask(__A, __B);
}
__mmask8 test_mm_mask_test_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_test_epi64_mask
+ // CHECK-LABEL: test_mm_mask_test_epi64_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
@@ -7434,14 +7520,14 @@ __mmask8 test_mm_mask_test_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) {
}
__mmask8 test_mm256_test_epi64_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_test_epi64_mask
+ // CHECK-LABEL: test_mm256_test_epi64_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
return _mm256_test_epi64_mask(__A, __B);
}
__mmask8 test_mm256_mask_test_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_test_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_test_epi64_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
@@ -7449,14 +7535,14 @@ __mmask8 test_mm256_mask_test_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B)
}
__mmask8 test_mm_testn_epi32_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_testn_epi32_mask
+ // CHECK-LABEL: test_mm_testn_epi32_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
return _mm_testn_epi32_mask(__A, __B);
}
__mmask8 test_mm_mask_testn_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_testn_epi32_mask
+ // CHECK-LABEL: test_mm_mask_testn_epi32_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
@@ -7464,14 +7550,14 @@ __mmask8 test_mm_mask_testn_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) {
}
__mmask8 test_mm256_testn_epi32_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_testn_epi32_mask
+ // CHECK-LABEL: test_mm256_testn_epi32_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
return _mm256_testn_epi32_mask(__A, __B);
}
__mmask8 test_mm256_mask_testn_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_testn_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_testn_epi32_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
@@ -7479,14 +7565,14 @@ __mmask8 test_mm256_mask_testn_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B
}
__mmask8 test_mm_testn_epi64_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_testn_epi64_mask
+ // CHECK-LABEL: test_mm_testn_epi64_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
return _mm_testn_epi64_mask(__A, __B);
}
__mmask8 test_mm_mask_testn_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_testn_epi64_mask
+ // CHECK-LABEL: test_mm_mask_testn_epi64_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
@@ -7494,14 +7580,14 @@ __mmask8 test_mm_mask_testn_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) {
}
__mmask8 test_mm256_testn_epi64_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_testn_epi64_mask
+ // CHECK-LABEL: test_mm256_testn_epi64_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
return _mm256_testn_epi64_mask(__A, __B);
}
__mmask8 test_mm256_mask_testn_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_testn_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_testn_epi64_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
@@ -7509,428 +7595,434 @@ __mmask8 test_mm256_mask_testn_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B
}
__m128i test_mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_unpackhi_epi32
+ // CHECK-LABEL: test_mm_mask_unpackhi_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_unpackhi_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_unpackhi_epi32
+ // CHECK-LABEL: test_mm_maskz_unpackhi_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_unpackhi_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_unpackhi_epi32
+ // CHECK-LABEL: test_mm256_mask_unpackhi_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_unpackhi_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpackhi_epi32
+ // CHECK-LABEL: test_mm256_maskz_unpackhi_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_unpackhi_epi32(__U, __A, __B);
}
__m128i test_mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_unpackhi_epi64
+ // CHECK-LABEL: test_mm_mask_unpackhi_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_unpackhi_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_unpackhi_epi64
+ // CHECK-LABEL: test_mm_maskz_unpackhi_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_unpackhi_epi64(__U, __A, __B);
}
__m256i test_mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_unpackhi_epi64
+ // CHECK-LABEL: test_mm256_mask_unpackhi_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_unpackhi_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpackhi_epi64
+ // CHECK-LABEL: test_mm256_maskz_unpackhi_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_unpackhi_epi64(__U, __A, __B);
}
__m128i test_mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_unpacklo_epi32
+ // CHECK-LABEL: test_mm_mask_unpacklo_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_unpacklo_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_unpacklo_epi32
+ // CHECK-LABEL: test_mm_maskz_unpacklo_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_unpacklo_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_unpacklo_epi32
+ // CHECK-LABEL: test_mm256_mask_unpacklo_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_unpacklo_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpacklo_epi32
+ // CHECK-LABEL: test_mm256_maskz_unpacklo_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_unpacklo_epi32(__U, __A, __B);
}
__m128i test_mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_unpacklo_epi64
+ // CHECK-LABEL: test_mm_mask_unpacklo_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_unpacklo_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_unpacklo_epi64
+ // CHECK-LABEL: test_mm_maskz_unpacklo_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_unpacklo_epi64(__U, __A, __B);
}
__m256i test_mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_unpacklo_epi64
+ // CHECK-LABEL: test_mm256_mask_unpacklo_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_unpacklo_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpacklo_epi64
+ // CHECK-LABEL: test_mm256_maskz_unpacklo_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_unpacklo_epi64(__U, __A, __B);
}
__m128i test_mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_sra_epi32
+ // CHECK-LABEL: test_mm_mask_sra_epi32
// CHECK: @llvm.x86.sse2.psra.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_sra_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_sra_epi32
+ // CHECK-LABEL: test_mm_maskz_sra_epi32
// CHECK: @llvm.x86.sse2.psra.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_sra_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_sra_epi32
+ // CHECK-LABEL: test_mm256_mask_sra_epi32
// CHECK: @llvm.x86.avx2.psra.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_sra_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_sra_epi32
+ // CHECK-LABEL: test_mm256_maskz_sra_epi32
// CHECK: @llvm.x86.avx2.psra.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_sra_epi32(__U, __A, __B);
}
__m128i test_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_srai_epi32
+ // CHECK-LABEL: test_mm_mask_srai_epi32
// CHECK: @llvm.x86.sse2.psrai.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srai_epi32(__W, __U, __A, 5);
}
__m128i test_mm_mask_srai_epi32_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_srai_epi32_2
+ // CHECK-LABEL: test_mm_mask_srai_epi32_2
// CHECK: @llvm.x86.sse2.psrai.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srai_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_srai_epi32
+ // CHECK-LABEL: test_mm_maskz_srai_epi32
// CHECK: @llvm.x86.sse2.psrai.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srai_epi32(__U, __A, 5);
}
__m128i test_mm_maskz_srai_epi32_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_srai_epi32_2
+ // CHECK-LABEL: test_mm_maskz_srai_epi32_2
// CHECK: @llvm.x86.sse2.psrai.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srai_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_srai_epi32
+ // CHECK-LABEL: test_mm256_mask_srai_epi32
// CHECK: @llvm.x86.avx2.psrai.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srai_epi32(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_srai_epi32((__m256i)(__v8si){100, 101, 102, 103, 104, 105, 106, 107}, (__mmask8)0xff, (__m256i)(__v8si){0, 1, 2, 3, 4, 5, 6, 7}, 3), 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0));
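+// With the all-ones mask 0xff every lane is taken from the shift result; 0..7 >> 3 is 0
+// in each lane, so none of the 100..107 passthrough values survive.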
__m256i test_mm256_mask_srai_epi32_2(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_srai_epi32_2
+ // CHECK-LABEL: test_mm256_mask_srai_epi32_2
// CHECK: @llvm.x86.avx2.psrai.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srai_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_srai_epi32
+ // CHECK-LABEL: test_mm256_maskz_srai_epi32
// CHECK: @llvm.x86.avx2.psrai.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srai_epi32(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_srai_epi32((__mmask8)0x71, (__m256i)(__v8si){0xff, 1, 2, 3, 4, 5, 6, 7}, 1), 0x7f, 0, 0, 0, 0x2, 0x2, 0x3, 0));
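+// Mask 0x71 (0b01110001) keeps lanes 0, 4, 5, and 6 of the shifted vector
+// (0xff >> 1 == 0x7f, 4 >> 1 == 2, ...); all other lanes are zeroed.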
__m256i test_mm256_maskz_srai_epi32_2(__mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_srai_epi32_2
+ // CHECK-LABEL: test_mm256_maskz_srai_epi32_2
// CHECK: @llvm.x86.avx2.psrai.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srai_epi32(__U, __A, __B);
}
__m128i test_mm_sra_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_sra_epi64
+ // CHECK-LABEL: test_mm_sra_epi64
// CHECK: @llvm.x86.avx512.psra.q.128
return _mm_sra_epi64(__A, __B);
}
__m128i test_mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_sra_epi64
+ // CHECK-LABEL: test_mm_mask_sra_epi64
// CHECK: @llvm.x86.avx512.psra.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_sra_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_sra_epi64
+ // CHECK-LABEL: test_mm_maskz_sra_epi64
// CHECK: @llvm.x86.avx512.psra.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_sra_epi64(__U, __A, __B);
}
__m256i test_mm256_sra_epi64(__m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_sra_epi64
+ // CHECK-LABEL: test_mm256_sra_epi64
// CHECK: @llvm.x86.avx512.psra.q.256
return _mm256_sra_epi64(__A, __B);
}
__m256i test_mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_sra_epi64
+ // CHECK-LABEL: test_mm256_mask_sra_epi64
// CHECK: @llvm.x86.avx512.psra.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_sra_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_sra_epi64
+ // CHECK-LABEL: test_mm256_maskz_sra_epi64
// CHECK: @llvm.x86.avx512.psra.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_sra_epi64(__U, __A, __B);
}
__m128i test_mm_srai_epi64(__m128i __A) {
- // CHECK-LABEL: @test_mm_srai_epi64
+ // CHECK-LABEL: test_mm_srai_epi64
// CHECK: @llvm.x86.avx512.psrai.q.128
return _mm_srai_epi64(__A, 5);
}
+TEST_CONSTEXPR(match_v2di(_mm_srai_epi64((__m128i)(__v2di){-32768, -3}, 1), -16384, -2));
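+// Arithmetic shift preserves the sign bit: -32768 >> 1 == -16384 and
+// -3 >> 1 == -2 (rounding toward negative infinity, not toward zero).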
__m128i test_mm_srai_epi64_2(__m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_srai_epi64_2
+ // CHECK-LABEL: test_mm_srai_epi64_2
// CHECK: @llvm.x86.avx512.psrai.q.128
return _mm_srai_epi64(__A, __B);
}
__m128i test_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_srai_epi64
+ // CHECK-LABEL: test_mm_mask_srai_epi64
// CHECK: @llvm.x86.avx512.psrai.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srai_epi64(__W, __U, __A, 5);
}
__m128i test_mm_mask_srai_epi64_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_srai_epi64_2
+ // CHECK-LABEL: test_mm_mask_srai_epi64_2
// CHECK: @llvm.x86.avx512.psrai.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srai_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_srai_epi64
+ // CHECK-LABEL: test_mm_maskz_srai_epi64
// CHECK: @llvm.x86.avx512.psrai.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srai_epi64(__U, __A, 5);
}
__m128i test_mm_maskz_srai_epi64_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_srai_epi64_2
+ // CHECK-LABEL: test_mm_maskz_srai_epi64_2
// CHECK: @llvm.x86.avx512.psrai.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srai_epi64(__U, __A, __B);
}
__m256i test_mm256_srai_epi64(__m256i __A) {
- // CHECK-LABEL: @test_mm256_srai_epi64
+ // CHECK-LABEL: test_mm256_srai_epi64
// CHECK: @llvm.x86.avx512.psrai.q.256
return _mm256_srai_epi64(__A, 5);
}
+TEST_CONSTEXPR(match_v4di(_mm256_srai_epi64((__m256i)(__v4di){-32768, 32767, -3, -2}, 1), -16384, 16383, -2, -1));
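+// Same sign-preserving behavior at 256-bit width: 32767 >> 1 == 16383 for the
+// positive lane, -3 >> 1 == -2 and -2 >> 1 == -1 for the negative lanes.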
__m256i test_mm256_srai_epi64_2(__m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_srai_epi64_2
+ // CHECK-LABEL: test_mm256_srai_epi64_2
// CHECK: @llvm.x86.avx512.psrai.q.256
return _mm256_srai_epi64(__A, __B);
}
__m256i test_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_srai_epi64
+ // CHECK-LABEL: test_mm256_mask_srai_epi64
// CHECK: @llvm.x86.avx512.psrai.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srai_epi64(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v4di(_mm256_mask_srai_epi64((__m256i)(__v4di){100, 101, 102, 103}, (__mmask8)0b1010, (__m256i)(__v4di){0,-128, 2, 3}, 2), 100, -32, 102, 0x0));
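+// Mask 0b1010 selects lanes 1 and 3 from the shift result (-128 >> 2 == -32,
+// 3 >> 2 == 0); lanes 0 and 2 keep their 100 and 102 passthrough values.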
__m256i test_mm256_mask_srai_epi64_2(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_srai_epi64_2
+ // CHECK-LABEL: test_mm256_mask_srai_epi64_2
// CHECK: @llvm.x86.avx512.psrai.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srai_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_srai_epi64
+ // CHECK-LABEL: test_mm256_maskz_srai_epi64
// CHECK: @llvm.x86.avx512.psrai.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srai_epi64(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_srai_epi64((__mmask8)0x71, (__m256i)(__v4di){0xff, 1, 2, 3}, 1), 0x7f, 0, 0, 0));
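+// At four-lane width only bit 0 of mask 0x71 is in range, so lane 0 yields
+// 0xff >> 1 == 0x7f and the remaining lanes are zeroed.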
__m256i test_mm256_maskz_srai_epi64_2(__mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_srai_epi64_2
+ // CHECK-LABEL: test_mm256_maskz_srai_epi64_2
// CHECK: @llvm.x86.avx512.psrai.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srai_epi64(__U, __A, __B);
}
__m128i test_mm_ternarylogic_epi32(__m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_ternarylogic_epi32
+ // CHECK-LABEL: test_mm_ternarylogic_epi32
// CHECK: @llvm.x86.avx512.pternlog.d.128
return _mm_ternarylogic_epi32(__A, __B, __C, 4);
}
__m128i test_mm_mask_ternarylogic_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_mask_ternarylogic_epi32
+ // CHECK-LABEL: test_mm_mask_ternarylogic_epi32
// CHECK: @llvm.x86.avx512.pternlog.d.128
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_ternarylogic_epi32(__A, __U, __B, __C, 4);
}
__m128i test_mm_maskz_ternarylogic_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_maskz_ternarylogic_epi32
+ // CHECK-LABEL: test_mm_maskz_ternarylogic_epi32
// CHECK: @llvm.x86.avx512.pternlog.d.128
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> zeroinitializer
return _mm_maskz_ternarylogic_epi32(__U, __A, __B, __C, 4);
}
__m256i test_mm256_ternarylogic_epi32(__m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_ternarylogic_epi32
+ // CHECK-LABEL: test_mm256_ternarylogic_epi32
// CHECK: @llvm.x86.avx512.pternlog.d.256
return _mm256_ternarylogic_epi32(__A, __B, __C, 4);
}
__m256i test_mm256_mask_ternarylogic_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_mask_ternarylogic_epi32
+ // CHECK-LABEL: test_mm256_mask_ternarylogic_epi32
// CHECK: @llvm.x86.avx512.pternlog.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_ternarylogic_epi32(__A, __U, __B, __C, 4);
}
__m256i test_mm256_maskz_ternarylogic_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_maskz_ternarylogic_epi32
+ // CHECK-LABEL: test_mm256_maskz_ternarylogic_epi32
// CHECK: @llvm.x86.avx512.pternlog.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> zeroinitializer
return _mm256_maskz_ternarylogic_epi32(__U, __A, __B, __C, 4);
}
__m128i test_mm_ternarylogic_epi64(__m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_ternarylogic_epi64
+ // CHECK-LABEL: test_mm_ternarylogic_epi64
// CHECK: @llvm.x86.avx512.pternlog.q.128
return _mm_ternarylogic_epi64(__A, __B, __C, 4);
}
__m128i test_mm_mask_ternarylogic_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_mask_ternarylogic_epi64
+ // CHECK-LABEL: test_mm_mask_ternarylogic_epi64
// CHECK: @llvm.x86.avx512.pternlog.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_ternarylogic_epi64(__A, __U, __B, __C, 4);
}
__m128i test_mm_maskz_ternarylogic_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_maskz_ternarylogic_epi64
+ // CHECK-LABEL: test_mm_maskz_ternarylogic_epi64
// CHECK: @llvm.x86.avx512.pternlog.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> zeroinitializer
return _mm_maskz_ternarylogic_epi64(__U, __A, __B, __C, 4);
}
__m256i test_mm256_ternarylogic_epi64(__m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_ternarylogic_epi64
+ // CHECK-LABEL: test_mm256_ternarylogic_epi64
// CHECK: @llvm.x86.avx512.pternlog.q.256
return _mm256_ternarylogic_epi64(__A, __B, __C, 4);
}
__m256i test_mm256_mask_ternarylogic_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_mask_ternarylogic_epi64
+ // CHECK-LABEL: test_mm256_mask_ternarylogic_epi64
// CHECK: @llvm.x86.avx512.pternlog.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_ternarylogic_epi64(__A, __U, __B, __C, 4);
}
__m256i test_mm256_maskz_ternarylogic_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_maskz_ternarylogic_epi64
+ // CHECK-LABEL: test_mm256_maskz_ternarylogic_epi64
// CHECK: @llvm.x86.avx512.pternlog.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> zeroinitializer
return _mm256_maskz_ternarylogic_epi64(__U, __A, __B, __C, 4);
}
__m256 test_mm256_shuffle_f32x4(__m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_shuffle_f32x4
+ // CHECK-LABEL: test_mm256_shuffle_f32x4
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
return _mm256_shuffle_f32x4(__A, __B, 3);
}
__m256 test_mm256_mask_shuffle_f32x4(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_f32x4
+ // CHECK-LABEL: test_mm256_mask_shuffle_f32x4
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_shuffle_f32x4(__W, __U, __A, __B, 3);
}
__m256 test_mm256_maskz_shuffle_f32x4(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_f32x4
+ // CHECK-LABEL: test_mm256_maskz_shuffle_f32x4
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_shuffle_f32x4(__U, __A, __B, 3);
}
__m256d test_mm256_shuffle_f64x2(__m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_shuffle_f64x2
+ // CHECK-LABEL: test_mm256_shuffle_f64x2
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
return _mm256_shuffle_f64x2(__A, __B, 3);
}
__m256d test_mm256_mask_shuffle_f64x2(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_f64x2
+ // CHECK-LABEL: test_mm256_mask_shuffle_f64x2
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
@@ -7938,7 +8030,7 @@ __m256d test_mm256_mask_shuffle_f64x2(__m256d __W, __mmask8 __U, __m256d __A, __
}
__m256d test_mm256_maskz_shuffle_f64x2(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_f64x2
+ // CHECK-LABEL: test_mm256_maskz_shuffle_f64x2
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
@@ -7946,33 +8038,33 @@ __m256d test_mm256_maskz_shuffle_f64x2(__mmask8 __U, __m256d __A, __m256d __B) {
}
__m256i test_mm256_shuffle_i32x4(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shuffle_i32x4
+ // CHECK-LABEL: test_mm256_shuffle_i32x4
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
return _mm256_shuffle_i32x4(__A, __B, 3);
}
__m256i test_mm256_mask_shuffle_i32x4(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_i32x4
+ // CHECK-LABEL: test_mm256_mask_shuffle_i32x4
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_shuffle_i32x4(__W, __U, __A, __B, 3);
}
__m256i test_mm256_maskz_shuffle_i32x4(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_i32x4
+ // CHECK-LABEL: test_mm256_maskz_shuffle_i32x4
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_shuffle_i32x4(__U, __A, __B, 3);
}
__m256i test_mm256_shuffle_i64x2(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shuffle_i64x2
+ // CHECK-LABEL: test_mm256_shuffle_i64x2
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
return _mm256_shuffle_i64x2(__A, __B, 3);
}
__m256i test_mm256_mask_shuffle_i64x2(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_i64x2
+ // CHECK-LABEL: test_mm256_mask_shuffle_i64x2
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
@@ -7980,7 +8072,7 @@ __m256i test_mm256_mask_shuffle_i64x2(__m256i __W, __mmask8 __U, __m256i __A, __
}
__m256i test_mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_i64x2
+ // CHECK-LABEL: test_mm256_maskz_shuffle_i64x2
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
@@ -7988,1576 +8080,1586 @@ __m256i test_mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B) {
}
__m128d test_mm_mask_shuffle_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_shuffle_pd
+ // CHECK-LABEL: test_mm_mask_shuffle_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_shuffle_pd(__W, __U, __A, __B, 3);
}
__m128d test_mm_maskz_shuffle_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_shuffle_pd
+ // CHECK-LABEL: test_mm_maskz_shuffle_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_shuffle_pd(__U, __A, __B, 3);
}
__m256d test_mm256_mask_shuffle_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_pd
+ // CHECK-LABEL: test_mm256_mask_shuffle_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 2, i32 6>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_shuffle_pd(__W, __U, __A, __B, 3);
}
__m256d test_mm256_maskz_shuffle_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_pd
+ // CHECK-LABEL: test_mm256_maskz_shuffle_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 2, i32 6>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_shuffle_pd(__U, __A, __B, 3);
}
__m128 test_mm_mask_shuffle_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_shuffle_ps
+ // CHECK-LABEL: test_mm_mask_shuffle_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 4>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_shuffle_ps(__W, __U, __A, __B, 4);
}
__m128 test_mm_maskz_shuffle_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_shuffle_ps
+ // CHECK-LABEL: test_mm_maskz_shuffle_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 4>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_shuffle_ps(__U, __A, __B, 4);
}
__m256 test_mm256_mask_shuffle_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_ps
+ // CHECK-LABEL: test_mm256_mask_shuffle_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 4, i32 5, i32 12, i32 12>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_shuffle_ps(__W, __U, __A, __B, 4);
}
__m256 test_mm256_maskz_shuffle_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_ps
+ // CHECK-LABEL: test_mm256_maskz_shuffle_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 4, i32 5, i32 12, i32 12>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_shuffle_ps(__U, __A, __B, 4);
}
__m128d test_mm_rsqrt14_pd(__m128d __A) {
- // CHECK-LABEL: @test_mm_rsqrt14_pd
+ // CHECK-LABEL: test_mm_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.128
return _mm_rsqrt14_pd(__A);
}
__m128d test_mm_mask_rsqrt14_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_rsqrt14_pd
+ // CHECK-LABEL: test_mm_mask_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.128
return _mm_mask_rsqrt14_pd(__W, __U, __A);
}
__m128d test_mm_maskz_rsqrt14_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_rsqrt14_pd
+ // CHECK-LABEL: test_mm_maskz_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.128
return _mm_maskz_rsqrt14_pd(__U, __A);
}
__m256d test_mm256_rsqrt14_pd(__m256d __A) {
- // CHECK-LABEL: @test_mm256_rsqrt14_pd
+ // CHECK-LABEL: test_mm256_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.256
return _mm256_rsqrt14_pd(__A);
}
__m256d test_mm256_mask_rsqrt14_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_rsqrt14_pd
+ // CHECK-LABEL: test_mm256_mask_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.256
return _mm256_mask_rsqrt14_pd(__W, __U, __A);
}
__m256d test_mm256_maskz_rsqrt14_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_rsqrt14_pd
+ // CHECK-LABEL: test_mm256_maskz_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.256
return _mm256_maskz_rsqrt14_pd(__U, __A);
}
__m128 test_mm_rsqrt14_ps(__m128 __A) {
- // CHECK-LABEL: @test_mm_rsqrt14_ps
+ // CHECK-LABEL: test_mm_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.128
return _mm_rsqrt14_ps(__A);
}
__m128 test_mm_mask_rsqrt14_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_rsqrt14_ps
+ // CHECK-LABEL: test_mm_mask_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.128
return _mm_mask_rsqrt14_ps(__W, __U, __A);
}
__m128 test_mm_maskz_rsqrt14_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_rsqrt14_ps
+ // CHECK-LABEL: test_mm_maskz_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.128
return _mm_maskz_rsqrt14_ps(__U, __A);
}
__m256 test_mm256_rsqrt14_ps(__m256 __A) {
- // CHECK-LABEL: @test_mm256_rsqrt14_ps
+ // CHECK-LABEL: test_mm256_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.256
return _mm256_rsqrt14_ps(__A);
}
__m256 test_mm256_mask_rsqrt14_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_rsqrt14_ps
+ // CHECK-LABEL: test_mm256_mask_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.256
return _mm256_mask_rsqrt14_ps(__W, __U, __A);
}
__m256 test_mm256_maskz_rsqrt14_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_rsqrt14_ps
+ // CHECK-LABEL: test_mm256_maskz_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.256
return _mm256_maskz_rsqrt14_ps(__U, __A);
}
__m256 test_mm256_broadcast_f32x4(__m128 __A) {
- // CHECK-LABEL: @test_mm256_broadcast_f32x4
+ // CHECK-LABEL: test_mm256_broadcast_f32x4
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
return _mm256_broadcast_f32x4(__A);
}
+TEST_CONSTEXPR(match_m256(_mm256_broadcast_f32x4((__m128)(__v4sf){1.0f, 3.0f, -5.0f, -8.0f}), 1.0f, 3.0f, -5.0f, -8.0f, 1.0f, 3.0f, -5.0f, -8.0f));
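+// The 128-bit source is replicated into both halves of the 256-bit result,
+// so the four floats appear twice in order.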
__m256 test_mm256_mask_broadcast_f32x4(__m256 __O, __mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcast_f32x4
+ // CHECK-LABEL: test_mm256_mask_broadcast_f32x4
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_broadcast_f32x4(__O, __M, __A);
}
__m256 test_mm256_maskz_broadcast_f32x4(__mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcast_f32x4
+ // CHECK-LABEL: test_mm256_maskz_broadcast_f32x4
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_broadcast_f32x4(__M, __A);
}
__m256i test_mm256_broadcast_i32x4(__m128i const* __A) {
- // CHECK-LABEL: @test_mm256_broadcast_i32x4
+ // CHECK-LABEL: test_mm256_broadcast_i32x4
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
return _mm256_broadcast_i32x4(_mm_loadu_si128(__A));
}
+TEST_CONSTEXPR(match_v8si(_mm256_broadcast_i32x4((__m128i)(__v4si){1, 3, -5, -8}), 1, 3, -5, -8, 1, 3, -5, -8));
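+// The intrinsic itself takes the 128-bit vector by value (the runtime test
+// above loads it first); constant evaluation duplicates the four 32-bit
+// elements across both 128-bit halves.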
__m256i test_mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i const* __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcast_i32x4
+ // CHECK-LABEL: test_mm256_mask_broadcast_i32x4
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_broadcast_i32x4(__O, __M, _mm_loadu_si128(__A));
}
__m256i test_mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i const* __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcast_i32x4
+ // CHECK-LABEL: test_mm256_maskz_broadcast_i32x4
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_broadcast_i32x4(__M, _mm_loadu_si128(__A));
}
__m256d test_mm256_mask_broadcastsd_pd(__m256d __O, __mmask8 __M, __m128d __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcastsd_pd
+ // CHECK-LABEL: test_mm256_mask_broadcastsd_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_broadcastsd_pd(__O, __M, __A);
}
__m256d test_mm256_maskz_broadcastsd_pd(__mmask8 __M, __m128d __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcastsd_pd
+ // CHECK-LABEL: test_mm256_maskz_broadcastsd_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_broadcastsd_pd(__M, __A);
}
__m128 test_mm_mask_broadcastss_ps(__m128 __O, __mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_broadcastss_ps
+ // CHECK-LABEL: test_mm_mask_broadcastss_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_broadcastss_ps(__O, __M, __A);
}
__m128 test_mm_maskz_broadcastss_ps(__mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_broadcastss_ps
+ // CHECK-LABEL: test_mm_maskz_broadcastss_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_broadcastss_ps(__M, __A);
}
__m256 test_mm256_mask_broadcastss_ps(__m256 __O, __mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcastss_ps
+ // CHECK-LABEL: test_mm256_mask_broadcastss_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_broadcastss_ps(__O, __M, __A);
}
__m256 test_mm256_maskz_broadcastss_ps(__mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcastss_ps
+ // CHECK-LABEL: test_mm256_maskz_broadcastss_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_broadcastss_ps(__M, __A);
}
__m128i test_mm_mask_broadcastd_epi32(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_broadcastd_epi32
+ // CHECK-LABEL: test_mm_mask_broadcastd_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_broadcastd_epi32(__O, __M, __A);
}
__m128i test_mm_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_broadcastd_epi32
+ // CHECK-LABEL: test_mm_maskz_broadcastd_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_broadcastd_epi32(__M, __A);
}
__m256i test_mm256_mask_broadcastd_epi32(__m256i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcastd_epi32
+ // CHECK-LABEL: test_mm256_mask_broadcastd_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_broadcastd_epi32(__O, __M, __A);
}
__m256i test_mm256_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcastd_epi32
+ // CHECK-LABEL: test_mm256_maskz_broadcastd_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_broadcastd_epi32(__M, __A);
}
__m128i test_mm_mask_broadcastq_epi64(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_broadcastq_epi64
+ // CHECK-LABEL: test_mm_mask_broadcastq_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> zeroinitializer
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_broadcastq_epi64(__O, __M, __A);
}
__m128i test_mm_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_broadcastq_epi64
+ // CHECK-LABEL: test_mm_maskz_broadcastq_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> zeroinitializer
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_broadcastq_epi64(__M, __A);
}
__m256i test_mm256_mask_broadcastq_epi64(__m256i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcastq_epi64
+ // CHECK-LABEL: test_mm256_mask_broadcastq_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_broadcastq_epi64(__O, __M, __A);
}
__m256i test_mm256_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcastq_epi64
+ // CHECK-LABEL: test_mm256_maskz_broadcastq_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_broadcastq_epi64(__M, __A);
}
__m128i test_mm_cvtsepi32_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtsepi32_epi8
+ // CHECK-LABEL: test_mm_cvtsepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.128
return _mm_cvtsepi32_epi8(__A);
}
__m128i test_mm_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi32_epi8
+ // CHECK-LABEL: test_mm_mask_cvtsepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.128
return _mm_mask_cvtsepi32_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtsepi32_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtsepi32_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtsepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.128
return _mm_maskz_cvtsepi32_epi8(__M, __A);
}
void test_mm_mask_cvtsepi32_storeu_epi8(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi32_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtsepi32_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.mem.128
return _mm_mask_cvtsepi32_storeu_epi8(__P, __M, __A);
}
__m128i test_mm256_cvtsepi32_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtsepi32_epi8
+ // CHECK-LABEL: test_mm256_cvtsepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.256
return _mm256_cvtsepi32_epi8(__A);
}
__m128i test_mm256_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi32_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtsepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.256
return _mm256_mask_cvtsepi32_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtsepi32_epi8(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtsepi32_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtsepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.256
return _mm256_maskz_cvtsepi32_epi8(__M, __A);
}
void test_mm256_mask_cvtsepi32_storeu_epi8(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi32_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtsepi32_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.mem.256
return _mm256_mask_cvtsepi32_storeu_epi8(__P, __M, __A);
}
__m128i test_mm_cvtsepi32_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtsepi32_epi16
+ // CHECK-LABEL: test_mm_cvtsepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.128
return _mm_cvtsepi32_epi16(__A);
}
__m128i test_mm_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi32_epi16
+ // CHECK-LABEL: test_mm_mask_cvtsepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.128
return _mm_mask_cvtsepi32_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_cvtsepi32_epi16(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtsepi32_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtsepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.128
return _mm_maskz_cvtsepi32_epi16(__M, __A);
}
void test_mm_mask_cvtsepi32_storeu_epi16(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi32_storeu_epi16
+ // CHECK-LABEL: test_mm_mask_cvtsepi32_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.mem.128
return _mm_mask_cvtsepi32_storeu_epi16(__P, __M, __A);
}
__m128i test_mm256_cvtsepi32_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtsepi32_epi16
+ // CHECK-LABEL: test_mm256_cvtsepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.256
return _mm256_cvtsepi32_epi16(__A);
}
__m128i test_mm256_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi32_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtsepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.256
return _mm256_mask_cvtsepi32_epi16(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtsepi32_epi16(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtsepi32_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtsepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.256
return _mm256_maskz_cvtsepi32_epi16(__M, __A);
}
void test_mm256_mask_cvtsepi32_storeu_epi16(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi32_storeu_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtsepi32_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.mem.256
return _mm256_mask_cvtsepi32_storeu_epi16(__P, __M, __A);
}
__m128i test_mm_cvtsepi64_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtsepi64_epi8
+ // CHECK-LABEL: test_mm_cvtsepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.128
return _mm_cvtsepi64_epi8(__A);
}
__m128i test_mm_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi64_epi8
+ // CHECK-LABEL: test_mm_mask_cvtsepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.128
return _mm_mask_cvtsepi64_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtsepi64_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtsepi64_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtsepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.128
return _mm_maskz_cvtsepi64_epi8(__M, __A);
}
void test_mm_mask_cvtsepi64_storeu_epi8(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi64_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtsepi64_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.mem.128
return _mm_mask_cvtsepi64_storeu_epi8(__P, __M, __A);
}
__m128i test_mm256_cvtsepi64_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtsepi64_epi8
+ // CHECK-LABEL: test_mm256_cvtsepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.256
return _mm256_cvtsepi64_epi8(__A);
}
__m128i test_mm256_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi64_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtsepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.256
return _mm256_mask_cvtsepi64_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtsepi64_epi8(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtsepi64_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtsepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.256
return _mm256_maskz_cvtsepi64_epi8(__M, __A);
}
void test_mm256_mask_cvtsepi64_storeu_epi8(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi64_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtsepi64_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.mem.256
return _mm256_mask_cvtsepi64_storeu_epi8(__P, __M, __A);
}
__m128i test_mm_cvtsepi64_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtsepi64_epi32
+ // CHECK-LABEL: test_mm_cvtsepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.128
return _mm_cvtsepi64_epi32(__A);
}
__m128i test_mm_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi64_epi32
+ // CHECK-LABEL: test_mm_mask_cvtsepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.128
return _mm_mask_cvtsepi64_epi32(__O, __M, __A);
}
__m128i test_mm_maskz_cvtsepi64_epi32(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtsepi64_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtsepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.128
return _mm_maskz_cvtsepi64_epi32(__M, __A);
}
void test_mm_mask_cvtsepi64_storeu_epi32(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi64_storeu_epi32
+ // CHECK-LABEL: test_mm_mask_cvtsepi64_storeu_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.mem.128
return _mm_mask_cvtsepi64_storeu_epi32(__P, __M, __A);
}
__m128i test_mm256_cvtsepi64_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtsepi64_epi32
+ // CHECK-LABEL: test_mm256_cvtsepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.256
return _mm256_cvtsepi64_epi32(__A);
}
__m128i test_mm256_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi64_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtsepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.256
return _mm256_mask_cvtsepi64_epi32(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtsepi64_epi32(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtsepi64_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtsepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.256
return _mm256_maskz_cvtsepi64_epi32(__M, __A);
}
void test_mm256_mask_cvtsepi64_storeu_epi32(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi64_storeu_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtsepi64_storeu_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.mem.256
return _mm256_mask_cvtsepi64_storeu_epi32(__P, __M, __A);
}
__m128i test_mm_cvtsepi64_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtsepi64_epi16
+ // CHECK-LABEL: test_mm_cvtsepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.128
return _mm_cvtsepi64_epi16(__A);
}
__m128i test_mm_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi64_epi16
+ // CHECK-LABEL: test_mm_mask_cvtsepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.128
return _mm_mask_cvtsepi64_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_cvtsepi64_epi16(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtsepi64_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtsepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.128
return _mm_maskz_cvtsepi64_epi16(__M, __A);
}
void test_mm_mask_cvtsepi64_storeu_epi16(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi64_storeu_epi16
+ // CHECK-LABEL: test_mm_mask_cvtsepi64_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.mem.128
return _mm_mask_cvtsepi64_storeu_epi16(__P, __M, __A);
}
__m128i test_mm256_cvtsepi64_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtsepi64_epi16
+ // CHECK-LABEL: test_mm256_cvtsepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.256
return _mm256_cvtsepi64_epi16(__A);
}
__m128i test_mm256_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi64_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtsepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.256
return _mm256_mask_cvtsepi64_epi16(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtsepi64_epi16(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtsepi64_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtsepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.256
return _mm256_maskz_cvtsepi64_epi16(__M, __A);
}
void test_mm256_mask_cvtsepi64_storeu_epi16(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi64_storeu_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtsepi64_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.mem.256
return _mm256_mask_cvtsepi64_storeu_epi16(__P, __M, __A);
}
__m128i test_mm_cvtusepi32_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtusepi32_epi8
+ // CHECK-LABEL: test_mm_cvtusepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.128
return _mm_cvtusepi32_epi8(__A);
}
__m128i test_mm_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi32_epi8
+ // CHECK-LABEL: test_mm_mask_cvtusepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.128
return _mm_mask_cvtusepi32_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtusepi32_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtusepi32_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtusepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.128
return _mm_maskz_cvtusepi32_epi8(__M, __A);
}
void test_mm_mask_cvtusepi32_storeu_epi8(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi32_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtusepi32_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.mem.128
return _mm_mask_cvtusepi32_storeu_epi8(__P, __M, __A);
}
__m128i test_mm256_cvtusepi32_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtusepi32_epi8
+ // CHECK-LABEL: test_mm256_cvtusepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.256
return _mm256_cvtusepi32_epi8(__A);
}
__m128i test_mm256_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi32_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtusepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.256
return _mm256_mask_cvtusepi32_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtusepi32_epi8(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtusepi32_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtusepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.256
return _mm256_maskz_cvtusepi32_epi8(__M, __A);
}
void test_mm256_mask_cvtusepi32_storeu_epi8(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi32_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtusepi32_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.mem.256
return _mm256_mask_cvtusepi32_storeu_epi8(__P, __M, __A);
}
__m128i test_mm_cvtusepi32_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtusepi32_epi16
+ // CHECK-LABEL: test_mm_cvtusepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.128
return _mm_cvtusepi32_epi16(__A);
}
__m128i test_mm_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi32_epi16
+ // CHECK-LABEL: test_mm_mask_cvtusepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.128
return _mm_mask_cvtusepi32_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_cvtusepi32_epi16(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtusepi32_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtusepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.128
return _mm_maskz_cvtusepi32_epi16(__M, __A);
}
void test_mm_mask_cvtusepi32_storeu_epi16(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi32_storeu_epi16
+ // CHECK-LABEL: test_mm_mask_cvtusepi32_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.mem.128
return _mm_mask_cvtusepi32_storeu_epi16(__P, __M, __A);
}
__m128i test_mm256_cvtusepi32_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtusepi32_epi16
+ // CHECK-LABEL: test_mm256_cvtusepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.256
return _mm256_cvtusepi32_epi16(__A);
}
__m128i test_mm256_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi32_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtusepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.256
return _mm256_mask_cvtusepi32_epi16(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtusepi32_epi16(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtusepi32_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtusepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.256
return _mm256_maskz_cvtusepi32_epi16(__M, __A);
}
void test_mm256_mask_cvtusepi32_storeu_epi16(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi32_storeu_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtusepi32_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.mem.256
return _mm256_mask_cvtusepi32_storeu_epi16(__P, __M, __A);
}
__m128i test_mm_cvtusepi64_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtusepi64_epi8
+ // CHECK-LABEL: test_mm_cvtusepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.128
return _mm_cvtusepi64_epi8(__A);
}
__m128i test_mm_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi64_epi8
+ // CHECK-LABEL: test_mm_mask_cvtusepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.128
return _mm_mask_cvtusepi64_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtusepi64_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtusepi64_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtusepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.128
return _mm_maskz_cvtusepi64_epi8(__M, __A);
}
void test_mm_mask_cvtusepi64_storeu_epi8(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi64_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtusepi64_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.mem.128
return _mm_mask_cvtusepi64_storeu_epi8(__P, __M, __A);
}
__m128i test_mm256_cvtusepi64_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtusepi64_epi8
+ // CHECK-LABEL: test_mm256_cvtusepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.256
return _mm256_cvtusepi64_epi8(__A);
}
__m128i test_mm256_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi64_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtusepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.256
return _mm256_mask_cvtusepi64_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtusepi64_epi8(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtusepi64_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtusepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.256
return _mm256_maskz_cvtusepi64_epi8(__M, __A);
}
void test_mm256_mask_cvtusepi64_storeu_epi8(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi64_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtusepi64_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.mem.256
return _mm256_mask_cvtusepi64_storeu_epi8(__P, __M, __A);
}
__m128i test_mm_cvtusepi64_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtusepi64_epi32
+ // CHECK-LABEL: test_mm_cvtusepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.128
return _mm_cvtusepi64_epi32(__A);
}
__m128i test_mm_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi64_epi32
+ // CHECK-LABEL: test_mm_mask_cvtusepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.128
return _mm_mask_cvtusepi64_epi32(__O, __M, __A);
}
__m128i test_mm_maskz_cvtusepi64_epi32(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtusepi64_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtusepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.128
return _mm_maskz_cvtusepi64_epi32(__M, __A);
}
void test_mm_mask_cvtusepi64_storeu_epi32(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi64_storeu_epi32
+ // CHECK-LABEL: test_mm_mask_cvtusepi64_storeu_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.mem.128
return _mm_mask_cvtusepi64_storeu_epi32(__P, __M, __A);
}
__m128i test_mm256_cvtusepi64_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtusepi64_epi32
+ // CHECK-LABEL: test_mm256_cvtusepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.256
return _mm256_cvtusepi64_epi32(__A);
}
__m128i test_mm256_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi64_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtusepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.256
return _mm256_mask_cvtusepi64_epi32(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtusepi64_epi32(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtusepi64_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtusepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.256
return _mm256_maskz_cvtusepi64_epi32(__M, __A);
}
void test_mm256_mask_cvtusepi64_storeu_epi32(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi64_storeu_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtusepi64_storeu_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.mem.256
return _mm256_mask_cvtusepi64_storeu_epi32(__P, __M, __A);
}
__m128i test_mm_cvtusepi64_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtusepi64_epi16
+ // CHECK-LABEL: test_mm_cvtusepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.128
return _mm_cvtusepi64_epi16(__A);
}
__m128i test_mm_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi64_epi16
+ // CHECK-LABEL: test_mm_mask_cvtusepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.128
return _mm_mask_cvtusepi64_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_cvtusepi64_epi16(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtusepi64_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtusepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.128
return _mm_maskz_cvtusepi64_epi16(__M, __A);
}
void test_mm_mask_cvtusepi64_storeu_epi16(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi64_storeu_epi16
+ // CHECK-LABEL: test_mm_mask_cvtusepi64_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.mem.128
return _mm_mask_cvtusepi64_storeu_epi16(__P, __M, __A);
}
__m128i test_mm256_cvtusepi64_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtusepi64_epi16
+ // CHECK-LABEL: test_mm256_cvtusepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.256
return _mm256_cvtusepi64_epi16(__A);
}
__m128i test_mm256_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi64_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtusepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.256
return _mm256_mask_cvtusepi64_epi16(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtusepi64_epi16(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtusepi64_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtusepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.256
return _mm256_maskz_cvtusepi64_epi16(__M, __A);
}
void test_mm256_mask_cvtusepi64_storeu_epi16(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi64_storeu_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtusepi64_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.mem.256
return _mm256_mask_cvtusepi64_storeu_epi16(__P, __M, __A);
}
__m128i test_mm_cvtepi32_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepi32_epi8
+ // CHECK-LABEL: test_mm_cvtepi32_epi8
// CHECK: trunc <4 x i32> %{{.*}} to <4 x i8>
// CHECK: shufflevector <4 x i8> %{{.*}}, <4 x i8> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
return _mm_cvtepi32_epi8(__A);
}
__m128i test_mm_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_epi8
+ // CHECK-LABEL: test_mm_mask_cvtepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.128
return _mm_mask_cvtepi32_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtepi32_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi32_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.128
return _mm_maskz_cvtepi32_epi8(__M, __A);
}
void test_mm_mask_cvtepi32_storeu_epi8(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtepi32_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.mem.128
return _mm_mask_cvtepi32_storeu_epi8(__P, __M, __A);
}
__m128i test_mm256_cvtepi32_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepi32_epi8
+ // CHECK-LABEL: test_mm256_cvtepi32_epi8
// CHECK: trunc <8 x i32> %{{.*}} to <8 x i8>
// CHECK: shufflevector <8 x i8> %{{.*}}, <8 x i8> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
return _mm256_cvtepi32_epi8(__A);
}
__m128i test_mm256_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.256
return _mm256_mask_cvtepi32_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi32_epi8(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi32_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.256
return _mm256_maskz_cvtepi32_epi8(__M, __A);
}
void test_mm256_mask_cvtepi32_storeu_epi8(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.mem.256
return _mm256_mask_cvtepi32_storeu_epi8(__P, __M, __A);
}
__m128i test_mm_cvtepi32_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepi32_epi16
+ // CHECK-LABEL: test_mm_cvtepi32_epi16
// CHECK: trunc <4 x i32> %{{.*}} to <4 x i16>
// CHECK: shufflevector <4 x i16> %{{.*}}, <4 x i16> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
return _mm_cvtepi32_epi16(__A);
}
__m128i test_mm_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_epi16
+ // CHECK-LABEL: test_mm_mask_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.128
return _mm_mask_cvtepi32_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_cvtepi32_epi16(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi32_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.128
return _mm_maskz_cvtepi32_epi16(__M, __A);
}
void test_mm_mask_cvtepi32_storeu_epi16(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_storeu_epi16
+ // CHECK-LABEL: test_mm_mask_cvtepi32_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.mem.128
return _mm_mask_cvtepi32_storeu_epi16(__P, __M, __A);
}
__m128i test_mm256_cvtepi32_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepi32_epi16
+ // CHECK-LABEL: test_mm256_cvtepi32_epi16
// CHECK: trunc <8 x i32> %{{.*}} to <8 x i16>
return _mm256_cvtepi32_epi16(__A);
}
__m128i test_mm256_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.256
return _mm256_mask_cvtepi32_epi16(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi32_epi16(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi32_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.256
return _mm256_maskz_cvtepi32_epi16(__M, __A);
}
void test_mm256_mask_cvtepi32_storeu_epi16(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_storeu_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.mem.256
return _mm256_mask_cvtepi32_storeu_epi16(__P, __M, __A);
}
__m128i test_mm_cvtepi64_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepi64_epi8
+ // CHECK-LABEL: test_mm_cvtepi64_epi8
// CHECK: trunc <2 x i64> %{{.*}} to <2 x i8>
// CHECK: shufflevector <2 x i8> %{{.*}}, <2 x i8> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
return _mm_cvtepi64_epi8(__A);
}
__m128i test_mm_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_epi8
+ // CHECK-LABEL: test_mm_mask_cvtepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.128
return _mm_mask_cvtepi64_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtepi64_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi64_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.128
return _mm_maskz_cvtepi64_epi8(__M, __A);
}
void test_mm_mask_cvtepi64_storeu_epi8(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtepi64_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.mem.128
return _mm_mask_cvtepi64_storeu_epi8(__P, __M, __A);
}
__m128i test_mm256_cvtepi64_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepi64_epi8
+ // CHECK-LABEL: test_mm256_cvtepi64_epi8
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i8>
// CHECK: shufflevector <4 x i8> %{{.*}}, <4 x i8> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
return _mm256_cvtepi64_epi8(__A);
}
__m128i test_mm256_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.256
return _mm256_mask_cvtepi64_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi64_epi8(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi64_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.256
return _mm256_maskz_cvtepi64_epi8(__M, __A);
}
void test_mm256_mask_cvtepi64_storeu_epi8(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.mem.256
return _mm256_mask_cvtepi64_storeu_epi8(__P, __M, __A);
}
__m128i test_mm_cvtepi64_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepi64_epi32
+ // CHECK-LABEL: test_mm_cvtepi64_epi32
// CHECK: trunc <2 x i64> %{{.*}} to <2 x i32>
// CHECK: shufflevector <2 x i32> %{{.*}}, <2 x i32> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
return _mm_cvtepi64_epi32(__A);
}
__m128i test_mm_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_epi32
+ // CHECK-LABEL: test_mm_mask_cvtepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.128
return _mm_mask_cvtepi64_epi32(__O, __M, __A);
}
__m128i test_mm_maskz_cvtepi64_epi32(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi64_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.128
return _mm_maskz_cvtepi64_epi32(__M, __A);
}
void test_mm_mask_cvtepi64_storeu_epi32(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_storeu_epi32
+ // CHECK-LABEL: test_mm_mask_cvtepi64_storeu_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.mem.128
return _mm_mask_cvtepi64_storeu_epi32(__P, __M, __A);
}
__m128i test_mm256_cvtepi64_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepi64_epi32
+ // CHECK-LABEL: test_mm256_cvtepi64_epi32
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i32>
return _mm256_cvtepi64_epi32(__A);
}
__m128i test_mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_epi32
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm256_mask_cvtepi64_epi32(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi64_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtepi64_epi32
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm256_maskz_cvtepi64_epi32(__M, __A);
}
void test_mm256_mask_cvtepi64_storeu_epi32(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_storeu_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_storeu_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.mem.256
return _mm256_mask_cvtepi64_storeu_epi32(__P, __M, __A);
}
__m128i test_mm_cvtepi64_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepi64_epi16
+ // CHECK-LABEL: test_mm_cvtepi64_epi16
// CHECK: trunc <2 x i64> %{{.*}} to <2 x i16>
// CHECK: shufflevector <2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 3, i32 3, i32 3, i32 3>
return _mm_cvtepi64_epi16(__A);
}
__m128i test_mm_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_epi16
+ // CHECK-LABEL: test_mm_mask_cvtepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.128
return _mm_mask_cvtepi64_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_cvtepi64_epi16(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi64_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.128
return _mm_maskz_cvtepi64_epi16(__M, __A);
}
void test_mm_mask_cvtepi64_storeu_epi16(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_storeu_epi16
+ // CHECK-LABEL: test_mm_mask_cvtepi64_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.mem.128
return _mm_mask_cvtepi64_storeu_epi16(__P, __M, __A);
}
__m128i test_mm256_cvtepi64_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepi64_epi16
+ // CHECK-LABEL: test_mm256_cvtepi64_epi16
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i16>
// CHECK: shufflevector <4 x i16> %{{.*}}, <4 x i16> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
return _mm256_cvtepi64_epi16(__A);
}
__m128i test_mm256_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.256
return _mm256_mask_cvtepi64_epi16(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi64_epi16(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi64_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.256
return _mm256_maskz_cvtepi64_epi16(__M, __A);
}
void test_mm256_mask_cvtepi64_storeu_epi16(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_storeu_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.mem.256
return _mm256_mask_cvtepi64_storeu_epi16(__P, __M, __A);
}
__m128 test_mm256_extractf32x4_ps(__m256 __A) {
- // CHECK-LABEL: @test_mm256_extractf32x4_ps
+ // CHECK-LABEL: test_mm256_extractf32x4_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm256_extractf32x4_ps(__A, 1);
}
__m128 test_mm256_mask_extractf32x4_ps(__m128 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_extractf32x4_ps
+ // CHECK-LABEL: test_mm256_mask_extractf32x4_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm256_mask_extractf32x4_ps(__W, __U, __A, 1);
}
__m128 test_mm256_maskz_extractf32x4_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_extractf32x4_ps
+ // CHECK-LABEL: test_mm256_maskz_extractf32x4_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm256_maskz_extractf32x4_ps(__U, __A, 1);
}
__m128i test_mm256_extracti32x4_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_extracti32x4_epi32
+ // CHECK-LABEL: test_mm256_extracti32x4_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm256_extracti32x4_epi32(__A, 1);
}
__m128i test_mm256_mask_extracti32x4_epi32(__m128i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_extracti32x4_epi32
+ // CHECK-LABEL: test_mm256_mask_extracti32x4_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm256_mask_extracti32x4_epi32(__W, __U, __A, 1);
}
__m128i test_mm256_maskz_extracti32x4_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_extracti32x4_epi32
+ // CHECK-LABEL: test_mm256_maskz_extracti32x4_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm256_maskz_extracti32x4_epi32(__U, __A, 1);
}
__m256 test_mm256_insertf32x4(__m256 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm256_insertf32x4
+ // CHECK-LABEL: test_mm256_insertf32x4
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
return _mm256_insertf32x4(__A, __B, 1);
}
__m256 test_mm256_mask_insertf32x4(__m256 __W, __mmask8 __U, __m256 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm256_mask_insertf32x4
+ // CHECK-LABEL: test_mm256_mask_insertf32x4
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_insertf32x4(__W, __U, __A, __B, 1);
}
__m256 test_mm256_maskz_insertf32x4(__mmask8 __U, __m256 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm256_maskz_insertf32x4
+ // CHECK-LABEL: test_mm256_maskz_insertf32x4
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_insertf32x4(__U, __A, __B, 1);
}
__m256i test_mm256_inserti32x4(__m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_inserti32x4
+ // CHECK-LABEL: test_mm256_inserti32x4
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
return _mm256_inserti32x4(__A, __B, 1);
}
__m256i test_mm256_mask_inserti32x4(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_inserti32x4
+ // CHECK-LABEL: test_mm256_mask_inserti32x4
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_inserti32x4(__W, __U, __A, __B, 1);
}
__m256i test_mm256_maskz_inserti32x4(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_inserti32x4
+ // CHECK-LABEL: test_mm256_maskz_inserti32x4
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_inserti32x4(__U, __A, __B, 1);
}
__m128d test_mm_getmant_pd(__m128d __A) {
- // CHECK-LABEL: @test_mm_getmant_pd
+ // CHECK-LABEL: test_mm_getmant_pd
// CHECK: @llvm.x86.avx512.mask.getmant.pd.128
return _mm_getmant_pd(__A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128d test_mm_mask_getmant_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_getmant_pd
+ // CHECK-LABEL: test_mm_mask_getmant_pd
// CHECK: @llvm.x86.avx512.mask.getmant.pd.128
return _mm_mask_getmant_pd(__W, __U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128d test_mm_maskz_getmant_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_getmant_pd
+ // CHECK-LABEL: test_mm_maskz_getmant_pd
// CHECK: @llvm.x86.avx512.mask.getmant.pd.128
return _mm_maskz_getmant_pd(__U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256d test_mm256_getmant_pd(__m256d __A) {
- // CHECK-LABEL: @test_mm256_getmant_pd
+ // CHECK-LABEL: test_mm256_getmant_pd
// CHECK: @llvm.x86.avx512.mask.getmant.pd.256
return _mm256_getmant_pd(__A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256d test_mm256_mask_getmant_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_getmant_pd
+ // CHECK-LABEL: test_mm256_mask_getmant_pd
// CHECK: @llvm.x86.avx512.mask.getmant.pd.256
return _mm256_mask_getmant_pd(__W, __U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256d test_mm256_maskz_getmant_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_getmant_pd
+ // CHECK-LABEL: test_mm256_maskz_getmant_pd
// CHECK: @llvm.x86.avx512.mask.getmant.pd.256
return _mm256_maskz_getmant_pd(__U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128 test_mm_getmant_ps(__m128 __A) {
- // CHECK-LABEL: @test_mm_getmant_ps
+ // CHECK-LABEL: test_mm_getmant_ps
// CHECK: @llvm.x86.avx512.mask.getmant.ps.128
return _mm_getmant_ps(__A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128 test_mm_mask_getmant_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_getmant_ps
+ // CHECK-LABEL: test_mm_mask_getmant_ps
// CHECK: @llvm.x86.avx512.mask.getmant.ps.128
return _mm_mask_getmant_ps(__W, __U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128 test_mm_maskz_getmant_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_getmant_ps
+ // CHECK-LABEL: test_mm_maskz_getmant_ps
// CHECK: @llvm.x86.avx512.mask.getmant.ps.128
return _mm_maskz_getmant_ps(__U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256 test_mm256_getmant_ps(__m256 __A) {
- // CHECK-LABEL: @test_mm256_getmant_ps
+ // CHECK-LABEL: test_mm256_getmant_ps
// CHECK: @llvm.x86.avx512.mask.getmant.ps.256
return _mm256_getmant_ps(__A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256 test_mm256_mask_getmant_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_getmant_ps
+ // CHECK-LABEL: test_mm256_mask_getmant_ps
// CHECK: @llvm.x86.avx512.mask.getmant.ps.256
return _mm256_mask_getmant_ps(__W, __U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256 test_mm256_maskz_getmant_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_getmant_ps
+ // CHECK-LABEL: test_mm256_maskz_getmant_ps
// CHECK: @llvm.x86.avx512.mask.getmant.ps.256
return _mm256_maskz_getmant_ps(__U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128d test_mm_mmask_i64gather_pd(__m128d __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mmask_i64gather_pd
+ // CHECK-LABEL: test_mm_mmask_i64gather_pd
// CHECK: @llvm.x86.avx512.mask.gather3div2.df
return _mm_mmask_i64gather_pd(__v1_old, __mask, __index, __addr, 2);
}
__m128i test_mm_mmask_i64gather_epi64(__m128i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mmask_i64gather_epi64
+ // CHECK-LABEL: test_mm_mmask_i64gather_epi64
// CHECK: @llvm.x86.avx512.mask.gather3div2.di
return _mm_mmask_i64gather_epi64(__v1_old, __mask, __index, __addr, 2);
}
__m256d test_mm256_mmask_i64gather_pd(__m256d __v1_old, __mmask8 __mask, __m256i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mmask_i64gather_pd
+ // CHECK-LABEL: test_mm256_mmask_i64gather_pd
// CHECK: @llvm.x86.avx512.mask.gather3div4.df
return _mm256_mmask_i64gather_pd(__v1_old, __mask, __index, __addr, 2);
}
__m256i test_mm256_mmask_i64gather_epi64(__m256i __v1_old, __mmask8 __mask, __m256i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mmask_i64gather_epi64
+ // CHECK-LABEL: test_mm256_mmask_i64gather_epi64
// CHECK: @llvm.x86.avx512.mask.gather3div4.di
return _mm256_mmask_i64gather_epi64(__v1_old, __mask, __index, __addr, 2);
}
__m128 test_mm_mmask_i64gather_ps(__m128 __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mmask_i64gather_ps
+ // CHECK-LABEL: test_mm_mmask_i64gather_ps
// CHECK: @llvm.x86.avx512.mask.gather3div4.sf
return _mm_mmask_i64gather_ps(__v1_old, __mask, __index, __addr, 2);
}
__m128i test_mm_mmask_i64gather_epi32(__m128i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mmask_i64gather_epi32
+ // CHECK-LABEL: test_mm_mmask_i64gather_epi32
// CHECK: @llvm.x86.avx512.mask.gather3div4.si
return _mm_mmask_i64gather_epi32(__v1_old, __mask, __index, __addr, 2);
}
__m128 test_mm256_mmask_i64gather_ps(__m128 __v1_old, __mmask8 __mask, __m256i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mmask_i64gather_ps
+ // CHECK-LABEL: test_mm256_mmask_i64gather_ps
// CHECK: @llvm.x86.avx512.mask.gather3div8.sf
return _mm256_mmask_i64gather_ps(__v1_old, __mask, __index, __addr, 2);
}
__m128i test_mm256_mmask_i64gather_epi32(__m128i __v1_old, __mmask8 __mask, __m256i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mmask_i64gather_epi32
+ // CHECK-LABEL: test_mm256_mmask_i64gather_epi32
// CHECK: @llvm.x86.avx512.mask.gather3div8.si
return _mm256_mmask_i64gather_epi32(__v1_old, __mask, __index, __addr, 2);
}
__m128d test_mm_mask_i32gather_pd(__m128d __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mask_i32gather_pd
+ // CHECK-LABEL: test_mm_mask_i32gather_pd
// CHECK: @llvm.x86.avx512.mask.gather3siv2.df
return _mm_mmask_i32gather_pd(__v1_old, __mask, __index, __addr, 2);
}
__m128i test_mm_mask_i32gather_epi64(__m128i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mask_i32gather_epi64
+ // CHECK-LABEL: test_mm_mask_i32gather_epi64
// CHECK: @llvm.x86.avx512.mask.gather3siv2.di
return _mm_mmask_i32gather_epi64(__v1_old, __mask, __index, __addr, 2);
}
__m256d test_mm256_mask_i32gather_pd(__m256d __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mask_i32gather_pd
+ // CHECK-LABEL: test_mm256_mask_i32gather_pd
// CHECK: @llvm.x86.avx512.mask.gather3siv4.df
return _mm256_mmask_i32gather_pd(__v1_old, __mask, __index, __addr, 2);
}
__m256i test_mm256_mask_i32gather_epi64(__m256i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mask_i32gather_epi64
+ // CHECK-LABEL: test_mm256_mask_i32gather_epi64
// CHECK: @llvm.x86.avx512.mask.gather3siv4.di
return _mm256_mmask_i32gather_epi64(__v1_old, __mask, __index, __addr, 2);
}
__m128 test_mm_mask_i32gather_ps(__m128 __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mask_i32gather_ps
+ // CHECK-LABEL: test_mm_mask_i32gather_ps
// CHECK: @llvm.x86.avx512.mask.gather3siv4.sf
return _mm_mmask_i32gather_ps(__v1_old, __mask, __index, __addr, 2);
}
__m128i test_mm_mask_i32gather_epi32(__m128i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mask_i32gather_epi32
+ // CHECK-LABEL: test_mm_mask_i32gather_epi32
// CHECK: @llvm.x86.avx512.mask.gather3siv4.si
return _mm_mmask_i32gather_epi32(__v1_old, __mask, __index, __addr, 2);
}
__m256 test_mm256_mask_i32gather_ps(__m256 __v1_old, __mmask8 __mask, __m256i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mask_i32gather_ps
+ // CHECK-LABEL: test_mm256_mask_i32gather_ps
// CHECK: @llvm.x86.avx512.mask.gather3siv8.sf
return _mm256_mmask_i32gather_ps(__v1_old, __mask, __index, __addr, 2);
}
__m256i test_mm256_mask_i32gather_epi32(__m256i __v1_old, __mmask8 __mask, __m256i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mask_i32gather_epi32
+ // CHECK-LABEL: test_mm256_mask_i32gather_epi32
// CHECK: @llvm.x86.avx512.mask.gather3siv8.si
return _mm256_mmask_i32gather_epi32(__v1_old, __mask, __index, __addr, 2);
}
__m256d test_mm256_permutex_pd(__m256d __X) {
- // CHECK-LABEL: @test_mm256_permutex_pd
+ // CHECK-LABEL: test_mm256_permutex_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
return _mm256_permutex_pd(__X, 3);
}
__m256d test_mm256_mask_permutex_pd(__m256d __W, __mmask8 __U, __m256d __X) {
- // CHECK-LABEL: @test_mm256_mask_permutex_pd
+ // CHECK-LABEL: test_mm256_mask_permutex_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_permutex_pd(__W, __U, __X, 1);
}
__m256d test_mm256_maskz_permutex_pd(__mmask8 __U, __m256d __X) {
- // CHECK-LABEL: @test_mm256_maskz_permutex_pd
+ // CHECK-LABEL: test_mm256_maskz_permutex_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_permutex_pd(__U, __X, 1);
}
__m256i test_mm256_permutex_epi64(__m256i __X) {
- // CHECK-LABEL: @test_mm256_permutex_epi64
+ // CHECK-LABEL: test_mm256_permutex_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> poison, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
return _mm256_permutex_epi64(__X, 3);
}
__m256i test_mm256_mask_permutex_epi64(__m256i __W, __mmask8 __M, __m256i __X) {
- // CHECK-LABEL: @test_mm256_mask_permutex_epi64
+ // CHECK-LABEL: test_mm256_mask_permutex_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> poison, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_permutex_epi64(__W, __M, __X, 3);
}
__m256i test_mm256_maskz_permutex_epi64(__mmask8 __M, __m256i __X) {
- // CHECK-LABEL: @test_mm256_maskz_permutex_epi64
+ // CHECK-LABEL: test_mm256_maskz_permutex_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> poison, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_permutex_epi64(__M, __X, 3);
}
__m256d test_mm256_permutexvar_pd(__m256i __X, __m256d __Y) {
- // CHECK-LABEL: @test_mm256_permutexvar_pd
+ // CHECK-LABEL: test_mm256_permutexvar_pd
// CHECK: @llvm.x86.avx512.permvar.df.256
return _mm256_permutexvar_pd(__X, __Y);
}
__m256d test_mm256_mask_permutexvar_pd(__m256d __W, __mmask8 __U, __m256i __X, __m256d __Y) {
- // CHECK-LABEL: @test_mm256_mask_permutexvar_pd
+ // CHECK-LABEL: test_mm256_mask_permutexvar_pd
// CHECK: @llvm.x86.avx512.permvar.df.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_permutexvar_pd(__W, __U, __X, __Y);
}
__m256d test_mm256_maskz_permutexvar_pd(__mmask8 __U, __m256i __X, __m256d __Y) {
- // CHECK-LABEL: @test_mm256_maskz_permutexvar_pd
+ // CHECK-LABEL: test_mm256_maskz_permutexvar_pd
// CHECK: @llvm.x86.avx512.permvar.df.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_permutexvar_pd(__U, __X, __Y);
}
__m256i test_mm256_maskz_permutexvar_epi64(__mmask8 __M, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_permutexvar_epi64
+ // CHECK-LABEL: test_mm256_maskz_permutexvar_epi64
// CHECK: @llvm.x86.avx512.permvar.di.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_permutexvar_epi64(__M, __X, __Y);
}
__m256i test_mm256_mask_permutexvar_epi64(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_permutexvar_epi64
+ // CHECK-LABEL: test_mm256_mask_permutexvar_epi64
// CHECK: @llvm.x86.avx512.permvar.di.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_permutexvar_epi64(__W, __M, __X, __Y);
}
__m256 test_mm256_mask_permutexvar_ps(__m256 __W, __mmask8 __U, __m256i __X, __m256 __Y) {
- // CHECK-LABEL: @test_mm256_mask_permutexvar_ps
+ // CHECK-LABEL: test_mm256_mask_permutexvar_ps
// CHECK: @llvm.x86.avx2.permps
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_permutexvar_ps(__W, __U, __X, __Y);
}
__m256 test_mm256_maskz_permutexvar_ps(__mmask8 __U, __m256i __X, __m256 __Y) {
- // CHECK-LABEL: @test_mm256_maskz_permutexvar_ps
+ // CHECK-LABEL: test_mm256_maskz_permutexvar_ps
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_permutexvar_ps(__U, __X, __Y);
}
__m256 test_mm256_permutexvar_ps(__m256i __X, __m256 __Y) {
- // CHECK-LABEL: @test_mm256_permutexvar_ps
+ // CHECK-LABEL: test_mm256_permutexvar_ps
// CHECK: @llvm.x86.avx2.permps
return _mm256_permutexvar_ps( __X, __Y);
}
__m256i test_mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_permutexvar_epi32
+ // CHECK-LABEL: test_mm256_maskz_permutexvar_epi32
// CHECK: @llvm.x86.avx2.permd
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_permutexvar_epi32(__M, __X, __Y);
}
__m256i test_mm256_permutexvar_epi32(__m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_permutexvar_epi32
+ // CHECK-LABEL: test_mm256_permutexvar_epi32
// CHECK: @llvm.x86.avx2.permd
return _mm256_permutexvar_epi32(__X, __Y);
}
__m256i test_mm256_mask_permutexvar_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_permutexvar_epi32
+ // CHECK-LABEL: test_mm256_mask_permutexvar_epi32
// CHECK: @llvm.x86.avx2.permd
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_permutexvar_epi32(__W, __M, __X, __Y);
}
__m128i test_mm_alignr_epi32(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_alignr_epi32
+ // CHECK-LABEL: test_mm_alignr_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
return _mm_alignr_epi32(__A, __B, 1);
}
__m128i test_mm_mask_alignr_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_alignr_epi32
+ // CHECK-LABEL: test_mm_mask_alignr_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_alignr_epi32(__W, __U, __A, __B, 5);
}
__m128i test_mm_maskz_alignr_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_alignr_epi32
+ // CHECK-LABEL: test_mm_maskz_alignr_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_alignr_epi32(__U, __A, __B, 1);
}
__m256i test_mm256_alignr_epi32(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_alignr_epi32
+ // CHECK-LABEL: test_mm256_alignr_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
return _mm256_alignr_epi32(__A, __B, 1);
}
__m256i test_mm256_mask_alignr_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_alignr_epi32
+ // CHECK-LABEL: test_mm256_mask_alignr_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_alignr_epi32(__W, __U, __A, __B, 9);
}
__m256i test_mm256_maskz_alignr_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_alignr_epi32
+ // CHECK-LABEL: test_mm256_maskz_alignr_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_alignr_epi32(__U, __A, __B, 1);
}
__m128i test_mm_alignr_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_alignr_epi64
+ // CHECK-LABEL: test_mm_alignr_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 2>
return _mm_alignr_epi64(__A, __B, 1);
}
__m128i test_mm_mask_alignr_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_alignr_epi64
+ // CHECK-LABEL: test_mm_mask_alignr_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 2>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_alignr_epi64(__W, __U, __A, __B, 3);
}
__m128i test_mm_maskz_alignr_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_alignr_epi64
+ // CHECK-LABEL: test_mm_maskz_alignr_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 2>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_alignr_epi64(__U, __A, __B, 1);
}
__m256i test_mm256_alignr_epi64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_alignr_epi64
+ // CHECK-LABEL: test_mm256_alignr_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
return _mm256_alignr_epi64(__A, __B, 1);
}
__m256i test_mm256_mask_alignr_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_alignr_epi64
+ // CHECK-LABEL: test_mm256_mask_alignr_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_alignr_epi64(__W, __U, __A, __B, 5);
}
__m256i test_mm256_maskz_alignr_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_alignr_epi64
+ // CHECK-LABEL: test_mm256_maskz_alignr_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_alignr_epi64(__U, __A, __B, 1);
}
__m128 test_mm_mask_movehdup_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_movehdup_ps
+ // CHECK-LABEL: test_mm_mask_movehdup_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_movehdup_ps(__W, __U, __A);
}
__m128 test_mm_maskz_movehdup_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_movehdup_ps
+ // CHECK-LABEL: test_mm_maskz_movehdup_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_movehdup_ps(__U, __A);
}
__m256 test_mm256_mask_movehdup_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_movehdup_ps
+ // CHECK-LABEL: test_mm256_mask_movehdup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_movehdup_ps(__W, __U, __A);
}
__m256 test_mm256_maskz_movehdup_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_movehdup_ps
+ // CHECK-LABEL: test_mm256_maskz_movehdup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_movehdup_ps(__U, __A);
}
__m128 test_mm_mask_moveldup_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_moveldup_ps
+ // CHECK-LABEL: test_mm_mask_moveldup_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_moveldup_ps(__W, __U, __A);
}
__m128 test_mm_maskz_moveldup_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_moveldup_ps
+ // CHECK-LABEL: test_mm_maskz_moveldup_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_moveldup_ps(__U, __A);
}
__m256 test_mm256_mask_moveldup_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_moveldup_ps
+ // CHECK-LABEL: test_mm256_mask_moveldup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_moveldup_ps(__W, __U, __A);
}
__m256 test_mm256_maskz_moveldup_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_moveldup_ps
+ // CHECK-LABEL: test_mm256_maskz_moveldup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_moveldup_ps(__U, __A);
}
__m128i test_mm_mask_shuffle_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_shuffle_epi32
+ // CHECK-LABEL: test_mm_mask_shuffle_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_shuffle_epi32(__W, __U, __A, 1);
}
__m128i test_mm_maskz_shuffle_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_shuffle_epi32
+ // CHECK-LABEL: test_mm_maskz_shuffle_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> poison, <4 x i32> <i32 2, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_shuffle_epi32(__U, __A, 2);
}
__m256i test_mm256_mask_shuffle_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_epi32
+ // CHECK-LABEL: test_mm256_mask_shuffle_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <8 x i32> <i32 2, i32 0, i32 0, i32 0, i32 6, i32 4, i32 4, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_shuffle_epi32(__W, __U, __A, 2);
}
__m256i test_mm256_maskz_shuffle_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_epi32
+ // CHECK-LABEL: test_mm256_maskz_shuffle_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <8 x i32> <i32 2, i32 0, i32 0, i32 0, i32 6, i32 4, i32 4, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_shuffle_epi32(__U, __A, 2);
}
__m128d test_mm_mask_mov_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_mov_pd
+ // CHECK-LABEL: test_mm_mask_mov_pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_mov_pd(__W, __U, __A);
}
+TEST_CONSTEXPR(match_m128d(_mm_mask_mov_pd((__m128d){-2.0, -1.0}, 0x2, (__m128d){+1.0, +2.0}), -2.0, +2.0));
__m128d test_mm_maskz_mov_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_mov_pd
+ // CHECK-LABEL: test_mm_maskz_mov_pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_mov_pd(__U, __A);
}
+TEST_CONSTEXPR(match_m128d(_mm_maskz_mov_pd(0x1, (__m128d){+1.0, +2.0}), +1.0, +0.0));
__m256d test_mm256_mask_mov_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_mov_pd
+ // CHECK-LABEL: test_mm256_mask_mov_pd
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_mov_pd(__W, __U, __A);
}
+TEST_CONSTEXPR(match_m256d(_mm256_mask_mov_pd((__m256d){-4.0, -3.0, -2.0, -1.0}, 0x3, (__m256d){+1.0, +2.0, +3.0, +4.0}), +1.0, +2.0, -2.0, -1.0));
__m256d test_mm256_maskz_mov_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_mov_pd
+ // CHECK-LABEL: test_mm256_maskz_mov_pd
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_mov_pd(__U, __A);
}
+TEST_CONSTEXPR(match_m256d(_mm256_maskz_mov_pd(0xC, (__m256d){+1.0, +2.0, +3.0, +4.0}), 0.0, 0.0, +3.0, +4.0));
__m128 test_mm_mask_mov_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_mov_ps
+ // CHECK-LABEL: test_mm_mask_mov_ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_mov_ps(__W, __U, __A);
}
+TEST_CONSTEXPR(match_m128(_mm_mask_mov_ps((__m128){-4.0f, -3.0f, -2.0f, -1.0f}, 0x3, (__m128){+1.0f, +2.0f, +3.0f, +4.0f}), +1.0f, +2.0f, -2.0f, -1.0f));
__m128 test_mm_maskz_mov_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_mov_ps
+ // CHECK-LABEL: test_mm_maskz_mov_ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_mov_ps(__U, __A);
}
+TEST_CONSTEXPR(match_m128(_mm_maskz_mov_ps(0xC, (__m128){+1.0f, +2.0f, +3.0f, +4.0f}), 0.0f, 0.0f, +3.0f, +4.0f));
__m256 test_mm256_mask_mov_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_mov_ps
+ // CHECK-LABEL: test_mm256_mask_mov_ps
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_mov_ps(__W, __U, __A);
}
+TEST_CONSTEXPR(match_m256(_mm256_mask_mov_ps((__m256){-8.0f, -7.0f, -6.0f, -5.0f, -4.0f, -3.0f, -2.0f, -1.0f}, 0xC3, (__m256){+1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f, +8.0f}), +1.0f, +2.0f, -6.0f, -5.0f, -4.0f, -3.0f, +7.0f, +8.0f));
__m256 test_mm256_maskz_mov_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_mov_ps
+ // CHECK-LABEL: test_mm256_maskz_mov_ps
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_mov_ps(__U, __A);
}
+TEST_CONSTEXPR(match_m256(_mm256_maskz_mov_ps(0xC3, (__m256){+1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f, +8.0f}), +1.0f, +2.0f, 0.0f, 0.0f, 0.0f, 0.0f, +7.0f, +8.0f));
__m128 test_mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtph_ps
+ // CHECK-LABEL: test_mm_mask_cvtph_ps
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: fpext <4 x half> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -9565,7 +9667,7 @@ __m128 test_mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A) {
}
__m128 test_mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtph_ps
+ // CHECK-LABEL: test_mm_maskz_cvtph_ps
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: fpext <4 x half> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -9573,167 +9675,167 @@ __m128 test_mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
}
__m256 test_mm256_mask_cvtph_ps(__m256 __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtph_ps
+ // CHECK-LABEL: test_mm256_mask_cvtph_ps
// CHECK: fpext <8 x half> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_cvtph_ps(__W, __U, __A);
}
__m256 test_mm256_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtph_ps
+ // CHECK-LABEL: test_mm256_maskz_cvtph_ps
// CHECK: fpext <8 x half> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_cvtph_ps(__U, __A);
}
__m128i test_mm_mask_cvtps_ph(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvtps_ph
+ // CHECK-LABEL: test_mm_mask_cvtps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.128
return _mm_mask_cvtps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128i test_mm_maskz_cvtps_ph(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtps_ph
+ // CHECK-LABEL: test_mm_maskz_cvtps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.128
return _mm_maskz_cvtps_ph(__U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128i test_mm256_mask_cvtps_ph(__m128i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtps_ph
+ // CHECK-LABEL: test_mm256_mask_cvtps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.256
return _mm256_mask_cvtps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128i test_mm256_maskz_cvtps_ph(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtps_ph
+ // CHECK-LABEL: test_mm256_maskz_cvtps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.256
return _mm256_maskz_cvtps_ph(__U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128i test_mm_mask_cvt_roundps_ph(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvt_roundps_ph
+ // CHECK-LABEL: test_mm_mask_cvt_roundps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.128
return _mm_mask_cvt_roundps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO);
}
__m128i test_mm_maskz_cvt_roundps_ph(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvt_roundps_ph
+ // CHECK-LABEL: test_mm_maskz_cvt_roundps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.128
return _mm_maskz_cvt_roundps_ph(__U, __A, _MM_FROUND_TO_ZERO);
}
__m128i test_mm256_mask_cvt_roundps_ph(__m128i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvt_roundps_ph
+ // CHECK-LABEL: test_mm256_mask_cvt_roundps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.256
return _mm256_mask_cvt_roundps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO);
}
__m128i test_mm256_maskz_cvt_roundps_ph(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvt_roundps_ph
+ // CHECK-LABEL: test_mm256_maskz_cvt_roundps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.256
return _mm256_maskz_cvt_roundps_ph(__U, __A, _MM_FROUND_TO_ZERO);
}
__mmask8 test_mm_cmpeq_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpeq_epi32_mask
+ // CHECK-LABEL: test_mm_cmpeq_epi32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpeq_epi32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpeq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpeq_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmpeq_epi32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpeq_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm_mask_cmpeq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpeq_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmpeq_epi64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpeq_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpeq_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpeq_epi64_mask
+ // CHECK-LABEL: test_mm_cmpeq_epi64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpeq_epi64_mask(__a, __b);
}
__mmask8 test_mm_cmpgt_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpgt_epi32_mask
+ // CHECK-LABEL: test_mm_cmpgt_epi32_mask
// CHECK: icmp sgt <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpgt_epi32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpgt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpgt_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmpgt_epi32_mask
// CHECK: icmp sgt <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpgt_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm_mask_cmpgt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpgt_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmpgt_epi64_mask
// CHECK: icmp sgt <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpgt_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpgt_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpgt_epi64_mask
+ // CHECK-LABEL: test_mm_cmpgt_epi64_mask
// CHECK: icmp sgt <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpgt_epi64_mask(__a, __b);
}
__mmask8 test_mm256_cmpeq_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpeq_epi32_mask
+ // CHECK-LABEL: test_mm256_cmpeq_epi32_mask
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpeq_epi32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpeq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpeq_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpeq_epi32_mask
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpeq_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm256_mask_cmpeq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpeq_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpeq_epi64_mask
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpeq_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpeq_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpeq_epi64_mask
+ // CHECK-LABEL: test_mm256_cmpeq_epi64_mask
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpeq_epi64_mask(__a, __b);
}
__mmask8 test_mm256_cmpgt_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpgt_epi32_mask
+ // CHECK-LABEL: test_mm256_cmpgt_epi32_mask
// CHECK: icmp sgt <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpgt_epi32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpgt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpgt_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpgt_epi32_mask
// CHECK: icmp sgt <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpgt_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm256_mask_cmpgt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpgt_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpgt_epi64_mask
// CHECK: icmp sgt <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpgt_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpgt_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpgt_epi64_mask
+ // CHECK-LABEL: test_mm256_cmpgt_epi64_mask
// CHECK: icmp sgt <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpgt_epi64_mask(__a, __b);
}
diff --git a/clang/test/CodeGen/X86/avx512vlbf16-builtins.c b/clang/test/CodeGen/X86/avx512vlbf16-builtins.c
index f62ba46..5e37b4d 100644
--- a/clang/test/CodeGen/X86/avx512vlbf16-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbf16-builtins.c
@@ -1,222 +1,196 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin \
-// RUN: -target-feature +avx512bf16 -target-feature \
-// RUN: +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bf16 -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bf16 -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
__m128bh test_mm_cvtne2ps2bf16(__m128 A, __m128 B) {
- // CHECK-LABEL: @test_mm_cvtne2ps2bf16
- // CHECK: @llvm.x86.avx512bf16.cvtne2ps2bf16.128
- // CHECK: ret <8 x bfloat> %{{.*}}
+ // CHECK-LABEL: test_mm_cvtne2ps2bf16
+ // CHECK: call {{.*}}<8 x bfloat> @llvm.x86.avx512bf16.cvtne2ps2bf16.128(<4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_cvtne2ps_pbh(A, B);
}
__m128bh test_mm_maskz_cvtne2ps2bf16(__m128 A, __m128 B, __mmask8 U) {
- // CHECK-LABEL: @test_mm_maskz_cvtne2ps2bf16
- // CHECK: @llvm.x86.avx512bf16.cvtne2ps2bf16.128
+ // CHECK-LABEL: test_mm_maskz_cvtne2ps2bf16
+ // CHECK: call {{.*}}<8 x bfloat> @llvm.x86.avx512bf16.cvtne2ps2bf16.128(<4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x bfloat> %{{.*}}, <8 x bfloat> %{{.*}}
- // CHECK: ret <8 x bfloat> %{{.*}}
return _mm_maskz_cvtne2ps_pbh(U, A, B);
}
__m128bh test_mm_mask_cvtne2ps2bf16(__m128bh C, __mmask8 U, __m128 A, __m128 B) {
- // CHECK-LABEL: @test_mm_mask_cvtne2ps2bf16
- // CHECK: @llvm.x86.avx512bf16.cvtne2ps2bf16.128
+ // CHECK-LABEL: test_mm_mask_cvtne2ps2bf16
+ // CHECK: call {{.*}}<8 x bfloat> @llvm.x86.avx512bf16.cvtne2ps2bf16.128(<4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x bfloat> %{{.*}}, <8 x bfloat> %{{.*}}
- // CHECK: ret <8 x bfloat> %{{.*}}
return _mm_mask_cvtne2ps_pbh(C, U, A, B);
}
__m256bh test_mm256_cvtne2ps2bf16(__m256 A, __m256 B) {
- // CHECK-LABEL: @test_mm256_cvtne2ps2bf16
- // CHECK: @llvm.x86.avx512bf16.cvtne2ps2bf16.256
- // CHECK: ret <16 x bfloat> %{{.*}}
+ // CHECK-LABEL: test_mm256_cvtne2ps2bf16
+ // CHECK: call {{.*}}<16 x bfloat> @llvm.x86.avx512bf16.cvtne2ps2bf16.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
return _mm256_cvtne2ps_pbh(A, B);
}
__m256bh test_mm256_maskz_cvtne2ps2bf16(__m256 A, __m256 B, __mmask16 U) {
- // CHECK-LABEL: @test_mm256_maskz_cvtne2ps2bf16
- // CHECK: @llvm.x86.avx512bf16.cvtne2ps2bf16.256
+ // CHECK-LABEL: test_mm256_maskz_cvtne2ps2bf16
+ // CHECK: call {{.*}}<16 x bfloat> @llvm.x86.avx512bf16.cvtne2ps2bf16.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x bfloat> %{{.*}}, <16 x bfloat> %{{.*}}
- // CHECK: ret <16 x bfloat> %{{.*}}
return _mm256_maskz_cvtne2ps_pbh(U, A, B);
}
__m256bh test_mm256_mask_cvtne2ps2bf16(__m256bh C, __mmask16 U, __m256 A, __m256 B) {
- // CHECK-LABEL: @test_mm256_mask_cvtne2ps2bf16
- // CHECK: @llvm.x86.avx512bf16.cvtne2ps2bf16.256
+ // CHECK-LABEL: test_mm256_mask_cvtne2ps2bf16
+ // CHECK: call {{.*}}<16 x bfloat> @llvm.x86.avx512bf16.cvtne2ps2bf16.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x bfloat> %{{.*}}, <16 x bfloat> %{{.*}}
- // CHECK: ret <16 x bfloat> %{{.*}}
return _mm256_mask_cvtne2ps_pbh(C, U, A, B);
}
__m512bh test_mm512_cvtne2ps2bf16(__m512 A, __m512 B) {
- // CHECK-LABEL: @test_mm512_cvtne2ps2bf16
- // CHECK: @llvm.x86.avx512bf16.cvtne2ps2bf16.512
- // CHECK: ret <32 x bfloat> %{{.*}}
+ // CHECK-LABEL: test_mm512_cvtne2ps2bf16
+ // CHECK: call {{.*}}<32 x bfloat> @llvm.x86.avx512bf16.cvtne2ps2bf16.512(<16 x float> %{{.*}}, <16 x float> %{{.*}})
return _mm512_cvtne2ps_pbh(A, B);
}
__m512bh test_mm512_maskz_cvtne2ps2bf16(__m512 A, __m512 B, __mmask32 U) {
- // CHECK-LABEL: @test_mm512_maskz_cvtne2ps2bf16
- // CHECK: @llvm.x86.avx512bf16.cvtne2ps2bf16.512
+ // CHECK-LABEL: test_mm512_maskz_cvtne2ps2bf16
+ // CHECK: call {{.*}}<32 x bfloat> @llvm.x86.avx512bf16.cvtne2ps2bf16.512(<16 x float> %{{.*}}, <16 x float> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x bfloat> %{{.*}}, <32 x bfloat> %{{.*}}
- // CHECK: ret <32 x bfloat> %{{.*}}
return _mm512_maskz_cvtne2ps_pbh(U, A, B);
}
__m512bh test_mm512_mask_cvtne2ps2bf16(__m512bh C, __mmask32 U, __m512 A, __m512 B) {
- // CHECK-LABEL: @test_mm512_mask_cvtne2ps2bf16
- // CHECK: @llvm.x86.avx512bf16.cvtne2ps2bf16.512
+ // CHECK-LABEL: test_mm512_mask_cvtne2ps2bf16
+ // CHECK: call {{.*}}<32 x bfloat> @llvm.x86.avx512bf16.cvtne2ps2bf16.512(<16 x float> %{{.*}}, <16 x float> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x bfloat> %{{.*}}, <32 x bfloat> %{{.*}}
- // CHECK: ret <32 x bfloat> %{{.*}}
return _mm512_mask_cvtne2ps_pbh(C, U, A, B);
}
__m128bh test_mm_cvtneps2bf16(__m128 A) {
- // CHECK-LABEL: @test_mm_cvtneps2bf16
- // CHECK: @llvm.x86.vcvtneps2bf16128
- // CHECK: ret <8 x bfloat> %{{.*}}
+ // CHECK-LABEL: test_mm_cvtneps2bf16
+ // CHECK: call {{.*}}<8 x bfloat> @llvm.x86.vcvtneps2bf16128(<4 x float> %{{.*}})
return _mm_cvtneps_pbh(A);
}
__m128bh test_mm_mask_cvtneps2bf16(__m128bh C, __mmask8 U, __m128 A) {
- // CHECK-LABEL: @test_mm_mask_cvtneps2bf16
- // CHECK: @llvm.x86.avx512bf16.mask.cvtneps2bf16.
- // CHECK: ret <8 x bfloat> %{{.*}}
+ // CHECK-LABEL: test_mm_mask_cvtneps2bf16
+ // CHECK: call {{.*}}<8 x bfloat> @llvm.x86.avx512bf16.mask.cvtneps2bf16.128(<4 x float> %{{.*}}, <8 x bfloat> %{{.*}}, <4 x i1> %{{.*}})
return _mm_mask_cvtneps_pbh(C, U, A);
}
__m128bh test_mm_maskz_cvtneps2bf16(__m128 A, __mmask8 U) {
- // CHECK-LABEL: @test_mm_maskz_cvtneps2bf16
- // CHECK: @llvm.x86.avx512bf16.mask.cvtneps2bf16.128
- // CHECK: ret <8 x bfloat> %{{.*}}
+ // CHECK-LABEL: test_mm_maskz_cvtneps2bf16
+ // CHECK: call {{.*}}<8 x bfloat> @llvm.x86.avx512bf16.mask.cvtneps2bf16.128(<4 x float> %{{.*}}, <8 x bfloat> %{{.*}}, <4 x i1> %{{.*}})
return _mm_maskz_cvtneps_pbh(U, A);
}
__m128bh test_mm256_cvtneps2bf16(__m256 A) {
- // CHECK-LABEL: @test_mm256_cvtneps2bf16
- // CHECK: @llvm.x86.vcvtneps2bf16256
- // CHECK: ret <8 x bfloat> %{{.*}}
+ // CHECK-LABEL: test_mm256_cvtneps2bf16
+ // CHECK: call {{.*}}<8 x bfloat> @llvm.x86.vcvtneps2bf16256(<8 x float> %{{.*}})
return _mm256_cvtneps_pbh(A);
}
__m128bh test_mm256_mask_cvtneps2bf16(__m128bh C, __mmask8 U, __m256 A) {
- // CHECK-LABEL: @test_mm256_mask_cvtneps2bf16
- // CHECK: @llvm.x86.avx512bf16.cvtneps2bf16.256
+ // CHECK-LABEL: test_mm256_mask_cvtneps2bf16
+ // CHECK: call {{.*}}<8 x bfloat> @llvm.x86.avx512bf16.cvtneps2bf16.256(<8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x bfloat> %{{.*}}, <8 x bfloat> %{{.*}}
- // CHECK: ret <8 x bfloat> %{{.*}}
return _mm256_mask_cvtneps_pbh(C, U, A);
}
__m128bh test_mm256_maskz_cvtneps2bf16(__m256 A, __mmask8 U) {
- // CHECK-LABEL: @test_mm256_maskz_cvtneps2bf16
- // CHECK: @llvm.x86.avx512bf16.cvtneps2bf16.256
+ // CHECK-LABEL: test_mm256_maskz_cvtneps2bf16
+ // CHECK: call {{.*}}<8 x bfloat> @llvm.x86.avx512bf16.cvtneps2bf16.256
// CHECK: select <8 x i1> %{{.*}}, <8 x bfloat> %{{.*}}, <8 x bfloat> %{{.*}}
- // CHECK: ret <8 x bfloat> %{{.*}}
return _mm256_maskz_cvtneps_pbh(U, A);
}
__m128 test_mm_dpbf16_ps(__m128 D, __m128bh A, __m128bh B) {
- // CHECK-LABEL: @test_mm_dpbf16_ps
- // CHECK: @llvm.x86.avx512bf16.dpbf16ps.128
- // CHECK: ret <4 x float> %{{.*}}
+ // CHECK-LABEL: test_mm_dpbf16_ps
+ // CHECK: call {{.*}}<4 x float> @llvm.x86.avx512bf16.dpbf16ps.128(<4 x float> %{{.*}}, <8 x bfloat> %{{.*}}, <8 x bfloat> %{{.*}})
return _mm_dpbf16_ps(D, A, B);
}
__m128 test_mm_maskz_dpbf16_ps(__m128 D, __m128bh A, __m128bh B, __mmask8 U) {
- // CHECK-LABEL: @test_mm_maskz_dpbf16_ps
- // CHECK: @llvm.x86.avx512bf16.dpbf16ps.128
+ // CHECK-LABEL: test_mm_maskz_dpbf16_ps
+ // CHECK: call {{.*}}<4 x float> @llvm.x86.avx512bf16.dpbf16ps.128(<4 x float> %{{.*}}, <8 x bfloat> %{{.*}}, <8 x bfloat> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
- // CHECK: ret <4 x float> %{{.*}}
return _mm_maskz_dpbf16_ps(U, D, A, B);
}
__m128 test_mm_mask_dpbf16_ps(__m128 D, __m128bh A, __m128bh B, __mmask8 U) {
- // CHECK-LABEL: @test_mm_mask_dpbf16_ps
- // CHECK: @llvm.x86.avx512bf16.dpbf16ps.128
+ // CHECK-LABEL: test_mm_mask_dpbf16_ps
+ // CHECK: call {{.*}}<4 x float> @llvm.x86.avx512bf16.dpbf16ps.128(<4 x float> %{{.*}}, <8 x bfloat> %{{.*}}, <8 x bfloat> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
- // CHECK: ret <4 x float> %{{.*}}
return _mm_mask_dpbf16_ps(D, U, A, B);
}
+
__m256 test_mm256_dpbf16_ps(__m256 D, __m256bh A, __m256bh B) {
- // CHECK-LABEL: @test_mm256_dpbf16_ps
- // CHECK: @llvm.x86.avx512bf16.dpbf16ps.256
- // CHECK: ret <8 x float> %{{.*}}
+ // CHECK-LABEL: test_mm256_dpbf16_ps
+ // CHECK: call {{.*}}<8 x float> @llvm.x86.avx512bf16.dpbf16ps.256(<8 x float> %{{.*}}, <16 x bfloat> %{{.*}}, <16 x bfloat> %{{.*}})
return _mm256_dpbf16_ps(D, A, B);
}
__m256 test_mm256_maskz_dpbf16_ps(__m256 D, __m256bh A, __m256bh B, __mmask8 U) {
- // CHECK-LABEL: @test_mm256_maskz_dpbf16_ps
- // CHECK: @llvm.x86.avx512bf16.dpbf16ps.256
+ // CHECK-LABEL: test_mm256_maskz_dpbf16_ps
+ // CHECK: call {{.*}}<8 x float> @llvm.x86.avx512bf16.dpbf16ps.256(<8 x float> %{{.*}}, <16 x bfloat> %{{.*}}, <16 x bfloat> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
- // CHECK: ret <8 x float> %{{.*}}
return _mm256_maskz_dpbf16_ps(U, D, A, B);
}
__m256 test_mm256_mask_dpbf16_ps(__m256 D, __m256bh A, __m256bh B, __mmask8 U) {
- // CHECK-LABEL: @test_mm256_mask_dpbf16_ps
- // CHECK: @llvm.x86.avx512bf16.dpbf16ps.256
+ // CHECK-LABEL: test_mm256_mask_dpbf16_ps
+ // CHECK: call {{.*}}<8 x float> @llvm.x86.avx512bf16.dpbf16ps.256(<8 x float> %{{.*}}, <16 x bfloat> %{{.*}}, <16 x bfloat> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
- // CHECK: ret <8 x float> %{{.*}}
return _mm256_mask_dpbf16_ps(D, U, A, B);
}
__bf16 test_mm_cvtness_sbh(float A) {
- // CHECK-LABEL: @test_mm_cvtness_sbh
- // CHECK: @llvm.x86.avx512bf16.mask.cvtneps2bf16.128
- // CHECK: ret bfloat %{{.*}}
+ // CHECK-LABEL: test_mm_cvtness_sbh
+ // CHECK: call {{.*}}<8 x bfloat> @llvm.x86.avx512bf16.mask.cvtneps2bf16.128(<4 x float> %{{.*}}, <8 x bfloat> %{{.*}}, <4 x i1> splat (i1 true))
return _mm_cvtness_sbh(A);
}
__m128 test_mm_cvtpbh_ps(__m128bh A) {
- // CHECK-LABEL: @test_mm_cvtpbh_ps
+ // CHECK-LABEL: test_mm_cvtpbh_ps
// CHECK: sext <4 x i16> %{{.*}} to <4 x i32>
- // CHECK: @llvm.x86.sse2.pslli.d
- // CHECK: ret <4 x float> %{{.*}}
+ // CHECK: call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %{{.*}}, i32 %{{.*}})
return _mm_cvtpbh_ps(A);
}
__m256 test_mm256_cvtpbh_ps(__m128bh A) {
- // CHECK-LABEL: @test_mm256_cvtpbh_ps
+ // CHECK-LABEL: test_mm256_cvtpbh_ps
// CHECK: sext <8 x i16> %{{.*}} to <8 x i32>
- // CHECK: @llvm.x86.avx2.pslli.d
- // CHECK: ret <8 x float> %{{.*}}
+ // CHECK: call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %{{.*}}, i32 %{{.*}})
return _mm256_cvtpbh_ps(A);
}
__m128 test_mm_maskz_cvtpbh_ps(__mmask8 M, __m128bh A) {
- // CHECK-LABEL: @test_mm_maskz_cvtpbh_ps
+ // CHECK-LABEL: test_mm_maskz_cvtpbh_ps
// CHECK: sext <4 x i16> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
- // CHECK: @llvm.x86.sse2.pslli.d
- // CHECK: ret <4 x float> %{{.*}}
+ // CHECK: call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %{{.*}}, i32 %{{.*}})
return _mm_maskz_cvtpbh_ps(M, A);
}
__m256 test_mm256_maskz_cvtpbh_ps(__mmask8 M, __m128bh A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtpbh_ps
+ // CHECK-LABEL: test_mm256_maskz_cvtpbh_ps
// CHECK: sext <8 x i16> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
- // CHECK: @llvm.x86.avx2.pslli.d
- // CHECK: ret <8 x float> %{{.*}}
+ // CHECK: call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %{{.*}}, i32 %{{.*}})
return _mm256_maskz_cvtpbh_ps(M, A);
}
__m128 test_mm_mask_cvtpbh_ps(__m128 S, __mmask8 M, __m128bh A) {
- // CHECK-LABEL: @test_mm_mask_cvtpbh_ps
+ // CHECK-LABEL: test_mm_mask_cvtpbh_ps
// CHECK: sext <4 x i16> %{{.*}} to <4 x i32>
- // CHECK: @llvm.x86.sse2.pslli.d
+ // CHECK: call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %{{.*}}, i32 %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
- // CHECK: ret <4 x float> %{{.*}}
return _mm_mask_cvtpbh_ps(S, M, A);
}
__m256 test_mm256_mask_cvtpbh_ps(__m256 S, __mmask8 M, __m128bh A) {
- // CHECK-LABEL: @test_mm256_mask_cvtpbh_ps
+ // CHECK-LABEL: test_mm256_mask_cvtpbh_ps
// CHECK: sext <8 x i16> %{{.*}} to <8 x i32>
- // CHECK: @llvm.x86.avx2.pslli.d
+ // CHECK: call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %{{.*}}, i32 %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
- // CHECK: ret <8 x float> %{{.*}}
return _mm256_mask_cvtpbh_ps(S, M, A);
}
diff --git a/clang/test/CodeGen/X86/avx512vlbitalg-builtins.c b/clang/test/CodeGen/X86/avx512vlbitalg-builtins.c
index 4e65da0..e0b55c6 100644
--- a/clang/test/CodeGen/X86/avx512vlbitalg-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbitalg-builtins.c
@@ -1,105 +1,131 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__m256i test_mm256_popcnt_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_popcnt_epi16
+ // CHECK-LABEL: test_mm256_popcnt_epi16
// CHECK: @llvm.ctpop.v16i16
return _mm256_popcnt_epi16(__A);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_popcnt_epi16((__m256i)(__v16hi){+5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025}), 2, 15, 14, 1, 0, 8, 1, 9, 2, 2, 4, 2, 6, 2, 9, 2));
__m256i test_mm256_mask_popcnt_epi16(__m256i __A, __mmask16 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_popcnt_epi16
+ // CHECK-LABEL: test_mm256_mask_popcnt_epi16
// CHECK: @llvm.ctpop.v16i16
// CHECK: select <16 x i1> %{{[0-9]+}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_popcnt_epi16(__A, __U, __B);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_popcnt_epi16(_mm256_set1_epi16(-1), 0xF0F0, (__m256i)(__v16hi){+5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025}), -1, -1, -1, -1, 0, 8, 1, 9, -1, -1, -1, -1, 6, 2, 9, 2));
+
__m256i test_mm256_maskz_popcnt_epi16(__mmask16 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_popcnt_epi16
+ // CHECK-LABEL: test_mm256_maskz_popcnt_epi16
// CHECK: @llvm.ctpop.v16i16
// CHECK: select <16 x i1> %{{[0-9]+}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_popcnt_epi16(__U, __B);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_popcnt_epi16(0x0F0F, (__m256i)(__v16hi){+5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025}), 2, 15, 14, 1, 0, 0, 0, 0, 2, 2, 4, 2, 0, 0, 0, 0));
__m128i test_mm_popcnt_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_popcnt_epi16
+ // CHECK-LABEL: test_mm_popcnt_epi16
// CHECK: @llvm.ctpop.v8i16
return _mm_popcnt_epi16(__A);
}
+TEST_CONSTEXPR(match_v8hi(_mm_popcnt_epi16((__m128i)(__v8hi){+5, -3, -10, +8, 0, -256, +256, -128}), 2, 15, 14, 1, 0, 8, 1, 9));
__m128i test_mm_mask_popcnt_epi16(__m128i __A, __mmask8 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_popcnt_epi16
+ // CHECK-LABEL: test_mm_mask_popcnt_epi16
// CHECK: @llvm.ctpop.v8i16
// CHECK: select <8 x i1> %{{[0-9]+}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_popcnt_epi16(__A, __U, __B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mask_popcnt_epi16(_mm_set1_epi16(-1), 0xF0, (__m128i)(__v8hi){+5, -3, -10, +8, 0, -256, +256, -128}), -1, -1, -1, -1, 0, 8, 1, 9));
+
__m128i test_mm_maskz_popcnt_epi16(__mmask8 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_popcnt_epi16
+ // CHECK-LABEL: test_mm_maskz_popcnt_epi16
// CHECK: @llvm.ctpop.v8i16
// CHECK: select <8 x i1> %{{[0-9]+}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_popcnt_epi16(__U, __B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_popcnt_epi16(0x0F, (__m128i)(__v8hi){+5, -3, -10, +8, 0, -256, +256, -128}), 2, 15, 14, 1, 0, 0, 0, 0));
__m256i test_mm256_popcnt_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_popcnt_epi8
+ // CHECK-LABEL: test_mm256_popcnt_epi8
// CHECK: @llvm.ctpop.v32i8
return _mm256_popcnt_epi8(__A);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_popcnt_epi8((__m256i)(__v32qi){+5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73}), 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3, 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3));
__m256i test_mm256_mask_popcnt_epi8(__m256i __A, __mmask32 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_popcnt_epi8
+ // CHECK-LABEL: test_mm256_mask_popcnt_epi8
// CHECK: @llvm.ctpop.v32i8
// CHECK: select <32 x i1> %{{[0-9]+}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_popcnt_epi8(__A, __U, __B);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_popcnt_epi8(_mm256_set1_epi8(-1), 0xF00F, (__m256i)(__v32qi){+5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73}), 2, 7, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, 6, 2, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1));
+
__m256i test_mm256_maskz_popcnt_epi8(__mmask32 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_popcnt_epi8
+ // CHECK-LABEL: test_mm256_maskz_popcnt_epi8
// CHECK: @llvm.ctpop.v32i8
// CHECK: select <32 x i1> %{{[0-9]+}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_popcnt_epi8(__U, __B);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_maskz_popcnt_epi8(0x0FF0, (__m256i)(__v32qi){+5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73}), 0, 0, 0, 0, 0, 4, 1, 4, 2, 2, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
__m128i test_mm_popcnt_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_popcnt_epi8
+ // CHECK-LABEL: test_mm_popcnt_epi8
// CHECK: @llvm.ctpop.v16i8
return _mm_popcnt_epi8(__A);
}
+TEST_CONSTEXPR(match_v16qi(_mm_popcnt_epi8((__m128i)(__v16qi){+5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73}), 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3));
__m128i test_mm_mask_popcnt_epi8(__m128i __A, __mmask16 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_popcnt_epi8
+ // CHECK-LABEL: test_mm_mask_popcnt_epi8
// CHECK: @llvm.ctpop.v16i8
// CHECK: select <16 x i1> %{{[0-9]+}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_popcnt_epi8(__A, __U, __B);
}
+TEST_CONSTEXPR(match_v16qi(_mm_mask_popcnt_epi8(_mm_set1_epi8(-1), 0xF00F, (__m128i)(__v16qi){+5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73}), 2, 7, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, 6, 2, 4, 3));
+
__m128i test_mm_maskz_popcnt_epi8(__mmask16 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_popcnt_epi8
+ // CHECK-LABEL: test_mm_maskz_popcnt_epi8
// CHECK: @llvm.ctpop.v16i8
// CHECK: select <16 x i1> %{{[0-9]+}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_popcnt_epi8(__U, __B);
}
+TEST_CONSTEXPR(match_v16qi(_mm_maskz_popcnt_epi8(0x0FF0, (__m128i)(__v16qi){+5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73}), 0, 0, 0, 0, 0, 4, 1, 4, 2, 2, 4, 2, 0, 0, 0, 0));
__mmask32 test_mm256_mask_bitshuffle_epi64_mask(__mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_bitshuffle_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_bitshuffle_epi64_mask
// CHECK: @llvm.x86.avx512.vpshufbitqmb.256
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return _mm256_mask_bitshuffle_epi64_mask(__U, __A, __B);
}
__mmask32 test_mm256_bitshuffle_epi64_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_bitshuffle_epi64_mask
+ // CHECK-LABEL: test_mm256_bitshuffle_epi64_mask
// CHECK: @llvm.x86.avx512.vpshufbitqmb.256
return _mm256_bitshuffle_epi64_mask(__A, __B);
}
__mmask16 test_mm_mask_bitshuffle_epi64_mask(__mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_bitshuffle_epi64_mask
+ // CHECK-LABEL: test_mm_mask_bitshuffle_epi64_mask
// CHECK: @llvm.x86.avx512.vpshufbitqmb.128
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return _mm_mask_bitshuffle_epi64_mask(__U, __A, __B);
}
__mmask16 test_mm_bitshuffle_epi64_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_bitshuffle_epi64_mask
+ // CHECK-LABEL: test_mm_bitshuffle_epi64_mask
// CHECK: @llvm.x86.avx512.vpshufbitqmb.128
return _mm_bitshuffle_epi64_mask(__A, __B);
}
diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
index 4ec499c..dd24ed8 100644
--- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
@@ -1,732 +1,743 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx10.1-512 -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx10.1-512 -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx10.1-512 -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx10.1-512 -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx10.1-512 -emit-llvm -o - -Wall -Werror -Wsign-conversion -fexperimental-new-constant-interpreter | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__mmask32 test_mm256_cmpeq_epi8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpeq_epi8_mask
+ // CHECK-LABEL: test_mm256_cmpeq_epi8_mask
// CHECK: icmp eq <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmpeq_epi8_mask(__a, __b);
}
__mmask32 test_mm256_mask_cmpeq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpeq_epi8_mask
+ // CHECK-LABEL: test_mm256_mask_cmpeq_epi8_mask
// CHECK: icmp eq <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmpeq_epi8_mask(__u, __a, __b);
}
__mmask16 test_mm_cmpeq_epi8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpeq_epi8_mask
+ // CHECK-LABEL: test_mm_cmpeq_epi8_mask
// CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmpeq_epi8_mask(__a, __b);
}
__mmask16 test_mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpeq_epi8_mask
+ // CHECK-LABEL: test_mm_mask_cmpeq_epi8_mask
// CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmpeq_epi8_mask(__u, __a, __b);
}
__mmask16 test_mm256_cmpeq_epi16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpeq_epi16_mask
+ // CHECK-LABEL: test_mm256_cmpeq_epi16_mask
// CHECK: icmp eq <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmpeq_epi16_mask(__a, __b);
}
__mmask16 test_mm256_mask_cmpeq_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpeq_epi16_mask
+ // CHECK-LABEL: test_mm256_mask_cmpeq_epi16_mask
// CHECK: icmp eq <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmpeq_epi16_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpeq_epi16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpeq_epi16_mask
+ // CHECK-LABEL: test_mm_cmpeq_epi16_mask
// CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpeq_epi16_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpeq_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpeq_epi16_mask
+ // CHECK-LABEL: test_mm_mask_cmpeq_epi16_mask
// CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpeq_epi16_mask(__u, __a, __b);
}
__mmask32 test_mm256_cmpgt_epi8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpgt_epi8_mask
+ // CHECK-LABEL: test_mm256_cmpgt_epi8_mask
// CHECK: icmp sgt <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmpgt_epi8_mask(__a, __b);
}
__mmask32 test_mm256_mask_cmpgt_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpgt_epi8_mask
+ // CHECK-LABEL: test_mm256_mask_cmpgt_epi8_mask
// CHECK: icmp sgt <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmpgt_epi8_mask(__u, __a, __b);
}
__mmask16 test_mm_cmpgt_epi8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpgt_epi8_mask
+ // CHECK-LABEL: test_mm_cmpgt_epi8_mask
// CHECK: icmp sgt <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmpgt_epi8_mask(__a, __b);
}
__mmask16 test_mm_mask_cmpgt_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpgt_epi8_mask
+ // CHECK-LABEL: test_mm_mask_cmpgt_epi8_mask
// CHECK: icmp sgt <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmpgt_epi8_mask(__u, __a, __b);
}
__mmask16 test_mm256_cmpgt_epi16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpgt_epi16_mask
+ // CHECK-LABEL: test_mm256_cmpgt_epi16_mask
// CHECK: icmp sgt <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmpgt_epi16_mask(__a, __b);
}
__mmask16 test_mm256_mask_cmpgt_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpgt_epi16_mask
+ // CHECK-LABEL: test_mm256_mask_cmpgt_epi16_mask
// CHECK: icmp sgt <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmpgt_epi16_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpgt_epi16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpgt_epi16_mask
+ // CHECK-LABEL: test_mm_cmpgt_epi16_mask
// CHECK: icmp sgt <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpgt_epi16_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpgt_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpgt_epi16_mask
+ // CHECK-LABEL: test_mm_mask_cmpgt_epi16_mask
// CHECK: icmp sgt <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpgt_epi16_mask(__u, __a, __b);
}
__mmask16 test_mm_cmpeq_epu8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpeq_epu8_mask
+ // CHECK-LABEL: test_mm_cmpeq_epu8_mask
// CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmpeq_epu8_mask(__a, __b);
}
__mmask16 test_mm_mask_cmpeq_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpeq_epu8_mask
+ // CHECK-LABEL: test_mm_mask_cmpeq_epu8_mask
// CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmpeq_epu8_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpeq_epu16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpeq_epu16_mask
+ // CHECK-LABEL: test_mm_cmpeq_epu16_mask
// CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpeq_epu16_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpeq_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpeq_epu16_mask
+ // CHECK-LABEL: test_mm_mask_cmpeq_epu16_mask
// CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpeq_epu16_mask(__u, __a, __b);
}
__mmask32 test_mm256_cmpeq_epu8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpeq_epu8_mask
+ // CHECK-LABEL: test_mm256_cmpeq_epu8_mask
// CHECK: icmp eq <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmpeq_epu8_mask(__a, __b);
}
__mmask32 test_mm256_mask_cmpeq_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpeq_epu8_mask
+ // CHECK-LABEL: test_mm256_mask_cmpeq_epu8_mask
// CHECK: icmp eq <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmpeq_epu8_mask(__u, __a, __b);
}
__mmask16 test_mm256_cmpeq_epu16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpeq_epu16_mask
+ // CHECK-LABEL: test_mm256_cmpeq_epu16_mask
// CHECK: icmp eq <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmpeq_epu16_mask(__a, __b);
}
__mmask16 test_mm256_mask_cmpeq_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpeq_epu16_mask
+ // CHECK-LABEL: test_mm256_mask_cmpeq_epu16_mask
// CHECK: icmp eq <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmpeq_epu16_mask(__u, __a, __b);
}
__mmask16 test_mm_cmpgt_epu8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpgt_epu8_mask
+ // CHECK-LABEL: test_mm_cmpgt_epu8_mask
// CHECK: icmp ugt <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmpgt_epu8_mask(__a, __b);
}
__mmask16 test_mm_mask_cmpgt_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpgt_epu8_mask
+ // CHECK-LABEL: test_mm_mask_cmpgt_epu8_mask
// CHECK: icmp ugt <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmpgt_epu8_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpgt_epu16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpgt_epu16_mask
+ // CHECK-LABEL: test_mm_cmpgt_epu16_mask
// CHECK: icmp ugt <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpgt_epu16_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpgt_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpgt_epu16_mask
+ // CHECK-LABEL: test_mm_mask_cmpgt_epu16_mask
// CHECK: icmp ugt <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpgt_epu16_mask(__u, __a, __b);
}
__mmask32 test_mm256_cmpgt_epu8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpgt_epu8_mask
+ // CHECK-LABEL: test_mm256_cmpgt_epu8_mask
// CHECK: icmp ugt <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmpgt_epu8_mask(__a, __b);
}
__mmask32 test_mm256_mask_cmpgt_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpgt_epu8_mask
+ // CHECK-LABEL: test_mm256_mask_cmpgt_epu8_mask
// CHECK: icmp ugt <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmpgt_epu8_mask(__u, __a, __b);
}
__mmask16 test_mm256_cmpgt_epu16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpgt_epu16_mask
+ // CHECK-LABEL: test_mm256_cmpgt_epu16_mask
// CHECK: icmp ugt <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmpgt_epu16_mask(__a, __b);
}
__mmask16 test_mm256_mask_cmpgt_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpgt_epu16_mask
+ // CHECK-LABEL: test_mm256_mask_cmpgt_epu16_mask
// CHECK: icmp ugt <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmpgt_epu16_mask(__u, __a, __b);
}
__mmask16 test_mm_cmpge_epi8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpge_epi8_mask
+ // CHECK-LABEL: test_mm_cmpge_epi8_mask
// CHECK: icmp sge <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmpge_epi8_mask(__a, __b);
}
__mmask16 test_mm_mask_cmpge_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpge_epi8_mask
+ // CHECK-LABEL: test_mm_mask_cmpge_epi8_mask
// CHECK: icmp sge <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmpge_epi8_mask(__u, __a, __b);
}
__mmask16 test_mm_cmpge_epu8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpge_epu8_mask
+ // CHECK-LABEL: test_mm_cmpge_epu8_mask
// CHECK: icmp uge <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmpge_epu8_mask(__a, __b);
}
__mmask16 test_mm_mask_cmpge_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpge_epu8_mask
+ // CHECK-LABEL: test_mm_mask_cmpge_epu8_mask
// CHECK: icmp uge <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmpge_epu8_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpge_epi16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpge_epi16_mask
+ // CHECK-LABEL: test_mm_cmpge_epi16_mask
// CHECK: icmp sge <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpge_epi16_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpge_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpge_epi16_mask
+ // CHECK-LABEL: test_mm_mask_cmpge_epi16_mask
// CHECK: icmp sge <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpge_epi16_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpge_epu16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpge_epu16_mask
+ // CHECK-LABEL: test_mm_cmpge_epu16_mask
// CHECK: icmp uge <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpge_epu16_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpge_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpge_epu16_mask
+ // CHECK-LABEL: test_mm_mask_cmpge_epu16_mask
// CHECK: icmp uge <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpge_epu16_mask(__u, __a, __b);
}
__mmask32 test_mm256_cmpge_epi8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpge_epi8_mask
+ // CHECK-LABEL: test_mm256_cmpge_epi8_mask
// CHECK: icmp sge <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmpge_epi8_mask(__a, __b);
}
__mmask32 test_mm256_mask_cmpge_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpge_epi8_mask
+ // CHECK-LABEL: test_mm256_mask_cmpge_epi8_mask
// CHECK: icmp sge <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmpge_epi8_mask(__u, __a, __b);
}
__mmask32 test_mm256_cmpge_epu8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpge_epu8_mask
+ // CHECK-LABEL: test_mm256_cmpge_epu8_mask
// CHECK: icmp uge <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmpge_epu8_mask(__a, __b);
}
__mmask32 test_mm256_mask_cmpge_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpge_epu8_mask
+ // CHECK-LABEL: test_mm256_mask_cmpge_epu8_mask
// CHECK: icmp uge <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmpge_epu8_mask(__u, __a, __b);
}
__mmask16 test_mm256_cmpge_epi16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpge_epi16_mask
+ // CHECK-LABEL: test_mm256_cmpge_epi16_mask
// CHECK: icmp sge <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmpge_epi16_mask(__a, __b);
}
__mmask16 test_mm256_mask_cmpge_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpge_epi16_mask
+ // CHECK-LABEL: test_mm256_mask_cmpge_epi16_mask
// CHECK: icmp sge <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmpge_epi16_mask(__u, __a, __b);
}
__mmask16 test_mm256_cmpge_epu16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpge_epu16_mask
+ // CHECK-LABEL: test_mm256_cmpge_epu16_mask
// CHECK: icmp uge <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmpge_epu16_mask(__a, __b);
}
__mmask16 test_mm256_mask_cmpge_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpge_epu16_mask
+ // CHECK-LABEL: test_mm256_mask_cmpge_epu16_mask
// CHECK: icmp uge <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmpge_epu16_mask(__u, __a, __b);
}
__mmask16 test_mm_cmple_epi8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmple_epi8_mask
+ // CHECK-LABEL: test_mm_cmple_epi8_mask
// CHECK: icmp sle <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmple_epi8_mask(__a, __b);
}
__mmask16 test_mm_mask_cmple_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmple_epi8_mask
+ // CHECK-LABEL: test_mm_mask_cmple_epi8_mask
// CHECK: icmp sle <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmple_epi8_mask(__u, __a, __b);
}
__mmask16 test_mm_cmple_epu8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmple_epu8_mask
+ // CHECK-LABEL: test_mm_cmple_epu8_mask
// CHECK: icmp ule <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmple_epu8_mask(__a, __b);
}
__mmask16 test_mm_mask_cmple_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmple_epu8_mask
+ // CHECK-LABEL: test_mm_mask_cmple_epu8_mask
// CHECK: icmp ule <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmple_epu8_mask(__u, __a, __b);
}
__mmask8 test_mm_cmple_epi16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmple_epi16_mask
+ // CHECK-LABEL: test_mm_cmple_epi16_mask
// CHECK: icmp sle <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmple_epi16_mask(__a, __b);
}
__mmask8 test_mm_mask_cmple_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmple_epi16_mask
+ // CHECK-LABEL: test_mm_mask_cmple_epi16_mask
// CHECK: icmp sle <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmple_epi16_mask(__u, __a, __b);
}
__mmask8 test_mm_cmple_epu16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmple_epu16_mask
+ // CHECK-LABEL: test_mm_cmple_epu16_mask
// CHECK: icmp ule <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmple_epu16_mask(__a, __b);
}
__mmask8 test_mm_mask_cmple_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmple_epu16_mask
+ // CHECK-LABEL: test_mm_mask_cmple_epu16_mask
// CHECK: icmp ule <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmple_epu16_mask(__u, __a, __b);
}
__mmask32 test_mm256_cmple_epi8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmple_epi8_mask
+ // CHECK-LABEL: test_mm256_cmple_epi8_mask
// CHECK: icmp sle <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmple_epi8_mask(__a, __b);
}
__mmask32 test_mm256_mask_cmple_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmple_epi8_mask
+ // CHECK-LABEL: test_mm256_mask_cmple_epi8_mask
// CHECK: icmp sle <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmple_epi8_mask(__u, __a, __b);
}
__mmask32 test_mm256_cmple_epu8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmple_epu8_mask
+ // CHECK-LABEL: test_mm256_cmple_epu8_mask
// CHECK: icmp ule <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmple_epu8_mask(__a, __b);
}
__mmask32 test_mm256_mask_cmple_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmple_epu8_mask
+ // CHECK-LABEL: test_mm256_mask_cmple_epu8_mask
// CHECK: icmp ule <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmple_epu8_mask(__u, __a, __b);
}
__mmask16 test_mm256_cmple_epi16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmple_epi16_mask
+ // CHECK-LABEL: test_mm256_cmple_epi16_mask
// CHECK: icmp sle <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmple_epi16_mask(__a, __b);
}
__mmask16 test_mm256_mask_cmple_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmple_epi16_mask
+ // CHECK-LABEL: test_mm256_mask_cmple_epi16_mask
// CHECK: icmp sle <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmple_epi16_mask(__u, __a, __b);
}
__mmask16 test_mm256_cmple_epu16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmple_epu16_mask
+ // CHECK-LABEL: test_mm256_cmple_epu16_mask
// CHECK: icmp ule <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmple_epu16_mask(__a, __b);
}
__mmask16 test_mm256_mask_cmple_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmple_epu16_mask
+ // CHECK-LABEL: test_mm256_mask_cmple_epu16_mask
// CHECK: icmp ule <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmple_epu16_mask(__u, __a, __b);
}
__mmask16 test_mm_cmplt_epi8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmplt_epi8_mask
+ // CHECK-LABEL: test_mm_cmplt_epi8_mask
// CHECK: icmp slt <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmplt_epi8_mask(__a, __b);
}
__mmask16 test_mm_mask_cmplt_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmplt_epi8_mask
+ // CHECK-LABEL: test_mm_mask_cmplt_epi8_mask
// CHECK: icmp slt <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmplt_epi8_mask(__u, __a, __b);
}
__mmask16 test_mm_cmplt_epu8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmplt_epu8_mask
+ // CHECK-LABEL: test_mm_cmplt_epu8_mask
// CHECK: icmp ult <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmplt_epu8_mask(__a, __b);
}
__mmask16 test_mm_mask_cmplt_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmplt_epu8_mask
+ // CHECK-LABEL: test_mm_mask_cmplt_epu8_mask
// CHECK: icmp ult <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmplt_epu8_mask(__u, __a, __b);
}
__mmask8 test_mm_cmplt_epi16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmplt_epi16_mask
+ // CHECK-LABEL: test_mm_cmplt_epi16_mask
// CHECK: icmp slt <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmplt_epi16_mask(__a, __b);
}
__mmask8 test_mm_mask_cmplt_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmplt_epi16_mask
+ // CHECK-LABEL: test_mm_mask_cmplt_epi16_mask
// CHECK: icmp slt <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmplt_epi16_mask(__u, __a, __b);
}
__mmask8 test_mm_cmplt_epu16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmplt_epu16_mask
+ // CHECK-LABEL: test_mm_cmplt_epu16_mask
// CHECK: icmp ult <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmplt_epu16_mask(__a, __b);
}
__mmask8 test_mm_mask_cmplt_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmplt_epu16_mask
+ // CHECK-LABEL: test_mm_mask_cmplt_epu16_mask
// CHECK: icmp ult <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmplt_epu16_mask(__u, __a, __b);
}
__mmask32 test_mm256_cmplt_epi8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmplt_epi8_mask
+ // CHECK-LABEL: test_mm256_cmplt_epi8_mask
// CHECK: icmp slt <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmplt_epi8_mask(__a, __b);
}
__mmask32 test_mm256_mask_cmplt_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmplt_epi8_mask
+ // CHECK-LABEL: test_mm256_mask_cmplt_epi8_mask
// CHECK: icmp slt <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmplt_epi8_mask(__u, __a, __b);
}
__mmask32 test_mm256_cmplt_epu8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmplt_epu8_mask
+ // CHECK-LABEL: test_mm256_cmplt_epu8_mask
// CHECK: icmp ult <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmplt_epu8_mask(__a, __b);
}
__mmask32 test_mm256_mask_cmplt_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmplt_epu8_mask
+ // CHECK-LABEL: test_mm256_mask_cmplt_epu8_mask
// CHECK: icmp ult <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmplt_epu8_mask(__u, __a, __b);
}
__mmask16 test_mm256_cmplt_epi16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmplt_epi16_mask
+ // CHECK-LABEL: test_mm256_cmplt_epi16_mask
// CHECK: icmp slt <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmplt_epi16_mask(__a, __b);
}
__mmask16 test_mm256_mask_cmplt_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmplt_epi16_mask
+ // CHECK-LABEL: test_mm256_mask_cmplt_epi16_mask
// CHECK: icmp slt <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmplt_epi16_mask(__u, __a, __b);
}
__mmask16 test_mm256_cmplt_epu16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmplt_epu16_mask
+ // CHECK-LABEL: test_mm256_cmplt_epu16_mask
// CHECK: icmp ult <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmplt_epu16_mask(__a, __b);
}
__mmask16 test_mm256_mask_cmplt_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmplt_epu16_mask
+ // CHECK-LABEL: test_mm256_mask_cmplt_epu16_mask
// CHECK: icmp ult <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmplt_epu16_mask(__u, __a, __b);
}
__mmask16 test_mm_cmpneq_epi8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpneq_epi8_mask
+ // CHECK-LABEL: test_mm_cmpneq_epi8_mask
// CHECK: icmp ne <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmpneq_epi8_mask(__a, __b);
}
__mmask16 test_mm_mask_cmpneq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpneq_epi8_mask
+ // CHECK-LABEL: test_mm_mask_cmpneq_epi8_mask
// CHECK: icmp ne <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmpneq_epi8_mask(__u, __a, __b);
}
__mmask16 test_mm_cmpneq_epu8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpneq_epu8_mask
+ // CHECK-LABEL: test_mm_cmpneq_epu8_mask
// CHECK: icmp ne <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmpneq_epu8_mask(__a, __b);
}
__mmask16 test_mm_mask_cmpneq_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpneq_epu8_mask
+ // CHECK-LABEL: test_mm_mask_cmpneq_epu8_mask
// CHECK: icmp ne <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmpneq_epu8_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpneq_epi16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpneq_epi16_mask
+ // CHECK-LABEL: test_mm_cmpneq_epi16_mask
// CHECK: icmp ne <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpneq_epi16_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpneq_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpneq_epi16_mask
+ // CHECK-LABEL: test_mm_mask_cmpneq_epi16_mask
// CHECK: icmp ne <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpneq_epi16_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpneq_epu16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpneq_epu16_mask
+ // CHECK-LABEL: test_mm_cmpneq_epu16_mask
// CHECK: icmp ne <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpneq_epu16_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpneq_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpneq_epu16_mask
+ // CHECK-LABEL: test_mm_mask_cmpneq_epu16_mask
// CHECK: icmp ne <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpneq_epu16_mask(__u, __a, __b);
}
__mmask32 test_mm256_cmpneq_epi8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpneq_epi8_mask
+ // CHECK-LABEL: test_mm256_cmpneq_epi8_mask
// CHECK: icmp ne <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmpneq_epi8_mask(__a, __b);
}
__mmask32 test_mm256_mask_cmpneq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpneq_epi8_mask
+ // CHECK-LABEL: test_mm256_mask_cmpneq_epi8_mask
// CHECK: icmp ne <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmpneq_epi8_mask(__u, __a, __b);
}
__mmask32 test_mm256_cmpneq_epu8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpneq_epu8_mask
+ // CHECK-LABEL: test_mm256_cmpneq_epu8_mask
// CHECK: icmp ne <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmpneq_epu8_mask(__a, __b);
}
__mmask32 test_mm256_mask_cmpneq_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpneq_epu8_mask
+ // CHECK-LABEL: test_mm256_mask_cmpneq_epu8_mask
// CHECK: icmp ne <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmpneq_epu8_mask(__u, __a, __b);
}
__mmask16 test_mm256_cmpneq_epi16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpneq_epi16_mask
+ // CHECK-LABEL: test_mm256_cmpneq_epi16_mask
// CHECK: icmp ne <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmpneq_epi16_mask(__a, __b);
}
__mmask16 test_mm256_mask_cmpneq_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpneq_epi16_mask
+ // CHECK-LABEL: test_mm256_mask_cmpneq_epi16_mask
// CHECK: icmp ne <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmpneq_epi16_mask(__u, __a, __b);
}
__mmask16 test_mm256_cmpneq_epu16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpneq_epu16_mask
+ // CHECK-LABEL: test_mm256_cmpneq_epu16_mask
// CHECK: icmp ne <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmpneq_epu16_mask(__a, __b);
}
__mmask16 test_mm256_mask_cmpneq_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpneq_epu16_mask
+ // CHECK-LABEL: test_mm256_mask_cmpneq_epu16_mask
// CHECK: icmp ne <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmpneq_epu16_mask(__u, __a, __b);
}
__mmask16 test_mm_cmp_epi8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmp_epi8_mask
+ // CHECK-LABEL: test_mm_cmp_epi8_mask
// CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmp_epi8_mask(__a, __b, 0);
}
__mmask16 test_mm_mask_cmp_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmp_epi8_mask
+ // CHECK-LABEL: test_mm_mask_cmp_epi8_mask
// CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmp_epi8_mask(__u, __a, __b, 0);
}
__mmask16 test_mm_cmp_epu8_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmp_epu8_mask
+ // CHECK-LABEL: test_mm_cmp_epu8_mask
// CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
return (__mmask16)_mm_cmp_epu8_mask(__a, __b, 0);
}
__mmask16 test_mm_mask_cmp_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmp_epu8_mask
+ // CHECK-LABEL: test_mm_mask_cmp_epu8_mask
// CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm_mask_cmp_epu8_mask(__u, __a, __b, 0);
}
__mmask8 test_mm_cmp_epi16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmp_epi16_mask
+ // CHECK-LABEL: test_mm_cmp_epi16_mask
// CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmp_epi16_mask(__a, __b, 0);
}
__mmask8 test_mm_mask_cmp_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmp_epi16_mask
+ // CHECK-LABEL: test_mm_mask_cmp_epi16_mask
// CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmp_epi16_mask(__u, __a, __b, 0);
}
__mmask8 test_mm_cmp_epu16_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmp_epu16_mask
+ // CHECK-LABEL: test_mm_cmp_epu16_mask
// CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmp_epu16_mask(__a, __b, 0);
}
__mmask8 test_mm_mask_cmp_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmp_epu16_mask
+ // CHECK-LABEL: test_mm_mask_cmp_epu16_mask
// CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmp_epu16_mask(__u, __a, __b, 0);
}
__mmask32 test_mm256_cmp_epi8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmp_epi8_mask
+ // CHECK-LABEL: test_mm256_cmp_epi8_mask
// CHECK: icmp eq <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmp_epi8_mask(__a, __b, 0);
}
__mmask32 test_mm256_mask_cmp_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_epi8_mask
+ // CHECK-LABEL: test_mm256_mask_cmp_epi8_mask
// CHECK: icmp eq <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmp_epi8_mask(__u, __a, __b, 0);
}
__mmask32 test_mm256_cmp_epu8_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmp_epu8_mask
+ // CHECK-LABEL: test_mm256_cmp_epu8_mask
// CHECK: icmp eq <32 x i8> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_cmp_epu8_mask(__a, __b, 0);
}
__mmask32 test_mm256_mask_cmp_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_epu8_mask
+ // CHECK-LABEL: test_mm256_mask_cmp_epu8_mask
// CHECK: icmp eq <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return (__mmask32)_mm256_mask_cmp_epu8_mask(__u, __a, __b, 0);
}
__mmask16 test_mm256_cmp_epi16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmp_epi16_mask
+ // CHECK-LABEL: test_mm256_cmp_epi16_mask
// CHECK: icmp eq <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmp_epi16_mask(__a, __b, 0);
}
__mmask16 test_mm256_mask_cmp_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_epi16_mask
+ // CHECK-LABEL: test_mm256_mask_cmp_epi16_mask
// CHECK: icmp eq <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmp_epi16_mask(__u, __a, __b, 0);
}
__mmask16 test_mm256_cmp_epu16_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmp_epu16_mask
+ // CHECK-LABEL: test_mm256_cmp_epu16_mask
// CHECK: icmp eq <16 x i16> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_cmp_epu16_mask(__a, __b, 0);
}
__mmask16 test_mm256_mask_cmp_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_epu16_mask
+ // CHECK-LABEL: test_mm256_mask_cmp_epu16_mask
// CHECK: icmp eq <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return (__mmask16)_mm256_mask_cmp_epu16_mask(__u, __a, __b, 0);
@@ -734,139 +745,139 @@ __mmask16 test_mm256_mask_cmp_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b
__m256i test_mm256_mask_add_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B){
- //CHECK-LABEL: @test_mm256_mask_add_epi8
+ //CHECK-LABEL: test_mm256_mask_add_epi8
//CHECK: add <32 x i8> %{{.*}}, %{{.*}}
//CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_add_epi8(__W, __U , __A, __B);
}
__m256i test_mm256_maskz_add_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_add_epi8
+ //CHECK-LABEL: test_mm256_maskz_add_epi8
//CHECK: add <32 x i8> %{{.*}}, %{{.*}}
//CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_add_epi8(__U , __A, __B);
}
__m256i test_mm256_mask_add_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_add_epi16
+ //CHECK-LABEL: test_mm256_mask_add_epi16
//CHECK: add <16 x i16> %{{.*}}, %{{.*}}
//CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_add_epi16(__W, __U , __A, __B);
}
__m256i test_mm256_maskz_add_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_add_epi16
+ //CHECK-LABEL: test_mm256_maskz_add_epi16
//CHECK: add <16 x i16> %{{.*}}, %{{.*}}
//CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_add_epi16(__U , __A, __B);
}
__m256i test_mm256_mask_sub_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_sub_epi8
+ //CHECK-LABEL: test_mm256_mask_sub_epi8
//CHECK: sub <32 x i8> %{{.*}}, %{{.*}}
//CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_sub_epi8(__W, __U , __A, __B);
}
__m256i test_mm256_maskz_sub_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_sub_epi8
+ //CHECK-LABEL: test_mm256_maskz_sub_epi8
//CHECK: sub <32 x i8> %{{.*}}, %{{.*}}
//CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_sub_epi8(__U , __A, __B);
}
__m256i test_mm256_mask_sub_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_sub_epi16
+ //CHECK-LABEL: test_mm256_mask_sub_epi16
//CHECK: sub <16 x i16> %{{.*}}, %{{.*}}
//CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_sub_epi16(__W, __U , __A, __B);
}
__m256i test_mm256_maskz_sub_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_sub_epi16
+ //CHECK-LABEL: test_mm256_maskz_sub_epi16
//CHECK: sub <16 x i16> %{{.*}}, %{{.*}}
//CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_sub_epi16(__U , __A, __B);
}
__m128i test_mm_mask_add_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_add_epi8
+ //CHECK-LABEL: test_mm_mask_add_epi8
//CHECK: add <16 x i8> %{{.*}}, %{{.*}}
//CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_add_epi8(__W, __U , __A, __B);
}
__m128i test_mm_maskz_add_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_add_epi8
+ //CHECK-LABEL: test_mm_maskz_add_epi8
//CHECK: add <16 x i8> %{{.*}}, %{{.*}}
//CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_add_epi8(__U , __A, __B);
}
__m128i test_mm_mask_add_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_add_epi16
+ //CHECK-LABEL: test_mm_mask_add_epi16
//CHECK: add <8 x i16> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_add_epi16(__W, __U , __A, __B);
}
__m128i test_mm_maskz_add_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_add_epi16
+ //CHECK-LABEL: test_mm_maskz_add_epi16
//CHECK: add <8 x i16> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_add_epi16(__U , __A, __B);
}
__m128i test_mm_mask_sub_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_sub_epi8
+ //CHECK-LABEL: test_mm_mask_sub_epi8
//CHECK: sub <16 x i8> %{{.*}}, %{{.*}}
//CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_sub_epi8(__W, __U , __A, __B);
}
__m128i test_mm_maskz_sub_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_sub_epi8
+ //CHECK-LABEL: test_mm_maskz_sub_epi8
//CHECK: sub <16 x i8> %{{.*}}, %{{.*}}
//CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_sub_epi8(__U , __A, __B);
}
__m128i test_mm_mask_sub_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_sub_epi16
+ //CHECK-LABEL: test_mm_mask_sub_epi16
//CHECK: sub <8 x i16> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_sub_epi16(__W, __U , __A, __B);
}
__m128i test_mm_maskz_sub_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_sub_epi16
+ //CHECK-LABEL: test_mm_maskz_sub_epi16
//CHECK: sub <8 x i16> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_sub_epi16(__U , __A, __B);
}
__m256i test_mm256_mask_mullo_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_mullo_epi16
+ //CHECK-LABEL: test_mm256_mask_mullo_epi16
//CHECK: mul <16 x i16> %{{.*}}, %{{.*}}
//CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_mullo_epi16(__W, __U , __A, __B);
}
__m256i test_mm256_maskz_mullo_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_mullo_epi16
+ //CHECK-LABEL: test_mm256_maskz_mullo_epi16
//CHECK: mul <16 x i16> %{{.*}}, %{{.*}}
//CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_mullo_epi16(__U , __A, __B);
}
__m128i test_mm_mask_mullo_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_mullo_epi16
+ //CHECK-LABEL: test_mm_mask_mullo_epi16
//CHECK: mul <8 x i16> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_mullo_epi16(__W, __U , __A, __B);
}
__m128i test_mm_maskz_mullo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_mullo_epi16
+ //CHECK-LABEL: test_mm_maskz_mullo_epi16
//CHECK: mul <8 x i16> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_mullo_epi16(__U , __A, __B);
@@ -874,359 +885,367 @@ __m128i test_mm_maskz_mullo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
__m128i test_mm_mask_blend_epi8(__mmask16 __U, __m128i __A, __m128i __W) {
- // CHECK-LABEL: @test_mm_mask_blend_epi8
+ // CHECK-LABEL: test_mm_mask_blend_epi8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_blend_epi8(__U,__A,__W);
}
__m256i test_mm256_mask_blend_epi8(__mmask32 __U, __m256i __A, __m256i __W) {
- // CHECK-LABEL: @test_mm256_mask_blend_epi8
+ // CHECK-LABEL: test_mm256_mask_blend_epi8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_blend_epi8(__U,__A,__W);
}
__m128i test_mm_mask_blend_epi16(__mmask8 __U, __m128i __A, __m128i __W) {
- // CHECK-LABEL: @test_mm_mask_blend_epi16
+ // CHECK-LABEL: test_mm_mask_blend_epi16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_blend_epi16(__U,__A,__W);
}
__m256i test_mm256_mask_blend_epi16(__mmask16 __U, __m256i __A, __m256i __W) {
- // CHECK-LABEL: @test_mm256_mask_blend_epi16
+ // CHECK-LABEL: test_mm256_mask_blend_epi16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_blend_epi16(__U,__A,__W);
}
__m128i test_mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_abs_epi8
+ // CHECK-LABEL: test_mm_mask_abs_epi8
// CHECK: [[ABS:%.*]] = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <16 x i8> [[ABS]] to <2 x i64>
// CHECK: [[ABS:%.*]] = bitcast <2 x i64> [[TMP]] to <16 x i8>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> [[ABS]], <16 x i8> %{{.*}}
return _mm_mask_abs_epi8(__W,__U,__A);
}
+TEST_CONSTEXPR(match_v16qi(_mm_mask_abs_epi8((__m128i)(__v16qi){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, (__mmask16)0x0001, (__m128i)(__v16qi){(char)-1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}), 1, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99));
__m128i test_mm_maskz_abs_epi8(__mmask16 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_abs_epi8
+ // CHECK-LABEL: test_mm_maskz_abs_epi8
// CHECK: [[ABS:%.*]] = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <16 x i8> [[ABS]] to <2 x i64>
// CHECK: [[ABS:%.*]] = bitcast <2 x i64> [[TMP]] to <16 x i8>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> [[ABS]], <16 x i8> %{{.*}}
return _mm_maskz_abs_epi8(__U,__A);
}
+TEST_CONSTEXPR(match_v16qi(_mm_maskz_abs_epi8((__mmask16)0x5555, (__m128i)(__v16qi){(char)-1, 2, (char)-3, 4, (char)-5, 6, (char)-7, 8, (char)-9, 10, (char)-11, 12, (char)-13, 14, (char)-15, 16}), 1, 0, 3, 0, 5, 0, 7, 0, 9, 0, 11, 0, 13, 0, 15, 0));
__m256i test_mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_abs_epi8
+ // CHECK-LABEL: test_mm256_mask_abs_epi8
// CHECK: [[ABS:%.*]] = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <32 x i8> [[ABS]] to <4 x i64>
// CHECK: [[ABS:%.*]] = bitcast <4 x i64> [[TMP]] to <32 x i8>
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> [[ABS]], <32 x i8> %{{.*}}
return _mm256_mask_abs_epi8(__W,__U,__A);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_abs_epi8((__m256i)(__v32qi){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, (__mmask32)0x00000001, (__m256i)(__v32qi){(char)-1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}), 1, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99));
__m256i test_mm256_maskz_abs_epi8(__mmask32 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_abs_epi8
+ // CHECK-LABEL: test_mm256_maskz_abs_epi8
// CHECK: [[ABS:%.*]] = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <32 x i8> [[ABS]] to <4 x i64>
// CHECK: [[ABS:%.*]] = bitcast <4 x i64> [[TMP]] to <32 x i8>
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> [[ABS]], <32 x i8> %{{.*}}
return _mm256_maskz_abs_epi8(__U,__A);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_maskz_abs_epi8((__mmask32)0x55555555, (__m256i)(__v32qi){(char)-1, 2, (char)-3, 4, (char)-5, 6, (char)-7, 8, (char)-9, 10, (char)-11, 12, (char)-13, 14, (char)-15, 16, (char)-17, 18, (char)-19, 20, (char)-21, 22, (char)-23, 24, (char)-25, 26, (char)-27, 28, (char)-29, 30, (char)-31, 32}), 1, 0, 3, 0, 5, 0, 7, 0, 9, 0, 11, 0, 13, 0, 15, 0, 17, 0, 19, 0, 21, 0, 23, 0, 25, 0, 27, 0, 29, 0, 31, 0));
__m128i test_mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_abs_epi16
+ // CHECK-LABEL: test_mm_mask_abs_epi16
// CHECK: [[ABS:%.*]] = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <8 x i16> [[ABS]] to <2 x i64>
// CHECK: [[ABS:%.*]] = bitcast <2 x i64> [[TMP]] to <8 x i16>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> [[ABS]], <8 x i16> %{{.*}}
return _mm_mask_abs_epi16(__W,__U,__A);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mask_abs_epi16((__m128i)(__v8hi){99, 99, 99, 99, 99, 99, 99, 99}, (__mmask8)0x01, (__m128i)(__v8hi){-1, 2, 2, 2, 2, 2, 2, 2}), 1, 99, 99, 99, 99, 99, 99, 99));
__m128i test_mm_maskz_abs_epi16(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_abs_epi16
+ // CHECK-LABEL: test_mm_maskz_abs_epi16
// CHECK: [[ABS:%.*]] = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <8 x i16> [[ABS]] to <2 x i64>
// CHECK: [[ABS:%.*]] = bitcast <2 x i64> [[TMP]] to <8 x i16>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> [[ABS]], <8 x i16> %{{.*}}
return _mm_maskz_abs_epi16(__U,__A);
}
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_abs_epi16((__mmask8)0x55, (__m128i)(__v8hi){-1, 2, -3, 4, -5, 6, -7, 8}), 1, 0, 3, 0, 5, 0, 7, 0));
__m256i test_mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_abs_epi16
+ // CHECK-LABEL: test_mm256_mask_abs_epi16
// CHECK: [[ABS:%.*]] = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <16 x i16> [[ABS]] to <4 x i64>
// CHECK: [[ABS:%.*]] = bitcast <4 x i64> [[TMP]] to <16 x i16>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> [[ABS]], <16 x i16> %{{.*}}
return _mm256_mask_abs_epi16(__W,__U,__A);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_abs_epi16((__m256i)(__v16hi){99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, (__mmask16)0x0001, (__m256i)(__v16hi){-128, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}), 128, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99));
__m256i test_mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_abs_epi16
+ // CHECK-LABEL: test_mm256_maskz_abs_epi16
// CHECK: [[ABS:%.*]] = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <16 x i16> [[ABS]] to <4 x i64>
// CHECK: [[ABS:%.*]] = bitcast <4 x i64> [[TMP]] to <16 x i16>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> [[ABS]], <16 x i16> %{{.*}}
return _mm256_maskz_abs_epi16(__U,__A);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_abs_epi16((__mmask16)0x0001, (__m256i)(__v16hi){-128, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}), 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
__m128i test_mm_maskz_packs_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_packs_epi32
+ // CHECK-LABEL: test_mm_maskz_packs_epi32
// CHECK: @llvm.x86.sse2.packssdw
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_packs_epi32(__M,__A,__B);
}
__m128i test_mm_mask_packs_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_packs_epi32
+ // CHECK-LABEL: test_mm_mask_packs_epi32
// CHECK: @llvm.x86.sse2.packssdw
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_packs_epi32(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_packs_epi32
+ // CHECK-LABEL: test_mm256_maskz_packs_epi32
// CHECK: @llvm.x86.avx2.packssdw
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_packs_epi32(__M,__A,__B);
}
__m256i test_mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_packs_epi32
+ // CHECK-LABEL: test_mm256_mask_packs_epi32
// CHECK: @llvm.x86.avx2.packssdw
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_packs_epi32(__W,__M,__A,__B);
}
__m128i test_mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_packs_epi16
+ // CHECK-LABEL: test_mm_maskz_packs_epi16
// CHECK: @llvm.x86.sse2.packsswb
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_packs_epi16(__M,__A,__B);
}
__m128i test_mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_packs_epi16
+ // CHECK-LABEL: test_mm_mask_packs_epi16
// CHECK: @llvm.x86.sse2.packsswb
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_packs_epi16(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_packs_epi16
+ // CHECK-LABEL: test_mm256_maskz_packs_epi16
// CHECK: @llvm.x86.avx2.packsswb
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_packs_epi16(__M,__A,__B);
}
__m256i test_mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_packs_epi16
+ // CHECK-LABEL: test_mm256_mask_packs_epi16
// CHECK: @llvm.x86.avx2.packsswb
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_packs_epi16(__W,__M,__A,__B);
}
__m128i test_mm_mask_packus_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_packus_epi32
+ // CHECK-LABEL: test_mm_mask_packus_epi32
// CHECK: @llvm.x86.sse41.packusdw
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_packus_epi32(__W,__M,__A,__B);
}
__m128i test_mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_packus_epi32
+ // CHECK-LABEL: test_mm_maskz_packus_epi32
// CHECK: @llvm.x86.sse41.packusdw
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_packus_epi32(__M,__A,__B);
}
__m256i test_mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_packus_epi32
+ // CHECK-LABEL: test_mm256_maskz_packus_epi32
// CHECK: @llvm.x86.avx2.packusdw
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_packus_epi32(__M,__A,__B);
}
__m256i test_mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_packus_epi32
+ // CHECK-LABEL: test_mm256_mask_packus_epi32
// CHECK: @llvm.x86.avx2.packusdw
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_packus_epi32(__W,__M,__A,__B);
}
__m128i test_mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_packus_epi16
+ // CHECK-LABEL: test_mm_maskz_packus_epi16
// CHECK: @llvm.x86.sse2.packuswb
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_packus_epi16(__M,__A,__B);
}
__m128i test_mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_packus_epi16
+ // CHECK-LABEL: test_mm_mask_packus_epi16
// CHECK: @llvm.x86.sse2.packuswb
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_packus_epi16(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_packus_epi16
+ // CHECK-LABEL: test_mm256_maskz_packus_epi16
// CHECK: @llvm.x86.avx2.packuswb
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_packus_epi16(__M,__A,__B);
}
__m256i test_mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_packus_epi16
+ // CHECK-LABEL: test_mm256_mask_packus_epi16
// CHECK: @llvm.x86.avx2.packuswb
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_packus_epi16(__W,__M,__A,__B);
}
__m128i test_mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_adds_epi8
+ // CHECK-LABEL: test_mm_mask_adds_epi8
// CHECK: @llvm.sadd.sat.v16i8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_adds_epi8(__W,__U,__A,__B);
}
__m128i test_mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_adds_epi8
+ // CHECK-LABEL: test_mm_maskz_adds_epi8
// CHECK: @llvm.sadd.sat.v16i8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_adds_epi8(__U,__A,__B);
}
__m256i test_mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_adds_epi8
+ // CHECK-LABEL: test_mm256_mask_adds_epi8
// CHECK: @llvm.sadd.sat.v32i8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_adds_epi8(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_adds_epi8
+ // CHECK-LABEL: test_mm256_maskz_adds_epi8
// CHECK: @llvm.sadd.sat.v32i8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_adds_epi8(__U,__A,__B);
}
__m128i test_mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_adds_epi16
+ // CHECK-LABEL: test_mm_mask_adds_epi16
// CHECK: @llvm.sadd.sat.v8i16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_adds_epi16(__W,__U,__A,__B);
}
__m128i test_mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_adds_epi16
+ // CHECK-LABEL: test_mm_maskz_adds_epi16
// CHECK: @llvm.sadd.sat.v8i16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_adds_epi16(__U,__A,__B);
}
__m256i test_mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_adds_epi16
+ // CHECK-LABEL: test_mm256_mask_adds_epi16
// CHECK: @llvm.sadd.sat.v16i16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_adds_epi16(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_adds_epi16
+ // CHECK-LABEL: test_mm256_maskz_adds_epi16
// CHECK: @llvm.sadd.sat.v16i16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_adds_epi16(__U,__A,__B);
}
__m128i test_mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_adds_epu8
+ // CHECK-LABEL: test_mm_mask_adds_epu8
// CHECK-NOT: @llvm.x86.sse2.paddus.b
// CHECK: call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_adds_epu8(__W,__U,__A,__B);
}
__m128i test_mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_adds_epu8
+ // CHECK-LABEL: test_mm_maskz_adds_epu8
// CHECK-NOT: @llvm.x86.sse2.paddus.b
// CHECK: call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_adds_epu8(__U,__A,__B);
}
__m256i test_mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_adds_epu8
+ // CHECK-LABEL: test_mm256_mask_adds_epu8
// CHECK-NOT: @llvm.x86.avx2.paddus.b
// CHECK: call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_adds_epu8(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_adds_epu8
+ // CHECK-LABEL: test_mm256_maskz_adds_epu8
// CHECK-NOT: @llvm.x86.avx2.paddus.b
// CHECK: call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_adds_epu8(__U,__A,__B);
}
__m128i test_mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_adds_epu16
+ // CHECK-LABEL: test_mm_mask_adds_epu16
// CHECK-NOT: @llvm.x86.sse2.paddus.w
// CHECK: call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_adds_epu16(__W,__U,__A,__B);
}
__m128i test_mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_adds_epu16
+ // CHECK-LABEL: test_mm_maskz_adds_epu16
// CHECK-NOT: @llvm.x86.sse2.paddus.w
// CHECK: call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_adds_epu16(__U,__A,__B);
}
__m256i test_mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_adds_epu16
+ // CHECK-LABEL: test_mm256_mask_adds_epu16
// CHECK-NOT: @llvm.x86.avx2.paddus.w
// CHECK: call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_adds_epu16(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_adds_epu16
+ // CHECK-LABEL: test_mm256_maskz_adds_epu16
// CHECK-NOT: @llvm.x86.avx2.paddus.w
// CHECK: call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_adds_epu16(__U,__A,__B);
}
__m128i test_mm_mask_avg_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_avg_epu8
+ // CHECK-LABEL: test_mm_mask_avg_epu8
// CHECK: @llvm.x86.sse2.pavg.b
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_avg_epu8(__W,__U,__A,__B);
}
__m128i test_mm_maskz_avg_epu8(__mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_avg_epu8
+ // CHECK-LABEL: test_mm_maskz_avg_epu8
// CHECK: @llvm.x86.sse2.pavg.b
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_avg_epu8(__U,__A,__B);
}
__m256i test_mm256_mask_avg_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_avg_epu8
+ // CHECK-LABEL: test_mm256_mask_avg_epu8
// CHECK: @llvm.x86.avx2.pavg.b
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_avg_epu8(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_avg_epu8(__mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_avg_epu8
+ // CHECK-LABEL: test_mm256_maskz_avg_epu8
// CHECK: @llvm.x86.avx2.pavg.b
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_avg_epu8(__U,__A,__B);
}
__m128i test_mm_mask_avg_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_avg_epu16
+ // CHECK-LABEL: test_mm_mask_avg_epu16
// CHECK: @llvm.x86.sse2.pavg.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_avg_epu16(__W,__U,__A,__B);
}
__m128i test_mm_maskz_avg_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_avg_epu16
+ // CHECK-LABEL: test_mm_maskz_avg_epu16
// CHECK: @llvm.x86.sse2.pavg.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_avg_epu16(__U,__A,__B);
}
__m256i test_mm256_mask_avg_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_avg_epu16
+ // CHECK-LABEL: test_mm256_mask_avg_epu16
// CHECK: @llvm.x86.avx2.pavg.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_avg_epu16(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_avg_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_avg_epu16
+ // CHECK-LABEL: test_mm256_maskz_avg_epu16
// CHECK: @llvm.x86.avx2.pavg.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_avg_epu16(__U,__A,__B);
}
__m128i test_mm_maskz_max_epi8(__mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_max_epi8
+ // CHECK-LABEL: test_mm_maskz_max_epi8
// CHECK: [[RES:%.*]] = call <16 x i8> @llvm.smax.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1234,7 +1253,7 @@ __m128i test_mm_maskz_max_epi8(__mmask16 __M, __m128i __A, __m128i __B) {
return _mm_maskz_max_epi8(__M,__A,__B);
}
__m128i test_mm_mask_max_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_max_epi8
+ // CHECK-LABEL: test_mm_mask_max_epi8
// CHECK: [[RES:%.*]] = call <16 x i8> @llvm.smax.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1242,7 +1261,7 @@ __m128i test_mm_mask_max_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i _
return _mm_mask_max_epi8(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_max_epi8(__mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_epi8
+ // CHECK-LABEL: test_mm256_maskz_max_epi8
// CHECK: [[RES:%.*]] = call <32 x i8> @llvm.smax.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1250,7 +1269,7 @@ __m256i test_mm256_maskz_max_epi8(__mmask32 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_max_epi8(__M,__A,__B);
}
__m256i test_mm256_mask_max_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_max_epi8
+ // CHECK-LABEL: test_mm256_mask_max_epi8
// CHECK: [[RES:%.*]] = call <32 x i8> @llvm.smax.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1258,7 +1277,7 @@ __m256i test_mm256_mask_max_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256
return _mm256_mask_max_epi8(__W,__M,__A,__B);
}
__m128i test_mm_maskz_max_epi16(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_max_epi16
+ // CHECK-LABEL: test_mm_maskz_max_epi16
// CHECK: [[RES:%.*]] = call <8 x i16> @llvm.smax.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1266,7 +1285,7 @@ __m128i test_mm_maskz_max_epi16(__mmask8 __M, __m128i __A, __m128i __B) {
return _mm_maskz_max_epi16(__M,__A,__B);
}
__m128i test_mm_mask_max_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_max_epi16
+ // CHECK-LABEL: test_mm_mask_max_epi16
// CHECK: [[RES:%.*]] = call <8 x i16> @llvm.smax.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1274,7 +1293,7 @@ __m128i test_mm_mask_max_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i _
return _mm_mask_max_epi16(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_max_epi16(__mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_epi16
+ // CHECK-LABEL: test_mm256_maskz_max_epi16
// CHECK: [[RES:%.*]] = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1282,7 +1301,7 @@ __m256i test_mm256_maskz_max_epi16(__mmask16 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_max_epi16(__M,__A,__B);
}
__m256i test_mm256_mask_max_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_max_epi16
+ // CHECK-LABEL: test_mm256_mask_max_epi16
// CHECK: [[RES:%.*]] = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1290,7 +1309,7 @@ __m256i test_mm256_mask_max_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m25
return _mm256_mask_max_epi16(__W,__M,__A,__B);
}
__m128i test_mm_maskz_max_epu8(__mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_max_epu8
+ // CHECK-LABEL: test_mm_maskz_max_epu8
// CHECK: [[RES:%.*]] = call <16 x i8> @llvm.umax.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1298,7 +1317,7 @@ __m128i test_mm_maskz_max_epu8(__mmask16 __M, __m128i __A, __m128i __B) {
return _mm_maskz_max_epu8(__M,__A,__B);
}
__m128i test_mm_mask_max_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_max_epu8
+ // CHECK-LABEL: test_mm_mask_max_epu8
// CHECK: [[RES:%.*]] = call <16 x i8> @llvm.umax.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1306,7 +1325,7 @@ __m128i test_mm_mask_max_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i _
return _mm_mask_max_epu8(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_max_epu8(__mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_epu8
+ // CHECK-LABEL: test_mm256_maskz_max_epu8
// CHECK: [[RES:%.*]] = call <32 x i8> @llvm.umax.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1314,7 +1333,7 @@ __m256i test_mm256_maskz_max_epu8(__mmask32 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_max_epu8(__M,__A,__B);
}
__m256i test_mm256_mask_max_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_max_epu8
+ // CHECK-LABEL: test_mm256_mask_max_epu8
// CHECK: [[RES:%.*]] = call <32 x i8> @llvm.umax.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1322,7 +1341,7 @@ __m256i test_mm256_mask_max_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256
return _mm256_mask_max_epu8(__W,__M,__A,__B);
}
__m128i test_mm_maskz_max_epu16(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_max_epu16
+ // CHECK-LABEL: test_mm_maskz_max_epu16
// CHECK: [[RES:%.*]] = call <8 x i16> @llvm.umax.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1330,7 +1349,7 @@ __m128i test_mm_maskz_max_epu16(__mmask8 __M, __m128i __A, __m128i __B) {
return _mm_maskz_max_epu16(__M,__A,__B);
}
__m128i test_mm_mask_max_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_max_epu16
+ // CHECK-LABEL: test_mm_mask_max_epu16
// CHECK: [[RES:%.*]] = call <8 x i16> @llvm.umax.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1338,7 +1357,7 @@ __m128i test_mm_mask_max_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i _
return _mm_mask_max_epu16(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_max_epu16(__mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_epu16
+ // CHECK-LABEL: test_mm256_maskz_max_epu16
// CHECK: [[RES:%.*]] = call <16 x i16> @llvm.umax.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1346,7 +1365,7 @@ __m256i test_mm256_maskz_max_epu16(__mmask16 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_max_epu16(__M,__A,__B);
}
__m256i test_mm256_mask_max_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_max_epu16
+ // CHECK-LABEL: test_mm256_mask_max_epu16
// CHECK: [[RES:%.*]] = call <16 x i16> @llvm.umax.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1354,7 +1373,7 @@ __m256i test_mm256_mask_max_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m25
return _mm256_mask_max_epu16(__W,__M,__A,__B);
}
__m128i test_mm_maskz_min_epi8(__mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_min_epi8
+ // CHECK-LABEL: test_mm_maskz_min_epi8
// CHECK: [[RES:%.*]] = call <16 x i8> @llvm.smin.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1362,7 +1381,7 @@ __m128i test_mm_maskz_min_epi8(__mmask16 __M, __m128i __A, __m128i __B) {
return _mm_maskz_min_epi8(__M,__A,__B);
}
__m128i test_mm_mask_min_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_min_epi8
+ // CHECK-LABEL: test_mm_mask_min_epi8
// CHECK: [[RES:%.*]] = call <16 x i8> @llvm.smin.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1370,7 +1389,7 @@ __m128i test_mm_mask_min_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i _
return _mm_mask_min_epi8(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_min_epi8(__mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_epi8
+ // CHECK-LABEL: test_mm256_maskz_min_epi8
// CHECK: [[RES:%.*]] = call <32 x i8> @llvm.smin.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1378,7 +1397,7 @@ __m256i test_mm256_maskz_min_epi8(__mmask32 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_min_epi8(__M,__A,__B);
}
__m256i test_mm256_mask_min_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_min_epi8
+ // CHECK-LABEL: test_mm256_mask_min_epi8
// CHECK: [[RES:%.*]] = call <32 x i8> @llvm.smin.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1386,7 +1405,7 @@ __m256i test_mm256_mask_min_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256
return _mm256_mask_min_epi8(__W,__M,__A,__B);
}
__m128i test_mm_maskz_min_epi16(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_min_epi16
+ // CHECK-LABEL: test_mm_maskz_min_epi16
// CHECK: [[RES:%.*]] = call <8 x i16> @llvm.smin.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1394,7 +1413,7 @@ __m128i test_mm_maskz_min_epi16(__mmask8 __M, __m128i __A, __m128i __B) {
return _mm_maskz_min_epi16(__M,__A,__B);
}
__m128i test_mm_mask_min_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_min_epi16
+ // CHECK-LABEL: test_mm_mask_min_epi16
// CHECK: [[RES:%.*]] = call <8 x i16> @llvm.smin.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1402,7 +1421,7 @@ __m128i test_mm_mask_min_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i _
return _mm_mask_min_epi16(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_min_epi16(__mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_epi16
+ // CHECK-LABEL: test_mm256_maskz_min_epi16
// CHECK: [[RES:%.*]] = call <16 x i16> @llvm.smin.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1410,7 +1429,7 @@ __m256i test_mm256_maskz_min_epi16(__mmask16 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_min_epi16(__M,__A,__B);
}
__m256i test_mm256_mask_min_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_min_epi16
+ // CHECK-LABEL: test_mm256_mask_min_epi16
// CHECK: [[RES:%.*]] = call <16 x i16> @llvm.smin.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1418,7 +1437,7 @@ __m256i test_mm256_mask_min_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m25
return _mm256_mask_min_epi16(__W,__M,__A,__B);
}
__m128i test_mm_maskz_min_epu8(__mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_min_epu8
+ // CHECK-LABEL: test_mm_maskz_min_epu8
// CHECK: [[RES:%.*]] = call <16 x i8> @llvm.umin.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1426,7 +1445,7 @@ __m128i test_mm_maskz_min_epu8(__mmask16 __M, __m128i __A, __m128i __B) {
return _mm_maskz_min_epu8(__M,__A,__B);
}
__m128i test_mm_mask_min_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_min_epu8
+ // CHECK-LABEL: test_mm_mask_min_epu8
// CHECK: [[RES:%.*]] = call <16 x i8> @llvm.umin.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1434,7 +1453,7 @@ __m128i test_mm_mask_min_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i _
return _mm_mask_min_epu8(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_min_epu8(__mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_epu8
+ // CHECK-LABEL: test_mm256_maskz_min_epu8
// CHECK: [[RES:%.*]] = call <32 x i8> @llvm.umin.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1442,7 +1461,7 @@ __m256i test_mm256_maskz_min_epu8(__mmask32 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_min_epu8(__M,__A,__B);
}
__m256i test_mm256_mask_min_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_min_epu8
+ // CHECK-LABEL: test_mm256_mask_min_epu8
// CHECK: [[RES:%.*]] = call <32 x i8> @llvm.umin.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1450,7 +1469,7 @@ __m256i test_mm256_mask_min_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256
return _mm256_mask_min_epu8(__W,__M,__A,__B);
}
__m128i test_mm_maskz_min_epu16(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_min_epu16
+ // CHECK-LABEL: test_mm_maskz_min_epu16
// CHECK: [[RES:%.*]] = call <8 x i16> @llvm.umin.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1458,7 +1477,7 @@ __m128i test_mm_maskz_min_epu16(__mmask8 __M, __m128i __A, __m128i __B) {
return _mm_maskz_min_epu16(__M,__A,__B);
}
__m128i test_mm_mask_min_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_min_epu16
+ // CHECK-LABEL: test_mm_mask_min_epu16
// CHECK: [[RES:%.*]] = call <8 x i16> @llvm.umin.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1466,7 +1485,7 @@ __m128i test_mm_mask_min_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i _
return _mm_mask_min_epu16(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_min_epu16(__mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_epu16
+ // CHECK-LABEL: test_mm256_maskz_min_epu16
// CHECK: [[RES:%.*]] = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1474,7 +1493,7 @@ __m256i test_mm256_maskz_min_epu16(__mmask16 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_min_epu16(__M,__A,__B);
}
__m256i test_mm256_mask_min_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_min_epu16
+ // CHECK-LABEL: test_mm256_mask_min_epu16
// CHECK: [[RES:%.*]] = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x i64>]]
// CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
@@ -1482,128 +1501,128 @@ __m256i test_mm256_mask_min_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m25
return _mm256_mask_min_epu16(__W,__M,__A,__B);
}
__m128i test_mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_shuffle_epi8
+ // CHECK-LABEL: test_mm_mask_shuffle_epi8
// CHECK: @llvm.x86.ssse3.pshuf.b
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_shuffle_epi8(__W,__U,__A,__B);
}
__m128i test_mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_shuffle_epi8
+ // CHECK-LABEL: test_mm_maskz_shuffle_epi8
// CHECK: @llvm.x86.ssse3.pshuf.b
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_shuffle_epi8(__U,__A,__B);
}
__m256i test_mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_epi8
+ // CHECK-LABEL: test_mm256_mask_shuffle_epi8
// CHECK: @llvm.x86.avx2.pshuf.b
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_shuffle_epi8(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_epi8
+ // CHECK-LABEL: test_mm256_maskz_shuffle_epi8
// CHECK: @llvm.x86.avx2.pshuf.b
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_shuffle_epi8(__U,__A,__B);
}
__m128i test_mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_subs_epi8
+ // CHECK-LABEL: test_mm_mask_subs_epi8
// CHECK: @llvm.ssub.sat.v16i8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_subs_epi8(__W,__U,__A,__B);
}
__m128i test_mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_subs_epi8
+ // CHECK-LABEL: test_mm_maskz_subs_epi8
// CHECK: @llvm.ssub.sat.v16i8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_subs_epi8(__U,__A,__B);
}
__m256i test_mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_subs_epi8
+ // CHECK-LABEL: test_mm256_mask_subs_epi8
// CHECK: @llvm.ssub.sat.v32i8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_subs_epi8(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_subs_epi8
+ // CHECK-LABEL: test_mm256_maskz_subs_epi8
// CHECK: @llvm.ssub.sat.v32i8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_subs_epi8(__U,__A,__B);
}
__m128i test_mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_subs_epi16
+ // CHECK-LABEL: test_mm_mask_subs_epi16
// CHECK: @llvm.ssub.sat.v8i16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_subs_epi16(__W,__U,__A,__B);
}
__m128i test_mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_subs_epi16
+ // CHECK-LABEL: test_mm_maskz_subs_epi16
// CHECK: @llvm.ssub.sat.v8i16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_subs_epi16(__U,__A,__B);
}
__m256i test_mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_subs_epi16
+ // CHECK-LABEL: test_mm256_mask_subs_epi16
// CHECK: @llvm.ssub.sat.v16i16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_subs_epi16(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_subs_epi16
+ // CHECK-LABEL: test_mm256_maskz_subs_epi16
// CHECK: @llvm.ssub.sat.v16i16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_subs_epi16(__U,__A,__B);
}
__m128i test_mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_subs_epu8
+ // CHECK-LABEL: test_mm_mask_subs_epu8
// CHECK-NOT: @llvm.x86.sse2.psubus.b
// CHECK: call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_subs_epu8(__W,__U,__A,__B);
}
__m128i test_mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_subs_epu8
+ // CHECK-LABEL: test_mm_maskz_subs_epu8
// CHECK-NOT: @llvm.x86.sse2.psubus.b
// CHECK: call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_subs_epu8(__U,__A,__B);
}
__m256i test_mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_subs_epu8
+ // CHECK-LABEL: test_mm256_mask_subs_epu8
// CHECK-NOT: @llvm.x86.avx2.psubus.b
// CHECK: call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_subs_epu8(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_subs_epu8
+ // CHECK-LABEL: test_mm256_maskz_subs_epu8
// CHECK-NOT: @llvm.x86.avx2.psubus.b
// CHECK: call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_subs_epu8(__U,__A,__B);
}
__m128i test_mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_subs_epu16
+ // CHECK-LABEL: test_mm_mask_subs_epu16
// CHECK-NOT: @llvm.x86.sse2.psubus.w
// CHECK: call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_subs_epu16(__W,__U,__A,__B);
}
__m128i test_mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_subs_epu16
+ // CHECK-LABEL: test_mm_maskz_subs_epu16
// CHECK-NOT: @llvm.x86.sse2.psubus.w
// CHECK: call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_subs_epu16(__U,__A,__B);
}
__m256i test_mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_subs_epu16
+ // CHECK-LABEL: test_mm256_mask_subs_epu16
// CHECK-NOT: @llvm.x86.avx2.psubus.w
// CHECK: call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_subs_epu16(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_subs_epu16
+ // CHECK-LABEL: test_mm256_maskz_subs_epu16
// CHECK-NOT: @llvm.x86.avx2.psubus.w
// CHECK: call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
@@ -1612,1019 +1631,1038 @@ __m256i test_mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
__m128i test_mm_mask2_permutex2var_epi16(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask2_permutex2var_epi16
+ // CHECK-LABEL: test_mm_mask2_permutex2var_epi16
// CHECK: @llvm.x86.avx512.vpermi2var.hi.128
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask2_permutex2var_epi16(__A,__I,__U,__B);
}
__m256i test_mm256_mask2_permutex2var_epi16(__m256i __A, __m256i __I, __mmask16 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask2_permutex2var_epi16
+ // CHECK-LABEL: test_mm256_mask2_permutex2var_epi16
// CHECK: @llvm.x86.avx512.vpermi2var.hi.256
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask2_permutex2var_epi16(__A,__I,__U,__B);
}
__m128i test_mm_permutex2var_epi16(__m128i __A, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_permutex2var_epi16
+ // CHECK-LABEL: test_mm_permutex2var_epi16
// CHECK: @llvm.x86.avx512.vpermi2var.hi.128
return _mm_permutex2var_epi16(__A,__I,__B);
}
__m128i test_mm_mask_permutex2var_epi16(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_permutex2var_epi16
+ // CHECK-LABEL: test_mm_mask_permutex2var_epi16
// CHECK: @llvm.x86.avx512.vpermi2var.hi.128
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_permutex2var_epi16(__A,__U,__I,__B);
}
__m128i test_mm_maskz_permutex2var_epi16(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_permutex2var_epi16
+ // CHECK-LABEL: test_mm_maskz_permutex2var_epi16
// CHECK: @llvm.x86.avx512.vpermi2var.hi.128
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_permutex2var_epi16(__U,__A,__I,__B);
}
__m256i test_mm256_permutex2var_epi16(__m256i __A, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_permutex2var_epi16
+ // CHECK-LABEL: test_mm256_permutex2var_epi16
// CHECK: @llvm.x86.avx512.vpermi2var.hi.256
return _mm256_permutex2var_epi16(__A,__I,__B);
}
__m256i test_mm256_mask_permutex2var_epi16(__m256i __A, __mmask16 __U, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_permutex2var_epi16
+ // CHECK-LABEL: test_mm256_mask_permutex2var_epi16
// CHECK: @llvm.x86.avx512.vpermi2var.hi.256
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_permutex2var_epi16(__A,__U,__I,__B);
}
__m256i test_mm256_maskz_permutex2var_epi16(__mmask16 __U, __m256i __A, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_permutex2var_epi16
+ // CHECK-LABEL: test_mm256_maskz_permutex2var_epi16
// CHECK: @llvm.x86.avx512.vpermi2var.hi.256
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_permutex2var_epi16(__U,__A,__I,__B);
}
__m128i test_mm_mask_maddubs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_maddubs_epi16
+ // CHECK-LABEL: test_mm_mask_maddubs_epi16
// CHECK: @llvm.x86.ssse3.pmadd.ub.sw
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_maddubs_epi16(__W, __U, __X, __Y);
}
__m128i test_mm_maskz_maddubs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_maddubs_epi16
+ // CHECK-LABEL: test_mm_maskz_maddubs_epi16
// CHECK: @llvm.x86.ssse3.pmadd.ub.sw
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_maddubs_epi16(__U, __X, __Y);
}
__m256i test_mm256_mask_maddubs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_maddubs_epi16
+ // CHECK-LABEL: test_mm256_mask_maddubs_epi16
// CHECK: @llvm.x86.avx2.pmadd.ub.sw
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_maddubs_epi16(__W, __U, __X, __Y);
}
__m256i test_mm256_maskz_maddubs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_maddubs_epi16
+ // CHECK-LABEL: test_mm256_maskz_maddubs_epi16
// CHECK: @llvm.x86.avx2.pmadd.ub.sw
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_maddubs_epi16(__U, __X, __Y);
}
__m128i test_mm_mask_madd_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_madd_epi16
+ // CHECK-LABEL: test_mm_mask_madd_epi16
// CHECK: @llvm.x86.sse2.pmadd.wd
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_madd_epi16(__W, __U, __A, __B);
}
__m128i test_mm_maskz_madd_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_madd_epi16
+ // CHECK-LABEL: test_mm_maskz_madd_epi16
// CHECK: @llvm.x86.sse2.pmadd.wd
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_madd_epi16(__U, __A, __B);
}
__m256i test_mm256_mask_madd_epi16(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_madd_epi16
+ // CHECK-LABEL: test_mm256_mask_madd_epi16
// CHECK: @llvm.x86.avx2.pmadd.wd
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_madd_epi16(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_madd_epi16(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_madd_epi16
+ // CHECK-LABEL: test_mm256_maskz_madd_epi16
// CHECK: @llvm.x86.avx2.pmadd.wd
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_madd_epi16(__U, __A, __B);
}
__m128i test_mm_cvtsepi16_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtsepi16_epi8
+ // CHECK-LABEL: test_mm_cvtsepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.wb.128
return _mm_cvtsepi16_epi8(__A);
}
__m128i test_mm_mask_cvtsepi16_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi16_epi8
+ // CHECK-LABEL: test_mm_mask_cvtsepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.wb.128
return _mm_mask_cvtsepi16_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtsepi16_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtsepi16_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtsepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.wb.128
return _mm_maskz_cvtsepi16_epi8(__M, __A);
}
__m128i test_mm256_cvtsepi16_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtsepi16_epi8
+ // CHECK-LABEL: test_mm256_cvtsepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.wb.256
return _mm256_cvtsepi16_epi8(__A);
}
__m128i test_mm256_mask_cvtsepi16_epi8(__m128i __O, __mmask16 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi16_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtsepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.wb.256
return _mm256_mask_cvtsepi16_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtsepi16_epi8(__mmask16 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtsepi16_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtsepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.wb.256
return _mm256_maskz_cvtsepi16_epi8(__M, __A);
}
__m128i test_mm_cvtusepi16_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtusepi16_epi8
+ // CHECK-LABEL: test_mm_cvtusepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.wb.128
return _mm_cvtusepi16_epi8(__A);
}
__m128i test_mm_mask_cvtusepi16_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi16_epi8
+ // CHECK-LABEL: test_mm_mask_cvtusepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.wb.128
return _mm_mask_cvtusepi16_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtusepi16_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtusepi16_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtusepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.wb.128
return _mm_maskz_cvtusepi16_epi8(__M, __A);
}
__m128i test_mm256_cvtusepi16_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtusepi16_epi8
+ // CHECK-LABEL: test_mm256_cvtusepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.wb.256
return _mm256_cvtusepi16_epi8(__A);
}
__m128i test_mm256_mask_cvtusepi16_epi8(__m128i __O, __mmask16 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi16_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtusepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.wb.256
return _mm256_mask_cvtusepi16_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtusepi16_epi8(__mmask16 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtusepi16_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtusepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.wb.256
return _mm256_maskz_cvtusepi16_epi8(__M, __A);
}
__m128i test_mm_cvtepi16_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepi16_epi8
+ // CHECK-LABEL: test_mm_cvtepi16_epi8
// CHECK: trunc <8 x i16> %{{.*}} to <8 x i8>
// CHECK: shufflevector <8 x i8> %{{.*}}, <8 x i8> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
return _mm_cvtepi16_epi8(__A);
}
__m128i test_mm_mask_cvtepi16_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi16_epi8
+ // CHECK-LABEL: test_mm_mask_cvtepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.wb.128
return _mm_mask_cvtepi16_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtepi16_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi16_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.wb.128
return _mm_maskz_cvtepi16_epi8(__M, __A);
}
__m128i test_mm256_cvtepi16_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepi16_epi8
+ // CHECK-LABEL: test_mm256_cvtepi16_epi8
// CHECK: trunc <16 x i16> %{{.*}} to <16 x i8>
return _mm256_cvtepi16_epi8(__A);
}
__m128i test_mm256_mask_cvtepi16_epi8(__m128i __O, __mmask16 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi16_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtepi16_epi8
// CHECK: trunc <16 x i16> %{{.*}} to <16 x i8>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm256_mask_cvtepi16_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi16_epi8(__mmask16 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi16_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtepi16_epi8
// CHECK: trunc <16 x i16> %{{.*}} to <16 x i8>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm256_maskz_cvtepi16_epi8(__M, __A);
}
__m128i test_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_mulhrs_epi16
+ // CHECK-LABEL: test_mm_mask_mulhrs_epi16
// CHECK: @llvm.x86.ssse3.pmul.hr.sw
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_mulhrs_epi16(__W, __U, __X, __Y);
}
__m128i test_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_mulhrs_epi16
+ // CHECK-LABEL: test_mm_maskz_mulhrs_epi16
// CHECK: @llvm.x86.ssse3.pmul.hr.sw
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_mulhrs_epi16(__U, __X, __Y);
}
__m256i test_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_mulhrs_epi16
+ // CHECK-LABEL: test_mm256_mask_mulhrs_epi16
// CHECK: @llvm.x86.avx2.pmul.hr.sw
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_mulhrs_epi16(__W, __U, __X, __Y);
}
__m256i test_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_mulhrs_epi16
+ // CHECK-LABEL: test_mm256_maskz_mulhrs_epi16
// CHECK: @llvm.x86.avx2.pmul.hr.sw
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_mulhrs_epi16(__U, __X, __Y);
}
__m128i test_mm_mask_mulhi_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_mulhi_epu16
+ // CHECK-LABEL: test_mm_mask_mulhi_epu16
// CHECK: @llvm.x86.sse2.pmulhu.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_mulhi_epu16(__W, __U, __A, __B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mask_mulhi_epu16(_mm_set1_epi16(1), 0x3C, (__m128i)(__v8hi){+1, -2, +3, -4, +5, -6, +7, -8}, (__m128i)(__v8hi){-16, -14, +12, +10, -8, +6, -4, +2}), 1, 1, 0, 9, 4, 5, 1, 1));
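+// Illustrative note: _mm_mulhi_epu16 keeps the high 16 bits of each unsigned
+// 32-bit product; e.g. lane 3 above is (unsigned short)-4 * 10 = 65532 * 10 =
+// 655320, and 655320 >> 16 == 9. Lanes not selected by 0x3C keep __W (= 1).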
__m128i test_mm_maskz_mulhi_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_mulhi_epu16
+ // CHECK-LABEL: test_mm_maskz_mulhi_epu16
// CHECK: @llvm.x86.sse2.pmulhu.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_mulhi_epu16(__U, __A, __B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_mulhi_epu16(0x87, (__m128i)(__v8hi){+1, -2, +3, -4, +5, -6, +7, -8}, (__m128i)(__v8hi){-16, -14, +12, +10, -8, +6, -4, +2}), 0, -16, 0, 0, 0, 0, 0, 1));
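+// Illustrative note: lane 1 above is 65534 * 65522 = 4293918748, whose high
+// 16 bits are 65520, i.e. -16 as a signed short; zero-masked lanes become 0.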
__m256i test_mm256_mask_mulhi_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_mulhi_epu16
+ // CHECK-LABEL: test_mm256_mask_mulhi_epu16
// CHECK: @llvm.x86.avx2.pmulhu.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_mulhi_epu16(__W, __U, __A, __B);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_mulhi_epu16(_mm256_set1_epi16(1), 0xF00F, (__m256i)(__v16hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16}, (__m256i)(__v16hi){-32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), 0, -32, 0, 25, 1, 1, 1, 1, 1, 1, 1, 1, 12, 5, 14, 1));
__m256i test_mm256_maskz_mulhi_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_mulhi_epu16
+ // CHECK-LABEL: test_mm256_maskz_mulhi_epu16
// CHECK: @llvm.x86.avx2.pmulhu.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_mulhi_epu16(__U, __A, __B);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_mulhi_epu16(0x0FF0, (__m256i)(__v16hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16}, (__m256i)(__v16hi){-32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), 0, 0, 0, 0, 4, -28, 0, 17, 8, -24, 0, 9, 0, 0, 0, 0));
__m128i test_mm_mask_mulhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_mulhi_epi16
+ // CHECK-LABEL: test_mm_mask_mulhi_epi16
// CHECK: @llvm.x86.sse2.pmulh.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_mulhi_epi16(__W, __U, __A, __B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mask_mulhi_epi16(_mm_set1_epi16(1), 0x78, (__m128i)(__v8hi){+1, -2, +3, -4, +5, -6, +7, -8}, (__m128i)(__v8hi){-16, -14, +12, +10, -8, +6, -4, +2}), 1, 1, 1, -1, -1, -1, -1, 1));
__m128i test_mm_maskz_mulhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_mulhi_epi16
+ // CHECK-LABEL: test_mm_maskz_mulhi_epi16
// CHECK: @llvm.x86.sse2.pmulh.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_mulhi_epi16(__U, __A, __B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_mulhi_epi16(0xC3, (__m128i)(__v8hi){+1, -2, +3, -4, +5, -6, +7, -8}, (__m128i)(__v8hi){-16, -14, +12, +10, -8, +6, -4, +2}), -1, 0, 0, 0, 0, 0, -1, -1));
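+// Illustrative note: the signed variant sign-extends before multiplying, so a
+// small negative product has an all-ones high half; e.g. lane 0 above is
+// 1 * -16 = 0xFFFFFFF0, whose high 16 bits are 0xFFFF (-1).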
__m256i test_mm256_mask_mulhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_mulhi_epi16
+ // CHECK-LABEL: test_mm256_mask_mulhi_epi16
// CHECK: @llvm.x86.avx2.pmulh.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_mulhi_epi16(__W, __U, __A, __B);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_mulhi_epi16(_mm256_set1_epi16(1), 0x0FF0, (__m256i)(__v16hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16}, (__m256i)(__v16hi){-32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), 1, 1, 1, 1, -1, 0, 0, -1, -1, 0, 0, -1, 1, 1, 1, 1));
__m256i test_mm256_maskz_mulhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_mulhi_epi16
+ // CHECK-LABEL: test_mm256_maskz_mulhi_epi16
// CHECK: @llvm.x86.avx2.pmulh.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_mulhi_epi16(__U, __A, __B);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_mulhi_epi16(0xF00F, (__m256i)(__v16hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16}, (__m256i)(__v16hi){-32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), -1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1));
__m128i test_mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_unpackhi_epi8
+ // CHECK-LABEL: test_mm_mask_unpackhi_epi8
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_unpackhi_epi8(__W, __U, __A, __B);
}
__m128i test_mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_unpackhi_epi8
+ // CHECK-LABEL: test_mm_maskz_unpackhi_epi8
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_unpackhi_epi8(__U, __A, __B);
}
__m256i test_mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_unpackhi_epi8
+ // CHECK-LABEL: test_mm256_mask_unpackhi_epi8
// CHECK: shufflevector <32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_unpackhi_epi8(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpackhi_epi8
+ // CHECK-LABEL: test_mm256_maskz_unpackhi_epi8
// CHECK: shufflevector <32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_unpackhi_epi8(__U, __A, __B);
}
__m128i test_mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_unpackhi_epi16
+ // CHECK-LABEL: test_mm_mask_unpackhi_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_unpackhi_epi16(__W, __U, __A, __B);
}
__m128i test_mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_unpackhi_epi16
+ // CHECK-LABEL: test_mm_maskz_unpackhi_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_unpackhi_epi16(__U, __A, __B);
}
__m256i test_mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_unpackhi_epi16
+ // CHECK-LABEL: test_mm256_mask_unpackhi_epi16
// CHECK: shufflevector <16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_unpackhi_epi16(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpackhi_epi16
+ // CHECK-LABEL: test_mm256_maskz_unpackhi_epi16
// CHECK: shufflevector <16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_unpackhi_epi16(__U, __A, __B);
}
__m128i test_mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_unpacklo_epi8
+ // CHECK-LABEL: test_mm_mask_unpacklo_epi8
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_unpacklo_epi8(__W, __U, __A, __B);
}
__m128i test_mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_unpacklo_epi8
+ // CHECK-LABEL: test_mm_maskz_unpacklo_epi8
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_unpacklo_epi8(__U, __A, __B);
}
__m256i test_mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_unpacklo_epi8
+ // CHECK-LABEL: test_mm256_mask_unpacklo_epi8
// CHECK: shufflevector <32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_unpacklo_epi8(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpacklo_epi8
+ // CHECK-LABEL: test_mm256_maskz_unpacklo_epi8
// CHECK: shufflevector <32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_unpacklo_epi8(__U, __A, __B);
}
__m128i test_mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_unpacklo_epi16
+ // CHECK-LABEL: test_mm_mask_unpacklo_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_unpacklo_epi16(__W, __U, __A, __B);
}
__m128i test_mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_unpacklo_epi16
+ // CHECK-LABEL: test_mm_maskz_unpacklo_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_unpacklo_epi16(__U, __A, __B);
}
__m256i test_mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_unpacklo_epi16
+ // CHECK-LABEL: test_mm256_mask_unpacklo_epi16
// CHECK: shufflevector <16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_unpacklo_epi16(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpacklo_epi16
+ // CHECK-LABEL: test_mm256_maskz_unpacklo_epi16
// CHECK: shufflevector <16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_unpacklo_epi16(__U, __A, __B);
}
__m128i test_mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi8_epi16
+ // CHECK-LABEL: test_mm_mask_cvtepi8_epi16
// CHECK: sext <8 x i8> %{{.*}} to <8 x i16>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_cvtepi8_epi16(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi8_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtepi8_epi16
// CHECK: sext <8 x i8> %{{.*}} to <8 x i16>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_cvtepi8_epi16(__U, __A);
}
__m256i test_mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi8_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtepi8_epi16
// CHECK: sext <16 x i8> %{{.*}} to <16 x i16>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_cvtepi8_epi16(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi8_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtepi8_epi16
// CHECK: sext <16 x i8> %{{.*}} to <16 x i16>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_cvtepi8_epi16(__U, __A);
}
__m128i test_mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu8_epi16
+ // CHECK-LABEL: test_mm_mask_cvtepu8_epi16
// CHECK: zext <8 x i8> %{{.*}} to <8 x i16>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_cvtepu8_epi16(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu8_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtepu8_epi16
// CHECK: zext <8 x i8> %{{.*}} to <8 x i16>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_cvtepu8_epi16(__U, __A);
}
__m256i test_mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu8_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtepu8_epi16
// CHECK: zext <16 x i8> %{{.*}} to <16 x i16>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_cvtepu8_epi16(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepu8_epi16(__mmask16 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu8_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtepu8_epi16
// CHECK: zext <16 x i8> %{{.*}} to <16 x i16>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_cvtepu8_epi16(__U, __A);
}
__m256i test_mm256_sllv_epi16(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_sllv_epi16
+ // CHECK-LABEL: test_mm256_sllv_epi16
// CHECK: @llvm.x86.avx512.psllv.w.256(
return _mm256_sllv_epi16(__A, __B);
}
__m256i test_mm256_mask_sllv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_sllv_epi16
+ // CHECK-LABEL: test_mm256_mask_sllv_epi16
// CHECK: @llvm.x86.avx512.psllv.w.256(
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_sllv_epi16(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_sllv_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_sllv_epi16
+ // CHECK-LABEL: test_mm256_maskz_sllv_epi16
// CHECK: @llvm.x86.avx512.psllv.w.256(
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_sllv_epi16(__U, __A, __B);
}
__m128i test_mm_sllv_epi16(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_sllv_epi16
+ // CHECK-LABEL: test_mm_sllv_epi16
// CHECK: @llvm.x86.avx512.psllv.w.128(
return _mm_sllv_epi16(__A, __B);
}
__m128i test_mm_mask_sllv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_sllv_epi16
+ // CHECK-LABEL: test_mm_mask_sllv_epi16
// CHECK: @llvm.x86.avx512.psllv.w.128(
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_sllv_epi16(__W, __U, __A, __B);
}
__m128i test_mm_maskz_sllv_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_sllv_epi16
+ // CHECK-LABEL: test_mm_maskz_sllv_epi16
// CHECK: @llvm.x86.avx512.psllv.w.128(
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_sllv_epi16(__U, __A, __B);
}
__m128i test_mm_mask_sll_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_sll_epi16
+ // CHECK-LABEL: test_mm_mask_sll_epi16
// CHECK: @llvm.x86.sse2.psll.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_sll_epi16(__W, __U, __A, __B);
}
__m128i test_mm_maskz_sll_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_sll_epi16
+ // CHECK-LABEL: test_mm_maskz_sll_epi16
// CHECK: @llvm.x86.sse2.psll.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_sll_epi16(__U, __A, __B);
}
__m256i test_mm256_mask_sll_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_sll_epi16
+ // CHECK-LABEL: test_mm256_mask_sll_epi16
// CHECK: @llvm.x86.avx2.psll.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_sll_epi16(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_sll_epi16
+ // CHECK-LABEL: test_mm256_maskz_sll_epi16
// CHECK: @llvm.x86.avx2.psll.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_sll_epi16(__U, __A, __B);
}
__m128i test_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_slli_epi16
+ // CHECK-LABEL: test_mm_mask_slli_epi16
// CHECK: @llvm.x86.sse2.pslli.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_slli_epi16(__W, __U, __A, 5);
}
__m128i test_mm_mask_slli_epi16_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_slli_epi16_2
+ // CHECK-LABEL: test_mm_mask_slli_epi16_2
// CHECK: @llvm.x86.sse2.pslli.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_slli_epi16(__W, __U, __A, __B);
}
__m128i test_mm_maskz_slli_epi16(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_slli_epi16
+ // CHECK-LABEL: test_mm_maskz_slli_epi16
// CHECK: @llvm.x86.sse2.pslli.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_slli_epi16(__U, __A, 5);
}
__m128i test_mm_maskz_slli_epi16_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_slli_epi16_2
+ // CHECK-LABEL: test_mm_maskz_slli_epi16_2
// CHECK: @llvm.x86.sse2.pslli.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_slli_epi16(__U, __A, __B);
}
__m256i test_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_slli_epi16
+ // CHECK-LABEL: test_mm256_mask_slli_epi16
// CHECK: @llvm.x86.avx2.pslli.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_slli_epi16(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_slli_epi16((__m256i)(__v16hi){100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}, (__mmask16)0xAAAA, (__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 20), 100, 0, 102, 0, 104, 0, 106, 0, 108, 0, 110, 0, 112, 0, 114, 0));
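+// Illustrative note: a shift count >= 16 zeroes every selected lane, so the
+// odd lanes picked by 0xAAAA become 0 while the even lanes keep __W.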
__m256i test_mm256_mask_slli_epi16_2(__m256i __W, __mmask16 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_slli_epi16_2
+ // CHECK-LABEL: test_mm256_mask_slli_epi16_2
// CHECK: @llvm.x86.avx2.pslli.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_slli_epi16(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_slli_epi16
+ // CHECK-LABEL: test_mm256_maskz_slli_epi16
// CHECK: @llvm.x86.avx2.pslli.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_slli_epi16(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_slli_epi16((__mmask16)0xcc71, (__m256i)(__v16hi){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 32), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_slli_epi16((__mmask16)0, (__m256i)(__v16hi){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 16), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_slli_epi16((__mmask16)0xffff, (__m256i)(__v16hi){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0x1fe, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e));
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_slli_epi16((__mmask16)0x7, (__m256i)(__v16hi){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0x1fe, 0x2, 0x4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_slli_epi16((__mmask16)0x71, (__m256i)(__v16hi){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0x1fe, 0, 0, 0, 0x8, 0xa, 0xc, 0, 0, 0, 0, 0, 0, 0, 0, 0));
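+// Illustrative note: the cases above cover the three interesting regimes of a
+// masked immediate shift: counts >= 16 zero all selected lanes, a count of 1
+// doubles them (0xff << 1 == 0x1fe), and zero-mask bits force lanes to 0.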
__m256i test_mm256_maskz_slli_epi16_2(__mmask16 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_slli_epi16_2
+ // CHECK-LABEL: test_mm256_maskz_slli_epi16_2
// CHECK: @llvm.x86.avx2.pslli.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_slli_epi16(__U, __A, __B);
}
__m256i test_mm256_srlv_epi16(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_srlv_epi16
+ // CHECK-LABEL: test_mm256_srlv_epi16
// CHECK: @llvm.x86.avx512.psrlv.w.256(
return _mm256_srlv_epi16(__A, __B);
}
__m256i test_mm256_mask_srlv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_srlv_epi16
+ // CHECK-LABEL: test_mm256_mask_srlv_epi16
// CHECK: @llvm.x86.avx512.psrlv.w.256(
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_srlv_epi16(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srlv_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_srlv_epi16
+ // CHECK-LABEL: test_mm256_maskz_srlv_epi16
// CHECK: @llvm.x86.avx512.psrlv.w.256(
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_srlv_epi16(__U, __A, __B);
}
__m128i test_mm_srlv_epi16(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_srlv_epi16
+ // CHECK-LABEL: test_mm_srlv_epi16
// CHECK: @llvm.x86.avx512.psrlv.w.128(
return _mm_srlv_epi16(__A, __B);
}
__m128i test_mm_mask_srlv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_srlv_epi16
+ // CHECK-LABEL: test_mm_mask_srlv_epi16
// CHECK: @llvm.x86.avx512.psrlv.w.128(
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_srlv_epi16(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srlv_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_srlv_epi16
+ // CHECK-LABEL: test_mm_maskz_srlv_epi16
// CHECK: @llvm.x86.avx512.psrlv.w.128(
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_srlv_epi16(__U, __A, __B);
}
__m128i test_mm_mask_srl_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_srl_epi16
+ // CHECK-LABEL: test_mm_mask_srl_epi16
// CHECK: @llvm.x86.sse2.psrl.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_srl_epi16(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srl_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_srl_epi16
+ // CHECK-LABEL: test_mm_maskz_srl_epi16
// CHECK: @llvm.x86.sse2.psrl.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_srl_epi16(__U, __A, __B);
}
__m256i test_mm256_mask_srl_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_srl_epi16
+ // CHECK-LABEL: test_mm256_mask_srl_epi16
// CHECK: @llvm.x86.avx2.psrl.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_srl_epi16(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srl_epi16(__mmask16 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_srl_epi16
+ // CHECK-LABEL: test_mm256_maskz_srl_epi16
// CHECK: @llvm.x86.avx2.psrl.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_srl_epi16(__U, __A, __B);
}
__m128i test_mm_mask_srli_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_srli_epi16
+ // CHECK-LABEL: test_mm_mask_srli_epi16
// CHECK: @llvm.x86.sse2.psrli.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_srli_epi16(__W, __U, __A, 5);
}
__m128i test_mm_mask_srli_epi16_2(__m128i __W, __mmask8 __U, __m128i __A, int __B) {
- // CHECK-LABEL: @test_mm_mask_srli_epi16_2
+ // CHECK-LABEL: test_mm_mask_srli_epi16_2
// CHECK: @llvm.x86.sse2.psrli.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_srli_epi16(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srli_epi16(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_srli_epi16
+ // CHECK-LABEL: test_mm_maskz_srli_epi16
// CHECK: @llvm.x86.sse2.psrli.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_srli_epi16(__U, __A, 5);
}
__m128i test_mm_maskz_srli_epi16_2(__mmask8 __U, __m128i __A, int __B) {
- // CHECK-LABEL: @test_mm_maskz_srli_epi16_2
+ // CHECK-LABEL: test_mm_maskz_srli_epi16_2
// CHECK: @llvm.x86.sse2.psrli.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_srli_epi16(__U, __A, __B);
}
__m256i test_mm256_mask_srli_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_srli_epi16
+ // CHECK-LABEL: test_mm256_mask_srli_epi16
// CHECK: @llvm.x86.avx2.psrli.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_srli_epi16(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_srli_epi16((__m256i)(__v16hi){100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}, (__mmask16)0xAAAA, (__m256i)(__v16hi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 20), 100, 0, 102, 0, 104, 0, 106, 0, 108, 0, 110, 0, 112, 0, 114, 0));
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_srli_epi16((__m256i)(__v16hi){100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}, (__mmask16)0xAAAA, (__m256i)(__v16hi){0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 480}, 5), 100, 1, 102, 3, 104, 5, 106, 7, 108, 9, 110, 11, 112, 13, 114, 15));
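+// Illustrative note: srli is a logical right shift, so lane 1 of the second
+// case is 32 >> 5 == 1; as with slli, a count >= 16 zeroes the selected lanes.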
__m256i test_mm256_mask_srli_epi16_2(__m256i __W, __mmask16 __U, __m256i __A, int __B) {
- // CHECK-LABEL: @test_mm256_mask_srli_epi16_2
+ // CHECK-LABEL: test_mm256_mask_srli_epi16_2
// CHECK: @llvm.x86.avx2.psrli.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_srli_epi16(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srli_epi16(__mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_srli_epi16
+ // CHECK-LABEL: test_mm256_maskz_srli_epi16
// CHECK: @llvm.x86.avx2.psrli.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_srli_epi16(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_srli_epi16((__mmask16)0x71, (__m256i)(__v16hi){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0x7f, 0, 0, 0, 0x2, 0x2, 0x3, 0, 0, 0, 0, 0, 0, 0, 0, 0));
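+// Illustrative note: odd inputs truncate toward zero (5 >> 1 == 2), and only
+// lanes 0, 4, 5, 6 (mask 0x71) survive the zero-masking.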
__m256i test_mm256_maskz_srli_epi16_2(__mmask16 __U, __m256i __A, int __B) {
- // CHECK-LABEL: @test_mm256_maskz_srli_epi16_2
+ // CHECK-LABEL: test_mm256_maskz_srli_epi16_2
// CHECK: @llvm.x86.avx2.psrli.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_srli_epi16(__U, __A, __B);
}
__m256i test_mm256_srav_epi16(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_srav_epi16
+ // CHECK-LABEL: test_mm256_srav_epi16
// CHECK: @llvm.x86.avx512.psrav.w.256(
return _mm256_srav_epi16(__A, __B);
}
__m256i test_mm256_mask_srav_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_srav_epi16
+ // CHECK-LABEL: test_mm256_mask_srav_epi16
// CHECK: @llvm.x86.avx512.psrav.w.256(
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_srav_epi16(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srav_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_srav_epi16
+ // CHECK-LABEL: test_mm256_maskz_srav_epi16
// CHECK: @llvm.x86.avx512.psrav.w.256(
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_srav_epi16(__U, __A, __B);
}
__m128i test_mm_srav_epi16(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_srav_epi16
+ // CHECK-LABEL: test_mm_srav_epi16
// CHECK: @llvm.x86.avx512.psrav.w.128(
return _mm_srav_epi16(__A, __B);
}
__m128i test_mm_mask_srav_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_srav_epi16
+ // CHECK-LABEL: test_mm_mask_srav_epi16
// CHECK: @llvm.x86.avx512.psrav.w.128(
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_srav_epi16(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srav_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_srav_epi16
+ // CHECK-LABEL: test_mm_maskz_srav_epi16
// CHECK: @llvm.x86.avx512.psrav.w.128(
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_srav_epi16(__U, __A, __B);
}
__m128i test_mm_mask_sra_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_sra_epi16
+ // CHECK-LABEL: test_mm_mask_sra_epi16
// CHECK: @llvm.x86.sse2.psra.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_sra_epi16(__W, __U, __A, __B);
}
__m128i test_mm_maskz_sra_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_sra_epi16
+ // CHECK-LABEL: test_mm_maskz_sra_epi16
// CHECK: @llvm.x86.sse2.psra.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_sra_epi16(__U, __A, __B);
}
__m256i test_mm256_mask_sra_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_sra_epi16
+ // CHECK-LABEL: test_mm256_mask_sra_epi16
// CHECK: @llvm.x86.avx2.psra.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_sra_epi16(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_sra_epi16
+ // CHECK-LABEL: test_mm256_maskz_sra_epi16
// CHECK: @llvm.x86.avx2.psra.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_sra_epi16(__U, __A, __B);
}
__m128i test_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_srai_epi16
+ // CHECK-LABEL: test_mm_mask_srai_epi16
// CHECK: @llvm.x86.sse2.psrai.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_srai_epi16(__W, __U, __A, 5);
}
__m128i test_mm_mask_srai_epi16_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_srai_epi16_2
+ // CHECK-LABEL: test_mm_mask_srai_epi16_2
// CHECK: @llvm.x86.sse2.psrai.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_srai_epi16(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_srai_epi16
+ // CHECK-LABEL: test_mm_maskz_srai_epi16
// CHECK: @llvm.x86.sse2.psrai.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_srai_epi16(__U, __A, 5);
}
__m128i test_mm_maskz_srai_epi16_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_srai_epi16_2
+ // CHECK-LABEL: test_mm_maskz_srai_epi16_2
// CHECK: @llvm.x86.sse2.psrai.w
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_srai_epi16(__U, __A, __B);
}
__m256i test_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_srai_epi16
+ // CHECK-LABEL: test_mm256_mask_srai_epi16
// CHECK: @llvm.x86.avx2.psrai.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_srai_epi16(__W, __U, __A, 5);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_srai_epi16((__m256i)(__v16hi){100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}, (__mmask16)0xAAAA, (__m256i)(__v16hi){0, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 20), 100, 0xffff, 102, 0, 104, 0, 106, 0, 108, 0, 110, 0, 112, 0, 114, 0));
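+// Illustrative note: unlike the logical shifts, srai clamps counts > 15 to 15
+// rather than producing 0, so lane 1 is -1 >> 15 == -1 (0xffff) while the
+// non-negative lanes collapse to 0.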
__m256i test_mm256_mask_srai_epi16_2(__m256i __W, __mmask16 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_srai_epi16_2
+ // CHECK-LABEL: test_mm256_mask_srai_epi16_2
// CHECK: @llvm.x86.avx2.psrai.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_srai_epi16(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_srai_epi16
+ // CHECK-LABEL: test_mm256_maskz_srai_epi16
// CHECK: @llvm.x86.avx2.psrai.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_srai_epi16(__U, __A, 5);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_srai_epi16((__mmask16)0x71, (__m256i)(__v16hi){0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, 1), 0x7f, 0, 0, 0, 0x2, 0x2, 0x3, 0, 0, 0, 0, 0, 0, 0, 0, 0));
__m256i test_mm256_maskz_srai_epi16_2(__mmask16 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_srai_epi16_2
+ // CHECK-LABEL: test_mm256_maskz_srai_epi16_2
// CHECK: @llvm.x86.avx2.psrai.w
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_srai_epi16(__U, __A, __B);
}
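// Editorial sketch (assumption, not part of this commit): the TEST_CONSTEXPR
// checks above depend on psraiw clamping shift counts greater than 15, so each
// selected lane is shifted arithmetically by at most 15 bits:
static short sketch_sra_epi16_lane(short v, unsigned n) {
  if (n > 15) n = 15;      // hardware clamps the shift count at 15
  return (short)(v >> n);  // per-lane arithmetic (sign-filling) shift
}
// Hence _mm256_mask_srai_epi16(..., 20) yields 0 for non-negative lanes and
// -1 (0xffff) for negative ones, matching the expected vector above.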
__m128i test_mm_mask_mov_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_mov_epi16
+ // CHECK-LABEL: test_mm_mask_mov_epi16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_mov_epi16(__W, __U, __A);
}
__m128i test_mm_maskz_mov_epi16(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_mov_epi16
+ // CHECK-LABEL: test_mm_maskz_mov_epi16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_mov_epi16(__U, __A);
}
__m256i test_mm256_mask_mov_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_mov_epi16
+ // CHECK-LABEL: test_mm256_mask_mov_epi16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_mov_epi16(__W, __U, __A);
}
__m256i test_mm256_maskz_mov_epi16(__mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_mov_epi16
+ // CHECK-LABEL: test_mm256_maskz_mov_epi16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_mov_epi16(__U, __A);
}
__m128i test_mm_mask_mov_epi8(__m128i __W, __mmask16 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_mov_epi8
+ // CHECK-LABEL: test_mm_mask_mov_epi8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_mov_epi8(__W, __U, __A);
}
__m128i test_mm_maskz_mov_epi8(__mmask16 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_mov_epi8
+ // CHECK-LABEL: test_mm_maskz_mov_epi8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_mov_epi8(__U, __A);
}
__m256i test_mm256_mask_mov_epi8(__m256i __W, __mmask32 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_mov_epi8
+ // CHECK-LABEL: test_mm256_mask_mov_epi8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_mov_epi8(__W, __U, __A);
}
__m256i test_mm256_maskz_mov_epi8(__mmask32 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_mov_epi8
+ // CHECK-LABEL: test_mm256_maskz_mov_epi8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_mov_epi8(__U, __A);
}
__m128i test_mm_loadu_epi16(void const *__P) {
- // CHECK-LABEL: @test_mm_loadu_epi16
+ // CHECK-LABEL: test_mm_loadu_epi16
// CHECK: load <2 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm_loadu_epi16(__P);
}
__m128i test_mm_mask_loadu_epi16(__m128i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_loadu_epi16
+ // CHECK-LABEL: test_mm_mask_loadu_epi16
// CHECK: @llvm.masked.load.v8i16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
return _mm_mask_loadu_epi16(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi16(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_loadu_epi16
+ // CHECK-LABEL: test_mm_maskz_loadu_epi16
// CHECK: @llvm.masked.load.v8i16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
return _mm_maskz_loadu_epi16(__U, __P);
}
__m256i test_mm256_loadu_epi16(void const *__P) {
- // CHECK-LABEL: @test_mm256_loadu_epi16
+ // CHECK-LABEL: test_mm256_loadu_epi16
// CHECK: load <4 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm256_loadu_epi16(__P);
}
__m256i test_mm256_mask_loadu_epi16(__m256i __W, __mmask16 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_loadu_epi16
+ // CHECK-LABEL: test_mm256_mask_loadu_epi16
// CHECK: @llvm.masked.load.v16i16.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_mask_loadu_epi16(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi16(__mmask16 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_loadu_epi16
+ // CHECK-LABEL: test_mm256_maskz_loadu_epi16
// CHECK: @llvm.masked.load.v16i16.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_maskz_loadu_epi16(__U, __P);
}
__m128i test_mm_loadu_epi8(void const *__P) {
- // CHECK-LABEL: @test_mm_loadu_epi8
+ // CHECK-LABEL: test_mm_loadu_epi8
// CHECK: load <2 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm_loadu_epi8(__P);
}
__m128i test_mm_mask_loadu_epi8(__m128i __W, __mmask16 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_loadu_epi8
+ // CHECK-LABEL: test_mm_mask_loadu_epi8
// CHECK: @llvm.masked.load.v16i8.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
return _mm_mask_loadu_epi8(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi8(__mmask16 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_loadu_epi8
+ // CHECK-LABEL: test_mm_maskz_loadu_epi8
// CHECK: @llvm.masked.load.v16i8.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
return _mm_maskz_loadu_epi8(__U, __P);
}
__m256i test_mm256_loadu_epi8(void const *__P) {
- // CHECK-LABEL: @test_mm256_loadu_epi8
+ // CHECK-LABEL: test_mm256_loadu_epi8
// CHECK: load <4 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm256_loadu_epi8(__P);
}
__m256i test_mm256_mask_loadu_epi8(__m256i __W, __mmask32 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_loadu_epi8
+ // CHECK-LABEL: test_mm256_mask_loadu_epi8
// CHECK: @llvm.masked.load.v32i8.p0(ptr %{{.*}}, i32 1, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_mask_loadu_epi8(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi8(__mmask32 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_loadu_epi8
+ // CHECK-LABEL: test_mm256_maskz_loadu_epi8
// CHECK: @llvm.masked.load.v32i8.p0(ptr %{{.*}}, i32 1, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_maskz_loadu_epi8(__U, __P);
}
void test_mm_storeu_epi16(void *__p, __m128i __a) {
- // check-label: @test_mm_storeu_epi16
- // check: store <2 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
+ // CHECK-LABEL: test_mm_storeu_epi16
+ // CHECK: store <2 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm_storeu_epi16(__p, __a);
}
void test_mm_mask_storeu_epi16(void *__P, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_storeu_epi16
+ // CHECK-LABEL: test_mm_mask_storeu_epi16
// CHECK: @llvm.masked.store.v8i16.p0(<8 x i16> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
return _mm_mask_storeu_epi16(__P, __U, __A);
}
void test_mm256_storeu_epi16(void *__P, __m256i __A) {
- // CHECK-LABEL: @test_mm256_storeu_epi16
+ // CHECK-LABEL: test_mm256_storeu_epi16
// CHECK: store <4 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm256_storeu_epi16(__P, __A);
}
void test_mm256_mask_storeu_epi16(void *__P, __mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_storeu_epi16
+ // CHECK-LABEL: test_mm256_mask_storeu_epi16
// CHECK: @llvm.masked.store.v16i16.p0(<16 x i16> %{{.*}}, ptr %{{.*}}, i32 1, <16 x i1> %{{.*}})
return _mm256_mask_storeu_epi16(__P, __U, __A);
}
void test_mm_storeu_epi8(void *__p, __m128i __a) {
- // check-label: @test_mm_storeu_epi8
- // check: store <2 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
+ // CHECK-LABEL: test_mm_storeu_epi8
+ // CHECK: store <2 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm_storeu_epi8(__p, __a);
}
void test_mm_mask_storeu_epi8(void *__P, __mmask16 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_storeu_epi8
// CHECK: @llvm.masked.store.v16i8.p0(<16 x i8> %{{.*}}, ptr %{{.*}}, i32 1, <16 x i1> %{{.*}})
return _mm_mask_storeu_epi8(__P, __U, __A);
}
void test_mm256_storeu_epi8(void *__P, __m256i __A) {
- // CHECK-LABEL: @test_mm256_storeu_epi8
+ // CHECK-LABEL: test_mm256_storeu_epi8
// CHECK: store <4 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm256_storeu_epi8(__P, __A);
}
void test_mm256_mask_storeu_epi8(void *__P, __mmask32 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_storeu_epi8
// CHECK: @llvm.masked.store.v32i8.p0(<32 x i8> %{{.*}}, ptr %{{.*}}, i32 1, <32 x i1> %{{.*}})
return _mm256_mask_storeu_epi8(__P, __U, __A);
}
__mmask16 test_mm_test_epi8_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_test_epi8_mask
+ // CHECK-LABEL: test_mm_test_epi8_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <16 x i8> %{{.*}}, %{{.*}}
return _mm_test_epi8_mask(__A, __B);
}
__mmask16 test_mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_test_epi8_mask
+ // CHECK-LABEL: test_mm_mask_test_epi8_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
@@ -2632,14 +2670,14 @@ __mmask16 test_mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
}
__mmask32 test_mm256_test_epi8_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_test_epi8_mask
+ // CHECK-LABEL: test_mm256_test_epi8_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <32 x i8> %{{.*}}, %{{.*}}
return _mm256_test_epi8_mask(__A, __B);
}
__mmask32 test_mm256_mask_test_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_test_epi8_mask
+ // CHECK-LABEL: test_mm256_mask_test_epi8_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
@@ -2647,14 +2685,14 @@ __mmask32 test_mm256_mask_test_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B
}
__mmask8 test_mm_test_epi16_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_test_epi16_mask
+ // CHECK-LABEL: test_mm_test_epi16_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <8 x i16> %{{.*}}, %{{.*}}
return _mm_test_epi16_mask(__A, __B);
}
__mmask8 test_mm_mask_test_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_test_epi16_mask
+ // CHECK-LABEL: test_mm_mask_test_epi16_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
@@ -2662,14 +2700,14 @@ __mmask8 test_mm_mask_test_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B) {
}
__mmask16 test_mm256_test_epi16_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_test_epi16_mask
+ // CHECK-LABEL: test_mm256_test_epi16_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <16 x i16> %{{.*}}, %{{.*}}
return _mm256_test_epi16_mask(__A, __B);
}
__mmask16 test_mm256_mask_test_epi16_mask(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_test_epi16_mask
+ // CHECK-LABEL: test_mm256_mask_test_epi16_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
@@ -2677,14 +2715,14 @@ __mmask16 test_mm256_mask_test_epi16_mask(__mmask16 __U, __m256i __A, __m256i __
}
__mmask16 test_mm_testn_epi8_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_testn_epi8_mask
+ // CHECK-LABEL: test_mm_testn_epi8_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
return _mm_testn_epi8_mask(__A, __B);
}
__mmask16 test_mm_mask_testn_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_testn_epi8_mask
+ // CHECK-LABEL: test_mm_mask_testn_epi8_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
@@ -2692,14 +2730,14 @@ __mmask16 test_mm_mask_testn_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B)
}
__mmask32 test_mm256_testn_epi8_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_testn_epi8_mask
+ // CHECK-LABEL: test_mm256_testn_epi8_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <32 x i8> %{{.*}}, %{{.*}}
return _mm256_testn_epi8_mask(__A, __B);
}
__mmask32 test_mm256_mask_testn_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_testn_epi8_mask
+ // CHECK-LABEL: test_mm256_mask_testn_epi8_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <32 x i8> %{{.*}}, %{{.*}}
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
@@ -2707,14 +2745,14 @@ __mmask32 test_mm256_mask_testn_epi8_mask(__mmask32 __U, __m256i __A, __m256i __
}
__mmask8 test_mm_testn_epi16_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_testn_epi16_mask
+ // CHECK-LABEL: test_mm_testn_epi16_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
return _mm_testn_epi16_mask(__A, __B);
}
__mmask8 test_mm_mask_testn_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_testn_epi16_mask
+ // CHECK-LABEL: test_mm_mask_testn_epi16_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
@@ -2722,14 +2760,14 @@ __mmask8 test_mm_mask_testn_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B) {
}
__mmask16 test_mm256_testn_epi16_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_testn_epi16_mask
+ // CHECK-LABEL: test_mm256_testn_epi16_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <16 x i16> %{{.*}}, %{{.*}}
return _mm256_testn_epi16_mask(__A, __B);
}
__mmask16 test_mm256_mask_testn_epi16_mask(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_testn_epi16_mask
+ // CHECK-LABEL: test_mm256_mask_testn_epi16_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <16 x i16> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
@@ -2737,102 +2775,102 @@ __mmask16 test_mm256_mask_testn_epi16_mask(__mmask16 __U, __m256i __A, __m256i _
}
__mmask16 test_mm_movepi8_mask(__m128i __A) {
- // CHECK-LABEL: @test_mm_movepi8_mask
+ // CHECK-LABEL: test_mm_movepi8_mask
// CHECK: [[CMP:%.*]] = icmp slt <16 x i8> %{{.*}}, zeroinitializer
return _mm_movepi8_mask(__A);
}
__mmask32 test_mm256_movepi8_mask(__m256i __A) {
- // CHECK-LABEL: @test_mm256_movepi8_mask
+ // CHECK-LABEL: test_mm256_movepi8_mask
// CHECK: [[CMP:%.*]] = icmp slt <32 x i8> %{{.*}}, zeroinitializer
return _mm256_movepi8_mask(__A);
}
__m128i test_mm_movm_epi8(__mmask16 __A) {
- // CHECK-LABEL: @test_mm_movm_epi8
+ // CHECK-LABEL: test_mm_movm_epi8
// CHECK: %{{.*}} = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: %vpmovm2.i = sext <16 x i1> %{{.*}} to <16 x i8>
return _mm_movm_epi8(__A);
}
__m256i test_mm256_movm_epi8(__mmask32 __A) {
- // CHECK-LABEL: @test_mm256_movm_epi8
+ // CHECK-LABEL: test_mm256_movm_epi8
// CHECK: %{{.*}} = bitcast i32 %{{.*}} to <32 x i1>
// CHECK: %vpmovm2.i = sext <32 x i1> %{{.*}} to <32 x i8>
return _mm256_movm_epi8(__A);
}
__m128i test_mm_movm_epi16(__mmask8 __A) {
- // CHECK-LABEL: @test_mm_movm_epi16
+ // CHECK-LABEL: test_mm_movm_epi16
// CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: %vpmovm2.i = sext <8 x i1> %{{.*}} to <8 x i16>
return _mm_movm_epi16(__A);
}
__m256i test_mm256_movm_epi16(__mmask16 __A) {
- // CHECK-LABEL: @test_mm256_movm_epi16
+ // CHECK-LABEL: test_mm256_movm_epi16
// CHECK: %{{.*}} = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: %vpmovm2.i = sext <16 x i1> %{{.*}} to <16 x i16>
return _mm256_movm_epi16(__A);
}
__m128i test_mm_mask_broadcastb_epi8(__m128i __O, __mmask16 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_broadcastb_epi8
+ // CHECK-LABEL: test_mm_mask_broadcastb_epi8
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> zeroinitializer
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_broadcastb_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_broadcastb_epi8(__mmask16 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_broadcastb_epi8
+ // CHECK-LABEL: test_mm_maskz_broadcastb_epi8
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> zeroinitializer
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_broadcastb_epi8(__M, __A);
}
__m256i test_mm256_mask_broadcastb_epi8(__m256i __O, __mmask32 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcastb_epi8
+ // CHECK-LABEL: test_mm256_mask_broadcastb_epi8
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <32 x i32> zeroinitializer
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_broadcastb_epi8(__O, __M, __A);
}
__m256i test_mm256_maskz_broadcastb_epi8(__mmask32 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcastb_epi8
+ // CHECK-LABEL: test_mm256_maskz_broadcastb_epi8
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <32 x i32> zeroinitializer
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_broadcastb_epi8(__M, __A);
}
__m128i test_mm_mask_broadcastw_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_broadcastw_epi16
+ // CHECK-LABEL: test_mm_mask_broadcastw_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_broadcastw_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_broadcastw_epi16(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_broadcastw_epi16
+ // CHECK-LABEL: test_mm_maskz_broadcastw_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_broadcastw_epi16(__M, __A);
}
__m256i test_mm256_mask_broadcastw_epi16(__m256i __O, __mmask16 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcastw_epi16
+ // CHECK-LABEL: test_mm256_mask_broadcastw_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <16 x i32> zeroinitializer
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_broadcastw_epi16(__O, __M, __A);
}
__m256i test_mm256_maskz_broadcastw_epi16(__mmask16 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcastw_epi16
+ // CHECK-LABEL: test_mm256_maskz_broadcastw_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <16 x i32> zeroinitializer
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_broadcastw_epi16(__M, __A);
}
__m128i test_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A){
- // CHECK-LABEL: @test_mm_mask_set1_epi8
+ // CHECK-LABEL: test_mm_mask_set1_epi8
// CHECK: insertelement <16 x i8> poison, i8 %{{.*}}, i32 0
// CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 1
// CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 2
@@ -2853,7 +2891,7 @@ __m128i test_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A){
return _mm_mask_set1_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_set1_epi8 ( __mmask16 __M, char __A){
- // CHECK-LABEL: @test_mm_maskz_set1_epi8
+ // CHECK-LABEL: test_mm_maskz_set1_epi8
// CHECK: insertelement <16 x i8> poison, i8 %{{.*}}, i32 0
// CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 1
// CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 2
@@ -2875,7 +2913,7 @@ __m128i test_mm_maskz_set1_epi8 ( __mmask16 __M, char __A){
}
__m256i test_mm256_mask_set1_epi8(__m256i __O, __mmask32 __M, char __A) {
- // CHECK-LABEL: @test_mm256_mask_set1_epi8
+ // CHECK-LABEL: test_mm256_mask_set1_epi8
// CHECK: insertelement <32 x i8> poison, i8 %{{.*}}, i32 0
// CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 1
// CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 2
@@ -2913,7 +2951,7 @@ __m256i test_mm256_mask_set1_epi8(__m256i __O, __mmask32 __M, char __A) {
}
__m256i test_mm256_maskz_set1_epi8( __mmask32 __M, char __A) {
- // CHECK-LABEL: @test_mm256_maskz_set1_epi8
+ // CHECK-LABEL: test_mm256_maskz_set1_epi8
// CHECK: insertelement <32 x i8> poison, i8 %{{.*}}, i32 0
// CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 1
// CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 2
@@ -2952,7 +2990,7 @@ __m256i test_mm256_maskz_set1_epi8( __mmask32 __M, char __A) {
__m256i test_mm256_mask_set1_epi16(__m256i __O, __mmask16 __M, short __A) {
- // CHECK-LABEL: @test_mm256_mask_set1_epi16
+ // CHECK-LABEL: test_mm256_mask_set1_epi16
// CHECK: insertelement <16 x i16> poison, i16 %{{.*}}, i32 0
// CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 1
// CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 2
@@ -2974,7 +3012,7 @@ __m256i test_mm256_mask_set1_epi16(__m256i __O, __mmask16 __M, short __A) {
}
__m256i test_mm256_maskz_set1_epi16(__mmask16 __M, short __A) {
- // CHECK-LABEL: @test_mm256_maskz_set1_epi16
+ // CHECK-LABEL: test_mm256_maskz_set1_epi16
// CHECK: insertelement <16 x i16> poison, i16 %{{.*}}, i32 0
// CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 1
// CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 2
@@ -2996,7 +3034,7 @@ __m256i test_mm256_maskz_set1_epi16(__mmask16 __M, short __A) {
}
__m128i test_mm_mask_set1_epi16(__m128i __O, __mmask8 __M, short __A) {
- // CHECK-LABEL: @test_mm_mask_set1_epi16
+ // CHECK-LABEL: test_mm_mask_set1_epi16
// CHECK: insertelement <8 x i16> poison, i16 %{{.*}}, i32 0
// CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 1
// CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 2
@@ -3010,7 +3048,7 @@ __m128i test_mm_mask_set1_epi16(__m128i __O, __mmask8 __M, short __A) {
}
__m128i test_mm_maskz_set1_epi16(__mmask8 __M, short __A) {
- // CHECK-LABEL: @test_mm_maskz_set1_epi16
+ // CHECK-LABEL: test_mm_maskz_set1_epi16
// CHECK: insertelement <8 x i16> poison, i16 %{{.*}}, i32 0
// CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 1
// CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 2
@@ -3023,174 +3061,174 @@ __m128i test_mm_maskz_set1_epi16(__mmask8 __M, short __A) {
return _mm_maskz_set1_epi16(__M, __A);
}
__m128i test_mm_permutexvar_epi16(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_permutexvar_epi16
+ // CHECK-LABEL: test_mm_permutexvar_epi16
// CHECK: @llvm.x86.avx512.permvar.hi.128
return _mm_permutexvar_epi16(__A, __B);
}
__m128i test_mm_maskz_permutexvar_epi16(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_permutexvar_epi16
+ // CHECK-LABEL: test_mm_maskz_permutexvar_epi16
// CHECK: @llvm.x86.avx512.permvar.hi.128
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_permutexvar_epi16(__M, __A, __B);
}
__m128i test_mm_mask_permutexvar_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_permutexvar_epi16
+ // CHECK-LABEL: test_mm_mask_permutexvar_epi16
// CHECK: @llvm.x86.avx512.permvar.hi.128
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_permutexvar_epi16(__W, __M, __A, __B);
}
__m256i test_mm256_permutexvar_epi16(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_permutexvar_epi16
+ // CHECK-LABEL: test_mm256_permutexvar_epi16
// CHECK: @llvm.x86.avx512.permvar.hi.256
return _mm256_permutexvar_epi16(__A, __B);
}
__m256i test_mm256_maskz_permutexvar_epi16(__mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_permutexvar_epi16
+ // CHECK-LABEL: test_mm256_maskz_permutexvar_epi16
// CHECK: @llvm.x86.avx512.permvar.hi.256
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_permutexvar_epi16(__M, __A, __B);
}
__m256i test_mm256_mask_permutexvar_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_permutexvar_epi16
+ // CHECK-LABEL: test_mm256_mask_permutexvar_epi16
// CHECK: @llvm.x86.avx512.permvar.hi.256
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_permutexvar_epi16(__W, __M, __A, __B);
}
__m128i test_mm_mask_alignr_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_alignr_epi8
+ // CHECK-LABEL: test_mm_mask_alignr_epi8
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_alignr_epi8(__W, __U, __A, __B, 2);
}
__m128i test_mm_maskz_alignr_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_alignr_epi8
+ // CHECK-LABEL: test_mm_maskz_alignr_epi8
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_alignr_epi8(__U, __A, __B, 2);
}
__m256i test_mm256_mask_alignr_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_alignr_epi8
+ // CHECK-LABEL: test_mm256_mask_alignr_epi8
// CHECK: shufflevector <32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 33, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48, i32 49>
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_alignr_epi8(__W, __U, __A, __B, 2);
}
__m256i test_mm256_maskz_alignr_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_alignr_epi8
+ // CHECK-LABEL: test_mm256_maskz_alignr_epi8
// CHECK: shufflevector <32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 33, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48, i32 49>
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_alignr_epi8(__U, __A, __B, 2);
}
__m128i test_mm_dbsad_epu8(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_dbsad_epu8
+ // CHECK-LABEL: test_mm_dbsad_epu8
// CHECK: @llvm.x86.avx512.dbpsadbw.128
return _mm_dbsad_epu8(__A, __B, 170);
}
__m128i test_mm_mask_dbsad_epu8(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_dbsad_epu8
+ // CHECK-LABEL: test_mm_mask_dbsad_epu8
// CHECK: @llvm.x86.avx512.dbpsadbw.128
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_dbsad_epu8(__W, __U, __A, __B, 170);
}
__m128i test_mm_maskz_dbsad_epu8(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_dbsad_epu8
+ // CHECK-LABEL: test_mm_maskz_dbsad_epu8
// CHECK: @llvm.x86.avx512.dbpsadbw.128
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_dbsad_epu8(__U, __A, __B, 170);
}
__m256i test_mm256_dbsad_epu8(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_dbsad_epu8
+ // CHECK-LABEL: test_mm256_dbsad_epu8
// CHECK: @llvm.x86.avx512.dbpsadbw.256
return _mm256_dbsad_epu8(__A, __B, 170);
}
__m256i test_mm256_mask_dbsad_epu8(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_dbsad_epu8
+ // CHECK-LABEL: test_mm256_mask_dbsad_epu8
// CHECK: @llvm.x86.avx512.dbpsadbw.256
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_dbsad_epu8(__W, __U, __A, __B, 170);
}
__m256i test_mm256_maskz_dbsad_epu8(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_dbsad_epu8
+ // CHECK-LABEL: test_mm256_maskz_dbsad_epu8
// CHECK: @llvm.x86.avx512.dbpsadbw.256
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_dbsad_epu8(__U, __A, __B, 170);
}
__mmask8 test_mm_movepi16_mask(__m128i __A) {
- // CHECK-LABEL: @test_mm_movepi16_mask
+ // CHECK-LABEL: test_mm_movepi16_mask
// CHECK: [[CMP:%.*]] = icmp slt <8 x i16> %{{.*}}, zeroinitializer
return _mm_movepi16_mask(__A);
}
__mmask16 test_mm256_movepi16_mask(__m256i __A) {
- // CHECK-LABEL: @test_mm256_movepi16_mask
+ // CHECK-LABEL: test_mm256_movepi16_mask
// CHECK: [[CMP:%.*]] = icmp slt <16 x i16> %{{.*}}, zeroinitializer
return _mm256_movepi16_mask(__A);
}
__m128i test_mm_mask_shufflehi_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_shufflehi_epi16
+ // CHECK-LABEL: test_mm_mask_shufflehi_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 5, i32 4, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_shufflehi_epi16(__W, __U, __A, 5);
}
__m128i test_mm_maskz_shufflehi_epi16(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_shufflehi_epi16
+ // CHECK-LABEL: test_mm_maskz_shufflehi_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 5, i32 4, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_shufflehi_epi16(__U, __A, 5);
}
__m128i test_mm_mask_shufflelo_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_shufflelo_epi16
+ // CHECK-LABEL: test_mm_mask_shufflelo_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <8 x i32> <i32 1, i32 1, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_shufflelo_epi16(__W, __U, __A, 5);
}
__m128i test_mm_maskz_shufflelo_epi16(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_shufflelo_epi16
+ // CHECK-LABEL: test_mm_maskz_shufflelo_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <8 x i32> <i32 1, i32 1, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_shufflelo_epi16(__U, __A, 5);
}
__m256i test_mm256_mask_shufflehi_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_shufflehi_epi16
+ // CHECK-LABEL: test_mm256_mask_shufflehi_epi16
// CHECK: shufflevector <16 x i16> %{{.*}}, <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 5, i32 4, i32 4, i32 8, i32 9, i32 10, i32 11, i32 13, i32 13, i32 12, i32 12>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_shufflehi_epi16(__W, __U, __A, 5);
}
__m256i test_mm256_maskz_shufflehi_epi16(__mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_shufflehi_epi16
+ // CHECK-LABEL: test_mm256_maskz_shufflehi_epi16
// CHECK: shufflevector <16 x i16> %{{.*}}, <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 5, i32 4, i32 4, i32 8, i32 9, i32 10, i32 11, i32 13, i32 13, i32 12, i32 12>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_shufflehi_epi16(__U, __A, 5);
}
__m256i test_mm256_mask_shufflelo_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_shufflelo_epi16
+ // CHECK-LABEL: test_mm256_mask_shufflelo_epi16
// CHECK: shufflevector <16 x i16> %{{.*}}, <16 x i16> poison, <16 x i32> <i32 1, i32 1, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7, i32 9, i32 9, i32 8, i32 8, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_shufflelo_epi16(__W, __U, __A, 5);
}
__m256i test_mm256_maskz_shufflelo_epi16(__mmask16 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_shufflelo_epi16
+ // CHECK-LABEL: test_mm256_maskz_shufflelo_epi16
// CHECK: shufflevector <16 x i16> %{{.*}}, <16 x i16> poison, <16 x i32> <i32 1, i32 1, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7, i32 9, i32 9, i32 8, i32 8, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_shufflelo_epi16(__U, __A, 5);
@@ -3198,42 +3236,42 @@ __m256i test_mm256_maskz_shufflelo_epi16(__mmask16 __U, __m256i __A) {
void test_mm_mask_cvtepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
{
- // CHECK-LABEL:@test_mm_mask_cvtepi16_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtepi16_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.wb.mem.128
_mm_mask_cvtepi16_storeu_epi8 (__P, __M, __A);
}
void test_mm_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
{
- // CHECK-LABEL:@test_mm_mask_cvtsepi16_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtsepi16_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.wb.mem.128
_mm_mask_cvtsepi16_storeu_epi8 ( __P, __M, __A);
}
void test_mm_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
{
- // CHECK-LABEL:@test_mm_mask_cvtusepi16_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtusepi16_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.wb.mem.128
_mm_mask_cvtusepi16_storeu_epi8 (__P, __M, __A);
}
void test_mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
{
- // CHECK-LABEL:@test_mm256_mask_cvtusepi16_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtusepi16_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.wb.mem.256
_mm256_mask_cvtusepi16_storeu_epi8 ( __P, __M, __A);
}
void test_mm256_mask_cvtepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
{
- // CHECK-LABEL:@test_mm256_mask_cvtepi16_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtepi16_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.wb.mem.256
_mm256_mask_cvtepi16_storeu_epi8 ( __P, __M, __A);
}
void test_mm256_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
{
- // CHECK-LABEL:@test_mm256_mask_cvtsepi16_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtsepi16_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.wb.mem.256
_mm256_mask_cvtsepi16_storeu_epi8 ( __P, __M, __A);
}
diff --git a/clang/test/CodeGen/X86/avx512vlbw-reduceIntrin.c b/clang/test/CodeGen/X86/avx512vlbw-reduceIntrin.c
index 0a16920..6b76da3 100644
--- a/clang/test/CodeGen/X86/avx512vlbw-reduceIntrin.c
+++ b/clang/test/CodeGen/X86/avx512vlbw-reduceIntrin.c
@@ -1,420 +1,532 @@
-// RUN: %clang_cc1 -ffreestanding %s -O0 -triple=x86_64 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 -ffreestanding %s -O0 -triple=i386 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=x86_64 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=x86_64 -target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=i386 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=i386 -target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=x86_64 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=x86_64 -target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=i386 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=i386 -target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=x86_64 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=x86_64 -target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=i386 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=i386 -target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=x86_64 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=x86_64 -target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=i386 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=i386 -target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
short test_mm_reduce_add_epi16(__m128i __W){
-// CHECK-LABEL: @test_mm_reduce_add_epi16(
-// CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %{{.*}})
+// CHECK-LABEL: test_mm_reduce_add_epi16
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %{{.*}})
return _mm_reduce_add_epi16(__W);
}
+TEST_CONSTEXPR(_mm_reduce_add_epi16((__m128i)(__v8hi){1,2,3,4,5,6,7,8}) == 36);
short test_mm_reduce_mul_epi16(__m128i __W){
-// CHECK-LABEL: @test_mm_reduce_mul_epi16(
-// CHECK: call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %{{.*}})
+// CHECK-LABEL: test_mm_reduce_mul_epi16
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %{{.*}})
return _mm_reduce_mul_epi16(__W);
}
+TEST_CONSTEXPR(_mm_reduce_mul_epi16((__m128i)(__v8hi){1,2,3,1,2,3,1,2}) == 72);
short test_mm_reduce_or_epi16(__m128i __W){
-// CHECK-LABEL: @test_mm_reduce_or_epi16(
-// CHECK: call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %{{.*}})
+// CHECK-LABEL: test_mm_reduce_or_epi16
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %{{.*}})
return _mm_reduce_or_epi16(__W);
}
+TEST_CONSTEXPR(_mm_reduce_or_epi16((__m128i)(__v8hi){1,2,4,8,0,0,0,0}) == 15);
short test_mm_reduce_and_epi16(__m128i __W){
-// CHECK-LABEL: @test_mm_reduce_and_epi16(
-// CHECK: call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %{{.*}})
+// CHECK-LABEL: test_mm_reduce_and_epi16
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %{{.*}})
return _mm_reduce_and_epi16(__W);
}
+TEST_CONSTEXPR(_mm_reduce_and_epi16((__m128i)(__v8hi){1,3,5,7,9,11,13,15}) == 1);
short test_mm_mask_reduce_add_epi16(__mmask8 __M, __m128i __W){
-// CHECK-LABEL: @test_mm_mask_reduce_add_epi16(
+// CHECK-LABEL: test_mm_mask_reduce_add_epi16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %{{.*}})
return _mm_mask_reduce_add_epi16(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_add_epi16((__mmask8)0b11110000, (__m128i)(__v8hi){1,2,3,4,5,6,7,8}) == 26);
+TEST_CONSTEXPR(_mm_mask_reduce_add_epi16((__mmask8)0b00001111, (__m128i)(__v8hi){1,2,3,4,5,6,7,8}) == 10);
short test_mm_mask_reduce_mul_epi16(__mmask8 __M, __m128i __W){
-// CHECK-LABEL: @test_mm_mask_reduce_mul_epi16(
+// CHECK-LABEL: test_mm_mask_reduce_mul_epi16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %{{.*}})
return _mm_mask_reduce_mul_epi16(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_mul_epi16((__mmask8)0b11110000, (__m128i)(__v8hi){1,2,3,1,2,3,1,2}) == 12);
+TEST_CONSTEXPR(_mm_mask_reduce_mul_epi16((__mmask8)0b00001111, (__m128i)(__v8hi){1,2,3,1,2,3,1,2}) == 6);
short test_mm_mask_reduce_and_epi16(__mmask8 __M, __m128i __W){
-// CHECK-LABEL: @test_mm_mask_reduce_and_epi16(
+// CHECK-LABEL: test_mm_mask_reduce_and_epi16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %{{.*}}
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %{{.*}})
return _mm_mask_reduce_and_epi16(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_and_epi16((__mmask8)0b11110000, (__m128i)(__v8hi){1,3,5,7,0,0,0,0}) == 0);
+TEST_CONSTEXPR(_mm_mask_reduce_and_epi16((__mmask8)0b00001111, (__m128i)(__v8hi){1,3,5,7,0,0,0,0}) == 1);
short test_mm_mask_reduce_or_epi16(__mmask8 __M, __m128i __W){
-// CHECK-LABEL: @test_mm_mask_reduce_or_epi16(
+// CHECK-LABEL: test_mm_mask_reduce_or_epi16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %{{.*}})
return _mm_mask_reduce_or_epi16(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_or_epi16((__mmask8)0b11110000, (__m128i)(__v8hi){1,2,4,8,0,0,0,0}) == 0);
+TEST_CONSTEXPR(_mm_mask_reduce_or_epi16((__mmask8)0b00001111, (__m128i)(__v8hi){1,2,4,8,0,0,0,0}) == 15);
short test_mm256_reduce_add_epi16(__m256i __W){
-// CHECK-LABEL: @test_mm256_reduce_add_epi16(
-// CHECK: call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %{{.*}})
+// CHECK-LABEL: test_mm256_reduce_add_epi16
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %{{.*}})
return _mm256_reduce_add_epi16(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_add_epi16((__m256i)(__v16hi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}) == 120);
short test_mm256_reduce_mul_epi16(__m256i __W){
-// CHECK-LABEL: @test_mm256_reduce_mul_epi16(
-// CHECK: call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %{{.*}})
+// CHECK-LABEL: test_mm256_reduce_mul_epi16
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %{{.*}})
return _mm256_reduce_mul_epi16(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_mul_epi16((__m256i)(__v16hi){1,2,3,1,2,3,1,2,3,1,2,3,1,2,3,1}) == 7776);
short test_mm256_reduce_or_epi16(__m256i __W){
-// CHECK-LABEL: @test_mm256_reduce_or_epi16(
-// CHECK: call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %{{.*}})
+// CHECK-LABEL: test_mm256_reduce_or_epi16
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %{{.*}})
return _mm256_reduce_or_epi16(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_or_epi16((__m256i)(__v16hi){1,2,4,8,16,32,64,128,0,0,0,0,0,0,0,0}) == 255);
short test_mm256_reduce_and_epi16(__m256i __W){
-// CHECK-LABEL: @test_mm256_reduce_and_epi16(
-// CHECK: call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %{{.*}})
+// CHECK-LABEL: test_mm256_reduce_and_epi16
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %{{.*}})
return _mm256_reduce_and_epi16(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_and_epi16((__m256i)(__v16hi){1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31}) == 1);
short test_mm256_mask_reduce_add_epi16(__mmask16 __M, __m256i __W){
-// CHECK-LABEL: @test_mm256_mask_reduce_add_epi16(
+// CHECK-LABEL: test_mm256_mask_reduce_add_epi16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %{{.*}})
return _mm256_mask_reduce_add_epi16(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_add_epi16((__mmask16)0b1111111100000000, (__m256i)(__v16hi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}) == 84);
+TEST_CONSTEXPR(_mm256_mask_reduce_add_epi16((__mmask16)0b0000000011111111, (__m256i)(__v16hi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}) == 36);
short test_mm256_mask_reduce_mul_epi16(__mmask16 __M, __m256i __W){
-// CHECK-LABEL: @test_mm256_mask_reduce_mul_epi16(
+// CHECK-LABEL: test_mm256_mask_reduce_mul_epi16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %{{.*}})
return _mm256_mask_reduce_mul_epi16(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_mul_epi16((__mmask16)0b1111111100000000, (__m256i)(__v16hi){1,2,3,1,2,3,1,2,3,1,2,3,1,2,3,1}) == 108);
+TEST_CONSTEXPR(_mm256_mask_reduce_mul_epi16((__mmask16)0b0000000011111111, (__m256i)(__v16hi){1,2,3,1,2,3,1,2,3,1,2,3,1,2,3,1}) == 72);
short test_mm256_mask_reduce_and_epi16(__mmask16 __M, __m256i __W){
-// CHECK-LABEL: @test_mm256_mask_reduce_and_epi16(
+// CHECK-LABEL: test_mm256_mask_reduce_and_epi16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %{{.*}})
return _mm256_mask_reduce_and_epi16(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_and_epi16((__mmask16)0b1111111100000000, (__m256i)(__v16hi){1,3,5,7,9,11,13,15,0,0,0,0,0,0,0,0}) == 0);
+TEST_CONSTEXPR(_mm256_mask_reduce_and_epi16((__mmask16)0b0000000011111111, (__m256i)(__v16hi){1,3,5,7,9,11,13,15,0,0,0,0,0,0,0,0}) == 1);
short test_mm256_mask_reduce_or_epi16(__mmask16 __M, __m256i __W){
-// CHECK-LABEL: @test_mm256_mask_reduce_or_epi16(
+// CHECK-LABEL: test_mm256_mask_reduce_or_epi16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %{{.*}})
return _mm256_mask_reduce_or_epi16(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_or_epi16((__mmask16)0b1111111100000000, (__m256i)(__v16hi){1,2,4,8,16,32,64,128,0,0,0,0,0,0,0,0}) == 0);
+TEST_CONSTEXPR(_mm256_mask_reduce_or_epi16((__mmask16)0b0000000011111111, (__m256i)(__v16hi){1,2,4,8,16,32,64,128,0,0,0,0,0,0,0,0}) == 255);
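// Editorial sketch (assumption, not from this commit): the masked-reduce
// expectations above follow from masked-off lanes being replaced with the
// operation's identity before folding: 0 for add/or, 1 for mul, all-ones for and.
static short sketch_mask_reduce_add_epi16(unsigned short m, const short v[16]) {
  short s = 0; // identity for add; skipped lanes contribute nothing
  for (int i = 0; i < 16; ++i)
    if (m & (1u << i))
      s = (short)(s + v[i]);
  return s;
}
// e.g. with mask 0x00FF and lanes {1..15,0} only lanes 0..7 are folded -> 36,
// matching the _mm256_mask_reduce_add_epi16 case above.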
signed char test_mm_reduce_add_epi8(__m128i __W){
-// CHECK-LABEL: @test_mm_reduce_add_epi8(
-// CHECK: call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %{{.*}})
+// CHECK-LABEL: test_mm_reduce_add_epi8
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %{{.*}})
return _mm_reduce_add_epi8(__W);
}
+TEST_CONSTEXPR(_mm_reduce_add_epi8((__m128i)(__v16qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}) == 120);
signed char test_mm_reduce_mul_epi8(__m128i __W){
-// CHECK-LABEL: @test_mm_reduce_mul_epi8(
-// CHECK: call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %{{.*}})
+// CHECK-LABEL: test_mm_reduce_mul_epi8
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %{{.*}})
return _mm_reduce_mul_epi8(__W);
}
+TEST_CONSTEXPR(_mm_reduce_mul_epi8((__m128i)(__v16qs){1,2,1,1,2,1,1,2,1,1,2,1,1,2,1,1}) == 32);
signed char test_mm_reduce_and_epi8(__m128i __W){
-// CHECK-LABEL: @test_mm_reduce_and_epi8(
-// CHECK: call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %{{.*}})
+// CHECK-LABEL: test_mm_reduce_and_epi8
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %{{.*}})
return _mm_reduce_and_epi8(__W);
}
+TEST_CONSTEXPR(_mm_reduce_and_epi8((__m128i)(__v16qs){1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31}) == 1);
signed char test_mm_reduce_or_epi8(__m128i __W){
-// CHECK-LABEL: @test_mm_reduce_or_epi8(
-// CHECK: call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %{{.*}})
+// CHECK-LABEL: test_mm_reduce_or_epi8
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %{{.*}})
return _mm_reduce_or_epi8(__W);
}
+TEST_CONSTEXPR(_mm_reduce_or_epi8((__m128i)(__v16qs){0,1,2,4,8,16,32,64,0,0,0,0,0,0,0,0}) == 127);
signed char test_mm_mask_reduce_add_epi8(__mmask16 __M, __m128i __W){
-// CHECK-LABEL: @test_mm_mask_reduce_add_epi8(
+// CHECK-LABEL: test_mm_mask_reduce_add_epi8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %{{.*}})
return _mm_mask_reduce_add_epi8(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_add_epi8((__mmask16)0b1111111100000000, (__m128i)(__v16qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}) == 84);
+TEST_CONSTEXPR(_mm_mask_reduce_add_epi8((__mmask16)0b0000000011111111, (__m128i)(__v16qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}) == 36);
signed char test_mm_mask_reduce_mul_epi8(__mmask16 __M, __m128i __W){
-// CHECK-LABEL: @test_mm_mask_reduce_mul_epi8(
+// CHECK-LABEL: test_mm_mask_reduce_mul_epi8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %{{.*}})
return _mm_mask_reduce_mul_epi8(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_mul_epi8((__mmask16)0b1111111100000000, (__m128i)(__v16qs){1,2,1,1,2,1,1,2,1,1,2,1,1,2,1,1}) == 4);
+TEST_CONSTEXPR(_mm_mask_reduce_mul_epi8((__mmask16)0b0000000011111111, (__m128i)(__v16qs){1,2,1,1,2,1,1,2,1,1,2,1,1,2,1,1}) == 8);
signed char test_mm_mask_reduce_and_epi8(__mmask16 __M, __m128i __W){
-// CHECK-LABEL: @test_mm_mask_reduce_and_epi8(
+// CHECK-LABEL: test_mm_mask_reduce_and_epi8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %{{.*}})
return _mm_mask_reduce_and_epi8(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_and_epi8((__mmask16)0b1111111100000000, (__m128i)(__v16qs){1,3,5,7,9,11,13,15,0,0,0,0,0,0,0,0}) == 0);
+TEST_CONSTEXPR(_mm_mask_reduce_and_epi8((__mmask16)0b0000000011111111, (__m128i)(__v16qs){1,3,5,7,9,11,13,15,0,0,0,0,0,0,0,0}) == 1);
signed char test_mm_mask_reduce_or_epi8(__mmask16 __M, __m128i __W){
-// CHECK-LABEL: @test_mm_mask_reduce_or_epi8(
+// CHECK-LABEL: test_mm_mask_reduce_or_epi8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %{{.*}})
return _mm_mask_reduce_or_epi8(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_or_epi8((__mmask16)0b1111111100000000, (__m128i)(__v16qs){0,1,2,4,8,16,32,64,0,0,0,0,0,0,0,0}) == 0);
+TEST_CONSTEXPR(_mm_mask_reduce_or_epi8((__mmask16)0b0000000011111111, (__m128i)(__v16qs){0,1,2,4,8,16,32,64,0,0,0,0,0,0,0,0}) == 127);
signed char test_mm256_reduce_add_epi8(__m256i __W){
-// CHECK-LABEL: @test_mm256_reduce_add_epi8(
-// CHECK: call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %{{.*}})
+// CHECK-LABEL: test_mm256_reduce_add_epi8
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %{{.*}})
return _mm256_reduce_add_epi8(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_add_epi8((__m256i)(__v32qs){0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7}) == 112);
signed char test_mm256_reduce_mul_epi8(__m256i __W){
-// CHECK-LABEL: @test_mm256_reduce_mul_epi8(
-// CHECK: call i8 @llvm.vector.reduce.mul.v32i8(<32 x i8> %{{.*}})
+// CHECK-LABEL: test_mm256_reduce_mul_epi8
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.mul.v32i8(<32 x i8> %{{.*}})
return _mm256_reduce_mul_epi8(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_mul_epi8((__m256i)(__v32qs){1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2}) == 16);
signed char test_mm256_reduce_and_epi8(__m256i __W){
-// CHECK-LABEL: @test_mm256_reduce_and_epi8(
-// CHECK: call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %{{.*}})
+// CHECK-LABEL: test_mm256_reduce_and_epi8
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %{{.*}})
return _mm256_reduce_and_epi8(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_and_epi8((__m256i)(__v32qs){1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47,49,51,53,55,57,59,61,63}) == 1);
signed char test_mm256_reduce_or_epi8(__m256i __W){
-// CHECK-LABEL: @test_mm256_reduce_or_epi8(
-// CHECK: call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %{{.*}})
+// CHECK-LABEL: test_mm256_reduce_or_epi8
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %{{.*}})
return _mm256_reduce_or_epi8(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_or_epi8((__m256i)(__v32qs){1,2,4,8,16,32,64,127,1,2,4,8,16,32,64,127,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}) == 127);
signed char test_mm256_mask_reduce_add_epi8(__mmask32 __M, __m256i __W){
-// CHECK-LABEL: @test_mm256_mask_reduce_add_epi8(
+// CHECK-LABEL: test_mm256_mask_reduce_add_epi8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %{{.*}})
return _mm256_mask_reduce_add_epi8(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_add_epi8((__mmask32)0b11111111111111110000000000000000, (__m256i)(__v32qs){0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7}) == 56);
+TEST_CONSTEXPR(_mm256_mask_reduce_add_epi8((__mmask32)0b00000000000000001111111111111111, (__m256i)(__v32qs){8,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7}) == 64);
signed char test_mm256_mask_reduce_mul_epi8(__mmask32 __M, __m256i __W){
-// CHECK-LABEL: @test_mm256_mask_reduce_mul_epi8(
+// CHECK-LABEL: test_mm256_mask_reduce_mul_epi8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.mul.v32i8(<32 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.mul.v32i8(<32 x i8> %{{.*}})
return _mm256_mask_reduce_mul_epi8(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_mul_epi8((__mmask32)0b11111111111111110000000000000000, (__m256i)(__v32qs){1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2}) == 4);
+TEST_CONSTEXPR(_mm256_mask_reduce_mul_epi8((__mmask32)0b00000000000000001111111111111111, (__m256i)(__v32qs){4,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2}) == 16);
signed char test_mm256_mask_reduce_and_epi8(__mmask32 __M, __m256i __W){
-// CHECK-LABEL: @test_mm256_mask_reduce_and_epi8(
+// CHECK-LABEL: test_mm256_mask_reduce_and_epi8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %{{.*}})
return _mm256_mask_reduce_and_epi8(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_and_epi8((__mmask32)0b11111111111111110000000000000000, (__m256i)(__v32qs){1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}) == 0);
+TEST_CONSTEXPR(_mm256_mask_reduce_and_epi8((__mmask32)0b00000000000000001111111111111111, (__m256i)(__v32qs){1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}) == 1);
signed char test_mm256_mask_reduce_or_epi8(__mmask32 __M, __m256i __W){
-// CHECK-LABEL: @test_mm256_mask_reduce_or_epi8(
+// CHECK-LABEL: test_mm256_mask_reduce_or_epi8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %{{.*}})
return _mm256_mask_reduce_or_epi8(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_or_epi8((__mmask32)0b11111111111111110000000000000000, (__m256i)(__v32qs){1,2,4,8,16,32,64,127,1,2,4,8,16,32,64,127,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}) == 0);
+TEST_CONSTEXPR(_mm256_mask_reduce_or_epi8((__mmask32)0b00000000000000001111111111111111, (__m256i)(__v32qs){1,2,4,8,16,32,64,127,1,2,4,8,16,32,64,127,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}) == 127);
short test_mm_reduce_max_epi16(__m128i __W){
// CHECK-LABEL: test_mm_reduce_max_epi16
-// CHECK: call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %{{.*}})
return _mm_reduce_max_epi16(__W);
}
+TEST_CONSTEXPR(_mm_reduce_max_epi16((__m128i)(__v8hi){-4,-3,-2,-1,1,2,3,4}) == 4);
short test_mm_reduce_min_epi16(__m128i __W){
// CHECK-LABEL: test_mm_reduce_min_epi16
-// CHECK: call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %{{.*}})
return _mm_reduce_min_epi16(__W);
}
+TEST_CONSTEXPR(_mm_reduce_min_epi16((__m128i)(__v8hi){-4,-3,-2,-1,1,2,3,4}) == -4);
unsigned short test_mm_reduce_max_epu16(__m128i __W){
// CHECK-LABEL: test_mm_reduce_max_epu16
-// CHECK: call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %{{.*}})
return _mm_reduce_max_epu16(__W);
}
+TEST_CONSTEXPR(_mm_reduce_max_epu16((__m128i)(__v8hu){1,2,3,4,5,6,7,8}) == 8);
unsigned short test_mm_reduce_min_epu16(__m128i __W){
// CHECK-LABEL: test_mm_reduce_min_epu16
-// CHECK: call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %{{.*}})
return _mm_reduce_min_epu16(__W);
}
+TEST_CONSTEXPR(_mm_reduce_min_epu16((__m128i)(__v8hu){1,2,3,4,5,6,7,8}) == 1);
short test_mm_mask_reduce_max_epi16(__mmask8 __M, __m128i __W){
// CHECK-LABEL: test_mm_mask_reduce_max_epi16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %{{.*}})
return _mm_mask_reduce_max_epi16(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_max_epi16((__mmask8)0b11110000, (__m128i)(__v8hi){-4,-3,-2,-1,1,2,3,4}) == 4);
+TEST_CONSTEXPR(_mm_mask_reduce_max_epi16((__mmask8)0b00001111, (__m128i)(__v8hi){-4,-3,-2,-1,1,2,3,4}) == -1);
short test_mm_mask_reduce_min_epi16(__mmask8 __M, __m128i __W){
// CHECK-LABEL: test_mm_mask_reduce_min_epi16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %{{.*}})
return _mm_mask_reduce_min_epi16(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_min_epi16((__mmask8)0b11110000, (__m128i)(__v8hi){-4,-3,-2,-1,1,2,3,4}) == 1);
+TEST_CONSTEXPR(_mm_mask_reduce_min_epi16((__mmask8)0b00001111, (__m128i)(__v8hi){-4,-3,-2,-1,1,2,3,4}) == -4);
unsigned short test_mm_mask_reduce_max_epu16(__mmask8 __M, __m128i __W){
// CHECK-LABEL: test_mm_mask_reduce_max_epu16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %{{.*}})
return _mm_mask_reduce_max_epu16(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_max_epu16((__mmask8)0b11110000, (__m128i)(__v8hu){1,2,3,4,5,6,7,8}) == 8);
+TEST_CONSTEXPR(_mm_mask_reduce_max_epu16((__mmask8)0b00001111, (__m128i)(__v8hu){1,2,3,4,5,6,7,8}) == 4);
unsigned short test_mm_mask_reduce_min_epu16(__mmask8 __M, __m128i __W){
// CHECK-LABEL: test_mm_mask_reduce_min_epu16
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %{{.*}})
return _mm_mask_reduce_min_epu16(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_min_epu16((__mmask8)0b11110000, (__m128i)(__v8hu){1,2,3,4,5,6,7,8}) == 5);
+TEST_CONSTEXPR(_mm_mask_reduce_min_epu16((__mmask8)0b00001111, (__m128i)(__v8hu){1,2,3,4,5,6,7,8}) == 1);
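+// The min/max reductions use the type's extreme value as the identity
+// (0 for unsigned max, all-ones for unsigned min, SHRT_MIN/SHRT_MAX for the
+// signed word forms), e.g. _mm_mask_reduce_min_epu16 with mask 0b11110000
+// reduces {0xFFFF,0xFFFF,0xFFFF,0xFFFF,5,6,7,8} and returns 5.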
short test_mm256_reduce_max_epi16(__m256i __W){
// CHECK-LABEL: test_mm256_reduce_max_epi16
-// CHECK: call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %{{.*}})
return _mm256_reduce_max_epi16(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_max_epi16((__m256i)(__v16hi){-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8}) == 8);
short test_mm256_reduce_min_epi16(__m256i __W){
// CHECK-LABEL: test_mm256_reduce_min_epi16
-// CHECK: call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %{{.*}})
return _mm256_reduce_min_epi16(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_min_epi16((__m256i)(__v16hi){-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8}) == -8);
unsigned short test_mm256_reduce_max_epu16(__m256i __W){
// CHECK-LABEL: test_mm256_reduce_max_epu16
-// CHECK: call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %{{.*}})
return _mm256_reduce_max_epu16(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_max_epu16((__m256i)(__v16hu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 16);
unsigned short test_mm256_reduce_min_epu16(__m256i __W){
// CHECK-LABEL: test_mm256_reduce_min_epu16
-// CHECK: call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %{{.*}})
return _mm256_reduce_min_epu16(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_min_epu16((__m256i)(__v16hu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 1);
short test_mm256_mask_reduce_max_epi16(__mmask16 __M, __m256i __W){
// CHECK-LABEL: test_mm256_mask_reduce_max_epi16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %{{.*}})
return _mm256_mask_reduce_max_epi16(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_max_epi16((__mmask16)0b1111111100000000, (__m256i)(__v16hi){-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8}) == 8);
+TEST_CONSTEXPR(_mm256_mask_reduce_max_epi16((__mmask16)0b0000000011111111, (__m256i)(__v16hi){-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8}) == -1);
short test_mm256_mask_reduce_min_epi16(__mmask16 __M, __m256i __W){
// CHECK-LABEL: test_mm256_mask_reduce_min_epi16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %{{.*}})
return _mm256_mask_reduce_min_epi16(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_min_epi16((__mmask16)0b1111111100000000, (__m256i)(__v16hi){-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8}) == 1);
+TEST_CONSTEXPR(_mm256_mask_reduce_min_epi16((__mmask16)0b0000000011111111, (__m256i)(__v16hi){-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8}) == -8);
unsigned short test_mm256_mask_reduce_max_epu16(__mmask16 __M, __m256i __W){
// CHECK-LABEL: test_mm256_mask_reduce_max_epu16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %{{.*}})
return _mm256_mask_reduce_max_epu16(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_max_epu16((__mmask16)0b1111111100000000, (__m256i)(__v16hu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 16);
+TEST_CONSTEXPR(_mm256_mask_reduce_max_epu16((__mmask16)0b0000000011111111, (__m256i)(__v16hu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 8);
unsigned short test_mm256_mask_reduce_min_epu16(__mmask16 __M, __m256i __W){
// CHECK-LABEL: test_mm256_mask_reduce_min_epu16
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
-// CHECK: call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %{{.*}})
+// CHECK: call {{.*}}i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %{{.*}})
return _mm256_mask_reduce_min_epu16(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_min_epu16((__mmask16)0b1111111100000000, (__m256i)(__v16hu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 9);
+TEST_CONSTEXPR(_mm256_mask_reduce_min_epu16((__mmask16)0b0000000011111111, (__m256i)(__v16hu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 1);
signed char test_mm_reduce_max_epi8(__m128i __W){
// CHECK-LABEL: test_mm_reduce_max_epi8
-// CHECK: call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %{{.*}})
return _mm_reduce_max_epi8(__W);
}
+TEST_CONSTEXPR(_mm_reduce_max_epi8((__m128i)(__v16qs){-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8}) == 8);
signed char test_mm_reduce_min_epi8(__m128i __W){
// CHECK-LABEL: test_mm_reduce_min_epi8
-// CHECK: call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %{{.*}})
return _mm_reduce_min_epi8(__W);
}
+TEST_CONSTEXPR(_mm_reduce_min_epi8((__m128i)(__v16qs){-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8}) == -8);
unsigned char test_mm_reduce_max_epu8(__m128i __W){
// CHECK-LABEL: test_mm_reduce_max_epu8
-// CHECK: call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %{{.*}})
return _mm_reduce_max_epu8(__W);
}
+TEST_CONSTEXPR(_mm_reduce_max_epu8((__m128i)(__v16qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 16);
unsigned char test_mm_reduce_min_epu8(__m128i __W){
// CHECK-LABEL: test_mm_reduce_min_epu8
-// CHECK: call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %{{.*}})
return _mm_reduce_min_epu8(__W);
}
+TEST_CONSTEXPR(_mm_reduce_min_epu8((__m128i)(__v16qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 1);
signed char test_mm_mask_reduce_max_epi8(__mmask16 __M, __m128i __W){
// CHECK-LABEL: test_mm_mask_reduce_max_epi8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %{{.*}})
return _mm_mask_reduce_max_epi8(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_max_epi8((__mmask16)0b1111111100000000, (__m128i)(__v16qs){-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8}) == 8);
+TEST_CONSTEXPR(_mm_mask_reduce_max_epi8((__mmask16)0b0000000011111111, (__m128i)(__v16qs){-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8}) == -1);
signed char test_mm_mask_reduce_min_epi8(__mmask16 __M, __m128i __W){
// CHECK-LABEL: test_mm_mask_reduce_min_epi8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %{{.*}})
return _mm_mask_reduce_min_epi8(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_min_epi8((__mmask16)0b1111111100000000, (__m128i)(__v16qs){-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8}) == 1);
+TEST_CONSTEXPR(_mm_mask_reduce_min_epi8((__mmask16)0b0000000011111111, (__m128i)(__v16qs){-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8}) == -8);
unsigned char test_mm_mask_reduce_max_epu8(__mmask16 __M, __m128i __W){
// CHECK-LABEL: test_mm_mask_reduce_max_epu8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %{{.*}})
return _mm_mask_reduce_max_epu8(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_max_epu8((__mmask16)0b1111111100000000, (__m128i)(__v16qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 16);
+TEST_CONSTEXPR(_mm_mask_reduce_max_epu8((__mmask16)0b0000000011111111, (__m128i)(__v16qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 8);
unsigned char test_mm_mask_reduce_min_epu8(__mmask16 __M, __m128i __W){
// CHECK-LABEL: test_mm_mask_reduce_min_epu8
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %{{.*}})
return _mm_mask_reduce_min_epu8(__M, __W);
}
+TEST_CONSTEXPR(_mm_mask_reduce_min_epu8((__mmask16)0b1111111100000000, (__m128i)(__v16qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 9);
+TEST_CONSTEXPR(_mm_mask_reduce_min_epu8((__mmask16)0b0000000011111111, (__m128i)(__v16qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 1);
signed char test_mm256_reduce_max_epi8(__m256i __W){
// CHECK-LABEL: test_mm256_reduce_max_epi8
-// CHECK: call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %{{.*}})
return _mm256_reduce_max_epi8(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_max_epi8((__m256i)(__v32qs){-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 16);
signed char test_mm256_reduce_min_epi8(__m256i __W){
// CHECK-LABEL: test_mm256_reduce_min_epi8
-// CHECK: call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %{{.*}})
return _mm256_reduce_min_epi8(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_min_epi8((__m256i)(__v32qs){-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == -16);
unsigned char test_mm256_reduce_max_epu8(__m256i __W){
// CHECK-LABEL: test_mm256_reduce_max_epu8
-// CHECK: call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %{{.*}})
return _mm256_reduce_max_epu8(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_max_epu8((__m256i)(__v32qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}) == 32);
unsigned char test_mm256_reduce_min_epu8(__m256i __W){
// CHECK-LABEL: test_mm256_reduce_min_epu8
-// CHECK: call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %{{.*}})
return _mm256_reduce_min_epu8(__W);
}
+TEST_CONSTEXPR(_mm256_reduce_min_epu8((__m256i)(__v32qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}) == 1);
signed char test_mm256_mask_reduce_max_epi8(__mmask32 __M, __m256i __W){
// CHECK-LABEL: test_mm256_mask_reduce_max_epi8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %{{.*}})
return _mm256_mask_reduce_max_epi8(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_max_epi8((__mmask32)0b11111111111111110000000000000000, (__m256i)(__v32qs){-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 16);
+TEST_CONSTEXPR(_mm256_mask_reduce_max_epi8((__mmask32)0b00000000000000001111111111111111, (__m256i)(__v32qs){-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == -1);
signed char test_mm256_mask_reduce_min_epi8(__mmask32 __M, __m256i __W){
// CHECK-LABEL: test_mm256_mask_reduce_min_epi8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %{{.*}})
return _mm256_mask_reduce_min_epi8(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_min_epi8((__mmask32)0b11111111111111110000000000000000, (__m256i)(__v32qs){-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == 1);
+TEST_CONSTEXPR(_mm256_mask_reduce_min_epi8((__mmask32)0b00000000000000001111111111111111, (__m256i)(__v32qs){-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}) == -16);
unsigned char test_mm256_mask_reduce_max_epu8(__mmask32 __M, __m256i __W){
// CHECK-LABEL: test_mm256_mask_reduce_max_epu8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %{{.*}})
return _mm256_mask_reduce_max_epu8(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_max_epu8((__mmask32)0b11111111111111110000000000000000, (__m256i)(__v32qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}) == 32);
+TEST_CONSTEXPR(_mm256_mask_reduce_max_epu8((__mmask32)0b00000000000000001111111111111111, (__m256i)(__v32qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}) == 16);
unsigned char test_mm256_mask_reduce_min_epu8(__mmask32 __M, __m256i __W){
// CHECK-LABEL: test_mm256_mask_reduce_min_epu8
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
-// CHECK: call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %{{.*}})
+// CHECK: call {{.*}}i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %{{.*}})
return _mm256_mask_reduce_min_epu8(__M, __W);
}
+TEST_CONSTEXPR(_mm256_mask_reduce_min_epu8((__mmask32)0b11111111111111110000000000000000, (__m256i)(__v32qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}) == 17);
+TEST_CONSTEXPR(_mm256_mask_reduce_min_epu8((__mmask32)0b00000000000000001111111111111111, (__m256i)(__v32qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}) == 1);
diff --git a/clang/test/CodeGen/X86/avx512vlcd-builtins.c b/clang/test/CodeGen/X86/avx512vlcd-builtins.c
index b784809..1619305 100644
--- a/clang/test/CodeGen/X86/avx512vlcd-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlcd-builtins.c
@@ -1,10 +1,18 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vl -target-feature +avx512cd -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vl -target-feature +avx512cd -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vl -target-feature +avx512cd -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vl -target-feature +avx512cd -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vl -target-feature +avx512cd -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vl -target-feature +avx512cd -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vl -target-feature +avx512cd -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vl -target-feature +avx512cd -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vl -target-feature +avx512cd -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__m128i test_mm_broadcastmb_epi64(__m128i a,__m128i b) {
- // CHECK-LABEL: @test_mm_broadcastmb_epi64
+ // CHECK-LABEL: test_mm_broadcastmb_epi64
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
// CHECK: shufflevector <4 x i1> %{{.*}}, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: zext i8 %{{.*}} to i64
@@ -14,7 +22,7 @@ __m128i test_mm_broadcastmb_epi64(__m128i a,__m128i b) {
}
__m256i test_mm256_broadcastmb_epi64(__m256i a, __m256i b) {
- // CHECK-LABEL: @test_mm256_broadcastmb_epi64
+ // CHECK-LABEL: test_mm256_broadcastmb_epi64
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
// CHECK: shufflevector <4 x i1> %{{.*}}, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: zext i8 %{{.*}} to i64
@@ -26,7 +34,7 @@ __m256i test_mm256_broadcastmb_epi64(__m256i a, __m256i b) {
}
__m128i test_mm_broadcastmw_epi32(__m512i a, __m512i b) {
- // CHECK-LABEL: @test_mm_broadcastmw_epi32
+ // CHECK-LABEL: test_mm_broadcastmw_epi32
// CHECK: icmp eq <16 x i32> %{{.*}}, %{{.*}}
// CHECK: zext i16 %{{.*}} to i32
// CHECK: insertelement <4 x i32> poison, i32 %{{.*}}, i32 0
@@ -37,7 +45,7 @@ __m128i test_mm_broadcastmw_epi32(__m512i a, __m512i b) {
}
__m256i test_mm256_broadcastmw_epi32(__m512i a, __m512i b) {
- // CHECK-LABEL: @test_mm256_broadcastmw_epi32
+ // CHECK-LABEL: test_mm256_broadcastmw_epi32
// CHECK: icmp eq <16 x i32> %{{.*}}, %{{.*}}
// CHECK: zext i16 %{{.*}} to i32
// CHECK: insertelement <8 x i32> poison, i32 %{{.*}}, i32 0
@@ -52,161 +60,213 @@ __m256i test_mm256_broadcastmw_epi32(__m512i a, __m512i b) {
}
__m128i test_mm_conflict_epi64(__m128i __A) {
- // CHECK-LABEL: @test_mm_conflict_epi64
- // CHECK: @llvm.x86.avx512.conflict.q.128
+ // CHECK-LABEL: test_mm_conflict_epi64
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.conflict.q.128(<2 x i64> %{{.*}})
return _mm_conflict_epi64(__A);
}
__m128i test_mm_mask_conflict_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_conflict_epi64
- // CHECK: @llvm.x86.avx512.conflict.q.128
+ // CHECK-LABEL: test_mm_mask_conflict_epi64
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.conflict.q.128(<2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_conflict_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_conflict_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_conflict_epi64
- // CHECK: @llvm.x86.avx512.conflict.q.128
+ // CHECK-LABEL: test_mm_maskz_conflict_epi64
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.conflict.q.128(<2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_conflict_epi64(__U, __A);
}
__m256i test_mm256_conflict_epi64(__m256i __A) {
- // CHECK-LABEL: @test_mm256_conflict_epi64
- // CHECK: @llvm.x86.avx512.conflict.q.256
+ // CHECK-LABEL: test_mm256_conflict_epi64
+ // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.conflict.q.256(<4 x i64> %{{.*}})
return _mm256_conflict_epi64(__A);
}
__m256i test_mm256_mask_conflict_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_conflict_epi64
- // CHECK: @llvm.x86.avx512.conflict.q.256
+ // CHECK-LABEL: test_mm256_mask_conflict_epi64
+ // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.conflict.q.256(<4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_conflict_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_conflict_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_conflict_epi64
- // CHECK: @llvm.x86.avx512.conflict.q.256
+ // CHECK-LABEL: test_mm256_maskz_conflict_epi64
+ // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.conflict.q.256(<4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_conflict_epi64(__U, __A);
}
__m128i test_mm_conflict_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_conflict_epi32
- // CHECK: @llvm.x86.avx512.conflict.d.128
+ // CHECK-LABEL: test_mm_conflict_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.conflict.d.128(<4 x i32> %{{.*}})
return _mm_conflict_epi32(__A);
}
__m128i test_mm_mask_conflict_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_conflict_epi32
- // CHECK: @llvm.x86.avx512.conflict.d.128
+ // CHECK-LABEL: test_mm_mask_conflict_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.conflict.d.128(<4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_conflict_epi32(__W, __U, __A);
}
__m128i test_mm_maskz_conflict_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_conflict_epi32
- // CHECK: @llvm.x86.avx512.conflict.d.128
+ // CHECK-LABEL: test_mm_maskz_conflict_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.conflict.d.128(<4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_conflict_epi32(__U, __A);
}
__m256i test_mm256_conflict_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_conflict_epi32
- // CHECK: @llvm.x86.avx512.conflict.d.256
+ // CHECK-LABEL: test_mm256_conflict_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.conflict.d.256(<8 x i32> %{{.*}})
return _mm256_conflict_epi32(__A);
}
__m256i test_mm256_mask_conflict_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_conflict_epi32
- // CHECK: @llvm.x86.avx512.conflict.d.256
+ // CHECK-LABEL: test_mm256_mask_conflict_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.conflict.d.256(<8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_conflict_epi32(__W, __U, __A);
}
__m256i test_mm256_maskz_conflict_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_conflict_epi32
- // CHECK: @llvm.x86.avx512.conflict.d.256
+ // CHECK-LABEL: test_mm256_maskz_conflict_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.conflict.d.256(<8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_conflict_epi32(__U, __A);
}
__m128i test_mm_lzcnt_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_lzcnt_epi32
- // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm_lzcnt_epi32
+ // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <4 x i32> %{{.*}}, zeroinitializer
+ // CHECK: select <4 x i1> [[ISZERO]], <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_lzcnt_epi32(__A);
}
+TEST_CONSTEXPR(match_v4si(_mm_lzcnt_epi32((__m128i)(__v4si){8, 16, 32, 64}), 28, 27, 26, 25));
+TEST_CONSTEXPR(match_v4si(_mm_lzcnt_epi32((__m128i)(__v4si){0, 0, 0, 0}), 32, 32, 32, 32));
+
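+// The lzcnt lowering now emits ctlz with is_zero_poison=true followed by an
+// explicit zero-compare and select, instead of a single ctlz(..., false).
+// The observable result is unchanged (VPLZCNTD/Q return the element width
+// for zero inputs, hence the all-32 row above); the split form appears to be
+// what the constant evaluators fold for the TEST_CONSTEXPR checks, e.g.
+// lzcnt(8) == 28 since the highest set bit of 8 is bit 3 of 32.
+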
__m128i test_mm_mask_lzcnt_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_lzcnt_epi32
- // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm_mask_lzcnt_epi32
+ // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <4 x i32> %{{.*}}, zeroinitializer
+ // CHECK: select <4 x i1> [[ISZERO]], <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_lzcnt_epi32(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v4si(_mm_mask_lzcnt_epi32(_mm_set1_epi32(32), /*0000 0101=*/0x5, (__m128i)(__v4si){8, 16, 32, 64}), 28, 32, 26, 32));
+
__m128i test_mm_maskz_lzcnt_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_lzcnt_epi32
- // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm_maskz_lzcnt_epi32
+ // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <4 x i32> %{{.*}}, zeroinitializer
+ // CHECK: select <4 x i1> [[ISZERO]], <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_lzcnt_epi32(__U, __A);
}
+TEST_CONSTEXPR(match_v4si(_mm_maskz_lzcnt_epi32(/*0000 0101=*/0x5, (__m128i)(__v4si){8, 16, 32, 64}), 28, 0, 26, 0));
+
__m256i test_mm256_lzcnt_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_lzcnt_epi32
- // CHECK: call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm256_lzcnt_epi32
+ // CHECK: call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <8 x i32> %{{.*}}, zeroinitializer
+ // CHECK: select <8 x i1> [[ISZERO]], <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_lzcnt_epi32(__A);
}
+TEST_CONSTEXPR(match_v8si(_mm256_lzcnt_epi32((__m256i)(__v8si){1, 2, 4, 8, 16, 32, 64, 128}), 31, 30, 29, 28, 27, 26, 25, 24));
+TEST_CONSTEXPR(match_v8si(_mm256_lzcnt_epi32((__m256i)(__v8si){0, 0, 0, 0, 0, 0, 0, 0}), 32, 32, 32, 32, 32, 32, 32, 32));
+
__m256i test_mm256_mask_lzcnt_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_lzcnt_epi32
- // CHECK: call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm256_mask_lzcnt_epi32
+ // CHECK: call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <8 x i32> %{{.*}}, zeroinitializer
+ // CHECK: select <8 x i1> [[ISZERO]], <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_lzcnt_epi32(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_lzcnt_epi32(_mm256_set1_epi32(32), /*0101 0101=*/0x55, (__m256i)(__v8si){1, 2, 4, 8, 16, 32, 64, 128}), 31, 32, 29, 32, 27, 32, 25, 32));
+
__m256i test_mm256_maskz_lzcnt_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_lzcnt_epi32
- // CHECK: call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm256_maskz_lzcnt_epi32
+ // CHECK: call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <8 x i32> %{{.*}}, zeroinitializer
+ // CHECK: select <8 x i1> [[ISZERO]], <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_lzcnt_epi32(__U, __A);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_lzcnt_epi32(/*0101 0101=*/0x55, (__m256i)(__v8si){1, 2, 4, 8, 16, 32, 64, 128}), 31, 0, 29, 0, 27, 0, 25, 0));
+
__m128i test_mm_lzcnt_epi64(__m128i __A) {
- // CHECK-LABEL: @test_mm_lzcnt_epi64
- // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm_lzcnt_epi64
+ // CHECK: call {{.*}}<2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <2 x i64> %{{.*}}, zeroinitializer
+ // CHECK: select <2 x i1> [[ISZERO]], <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_lzcnt_epi64(__A);
}
+TEST_CONSTEXPR(match_v2di(_mm_lzcnt_epi64((__m128i)(__v2di){1, 2}), 63, 62));
+TEST_CONSTEXPR(match_v2di(_mm_lzcnt_epi64((__m128i)(__v2di){0, 0}), 64, 64));
+
__m128i test_mm_mask_lzcnt_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_lzcnt_epi64
- // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm_mask_lzcnt_epi64
+ // CHECK: call {{.*}}<2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <2 x i64> %{{.*}}, zeroinitializer
+ // CHECK: select <2 x i1> [[ISZERO]], <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_lzcnt_epi64(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v2di(_mm_mask_lzcnt_epi64(_mm_set1_epi64x((long long)64), /*0000 0010=*/0x2, (__m128i)(__v2di){1, 2}), 64, 62));
+
__m128i test_mm_maskz_lzcnt_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_lzcnt_epi64
- // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm_maskz_lzcnt_epi64
+ // CHECK: call {{.*}}<2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <2 x i64> %{{.*}}, zeroinitializer
+ // CHECK: select <2 x i1> [[ISZERO]], <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_lzcnt_epi64(__U, __A);
}
+TEST_CONSTEXPR(match_v2di(_mm_maskz_lzcnt_epi64(/*0000 0010=*/0x2, (__m128i)(__v2di){1, 2}), 0, 62));
+
__m256i test_mm256_lzcnt_epi64(__m256i __A) {
- // CHECK-LABEL: @test_mm256_lzcnt_epi64
- // CHECK: call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm256_lzcnt_epi64
+ // CHECK: call {{.*}}<4 x i64> @llvm.ctlz.v4i64(<4 x i64> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <4 x i64> %{{.*}}, zeroinitializer
+ // CHECK: select <4 x i1> [[ISZERO]], <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_lzcnt_epi64(__A);
}
+TEST_CONSTEXPR(match_v4di(_mm256_lzcnt_epi64((__m256i)(__v4di){1, 2, 4, 8}), 63, 62, 61, 60));
+TEST_CONSTEXPR(match_v4di(_mm256_lzcnt_epi64((__m256i)(__v4di){0, 0, 0, 0}), 64, 64, 64, 64));
+
__m256i test_mm256_mask_lzcnt_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_lzcnt_epi64
- // CHECK: call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm256_mask_lzcnt_epi64
+ // CHECK: call {{.*}}<4 x i64> @llvm.ctlz.v4i64(<4 x i64> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <4 x i64> %{{.*}}, zeroinitializer
+ // CHECK: select <4 x i1> [[ISZERO]], <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_lzcnt_epi64(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v4di(_mm256_mask_lzcnt_epi64(_mm256_set1_epi64x((long long)64), /*0000 0110=*/0x6, (__m256i)(__v4di){1, 2, 4, 8}), 64, 62, 61, 64));
+
__m256i test_mm256_maskz_lzcnt_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_lzcnt_epi64
- // CHECK: call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm256_maskz_lzcnt_epi64
+ // CHECK: call {{.*}}<4 x i64> @llvm.ctlz.v4i64(<4 x i64> %{{.*}}, i1 true)
+ // CHECK: [[ISZERO:%.+]] = icmp eq <4 x i64> %{{.*}}, zeroinitializer
+ // CHECK: select <4 x i1> [[ISZERO]], <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_lzcnt_epi64(__U, __A);
}
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_lzcnt_epi64(/*0000 0011=*/0x3, (__m256i)(__v4di){1, 2, 4, 8}), 63, 62, 0, 0));
diff --git a/clang/test/CodeGen/X86/avx512vldq-builtins.c b/clang/test/CodeGen/X86/avx512vldq-builtins.c
index cdbd19a..720999c 100644
--- a/clang/test/CodeGen/X86/avx512vldq-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vldq-builtins.c
@@ -1,50 +1,59 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512dq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512dq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512dq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512dq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__m256i test_mm256_mullo_epi64 (__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mullo_epi64
+ // CHECK-LABEL: test_mm256_mullo_epi64
// CHECK: mul <4 x i64>
return _mm256_mullo_epi64(__A, __B);
}
__m256i test_mm256_mask_mullo_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_mullo_epi64
+ // CHECK-LABEL: test_mm256_mask_mullo_epi64
// CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return (__m256i) _mm256_mask_mullo_epi64 ( __W, __U, __A, __B);
}
__m256i test_mm256_maskz_mullo_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_mullo_epi64
+ // CHECK-LABEL: test_mm256_maskz_mullo_epi64
// CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return (__m256i) _mm256_maskz_mullo_epi64 (__U, __A, __B);
}
__m128i test_mm_mullo_epi64 (__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mullo_epi64
+ // CHECK-LABEL: test_mm_mullo_epi64
// CHECK: mul <2 x i64>
return (__m128i) _mm_mullo_epi64(__A, __B);
}
__m128i test_mm_mask_mullo_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_mullo_epi64
+ // CHECK-LABEL: test_mm_mask_mullo_epi64
// CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return (__m128i) _mm_mask_mullo_epi64 ( __W, __U, __A, __B);
}
__m128i test_mm_maskz_mullo_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_mullo_epi64
+ // CHECK-LABEL: test_mm_maskz_mullo_epi64
// CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return (__m128i) _mm_maskz_mullo_epi64 (__U, __A, __B);
}
__m256d test_mm256_mask_andnot_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_andnot_pd
+ // CHECK-LABEL: test_mm256_mask_andnot_pd
// CHECK: xor <4 x i64> %{{.*}}, splat (i64 -1)
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
@@ -52,7 +61,7 @@ __m256d test_mm256_mask_andnot_pd (__m256d __W, __mmask8 __U, __m256d __A, __m25
}
__m256d test_mm256_maskz_andnot_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_andnot_pd
+ // CHECK-LABEL: test_mm256_maskz_andnot_pd
// CHECK: xor <4 x i64> %{{.*}}, splat (i64 -1)
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
@@ -60,7 +69,7 @@ __m256d test_mm256_maskz_andnot_pd (__mmask8 __U, __m256d __A, __m256d __B) {
}
__m128d test_mm_mask_andnot_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_andnot_pd
+ // CHECK-LABEL: test_mm_mask_andnot_pd
// CHECK: xor <2 x i64> %{{.*}}, splat (i64 -1)
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
@@ -68,7 +77,7 @@ __m128d test_mm_mask_andnot_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d
}
__m128d test_mm_maskz_andnot_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_andnot_pd
+ // CHECK-LABEL: test_mm_maskz_andnot_pd
// CHECK: xor <2 x i64> %{{.*}}, splat (i64 -1)
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
@@ -76,7 +85,7 @@ __m128d test_mm_maskz_andnot_pd (__mmask8 __U, __m128d __A, __m128d __B) {
}
__m256 test_mm256_mask_andnot_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_andnot_ps
+ // CHECK-LABEL: test_mm256_mask_andnot_ps
// CHECK: xor <8 x i32> %{{.*}}, splat (i32 -1)
// CHECK: and <8 x i32> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -84,7 +93,7 @@ __m256 test_mm256_mask_andnot_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 _
}
__m256 test_mm256_maskz_andnot_ps (__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_andnot_ps
+ // CHECK-LABEL: test_mm256_maskz_andnot_ps
// CHECK: xor <8 x i32> %{{.*}}, splat (i32 -1)
// CHECK: and <8 x i32> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -92,7 +101,7 @@ __m256 test_mm256_maskz_andnot_ps (__mmask8 __U, __m256 __A, __m256 __B) {
}
__m128 test_mm_mask_andnot_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_andnot_ps
+ // CHECK-LABEL: test_mm_mask_andnot_ps
// CHECK: xor <4 x i32> %{{.*}}, splat (i32 -1)
// CHECK: and <4 x i32> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -100,7 +109,7 @@ __m128 test_mm_mask_andnot_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
__m128 test_mm_maskz_andnot_ps (__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_andnot_ps
+ // CHECK-LABEL: test_mm_maskz_andnot_ps
// CHECK: xor <4 x i32> %{{.*}}, splat (i32 -1)
// CHECK: and <4 x i32> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -108,776 +117,776 @@ __m128 test_mm_maskz_andnot_ps (__mmask8 __U, __m128 __A, __m128 __B) {
}
__m256d test_mm256_mask_and_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_and_pd
+ // CHECK-LABEL: test_mm256_mask_and_pd
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return (__m256d) _mm256_mask_and_pd ( __W, __U, __A, __B);
}
__m256d test_mm256_maskz_and_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_and_pd
+ // CHECK-LABEL: test_mm256_maskz_and_pd
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return (__m256d) _mm256_maskz_and_pd (__U, __A, __B);
}
__m128d test_mm_mask_and_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_and_pd
+ // CHECK-LABEL: test_mm_mask_and_pd
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return (__m128d) _mm_mask_and_pd ( __W, __U, __A, __B);
}
__m128d test_mm_maskz_and_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_and_pd
+ // CHECK-LABEL: test_mm_maskz_and_pd
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return (__m128d) _mm_maskz_and_pd (__U, __A, __B);
}
__m256 test_mm256_mask_and_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_and_ps
+ // CHECK-LABEL: test_mm256_mask_and_ps
// CHECK: and <8 x i32> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return (__m256) _mm256_mask_and_ps ( __W, __U, __A, __B);
}
__m256 test_mm256_maskz_and_ps (__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_and_ps
+ // CHECK-LABEL: test_mm256_maskz_and_ps
// CHECK: and <8 x i32> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return (__m256) _mm256_maskz_and_ps (__U, __A, __B);
}
__m128 test_mm_mask_and_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_and_ps
+ // CHECK-LABEL: test_mm_mask_and_ps
// CHECK: and <4 x i32> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return (__m128) _mm_mask_and_ps ( __W, __U, __A, __B);
}
__m128 test_mm_maskz_and_ps (__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_and_ps
+ // CHECK-LABEL: test_mm_maskz_and_ps
// CHECK: and <4 x i32> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return (__m128) _mm_maskz_and_ps (__U, __A, __B);
}
__m256d test_mm256_mask_xor_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_xor_pd
+ // CHECK-LABEL: test_mm256_mask_xor_pd
// CHECK: xor <4 x i64> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return (__m256d) _mm256_mask_xor_pd ( __W, __U, __A, __B);
}
__m256d test_mm256_maskz_xor_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_xor_pd
+ // CHECK-LABEL: test_mm256_maskz_xor_pd
// CHECK: xor <4 x i64> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return (__m256d) _mm256_maskz_xor_pd (__U, __A, __B);
}
__m128d test_mm_mask_xor_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_xor_pd
+ // CHECK-LABEL: test_mm_mask_xor_pd
// CHECK: xor <2 x i64> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return (__m128d) _mm_mask_xor_pd ( __W, __U, __A, __B);
}
__m128d test_mm_maskz_xor_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_xor_pd
+ // CHECK-LABEL: test_mm_maskz_xor_pd
// CHECK: xor <2 x i64> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return (__m128d) _mm_maskz_xor_pd (__U, __A, __B);
}
__m256 test_mm256_mask_xor_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_xor_ps
+ // CHECK-LABEL: test_mm256_mask_xor_ps
// CHECK: xor <8 x i32> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return (__m256) _mm256_mask_xor_ps ( __W, __U, __A, __B);
}
__m256 test_mm256_maskz_xor_ps (__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_xor_ps
+ // CHECK-LABEL: test_mm256_maskz_xor_ps
// CHECK: xor <8 x i32> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return (__m256) _mm256_maskz_xor_ps (__U, __A, __B);
}
__m128 test_mm_mask_xor_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_xor_ps
+ // CHECK-LABEL: test_mm_mask_xor_ps
// CHECK: xor <4 x i32> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return (__m128) _mm_mask_xor_ps ( __W, __U, __A, __B);
}
__m128 test_mm_maskz_xor_ps (__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_xor_ps
+ // CHECK-LABEL: test_mm_maskz_xor_ps
// CHECK: xor <4 x i32> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return (__m128) _mm_maskz_xor_ps (__U, __A, __B);
}
__m256d test_mm256_mask_or_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_or_pd
+ // CHECK-LABEL: test_mm256_mask_or_pd
// CHECK: or <4 x i64> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return (__m256d) _mm256_mask_or_pd ( __W, __U, __A, __B);
}
__m256d test_mm256_maskz_or_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_or_pd
+ // CHECK-LABEL: test_mm256_maskz_or_pd
// CHECK: or <4 x i64> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return (__m256d) _mm256_maskz_or_pd (__U, __A, __B);
}
__m128d test_mm_mask_or_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_or_pd
+ // CHECK-LABEL: test_mm_mask_or_pd
// CHECK: or <2 x i64> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return (__m128d) _mm_mask_or_pd ( __W, __U, __A, __B);
}
__m128d test_mm_maskz_or_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_or_pd
+ // CHECK-LABEL: test_mm_maskz_or_pd
// CHECK: or <2 x i64> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return (__m128d) _mm_maskz_or_pd (__U, __A, __B);
}
__m256 test_mm256_mask_or_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_or_ps
+ // CHECK-LABEL: test_mm256_mask_or_ps
// CHECK: or <8 x i32> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return (__m256) _mm256_mask_or_ps ( __W, __U, __A, __B);
}
__m256 test_mm256_maskz_or_ps (__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_or_ps
+ // CHECK-LABEL: test_mm256_maskz_or_ps
// CHECK: or <8 x i32> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return (__m256) _mm256_maskz_or_ps (__U, __A, __B);
}
__m128 test_mm_mask_or_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_or_ps
+ // CHECK-LABEL: test_mm_mask_or_ps
// CHECK: or <4 x i32> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return (__m128) _mm_mask_or_ps ( __W, __U, __A, __B);
}
__m128 test_mm_maskz_or_ps (__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_or_ps
+ // CHECK-LABEL: test_mm_maskz_or_ps
// CHECK: or <4 x i32> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return (__m128) _mm_maskz_or_ps(__U, __A, __B);
}
__m128i test_mm_cvtpd_epi64(__m128d __A) {
- // CHECK-LABEL: @test_mm_cvtpd_epi64
+ // CHECK-LABEL: test_mm_cvtpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.128
return _mm_cvtpd_epi64(__A);
}
__m128i test_mm_mask_cvtpd_epi64(__m128i __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvtpd_epi64
+ // CHECK-LABEL: test_mm_mask_cvtpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.128
return _mm_mask_cvtpd_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_cvtpd_epi64(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtpd_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.128
return _mm_maskz_cvtpd_epi64(__U, __A);
}
__m256i test_mm256_cvtpd_epi64(__m256d __A) {
- // CHECK-LABEL: @test_mm256_cvtpd_epi64
+ // CHECK-LABEL: test_mm256_cvtpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.256
return _mm256_cvtpd_epi64(__A);
}
__m256i test_mm256_mask_cvtpd_epi64(__m256i __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtpd_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.256
return _mm256_mask_cvtpd_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtpd_epi64(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtpd_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.256
return _mm256_maskz_cvtpd_epi64(__U, __A);
}
__m128i test_mm_cvtpd_epu64(__m128d __A) {
- // CHECK-LABEL: @test_mm_cvtpd_epu64
+ // CHECK-LABEL: test_mm_cvtpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.128
return _mm_cvtpd_epu64(__A);
}
__m128i test_mm_mask_cvtpd_epu64(__m128i __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvtpd_epu64
+ // CHECK-LABEL: test_mm_mask_cvtpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.128
return _mm_mask_cvtpd_epu64(__W, __U, __A);
}
__m128i test_mm_maskz_cvtpd_epu64(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtpd_epu64
+ // CHECK-LABEL: test_mm_maskz_cvtpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.128
return _mm_maskz_cvtpd_epu64(__U, __A);
}
__m256i test_mm256_cvtpd_epu64(__m256d __A) {
- // CHECK-LABEL: @test_mm256_cvtpd_epu64
+ // CHECK-LABEL: test_mm256_cvtpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.256
return _mm256_cvtpd_epu64(__A);
}
__m256i test_mm256_mask_cvtpd_epu64(__m256i __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtpd_epu64
+ // CHECK-LABEL: test_mm256_mask_cvtpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.256
return _mm256_mask_cvtpd_epu64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtpd_epu64(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtpd_epu64
+ // CHECK-LABEL: test_mm256_maskz_cvtpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.256
return _mm256_maskz_cvtpd_epu64(__U, __A);
}
__m128i test_mm_cvtps_epi64(__m128 __A) {
- // CHECK-LABEL: @test_mm_cvtps_epi64
+ // CHECK-LABEL: test_mm_cvtps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.128
return _mm_cvtps_epi64(__A);
}
__m128i test_mm_mask_cvtps_epi64(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvtps_epi64
+ // CHECK-LABEL: test_mm_mask_cvtps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.128
return _mm_mask_cvtps_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_cvtps_epi64(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtps_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.128
return _mm_maskz_cvtps_epi64(__U, __A);
}
__m256i test_mm256_cvtps_epi64(__m128 __A) {
- // CHECK-LABEL: @test_mm256_cvtps_epi64
+ // CHECK-LABEL: test_mm256_cvtps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.256
return _mm256_cvtps_epi64(__A);
}
__m256i test_mm256_mask_cvtps_epi64(__m256i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtps_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.256
return _mm256_mask_cvtps_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtps_epi64(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtps_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.256
return _mm256_maskz_cvtps_epi64(__U, __A);
}
__m128i test_mm_cvtps_epu64(__m128 __A) {
- // CHECK-LABEL: @test_mm_cvtps_epu64
+ // CHECK-LABEL: test_mm_cvtps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.128
return _mm_cvtps_epu64(__A);
}
__m128i test_mm_mask_cvtps_epu64(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvtps_epu64
+ // CHECK-LABEL: test_mm_mask_cvtps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.128
return _mm_mask_cvtps_epu64(__W, __U, __A);
}
__m128i test_mm_maskz_cvtps_epu64(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtps_epu64
+ // CHECK-LABEL: test_mm_maskz_cvtps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.128
return _mm_maskz_cvtps_epu64(__U, __A);
}
__m256i test_mm256_cvtps_epu64(__m128 __A) {
- // CHECK-LABEL: @test_mm256_cvtps_epu64
+ // CHECK-LABEL: test_mm256_cvtps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.256
return _mm256_cvtps_epu64(__A);
}
__m256i test_mm256_mask_cvtps_epu64(__m256i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtps_epu64
+ // CHECK-LABEL: test_mm256_mask_cvtps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.256
return _mm256_mask_cvtps_epu64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtps_epu64(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtps_epu64
+ // CHECK-LABEL: test_mm256_maskz_cvtps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.256
return _mm256_maskz_cvtps_epu64(__U, __A);
}
__m128d test_mm_cvtepi64_pd(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepi64_pd
+ // CHECK-LABEL: test_mm_cvtepi64_pd
// CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
return _mm_cvtepi64_pd(__A);
}
__m128d test_mm_mask_cvtepi64_pd(__m128d __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_pd
+ // CHECK-LABEL: test_mm_mask_cvtepi64_pd
// CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_cvtepi64_pd(__W, __U, __A);
}
__m128d test_mm_maskz_cvtepi64_pd(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi64_pd
+ // CHECK-LABEL: test_mm_maskz_cvtepi64_pd
// CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_cvtepi64_pd(__U, __A);
}
__m256d test_mm256_cvtepi64_pd(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepi64_pd
+ // CHECK-LABEL: test_mm256_cvtepi64_pd
// CHECK: sitofp <4 x i64> %{{.*}} to <4 x double>
return _mm256_cvtepi64_pd(__A);
}
__m256d test_mm256_mask_cvtepi64_pd(__m256d __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_pd
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_pd
// CHECK: sitofp <4 x i64> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_cvtepi64_pd(__W, __U, __A);
}
__m256d test_mm256_maskz_cvtepi64_pd(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi64_pd
+ // CHECK-LABEL: test_mm256_maskz_cvtepi64_pd
// CHECK: sitofp <4 x i64> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_cvtepi64_pd(__U, __A);
}
__m128 test_mm_cvtepi64_ps(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepi64_ps
+ // CHECK-LABEL: test_mm_cvtepi64_ps
// CHECK: @llvm.x86.avx512.mask.cvtqq2ps.128
return _mm_cvtepi64_ps(__A);
}
__m128 test_mm_mask_cvtepi64_ps(__m128 __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_ps
+ // CHECK-LABEL: test_mm_mask_cvtepi64_ps
// CHECK: @llvm.x86.avx512.mask.cvtqq2ps.128
return _mm_mask_cvtepi64_ps(__W, __U, __A);
}
__m128 test_mm_maskz_cvtepi64_ps(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi64_ps
+ // CHECK-LABEL: test_mm_maskz_cvtepi64_ps
// CHECK: @llvm.x86.avx512.mask.cvtqq2ps.128
return _mm_maskz_cvtepi64_ps(__U, __A);
}
__m128 test_mm256_cvtepi64_ps(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepi64_ps
+ // CHECK-LABEL: test_mm256_cvtepi64_ps
// CHECK: sitofp <4 x i64> %{{.*}} to <4 x float>
return _mm256_cvtepi64_ps(__A);
}
__m128 test_mm256_mask_cvtepi64_ps(__m128 __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_ps
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_ps
// CHECK: sitofp <4 x i64> %{{.*}} to <4 x float>
// select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm256_mask_cvtepi64_ps(__W, __U, __A);
}
__m128 test_mm256_maskz_cvtepi64_ps(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi64_ps
+ // CHECK-LABEL: test_mm256_maskz_cvtepi64_ps
// CHECK: sitofp <4 x i64> %{{.*}} to <4 x float>
// select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm256_maskz_cvtepi64_ps(__U, __A);
}
__m128i test_mm_cvttpd_epi64(__m128d __A) {
- // CHECK-LABEL: @test_mm_cvttpd_epi64
+ // CHECK-LABEL: test_mm_cvttpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.128
return _mm_cvttpd_epi64(__A);
}
__m128i test_mm_mask_cvttpd_epi64(__m128i __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvttpd_epi64
+ // CHECK-LABEL: test_mm_mask_cvttpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.128
return _mm_mask_cvttpd_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_cvttpd_epi64(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvttpd_epi64
+ // CHECK-LABEL: test_mm_maskz_cvttpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.128
return _mm_maskz_cvttpd_epi64(__U, __A);
}
__m256i test_mm256_cvttpd_epi64(__m256d __A) {
- // CHECK-LABEL: @test_mm256_cvttpd_epi64
+ // CHECK-LABEL: test_mm256_cvttpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.256
return _mm256_cvttpd_epi64(__A);
}
__m256i test_mm256_mask_cvttpd_epi64(__m256i __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvttpd_epi64
+ // CHECK-LABEL: test_mm256_mask_cvttpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.256
return _mm256_mask_cvttpd_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvttpd_epi64(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvttpd_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvttpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.256
return _mm256_maskz_cvttpd_epi64(__U, __A);
}
__m128i test_mm_cvttpd_epu64(__m128d __A) {
- // CHECK-LABEL: @test_mm_cvttpd_epu64
+ // CHECK-LABEL: test_mm_cvttpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.128
return _mm_cvttpd_epu64(__A);
}
__m128i test_mm_mask_cvttpd_epu64(__m128i __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvttpd_epu64
+ // CHECK-LABEL: test_mm_mask_cvttpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.128
return _mm_mask_cvttpd_epu64(__W, __U, __A);
}
__m128i test_mm_maskz_cvttpd_epu64(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvttpd_epu64
+ // CHECK-LABEL: test_mm_maskz_cvttpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.128
return _mm_maskz_cvttpd_epu64(__U, __A);
}
__m256i test_mm256_cvttpd_epu64(__m256d __A) {
- // CHECK-LABEL: @test_mm256_cvttpd_epu64
+ // CHECK-LABEL: test_mm256_cvttpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.256
return _mm256_cvttpd_epu64(__A);
}
__m256i test_mm256_mask_cvttpd_epu64(__m256i __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvttpd_epu64
+ // CHECK-LABEL: test_mm256_mask_cvttpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.256
return _mm256_mask_cvttpd_epu64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvttpd_epu64(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvttpd_epu64
+ // CHECK-LABEL: test_mm256_maskz_cvttpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.256
return _mm256_maskz_cvttpd_epu64(__U, __A);
}
__m128i test_mm_cvttps_epi64(__m128 __A) {
- // CHECK-LABEL: @test_mm_cvttps_epi64
+ // CHECK-LABEL: test_mm_cvttps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.128
return _mm_cvttps_epi64(__A);
}
__m128i test_mm_mask_cvttps_epi64(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvttps_epi64
+ // CHECK-LABEL: test_mm_mask_cvttps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.128
return _mm_mask_cvttps_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_cvttps_epi64(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvttps_epi64
+ // CHECK-LABEL: test_mm_maskz_cvttps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.128
return _mm_maskz_cvttps_epi64(__U, __A);
}
__m256i test_mm256_cvttps_epi64(__m128 __A) {
- // CHECK-LABEL: @test_mm256_cvttps_epi64
+ // CHECK-LABEL: test_mm256_cvttps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.256
return _mm256_cvttps_epi64(__A);
}
__m256i test_mm256_mask_cvttps_epi64(__m256i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvttps_epi64
+ // CHECK-LABEL: test_mm256_mask_cvttps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.256
return _mm256_mask_cvttps_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvttps_epi64(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvttps_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvttps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.256
return _mm256_maskz_cvttps_epi64(__U, __A);
}
__m128i test_mm_cvttps_epu64(__m128 __A) {
- // CHECK-LABEL: @test_mm_cvttps_epu64
+ // CHECK-LABEL: test_mm_cvttps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.128
return _mm_cvttps_epu64(__A);
}
__m128i test_mm_mask_cvttps_epu64(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvttps_epu64
+ // CHECK-LABEL: test_mm_mask_cvttps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.128
return _mm_mask_cvttps_epu64(__W, __U, __A);
}
__m128i test_mm_maskz_cvttps_epu64(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvttps_epu64
+ // CHECK-LABEL: test_mm_maskz_cvttps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.128
return _mm_maskz_cvttps_epu64(__U, __A);
}
__m256i test_mm256_cvttps_epu64(__m128 __A) {
- // CHECK-LABEL: @test_mm256_cvttps_epu64
+ // CHECK-LABEL: test_mm256_cvttps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.256
return _mm256_cvttps_epu64(__A);
}
__m256i test_mm256_mask_cvttps_epu64(__m256i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvttps_epu64
+ // CHECK-LABEL: test_mm256_mask_cvttps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.256
return _mm256_mask_cvttps_epu64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvttps_epu64(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvttps_epu64
+ // CHECK-LABEL: test_mm256_maskz_cvttps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.256
return _mm256_maskz_cvttps_epu64(__U, __A);
}
__m128d test_mm_cvtepu64_pd(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepu64_pd
+ // CHECK-LABEL: test_mm_cvtepu64_pd
// CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
return _mm_cvtepu64_pd(__A);
}
__m128d test_mm_mask_cvtepu64_pd(__m128d __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu64_pd
+ // CHECK-LABEL: test_mm_mask_cvtepu64_pd
// CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_cvtepu64_pd(__W, __U, __A);
}
__m128d test_mm_maskz_cvtepu64_pd(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu64_pd
+ // CHECK-LABEL: test_mm_maskz_cvtepu64_pd
// CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_cvtepu64_pd(__U, __A);
}
__m256d test_mm256_cvtepu64_pd(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepu64_pd
+ // CHECK-LABEL: test_mm256_cvtepu64_pd
// CHECK: uitofp <4 x i64> %{{.*}} to <4 x double>
return _mm256_cvtepu64_pd(__A);
}
__m256d test_mm256_mask_cvtepu64_pd(__m256d __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu64_pd
+ // CHECK-LABEL: test_mm256_mask_cvtepu64_pd
// CHECK: uitofp <4 x i64> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_cvtepu64_pd(__W, __U, __A);
}
__m256d test_mm256_maskz_cvtepu64_pd(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu64_pd
+ // CHECK-LABEL: test_mm256_maskz_cvtepu64_pd
// CHECK: uitofp <4 x i64> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_cvtepu64_pd(__U, __A);
}
__m128 test_mm_cvtepu64_ps(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepu64_ps
+ // CHECK-LABEL: test_mm_cvtepu64_ps
// CHECK: @llvm.x86.avx512.mask.cvtuqq2ps.128
return _mm_cvtepu64_ps(__A);
}
__m128 test_mm_mask_cvtepu64_ps(__m128 __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu64_ps
+ // CHECK-LABEL: test_mm_mask_cvtepu64_ps
// CHECK: @llvm.x86.avx512.mask.cvtuqq2ps.128
return _mm_mask_cvtepu64_ps(__W, __U, __A);
}
__m128 test_mm_maskz_cvtepu64_ps(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu64_ps
+ // CHECK-LABEL: test_mm_maskz_cvtepu64_ps
// CHECK: @llvm.x86.avx512.mask.cvtuqq2ps.128
return _mm_maskz_cvtepu64_ps(__U, __A);
}
__m128 test_mm256_cvtepu64_ps(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepu64_ps
+ // CHECK-LABEL: test_mm256_cvtepu64_ps
// CHECK: uitofp <4 x i64> %{{.*}} to <4 x float>
return _mm256_cvtepu64_ps(__A);
}
__m128 test_mm256_mask_cvtepu64_ps(__m128 __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu64_ps
+ // CHECK-LABEL: test_mm256_mask_cvtepu64_ps
// CHECK: uitofp <4 x i64> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm256_mask_cvtepu64_ps(__W, __U, __A);
}
__m128 test_mm256_maskz_cvtepu64_ps(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu64_ps
+ // CHECK-LABEL: test_mm256_maskz_cvtepu64_ps
// CHECK: uitofp <4 x i64> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm256_maskz_cvtepu64_ps(__U, __A);
}
__m128d test_mm_range_pd(__m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_range_pd
+ // CHECK-LABEL: test_mm_range_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.128
return _mm_range_pd(__A, __B, 4);
}
__m128d test_mm_mask_range_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_range_pd
+ // CHECK-LABEL: test_mm_mask_range_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.128
return _mm_mask_range_pd(__W, __U, __A, __B, 4);
}
__m128d test_mm_maskz_range_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_range_pd
+ // CHECK-LABEL: test_mm_maskz_range_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.128
return _mm_maskz_range_pd(__U, __A, __B, 4);
}
__m256d test_mm256_range_pd(__m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_range_pd
+ // CHECK-LABEL: test_mm256_range_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.256
return _mm256_range_pd(__A, __B, 4);
}
__m256d test_mm256_mask_range_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_range_pd
+ // CHECK-LABEL: test_mm256_mask_range_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.256
return _mm256_mask_range_pd(__W, __U, __A, __B, 4);
}
__m256d test_mm256_maskz_range_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_range_pd
+ // CHECK-LABEL: test_mm256_maskz_range_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.256
return _mm256_maskz_range_pd(__U, __A, __B, 4);
}
__m128 test_mm_range_ps(__m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_range_ps
+ // CHECK-LABEL: test_mm_range_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.128
return _mm_range_ps(__A, __B, 4);
}
__m128 test_mm_mask_range_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_range_ps
+ // CHECK-LABEL: test_mm_mask_range_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.128
return _mm_mask_range_ps(__W, __U, __A, __B, 4);
}
__m128 test_mm_maskz_range_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_range_ps
+ // CHECK-LABEL: test_mm_maskz_range_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.128
return _mm_maskz_range_ps(__U, __A, __B, 4);
}
__m256 test_mm256_range_ps(__m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_range_ps
+ // CHECK-LABEL: test_mm256_range_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.256
return _mm256_range_ps(__A, __B, 4);
}
__m256 test_mm256_mask_range_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_range_ps
+ // CHECK-LABEL: test_mm256_mask_range_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.256
return _mm256_mask_range_ps(__W, __U, __A, __B, 4);
}
__m256 test_mm256_maskz_range_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_range_ps
+ // CHECK-LABEL: test_mm256_maskz_range_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.256
return _mm256_maskz_range_ps(__U, __A, __B, 4);
}
__m128d test_mm_reduce_pd(__m128d __A) {
- // CHECK-LABEL: @test_mm_reduce_pd
+ // CHECK-LABEL: test_mm_reduce_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.128
return _mm_reduce_pd(__A, 4);
}
__m128d test_mm_mask_reduce_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_reduce_pd
+ // CHECK-LABEL: test_mm_mask_reduce_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.128
return _mm_mask_reduce_pd(__W, __U, __A, 4);
}
__m128d test_mm_maskz_reduce_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_reduce_pd
+ // CHECK-LABEL: test_mm_maskz_reduce_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.128
return _mm_maskz_reduce_pd(__U, __A, 4);
}
__m256d test_mm256_reduce_pd(__m256d __A) {
- // CHECK-LABEL: @test_mm256_reduce_pd
+ // CHECK-LABEL: test_mm256_reduce_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.256
return _mm256_reduce_pd(__A, 4);
}
__m256d test_mm256_mask_reduce_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_reduce_pd
+ // CHECK-LABEL: test_mm256_mask_reduce_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.256
return _mm256_mask_reduce_pd(__W, __U, __A, 4);
}
__m256d test_mm256_maskz_reduce_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_reduce_pd
+ // CHECK-LABEL: test_mm256_maskz_reduce_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.256
return _mm256_maskz_reduce_pd(__U, __A, 4);
}
__m128 test_mm_reduce_ps(__m128 __A) {
- // CHECK-LABEL: @test_mm_reduce_ps
+ // CHECK-LABEL: test_mm_reduce_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.128
return _mm_reduce_ps(__A, 4);
}
__m128 test_mm_mask_reduce_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_reduce_ps
+ // CHECK-LABEL: test_mm_mask_reduce_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.128
return _mm_mask_reduce_ps(__W, __U, __A, 4);
}
__m128 test_mm_maskz_reduce_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_reduce_ps
+ // CHECK-LABEL: test_mm_maskz_reduce_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.128
return _mm_maskz_reduce_ps(__U, __A, 4);
}
__m256 test_mm256_reduce_ps(__m256 __A) {
- // CHECK-LABEL: @test_mm256_reduce_ps
+ // CHECK-LABEL: test_mm256_reduce_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.256
return _mm256_reduce_ps(__A, 4);
}
__m256 test_mm256_mask_reduce_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_reduce_ps
+ // CHECK-LABEL: test_mm256_mask_reduce_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.256
return _mm256_mask_reduce_ps(__W, __U, __A, 4);
}
__m256 test_mm256_maskz_reduce_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_reduce_ps
+ // CHECK-LABEL: test_mm256_maskz_reduce_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.256
return _mm256_maskz_reduce_ps(__U, __A, 4);
}
__mmask8 test_mm_movepi32_mask(__m128i __A) {
- // CHECK-LABEL: @test_mm_movepi32_mask
+ // CHECK-LABEL: test_mm_movepi32_mask
// CHECK: [[CMP:%.*]] = icmp slt <4 x i32> %{{.*}}, zeroinitializer
// CHECK: [[SHUF:%.*]] = shufflevector <4 x i1> [[CMP]], <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
return _mm_movepi32_mask(__A);
}
__mmask8 test_mm256_movepi32_mask(__m256i __A) {
- // CHECK-LABEL: @test_mm256_movepi32_mask
+ // CHECK-LABEL: test_mm256_movepi32_mask
// CHECK: [[CMP:%.*]] = icmp slt <8 x i32> %{{.*}}, zeroinitializer
return _mm256_movepi32_mask(__A);
}
__m128i test_mm_movm_epi32(__mmask8 __A) {
- // CHECK-LABEL: @test_mm_movm_epi32
+ // CHECK-LABEL: test_mm_movm_epi32
// CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: %extract.i = shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: %vpmovm2.i = sext <4 x i1> %extract.i to <4 x i32>
@@ -885,14 +894,14 @@ __m128i test_mm_movm_epi32(__mmask8 __A) {
}
__m256i test_mm256_movm_epi32(__mmask8 __A) {
- // CHECK-LABEL: @test_mm256_movm_epi32
+ // CHECK-LABEL: test_mm256_movm_epi32
// CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: %vpmovm2.i = sext <8 x i1> %{{.*}} to <8 x i32>
return _mm256_movm_epi32(__A);
}
__m128i test_mm_movm_epi64(__mmask8 __A) {
- // CHECK-LABEL: @test_mm_movm_epi64
+ // CHECK-LABEL: test_mm_movm_epi64
// CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: %extract.i = shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: %vpmovm2.i = sext <2 x i1> %extract.i to <2 x i64>
@@ -900,7 +909,7 @@ __m128i test_mm_movm_epi64(__mmask8 __A) {
}
__m256i test_mm256_movm_epi64(__mmask8 __A) {
- // CHECK-LABEL: @test_mm256_movm_epi64
+ // CHECK-LABEL: test_mm256_movm_epi64
// CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: %extract.i = shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: %vpmovm2.i = sext <4 x i1> %extract.i to <4 x i64>
@@ -908,14 +917,14 @@ __m256i test_mm256_movm_epi64(__mmask8 __A) {
}
__mmask8 test_mm_movepi64_mask(__m128i __A) {
- // CHECK-LABEL: @test_mm_movepi64_mask
+ // CHECK-LABEL: test_mm_movepi64_mask
// CHECK: [[CMP:%.*]] = icmp slt <2 x i64> %{{.*}}, zeroinitializer
// CHECK: [[SHUF:%.*]] = shufflevector <2 x i1> [[CMP]], <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
return _mm_movepi64_mask(__A);
}
__mmask8 test_mm256_movepi64_mask(__m256i __A) {
- // CHECK-LABEL: @test_mm256_movepi64_mask
+ // CHECK-LABEL: test_mm256_movepi64_mask
// CHECK: [[CMP:%.*]] = icmp slt <4 x i64> %{{.*}}, zeroinitializer
// CHECK: [[SHUF:%.*]] = shufflevector <4 x i1> [[CMP]], <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
return _mm256_movepi64_mask(__A);
@@ -923,229 +932,234 @@ __mmask8 test_mm256_movepi64_mask(__m256i __A) {
__m256 test_mm256_broadcast_f32x2(__m128 __A) {
- // CHECK-LABEL: @test_mm256_broadcast_f32x2
+ // CHECK-LABEL: test_mm256_broadcast_f32x2
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm256_broadcast_f32x2(__A);
}
+TEST_CONSTEXPR(match_m256(_mm256_broadcast_f32x2((__m128)(__v4sf){1.0f, -2.0f, 3.0f, -4.0f}), 1.0f, -2.0f, 1.0f, -2.0f, 1.0f, -2.0f, 1.0f, -2.0f));
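+// TEST_CONSTEXPR and the match_* helpers are assumed to come from
+// builtin_test_helpers.h (included by these tests); each check force-evaluates
+// the intrinsic in a constant expression and compares the result lane by lane,
+// so the shufflevector-based broadcast is verified at compile time as well as
+// in the generated IR.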
__m256 test_mm256_mask_broadcast_f32x2(__m256 __O, __mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcast_f32x2
+ // CHECK-LABEL: test_mm256_mask_broadcast_f32x2
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_broadcast_f32x2(__O, __M, __A);
}
__m256 test_mm256_maskz_broadcast_f32x2(__mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcast_f32x2
+ // CHECK-LABEL: test_mm256_maskz_broadcast_f32x2
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_broadcast_f32x2(__M, __A);
}
__m256d test_mm256_broadcast_f64x2(double const* __A) {
- // CHECK-LABEL: @test_mm256_broadcast_f64x2
+ // CHECK-LABEL: test_mm256_broadcast_f64x2
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
return _mm256_broadcast_f64x2(_mm_loadu_pd(__A));
}
+TEST_CONSTEXPR(match_m256d(_mm256_broadcast_f64x2((__m128d)(__v2df){1.0, -2.0}), 1.0, -2.0, 1.0, -2.0));
__m256d test_mm256_mask_broadcast_f64x2(__m256d __O, __mmask8 __M, double const* __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcast_f64x2
+ // CHECK-LABEL: test_mm256_mask_broadcast_f64x2
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_broadcast_f64x2(__O, __M, _mm_loadu_pd(__A));
}
__m256d test_mm256_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcast_f64x2
+ // CHECK-LABEL: test_mm256_maskz_broadcast_f64x2
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_broadcast_f64x2(__M, _mm_loadu_pd(__A));
}
__m128i test_mm_broadcast_i32x2(__m128i __A) {
- // CHECK-LABEL: @test_mm_broadcast_i32x2
+ // CHECK-LABEL: test_mm_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
return _mm_broadcast_i32x2(__A);
}
+TEST_CONSTEXPR(match_v4si(_mm_broadcast_i32x2((__m128i)(__v4si){1, -2, 3, -4}), 1, -2, 1, -2));
__m128i test_mm_mask_broadcast_i32x2(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_broadcast_i32x2
+ // CHECK-LABEL: test_mm_mask_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_broadcast_i32x2(__O, __M, __A);
}
__m128i test_mm_maskz_broadcast_i32x2(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_broadcast_i32x2
+ // CHECK-LABEL: test_mm_maskz_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_broadcast_i32x2(__M, __A);
}
__m256i test_mm256_broadcast_i32x2(__m128i __A) {
- // CHECK-LABEL: @test_mm256_broadcast_i32x2
+ // CHECK-LABEL: test_mm256_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm256_broadcast_i32x2(__A);
}
+TEST_CONSTEXPR(match_v8si(_mm256_broadcast_i32x2((__m128i)(__v4si){1, -2, 3, -4}), 1, -2, 1, -2, 1, -2, 1, -2));
__m256i test_mm256_mask_broadcast_i32x2(__m256i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcast_i32x2
+ // CHECK-LABEL: test_mm256_mask_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_broadcast_i32x2(__O, __M, __A);
}
__m256i test_mm256_maskz_broadcast_i32x2(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcast_i32x2
+ // CHECK-LABEL: test_mm256_maskz_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_broadcast_i32x2(__M, __A);
}
__m256i test_mm256_broadcast_i64x2(__m128i const* __A) {
- // CHECK-LABEL: @test_mm256_broadcast_i64x2
+ // CHECK-LABEL: test_mm256_broadcast_i64x2
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
return _mm256_broadcast_i64x2(_mm_loadu_si128(__A));
}
+TEST_CONSTEXPR(match_v4di(_mm256_broadcast_i64x2((__m128i)(__v2di){1, -2}), 1, -2, 1, -2));
__m256i test_mm256_mask_broadcast_i64x2(__m256i __O, __mmask8 __M, __m128i const* __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcast_i64x2
+ // CHECK-LABEL: test_mm256_mask_broadcast_i64x2
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_broadcast_i64x2(__O, __M, _mm_loadu_si128(__A));
}
__m256i test_mm256_maskz_broadcast_i64x2(__mmask8 __M, __m128i const* __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcast_i64x2
+ // CHECK-LABEL: test_mm256_maskz_broadcast_i64x2
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_broadcast_i64x2(__M, _mm_loadu_si128(__A));
}
__m128d test_mm256_extractf64x2_pd(__m256d __A) {
- // CHECK-LABEL: @test_mm256_extractf64x2_pd
+ // CHECK-LABEL: test_mm256_extractf64x2_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
return _mm256_extractf64x2_pd(__A, 1);
}
__m128d test_mm256_mask_extractf64x2_pd(__m128d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_extractf64x2_pd
+ // CHECK-LABEL: test_mm256_mask_extractf64x2_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm256_mask_extractf64x2_pd(__W, __U, __A, 1);
}
__m128d test_mm256_maskz_extractf64x2_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_extractf64x2_pd
+ // CHECK-LABEL: test_mm256_maskz_extractf64x2_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm256_maskz_extractf64x2_pd(__U, __A, 1);
}
__m128i test_mm256_extracti64x2_epi64(__m256i __A) {
- // CHECK-LABEL: @test_mm256_extracti64x2_epi64
+ // CHECK-LABEL: test_mm256_extracti64x2_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> poison, <2 x i32> <i32 2, i32 3>
return _mm256_extracti64x2_epi64(__A, 1);
}
__m128i test_mm256_mask_extracti64x2_epi64(__m128i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_extracti64x2_epi64
+ // CHECK-LABEL: test_mm256_mask_extracti64x2_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> poison, <2 x i32> <i32 2, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm256_mask_extracti64x2_epi64(__W, __U, __A, 1);
}
__m128i test_mm256_maskz_extracti64x2_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_extracti64x2_epi64
+ // CHECK-LABEL: test_mm256_maskz_extracti64x2_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> poison, <2 x i32> <i32 2, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm256_maskz_extracti64x2_epi64(__U, __A, 1);
}
__m256d test_mm256_insertf64x2(__m256d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm256_insertf64x2
+ // CHECK-LABEL: test_mm256_insertf64x2
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
return _mm256_insertf64x2(__A, __B, 1);
}
__m256d test_mm256_mask_insertf64x2(__m256d __W, __mmask8 __U, __m256d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm256_mask_insertf64x2
+ // CHECK-LABEL: test_mm256_mask_insertf64x2
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_insertf64x2(__W, __U, __A, __B, 1);
}
__m256d test_mm256_maskz_insertf64x2(__mmask8 __U, __m256d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm256_maskz_insertf64x2
+ // CHECK-LABEL: test_mm256_maskz_insertf64x2
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_insertf64x2(__U, __A, __B, 1);
}
__m256i test_mm256_inserti64x2(__m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_inserti64x2
+ // CHECK-LABEL: test_mm256_inserti64x2
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
return _mm256_inserti64x2(__A, __B, 1);
}
__m256i test_mm256_mask_inserti64x2(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_inserti64x2
+ // CHECK-LABEL: test_mm256_mask_inserti64x2
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_inserti64x2(__W, __U, __A, __B, 1);
}
__m256i test_mm256_maskz_inserti64x2(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_inserti64x2
+ // CHECK-LABEL: test_mm256_maskz_inserti64x2
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_inserti64x2(__U, __A, __B, 1);
}
__mmask8 test_mm_mask_fpclass_pd_mask(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_fpclass_pd_mask
+ // CHECK-LABEL: test_mm_mask_fpclass_pd_mask
// CHECK: @llvm.x86.avx512.fpclass.pd.128
return _mm_mask_fpclass_pd_mask(__U, __A, 2);
}
__mmask8 test_mm_fpclass_pd_mask(__m128d __A) {
- // CHECK-LABEL: @test_mm_fpclass_pd_mask
+ // CHECK-LABEL: test_mm_fpclass_pd_mask
// CHECK: @llvm.x86.avx512.fpclass.pd.128
return _mm_fpclass_pd_mask(__A, 2);
}
__mmask8 test_mm256_mask_fpclass_pd_mask(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_fpclass_pd_mask
+ // CHECK-LABEL: test_mm256_mask_fpclass_pd_mask
// CHECK: @llvm.x86.avx512.fpclass.pd.256
return _mm256_mask_fpclass_pd_mask(__U, __A, 2);
}
__mmask8 test_mm256_fpclass_pd_mask(__m256d __A) {
- // CHECK-LABEL: @test_mm256_fpclass_pd_mask
+ // CHECK-LABEL: test_mm256_fpclass_pd_mask
// CHECK: @llvm.x86.avx512.fpclass.pd.256
return _mm256_fpclass_pd_mask(__A, 2);
}
__mmask8 test_mm_mask_fpclass_ps_mask(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_fpclass_ps_mask
+ // CHECK-LABEL: test_mm_mask_fpclass_ps_mask
// CHECK: @llvm.x86.avx512.fpclass.ps.128
return _mm_mask_fpclass_ps_mask(__U, __A, 2);
}
__mmask8 test_mm_fpclass_ps_mask(__m128 __A) {
- // CHECK-LABEL: @test_mm_fpclass_ps_mask
+ // CHECK-LABEL: test_mm_fpclass_ps_mask
// CHECK: @llvm.x86.avx512.fpclass.ps.128
return _mm_fpclass_ps_mask(__A, 2);
}
__mmask8 test_mm256_mask_fpclass_ps_mask(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_fpclass_ps_mask
+ // CHECK-LABEL: test_mm256_mask_fpclass_ps_mask
// CHECK: @llvm.x86.avx512.fpclass.ps.256
return _mm256_mask_fpclass_ps_mask(__U, __A, 2);
}
__mmask8 test_mm256_fpclass_ps_mask(__m256 __A) {
- // CHECK-LABEL: @test_mm256_fpclass_ps_mask
+ // CHECK-LABEL: test_mm256_fpclass_ps_mask
// CHECK: @llvm.x86.avx512.fpclass.ps.256
return _mm256_fpclass_ps_mask(__A, 2);
}
diff --git a/clang/test/CodeGen/X86/avx512vlfp16-builtins.c b/clang/test/CodeGen/X86/avx512vlfp16-builtins.c
index 3a212ed..828876e 100644
--- a/clang/test/CodeGen/X86/avx512vlfp16-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlfp16-builtins.c
@@ -1,21 +1,31 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +avx512vl -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +avx512vl -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-unknown-unknown -target-feature +avx512vl -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +avx512vl -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-unknown-unknown -target-feature +avx512vl -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +avx512vl -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-unknown-unknown -target-feature +avx512vl -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +avx512vl -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-unknown-unknown -target-feature +avx512vl -target-feature +avx512fp16 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <immintrin.h>
+#include "builtin_test_helpers.h"
_Float16 test_mm_cvtsh_h(__m128h __A) {
- // CHECK-LABEL: @test_mm_cvtsh_h
+ // CHECK-LABEL: test_mm_cvtsh_h
// CHECK: extractelement <8 x half> %{{.*}}, i32 0
return _mm_cvtsh_h(__A);
}
_Float16 test_mm256_cvtsh_h(__m256h __A) {
- // CHECK-LABEL: @test_mm256_cvtsh_h
+ // CHECK-LABEL: test_mm256_cvtsh_h
// CHECK: extractelement <16 x half> %{{.*}}, i32 0
return _mm256_cvtsh_h(__A);
}
__m128h test_mm_set_sh(_Float16 __h) {
- // CHECK-LABEL: @test_mm_set_sh
+ // CHECK-LABEL: test_mm_set_sh
// CHECK: insertelement <8 x half> {{.*}}, i32 0
// CHECK: insertelement <8 x half> %{{.*}}, half 0xH0000, i32 1
// CHECK: insertelement <8 x half> %{{.*}}, half 0xH0000, i32 2
@@ -28,7 +38,7 @@ __m128h test_mm_set_sh(_Float16 __h) {
}
__m128h test_mm_set1_ph(_Float16 h) {
- // CHECK-LABEL: @test_mm_set1_ph
+ // CHECK-LABEL: test_mm_set1_ph
// CHECK: insertelement <8 x half> {{.*}}, i32 0
// CHECK: insertelement <8 x half> {{.*}}, i32 1
// CHECK: insertelement <8 x half> {{.*}}, i32 2
@@ -41,7 +51,7 @@ __m128h test_mm_set1_ph(_Float16 h) {
}
__m256h test_mm256_set1_ph(_Float16 h) {
- // CHECK-LABEL: @test_mm256_set1_ph
+ // CHECK-LABEL: test_mm256_set1_ph
// CHECK: insertelement <16 x half> {{.*}}, i32 0
// CHECK: insertelement <16 x half> {{.*}}, i32 1
// CHECK: insertelement <16 x half> {{.*}}, i32 2
@@ -62,7 +72,7 @@ __m256h test_mm256_set1_ph(_Float16 h) {
}
__m128h test_mm_set1_pch(_Float16 _Complex h) {
- // CHECK-LABEL: @test_mm_set1_pch
+ // CHECK-LABEL: test_mm_set1_pch
// CHECK: insertelement <4 x float> {{.*}}, i32 0
// CHECK: insertelement <4 x float> {{.*}}, i32 1
// CHECK: insertelement <4 x float> {{.*}}, i32 2
@@ -71,7 +81,7 @@ __m128h test_mm_set1_pch(_Float16 _Complex h) {
}
__m256h test_mm256_set1_pch(_Float16 _Complex h) {
- // CHECK-LABEL: @test_mm256_set1_pch
+ // CHECK-LABEL: test_mm256_set1_pch
// CHECK: insertelement <8 x float> {{.*}}, i32 0
// CHECK: insertelement <8 x float> {{.*}}, i32 1
// CHECK: insertelement <8 x float> {{.*}}, i32 2
@@ -85,7 +95,7 @@ __m256h test_mm256_set1_pch(_Float16 _Complex h) {
__m128h test_mm_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4,
_Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8) {
- // CHECK-LABEL: @test_mm_set_ph
+ // CHECK-LABEL: test_mm_set_ph
// CHECK: insertelement <8 x half> {{.*}}, i32 0
// CHECK: insertelement <8 x half> {{.*}}, i32 1
// CHECK: insertelement <8 x half> {{.*}}, i32 2
@@ -101,7 +111,7 @@ __m256h test_mm256_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16
_Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8,
_Float16 __h9, _Float16 __h10, _Float16 __h11, _Float16 __h12,
_Float16 __h13, _Float16 __h14, _Float16 __h15, _Float16 __h16) {
- // CHECK-LABEL: @test_mm256_set_ph
+ // CHECK-LABEL: test_mm256_set_ph
// CHECK: insertelement <16 x half> {{.*}}, i32 0
// CHECK: insertelement <16 x half> {{.*}}, i32 1
// CHECK: insertelement <16 x half> {{.*}}, i32 2
@@ -124,7 +134,7 @@ __m256h test_mm256_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16
__m128h test_mm_setr_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4,
_Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8) {
- // CHECK-LABEL: @test_mm_setr_ph
+ // CHECK-LABEL: test_mm_setr_ph
// CHECK: insertelement <8 x half> {{.*}}, i32 0
// CHECK: insertelement <8 x half> {{.*}}, i32 1
// CHECK: insertelement <8 x half> {{.*}}, i32 2
@@ -140,7 +150,7 @@ __m256h test_mm256_setr_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16
_Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8,
_Float16 __h9, _Float16 __h10, _Float16 __h11, _Float16 __h12,
_Float16 __h13, _Float16 __h14, _Float16 __h15, _Float16 __h16) {
- // CHECK-LABEL: @test_mm256_setr_ph
+ // CHECK-LABEL: test_mm256_setr_ph
// CHECK: insertelement <16 x half> {{.*}}, i32 0
// CHECK: insertelement <16 x half> {{.*}}, i32 1
// CHECK: insertelement <16 x half> {{.*}}, i32 2
@@ -162,251 +172,253 @@ __m256h test_mm256_setr_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16
}
__m256h test_mm256_add_ph(__m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_add_ph
+ // CHECK-LABEL: test_mm256_add_ph
// CHECK: %{{.*}} = fadd <16 x half> %{{.*}}, %{{.*}}
return _mm256_add_ph(__A, __B);
}
__m256h test_mm256_mask_add_ph(__m256h __W, __mmask32 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_mask_add_ph
+ // CHECK-LABEL: test_mm256_mask_add_ph
// CHECK: %{{.*}} = fadd <16 x half> %{{.*}}, %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return (__m256h)_mm256_mask_add_ph(__W, __U, __A, __B);
}
__m256h test_mm256_maskz_add_ph(__mmask32 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_maskz_add_ph
+ // CHECK-LABEL: test_mm256_maskz_add_ph
// CHECK: %{{.*}} = fadd <16 x half> %{{.*}}, %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_maskz_add_ph(__U, __A, __B);
}
__m128h test_mm_add_ph(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_add_ph
+ // CHECK-LABEL: test_mm_add_ph
// CHECK: %{{.*}} = fadd <8 x half> %{{.*}}, %{{.*}}
return _mm_add_ph(__A, __B);
}
__m128h test_mm_mask_add_ph(__m128h __W, __mmask32 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_add_ph
+ // CHECK-LABEL: test_mm_mask_add_ph
// CHECK: %{{.*}} = fadd <8 x half> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return (__m128h)_mm_mask_add_ph(__W, __U, __A, __B);
}
__m128h test_mm_maskz_add_ph(__mmask32 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_add_ph
+ // CHECK-LABEL: test_mm_maskz_add_ph
// CHECK: %{{.*}} = fadd <8 x half> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_maskz_add_ph(__U, __A, __B);
}
__m256h test_mm256_sub_ph(__m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_sub_ph
+ // CHECK-LABEL: test_mm256_sub_ph
// CHECK: %{{.*}} = fsub <16 x half> %{{.*}}, %{{.*}}
return _mm256_sub_ph(__A, __B);
}
__m256h test_mm256_mask_sub_ph(__m256h __W, __mmask32 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_mask_sub_ph
+ // CHECK-LABEL: test_mm256_mask_sub_ph
// CHECK: %{{.*}} = fsub <16 x half> %{{.*}}, %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return (__m256h)_mm256_mask_sub_ph(__W, __U, __A, __B);
}
__m256h test_mm256_maskz_sub_ph(__mmask32 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_maskz_sub_ph
+ // CHECK-LABEL: test_mm256_maskz_sub_ph
// CHECK: %{{.*}} = fsub <16 x half> %{{.*}}, %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_maskz_sub_ph(__U, __A, __B);
}
__m128h test_mm_sub_ph(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_sub_ph
+ // CHECK-LABEL: test_mm_sub_ph
// CHECK: %{{.*}} = fsub <8 x half> %{{.*}}, %{{.*}}
return _mm_sub_ph(__A, __B);
}
__m128h test_mm_mask_sub_ph(__m128h __W, __mmask32 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_sub_ph
+ // CHECK-LABEL: test_mm_mask_sub_ph
// CHECK: %{{.*}} = fsub <8 x half> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return (__m128h)_mm_mask_sub_ph(__W, __U, __A, __B);
}
__m128h test_mm_maskz_sub_ph(__mmask32 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_sub_ph
+ // CHECK-LABEL: test_mm_maskz_sub_ph
// CHECK: %{{.*}} = fsub <8 x half> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_maskz_sub_ph(__U, __A, __B);
}
__m256h test_mm256_mul_ph(__m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_mul_ph
+ // CHECK-LABEL: test_mm256_mul_ph
// CHECK: %{{.*}} = fmul <16 x half> %{{.*}}, %{{.*}}
return _mm256_mul_ph(__A, __B);
}
__m256h test_mm256_mask_mul_ph(__m256h __W, __mmask32 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_mask_mul_ph
+ // CHECK-LABEL: test_mm256_mask_mul_ph
// CHECK: %{{.*}} = fmul <16 x half> %{{.*}}, %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return (__m256h)_mm256_mask_mul_ph(__W, __U, __A, __B);
}
__m256h test_mm256_maskz_mul_ph(__mmask32 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_maskz_mul_ph
+ // CHECK-LABEL: test_mm256_maskz_mul_ph
// CHECK: %{{.*}} = fmul <16 x half> %{{.*}}, %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_maskz_mul_ph(__U, __A, __B);
}
__m128h test_mm_mul_ph(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mul_ph
+ // CHECK-LABEL: test_mm_mul_ph
// CHECK: %{{.*}} = fmul <8 x half> %{{.*}}, %{{.*}}
return _mm_mul_ph(__A, __B);
}
__m128h test_mm_mask_mul_ph(__m128h __W, __mmask32 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_mul_ph
+ // CHECK-LABEL: test_mm_mask_mul_ph
// CHECK: %{{.*}} = fmul <8 x half> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return (__m128h)_mm_mask_mul_ph(__W, __U, __A, __B);
}
__m128h test_mm_maskz_mul_ph(__mmask32 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_mul_ph
+ // CHECK-LABEL: test_mm_maskz_mul_ph
// CHECK: %{{.*}} = fmul <8 x half> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_maskz_mul_ph(__U, __A, __B);
}
__m256h test_mm256_div_ph(__m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_div_ph
+ // CHECK-LABEL: test_mm256_div_ph
// CHECK: %{{.*}} = fdiv <16 x half> %{{.*}}, %{{.*}}
return _mm256_div_ph(__A, __B);
}
__m256h test_mm256_mask_div_ph(__m256h __W, __mmask32 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_mask_div_ph
+ // CHECK-LABEL: test_mm256_mask_div_ph
// CHECK: %{{.*}} = fdiv <16 x half> %{{.*}}, %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return (__m256h)_mm256_mask_div_ph(__W, __U, __A, __B);
}
__m256h test_mm256_maskz_div_ph(__mmask32 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_maskz_div_ph
+ // CHECK-LABEL: test_mm256_maskz_div_ph
// CHECK: %{{.*}} = fdiv <16 x half> %{{.*}}, %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_maskz_div_ph(__U, __A, __B);
}
__m128h test_mm_div_ph(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_div_ph
+ // CHECK-LABEL: test_mm_div_ph
// CHECK: %{{.*}} = fdiv <8 x half> %{{.*}}, %{{.*}}
return _mm_div_ph(__A, __B);
}
__m128h test_mm_mask_div_ph(__m128h __W, __mmask32 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_div_ph
+ // CHECK-LABEL: test_mm_mask_div_ph
// CHECK: %{{.*}} = fdiv <8 x half> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return (__m128h)_mm_mask_div_ph(__W, __U, __A, __B);
}
__m128h test_mm_maskz_div_ph(__mmask32 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_div_ph
+ // CHECK-LABEL: test_mm_maskz_div_ph
// CHECK: %{{.*}} = fdiv <8 x half> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_maskz_div_ph(__U, __A, __B);
}
__m256h test_mm256_min_ph(__m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_min_ph
+ // CHECK-LABEL: test_mm256_min_ph
// CHECK: @llvm.x86.avx512fp16.min.ph.256
return _mm256_min_ph(__A, __B);
}
__m256h test_mm256_mask_min_ph(__m256h __W, __mmask32 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_mask_min_ph
+ // CHECK-LABEL: test_mm256_mask_min_ph
// CHECK: @llvm.x86.avx512fp16.min.ph.256
return (__m256h)_mm256_mask_min_ph(__W, __U, __A, __B);
}
__m256h test_mm256_maskz_min_ph(__mmask32 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_ph
+ // CHECK-LABEL: test_mm256_maskz_min_ph
// CHECK: @llvm.x86.avx512fp16.min.ph.256
return _mm256_maskz_min_ph(__U, __A, __B);
}
__m128h test_mm_min_ph(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_min_ph
+ // CHECK-LABEL: test_mm_min_ph
// CHECK: @llvm.x86.avx512fp16.min.ph.128
return _mm_min_ph(__A, __B);
}
__m128h test_mm_mask_min_ph(__m128h __W, __mmask32 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_min_ph
+ // CHECK-LABEL: test_mm_mask_min_ph
// CHECK: @llvm.x86.avx512fp16.min.ph.128
return (__m128h)_mm_mask_min_ph(__W, __U, __A, __B);
}
__m128h test_mm_maskz_min_ph(__mmask32 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_min_ph
+ // CHECK-LABEL: test_mm_maskz_min_ph
// CHECK: @llvm.x86.avx512fp16.min.ph.128
return _mm_maskz_min_ph(__U, __A, __B);
}
__m256h test_mm256_max_ph(__m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_max_ph
+ // CHECK-LABEL: test_mm256_max_ph
// CHECK: @llvm.x86.avx512fp16.max.ph.256
return _mm256_max_ph(__A, __B);
}
__m256h test_mm256_mask_max_ph(__m256h __W, __mmask32 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_mask_max_ph
+ // CHECK-LABEL: test_mm256_mask_max_ph
// CHECK: @llvm.x86.avx512fp16.max.ph.256
return (__m256h)_mm256_mask_max_ph(__W, __U, __A, __B);
}
__m256h test_mm256_maskz_max_ph(__mmask32 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_ph
+ // CHECK-LABEL: test_mm256_maskz_max_ph
// CHECK: @llvm.x86.avx512fp16.max.ph.256
return _mm256_maskz_max_ph(__U, __A, __B);
}
__m128h test_mm_max_ph(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_max_ph
+ // CHECK-LABEL: test_mm_max_ph
// CHECK: @llvm.x86.avx512fp16.max.ph.128
return _mm_max_ph(__A, __B);
}
__m128h test_mm_mask_max_ph(__m128h __W, __mmask32 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_max_ph
+ // CHECK-LABEL: test_mm_mask_max_ph
// CHECK: @llvm.x86.avx512fp16.max.ph.128
return (__m128h)_mm_mask_max_ph(__W, __U, __A, __B);
}
__m128h test_mm_maskz_max_ph(__mmask32 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_max_ph
+ // CHECK-LABEL: test_mm_maskz_max_ph
// CHECK: @llvm.x86.avx512fp16.max.ph.128
return _mm_maskz_max_ph(__U, __A, __B);
}
__m128h test_mm_abs_ph(__m128h a) {
- // CHECK-LABEL: @test_mm_abs_ph
+ // CHECK-LABEL: test_mm_abs_ph
// CHECK: and <4 x i32>
return _mm_abs_ph(a);
}
+TEST_CONSTEXPR(match_m128h(_mm_abs_ph((__m128h){-1.0, 2.0, -3.0, 4.0, -5.0, 6.0, -7.0, 8.0}), 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0));
__m256h test_mm256_abs_ph(__m256h a) {
- // CHECK-LABEL: @test_mm256_abs_ph
+ // CHECK-LABEL: test_mm256_abs_ph
// CHECK: and <8 x i32>
return _mm256_abs_ph(a);
}
+TEST_CONSTEXPR(match_m256h(_mm256_abs_ph((__m256h){-1.0, 2.0, -3.0, 4.0, -5.0, 6.0, -7.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0, 14.0, -15.0, 16.0}), 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0));
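+// A note on why the CHECK lines above expect an integer 'and': _mm_abs_ph and
+// _mm256_abs_ph appear to clear the half-precision sign bits by masking the
+// vector reinterpreted as i32 lanes, so the constexpr results are simply the
+// element-wise absolute values.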
__m256h test_mm256_conj_pch(__m256h __A) {
- // CHECK-LABEL: @test_mm256_conj_pch
+ // CHECK-LABEL: test_mm256_conj_pch
// CHECK: %{{.*}} = bitcast <16 x half> %{{.*}} to <8 x float>
// CHECK: %{{.*}} = bitcast <8 x float> %{{.*}} to <8 x i32>
// CHECK: %{{.*}} = bitcast <8 x float> %{{.*}} to <8 x i32>
@@ -417,7 +429,7 @@ __m256h test_mm256_conj_pch(__m256h __A) {
}
__m256h test_mm256_mask_conj_pch(__m256h __W, __mmask32 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_mask_conj_pch
+ // CHECK-LABEL: test_mm256_mask_conj_pch
// CHECK: %{{.*}} = trunc i32 %{{.*}} to i8
// CHECK: %{{.*}} = bitcast <16 x half> %{{.*}} to <8 x float>
// CHECK: %{{.*}} = bitcast <8 x float> %{{.*}} to <8 x i32>
@@ -433,7 +445,7 @@ __m256h test_mm256_mask_conj_pch(__m256h __W, __mmask32 __U, __m256h __A) {
}
__m256h test_mm256_maskz_conj_pch(__mmask32 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_maskz_conj_pch
+ // CHECK-LABEL: test_mm256_maskz_conj_pch
// CHECK: %{{.*}} = trunc i32 %{{.*}} to i8
// CHECK: %{{.*}} = bitcast <16 x half> %{{.*}} to <8 x float>
// CHECK: %{{.*}} = bitcast <8 x float> %{{.*}} to <8 x i32>
@@ -448,7 +460,7 @@ __m256h test_mm256_maskz_conj_pch(__mmask32 __U, __m256h __A) {
}
__m128h test_mm_conj_pch(__m128h __A) {
- // CHECK-LABEL: @test_mm_conj_pch
+ // CHECK-LABEL: test_mm_conj_pch
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
// CHECK: %{{.*}} = bitcast <4 x float> %{{.*}} to <4 x i32>
// CHECK: %{{.*}} = bitcast <4 x float> %{{.*}} to <4 x i32>
@@ -459,7 +471,7 @@ __m128h test_mm_conj_pch(__m128h __A) {
}
__m128h test_mm_mask_conj_pch(__m128h __W, __mmask32 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_mask_conj_pch
+ // CHECK-LABEL: test_mm_mask_conj_pch
// CHECK: %{{.*}} = trunc i32 %{{.*}} to i8
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
// CHECK: %{{.*}} = bitcast <4 x float> %{{.*}} to <4 x i32>
@@ -476,7 +488,7 @@ __m128h test_mm_mask_conj_pch(__m128h __W, __mmask32 __U, __m128h __A) {
}
__m128h test_mm_maskz_conj_pch(__mmask32 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_maskz_conj_pch
+ // CHECK-LABEL: test_mm_maskz_conj_pch
// CHECK: %{{.*}} = trunc i32 %{{.*}} to i8
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <4 x float>
// CHECK: %{{.*}} = bitcast <4 x float> %{{.*}} to <4 x i32>
@@ -492,7 +504,7 @@ __m128h test_mm_maskz_conj_pch(__mmask32 __U, __m128h __A) {
}
__mmask16 test_mm256_cmp_ph_mask_eq_oq(__m256h a, __m256h b) {
- // CHECK-LABEL: @test_mm256_cmp_ph_mask_eq_oq
+ // CHECK-LABEL: test_mm256_cmp_ph_mask_eq_oq
// CHECK: fcmp oeq <16 x half> %{{.*}}, %{{.*}}
return _mm256_cmp_ph_mask(a, b, _CMP_EQ_OQ);
}
@@ -684,7 +696,7 @@ __mmask16 test_mm256_cmp_ph_mask_true_us(__m256h a, __m256h b) {
}
__mmask16 test_mm256_mask_cmp_ph_mask_eq_oq(__mmask16 m, __m256h a, __m256h b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_ph_mask_eq_oq
+ // CHECK-LABEL: test_mm256_mask_cmp_ph_mask_eq_oq
// CHECK: [[CMP:%.*]] = fcmp oeq <16 x half> %{{.*}}, %{{.*}}
// CHECK: and <16 x i1> [[CMP]], {{.*}}
return _mm256_mask_cmp_ph_mask(m, a, b, _CMP_EQ_OQ);
@@ -908,7 +920,7 @@ __mmask16 test_mm256_mask_cmp_ph_mask_true_us(__mmask16 m, __m256h a, __m256h b)
}
__mmask8 test_mm_cmp_ph_mask_eq_oq(__m128h a, __m128h b) {
- // CHECK-LABEL: @test_mm_cmp_ph_mask_eq_oq
+ // CHECK-LABEL: test_mm_cmp_ph_mask_eq_oq
// CHECK: fcmp oeq <8 x half> %{{.*}}, %{{.*}}
return _mm_cmp_ph_mask(a, b, _CMP_EQ_OQ);
}
@@ -1100,7 +1112,7 @@ __mmask8 test_mm_cmp_ph_mask_true_us(__m128h a, __m128h b) {
}
__mmask8 test_mm_mask_cmp_ph_mask_eq_oq(__mmask8 m, __m128h a, __m128h b) {
- // CHECK-LABEL: @test_mm_mask_cmp_ph_mask_eq_oq
+ // CHECK-LABEL: test_mm_mask_cmp_ph_mask_eq_oq
// CHECK: [[CMP:%.*]] = fcmp oeq <8 x half> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> [[CMP]], {{.*}}
return _mm_mask_cmp_ph_mask(m, a, b, _CMP_EQ_OQ);
@@ -1324,315 +1336,315 @@ __mmask8 test_mm_mask_cmp_ph_mask_true_us(__mmask8 m, __m128h a, __m128h b) {
}
__m256h test_mm256_rcp_ph(__m256h __A) {
- // CHECK-LABEL: @test_mm256_rcp_ph
+ // CHECK-LABEL: test_mm256_rcp_ph
// CHECK: @llvm.x86.avx512fp16.mask.rcp.ph.256
return _mm256_rcp_ph(__A);
}
__m256h test_mm256_mask_rcp_ph(__m256h __W, __mmask32 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_mask_rcp_ph
+ // CHECK-LABEL: test_mm256_mask_rcp_ph
// CHECK: @llvm.x86.avx512fp16.mask.rcp.ph.256
return (__m256h)_mm256_mask_rcp_ph(__W, __U, __A);
}
__m256h test_mm256_maskz_rcp_ph(__mmask32 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_maskz_rcp_ph
+ // CHECK-LABEL: test_mm256_maskz_rcp_ph
// CHECK: @llvm.x86.avx512fp16.mask.rcp.ph.256
return _mm256_maskz_rcp_ph(__U, __A);
}
__m128h test_mm_rcp_ph(__m128h __A) {
- // CHECK-LABEL: @test_mm_rcp_ph
+ // CHECK-LABEL: test_mm_rcp_ph
// CHECK: @llvm.x86.avx512fp16.mask.rcp.ph.128
return _mm_rcp_ph(__A);
}
__m128h test_mm_mask_rcp_ph(__m128h __W, __mmask32 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_mask_rcp_ph
+ // CHECK-LABEL: test_mm_mask_rcp_ph
// CHECK: @llvm.x86.avx512fp16.mask.rcp.ph.128
return (__m128h)_mm_mask_rcp_ph(__W, __U, __A);
}
__m128h test_mm_maskz_rcp_ph(__mmask32 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_maskz_rcp_ph
+ // CHECK-LABEL: test_mm_maskz_rcp_ph
// CHECK: @llvm.x86.avx512fp16.mask.rcp.ph.128
return _mm_maskz_rcp_ph(__U, __A);
}
__m256h test_mm256_rsqrt_ph(__m256h __A) {
- // CHECK-LABEL: @test_mm256_rsqrt_ph
+ // CHECK-LABEL: test_mm256_rsqrt_ph
// CHECK: @llvm.x86.avx512fp16.mask.rsqrt.ph.256
return _mm256_rsqrt_ph(__A);
}
__m256h test_mm256_mask_rsqrt_ph(__m256h __W, __mmask16 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_mask_rsqrt_ph
+ // CHECK-LABEL: test_mm256_mask_rsqrt_ph
// CHECK: @llvm.x86.avx512fp16.mask.rsqrt.ph.256
return (__m256h)_mm256_mask_rsqrt_ph(__W, __U, __A);
}
__m256h test_mm256_maskz_rsqrt_ph(__mmask16 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_maskz_rsqrt_ph
+ // CHECK-LABEL: test_mm256_maskz_rsqrt_ph
// CHECK: @llvm.x86.avx512fp16.mask.rsqrt.ph.256
return _mm256_maskz_rsqrt_ph(__U, __A);
}
__m128h test_mm_rsqrt_ph(__m128h __A) {
- // CHECK-LABEL: @test_mm_rsqrt_ph
+ // CHECK-LABEL: test_mm_rsqrt_ph
// CHECK: @llvm.x86.avx512fp16.mask.rsqrt.ph.128
return _mm_rsqrt_ph(__A);
}
__m128h test_mm_mask_rsqrt_ph(__m128h __W, __mmask32 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_mask_rsqrt_ph
+ // CHECK-LABEL: test_mm_mask_rsqrt_ph
// CHECK: @llvm.x86.avx512fp16.mask.rsqrt.ph.128
return (__m128h)_mm_mask_rsqrt_ph(__W, __U, __A);
}
__m128h test_mm_maskz_rsqrt_ph(__mmask32 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_maskz_rsqrt_ph
+ // CHECK-LABEL: test_mm_maskz_rsqrt_ph
// CHECK: @llvm.x86.avx512fp16.mask.rsqrt.ph.128
return _mm_maskz_rsqrt_ph(__U, __A);
}
__m128h test_mm_getmant_ph(__m128h __A) {
- // CHECK-LABEL: @test_mm_getmant_ph
+ // CHECK-LABEL: test_mm_getmant_ph
// CHECK: @llvm.x86.avx512fp16.mask.getmant.ph.128
return _mm_getmant_ph(__A, _MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128h test_mm_mask_getmant_ph(__m128h __W, __mmask8 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_mask_getmant_ph
+ // CHECK-LABEL: test_mm_mask_getmant_ph
// CHECK: @llvm.x86.avx512fp16.mask.getmant.ph.128
return _mm_mask_getmant_ph(__W, __U, __A, _MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128h test_mm_maskz_getmant_ph(__mmask8 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_maskz_getmant_ph
+ // CHECK-LABEL: test_mm_maskz_getmant_ph
// CHECK: @llvm.x86.avx512fp16.mask.getmant.ph.128
return _mm_maskz_getmant_ph(__U, __A, _MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256h test_mm256_getmant_ph(__m256h __A) {
- // CHECK-LABEL: @test_mm256_getmant_ph
+ // CHECK-LABEL: test_mm256_getmant_ph
// CHECK: @llvm.x86.avx512fp16.mask.getmant.ph.256
return _mm256_getmant_ph(__A, _MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256h test_mm256_mask_getmant_ph(__m256h __W, __mmask16 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_mask_getmant_ph
+ // CHECK-LABEL: test_mm256_mask_getmant_ph
// CHECK: @llvm.x86.avx512fp16.mask.getmant.ph.256
return _mm256_mask_getmant_ph(__W, __U, __A, _MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256h test_mm256_maskz_getmant_ph(__mmask16 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_maskz_getmant_ph
+ // CHECK-LABEL: test_mm256_maskz_getmant_ph
// CHECK: @llvm.x86.avx512fp16.mask.getmant.ph.256
return _mm256_maskz_getmant_ph(__U, __A, _MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128h test_mm_getexp_ph(__m128h __A) {
- // CHECK-LABEL: @test_mm_getexp_ph
+ // CHECK-LABEL: test_mm_getexp_ph
// CHECK: @llvm.x86.avx512fp16.mask.getexp.ph.128
return _mm_getexp_ph(__A);
}
__m128h test_mm_mask_getexp_ph(__m128h __W, __mmask8 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_mask_getexp_ph
+ // CHECK-LABEL: test_mm_mask_getexp_ph
// CHECK: @llvm.x86.avx512fp16.mask.getexp.ph.128
return _mm_mask_getexp_ph(__W, __U, __A);
}
__m128h test_mm_maskz_getexp_ph(__mmask8 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_maskz_getexp_ph
+ // CHECK-LABEL: test_mm_maskz_getexp_ph
// CHECK: @llvm.x86.avx512fp16.mask.getexp.ph.128
return _mm_maskz_getexp_ph(__U, __A);
}
__m256h test_mm256_getexp_ph(__m256h __A) {
- // CHECK-LABEL: @test_mm256_getexp_ph
+ // CHECK-LABEL: test_mm256_getexp_ph
// CHECK: @llvm.x86.avx512fp16.mask.getexp.ph.256
return _mm256_getexp_ph(__A);
}
__m256h test_mm256_mask_getexp_ph(__m256h __W, __mmask16 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_mask_getexp_ph
+ // CHECK-LABEL: test_mm256_mask_getexp_ph
// CHECK: @llvm.x86.avx512fp16.mask.getexp.ph.256
return _mm256_mask_getexp_ph(__W, __U, __A);
}
__m256h test_mm256_maskz_getexp_ph(__mmask16 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_maskz_getexp_ph
+ // CHECK-LABEL: test_mm256_maskz_getexp_ph
// CHECK: @llvm.x86.avx512fp16.mask.getexp.ph.256
return _mm256_maskz_getexp_ph(__U, __A);
}
__m128h test_mm_scalef_ph(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_scalef_ph
+ // CHECK-LABEL: test_mm_scalef_ph
// CHECK: @llvm.x86.avx512fp16.mask.scalef.ph.128
return _mm_scalef_ph(__A, __B);
}
__m128h test_mm_mask_scalef_ph(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_scalef_ph
+ // CHECK-LABEL: test_mm_mask_scalef_ph
// CHECK: @llvm.x86.avx512fp16.mask.scalef.ph.128
return _mm_mask_scalef_ph(__W, __U, __A, __B);
}
__m128h test_mm_maskz_scalef_ph(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_scalef_ph
+ // CHECK-LABEL: test_mm_maskz_scalef_ph
// CHECK: @llvm.x86.avx512fp16.mask.scalef.ph.128
return _mm_maskz_scalef_ph(__U, __A, __B);
}
__m256h test_mm256_scalef_ph(__m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_scalef_ph
+ // CHECK-LABEL: test_mm256_scalef_ph
// CHECK: @llvm.x86.avx512fp16.mask.scalef.ph.256
return _mm256_scalef_ph(__A, __B);
}
__m256h test_mm256_mask_scalef_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_mask_scalef_ph
+ // CHECK-LABEL: test_mm256_mask_scalef_ph
// CHECK: @llvm.x86.avx512fp16.mask.scalef.ph.256
return _mm256_mask_scalef_ph(__W, __U, __A, __B);
}
__m256h test_mm256_maskz_scalef_ph(__mmask16 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_maskz_scalef_ph
+ // CHECK-LABEL: test_mm256_maskz_scalef_ph
// CHECK: @llvm.x86.avx512fp16.mask.scalef.ph.256
return _mm256_maskz_scalef_ph(__U, __A, __B);
}
__m128h test_mm_roundscale_ph(__m128h __A) {
- // CHECK-LABEL: @test_mm_roundscale_ph
+ // CHECK-LABEL: test_mm_roundscale_ph
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.ph.128
return _mm_roundscale_ph(__A, 4);
}
__m128h test_mm_mask_roundscale_ph(__m128h __W, __mmask8 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_mask_roundscale_ph
+ // CHECK-LABEL: test_mm_mask_roundscale_ph
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.ph.128
return _mm_mask_roundscale_ph(__W, __U, __A, 4);
}
__m128h test_mm_maskz_roundscale_ph(__mmask8 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_maskz_roundscale_ph
+ // CHECK-LABEL: test_mm_maskz_roundscale_ph
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.ph.128
return _mm_maskz_roundscale_ph(__U, __A, 4);
}
__m256h test_mm256_roundscale_ph(__m256h __A) {
- // CHECK-LABEL: @test_mm256_roundscale_ph
+ // CHECK-LABEL: test_mm256_roundscale_ph
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.ph.256
return _mm256_roundscale_ph(__A, 4);
}
__m256h test_mm256_mask_roundscale_ph(__m256h __W, __mmask16 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_mask_roundscale_ph
+ // CHECK-LABEL: test_mm256_mask_roundscale_ph
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.ph.256
return _mm256_mask_roundscale_ph(__W, __U, __A, 4);
}
__m256h test_mm256_maskz_roundscale_ph(__mmask16 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_maskz_roundscale_ph
+ // CHECK-LABEL: test_mm256_maskz_roundscale_ph
// CHECK: @llvm.x86.avx512fp16.mask.rndscale.ph.256
return _mm256_maskz_roundscale_ph(__U, __A, 4);
}
__m128h test_mm_reduce_ph(__m128h __A) {
- // CHECK-LABEL: @test_mm_reduce_ph
+ // CHECK-LABEL: test_mm_reduce_ph
// CHECK: @llvm.x86.avx512fp16.mask.reduce.ph.128
return _mm_reduce_ph(__A, 4);
}
__m128h test_mm_mask_reduce_ph(__m128h __W, __mmask8 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_mask_reduce_ph
+ // CHECK-LABEL: test_mm_mask_reduce_ph
// CHECK: @llvm.x86.avx512fp16.mask.reduce.ph.128
return _mm_mask_reduce_ph(__W, __U, __A, 4);
}
__m128h test_mm_maskz_reduce_ph(__mmask8 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_maskz_reduce_ph
+ // CHECK-LABEL: test_mm_maskz_reduce_ph
// CHECK: @llvm.x86.avx512fp16.mask.reduce.ph.128
return _mm_maskz_reduce_ph(__U, __A, 4);
}
__m256h test_mm256_reduce_ph(__m256h __A) {
- // CHECK-LABEL: @test_mm256_reduce_ph
+ // CHECK-LABEL: test_mm256_reduce_ph
// CHECK: @llvm.x86.avx512fp16.mask.reduce.ph.256
return _mm256_reduce_ph(__A, 4);
}
__m256h test_mm256_mask_reduce_ph(__m256h __W, __mmask16 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_mask_reduce_ph
+ // CHECK-LABEL: test_mm256_mask_reduce_ph
// CHECK: @llvm.x86.avx512fp16.mask.reduce.ph.256
return _mm256_mask_reduce_ph(__W, __U, __A, 4);
}
__m256h test_mm256_maskz_reduce_ph(__mmask16 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_maskz_reduce_ph
+ // CHECK-LABEL: test_mm256_maskz_reduce_ph
// CHECK: @llvm.x86.avx512fp16.mask.reduce.ph.256
return _mm256_maskz_reduce_ph(__U, __A, 4);
}
__m128h test_mm_sqrt_ph(__m128h x) {
// CHECK-LABEL: test_mm_sqrt_ph
- // CHECK: call <8 x half> @llvm.sqrt.v8f16(<8 x half> {{.*}})
+ // CHECK: @llvm.sqrt.v8f16(<8 x half> {{.*}})
return _mm_sqrt_ph(x);
}
__m256h test_mm256_sqrt_ph(__m256h A) {
// CHECK-LABEL: test_mm256_sqrt_ph
- // CHECK: call <16 x half> @llvm.sqrt.v16f16(<16 x half> %{{.*}})
+ // CHECK: @llvm.sqrt.v16f16(<16 x half> %{{.*}})
return _mm256_sqrt_ph(A);
}
__m128h test_mm_mask_sqrt_ph(__m128h __W, __mmask8 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_mask_sqrt_ph
+ // CHECK-LABEL: test_mm_mask_sqrt_ph
// CHECK: @llvm.sqrt.v8f16
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask_sqrt_ph(__W, __U, __A);
}
__m128h test_mm_maskz_sqrt_ph(__mmask8 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_maskz_sqrt_ph
+ // CHECK-LABEL: test_mm_maskz_sqrt_ph
// CHECK: @llvm.sqrt.v8f16
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_maskz_sqrt_ph(__U, __A);
}
__m256h test_mm256_mask_sqrt_ph(__m256h __W, __mmask16 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_mask_sqrt_ph
+ // CHECK-LABEL: test_mm256_mask_sqrt_ph
// CHECK: @llvm.sqrt.v16f16
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_mask_sqrt_ph(__W, __U, __A);
}
__m256h test_mm256_maskz_sqrt_ph(__mmask16 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_maskz_sqrt_ph
+ // CHECK-LABEL: test_mm256_maskz_sqrt_ph
// CHECK: @llvm.sqrt.v16f16
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_maskz_sqrt_ph(__U, __A);
}
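// A rough per-lane model of the masking contract the paired sqrt + select
// CHECK lines above encode (hypothetical helpers, not part of this test file;
// assumes clang's __builtin_sqrtf16 is available for _Float16):
_Float16 mask_sqrt_lane_model(_Float16 w, int mask_bit, _Float16 a) {
  _Float16 r = __builtin_sqrtf16(a); // the @llvm.sqrt.v*f16 call the CHECK matches
  return mask_bit ? r : w;           // merge-masking keeps the passthrough __W lane
}
_Float16 maskz_sqrt_lane_model(int mask_bit, _Float16 a) {
  _Float16 r = __builtin_sqrtf16(a);
  return mask_bit ? r : (_Float16)0; // zero-masking zeroes the inactive lane instead
}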
__mmask8 test_mm_mask_fpclass_ph_mask(__mmask8 __U, __m128h __A) {
- // CHECK-LABEL: @test_mm_mask_fpclass_ph_mask
+ // CHECK-LABEL: test_mm_mask_fpclass_ph_mask
// CHECK: @llvm.x86.avx512fp16.fpclass.ph.128
return _mm_mask_fpclass_ph_mask(__U, __A, 2);
}
__mmask8 test_mm_fpclass_ph_mask(__m128h __A) {
- // CHECK-LABEL: @test_mm_fpclass_ph_mask
+ // CHECK-LABEL: test_mm_fpclass_ph_mask
// CHECK: @llvm.x86.avx512fp16.fpclass.ph.128
return _mm_fpclass_ph_mask(__A, 2);
}
__mmask16 test_mm256_mask_fpclass_ph_mask(__mmask16 __U, __m256h __A) {
- // CHECK-LABEL: @test_mm256_mask_fpclass_ph_mask
+ // CHECK-LABEL: test_mm256_mask_fpclass_ph_mask
// CHECK: @llvm.x86.avx512fp16.fpclass.ph.256
return _mm256_mask_fpclass_ph_mask(__U, __A, 2);
}
__mmask16 test_mm256_fpclass_ph_mask(__m256h __A) {
- // CHECK-LABEL: @test_mm256_fpclass_ph_mask
+ // CHECK-LABEL: test_mm256_fpclass_ph_mask
// CHECK: @llvm.x86.avx512fp16.fpclass.ph.256
return _mm256_fpclass_ph_mask(__A, 2);
}
@@ -2430,420 +2442,420 @@ __m128h test_mm256_maskz_cvtxps_ph(__mmask8 A, __m256 B) {
}
__m128h test_mm_fmadd_ph(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fmadd_ph
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK-LABEL: test_mm_fmadd_ph
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
return _mm_fmadd_ph(__A, __B, __C);
}
__m128h test_mm_mask_fmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_mask_fmadd_ph
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_fmadd_ph
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask_fmadd_ph(__A, __U, __B, __C);
}
__m128h test_mm_fmsub_ph(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fmsub_ph
+ // CHECK-LABEL: test_mm_fmsub_ph
// CHECK: fneg
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
return _mm_fmsub_ph(__A, __B, __C);
}
__m128h test_mm_mask_fmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_mask_fmsub_ph
+ // CHECK-LABEL: test_mm_mask_fmsub_ph
// CHECK: fneg
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask_fmsub_ph(__A, __U, __B, __C);
}
__m128h test_mm_mask3_fmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmadd_ph
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK-LABEL: test_mm_mask3_fmadd_ph
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask3_fmadd_ph(__A, __B, __C, __U);
}
__m128h test_mm_mask3_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmadd_ph
+ // CHECK-LABEL: test_mm_mask3_fnmadd_ph
// CHECK: fneg
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask3_fnmadd_ph(__A, __B, __C, __U);
}
__m128h test_mm_maskz_fmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fmadd_ph
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_fmadd_ph
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_maskz_fmadd_ph(__U, __A, __B, __C);
}
__m128h test_mm_maskz_fmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fmsub_ph
+ // CHECK-LABEL: test_mm_maskz_fmsub_ph
// CHECK: fneg
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_maskz_fmsub_ph(__U, __A, __B, __C);
}
__m128h test_mm_maskz_fnmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmadd_ph
+ // CHECK-LABEL: test_mm_maskz_fnmadd_ph
// CHECK: fneg
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_maskz_fnmadd_ph(__U, __A, __B, __C);
}
__m128h test_mm_maskz_fnmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmsub_ph
+ // CHECK-LABEL: test_mm_maskz_fnmsub_ph
// CHECK: fneg
// CHECK: fneg
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_maskz_fnmsub_ph(__U, __A, __B, __C);
}
__m256h test_mm256_fmadd_ph(__m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_fmadd_ph
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK-LABEL: test_mm256_fmadd_ph
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
return _mm256_fmadd_ph(__A, __B, __C);
}
__m256h test_mm256_mask_fmadd_ph(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_mask_fmadd_ph
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_fmadd_ph
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_mask_fmadd_ph(__A, __U, __B, __C);
}
__m256h test_mm256_fmsub_ph(__m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_fmsub_ph
+ // CHECK-LABEL: test_mm256_fmsub_ph
// CHECK: fneg
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
return _mm256_fmsub_ph(__A, __B, __C);
}
__m256h test_mm256_mask_fmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_mask_fmsub_ph
+ // CHECK-LABEL: test_mm256_mask_fmsub_ph
// CHECK: fneg
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_mask_fmsub_ph(__A, __U, __B, __C);
}
__m256h test_mm256_mask3_fmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmadd_ph
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask3_fmadd_ph
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_mask3_fmadd_ph(__A, __B, __C, __U);
}
__m256h test_mm256_mask3_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fnmadd_ph
+ // CHECK-LABEL: test_mm256_mask3_fnmadd_ph
// CHECK: fneg
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_mask3_fnmadd_ph(__A, __B, __C, __U);
}
__m256h test_mm256_maskz_fmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmadd_ph
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_fmadd_ph
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_maskz_fmadd_ph(__U, __A, __B, __C);
}
__m256h test_mm256_maskz_fmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmsub_ph
+ // CHECK-LABEL: test_mm256_maskz_fmsub_ph
// CHECK: fneg
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_maskz_fmsub_ph(__U, __A, __B, __C);
}
__m256h test_mm256_maskz_fnmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_maskz_fnmadd_ph
+ // CHECK-LABEL: test_mm256_maskz_fnmadd_ph
// CHECK: fneg
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_maskz_fnmadd_ph(__U, __A, __B, __C);
}
__m256h test_mm256_maskz_fnmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_maskz_fnmsub_ph
+ // CHECK-LABEL: test_mm256_maskz_fnmsub_ph
// CHECK: fneg
// CHECK: fneg
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_maskz_fnmsub_ph(__U, __A, __B, __C);
}
__m128h test_mm_fmaddsub_ph(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fmaddsub_ph
+ // CHECK-LABEL: test_mm_fmaddsub_ph
// CHECK-NOT: fneg
- // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
return _mm_fmaddsub_ph(__A, __B, __C);
}
__m128h test_mm_mask_fmaddsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_mask_fmaddsub_ph
+ // CHECK-LABEL: test_mm_mask_fmaddsub_ph
// CHECK-NOT: fneg
- // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask_fmaddsub_ph(__A, __U, __B, __C);
}
__m128h test_mm_fmsubadd_ph(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fmsubadd_ph
+ // CHECK-LABEL: test_mm_fmsubadd_ph
// CHECK: [[NEG:%.+]] = fneg
- // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> [[NEG]])
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> [[NEG]])
return _mm_fmsubadd_ph(__A, __B, __C);
}
__m128h test_mm_mask_fmsubadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_mask_fmsubadd_ph
+ // CHECK-LABEL: test_mm_mask_fmsubadd_ph
// CHECK: [[NEG:%.+]] = fneg
- // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> [[NEG]])
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> [[NEG]])
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask_fmsubadd_ph(__A, __U, __B, __C);
}
__m128h test_mm_mask3_fmaddsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmaddsub_ph
+ // CHECK-LABEL: test_mm_mask3_fmaddsub_ph
// CHECK-NOT: fneg
- // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask3_fmaddsub_ph(__A, __B, __C, __U);
}
__m128h test_mm_maskz_fmaddsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fmaddsub_ph
+ // CHECK-LABEL: test_mm_maskz_fmaddsub_ph
// CHECK-NOT: fneg
- // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_maskz_fmaddsub_ph(__U, __A, __B, __C);
}
__m128h test_mm_maskz_fmsubadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fmsubadd_ph
+ // CHECK-LABEL: test_mm_maskz_fmsubadd_ph
// CHECK: [[NEG:%.+]] = fneg
- // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> [[NEG]])
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> [[NEG]])
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_maskz_fmsubadd_ph(__U, __A, __B, __C);
}
__m256h test_mm256_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_fmaddsub_ph
+ // CHECK-LABEL: test_mm256_fmaddsub_ph
// CHECK-NOT: fneg
- // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
return _mm256_fmaddsub_ph(__A, __B, __C);
}
__m256h test_mm256_mask_fmaddsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_mask_fmaddsub_ph
+ // CHECK-LABEL: test_mm256_mask_fmaddsub_ph
// CHECK-NOT: fneg
- // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_mask_fmaddsub_ph(__A, __U, __B, __C);
}
__m256h test_mm256_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_fmsubadd_ph
+ // CHECK-LABEL: test_mm256_fmsubadd_ph
// CHECK: [[NEG:%.+]] = fneg
- // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> [[NEG]])
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> [[NEG]])
return _mm256_fmsubadd_ph(__A, __B, __C);
}
__m256h test_mm256_mask_fmsubadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_mask_fmsubadd_ph
+ // CHECK-LABEL: test_mm256_mask_fmsubadd_ph
// CHECK: [[NEG:%.+]] = fneg
- // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> [[NEG]])
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> [[NEG]])
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_mask_fmsubadd_ph(__A, __U, __B, __C);
}
__m256h test_mm256_mask3_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmaddsub_ph
+ // CHECK-LABEL: test_mm256_mask3_fmaddsub_ph
// CHECK-NOT: fneg
- // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_mask3_fmaddsub_ph(__A, __B, __C, __U);
}
__m256h test_mm256_maskz_fmaddsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmaddsub_ph
+ // CHECK-LABEL: test_mm256_maskz_fmaddsub_ph
// CHECK-NOT: fneg
- // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_maskz_fmaddsub_ph(__U, __A, __B, __C);
}
__m256h test_mm256_maskz_fmsubadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmsubadd_ph
+ // CHECK-LABEL: test_mm256_maskz_fmsubadd_ph
// CHECK: [[NEG:%.+]] = fneg
- // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> [[NEG]])
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> [[NEG]])
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_maskz_fmsubadd_ph(__U, __A, __B, __C);
}
__m128h test_mm_mask3_fmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmsub_ph
+ // CHECK-LABEL: test_mm_mask3_fmsub_ph
// CHECK: fneg
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask3_fmsub_ph(__A, __B, __C, __U);
}
__m256h test_mm256_mask3_fmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmsub_ph
+ // CHECK-LABEL: test_mm256_mask3_fmsub_ph
// CHECK: fneg
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_mask3_fmsub_ph(__A, __B, __C, __U);
}
__m128h test_mm_mask3_fmsubadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmsubadd_ph
+ // CHECK-LABEL: test_mm_mask3_fmsubadd_ph
// CHECK: [[NEG:%.+]] = fneg
- // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> [[NEG]])
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> [[NEG]])
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask3_fmsubadd_ph(__A, __B, __C, __U);
}
__m256h test_mm256_mask3_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmsubadd_ph
+ // CHECK-LABEL: test_mm256_mask3_fmsubadd_ph
// CHECK: [[NEG:%.+]] = fneg
- // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> [[NEG]])
+ // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> [[NEG]])
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_mask3_fmsubadd_ph(__A, __B, __C, __U);
}
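// The [[NEG]] captures above pin down the one structural difference between
// the fmaddsub and fmsubadd forms: fmsubadd negates the addend and then feeds
// the same vfmaddsub intrinsic. A minimal two-lane scalar sketch (hypothetical
// helpers, float instead of _Float16 for brevity):
void fmaddsub2_model(float r[2], const float a[2], const float b[2], const float c[2]) {
  r[0] = a[0] * b[0] - c[0]; // even lane subtracts
  r[1] = a[1] * b[1] + c[1]; // odd lane adds
}
void fmsubadd2_model(float r[2], const float a[2], const float b[2], const float c[2]) {
  float nc[2] = {-c[0], -c[1]};  // the fneg the CHECK lines capture as [[NEG]]
  fmaddsub2_model(r, a, b, nc);  // even lane now adds, odd lane subtracts
}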
__m128h test_mm_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fnmadd_ph
+ // CHECK-LABEL: test_mm_fnmadd_ph
// CHECK: fneg
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
return _mm_fnmadd_ph(__A, __B, __C);
}
__m128h test_mm_mask_fnmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_mask_fnmadd_ph
+ // CHECK-LABEL: test_mm_mask_fnmadd_ph
// CHECK: fneg
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask_fnmadd_ph(__A, __U, __B, __C);
}
__m256h test_mm256_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_fnmadd_ph
+ // CHECK-LABEL: test_mm256_fnmadd_ph
// CHECK: fneg
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
return _mm256_fnmadd_ph(__A, __B, __C);
}
__m256h test_mm256_mask_fnmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_mask_fnmadd_ph
+ // CHECK-LABEL: test_mm256_mask_fnmadd_ph
// CHECK: fneg
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_mask_fnmadd_ph(__A, __U, __B, __C);
}
__m128h test_mm_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fnmsub_ph
+ // CHECK-LABEL: test_mm_fnmsub_ph
// CHECK: fneg
// CHECK: fneg
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
return _mm_fnmsub_ph(__A, __B, __C);
}
__m128h test_mm_mask_fnmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_mask_fnmsub_ph
+ // CHECK-LABEL: test_mm_mask_fnmsub_ph
// CHECK: fneg
// CHECK: fneg
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask_fnmsub_ph(__A, __U, __B, __C);
}
__m128h test_mm_mask3_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmsub_ph
+ // CHECK-LABEL: test_mm_mask3_fnmsub_ph
// CHECK: fneg
// CHECK: fneg
- // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask3_fnmsub_ph(__A, __B, __C, __U);
}
__m256h test_mm256_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_fnmsub_ph
+ // CHECK-LABEL: test_mm256_fnmsub_ph
// CHECK: fneg
// CHECK: fneg
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
return _mm256_fnmsub_ph(__A, __B, __C);
}
__m256h test_mm256_mask_fnmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_mask_fnmsub_ph
+ // CHECK-LABEL: test_mm256_mask_fnmsub_ph
// CHECK: fneg
// CHECK: fneg
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_mask_fnmsub_ph(__A, __U, __B, __C);
}
__m256h test_mm256_mask3_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fnmsub_ph
+ // CHECK-LABEL: test_mm256_mask3_fnmsub_ph
// CHECK: fneg
// CHECK: fneg
- // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
+ // CHECK: @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}})
return _mm256_mask3_fnmsub_ph(__A, __B, __C, __U);
}
__m128h test_mm_fcmul_pch(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_fcmul_pch
+ // CHECK-LABEL: test_mm_fcmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.128
return _mm_fcmul_pch(__A, __B);
}
__m128h test_mm_mask_fcmul_pch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fcmul_pch
+ // CHECK-LABEL: test_mm_mask_fcmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.128
return _mm_mask_fcmul_pch(__W, __U, __A, __B);
}
__m128h test_mm_maskz_fcmul_pch(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_fcmul_pch
+ // CHECK-LABEL: test_mm_maskz_fcmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.128
return _mm_maskz_fcmul_pch(__U, __A, __B);
}
__m256h test_mm256_fcmul_pch(__m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_fcmul_pch
+ // CHECK-LABEL: test_mm256_fcmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.256
return _mm256_fcmul_pch(__A, __B);
}
__m256h test_mm256_mask_fcmul_pch(__m256h __W, __mmask8 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_mask_fcmul_pch
+ // CHECK-LABEL: test_mm256_mask_fcmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.256
return _mm256_mask_fcmul_pch(__W, __U, __A, __B);
}
__m256h test_mm256_maskz_fcmul_pch(__mmask8 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_maskz_fcmul_pch
+ // CHECK-LABEL: test_mm256_maskz_fcmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.256
return _mm256_maskz_fcmul_pch(__U, __A, __B);
}
__m128h test_mm_fcmadd_pch(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fcmadd_pch
+ // CHECK-LABEL: test_mm_fcmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.cph.128
return _mm_fcmadd_pch(__A, __B, __C);
}
__m128h test_mm_mask_fcmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_mask_fcmadd_pch
+ // CHECK-LABEL: test_mm_mask_fcmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.cph.128
// CHECK: %{{.*}} = shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: %{{.*}} = select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -2851,88 +2863,88 @@ __m128h test_mm_mask_fcmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h
}
__m128h test_mm_mask3_fcmadd_pch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fcmadd_pch
+ // CHECK-LABEL: test_mm_mask3_fcmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.cph.128
// CHECK-NOT: %{{.*}} = select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask3_fcmadd_pch(__A, __B, __C, __U);
}
__m128h test_mm_maskz_fcmadd_pch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fcmadd_pch
+ // CHECK-LABEL: test_mm_maskz_fcmadd_pch
// CHECK: @llvm.x86.avx512fp16.maskz.vfcmadd.cph.128
return _mm_maskz_fcmadd_pch(__U, __A, __B, __C);
}
__m256h test_mm256_fcmadd_pch(__m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_fcmadd_pch
+ // CHECK-LABEL: test_mm256_fcmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.cph.256
return _mm256_fcmadd_pch(__A, __B, __C);
}
__m256h test_mm256_mask_fcmadd_pch(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_mask_fcmadd_pch
+ // CHECK-LABEL: test_mm256_mask_fcmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.cph.256
// CHECK: %{{.*}} = select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_fcmadd_pch(__A, __U, __B, __C);
}
__m256h test_mm256_mask3_fcmadd_pch(__m256h __A, __m256h __B, __m256h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fcmadd_pch
+ // CHECK-LABEL: test_mm256_mask3_fcmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmadd.cph.256
// CHECK-NOT: %{{.*}} = select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask3_fcmadd_pch(__A, __B, __C, __U);
}
__m256h test_mm256_maskz_fcmadd_pch(__mmask8 __U, __m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_maskz_fcmadd_pch
+ // CHECK-LABEL: test_mm256_maskz_fcmadd_pch
// CHECK: @llvm.x86.avx512fp16.maskz.vfcmadd.cph.256
return _mm256_maskz_fcmadd_pch(__U, __A, __B, __C);
}
__m128h test_mm_fmul_pch(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_fmul_pch
+ // CHECK-LABEL: test_mm_fmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.128
return _mm_fmul_pch(__A, __B);
}
__m128h test_mm_mask_fmul_pch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_fmul_pch
+ // CHECK-LABEL: test_mm_mask_fmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.128
return _mm_mask_fmul_pch(__W, __U, __A, __B);
}
__m128h test_mm_maskz_fmul_pch(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_fmul_pch
+ // CHECK-LABEL: test_mm_maskz_fmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.128
return _mm_maskz_fmul_pch(__U, __A, __B);
}
__m256h test_mm256_fmul_pch(__m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_fmul_pch
+ // CHECK-LABEL: test_mm256_fmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.256
return _mm256_fmul_pch(__A, __B);
}
__m256h test_mm256_mask_fmul_pch(__m256h __W, __mmask8 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_mask_fmul_pch
+ // CHECK-LABEL: test_mm256_mask_fmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.256
return _mm256_mask_fmul_pch(__W, __U, __A, __B);
}
__m256h test_mm256_maskz_fmul_pch(__mmask8 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_maskz_fmul_pch
+ // CHECK-LABEL: test_mm256_maskz_fmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.256
return _mm256_maskz_fmul_pch(__U, __A, __B);
}
__m128h test_mm_fmadd_pch(__m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_fmadd_pch
+ // CHECK-LABEL: test_mm_fmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.cph.128
return _mm_fmadd_pch(__A, __B, __C);
}
__m128h test_mm_mask_fmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_mask_fmadd_pch
+ // CHECK-LABEL: test_mm_mask_fmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.cph.128
// CHECK: %{{.*}} = shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: %{{.*}} = select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -2940,58 +2952,58 @@ __m128h test_mm_mask_fmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h _
}
__m128h test_mm_mask3_fmadd_pch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmadd_pch
+ // CHECK-LABEL: test_mm_mask3_fmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.cph.128
return _mm_mask3_fmadd_pch(__A, __B, __C, __U);
}
__m128h test_mm_maskz_fmadd_pch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
- // CHECK-LABEL: @test_mm_maskz_fmadd_pch
+ // CHECK-LABEL: test_mm_maskz_fmadd_pch
// CHECK: @llvm.x86.avx512fp16.maskz.vfmadd.cph.128
return _mm_maskz_fmadd_pch(__U, __A, __B, __C);
}
__m256h test_mm256_fmadd_pch(__m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_fmadd_pch
+ // CHECK-LABEL: test_mm256_fmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.cph.256
return _mm256_fmadd_pch(__A, __B, __C);
}
__m256h test_mm256_mask_fmadd_pch(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_mask_fmadd_pch
+ // CHECK-LABEL: test_mm256_mask_fmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.cph.256
// CHECK: %{{.*}} = select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_fmadd_pch(__A, __U, __B, __C);
}
__m256h test_mm256_mask3_fmadd_pch(__m256h __A, __m256h __B, __m256h __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmadd_pch
+ // CHECK-LABEL: test_mm256_mask3_fmadd_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmadd.cph.256
return _mm256_mask3_fmadd_pch(__A, __B, __C, __U);
}
__m256h test_mm256_maskz_fmadd_pch(__mmask8 __U, __m256h __A, __m256h __B, __m256h __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmadd_pch
+ // CHECK-LABEL: test_mm256_maskz_fmadd_pch
// CHECK: @llvm.x86.avx512fp16.maskz.vfmadd.cph.256
return _mm256_maskz_fmadd_pch(__U, __A, __B, __C);
}
__m128h test_mm_mask_blend_ph(__mmask8 __U, __m128h __A, __m128h __W) {
- // CHECK-LABEL: @test_mm_mask_blend_ph
+ // CHECK-LABEL: test_mm_mask_blend_ph
// CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: %{{.*}} = select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}
return _mm_mask_blend_ph(__U, __A, __W);
}
__m256h test_mm256_mask_blend_ph(__mmask16 __U, __m256h __A, __m256h __W) {
- // CHECK-LABEL: @test_mm256_mask_blend_ph
+ // CHECK-LABEL: test_mm256_mask_blend_ph
// CHECK: %{{.*}} = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: %{{.*}} = select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}
return _mm256_mask_blend_ph(__U, __A, __W);
}
__m128h test_mm_permutex2var_ph(__m128h __A, __m128i __I, __m128h __B) {
- // CHECK-LABEL: @test_mm_permutex2var_ph
+ // CHECK-LABEL: test_mm_permutex2var_ph
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <8 x i16>
// CHECK: %{{.*}} = bitcast <2 x i64> %{{.*}} to <8 x i16>
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <8 x i16>
@@ -3001,7 +3013,7 @@ __m128h test_mm_permutex2var_ph(__m128h __A, __m128i __I, __m128h __B) {
}
__m256h test_mm256_permutex2var_ph(__m256h __A, __m256i __I, __m256h __B) {
- // CHECK-LABEL: @test_mm256_permutex2var_ph
+ // CHECK-LABEL: test_mm256_permutex2var_ph
// CHECK: %{{.*}} = bitcast <16 x half> %{{.*}} to <16 x i16>
// CHECK: %{{.*}} = bitcast <4 x i64> %{{.*}} to <16 x i16>
// CHECK: %{{.*}} = bitcast <16 x half> %{{.*}} to <16 x i16>
@@ -3011,7 +3023,7 @@ __m256h test_mm256_permutex2var_ph(__m256h __A, __m256i __I, __m256h __B) {
}
__m128h test_mm_permutexvar_ph(__m128i __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_permutexvar_ph
+ // CHECK-LABEL: test_mm_permutexvar_ph
// CHECK: %{{.*}} = bitcast <8 x half> %{{.*}} to <8 x i16>
// CHECK: %{{.*}} = bitcast <2 x i64> %{{.*}} to <8 x i16>
// CHECK: %{{.*}} = call <8 x i16> @llvm.x86.avx512.permvar.hi.128(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
@@ -3020,7 +3032,7 @@ __m128h test_mm_permutexvar_ph(__m128i __A, __m128h __B) {
}
__m256h test_mm256_permutexvar_ph(__m256i __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_permutexvar_ph
+ // CHECK-LABEL: test_mm256_permutexvar_ph
// CHECK: %{{.*}} = bitcast <16 x half> %{{.*}} to <16 x i16>
// CHECK: %{{.*}} = bitcast <4 x i64> %{{.*}} to <16 x i16>
// CHECK: %{{.*}} = call <16 x i16> @llvm.x86.avx512.permvar.hi.256(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
@@ -3029,122 +3041,122 @@ __m256h test_mm256_permutexvar_ph(__m256i __A, __m256h __B) {
}
_Float16 test_mm256_reduce_add_ph(__m256h __W) {
- // CHECK-LABEL: @test_mm256_reduce_add_ph
- // CHECK: call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH8000, <16 x half> %{{.*}})
+ // CHECK-LABEL: test_mm256_reduce_add_ph
+ // CHECK: @llvm.vector.reduce.fadd.v16f16(half 0xH8000, <16 x half> %{{.*}})
return _mm256_reduce_add_ph(__W);
}
_Float16 test_mm256_reduce_mul_ph(__m256h __W) {
- // CHECK-LABEL: @test_mm256_reduce_mul_ph
- // CHECK: call reassoc half @llvm.vector.reduce.fmul.v16f16(half 0xH3C00, <16 x half> %{{.*}})
+ // CHECK-LABEL: test_mm256_reduce_mul_ph
+ // CHECK: @llvm.vector.reduce.fmul.v16f16(half 0xH3C00, <16 x half> %{{.*}})
return _mm256_reduce_mul_ph(__W);
}
_Float16 test_mm256_reduce_max_ph(__m256h __W) {
- // CHECK-LABEL: @test_mm256_reduce_max_ph
- // CHECK: call nnan half @llvm.vector.reduce.fmax.v16f16(<16 x half> %{{.*}})
+ // CHECK-LABEL: test_mm256_reduce_max_ph
+ // CHECK: @llvm.vector.reduce.fmax.v16f16(<16 x half> %{{.*}})
return _mm256_reduce_max_ph(__W);
}
_Float16 test_mm256_reduce_min_ph(__m256h __W) {
- // CHECK-LABEL: @test_mm256_reduce_min_ph
- // CHECK: call nnan half @llvm.vector.reduce.fmin.v16f16(<16 x half> %{{.*}})
+ // CHECK-LABEL: test_mm256_reduce_min_ph
+ // CHECK: @llvm.vector.reduce.fmin.v16f16(<16 x half> %{{.*}})
return _mm256_reduce_min_ph(__W);
}
_Float16 test_mm_reduce_add_ph(__m128h __W) {
- // CHECK-LABEL: @test_mm_reduce_add_ph
- // CHECK: call reassoc half @llvm.vector.reduce.fadd.v8f16(half 0xH8000, <8 x half> %{{.*}})
+ // CHECK-LABEL: test_mm_reduce_add_ph
+ // CHECK: @llvm.vector.reduce.fadd.v8f16(half 0xH8000, <8 x half> %{{.*}})
return _mm_reduce_add_ph(__W);
}
_Float16 test_mm_reduce_mul_ph(__m128h __W) {
- // CHECK-LABEL: @test_mm_reduce_mul_ph
- // CHECK: call reassoc half @llvm.vector.reduce.fmul.v8f16(half 0xH3C00, <8 x half> %{{.*}})
+ // CHECK-LABEL: test_mm_reduce_mul_ph
+ // CHECK: @llvm.vector.reduce.fmul.v8f16(half 0xH3C00, <8 x half> %{{.*}})
return _mm_reduce_mul_ph(__W);
}
_Float16 test_mm_reduce_min_ph(__m128h __W) {
- // CHECK-LABEL: @test_mm_reduce_min_ph
- // CHECK: call nnan half @llvm.vector.reduce.fmin.v8f16(<8 x half> %{{.*}})
+ // CHECK-LABEL: test_mm_reduce_min_ph
+ // CHECK: @llvm.vector.reduce.fmin.v8f16(<8 x half> %{{.*}})
return _mm_reduce_min_ph(__W);
}
_Float16 test_mm_reduce_max_ph(__m128h __W) {
- // CHECK-LABEL: @test_mm_reduce_max_ph
- // CHECK: call nnan half @llvm.vector.reduce.fmax.v8f16(<8 x half> %{{.*}})
+ // CHECK-LABEL: test_mm_reduce_max_ph
+ // CHECK: @llvm.vector.reduce.fmax.v8f16(<8 x half> %{{.*}})
return _mm_reduce_max_ph(__W);
}
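// In the original reduce checks above, the seed constants are the identity
// elements: 0xH8000 is half -0.0 (the fadd identity) and 0xH3C00 is half 1.0
// (the fmul identity); reassoc permits reassociating the fadd/fmul into a
// tree, while nnan on fmax/fmin assumes no NaN inputs. A rough scalar model
// of _mm_reduce_add_ph (hypothetical helper):
_Float16 reduce_add_ph_model(const _Float16 v[8]) {
  _Float16 acc = (_Float16)-0.0f; // the 0xH8000 seed
  for (int i = 0; i < 8; ++i)
    acc = acc + v[i];             // the backend may re-tree this under reassoc
  return acc;
}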
// Tests below are for alias intrinsics.
__m128h test_mm_mul_pch(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mul_pch
+ // CHECK-LABEL: test_mm_mul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.128
return _mm_mul_pch(__A, __B);
}
__m128h test_mm_mask_mul_pch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_mul_pch
+ // CHECK-LABEL: test_mm_mask_mul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.128
return _mm_mask_mul_pch(__W, __U, __A, __B);
}
__m128h test_mm_maskz_mul_pch(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_mul_pch
+ // CHECK-LABEL: test_mm_maskz_mul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.128
return _mm_maskz_mul_pch(__U, __A, __B);
}
__m256h test_mm256_mul_pch(__m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_mul_pch
+ // CHECK-LABEL: test_mm256_mul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.256
return _mm256_mul_pch(__A, __B);
}
__m256h test_mm256_mask_mul_pch(__m256h __W, __mmask8 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_mask_mul_pch
+ // CHECK-LABEL: test_mm256_mask_mul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.256
return _mm256_mask_mul_pch(__W, __U, __A, __B);
}
__m256h test_mm256_maskz_mul_pch(__mmask8 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_maskz_mul_pch
+ // CHECK-LABEL: test_mm256_maskz_mul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfmul.cph.256
return _mm256_maskz_mul_pch(__U, __A, __B);
}
__m128h test_mm_cmul_pch(__m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_cmul_pch
+ // CHECK-LABEL: test_mm_cmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.128
return _mm_cmul_pch(__A, __B);
}
__m128h test_mm_mask_cmul_pch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_mask_cmul_pch
+ // CHECK-LABEL: test_mm_mask_cmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.128
return _mm_mask_fcmul_pch(__W, __U, __A, __B);
}
__m128h test_mm_maskz_cmul_pch(__mmask8 __U, __m128h __A, __m128h __B) {
- // CHECK-LABEL: @test_mm_maskz_cmul_pch
+ // CHECK-LABEL: test_mm_maskz_cmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.128
return _mm_maskz_cmul_pch(__U, __A, __B);
}
__m256h test_mm256_cmul_pch(__m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_cmul_pch
+ // CHECK-LABEL: test_mm256_cmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.256
return _mm256_cmul_pch(__A, __B);
}
__m256h test_mm256_mask_cmul_pch(__m256h __W, __mmask8 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_mask_cmul_pch
+ // CHECK-LABEL: test_mm256_mask_cmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.256
return _mm256_mask_cmul_pch(__W, __U, __A, __B);
}
__m256h test_mm256_maskz_cmul_pch(__mmask8 __U, __m256h __A, __m256h __B) {
- // CHECK-LABEL: @test_mm256_maskz_cmul_pch
+ // CHECK-LABEL: test_mm256_maskz_cmul_pch
// CHECK: @llvm.x86.avx512fp16.mask.vfcmul.cph.256
return _mm256_maskz_cmul_pch(__U, __A, __B);
}
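// The file below shows both halves of this change side by side: the RUN lines
// gain -x c++ (and i386) variants, and every CHECK-LABEL drops its leading
// '@'. Under C++ the test functions are name-mangled, e.g. (approximate)
// @_Z27test_mm_mask_compress_epi16Dv2_xhS_, so '@test_...' no longer matches
// the definition line, while the bare 'test_...' substring matches the C and
// C++ symbols alike, letting one set of checks serve all four RUN lines.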
diff --git a/clang/test/CodeGen/X86/avx512vlvbmi2-builtins.c b/clang/test/CodeGen/X86/avx512vlvbmi2-builtins.c
index 5760c71..7259325 100644
--- a/clang/test/CodeGen/X86/avx512vlvbmi2-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlvbmi2-builtins.c
@@ -1,652 +1,655 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vl -target-feature +avx512vbmi2 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vl -target-feature +avx512vbmi2 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vl -target-feature +avx512vbmi2 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vl -target-feature +avx512vbmi2 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vl -target-feature +avx512vbmi2 -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
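// A rough scalar model of the compress/expand pair exercised below
// (hypothetical helpers, fixed at the 8 x i16 width for brevity):
void compress8_model(short dst[8], const short src[8], unsigned char mask) {
  int j = 0;
  for (int i = 0; i < 8; ++i)  // pack selected lanes toward lane 0
    if (mask & (1u << i))
      dst[j++] = src[i];
  // lanes past j keep the passthrough source (merge) or become 0 (maskz)
}
void expand8_model(short dst[8], const short src[8], unsigned char mask) {
  int j = 0;
  for (int i = 0; i < 8; ++i)  // unpack lanes 0.. into the selected slots
    if (mask & (1u << i))
      dst[i] = src[j++];
  // unselected lanes keep the passthrough (merge) or become 0 (maskz)
}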
__m128i test_mm_mask_compress_epi16(__m128i __S, __mmask8 __U, __m128i __D) {
- // CHECK-LABEL: @test_mm_mask_compress_epi16
- // CHECK: @llvm.x86.avx512.mask.compress
+ // CHECK-LABEL: test_mm_mask_compress_epi16
+ // CHECK: call <8 x i16> @llvm.x86.avx512.mask.compress.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i1> %{{.*}})
return _mm_mask_compress_epi16(__S, __U, __D);
}
__m128i test_mm_maskz_compress_epi16(__mmask8 __U, __m128i __D) {
- // CHECK-LABEL: @test_mm_maskz_compress_epi16
- // CHECK: @llvm.x86.avx512.mask.compress
+ // CHECK-LABEL: test_mm_maskz_compress_epi16
+ // CHECK: call <8 x i16> @llvm.x86.avx512.mask.compress.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i1> %{{.*}})
return _mm_maskz_compress_epi16(__U, __D);
}
__m128i test_mm_mask_compress_epi8(__m128i __S, __mmask16 __U, __m128i __D) {
- // CHECK-LABEL: @test_mm_mask_compress_epi8
- // CHECK: @llvm.x86.avx512.mask.compress
+ // CHECK-LABEL: test_mm_mask_compress_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.mask.compress.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i1> %{{.*}})
return _mm_mask_compress_epi8(__S, __U, __D);
}
__m128i test_mm_maskz_compress_epi8(__mmask16 __U, __m128i __D) {
- // CHECK-LABEL: @test_mm_maskz_compress_epi8
- // CHECK: @llvm.x86.avx512.mask.compress
+ // CHECK-LABEL: test_mm_maskz_compress_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.mask.compress.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i1> %{{.*}})
return _mm_maskz_compress_epi8(__U, __D);
}
void test_mm_mask_compressstoreu_epi16(void *__P, __mmask8 __U, __m128i __D) {
- // CHECK-LABEL: @test_mm_mask_compressstoreu_epi16
- // CHECK: @llvm.masked.compressstore.v8i16(<8 x i16> %{{.*}}, ptr %{{.*}}, <8 x i1> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_compressstoreu_epi16
+ // CHECK: call void @llvm.masked.compressstore.v8i16(<8 x i16> %{{.*}}, ptr %{{.*}}, <8 x i1> %{{.*}})
_mm_mask_compressstoreu_epi16(__P, __U, __D);
}
void test_mm_mask_compressstoreu_epi8(void *__P, __mmask16 __U, __m128i __D) {
- // CHECK-LABEL: @test_mm_mask_compressstoreu_epi8
- // CHECK: @llvm.masked.compressstore.v16i8(<16 x i8> %{{.*}}, ptr %{{.*}}, <16 x i1> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_compressstoreu_epi8
+ // CHECK: call void @llvm.masked.compressstore.v16i8(<16 x i8> %{{.*}}, ptr %{{.*}}, <16 x i1> %{{.*}})
_mm_mask_compressstoreu_epi8(__P, __U, __D);
}
__m128i test_mm_mask_expand_epi16(__m128i __S, __mmask8 __U, __m128i __D) {
- // CHECK-LABEL: @test_mm_mask_expand_epi16
- // CHECK: @llvm.x86.avx512.mask.expand
+ // CHECK-LABEL: test_mm_mask_expand_epi16
+ // CHECK: call <8 x i16> @llvm.x86.avx512.mask.expand.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i1> %{{.*}})
return _mm_mask_expand_epi16(__S, __U, __D);
}
__m128i test_mm_maskz_expand_epi16(__mmask8 __U, __m128i __D) {
- // CHECK-LABEL: @test_mm_maskz_expand_epi16
- // CHECK: @llvm.x86.avx512.mask.expand
+ // CHECK-LABEL: test_mm_maskz_expand_epi16
+ // CHECK: call <8 x i16> @llvm.x86.avx512.mask.expand.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i1> %{{.*}})
return _mm_maskz_expand_epi16(__U, __D);
}
__m128i test_mm_mask_expand_epi8(__m128i __S, __mmask16 __U, __m128i __D) {
- // CHECK-LABEL: @test_mm_mask_expand_epi8
- // CHECK: @llvm.x86.avx512.mask.expand
+ // CHECK-LABEL: test_mm_mask_expand_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.mask.expand.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i1> %{{.*}})
return _mm_mask_expand_epi8(__S, __U, __D);
}
__m128i test_mm_maskz_expand_epi8(__mmask16 __U, __m128i __D) {
- // CHECK-LABEL: @test_mm_maskz_expand_epi8
- // CHECK: @llvm.x86.avx512.mask.expand
+ // CHECK-LABEL: test_mm_maskz_expand_epi8
+ // CHECK: call <16 x i8> @llvm.x86.avx512.mask.expand.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i1> %{{.*}})
return _mm_maskz_expand_epi8(__U, __D);
}
__m128i test_mm_mask_expandloadu_epi16(__m128i __S, __mmask8 __U, void const* __P) {
- // CHECK-LABEL: @test_mm_mask_expandloadu_epi16
- // CHECK: @llvm.masked.expandload.v8i16(ptr %{{.*}}, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_expandloadu_epi16
+ // CHECK: call <8 x i16> @llvm.masked.expandload.v8i16(ptr %{{.*}}, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
return _mm_mask_expandloadu_epi16(__S, __U, __P);
}
__m128i test_mm_maskz_expandloadu_epi16(__mmask8 __U, void const* __P) {
- // CHECK-LABEL: @test_mm_maskz_expandloadu_epi16
- // CHECK: @llvm.masked.expandload.v8i16(ptr %{{.*}}, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_expandloadu_epi16
+ // CHECK: call <8 x i16> @llvm.masked.expandload.v8i16(ptr %{{.*}}, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
return _mm_maskz_expandloadu_epi16(__U, __P);
}
__m128i test_mm_mask_expandloadu_epi8(__m128i __S, __mmask16 __U, void const* __P) {
- // CHECK-LABEL: @test_mm_mask_expandloadu_epi8
- // CHECK: @llvm.masked.expandload.v16i8(ptr %{{.*}}, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_expandloadu_epi8
+ // CHECK: call <16 x i8> @llvm.masked.expandload.v16i8(ptr %{{.*}}, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
return _mm_mask_expandloadu_epi8(__S, __U, __P);
}
__m128i test_mm_maskz_expandloadu_epi8(__mmask16 __U, void const* __P) {
- // CHECK-LABEL: @test_mm_maskz_expandloadu_epi8
- // CHECK: @llvm.masked.expandload.v16i8(ptr %{{.*}}, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_expandloadu_epi8
+ // CHECK: call <16 x i8> @llvm.masked.expandload.v16i8(ptr %{{.*}}, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
return _mm_maskz_expandloadu_epi8(__U, __P);
}
__m256i test_mm256_mask_compress_epi16(__m256i __S, __mmask16 __U, __m256i __D) {
- // CHECK-LABEL: @test_mm256_mask_compress_epi16
- // CHECK: @llvm.x86.avx512.mask.compress
+ // CHECK-LABEL: test_mm256_mask_compress_epi16
+ // CHECK: call <16 x i16> @llvm.x86.avx512.mask.compress.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i1> %{{.*}})
return _mm256_mask_compress_epi16(__S, __U, __D);
}
__m256i test_mm256_maskz_compress_epi16(__mmask16 __U, __m256i __D) {
- // CHECK-LABEL: @test_mm256_maskz_compress_epi16
- // CHECK: @llvm.x86.avx512.mask.compress
+ // CHECK-LABEL: test_mm256_maskz_compress_epi16
+ // CHECK: call <16 x i16> @llvm.x86.avx512.mask.compress.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i1> %{{.*}})
return _mm256_maskz_compress_epi16(__U, __D);
}
__m256i test_mm256_mask_compress_epi8(__m256i __S, __mmask32 __U, __m256i __D) {
- // CHECK-LABEL: @test_mm256_mask_compress_epi8
- // CHECK: @llvm.x86.avx512.mask.compress
+ // CHECK-LABEL: test_mm256_mask_compress_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.mask.compress.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i1> %{{.*}})
return _mm256_mask_compress_epi8(__S, __U, __D);
}
__m256i test_mm256_maskz_compress_epi8(__mmask32 __U, __m256i __D) {
- // CHECK-LABEL: @test_mm256_maskz_compress_epi8
- // CHECK: @llvm.x86.avx512.mask.compress
+ // CHECK-LABEL: test_mm256_maskz_compress_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.mask.compress.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i1> %{{.*}})
return _mm256_maskz_compress_epi8(__U, __D);
}
void test_mm256_mask_compressstoreu_epi16(void *__P, __mmask16 __U, __m256i __D) {
- // CHECK-LABEL: @test_mm256_mask_compressstoreu_epi16
- // CHECK: @llvm.masked.compressstore.v16i16(<16 x i16> %{{.*}}, ptr %{{.*}}, <16 x i1> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_compressstoreu_epi16
+ // CHECK: call void @llvm.masked.compressstore.v16i16(<16 x i16> %{{.*}}, ptr %{{.*}}, <16 x i1> %{{.*}})
_mm256_mask_compressstoreu_epi16(__P, __U, __D);
}
void test_mm256_mask_compressstoreu_epi8(void *__P, __mmask32 __U, __m256i __D) {
- // CHECK-LABEL: @test_mm256_mask_compressstoreu_epi8
- // CHECK: @llvm.masked.compressstore.v32i8(<32 x i8> %{{.*}}, ptr %{{.*}}, <32 x i1> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_compressstoreu_epi8
+ // CHECK: call void @llvm.masked.compressstore.v32i8(<32 x i8> %{{.*}}, ptr %{{.*}}, <32 x i1> %{{.*}})
_mm256_mask_compressstoreu_epi8(__P, __U, __D);
}
__m256i test_mm256_mask_expand_epi16(__m256i __S, __mmask16 __U, __m256i __D) {
- // CHECK-LABEL: @test_mm256_mask_expand_epi16
- // CHECK: @llvm.x86.avx512.mask.expand
+ // CHECK-LABEL: test_mm256_mask_expand_epi16
+ // CHECK: call <16 x i16> @llvm.x86.avx512.mask.expand.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i1> %{{.*}})
return _mm256_mask_expand_epi16(__S, __U, __D);
}
__m256i test_mm256_maskz_expand_epi16(__mmask16 __U, __m256i __D) {
- // CHECK-LABEL: @test_mm256_maskz_expand_epi16
- // CHECK: @llvm.x86.avx512.mask.expand
+ // CHECK-LABEL: test_mm256_maskz_expand_epi16
+ // CHECK: call <16 x i16> @llvm.x86.avx512.mask.expand.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i1> %{{.*}})
return _mm256_maskz_expand_epi16(__U, __D);
}
__m256i test_mm256_mask_expand_epi8(__m256i __S, __mmask32 __U, __m256i __D) {
- // CHECK-LABEL: @test_mm256_mask_expand_epi8
- // CHECK: @llvm.x86.avx512.mask.expand
+ // CHECK-LABEL: test_mm256_mask_expand_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.mask.expand.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i1> %{{.*}})
return _mm256_mask_expand_epi8(__S, __U, __D);
}
__m256i test_mm256_maskz_expand_epi8(__mmask32 __U, __m256i __D) {
- // CHECK-LABEL: @test_mm256_maskz_expand_epi8
- // CHECK: @llvm.x86.avx512.mask.expand
+ // CHECK-LABEL: test_mm256_maskz_expand_epi8
+ // CHECK: call <32 x i8> @llvm.x86.avx512.mask.expand.v32i8(<32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i1> %{{.*}})
return _mm256_maskz_expand_epi8(__U, __D);
}
__m256i test_mm256_mask_expandloadu_epi16(__m256i __S, __mmask16 __U, void const* __P) {
- // CHECK-LABEL: @test_mm256_mask_expandloadu_epi16
- // CHECK: @llvm.masked.expandload.v16i16(ptr %{{.*}}, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_expandloadu_epi16
+ // CHECK: call <16 x i16> @llvm.masked.expandload.v16i16(ptr %{{.*}}, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_mask_expandloadu_epi16(__S, __U, __P);
}
__m256i test_mm256_maskz_expandloadu_epi16(__mmask16 __U, void const* __P) {
- // CHECK-LABEL: @test_mm256_maskz_expandloadu_epi16
- // CHECK: @llvm.masked.expandload.v16i16(ptr %{{.*}}, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_expandloadu_epi16
+ // CHECK: call <16 x i16> @llvm.masked.expandload.v16i16(ptr %{{.*}}, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_maskz_expandloadu_epi16(__U, __P);
}
__m256i test_mm256_mask_expandloadu_epi8(__m256i __S, __mmask32 __U, void const* __P) {
- // CHECK-LABEL: @test_mm256_mask_expandloadu_epi8
- // CHECK: @llvm.masked.expandload.v32i8(ptr %{{.*}}, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_expandloadu_epi8
+ // CHECK: call <32 x i8> @llvm.masked.expandload.v32i8(ptr %{{.*}}, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_mask_expandloadu_epi8(__S, __U, __P);
}
__m256i test_mm256_maskz_expandloadu_epi8(__mmask32 __U, void const* __P) {
- // CHECK-LABEL: @test_mm256_maskz_expandloadu_epi8
- // CHECK: @llvm.masked.expandload.v32i8(ptr %{{.*}}, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_expandloadu_epi8
+ // CHECK: call <32 x i8> @llvm.masked.expandload.v32i8(ptr %{{.*}}, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_maskz_expandloadu_epi8(__U, __P);
}
__m256i test_mm256_mask_shldi_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shldi_epi64
- // CHECK: @llvm.fshl.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> splat (i64 47))
+ // CHECK-LABEL: test_mm256_mask_shldi_epi64
+ // CHECK: call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> splat (i64 47))
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_shldi_epi64(__S, __U, __A, __B, 47);
}
__m256i test_mm256_maskz_shldi_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shldi_epi64
- // CHECK: @llvm.fshl.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> splat (i64 63))
+ // CHECK-LABEL: test_mm256_maskz_shldi_epi64
+ // CHECK: call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> splat (i64 63))
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_shldi_epi64(__U, __A, __B, 63);
}
__m256i test_mm256_shldi_epi64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shldi_epi64
- // CHECK: @llvm.fshl.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> splat (i64 31))
+ // CHECK-LABEL: test_mm256_shldi_epi64
+ // CHECK: call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> splat (i64 31))
return _mm256_shldi_epi64(__A, __B, 31);
}
__m128i test_mm_mask_shldi_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_shldi_epi64
- // CHECK: @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 47))
+ // CHECK-LABEL: test_mm_mask_shldi_epi64
+ // CHECK: call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 47))
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_shldi_epi64(__S, __U, __A, __B, 47);
}
__m128i test_mm_maskz_shldi_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_shldi_epi64
- // CHECK: @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 63))
+ // CHECK-LABEL: test_mm_maskz_shldi_epi64
+ // CHECK: call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 63))
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_shldi_epi64(__U, __A, __B, 63);
}
__m128i test_mm_shldi_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_shldi_epi64
- // CHECK: @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 31))
+ // CHECK-LABEL: test_mm_shldi_epi64
+ // CHECK: call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 31))
return _mm_shldi_epi64(__A, __B, 31);
}
__m256i test_mm256_mask_shldi_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shldi_epi32
- // CHECK: @llvm.fshl.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> splat (i32 7))
+ // CHECK-LABEL: test_mm256_mask_shldi_epi32
+ // CHECK: call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> splat (i32 7))
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_shldi_epi32(__S, __U, __A, __B, 7);
}
__m256i test_mm256_maskz_shldi_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shldi_epi32
- // CHECK: @llvm.fshl.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> splat (i32 15))
+ // CHECK-LABEL: test_mm256_maskz_shldi_epi32
+ // CHECK: call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> splat (i32 15))
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_shldi_epi32(__U, __A, __B, 15);
}
__m256i test_mm256_shldi_epi32(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shldi_epi32
- // CHECK: @llvm.fshl.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> splat (i32 31))
+ // CHECK-LABEL: test_mm256_shldi_epi32
+ // CHECK: call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> splat (i32 31))
return _mm256_shldi_epi32(__A, __B, 31);
}
__m128i test_mm_mask_shldi_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_shldi_epi32
- // CHECK: @llvm.fshl.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> splat (i32 7))
+ // CHECK-LABEL: test_mm_mask_shldi_epi32
+ // CHECK: call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> splat (i32 7))
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_shldi_epi32(__S, __U, __A, __B, 7);
}
__m128i test_mm_maskz_shldi_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_shldi_epi32
- // CHECK: @llvm.fshl.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> splat (i32 15))
+ // CHECK-LABEL: test_mm_maskz_shldi_epi32
+ // CHECK: call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> splat (i32 15))
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_shldi_epi32(__U, __A, __B, 15);
}
__m128i test_mm_shldi_epi32(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_shldi_epi32
- // CHECK: @llvm.fshl.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> splat (i32 31))
+ // CHECK-LABEL: test_mm_shldi_epi32
+ // CHECK: call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> splat (i32 31))
return _mm_shldi_epi32(__A, __B, 31);
}
__m256i test_mm256_mask_shldi_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shldi_epi16
- // CHECK: @llvm.fshl.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> splat (i16 3))
+ // CHECK-LABEL: test_mm256_mask_shldi_epi16
+ // CHECK: call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> splat (i16 3))
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_shldi_epi16(__S, __U, __A, __B, 3);
}
__m256i test_mm256_maskz_shldi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shldi_epi16
- // CHECK: @llvm.fshl.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> splat (i16 7))
+ // CHECK-LABEL: test_mm256_maskz_shldi_epi16
+ // CHECK: call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> splat (i16 7))
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_shldi_epi16(__U, __A, __B, 7);
}
__m256i test_mm256_shldi_epi16(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shldi_epi16
- // CHECK: @llvm.fshl.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> splat (i16 31))
+ // CHECK-LABEL: test_mm256_shldi_epi16
+ // CHECK: call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> splat (i16 31))
return _mm256_shldi_epi16(__A, __B, 31);
}
__m128i test_mm_mask_shldi_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_shldi_epi16
- // CHECK: @llvm.fshl.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> splat (i16 3))
+ // CHECK-LABEL: test_mm_mask_shldi_epi16
+ // CHECK: call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> splat (i16 3))
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_shldi_epi16(__S, __U, __A, __B, 3);
}
__m128i test_mm_maskz_shldi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_shldi_epi16
- // CHECK: @llvm.fshl.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> splat (i16 7))
+ // CHECK-LABEL: test_mm_maskz_shldi_epi16
+ // CHECK: call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> splat (i16 7))
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_shldi_epi16(__U, __A, __B, 7);
}
__m128i test_mm_shldi_epi16(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_shldi_epi16
- // CHECK: @llvm.fshl.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> splat (i16 31))
+ // CHECK-LABEL: test_mm_shldi_epi16
+ // CHECK: call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> splat (i16 31))
return _mm_shldi_epi16(__A, __B, 31);
}
__m256i test_mm256_mask_shrdi_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shrdi_epi64
- // CHECK: @llvm.fshr.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> splat (i64 47))
+ // CHECK-LABEL: test_mm256_mask_shrdi_epi64
+ // CHECK: call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> splat (i64 47))
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_shrdi_epi64(__S, __U, __A, __B, 47);
}
__m256i test_mm256_maskz_shrdi_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shrdi_epi64
- // CHECK: @llvm.fshr.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> splat (i64 63))
+ // CHECK-LABEL: test_mm256_maskz_shrdi_epi64
+ // CHECK: call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> splat (i64 63))
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_shrdi_epi64(__U, __A, __B, 63);
}
__m256i test_mm256_shrdi_epi64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shrdi_epi64
- // CHECK: @llvm.fshr.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> splat (i64 31)
+ // CHECK-LABEL: test_mm256_shrdi_epi64
+ // CHECK: call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> splat (i64 31)
return _mm256_shrdi_epi64(__A, __B, 31);
}
__m128i test_mm_mask_shrdi_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_shrdi_epi64
- // CHECK: @llvm.fshr.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 47))
+ // CHECK-LABEL: test_mm_mask_shrdi_epi64
+ // CHECK: call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 47))
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_shrdi_epi64(__S, __U, __A, __B, 47);
}
__m128i test_mm_maskz_shrdi_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_shrdi_epi64
- // CHECK: @llvm.fshr.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 63))
+ // CHECK-LABEL: test_mm_maskz_shrdi_epi64
+ // CHECK: call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 63))
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_shrdi_epi64(__U, __A, __B, 63);
}
__m128i test_mm_shrdi_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_shrdi_epi64
- // CHECK: @llvm.fshr.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 31))
+ // CHECK-LABEL: test_mm_shrdi_epi64
+ // CHECK: call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 31))
return _mm_shrdi_epi64(__A, __B, 31);
}
__m256i test_mm256_mask_shrdi_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shrdi_epi32
- // CHECK: @llvm.fshr.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> splat (i32 7))
+ // CHECK-LABEL: test_mm256_mask_shrdi_epi32
+ // CHECK: call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> splat (i32 7))
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_shrdi_epi32(__S, __U, __A, __B, 7);
}
__m256i test_mm256_maskz_shrdi_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shrdi_epi32
- // CHECK: @llvm.fshr.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> splat (i32 15))
+ // CHECK-LABEL: test_mm256_maskz_shrdi_epi32
+ // CHECK: call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> splat (i32 15))
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_shrdi_epi32(__U, __A, __B, 15);
}
__m256i test_mm256_shrdi_epi32(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shrdi_epi32
- // CHECK: @llvm.fshr.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> splat (i32 31)
+ // CHECK-LABEL: test_mm256_shrdi_epi32
+ // CHECK: call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> splat (i32 31)
return _mm256_shrdi_epi32(__A, __B, 31);
}
__m128i test_mm_mask_shrdi_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_shrdi_epi32
- // CHECK: @llvm.fshr.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> splat (i32 7))
+ // CHECK-LABEL: test_mm_mask_shrdi_epi32
+ // CHECK: call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> splat (i32 7))
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_shrdi_epi32(__S, __U, __A, __B, 7);
}
__m128i test_mm_maskz_shrdi_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_shrdi_epi32
- // CHECK: @llvm.fshr.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> splat (i32 15))
+ // CHECK-LABEL: test_mm_maskz_shrdi_epi32
+ // CHECK: call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> splat (i32 15))
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_shrdi_epi32(__U, __A, __B, 15);
}
__m128i test_mm_shrdi_epi32(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_shrdi_epi32
- // CHECK: @llvm.fshr.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> splat (i32 31))
+ // CHECK-LABEL: test_mm_shrdi_epi32
+ // CHECK: call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> splat (i32 31))
return _mm_shrdi_epi32(__A, __B, 31);
}
__m256i test_mm256_mask_shrdi_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shrdi_epi16
- // CHECK: @llvm.fshr.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> splat (i16 3))
+ // CHECK-LABEL: test_mm256_mask_shrdi_epi16
+ // CHECK: call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> splat (i16 3))
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_shrdi_epi16(__S, __U, __A, __B, 3);
}
__m256i test_mm256_maskz_shrdi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shrdi_epi16
- // CHECK: @llvm.fshr.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> splat (i16 7))
+ // CHECK-LABEL: test_mm256_maskz_shrdi_epi16
+ // CHECK: call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> splat (i16 7))
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_shrdi_epi16(__U, __A, __B, 7);
}
__m256i test_mm256_shrdi_epi16(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shrdi_epi16
- // CHECK: @llvm.fshr.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> splat (i16 31))
+ // CHECK-LABEL: test_mm256_shrdi_epi16
+ // CHECK: call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> splat (i16 31))
return _mm256_shrdi_epi16(__A, __B, 31);
}
__m128i test_mm_mask_shrdi_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_shrdi_epi16
- // CHECK: @llvm.fshr.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> splat (i16 3))
+ // CHECK-LABEL: test_mm_mask_shrdi_epi16
+ // CHECK: call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> splat (i16 3))
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_shrdi_epi16(__S, __U, __A, __B, 3);
}
__m128i test_mm_maskz_shrdi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_shrdi_epi16
- // CHECK: @llvm.fshr.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> splat (i16 7))
+ // CHECK-LABEL: test_mm_maskz_shrdi_epi16
+ // CHECK: call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> splat (i16 7))
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_shrdi_epi16(__U, __A, __B, 7);
}
__m128i test_mm_shrdi_epi16(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_shrdi_epi16
- // CHECK: @llvm.fshr.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> splat (i16 31))
+ // CHECK-LABEL: test_mm_shrdi_epi16
+ // CHECK: call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> splat (i16 31))
return _mm_shrdi_epi16(__A, __B, 31);
}
__m256i test_mm256_mask_shldv_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shldv_epi64
- // CHECK: @llvm.fshl.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_shldv_epi64
+ // CHECK: call {{.*}}<4 x i64> @llvm.fshl.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_shldv_epi64(__S, __U, __A, __B);
}
__m256i test_mm256_maskz_shldv_epi64(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shldv_epi64
- // CHECK: @llvm.fshl.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_shldv_epi64
+ // CHECK: call {{.*}}<4 x i64> @llvm.fshl.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_shldv_epi64(__U, __S, __A, __B);
}
__m256i test_mm256_shldv_epi64(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shldv_epi64
- // CHECK: @llvm.fshl.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_shldv_epi64
+ // CHECK: call {{.*}}<4 x i64> @llvm.fshl.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_shldv_epi64(__S, __A, __B);
}
__m128i test_mm_mask_shldv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_shldv_epi64
- // CHECK: @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_shldv_epi64
+ // CHECK: call {{.*}}<2 x i64> @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_shldv_epi64(__S, __U, __A, __B);
}
__m128i test_mm_maskz_shldv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_shldv_epi64
- // CHECK: @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_shldv_epi64
+ // CHECK: call {{.*}}<2 x i64> @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_shldv_epi64(__U, __S, __A, __B);
}
__m128i test_mm_shldv_epi64(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_shldv_epi64
- // CHECK: @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_shldv_epi64
+ // CHECK: call {{.*}}<2 x i64> @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_shldv_epi64(__S, __A, __B);
}
__m256i test_mm256_mask_shldv_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shldv_epi32
- // CHECK: @llvm.fshl.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_shldv_epi32
+ // CHECK: call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_shldv_epi32(__S, __U, __A, __B);
}
__m256i test_mm256_maskz_shldv_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shldv_epi32
- // CHECK: @llvm.fshl.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_shldv_epi32
+ // CHECK: call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_shldv_epi32(__U, __S, __A, __B);
}
__m256i test_mm256_shldv_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shldv_epi32
- // CHECK: @llvm.fshl.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm256_shldv_epi32
+ // CHECK: call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_shldv_epi32(__S, __A, __B);
}
__m128i test_mm_mask_shldv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_shldv_epi32
- // CHECK: @llvm.fshl.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_shldv_epi32
+ // CHECK: call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_shldv_epi32(__S, __U, __A, __B);
}
__m128i test_mm_maskz_shldv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_shldv_epi32
- // CHECK: @llvm.fshl.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_shldv_epi32
+ // CHECK: call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_shldv_epi32(__U, __S, __A, __B);
}
__m128i test_mm_shldv_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_shldv_epi32
- // CHECK: @llvm.fshl.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm_shldv_epi32
+ // CHECK: call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_shldv_epi32(__S, __A, __B);
}
__m256i test_mm256_mask_shldv_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shldv_epi16
- // CHECK: @llvm.fshl.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_shldv_epi16
+ // CHECK: call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_shldv_epi16(__S, __U, __A, __B);
}
__m256i test_mm256_maskz_shldv_epi16(__mmask16 __U, __m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shldv_epi16
- // CHECK: @llvm.fshl.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_shldv_epi16
+ // CHECK: call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_shldv_epi16(__U, __S, __A, __B);
}
__m256i test_mm256_shldv_epi16(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shldv_epi16
- // CHECK: @llvm.fshl.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm256_shldv_epi16
+ // CHECK: call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_shldv_epi16(__S, __A, __B);
}
__m128i test_mm_mask_shldv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_shldv_epi16
- // CHECK: @llvm.fshl.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_shldv_epi16
+ // CHECK: call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_shldv_epi16(__S, __U, __A, __B);
}
__m128i test_mm_maskz_shldv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_shldv_epi16
- // CHECK: @llvm.fshl.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_shldv_epi16
+ // CHECK: call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_shldv_epi16(__U, __S, __A, __B);
}
__m128i test_mm_shldv_epi16(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_shldv_epi16
- // CHECK: @llvm.fshl.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm_shldv_epi16
+ // CHECK: call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_shldv_epi16(__S, __A, __B);
}
__m256i test_mm256_mask_shrdv_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shrdv_epi64
- // CHECK: @llvm.fshr.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_shrdv_epi64
+ // CHECK: call {{.*}}<4 x i64> @llvm.fshr.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_shrdv_epi64(__S, __U, __A, __B);
}
__m256i test_mm256_maskz_shrdv_epi64(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shrdv_epi64
- // CHECK: @llvm.fshr.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_shrdv_epi64
+ // CHECK: call {{.*}}<4 x i64> @llvm.fshr.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_shrdv_epi64(__U, __S, __A, __B);
}
__m256i test_mm256_shrdv_epi64(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shrdv_epi64
- // CHECK: @llvm.fshr.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_shrdv_epi64
+ // CHECK: call {{.*}}<4 x i64> @llvm.fshr.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_shrdv_epi64(__S, __A, __B);
}
__m128i test_mm_mask_shrdv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_shrdv_epi64
- // CHECK: @llvm.fshr.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_shrdv_epi64
+ // CHECK: call {{.*}}<2 x i64> @llvm.fshr.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_shrdv_epi64(__S, __U, __A, __B);
}
__m128i test_mm_maskz_shrdv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_shrdv_epi64
- // CHECK: @llvm.fshr.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_shrdv_epi64
+ // CHECK: call {{.*}}<2 x i64> @llvm.fshr.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_shrdv_epi64(__U, __S, __A, __B);
}
__m128i test_mm_shrdv_epi64(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_shrdv_epi64
- // CHECK: @llvm.fshr.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_shrdv_epi64
+ // CHECK: call {{.*}}<2 x i64> @llvm.fshr.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_shrdv_epi64(__S, __A, __B);
}
__m256i test_mm256_mask_shrdv_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shrdv_epi32
- // CHECK: @llvm.fshr.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_shrdv_epi32
+ // CHECK: call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_shrdv_epi32(__S, __U, __A, __B);
}
__m256i test_mm256_maskz_shrdv_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shrdv_epi32
- // CHECK: @llvm.fshr.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_shrdv_epi32
+ // CHECK: call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_shrdv_epi32(__U, __S, __A, __B);
}
__m256i test_mm256_shrdv_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shrdv_epi32
- // CHECK: @llvm.fshr.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm256_shrdv_epi32
+ // CHECK: call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_shrdv_epi32(__S, __A, __B);
}
__m128i test_mm_mask_shrdv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_shrdv_epi32
- // CHECK: @llvm.fshr.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_shrdv_epi32
+ // CHECK: call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_shrdv_epi32(__S, __U, __A, __B);
}
__m128i test_mm_maskz_shrdv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_shrdv_epi32
- // CHECK: @llvm.fshr.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_shrdv_epi32
+ // CHECK: call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_shrdv_epi32(__U, __S, __A, __B);
}
__m128i test_mm_shrdv_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_shrdv_epi32
- // CHECK: @llvm.fshr.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK-LABEL: test_mm_shrdv_epi32
+ // CHECK: call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_shrdv_epi32(__S, __A, __B);
}
__m256i test_mm256_mask_shrdv_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shrdv_epi16
- // CHECK: @llvm.fshr.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_shrdv_epi16
+ // CHECK: call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_shrdv_epi16(__S, __U, __A, __B);
}
__m256i test_mm256_maskz_shrdv_epi16(__mmask16 __U, __m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shrdv_epi16
- // CHECK: @llvm.fshr.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_shrdv_epi16
+ // CHECK: call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_shrdv_epi16(__U, __S, __A, __B);
}
__m256i test_mm256_shrdv_epi16(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shrdv_epi16
- // CHECK: @llvm.fshr.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm256_shrdv_epi16
+ // CHECK: call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_shrdv_epi16(__S, __A, __B);
}
__m128i test_mm_mask_shrdv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_shrdv_epi16
- // CHECK: @llvm.fshr.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_shrdv_epi16
+ // CHECK: call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_shrdv_epi16(__S, __U, __A, __B);
}
__m128i test_mm_maskz_shrdv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_shrdv_epi16
- // CHECK: @llvm.fshr.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_shrdv_epi16
+ // CHECK: call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_shrdv_epi16(__U, __S, __A, __B);
}
__m128i test_mm_shrdv_epi16(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_shrdv_epi16
- // CHECK: @llvm.fshr.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK-LABEL: test_mm_shrdv_epi16
+ // CHECK: call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_shrdv_epi16(__S, __A, __B);
}
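The shldi/shrdi and shldv/shrdv tests above verify that the double-shift intrinsics lower to the generic llvm.fshl/llvm.fshr funnel-shift intrinsics. As a minimal scalar sketch of what one 32-bit lane of those IR calls computes (the ref_* helpers are hypothetical, not part of the test suite):

#include <stdint.h>

/* Hypothetical one-lane model of llvm.fshl/llvm.fshr: concatenate hi:lo
 * into a 64-bit window, shift, and keep the high (fshl) or low (fshr)
 * word. The shift count is reduced modulo the element width, and the
 * count==0 case is special-cased to avoid an undefined shift by 32. */
static uint32_t ref_fshl32(uint32_t hi, uint32_t lo, uint32_t amt) {
    amt &= 31;
    return amt ? (hi << amt) | (lo >> (32 - amt)) : hi;
}

static uint32_t ref_fshr32(uint32_t hi, uint32_t lo, uint32_t amt) {
    amt &= 31;
    return amt ? (lo >> amt) | (hi << (32 - amt)) : lo;
}

Under this model, _mm_shldi_epi32(A, B, 31) computes ref_fshl32(a_i, b_i, 31) in each lane, matching the splat (i32 31) shift operand in the CHECK lines above.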
diff --git a/clang/test/CodeGen/X86/avx512vlvnni-builtins.c b/clang/test/CodeGen/X86/avx512vlvnni-builtins.c
index a69412d..3de4cca 100644
--- a/clang/test/CodeGen/X86/avx512vlvnni-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlvnni-builtins.c
@@ -1,164 +1,167 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vnni -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vnni -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vnni -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vnni -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vnni -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
__m256i test_mm256_mask_dpbusd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_dpbusd_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusd.256
+ // CHECK-LABEL: test_mm256_mask_dpbusd_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpbusd.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_dpbusd_epi32(__S, __U, __A, __B);
}
__m256i test_mm256_maskz_dpbusd_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_dpbusd_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusd.256
+ // CHECK-LABEL: test_mm256_maskz_dpbusd_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpbusd.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_dpbusd_epi32(__U, __S, __A, __B);
}
__m256i test_mm256_dpbusd_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_dpbusd_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusd.256
+ // CHECK-LABEL: test_mm256_dpbusd_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpbusd.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpbusd_epi32(__S, __A, __B);
}
__m256i test_mm256_mask_dpbusds_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_dpbusds_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusds.256
+ // CHECK-LABEL: test_mm256_mask_dpbusds_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpbusds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_dpbusds_epi32(__S, __U, __A, __B);
}
__m256i test_mm256_maskz_dpbusds_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_dpbusds_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusds.256
+ // CHECK-LABEL: test_mm256_maskz_dpbusds_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpbusds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_dpbusds_epi32(__U, __S, __A, __B);
}
__m256i test_mm256_dpbusds_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_dpbusds_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusds.256
+ // CHECK-LABEL: test_mm256_dpbusds_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpbusds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpbusds_epi32(__S, __A, __B);
}
__m256i test_mm256_mask_dpwssd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_dpwssd_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssd.256
+ // CHECK-LABEL: test_mm256_mask_dpwssd_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpwssd.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_dpwssd_epi32(__S, __U, __A, __B);
}
__m256i test_mm256_maskz_dpwssd_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_dpwssd_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssd.256
+ // CHECK-LABEL: test_mm256_maskz_dpwssd_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpwssd.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_dpwssd_epi32(__U, __S, __A, __B);
}
__m256i test_mm256_dpwssd_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_dpwssd_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssd.256
+ // CHECK-LABEL: test_mm256_dpwssd_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpwssd.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpwssd_epi32(__S, __A, __B);
}
__m256i test_mm256_mask_dpwssds_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_dpwssds_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssds.256
+ // CHECK-LABEL: test_mm256_mask_dpwssds_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpwssds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_dpwssds_epi32(__S, __U, __A, __B);
}
__m256i test_mm256_maskz_dpwssds_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_dpwssds_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssds.256
+ // CHECK-LABEL: test_mm256_maskz_dpwssds_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpwssds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_dpwssds_epi32(__U, __S, __A, __B);
}
__m256i test_mm256_dpwssds_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_dpwssds_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssds.256
+ // CHECK-LABEL: test_mm256_dpwssds_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpwssds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpwssds_epi32(__S, __A, __B);
}
__m128i test_mm_mask_dpbusd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_dpbusd_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusd.128
+ // CHECK-LABEL: test_mm_mask_dpbusd_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpbusd.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_dpbusd_epi32(__S, __U, __A, __B);
}
__m128i test_mm_maskz_dpbusd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_dpbusd_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusd.128
+ // CHECK-LABEL: test_mm_maskz_dpbusd_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpbusd.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_dpbusd_epi32(__U, __S, __A, __B);
}
__m128i test_mm_dpbusd_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_dpbusd_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusd.128
+ // CHECK-LABEL: test_mm_dpbusd_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpbusd.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpbusd_epi32(__S, __A, __B);
}
__m128i test_mm_mask_dpbusds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_dpbusds_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusds.128
+ // CHECK-LABEL: test_mm_mask_dpbusds_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpbusds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_dpbusds_epi32(__S, __U, __A, __B);
}
__m128i test_mm_maskz_dpbusds_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_dpbusds_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusds.128
+ // CHECK-LABEL: test_mm_maskz_dpbusds_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpbusds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_dpbusds_epi32(__U, __S, __A, __B);
}
__m128i test_mm_dpbusds_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_dpbusds_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusds.128
+ // CHECK-LABEL: test_mm_dpbusds_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpbusds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpbusds_epi32(__S, __A, __B);
}
__m128i test_mm_mask_dpwssd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_dpwssd_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssd.128
+ // CHECK-LABEL: test_mm_mask_dpwssd_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_dpwssd_epi32(__S, __U, __A, __B);
}
__m128i test_mm_maskz_dpwssd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_dpwssd_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssd.128
+ // CHECK-LABEL: test_mm_maskz_dpwssd_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_dpwssd_epi32(__U, __S, __A, __B);
}
__m128i test_mm_dpwssd_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_dpwssd_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssd.128
+ // CHECK-LABEL: test_mm_dpwssd_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpwssd_epi32(__S, __A, __B);
}
__m128i test_mm_mask_dpwssds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_dpwssds_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssds.128
+ // CHECK-LABEL: test_mm_mask_dpwssds_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpwssds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_dpwssds_epi32(__S, __U, __A, __B);
}
__m128i test_mm_maskz_dpwssds_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_dpwssds_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssds.128
+ // CHECK-LABEL: test_mm_maskz_dpwssds_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpwssds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_dpwssds_epi32(__U, __S, __A, __B);
}
__m128i test_mm_dpwssds_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_dpwssds_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssds.128
+ // CHECK-LABEL: test_mm_dpwssds_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpwssds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpwssds_epi32(__S, __A, __B);
}
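The vpdpbusd/vpdpbusds tests above exercise the VNNI dot-product lowering. As a rough scalar sketch of one 32-bit lane, assuming the documented unsigned-by-signed byte semantics (the helper name is hypothetical, not part of the test suite):

#include <stdint.h>

/* Hypothetical one-lane model of vpdpbusd: multiply each unsigned byte of
 * a with the corresponding signed byte of b, sum the four products, and
 * add the running 32-bit accumulator. The saturating form (vpdpbusds)
 * would clamp the result to the int32 range instead of wrapping. */
static int32_t ref_dpbusd_lane(int32_t acc, uint32_t a, uint32_t b) {
    int32_t sum = acc;
    for (int i = 0; i < 4; ++i) {
        uint8_t ua = (uint8_t)(a >> (8 * i));
        int8_t  sb = (int8_t)(b >> (8 * i));
        sum += (int32_t)ua * (int32_t)sb;
    }
    return sum;
}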
diff --git a/clang/test/CodeGen/X86/avx512vlvp2intersect-builtins.c b/clang/test/CodeGen/X86/avx512vlvp2intersect-builtins.c
index 1b360d1..37ce638 100644
--- a/clang/test/CodeGen/X86/avx512vlvp2intersect-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlvp2intersect-builtins.c
@@ -1,5 +1,7 @@
-// RUN: %clang_cc1 %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-unknown-unknown -target-feature +avx512vp2intersect -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 %s -flax-vector-conversions=none -ffreestanding -triple=i386-unknown-unknown -target-feature +avx512vp2intersect -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-unknown-unknown -target-feature +avx512vp2intersect -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c %s -flax-vector-conversions=none -ffreestanding -triple=i386-unknown-unknown -target-feature +avx512vp2intersect -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-unknown-unknown -target-feature +avx512vp2intersect -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=i386-unknown-unknown -target-feature +avx512vp2intersect -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
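Throughout these files the RUN lines gain explicit -x c / -x c++ variants and the CHECK-LABEL patterns drop their leading @. A plausible reading of that pairing: FileCheck's CHECK-LABEL performs a substring match, and under -x c++ the test functions are emitted with mangled names, so only the @-less form matches both builds. Illustrative IR (the mangled name below is an assumption, not taken from the test output):

// C build:   define <2 x i64> @test_mm_shldv_epi64(<2 x i64> ...)
// C++ build: define <2 x i64> @_Z19test_mm_shldv_epi64Dv2_xS_S_(<2 x i64> ...)
//
// "CHECK-LABEL: @test_mm_shldv_epi64" matches only the C symbol, while
// "CHECK-LABEL: test_mm_shldv_epi64" is a substring of both definitions.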
diff --git a/clang/test/CodeGen/X86/avx512vnni-builtins.c b/clang/test/CodeGen/X86/avx512vnni-builtins.c
index db39fb0..a0177b3 100644
--- a/clang/test/CodeGen/X86/avx512vnni-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vnni-builtins.c
@@ -1,84 +1,87 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vnni -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vnni -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vnni -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vnni -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vnni -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
__m512i test_mm512_mask_dpbusd_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_dpbusd_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusd.512
+ // CHECK-LABEL: test_mm512_mask_dpbusd_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.vpdpbusd.512(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_dpbusd_epi32(__S, __U, __A, __B);
}
__m512i test_mm512_maskz_dpbusd_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_dpbusd_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusd.512
+ // CHECK-LABEL: test_mm512_maskz_dpbusd_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.vpdpbusd.512(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_dpbusd_epi32(__U, __S, __A, __B);
}
__m512i test_mm512_dpbusd_epi32(__m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_dpbusd_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusd.512
+ // CHECK-LABEL: test_mm512_dpbusd_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.vpdpbusd.512(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_dpbusd_epi32(__S, __A, __B);
}
__m512i test_mm512_mask_dpbusds_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_dpbusds_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusds.51
+ // CHECK-LABEL: test_mm512_mask_dpbusds_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.vpdpbusds.512(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_dpbusds_epi32(__S, __U, __A, __B);
}
__m512i test_mm512_maskz_dpbusds_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_dpbusds_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusds.512
+ // CHECK-LABEL: test_mm512_maskz_dpbusds_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.vpdpbusds.512(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_dpbusds_epi32(__U, __S, __A, __B);
}
__m512i test_mm512_dpbusds_epi32(__m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_dpbusds_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusds.512
+ // CHECK-LABEL: test_mm512_dpbusds_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.vpdpbusds.512(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_dpbusds_epi32(__S, __A, __B);
}
__m512i test_mm512_mask_dpwssd_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_dpwssd_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssd.512
+ // CHECK-LABEL: test_mm512_mask_dpwssd_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.vpdpwssd.512(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_dpwssd_epi32(__S, __U, __A, __B);
}
__m512i test_mm512_maskz_dpwssd_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_dpwssd_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssd.512
+ // CHECK-LABEL: test_mm512_maskz_dpwssd_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.vpdpwssd.512(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_dpwssd_epi32(__U, __S, __A, __B);
}
__m512i test_mm512_dpwssd_epi32(__m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_dpwssd_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssd.512
+ // CHECK-LABEL: test_mm512_dpwssd_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.vpdpwssd.512(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_dpwssd_epi32(__S, __A, __B);
}
__m512i test_mm512_mask_dpwssds_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_dpwssds_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssds.512
+ // CHECK-LABEL: test_mm512_mask_dpwssds_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.vpdpwssds.512(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_dpwssds_epi32(__S, __U, __A, __B);
}
__m512i test_mm512_maskz_dpwssds_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_dpwssds_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssds.512
+ // CHECK-LABEL: test_mm512_maskz_dpwssds_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.vpdpwssds.512(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_dpwssds_epi32(__U, __S, __A, __B);
}
__m512i test_mm512_dpwssds_epi32(__m512i __S, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_dpwssds_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssds.512
+ // CHECK-LABEL: test_mm512_dpwssds_epi32
+ // CHECK: call <16 x i32> @llvm.x86.avx512.vpdpwssds.512(<16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_dpwssds_epi32(__S, __A, __B);
}
diff --git a/clang/test/CodeGen/X86/avx512vp2intersect-builtins.c b/clang/test/CodeGen/X86/avx512vp2intersect-builtins.c
index 2a3d38a..16704e8 100644
--- a/clang/test/CodeGen/X86/avx512vp2intersect-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vp2intersect-builtins.c
@@ -1,5 +1,7 @@
-// RUN: %clang_cc1 %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-unknown-unknown -target-feature +avx512vp2intersect -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 %s -flax-vector-conversions=none -ffreestanding -triple=i386-unknown-unknown -target-feature +avx512vp2intersect -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-unknown-unknown -target-feature +avx512vp2intersect -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c %s -flax-vector-conversions=none -ffreestanding -triple=i386-unknown-unknown -target-feature +avx512vp2intersect -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-unknown-unknown -target-feature +avx512vp2intersect -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=i386-unknown-unknown -target-feature +avx512vp2intersect -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
diff --git a/clang/test/CodeGen/X86/avx512vpopcntdq-builtins.c b/clang/test/CodeGen/X86/avx512vpopcntdq-builtins.c
index ca8f5e4..4fcc34e 100644
--- a/clang/test/CodeGen/X86/avx512vpopcntdq-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vpopcntdq-builtins.c
@@ -1,46 +1,59 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <immintrin.h>
#include "builtin_test_helpers.h"
__m512i test_mm512_popcnt_epi64(__m512i __A) {
- // CHECK-LABEL: @test_mm512_popcnt_epi64
+ // CHECK-LABEL: test_mm512_popcnt_epi64
// CHECK: @llvm.ctpop.v8i64
return _mm512_popcnt_epi64(__A);
}
-TEST_CONSTEXPR(match_v8di(_mm512_popcnt_epi64((__m512i)(__v8di){+5, -3, -10, +8, 0, -256, +256, -128}), 2, 31, 30, 1, 0, 24, 1, 25));
+TEST_CONSTEXPR(match_v8di(_mm512_popcnt_epi64((__m512i)(__v8di){+5, -3, -10, +8, 0, -256, +256, -128}), 2, 63, 62, 1, 0, 56, 1, 57));
__m512i test_mm512_mask_popcnt_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_popcnt_epi64
+ // CHECK-LABEL: test_mm512_mask_popcnt_epi64
// CHECK: @llvm.ctpop.v8i64
// CHECK: select <8 x i1> %{{[0-9]+}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_popcnt_epi64(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v8di(_mm512_mask_popcnt_epi64(_mm512_set1_epi64(-1), 0x81, (__m512i)(__v8di){+5, -3, -10, +8, 0, -256, +256, -128}), 2, -1, -1, -1, -1, -1, -1, 57));
__m512i test_mm512_maskz_popcnt_epi64(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_popcnt_epi64
+ // CHECK-LABEL: test_mm512_maskz_popcnt_epi64
// CHECK: @llvm.ctpop.v8i64
// CHECK: select <8 x i1> %{{[0-9]+}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_popcnt_epi64(__U, __A);
}
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_popcnt_epi64(0x42, (__m512i)(__v8di){+5, -3, -10, +8, 0, -256, +256, -128}), 0, 63, 0, 0, 0, 0, 1, 0));
__m512i test_mm512_popcnt_epi32(__m512i __A) {
- // CHECK-LABEL: @test_mm512_popcnt_epi32
+ // CHECK-LABEL: test_mm512_popcnt_epi32
// CHECK: @llvm.ctpop.v16i32
return _mm512_popcnt_epi32(__A);
}
TEST_CONSTEXPR(match_v16si(_mm512_popcnt_epi32((__m512i)(__v16si){+5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025}), 2, 31, 30, 1, 0, 24, 1, 25, 2, 2, 4, 2, 6, 2, 9, 2));
__m512i test_mm512_mask_popcnt_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_popcnt_epi32
+ // CHECK-LABEL: test_mm512_mask_popcnt_epi32
// CHECK: @llvm.ctpop.v16i32
// CHECK: select <16 x i1> %{{[0-9]+}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_popcnt_epi32(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v16si(_mm512_mask_popcnt_epi32(_mm512_set1_epi32(-1), 0x0F81, (__m512i)(__v16si){+5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025}), 2, -1, -1, -1, -1, -1, -1, 25, 2, 2, 4, 2, -1, -1, -1, -1));
__m512i test_mm512_maskz_popcnt_epi32(__mmask16 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_popcnt_epi32
+ // CHECK-LABEL: test_mm512_maskz_popcnt_epi32
// CHECK: @llvm.ctpop.v16i32
// CHECK: select <16 x i1> %{{[0-9]+}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_popcnt_epi32(__U, __A);
}
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_popcnt_epi32(0xF042, (__m512i)(__v16si){+5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025}), 0, 31, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 6, 2, 9, 2));
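
The expected-value edits in the epi64 hunk above (31 to 63, 30 to 62, 24 to 56, 25 to 57) are the real fix here: the old constants were 32-bit popcounts pasted against 64-bit elements. A two-line compile-time check makes the arithmetic concrete (standalone sketch, compiled as C++ or C23):

// -3 as a 64-bit two's-complement value is 0xFFFFFFFFFFFFFFFD: every
// bit set except bit 1, so its popcount is 63, not the 32-bit result 31.
static_assert(__builtin_popcountll(-3LL) == 63, "64-bit popcount of -3");
static_assert(__builtin_popcount(-3) == 31, "32-bit popcount of -3");
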
diff --git a/clang/test/CodeGen/X86/avx512vpopcntdqvl-builtins.c b/clang/test/CodeGen/X86/avx512vpopcntdqvl-builtins.c
index 5d18b68..8e36b35 100644
--- a/clang/test/CodeGen/X86/avx512vpopcntdqvl-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vpopcntdqvl-builtins.c
@@ -1,88 +1,105 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <immintrin.h>
#include "builtin_test_helpers.h"
__m128i test_mm_popcnt_epi64(__m128i __A) {
- // CHECK-LABEL: @test_mm_popcnt_epi64
+ // CHECK-LABEL: test_mm_popcnt_epi64
// CHECK: @llvm.ctpop.v2i64
return _mm_popcnt_epi64(__A);
}
TEST_CONSTEXPR(match_v2di(_mm_popcnt_epi64((__m128i)(__v2di){+5, -3}), 2, 63));
__m128i test_mm_mask_popcnt_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_popcnt_epi64
+ // CHECK-LABEL: test_mm_mask_popcnt_epi64
// CHECK: @llvm.ctpop.v2i64
// CHECK: select <2 x i1> %{{.+}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_popcnt_epi64(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v2di(_mm_mask_popcnt_epi64(_mm_set1_epi64x(-1), 0x2, (__m128i)(__v2di){+5, -3}), -1, 63));
__m128i test_mm_maskz_popcnt_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_popcnt_epi64
+ // CHECK-LABEL: test_mm_maskz_popcnt_epi64
// CHECK: @llvm.ctpop.v2i64
// CHECK: select <2 x i1> %{{.+}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_popcnt_epi64(__U, __A);
}
+TEST_CONSTEXPR(match_v2di(_mm_maskz_popcnt_epi64(0x1, (__m128i)(__v2di){+5, -3}), 2, 0));
__m128i test_mm_popcnt_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_popcnt_epi32
+ // CHECK-LABEL: test_mm_popcnt_epi32
// CHECK: @llvm.ctpop.v4i32
return _mm_popcnt_epi32(__A);
}
TEST_CONSTEXPR(match_v4si(_mm_popcnt_epi32((__m128i)(__v4si){+5, -3, -10, +8}), 2, 31, 30, 1));
__m128i test_mm_mask_popcnt_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_popcnt_epi32
+ // CHECK-LABEL: test_mm_mask_popcnt_epi32
// CHECK: @llvm.ctpop.v4i32
// CHECK: select <4 x i1> %{{.+}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_popcnt_epi32(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v4si(_mm_mask_popcnt_epi32(_mm_set1_epi32(-1), 0x3, (__m128i)(__v4si){+5, -3, -10, +8}), 2, 31, -1, -1));
__m128i test_mm_maskz_popcnt_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_popcnt_epi32
+ // CHECK-LABEL: test_mm_maskz_popcnt_epi32
// CHECK: @llvm.ctpop.v4i32
// CHECK: select <4 x i1> %{{.+}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_popcnt_epi32(__U, __A);
}
+TEST_CONSTEXPR(match_v4si(_mm_maskz_popcnt_epi32(0x5, (__m128i)(__v4si){+5, -3, -10, +8}), 2, 0, 30, 0));
__m256i test_mm256_popcnt_epi64(__m256i __A) {
- // CHECK-LABEL: @test_mm256_popcnt_epi64
+ // CHECK-LABEL: test_mm256_popcnt_epi64
// CHECK: @llvm.ctpop.v4i64
return _mm256_popcnt_epi64(__A);
}
TEST_CONSTEXPR(match_v4di(_mm256_popcnt_epi64((__m256i)(__v4di){+5, -3, -10, +8}), 2, 63, 62, 1));
__m256i test_mm256_mask_popcnt_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_popcnt_epi64
+ // CHECK-LABEL: test_mm256_mask_popcnt_epi64
// CHECK: @llvm.ctpop.v4i64
// CHECK: select <4 x i1> %{{.+}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_popcnt_epi64(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v4di(_mm256_mask_popcnt_epi64(_mm256_set1_epi64x(-1), 0x3, (__m256i)(__v4di){+5, -3, -10, +8}), 2, 63, -1, -1));
__m256i test_mm256_maskz_popcnt_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_popcnt_epi64
+ // CHECK-LABEL: test_mm256_maskz_popcnt_epi64
// CHECK: @llvm.ctpop.v4i64
// CHECK: select <4 x i1> %{{.+}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_popcnt_epi64(__U, __A);
}
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_popcnt_epi64(0x5, (__m256i)(__v4di){+5, -3, -10, +8}), 2, 0, 62, 0));
__m256i test_mm256_popcnt_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_popcnt_epi32
+ // CHECK-LABEL: test_mm256_popcnt_epi32
// CHECK: @llvm.ctpop.v8i32
return _mm256_popcnt_epi32(__A);
}
TEST_CONSTEXPR(match_v8si(_mm256_popcnt_epi32((__m256i)(__v8si){+5, -3, -10, +8, 0, -256, +256, -128}), 2, 31, 30, 1, 0, 24, 1, 25));
__m256i test_mm256_mask_popcnt_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_popcnt_epi32
+ // CHECK-LABEL: test_mm256_mask_popcnt_epi32
// CHECK: @llvm.ctpop.v8i32
// CHECK: select <8 x i1> %{{.+}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_popcnt_epi32(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_popcnt_epi32(_mm256_set1_epi32(-1), 0x37, (__m256i)(__v8si){+5, -3, -10, +8, 0, -256, +256, -128}), 2, 31, 30, -1, 0, 24, -1, -1));
__m256i test_mm256_maskz_popcnt_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_popcnt_epi32
+ // CHECK-LABEL: test_mm256_maskz_popcnt_epi32
// CHECK: @llvm.ctpop.v8i32
// CHECK: select <8 x i1> %{{.+}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_popcnt_epi32(__U, __A);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_popcnt_epi32(0x8C, (__m256i)(__v8si){+5, -3, -10, +8, 0, -256, +256, -128}), 0, 0, 30, 1, 0, 0, 0, 25));
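
All of the new mask/maskz TEST_CONSTEXPR lines in this file encode the same per-lane selection that the select <N x i1> CHECK lines verify in IR: a lane receives the popcount when its mask bit is set, and otherwise the pass-through lane (mask_ forms) or zero (maskz_ forms). A scalar sketch of that rule:

// Per-lane masked-select model behind the expected vectors above:
// computed = popcnt(a[i]); fallback is W[i] for mask_ ops, 0 for maskz_ ops.
static long long masked_lane(unsigned mask, int i,
                             long long computed, long long fallback) {
  return (mask >> i) & 1 ? computed : fallback;
}
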
diff --git a/clang/test/CodeGen/X86/avxifma-builtins.c b/clang/test/CodeGen/X86/avxifma-builtins.c
index 56e434c..dd0f220 100644
--- a/clang/test/CodeGen/X86/avxifma-builtins.c
+++ b/clang/test/CodeGen/X86/avxifma-builtins.c
@@ -1,52 +1,54 @@
-// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 -ffreestanding %s -triple=i386-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
__m128i test_mm_madd52hi_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
-// CHECK-LABEL: @test_mm_madd52hi_epu64
-// CHECK: call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128
+// CHECK-LABEL: test_mm_madd52hi_epu64
+// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_madd52hi_epu64(__X, __Y, __Z);
}
__m256i test_mm256_madd52hi_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
-// CHECK-LABEL: @test_mm256_madd52hi_epu64
-// CHECK: call <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256
+// CHECK-LABEL: test_mm256_madd52hi_epu64
+// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_madd52hi_epu64(__X, __Y, __Z);
}
__m128i test_mm_madd52lo_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
-// CHECK-LABEL: @test_mm_madd52lo_epu64
-// CHECK: call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128
+// CHECK-LABEL: test_mm_madd52lo_epu64
+// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_madd52lo_epu64(__X, __Y, __Z);
}
__m256i test_mm256_madd52lo_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
-// CHECK-LABEL: @test_mm256_madd52lo_epu64
-// CHECK: call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256
+// CHECK-LABEL: test_mm256_madd52lo_epu64
+// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_madd52lo_epu64(__X, __Y, __Z);
}
__m128i test_mm_madd52hi_avx_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
-// CHECK-LABEL: @test_mm_madd52hi_avx_epu64
-// CHECK: call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128
+// CHECK-LABEL: test_mm_madd52hi_avx_epu64
+// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_madd52hi_avx_epu64(__X, __Y, __Z);
}
__m256i test_mm256_madd52hi_avx_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
-// CHECK-LABEL: @test_mm256_madd52hi_avx_epu64
-// CHECK: call <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256
+// CHECK-LABEL: test_mm256_madd52hi_avx_epu64
+// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_madd52hi_avx_epu64(__X, __Y, __Z);
}
__m128i test_mm_madd52lo_avx_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
-// CHECK-LABEL: @test_mm_madd52lo_avx_epu64
-// CHECK: call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128
+// CHECK-LABEL: test_mm_madd52lo_avx_epu64
+// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_madd52lo_avx_epu64(__X, __Y, __Z);
}
__m256i test_mm256_madd52lo_avx_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
-// CHECK-LABEL: @test_mm256_madd52lo_avx_epu64
-// CHECK: call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256
+// CHECK-LABEL: test_mm256_madd52lo_avx_epu64
+// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_madd52lo_avx_epu64(__X, __Y, __Z);
}
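
The madd52 checks above now also spell out the three i64-vector operands. For reference, one 64-bit lane of the 52-bit multiply-accumulate behaves as in this sketch of the documented semantics (assuming the compiler provides unsigned __int128):

#include <stdint.h>

#define MASK52 ((1ULL << 52) - 1)

// vpmadd52luq lane: low 52 bits of the 104-bit product of the low 52 bits
// of x and y, added to the 64-bit accumulator.
static uint64_t madd52lo_lane(uint64_t acc, uint64_t x, uint64_t y) {
  unsigned __int128 p = (unsigned __int128)(x & MASK52) * (y & MASK52);
  return acc + ((uint64_t)p & MASK52);
}

// vpmadd52huq lane: bits 103:52 of the same product instead.
static uint64_t madd52hi_lane(uint64_t acc, uint64_t x, uint64_t y) {
  unsigned __int128 p = (unsigned __int128)(x & MASK52) * (y & MASK52);
  return acc + (uint64_t)(p >> 52);
}
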
diff --git a/clang/test/CodeGen/X86/avxvnni-builtins.c b/clang/test/CodeGen/X86/avxvnni-builtins.c
index 089578d..bb28a35 100644
--- a/clang/test/CodeGen/X86/avxvnni-builtins.c
+++ b/clang/test/CodeGen/X86/avxvnni-builtins.c
@@ -1,100 +1,102 @@
-// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avxvnni -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 -ffreestanding %s -triple=i386-apple-darwin -target-feature +avxvnni -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avxvnni -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386-apple-darwin -target-feature +avxvnni -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avxvnni -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-apple-darwin -target-feature +avxvnni -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
__m256i test_mm256_dpbusd_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_dpbusd_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusd.256
+ // CHECK-LABEL: test_mm256_dpbusd_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpbusd.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpbusd_epi32(__S, __A, __B);
}
__m256i test_mm256_dpbusds_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_dpbusds_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusds.256
+ // CHECK-LABEL: test_mm256_dpbusds_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpbusds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpbusds_epi32(__S, __A, __B);
}
__m256i test_mm256_dpwssd_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_dpwssd_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssd.256
+ // CHECK-LABEL: test_mm256_dpwssd_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpwssd.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpwssd_epi32(__S, __A, __B);
}
__m256i test_mm256_dpwssds_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_dpwssds_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssds.256
+ // CHECK-LABEL: test_mm256_dpwssds_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpwssds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpwssds_epi32(__S, __A, __B);
}
__m128i test_mm_dpbusd_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_dpbusd_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusd.128
+ // CHECK-LABEL: test_mm_dpbusd_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpbusd.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpbusd_epi32(__S, __A, __B);
}
__m128i test_mm_dpbusds_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_dpbusds_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusds.128
+ // CHECK-LABEL: test_mm_dpbusds_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpbusds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpbusds_epi32(__S, __A, __B);
}
__m128i test_mm_dpwssd_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_dpwssd_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssd.128
+ // CHECK-LABEL: test_mm_dpwssd_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpwssd_epi32(__S, __A, __B);
}
__m128i test_mm_dpwssds_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_dpwssds_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssds.128
+ // CHECK-LABEL: test_mm_dpwssds_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpwssds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpwssds_epi32(__S, __A, __B);
}
__m256i test_mm256_dpbusd_avx_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_dpbusd_avx_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusd.256
+ // CHECK-LABEL: test_mm256_dpbusd_avx_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpbusd.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpbusd_avx_epi32(__S, __A, __B);
}
__m256i test_mm256_dpbusds_avx_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_dpbusds_avx_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusds.256
+ // CHECK-LABEL: test_mm256_dpbusds_avx_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpbusds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpbusds_avx_epi32(__S, __A, __B);
}
__m256i test_mm256_dpwssd_avx_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_dpwssd_avx_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssd.256
+ // CHECK-LABEL: test_mm256_dpwssd_avx_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpwssd.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpwssd_avx_epi32(__S, __A, __B);
}
__m256i test_mm256_dpwssds_avx_epi32(__m256i __S, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_dpwssds_avx_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssds.256
+ // CHECK-LABEL: test_mm256_dpwssds_avx_epi32
+ // CHECK: call <8 x i32> @llvm.x86.avx512.vpdpwssds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpwssds_avx_epi32(__S, __A, __B);
}
__m128i test_mm_dpbusd_avx_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_dpbusd_avx_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusd.128
+ // CHECK-LABEL: test_mm_dpbusd_avx_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpbusd.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpbusd_avx_epi32(__S, __A, __B);
}
__m128i test_mm_dpbusds_avx_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_dpbusds_avx_epi32
- // CHECK: @llvm.x86.avx512.vpdpbusds.128
+ // CHECK-LABEL: test_mm_dpbusds_avx_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpbusds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpbusds_avx_epi32(__S, __A, __B);
}
__m128i test_mm_dpwssd_avx_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_dpwssd_avx_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssd.128
+ // CHECK-LABEL: test_mm_dpwssd_avx_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpwssd_avx_epi32(__S, __A, __B);
}
__m128i test_mm_dpwssds_avx_epi32(__m128i __S, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_dpwssds_avx_epi32
- // CHECK: @llvm.x86.avx512.vpdpwssds.128
+ // CHECK-LABEL: test_mm_dpwssds_avx_epi32
+ // CHECK: call <4 x i32> @llvm.x86.avx512.vpdpwssds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpwssds_avx_epi32(__S, __A, __B);
}
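
Worth noting from the CHECK lines above: the plain and _avx-suffixed intrinsics in this file land on the same @llvm.x86.avx512.vpdpbusd.* IR intrinsics, so only the eventual encoding (VEX for AVX-VNNI, EVEX for AVX512-VNNI) differs, and that choice is made later in the backend. An illustrative pair, matching the test's own pattern:

#include <immintrin.h>

// Per the CHECKs above, both of these produce an identical
// @llvm.x86.avx512.vpdpbusd.256 call in IR.
__m256i via_avx512_name(__m256i s, __m256i a, __m256i b) {
  return _mm256_dpbusd_epi32(s, a, b);
}
__m256i via_avx_name(__m256i s, __m256i a, __m256i b) {
  return _mm256_dpbusd_avx_epi32(s, a, b);
}
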
diff --git a/clang/test/CodeGen/X86/avxvnniint16-builtins.c b/clang/test/CodeGen/X86/avxvnniint16-builtins.c
index f9feaea..c25367b 100644
--- a/clang/test/CodeGen/X86/avxvnniint16-builtins.c
+++ b/clang/test/CodeGen/X86/avxvnniint16-builtins.c
@@ -1,78 +1,82 @@
-// RUN: %clang_cc1 %s -ffreestanding -triple=x86_64-unknown-unknown -target-feature +avxvnniint16 -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 %s -ffreestanding -triple=i386-unknown-unknown -target-feature +avxvnniint16 -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 %s -ffreestanding -triple=x86_64-unknown-unknown -target-feature +avx10.2-256 -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 %s -ffreestanding -triple=i386-unknown-unknown -target-feature +avx10.2-256 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c %s -ffreestanding -triple=x86_64-unknown-unknown -target-feature +avxvnniint16 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c %s -ffreestanding -triple=i386-unknown-unknown -target-feature +avxvnniint16 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c %s -ffreestanding -triple=x86_64-unknown-unknown -target-feature +avx10.2-256 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c %s -ffreestanding -triple=i386-unknown-unknown -target-feature +avx10.2-256 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ %s -ffreestanding -triple=x86_64-unknown-unknown -target-feature +avxvnniint16 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ %s -ffreestanding -triple=i386-unknown-unknown -target-feature +avxvnniint16 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ %s -ffreestanding -triple=x86_64-unknown-unknown -target-feature +avx10.2-256 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ %s -ffreestanding -triple=i386-unknown-unknown -target-feature +avx10.2-256 -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
__m128i test_mm_dpwsud_epi32(__m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_dpwsud_epi32(
+ // CHECK-LABEL: test_mm_dpwsud_epi32
// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpwsud.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpwsud_epi32(__A, __B, __C);
}
__m256i test_mm256_dpwsud_epi32(__m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_dpwsud_epi32(
+ // CHECK-LABEL: test_mm256_dpwsud_epi32
// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpwsud.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpwsud_epi32(__A, __B, __C);
}
__m128i test_mm_dpwsuds_epi32(__m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_dpwsuds_epi32(
+ // CHECK-LABEL: test_mm_dpwsuds_epi32
// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpwsuds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpwsuds_epi32(__A, __B, __C);
}
__m256i test_mm256_dpwsuds_epi32(__m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_dpwsuds_epi32(
+ // CHECK-LABEL: test_mm256_dpwsuds_epi32
// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpwsuds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpwsuds_epi32(__A, __B, __C);
}
__m128i test_mm_dpwusd_epi32(__m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_dpwusd_epi32(
+ // CHECK-LABEL: test_mm_dpwusd_epi32
// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpwusd.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpwusd_epi32(__A, __B, __C);
}
__m256i test_mm256_dpwusd_epi32(__m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_dpwusd_epi32(
+ // CHECK-LABEL: test_mm256_dpwusd_epi32
// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpwusd.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpwusd_epi32(__A, __B, __C);
}
__m128i test_mm_dpwusds_epi32(__m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_dpwusds_epi32(
+ // CHECK-LABEL: test_mm_dpwusds_epi32
// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpwusds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpwusds_epi32(__A, __B, __C);
}
__m256i test_mm256_dpwusds_epi32(__m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_dpwusds_epi32(
+ // CHECK-LABEL: test_mm256_dpwusds_epi32
// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpwusds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpwusds_epi32(__A, __B, __C);
}
__m128i test_mm_dpwuud_epi32(__m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_dpwuud_epi32(
+ // CHECK-LABEL: test_mm_dpwuud_epi32
// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpwuud.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpwuud_epi32(__A, __B, __C);
}
__m256i test_mm256_dpwuud_epi32(__m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_dpwuud_epi32(
+ // CHECK-LABEL: test_mm256_dpwuud_epi32
// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpwuud.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpwuud_epi32(__A, __B, __C);
}
__m128i test_mm_dpwuuds_epi32(__m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_dpwuuds_epi32(
+ // CHECK-LABEL: test_mm_dpwuuds_epi32
// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpwuuds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_dpwuuds_epi32(__A, __B, __C);
}
__m256i test_mm256_dpwuuds_epi32(__m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_dpwuuds_epi32(
+ // CHECK-LABEL: test_mm256_dpwuuds_epi32
// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpwuuds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_dpwuuds_epi32(__A, __B, __C);
}
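
In this family the suffix spells the signedness of the two sources (wsud: signed words times unsigned words, wusd: the reverse, wuud: both unsigned), with a trailing s for a saturating accumulate. One lane of the non-saturating wsud form, as a scalar sketch of the documented semantics:

#include <stdint.h>

// One 32-bit lane of vpdpwsud: two (signed i16 x unsigned u16) products
// summed into the accumulator; the wsuds form saturates instead.
static int32_t dpwsud_lane(int32_t acc, uint32_t a, uint32_t b) {
  int16_t  a0 = (int16_t)(a & 0xFFFF), a1 = (int16_t)(a >> 16);
  uint16_t b0 = (uint16_t)(b & 0xFFFF), b1 = (uint16_t)(b >> 16);
  int64_t sum = (int64_t)acc + (int32_t)a0 * b0 + (int32_t)a1 * b1;
  return (int32_t)sum; // wraps modulo 2^32
}
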
diff --git a/clang/test/CodeGen/X86/avxvnniint8-builtins.c b/clang/test/CodeGen/X86/avxvnniint8-builtins.c
index 80d005c..f808dee 100644
--- a/clang/test/CodeGen/X86/avxvnniint8-builtins.c
+++ b/clang/test/CodeGen/X86/avxvnniint8-builtins.c
@@ -1,78 +1,82 @@
-// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64- -target-feature +avxvnniint8 -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 -ffreestanding %s -triple=i386- -target-feature +avxvnniint8 -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64- -target-feature +avx10.2-256 -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 -ffreestanding %s -triple=i386- -target-feature +avx10.2-256 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64- -target-feature +avxvnniint8 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386- -target-feature +avxvnniint8 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64- -target-feature +avx10.2-256 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386- -target-feature +avx10.2-256 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64- -target-feature +avxvnniint8 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386- -target-feature +avxvnniint8 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64- -target-feature +avx10.2-256 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386- -target-feature +avx10.2-256 -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
-// CHECK-LABEL: @test_mm_dpbssd_epi32(
-// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbssd.128
+// CHECK-LABEL: test_mm_dpbssd_epi32
+// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
__m128i test_mm_dpbssd_epi32(__m128i __W, __m128i __A, __m128i __B) {
return _mm_dpbssd_epi32(__W, __A, __B);
}
-// CHECK-LABEL: @test_mm_dpbssds_epi32(
-// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbssds.128
+// CHECK-LABEL: test_mm_dpbssds_epi32
+// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
__m128i test_mm_dpbssds_epi32(__m128i __W, __m128i __A, __m128i __B) {
return _mm_dpbssds_epi32(__W, __A, __B);
}
-// CHECK-LABEL: @test_mm_dpbsud_epi32(
-// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbsud.128
+// CHECK-LABEL: test_mm_dpbsud_epi32
+// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
__m128i test_mm_dpbsud_epi32(__m128i __W, __m128i __A, __m128i __B) {
return _mm_dpbsud_epi32(__W, __A, __B);
}
-// CHECK-LABEL: @test_mm_dpbsuds_epi32(
-// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128
+// CHECK-LABEL: test_mm_dpbsuds_epi32
+// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
__m128i test_mm_dpbsuds_epi32(__m128i __W, __m128i __A, __m128i __B) {
return _mm_dpbsuds_epi32(__W, __A, __B);
}
-// CHECK-LABEL: @test_mm_dpbuud_epi32(
-// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbuud.128
+// CHECK-LABEL: test_mm_dpbuud_epi32
+// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
__m128i test_mm_dpbuud_epi32(__m128i __W, __m128i __A, __m128i __B) {
return _mm_dpbuud_epi32(__W, __A, __B);
}
-// CHECK-LABEL: @test_mm_dpbuuds_epi32(
-// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128
+// CHECK-LABEL: test_mm_dpbuuds_epi32
+// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
__m128i test_mm_dpbuuds_epi32(__m128i __W, __m128i __A, __m128i __B) {
return _mm_dpbuuds_epi32(__W, __A, __B);
}
-// CHECK-LABEL: @test_mm256_dpbssd_epi32(
-// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbssd.256
+// CHECK-LABEL: test_mm256_dpbssd_epi32
+// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
__m256i test_mm256_dpbssd_epi32(__m256i __W, __m256i __A, __m256i __B) {
return _mm256_dpbssd_epi32(__W, __A, __B);
}
-// CHECK-LABEL: @test_mm256_dpbssds_epi32(
-// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbssds.256
+// CHECK-LABEL: test_mm256_dpbssds_epi32
+// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
__m256i test_mm256_dpbssds_epi32(__m256i __W, __m256i __A, __m256i __B) {
return _mm256_dpbssds_epi32(__W, __A, __B);
}
-// CHECK-LABEL: @test_mm256_dpbsud_epi32(
-// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbsud.256
+// CHECK-LABEL: test_mm256_dpbsud_epi32
+// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
__m256i test_mm256_dpbsud_epi32(__m256i __W, __m256i __A, __m256i __B) {
return _mm256_dpbsud_epi32(__W, __A, __B);
}
-// CHECK-LABEL: @test_mm256_dpbsuds_epi32(
-// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256
+// CHECK-LABEL: test_mm256_dpbsuds_epi32
+// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
__m256i test_mm256_dpbsuds_epi32(__m256i __W, __m256i __A, __m256i __B) {
return _mm256_dpbsuds_epi32(__W, __A, __B);
}
-// CHECK-LABEL: @test_mm256_dpbuud_epi32(
-// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbuud.256
+// CHECK-LABEL: test_mm256_dpbuud_epi32
+// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
__m256i test_mm256_dpbuud_epi32(__m256i __W, __m256i __A, __m256i __B) {
return _mm256_dpbuud_epi32(__W, __A, __B);
}
-// CHECK-LABEL: @test_mm256_dpbuuds_epi32(
-// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256
+// CHECK-LABEL: test_mm256_dpbuuds_epi32
+// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}})
__m256i test_mm256_dpbuuds_epi32(__m256i __W, __m256i __A, __m256i __B) {
return _mm256_dpbuuds_epi32(__W, __A, __B);
}
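
The byte-granularity family follows the same naming matrix, and the only difference between each d and ds pair checked above is whether the final accumulate wraps or clamps. The clamp applied by the s-forms is plain 32-bit signed saturation, sketched here:

#include <stdint.h>

// Saturating accumulate used by the dpb*ds intrinsics (sketch):
// clamp the widened sum to the signed 32-bit range instead of wrapping.
static int32_t sat_i32(int64_t x) {
  if (x > INT32_MAX) return INT32_MAX;
  if (x < INT32_MIN) return INT32_MIN;
  return (int32_t)x;
}
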
diff --git a/clang/test/CodeGen/X86/bitscan-builtins.c b/clang/test/CodeGen/X86/bitscan-builtins.c
index 9fd4666..06c6994 100644
--- a/clang/test/CodeGen/X86/bitscan-builtins.c
+++ b/clang/test/CodeGen/X86/bitscan-builtins.c
@@ -1,11 +1,16 @@
// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-unknown-unknown -no-enable-noundef-analysis -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding %s -triple=x86_64-unknown-unknown -no-enable-noundef-analysis -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-unknown-unknown -no-enable-noundef-analysis -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding %s -triple=x86_64-unknown-unknown -no-enable-noundef-analysis -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s
+
// PR33722
// RUN: %clang_cc1 -x c -ffreestanding %s -triple x86_64-unknown-unknown -fms-extensions -fms-compatibility-version=19.00 -no-enable-noundef-analysis -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple x86_64-unknown-unknown -fms-extensions -fms-compatibility-version=19.00 -no-enable-noundef-analysis -emit-llvm -o - | FileCheck %s
+
#include <x86intrin.h>
+#include "builtin_test_helpers.h"
int test_bit_scan_forward(int a) {
// CHECK-LABEL: test_bit_scan_forward
@@ -13,6 +18,8 @@ int test_bit_scan_forward(int a) {
// CHECK: ret i32 %[[call]]
return _bit_scan_forward(a);
}
+TEST_CONSTEXPR(_bit_scan_forward(0x00000001) == 0);
+TEST_CONSTEXPR(_bit_scan_forward(0x10000000) == 28);
int test_bit_scan_reverse(int a) {
// CHECK-LABEL: test_bit_scan_reverse
@@ -21,18 +28,24 @@ int test_bit_scan_reverse(int a) {
// CHECK: ret i32 %[[sub]]
return _bit_scan_reverse(a);
}
+TEST_CONSTEXPR(_bit_scan_reverse(0x00000001) == 0);
+TEST_CONSTEXPR(_bit_scan_reverse(0x01000000) == 24);
int test__bsfd(int X) {
// CHECK-LABEL: test__bsfd
// CHECK: %[[call:.*]] = call i32 @llvm.cttz.i32(i32 %{{.*}}, i1 true)
return __bsfd(X);
}
+TEST_CONSTEXPR(__bsfd(0x00000008) == 3);
+TEST_CONSTEXPR(__bsfd(0x00010008) == 3);
int test__bsfq(long long X) {
// CHECK-LABEL: test__bsfq
// CHECK: %[[call:.*]] = call i64 @llvm.cttz.i64(i64 %{{.*}}, i1 true)
return __bsfq(X);
}
+TEST_CONSTEXPR(__bsfq(0x0000000800000000ULL) == 35);
+TEST_CONSTEXPR(__bsfq(0x0004000000000000ULL) == 50);
int test__bsrd(int X) {
// CHECK-LABEL: test__bsrd
@@ -40,6 +53,8 @@ int test__bsrd(int X) {
// CHECK: %[[sub:.*]] = sub nsw i32 31, %[[call]]
return __bsrd(X);
}
+TEST_CONSTEXPR(__bsrd(0x00000010) == 4);
+TEST_CONSTEXPR(__bsrd(0x00100100) == 20);
int test__bsrq(long long X) {
// CHECK-LABEL: test__bsrq
@@ -48,26 +63,5 @@ int test__bsrq(long long X) {
// CHECK: %[[sub:.*]] = sub nsw i32 63, %[[cast]]
return __bsrq(X);
}
-
-// Test constexpr handling.
-#if defined(__cplusplus) && (__cplusplus >= 201103L)
-
-char bsf_0[_bit_scan_forward(0x00000001) == 0 ? 1 : -1];
-char bsf_1[_bit_scan_forward(0x10000000) == 28 ? 1 : -1];
-
-char bsr_0[_bit_scan_reverse(0x00000001) == 0 ? 1 : -1];
-char bsr_1[_bit_scan_reverse(0x01000000) == 24 ? 1 : -1];
-
-char bsfd_0[__bsfd(0x00000008) == 3 ? 1 : -1];
-char bsfd_1[__bsfd(0x00010008) == 3 ? 1 : -1];
-
-char bsrd_0[__bsrd(0x00000010) == 4 ? 1 : -1];
-char bsrd_1[__bsrd(0x00100100) == 20 ? 1 : -1];
-
-char bsfq_0[__bsfq(0x0000000800000000ULL) == 35 ? 1 : -1];
-char bsfq_1[__bsfq(0x0004000000000000ULL) == 50 ? 1 : -1];
-
-char bsrq_0[__bsrq(0x0000100800000000ULL) == 44 ? 1 : -1];
-char bsrq_1[__bsrq(0x0004000100000000ULL) == 50 ? 1 : -1];
-
-#endif
+TEST_CONSTEXPR(__bsrq(0x0000100800000000ULL) == 44);
+TEST_CONSTEXPR(__bsrq(0x0004000100000000ULL) == 50);
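
The deleted block above used the C++11-era negative-array-size trick (char x[cond ? 1 : -1]) to force compile-time evaluation; the new TEST_CONSTEXPR lines rely on builtin_test_helpers.h instead. The macro's definition is not part of this hunk; a plausible minimal shape, offered only as an assumption, is:

// Hypothetical sketch of TEST_CONSTEXPR (the real definition lives in
// builtin_test_helpers.h): a static_assert under C++11 and later, and a
// no-op otherwise so the -x c RUN lines still compile.
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define TEST_CONSTEXPR(...) static_assert(__VA_ARGS__, #__VA_ARGS__)
#else
#define TEST_CONSTEXPR(...)
#endif
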
diff --git a/clang/test/CodeGen/X86/builtin_test_helpers.h b/clang/test/CodeGen/X86/builtin_test_helpers.h
index f719694..2476a2b 100644
--- a/clang/test/CodeGen/X86/builtin_test_helpers.h
+++ b/clang/test/CodeGen/X86/builtin_test_helpers.h
@@ -13,29 +13,59 @@ constexpr bool match_v1di(__m64 v, long long a) {
return v[0] == a;
}
+constexpr bool match_v1du(__m64 _v, unsigned long long a) {
+ __v1du v = (__v1du)_v;
+ return v[0] == a;
+}
+
constexpr bool match_v2si(__m64 _v, int a, int b) {
__v2si v = (__v2si)_v;
return v[0] == a && v[1] == b;
}
+constexpr bool match_v2su(__m64 _v, unsigned a, unsigned b) {
+ __v2su v = (__v2su)_v;
+ return v[0] == a && v[1] == b;
+}
+
constexpr bool match_v4hi(__m64 _v, short a, short b, short c, short d) {
__v4hi v = (__v4hi)_v;
return v[0] == a && v[1] == b && v[2] == c && v[3] == d;
}
-constexpr bool match_v8qi(__m64 _v, char a, char b, char c, char d, char e, char f, char g, char h) {
- __v8qi v = (__v8qi)_v;
+constexpr bool match_v4hu(__m64 _v, unsigned short a, unsigned short b, unsigned short c, unsigned short d) {
+ __v4hu v = (__v4hu)_v;
+ return v[0] == a && v[1] == b && v[2] == c && v[3] == d;
+}
+
+constexpr bool match_v8qi(__m64 _v, signed char a, signed char b, signed char c, signed char d, signed char e, signed char f, signed char g, signed char h) {
+ __v8qs v = (__v8qs)_v;
return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h;
}
-constexpr bool match_m128(__m128 v, float a, float b, float c, float d) {
- return v[0] == a && v[1] == b && v[2] == c && v[3] == d;
+constexpr bool match_v8qu(__m64 _v, unsigned char a, unsigned char b, unsigned char c, unsigned char d, unsigned char e, unsigned char f, unsigned char g, unsigned char h) {
+ __v8qu v = (__v8qu)_v;
+ return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h;
}
-constexpr bool match_m128d(__m128d v, double a, double b) {
- return v[0] == a && v[1] == b;
+constexpr bool match_m128(__m128 _v, float a, float b, float c, float d) {
+ __v4su v = (__v4su)_v;
+ return v[0] == __builtin_bit_cast(unsigned, a) && v[1] == __builtin_bit_cast(unsigned, b) && v[2] == __builtin_bit_cast(unsigned, c) && v[3] == __builtin_bit_cast(unsigned, d);
+}
+
+constexpr bool match_m128d(__m128d _v, double a, double b) {
+ __v2du v = (__v2du)_v;
+ return v[0] == __builtin_bit_cast(unsigned long long, a) && v[1] == __builtin_bit_cast(unsigned long long, b);
}
+#ifdef __SSE2__
+constexpr bool match_m128h(__m128h _v, _Float16 __e00, _Float16 __e01, _Float16 __e02, _Float16 __e03, _Float16 __e04, _Float16 __e05, _Float16 __e06, _Float16 __e07) {
+ __v8hu v = (__v8hu)_v;
+ return v[ 0] == __builtin_bit_cast(unsigned short, __e00) && v[ 1] == __builtin_bit_cast(unsigned short, __e01) && v[ 2] == __builtin_bit_cast(unsigned short, __e02) && v[ 3] == __builtin_bit_cast(unsigned short, __e03) &&
+ v[ 4] == __builtin_bit_cast(unsigned short, __e04) && v[ 5] == __builtin_bit_cast(unsigned short, __e05) && v[ 6] == __builtin_bit_cast(unsigned short, __e06) && v[ 7] == __builtin_bit_cast(unsigned short, __e07);
+}
+#endif
+
constexpr bool match_m128i(__m128i _v, unsigned long long a, unsigned long long b) {
__v2du v = (__v2du)_v;
return v[0] == a && v[1] == b;
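
The float and double matchers above switch from comparing elements with == to comparing bit patterns through unsigned vectors and __builtin_bit_cast. A likely reason, stated as an inference rather than from the patch text, is that bitwise equality is exact under constant evaluation: it distinguishes -0.0 from +0.0 and treats identical NaN patterns as equal, where floating == does neither. Compiled as C++:

// Floating == ignores the sign of zero; the bit patterns do not.
static_assert(-0.0 == 0.0, "floating compare");
static_assert(__builtin_bit_cast(unsigned long long, -0.0) !=
              __builtin_bit_cast(unsigned long long, 0.0),
              "bitwise compare");
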
@@ -45,29 +75,63 @@ constexpr bool match_v2di(__m128i v, long long a, long long b) {
return v[0] == a && v[1] == b;
}
+constexpr bool match_v2du(__m128i _v, unsigned long long a, unsigned long long b) {
+ __v2du v = (__v2du)_v;
+ return v[0] == a && v[1] == b;
+}
+
constexpr bool match_v4si(__m128i _v, int a, int b, int c, int d) {
__v4si v = (__v4si)_v;
return v[0] == a && v[1] == b && v[2] == c && v[3] == d;
}
+constexpr bool match_v4su(__m128i _v, unsigned a, unsigned b, unsigned c, unsigned d) {
+ __v4su v = (__v4su)_v;
+ return v[0] == a && v[1] == b && v[2] == c && v[3] == d;
+}
+
constexpr bool match_v8hi(__m128i _v, short a, short b, short c, short d, short e, short f, short g, short h) {
__v8hi v = (__v8hi)_v;
return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h;
}
-constexpr bool match_v16qi(__m128i _v, char a, char b, char c, char d, char e, char f, char g, char h, char i, char j, char k, char l, char m, char n, char o, char p) {
- __v16qi v = (__v16qi)_v;
+constexpr bool match_v8hu(__m128i _v, unsigned short a, unsigned short b, unsigned short c, unsigned short d, unsigned short e, unsigned short f, unsigned short g, unsigned short h) {
+ __v8hu v = (__v8hu)_v;
+ return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h;
+}
+
+constexpr bool match_v16qi(__m128i _v, signed char a, signed char b, signed char c, signed char d, signed char e, signed char f, signed char g, signed char h, signed char i, signed char j, signed char k, signed char l, signed char m, signed char n, signed char o, signed char p) {
+ __v16qs v = (__v16qs)_v;
return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h && v[8] == i && v[9] == j && v[10] == k && v[11] == l && v[12] == m && v[13] == n && v[14] == o && v[15] == p;
}
-constexpr bool match_m256(__m256 v, float a, float b, float c, float d, float e, float f, float g, float h) {
- return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h;
+constexpr bool match_v16qu(__m128i _v, unsigned char a, unsigned char b, unsigned char c, unsigned char d, unsigned char e, unsigned char f, unsigned char g, unsigned char h, unsigned char i, unsigned char j, unsigned char k, unsigned char l, unsigned char m, unsigned char n, unsigned char o, unsigned char p) {
+ __v16qu v = (__v16qu)_v;
+ return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h && v[8] == i && v[9] == j && v[10] == k && v[11] == l && v[12] == m && v[13] == n && v[14] == o && v[15] == p;
}
-constexpr bool match_m256d(__m256d v, double a, double b, double c, double d) {
- return v[0] == a && v[1] == b && v[2] == c && v[3] == d;
+constexpr bool match_m256(__m256 _v, float __e00, float __e01, float __e02, float __e03, float __e04, float __e05, float __e06, float __e07) {
+ __v8su v = (__v8su)_v;
+ return v[ 0] == __builtin_bit_cast(unsigned, __e00) && v[ 1] == __builtin_bit_cast(unsigned, __e01) && v[ 2] == __builtin_bit_cast(unsigned, __e02) && v[ 3] == __builtin_bit_cast(unsigned, __e03) &&
+ v[ 4] == __builtin_bit_cast(unsigned, __e04) && v[ 5] == __builtin_bit_cast(unsigned, __e05) && v[ 6] == __builtin_bit_cast(unsigned, __e06) && v[ 7] == __builtin_bit_cast(unsigned, __e07);
}
+constexpr bool match_m256d(__m256d _v, double a, double b, double c, double d) {
+ __v4du v = (__v4du)_v;
+ return v[0] == __builtin_bit_cast(unsigned long long, a) && v[1] == __builtin_bit_cast(unsigned long long, b) && v[2] == __builtin_bit_cast(unsigned long long, c) && v[3] == __builtin_bit_cast(unsigned long long, d);
+}
+
+#ifdef __SSE2__
+constexpr bool match_m256h(__m256h _v, _Float16 __e00, _Float16 __e01, _Float16 __e02, _Float16 __e03, _Float16 __e04, _Float16 __e05, _Float16 __e06, _Float16 __e07,
+ _Float16 __e08, _Float16 __e09, _Float16 __e10, _Float16 __e11, _Float16 __e12, _Float16 __e13, _Float16 __e14, _Float16 __e15) {
+ __v16hu v = (__v16hu)_v;
+ return v[ 0] == __builtin_bit_cast(unsigned short, __e00) && v[ 1] == __builtin_bit_cast(unsigned short, __e01) && v[ 2] == __builtin_bit_cast(unsigned short, __e02) && v[ 3] == __builtin_bit_cast(unsigned short, __e03) &&
+ v[ 4] == __builtin_bit_cast(unsigned short, __e04) && v[ 5] == __builtin_bit_cast(unsigned short, __e05) && v[ 6] == __builtin_bit_cast(unsigned short, __e06) && v[ 7] == __builtin_bit_cast(unsigned short, __e07) &&
+ v[ 8] == __builtin_bit_cast(unsigned short, __e08) && v[ 9] == __builtin_bit_cast(unsigned short, __e09) && v[10] == __builtin_bit_cast(unsigned short, __e10) && v[11] == __builtin_bit_cast(unsigned short, __e11) &&
+ v[12] == __builtin_bit_cast(unsigned short, __e12) && v[13] == __builtin_bit_cast(unsigned short, __e13) && v[14] == __builtin_bit_cast(unsigned short, __e14) && v[15] == __builtin_bit_cast(unsigned short, __e15);
+}
+#endif
+
constexpr bool match_m256i(__m256i _v, unsigned long long a, unsigned long long b, unsigned long long c, unsigned long long d) {
__v4du v = (__v4du)_v;
return v[0] == a && v[1] == b && v[2] == c && v[3] == d;
@@ -83,29 +147,73 @@ constexpr bool match_v8si(__m256i _v, int a, int b, int c, int d, int e, int f,
return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h;
}
+constexpr bool match_v8su(__m256i _v, unsigned a, unsigned b, unsigned c, unsigned d, unsigned e, unsigned f, unsigned g, unsigned h) {
+ __v8su v = (__v8su)_v;
+ return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h;
+}
+
constexpr bool match_v16hi(__m256i _v, short a, short b, short c, short d, short e, short f, short g, short h, short i, short j, short k, short l, short m, short n, short o, short p) {
__v16hi v = (__v16hi)_v;
return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h && v[8] == i && v[9] == j && v[10] == k && v[11] == l && v[12] == m && v[13] == n && v[14] == o && v[15] == p;
}
-constexpr bool match_v32qi(__m256i _v, char __b00, char __b01, char __b02, char __b03, char __b04, char __b05, char __b06, char __b07,
- char __b08, char __b09, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15,
- char __b16, char __b17, char __b18, char __b19, char __b20, char __b21, char __b22, char __b23,
- char __b24, char __b25, char __b26, char __b27, char __b28, char __b29, char __b30, char __b31) {
- __v32qi v = (__v32qi)_v;
+constexpr bool match_v16hu(__m256i _v, unsigned short a, unsigned short b, unsigned short c, unsigned short d, unsigned short e, unsigned short f, unsigned short g, unsigned short h, unsigned short i, unsigned short j, unsigned short k, unsigned short l, unsigned short m, unsigned short n, unsigned short o, unsigned short p) {
+ __v16hu v = (__v16hu)_v;
+ return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h && v[8] == i && v[9] == j && v[10] == k && v[11] == l && v[12] == m && v[13] == n && v[14] == o && v[15] == p;
+}
+
+constexpr bool match_v32qi(__m256i _v, signed char __b00, signed char __b01, signed char __b02, signed char __b03, signed char __b04, signed char __b05, signed char __b06, signed char __b07,
+ signed char __b08, signed char __b09, signed char __b10, signed char __b11, signed char __b12, signed char __b13, signed char __b14, signed char __b15,
+ signed char __b16, signed char __b17, signed char __b18, signed char __b19, signed char __b20, signed char __b21, signed char __b22, signed char __b23,
+ signed char __b24, signed char __b25, signed char __b26, signed char __b27, signed char __b28, signed char __b29, signed char __b30, signed char __b31) {
+ __v32qs v = (__v32qs)_v;
return v[ 0] == __b00 && v[ 1] == __b01 && v[ 2] == __b02 && v[ 3] == __b03 && v[ 4] == __b04 && v[ 5] == __b05 && v[ 6] == __b06 && v[ 7] == __b07 &&
v[ 8] == __b08 && v[ 9] == __b09 && v[10] == __b10 && v[11] == __b11 && v[12] == __b12 && v[13] == __b13 && v[14] == __b14 && v[15] == __b15 &&
v[16] == __b16 && v[17] == __b17 && v[18] == __b18 && v[19] == __b19 && v[20] == __b20 && v[21] == __b21 && v[22] == __b22 && v[23] == __b23 &&
v[24] == __b24 && v[25] == __b25 && v[26] == __b26 && v[27] == __b27 && v[28] == __b28 && v[29] == __b29 && v[30] == __b30 && v[31] == __b31;
}
-constexpr bool match_m512(__m512 v, float a, float b, float c, float d, float e, float f, float g, float h, float i, float j, float k, float l, float m, float n, float o, float p) {
- return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h && v[8] == i && v[9] == j && v[10] == k && v[11] == l && v[12] == m && v[13] == n && v[14] == o && v[15] == p;
+constexpr bool match_v32qu(__m256i _v, unsigned char __b00, unsigned char __b01, unsigned char __b02, unsigned char __b03, unsigned char __b04, unsigned char __b05, unsigned char __b06, unsigned char __b07,
+ unsigned char __b08, unsigned char __b09, unsigned char __b10, unsigned char __b11, unsigned char __b12, unsigned char __b13, unsigned char __b14, unsigned char __b15,
+ unsigned char __b16, unsigned char __b17, unsigned char __b18, unsigned char __b19, unsigned char __b20, unsigned char __b21, unsigned char __b22, unsigned char __b23,
+ unsigned char __b24, unsigned char __b25, unsigned char __b26, unsigned char __b27, unsigned char __b28, unsigned char __b29, unsigned char __b30, unsigned char __b31) {
+ __v32qu v = (__v32qu)_v;
+ return v[ 0] == __b00 && v[ 1] == __b01 && v[ 2] == __b02 && v[ 3] == __b03 && v[ 4] == __b04 && v[ 5] == __b05 && v[ 6] == __b06 && v[ 7] == __b07 &&
+ v[ 8] == __b08 && v[ 9] == __b09 && v[10] == __b10 && v[11] == __b11 && v[12] == __b12 && v[13] == __b13 && v[14] == __b14 && v[15] == __b15 &&
+ v[16] == __b16 && v[17] == __b17 && v[18] == __b18 && v[19] == __b19 && v[20] == __b20 && v[21] == __b21 && v[22] == __b22 && v[23] == __b23 &&
+ v[24] == __b24 && v[25] == __b25 && v[26] == __b26 && v[27] == __b27 && v[28] == __b28 && v[29] == __b29 && v[30] == __b30 && v[31] == __b31;
}
-constexpr bool match_m512d(__m512d v, double a, double b, double c, double d, double e, double f, double g, double h) {
- return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h;
+constexpr bool match_m512(__m512 _v, float __e00, float __e01, float __e02, float __e03, float __e04, float __e05, float __e06, float __e07, float __e08, float __e09, float __e10, float __e11, float __e12, float __e13, float __e14, float __e15) {
+ __v16su v = (__v16su)_v;
+ return v[ 0] == __builtin_bit_cast(unsigned, __e00) && v[ 1] == __builtin_bit_cast(unsigned, __e01) && v[ 2] == __builtin_bit_cast(unsigned, __e02) && v[ 3] == __builtin_bit_cast(unsigned, __e03) &&
+ v[ 4] == __builtin_bit_cast(unsigned, __e04) && v[ 5] == __builtin_bit_cast(unsigned, __e05) && v[ 6] == __builtin_bit_cast(unsigned, __e06) && v[ 7] == __builtin_bit_cast(unsigned, __e07) &&
+ v[ 8] == __builtin_bit_cast(unsigned, __e08) && v[ 9] == __builtin_bit_cast(unsigned, __e09) && v[10] == __builtin_bit_cast(unsigned, __e10) && v[11] == __builtin_bit_cast(unsigned, __e11) &&
+ v[12] == __builtin_bit_cast(unsigned, __e12) && v[13] == __builtin_bit_cast(unsigned, __e13) && v[14] == __builtin_bit_cast(unsigned, __e14) && v[15] == __builtin_bit_cast(unsigned, __e15);
+}
+
+constexpr bool match_m512d(__m512d _v, double __e00, double __e01, double __e02, double __e03, double __e04, double __e05, double __e06, double __e07) {
+ __v8du v = (__v8du)_v;
+ return v[ 0] == __builtin_bit_cast(unsigned long long, __e00) && v[ 1] == __builtin_bit_cast(unsigned long long, __e01) && v[ 2] == __builtin_bit_cast(unsigned long long, __e02) && v[ 3] == __builtin_bit_cast(unsigned long long, __e03) &&
+ v[ 4] == __builtin_bit_cast(unsigned long long, __e04) && v[ 5] == __builtin_bit_cast(unsigned long long, __e05) && v[ 6] == __builtin_bit_cast(unsigned long long, __e06) && v[ 7] == __builtin_bit_cast(unsigned long long, __e07);
+}
+
+#ifdef __SSE2__
+constexpr bool match_m512h(__m512h _v, _Float16 __e00, _Float16 __e01, _Float16 __e02, _Float16 __e03, _Float16 __e04, _Float16 __e05, _Float16 __e06, _Float16 __e07,
+ _Float16 __e08, _Float16 __e09, _Float16 __e10, _Float16 __e11, _Float16 __e12, _Float16 __e13, _Float16 __e14, _Float16 __e15,
+ _Float16 __e16, _Float16 __e17, _Float16 __e18, _Float16 __e19, _Float16 __e20, _Float16 __e21, _Float16 __e22, _Float16 __e23,
+ _Float16 __e24, _Float16 __e25, _Float16 __e26, _Float16 __e27, _Float16 __e28, _Float16 __e29, _Float16 __e30, _Float16 __e31) {
+ __v32hu v = (__v32hu)_v;
+ return v[ 0] == __builtin_bit_cast(unsigned short, __e00) && v[ 1] == __builtin_bit_cast(unsigned short, __e01) && v[ 2] == __builtin_bit_cast(unsigned short, __e02) && v[ 3] == __builtin_bit_cast(unsigned short, __e03) &&
+ v[ 4] == __builtin_bit_cast(unsigned short, __e04) && v[ 5] == __builtin_bit_cast(unsigned short, __e05) && v[ 6] == __builtin_bit_cast(unsigned short, __e06) && v[ 7] == __builtin_bit_cast(unsigned short, __e07) &&
+ v[ 8] == __builtin_bit_cast(unsigned short, __e08) && v[ 9] == __builtin_bit_cast(unsigned short, __e09) && v[10] == __builtin_bit_cast(unsigned short, __e10) && v[11] == __builtin_bit_cast(unsigned short, __e11) &&
+ v[12] == __builtin_bit_cast(unsigned short, __e12) && v[13] == __builtin_bit_cast(unsigned short, __e13) && v[14] == __builtin_bit_cast(unsigned short, __e14) && v[15] == __builtin_bit_cast(unsigned short, __e15) &&
+ v[16] == __builtin_bit_cast(unsigned short, __e16) && v[17] == __builtin_bit_cast(unsigned short, __e17) && v[18] == __builtin_bit_cast(unsigned short, __e18) && v[19] == __builtin_bit_cast(unsigned short, __e19) &&
+ v[20] == __builtin_bit_cast(unsigned short, __e20) && v[21] == __builtin_bit_cast(unsigned short, __e21) && v[22] == __builtin_bit_cast(unsigned short, __e22) && v[23] == __builtin_bit_cast(unsigned short, __e23) &&
+ v[24] == __builtin_bit_cast(unsigned short, __e24) && v[25] == __builtin_bit_cast(unsigned short, __e25) && v[26] == __builtin_bit_cast(unsigned short, __e26) && v[27] == __builtin_bit_cast(unsigned short, __e27) &&
+ v[28] == __builtin_bit_cast(unsigned short, __e28) && v[29] == __builtin_bit_cast(unsigned short, __e29) && v[30] == __builtin_bit_cast(unsigned short, __e30) && v[31] == __builtin_bit_cast(unsigned short, __e31);
}
+#endif
constexpr bool match_m512i(__m512i _v, unsigned long long a, unsigned long long b, unsigned long long c, unsigned long long d, unsigned long long e, unsigned long long f, unsigned long long g, unsigned long long h) {
__v8du v = (__v8du)_v;
@@ -122,6 +230,66 @@ constexpr bool match_v16si(__m512i _v, int a, int b, int c, int d, int e, int f,
return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h && v[8] == i && v[9] == j && v[10] == k && v[11] == l && v[12] == m && v[13] == n && v[14] == o && v[15] == p;
}
+constexpr bool match_v32hi(__m512i _v, short __e00, short __e01, short __e02, short __e03, short __e04, short __e05, short __e06, short __e07,
+ short __e08, short __e09, short __e10, short __e11, short __e12, short __e13, short __e14, short __e15,
+ short __e16, short __e17, short __e18, short __e19, short __e20, short __e21, short __e22, short __e23,
+ short __e24, short __e25, short __e26, short __e27, short __e28, short __e29, short __e30, short __e31) {
+ __v32hi v = (__v32hi)_v;
+ return v[ 0] == __e00 && v[ 1] == __e01 && v[ 2] == __e02 && v[ 3] == __e03 && v[ 4] == __e04 && v[ 5] == __e05 && v[ 6] == __e06 && v[ 7] == __e07 &&
+ v[ 8] == __e08 && v[ 9] == __e09 && v[10] == __e10 && v[11] == __e11 && v[12] == __e12 && v[13] == __e13 && v[14] == __e14 && v[15] == __e15 &&
+ v[16] == __e16 && v[17] == __e17 && v[18] == __e18 && v[19] == __e19 && v[20] == __e20 && v[21] == __e21 && v[22] == __e22 && v[23] == __e23 &&
+ v[24] == __e24 && v[25] == __e25 && v[26] == __e26 && v[27] == __e27 && v[28] == __e28 && v[29] == __e29 && v[30] == __e30 && v[31] == __e31;
+}
+
+constexpr bool match_v32hu(__m512i _v, unsigned short __e00, unsigned short __e01, unsigned short __e02, unsigned short __e03, unsigned short __e04, unsigned short __e05, unsigned short __e06, unsigned short __e07,
+ unsigned short __e08, unsigned short __e09, unsigned short __e10, unsigned short __e11, unsigned short __e12, unsigned short __e13, unsigned short __e14, unsigned short __e15,
+ unsigned short __e16, unsigned short __e17, unsigned short __e18, unsigned short __e19, unsigned short __e20, unsigned short __e21, unsigned short __e22, unsigned short __e23,
+ unsigned short __e24, unsigned short __e25, unsigned short __e26, unsigned short __e27, unsigned short __e28, unsigned short __e29, unsigned short __e30, unsigned short __e31) {
+ __v32hu v = (__v32hu)_v;
+ return v[ 0] == __e00 && v[ 1] == __e01 && v[ 2] == __e02 && v[ 3] == __e03 && v[ 4] == __e04 && v[ 5] == __e05 && v[ 6] == __e06 && v[ 7] == __e07 &&
+ v[ 8] == __e08 && v[ 9] == __e09 && v[10] == __e10 && v[11] == __e11 && v[12] == __e12 && v[13] == __e13 && v[14] == __e14 && v[15] == __e15 &&
+ v[16] == __e16 && v[17] == __e17 && v[18] == __e18 && v[19] == __e19 && v[20] == __e20 && v[21] == __e21 && v[22] == __e22 && v[23] == __e23 &&
+ v[24] == __e24 && v[25] == __e25 && v[26] == __e26 && v[27] == __e27 && v[28] == __e28 && v[29] == __e29 && v[30] == __e30 && v[31] == __e31;
+}
+
+constexpr bool match_v64qi(__m512i _v, signed char __e00, signed char __e01, signed char __e02, signed char __e03, signed char __e04, signed char __e05, signed char __e06, signed char __e07,
+ signed char __e08, signed char __e09, signed char __e10, signed char __e11, signed char __e12, signed char __e13, signed char __e14, signed char __e15,
+ signed char __e16, signed char __e17, signed char __e18, signed char __e19, signed char __e20, signed char __e21, signed char __e22, signed char __e23,
+ signed char __e24, signed char __e25, signed char __e26, signed char __e27, signed char __e28, signed char __e29, signed char __e30, signed char __e31,
+ signed char __e32, signed char __e33, signed char __e34, signed char __e35, signed char __e36, signed char __e37, signed char __e38, signed char __e39,
+ signed char __e40, signed char __e41, signed char __e42, signed char __e43, signed char __e44, signed char __e45, signed char __e46, signed char __e47,
+ signed char __e48, signed char __e49, signed char __e50, signed char __e51, signed char __e52, signed char __e53, signed char __e54, signed char __e55,
+ signed char __e56, signed char __e57, signed char __e58, signed char __e59, signed char __e60, signed char __e61, signed char __e62, signed char __e63) {
+ __v64qs v = (__v64qs)_v;
+ return v[ 0] == __e00 && v[ 1] == __e01 && v[ 2] == __e02 && v[ 3] == __e03 && v[ 4] == __e04 && v[ 5] == __e05 && v[ 6] == __e06 && v[ 7] == __e07 &&
+ v[ 8] == __e08 && v[ 9] == __e09 && v[10] == __e10 && v[11] == __e11 && v[12] == __e12 && v[13] == __e13 && v[14] == __e14 && v[15] == __e15 &&
+ v[16] == __e16 && v[17] == __e17 && v[18] == __e18 && v[19] == __e19 && v[20] == __e20 && v[21] == __e21 && v[22] == __e22 && v[23] == __e23 &&
+ v[24] == __e24 && v[25] == __e25 && v[26] == __e26 && v[27] == __e27 && v[28] == __e28 && v[29] == __e29 && v[30] == __e30 && v[31] == __e31 &&
+ v[32] == __e32 && v[33] == __e33 && v[34] == __e34 && v[35] == __e35 && v[36] == __e36 && v[37] == __e37 && v[38] == __e38 && v[39] == __e39 &&
+ v[40] == __e40 && v[41] == __e41 && v[42] == __e42 && v[43] == __e43 && v[44] == __e44 && v[45] == __e45 && v[46] == __e46 && v[47] == __e47 &&
+ v[48] == __e48 && v[49] == __e49 && v[50] == __e50 && v[51] == __e51 && v[52] == __e52 && v[53] == __e53 && v[54] == __e54 && v[55] == __e55 &&
+ v[56] == __e56 && v[57] == __e57 && v[58] == __e58 && v[59] == __e59 && v[60] == __e60 && v[61] == __e61 && v[62] == __e62 && v[63] == __e63;
+}
+
+constexpr bool match_v64qu(__m512i _v, unsigned char __e00, unsigned char __e01, unsigned char __e02, unsigned char __e03, unsigned char __e04, unsigned char __e05, unsigned char __e06, unsigned char __e07,
+ unsigned char __e08, unsigned char __e09, unsigned char __e10, unsigned char __e11, unsigned char __e12, unsigned char __e13, unsigned char __e14, unsigned char __e15,
+ unsigned char __e16, unsigned char __e17, unsigned char __e18, unsigned char __e19, unsigned char __e20, unsigned char __e21, unsigned char __e22, unsigned char __e23,
+ unsigned char __e24, unsigned char __e25, unsigned char __e26, unsigned char __e27, unsigned char __e28, unsigned char __e29, unsigned char __e30, unsigned char __e31,
+ unsigned char __e32, unsigned char __e33, unsigned char __e34, unsigned char __e35, unsigned char __e36, unsigned char __e37, unsigned char __e38, unsigned char __e39,
+ unsigned char __e40, unsigned char __e41, unsigned char __e42, unsigned char __e43, unsigned char __e44, unsigned char __e45, unsigned char __e46, unsigned char __e47,
+ unsigned char __e48, unsigned char __e49, unsigned char __e50, unsigned char __e51, unsigned char __e52, unsigned char __e53, unsigned char __e54, unsigned char __e55,
+ unsigned char __e56, unsigned char __e57, unsigned char __e58, unsigned char __e59, unsigned char __e60, unsigned char __e61, unsigned char __e62, unsigned char __e63) {
+ __v64qu v = (__v64qu)_v;
+ return v[ 0] == __e00 && v[ 1] == __e01 && v[ 2] == __e02 && v[ 3] == __e03 && v[ 4] == __e04 && v[ 5] == __e05 && v[ 6] == __e06 && v[ 7] == __e07 &&
+ v[ 8] == __e08 && v[ 9] == __e09 && v[10] == __e10 && v[11] == __e11 && v[12] == __e12 && v[13] == __e13 && v[14] == __e14 && v[15] == __e15 &&
+ v[16] == __e16 && v[17] == __e17 && v[18] == __e18 && v[19] == __e19 && v[20] == __e20 && v[21] == __e21 && v[22] == __e22 && v[23] == __e23 &&
+ v[24] == __e24 && v[25] == __e25 && v[26] == __e26 && v[27] == __e27 && v[28] == __e28 && v[29] == __e29 && v[30] == __e30 && v[31] == __e31 &&
+ v[32] == __e32 && v[33] == __e33 && v[34] == __e34 && v[35] == __e35 && v[36] == __e36 && v[37] == __e37 && v[38] == __e38 && v[39] == __e39 &&
+ v[40] == __e40 && v[41] == __e41 && v[42] == __e42 && v[43] == __e43 && v[44] == __e44 && v[45] == __e45 && v[46] == __e46 && v[47] == __e47 &&
+ v[48] == __e48 && v[49] == __e49 && v[50] == __e50 && v[51] == __e51 && v[52] == __e52 && v[53] == __e53 && v[54] == __e54 && v[55] == __e55 &&
+ v[56] == __e56 && v[57] == __e57 && v[58] == __e58 && v[59] == __e59 && v[60] == __e60 && v[61] == __e61 && v[62] == __e62 && v[63] == __e63;
+}
+
#define TEST_CONSTEXPR(...) static_assert(__VA_ARGS__)
#else
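A note on the pattern above: TEST_CONSTEXPR is what turns each expectation into a
compile-time check wherever static_assert is usable; the #else branch for other build
modes is truncated here, but presumably defines the macro away. A minimal sketch of
the mechanism, reusing a check that appears later in this patch:

    #define TEST_CONSTEXPR(...) static_assert(__VA_ARGS__)
    TEST_CONSTEXPR(__lzcnt16(0x0010) == 11);  // the build breaks if constant folding is wrong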
diff --git a/clang/test/CodeGen/X86/f16c-builtins-constrained.c b/clang/test/CodeGen/X86/f16c-builtins-constrained.c
index bbd4d8f..50afea8 100644
--- a/clang/test/CodeGen/X86/f16c-builtins-constrained.c
+++ b/clang/test/CodeGen/X86/f16c-builtins-constrained.c
@@ -8,17 +8,8 @@
float test_cvtsh_ss(unsigned short a) {
// CHECK-LABEL: test_cvtsh_ss
- // CHECK: insertelement <8 x i16> poison, i16 %{{.*}}, i32 0
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 1
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 2
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 3
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 4
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 5
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 6
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 7
- // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- // CHECK: call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
- // CHECK: extractelement <4 x float> %{{.*}}, i32 0
+ // CHECK: [[CONV:%.*]] = call {{.*}}float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+ // CHECK: ret float [[CONV]]
return _cvtsh_ss(a);
}
@@ -38,7 +29,7 @@ unsigned short test_cvtss_sh(float a) {
__m128 test_mm_cvtph_ps(__m128i a) {
// CHECK-LABEL: test_mm_cvtph_ps
- // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: call {{.*}}<4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
return _mm_cvtph_ps(a);
}
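The tightened checks reflect that the scalar conversion now folds to a single
half-to-float extension rather than widening through an 8 x i16 vector. A rough
scalar sketch of the shape the IR now takes (an assumed equivalent, not the actual
header implementation):

    static inline float cvtsh_ss_sketch(unsigned short h) {
      return (float)__builtin_bit_cast(_Float16, h);  // one fpext half -> float
    }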
diff --git a/clang/test/CodeGen/X86/f16c-builtins.c b/clang/test/CodeGen/X86/f16c-builtins.c
index 3c6d64c..6a69627 100644
--- a/clang/test/CodeGen/X86/f16c-builtins.c
+++ b/clang/test/CodeGen/X86/f16c-builtins.c
@@ -3,22 +3,18 @@
// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +f16c -emit-llvm -o - -Wall -Werror | FileCheck %s
// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-apple-darwin -target-feature +f16c -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +f16c -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386-apple-darwin -target-feature +f16c -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +f16c -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-apple-darwin -target-feature +f16c -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <immintrin.h>
float test_cvtsh_ss(unsigned short a) {
// CHECK-LABEL: test_cvtsh_ss
- // CHECK: insertelement <8 x i16> poison, i16 %{{.*}}, i32 0
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 1
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 2
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 3
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 4
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 5
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 6
- // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 7
- // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- // CHECK: fpext <4 x half> %{{.*}} to <4 x float>
- // CHECK: extractelement <4 x float> %{{.*}}, i32 0
+ // CHECK: [[CONV:%.*]] = fpext half %{{.*}} to float
+ // CHECK: ret float [[CONV]]
return _cvtsh_ss(a);
}
@@ -35,7 +31,7 @@ unsigned short test_cvtss_sh(float a) {
__m128 test_mm_cvtph_ps(__m128i a) {
// CHECK-LABEL: test_mm_cvtph_ps
- // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: fpext <4 x half> %{{.*}} to <4 x float>
return _mm_cvtph_ps(a);
}
diff --git a/clang/test/CodeGen/X86/fma-builtins.c b/clang/test/CodeGen/X86/fma-builtins.c
index aa17dcc..5445e50 100644
--- a/clang/test/CodeGen/X86/fma-builtins.c
+++ b/clang/test/CodeGen/X86/fma-builtins.c
@@ -1,22 +1,30 @@
-// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +fma -emit-llvm -o - | FileCheck %s
-// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +fma -emit-llvm -o - | FileCheck %s
-// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +fma -emit-llvm -o - | FileCheck %s
-// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +fma -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +fma -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +fma -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +fma -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +fma -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +fma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +fma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +fma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +fma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__m128 test_mm_fmadd_ps(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_fmadd_ps
// CHECK: call {{.*}}<4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_fmadd_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m128(_mm_fmadd_ps((__m128){ 0.0f, 1.0f, -2.0f, -4.0f }, (__m128){ -0.0f, 4.0f, 2.0f, 1.0f }, (__m128){ -0.0f, -4.0f, 2.0f, 1.0f }), -0.0f, 0.0f, -2.0f, -3.0f));
__m128d test_mm_fmadd_pd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_fmadd_pd
// CHECK: call {{.*}}<2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
return _mm_fmadd_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m128d(_mm_fmadd_pd((__m128d){ 0.0, -4.0 }, (__m128d){ -0.0, 1.0 }, (__m128d){ -0.0, 1.0 }), -0.0, -3.0));
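+// (Illustrative note) Lane 0 expects -0.0 because 0.0 * -0.0 yields -0.0 and
+// -0.0 + -0.0 stays -0.0; the bit-pattern matchers distinguish this from +0.0.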
__m128 test_mm_fmadd_ss(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_fmadd_ss
@@ -44,6 +52,7 @@ __m128 test_mm_fmsub_ps(__m128 a, __m128 b, __m128 c) {
// CHECK: call {{.*}}<4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_fmsub_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m128(_mm_fmsub_ps((__m128){ 0.0f, 1.0f, -2.0f, -4.0f }, (__m128){ -0.0f, 4.0f, 2.0f, 1.0f }, (__m128){ -0.0f, -4.0f, 2.0f, 1.0f }), 0.0f, 8.0f, -6.0f, -5.0f));
__m128d test_mm_fmsub_pd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_fmsub_pd
@@ -51,6 +60,7 @@ __m128d test_mm_fmsub_pd(__m128d a, __m128d b, __m128d c) {
// CHECK: call {{.*}}<2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
return _mm_fmsub_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m128d(_mm_fmsub_pd((__m128d){ 0.0, -4.0 }, (__m128d){ -0.0, 1.0 }, (__m128d){ -0.0, 1.0 }), 0.0, -5.0));
__m128 test_mm_fmsub_ss(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_fmsub_ss
@@ -80,6 +90,7 @@ __m128 test_mm_fnmadd_ps(__m128 a, __m128 b, __m128 c) {
// CHECK: call {{.*}}<4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_fnmadd_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m128(_mm_fnmadd_ps((__m128){ 0.0f, 1.0f, -2.0f, -4.0f }, (__m128){ -0.0f, 4.0f, 2.0f, 1.0f }, (__m128){ -0.0f, -4.0f, 2.0f, 1.0f }), 0.0f, -8.0f, 6.0f, 5.0f));
__m128d test_mm_fnmadd_pd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_fnmadd_pd
@@ -87,6 +98,7 @@ __m128d test_mm_fnmadd_pd(__m128d a, __m128d b, __m128d c) {
// CHECK: call {{.*}}<2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
return _mm_fnmadd_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m128d(_mm_fnmadd_pd((__m128d){ 0.0, -4.0 }, (__m128d){ -0.0, 1.0 }, (__m128d){ -0.0, 1.0 }), 0.0, 5.0));
__m128 test_mm_fnmadd_ss(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_fnmadd_ss
@@ -117,6 +129,7 @@ __m128 test_mm_fnmsub_ps(__m128 a, __m128 b, __m128 c) {
// CHECK: call {{.*}}<4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_fnmsub_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m128(_mm_fnmsub_ps((__m128){ 0.0f, 1.0f, -2.0f, -4.0f }, (__m128){ -0.0f, 4.0f, 2.0f, 1.0f }, (__m128){ -0.0f, -4.0f, 2.0f, 1.0f }), 0.0f, 0.0f, 2.0f, 3.0f));
__m128d test_mm_fnmsub_pd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_fnmsub_pd
@@ -125,6 +138,7 @@ __m128d test_mm_fnmsub_pd(__m128d a, __m128d b, __m128d c) {
// CHECK: call {{.*}}<2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
return _mm_fnmsub_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m128d(_mm_fnmsub_pd((__m128d){ 0.0, -4.0 }, (__m128d){ -0.0, 1.0 }, (__m128d){ -0.0, 1.0 }), 0.0, 3.0));
__m128 test_mm_fnmsub_ss(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_fnmsub_ss
@@ -183,12 +197,14 @@ __m256 test_mm256_fmadd_ps(__m256 a, __m256 b, __m256 c) {
// CHECK: call {{.*}}<8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
return _mm256_fmadd_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m256(_mm256_fmadd_ps((__m256){ 0.0f, 1.0f, -2.0f, -4.0f, -8.0f, -16.0f, 32.0f, 64.0f }, (__m256){ -0.0f, 64.0f, 32.0f, 16.0f, -8.0f, -4.0f, -2.0f, -1.0f }, (__m256){ -0.0f, -4.0f, 2.0f, 1.0f, -8.0f, -16.0f, 32.0f, 64.0f }), -0.0f, 60.0f, -62.0f, -63.0f, +56.0f, +48.0f, -32.0f, 0.0f));
__m256d test_mm256_fmadd_pd(__m256d a, __m256d b, __m256d c) {
// CHECK-LABEL: test_mm256_fmadd_pd
// CHECK: call {{.*}}<4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
return _mm256_fmadd_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m256d(_mm256_fmadd_pd((__m256d){ 0.0, 1.0, -2.0, -4.0 }, (__m256d){ -0.0, 4.0, 2.0, 1.0 }, (__m256d){ -0.0, -4.0, 2.0, 1.0 }), -0.0, 0.0, -2.0, -3.0));
__m256 test_mm256_fmsub_ps(__m256 a, __m256 b, __m256 c) {
// CHECK-LABEL: test_mm256_fmsub_ps
@@ -196,6 +212,7 @@ __m256 test_mm256_fmsub_ps(__m256 a, __m256 b, __m256 c) {
// CHECK: call {{.*}}<8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
return _mm256_fmsub_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m256(_mm256_fmsub_ps((__m256){ 0.0f, 1.0f, -2.0f, -4.0f, -8.0f, -16.0f, 32.0f, 64.0f }, (__m256){ -0.0f, 64.0f, 32.0f, 16.0f, -8.0f, -4.0f, -2.0f, -1.0f }, (__m256){ -0.0f, -4.0f, 2.0f, 1.0f, -8.0f, -16.0f, 32.0f, 64.0f }), 0.0f, 68.0f, -66.0f, -65.0f, 72.0f, 80.0f, -96.0f, -128.0f));
__m256d test_mm256_fmsub_pd(__m256d a, __m256d b, __m256d c) {
// CHECK-LABEL: test_mm256_fmsub_pd
@@ -203,6 +220,7 @@ __m256d test_mm256_fmsub_pd(__m256d a, __m256d b, __m256d c) {
// CHECK: call {{.*}}<4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
return _mm256_fmsub_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m256d(_mm256_fmsub_pd((__m256d){ 0.0, 1.0, -2.0, -4.0 }, (__m256d){ -0.0, 4.0, 2.0, 1.0 }, (__m256d){ -0.0, -4.0, 2.0, 1.0 }), 0.0, 8.0, -6.0, -5.0));
__m256 test_mm256_fnmadd_ps(__m256 a, __m256 b, __m256 c) {
// CHECK-LABEL: test_mm256_fnmadd_ps
@@ -210,6 +228,7 @@ __m256 test_mm256_fnmadd_ps(__m256 a, __m256 b, __m256 c) {
// CHECK: call {{.*}}<8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
return _mm256_fnmadd_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m256(_mm256_fnmadd_ps((__m256){ 0.0f, 1.0f, -2.0f, -4.0f, -8.0f, -16.0f, 32.0f, 64.0f }, (__m256){ -0.0f, 64.0f, 32.0f, 16.0f, -8.0f, -4.0f, -2.0f, -1.0f }, (__m256){ -0.0f, -4.0f, 2.0f, 1.0f, -8.0f, -16.0f, 32.0f, 64.0f }), 0.0f, -68.0f, 66.0f, 65.0f, -72.0f, -80.0f, 96.0f, 128.0f));
__m256d test_mm256_fnmadd_pd(__m256d a, __m256d b, __m256d c) {
// CHECK-LABEL: test_mm256_fnmadd_pd
@@ -217,6 +236,7 @@ __m256d test_mm256_fnmadd_pd(__m256d a, __m256d b, __m256d c) {
// CHECK: call {{.*}}<4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
return _mm256_fnmadd_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m256d(_mm256_fnmadd_pd((__m256d){ 0.0, 1.0, -2.0, -4.0 }, (__m256d){ -0.0, 4.0, 2.0, 1.0 }, (__m256d){ -0.0, -4.0, 2.0, 1.0 }), 0.0, -8.0, 6.0, 5.0));
__m256 test_mm256_fnmsub_ps(__m256 a, __m256 b, __m256 c) {
// CHECK-LABEL: test_mm256_fnmsub_ps
@@ -225,6 +245,7 @@ __m256 test_mm256_fnmsub_ps(__m256 a, __m256 b, __m256 c) {
// CHECK: call {{.*}}<8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
return _mm256_fnmsub_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m256(_mm256_fnmsub_ps((__m256){ 0.0f, 1.0f, -2.0f, -4.0f, -8.0f, -16.0f, 32.0f, 64.0f }, (__m256){ -0.0f, 64.0f, 32.0f, 16.0f, -8.0f, -4.0f, -2.0f, -1.0f }, (__m256){ -0.0f, -4.0f, 2.0f, 1.0f, -8.0f, -16.0f, 32.0f, 64.0f }), 0.0f, -60.0f, 62.0f, 63.0f, -56.0f, -48.0f, 32.0f, 0.0f));
__m256d test_mm256_fnmsub_pd(__m256d a, __m256d b, __m256d c) {
// CHECK-LABEL: test_mm256_fnmsub_pd
@@ -233,6 +254,7 @@ __m256d test_mm256_fnmsub_pd(__m256d a, __m256d b, __m256d c) {
// CHECK: call {{.*}}<4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
return _mm256_fnmsub_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m256d(_mm256_fnmsub_pd((__m256d){ 0.0, 1.0, -2.0, -4.0 }, (__m256d){ -0.0, 4.0, 2.0, 1.0 }, (__m256d){ -0.0, -4.0, 2.0, 1.0 }), 0.0, 0.0, 2.0, 3.0));
__m256 test_mm256_fmaddsub_ps(__m256 a, __m256 b, __m256 c) {
// CHECK-LABEL: test_mm256_fmaddsub_ps
diff --git a/clang/test/CodeGen/X86/fma4-builtins.c b/clang/test/CodeGen/X86/fma4-builtins.c
index ccdba8f..fb449d5 100644
--- a/clang/test/CodeGen/X86/fma4-builtins.c
+++ b/clang/test/CodeGen/X86/fma4-builtins.c
@@ -3,20 +3,28 @@
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +fma4 -emit-llvm -o - -Wall -Werror | FileCheck %s
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +fma4 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +fma4 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +fma4 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +fma4 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +fma4 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <x86intrin.h>
+#include "builtin_test_helpers.h"
__m128 test_mm_macc_ps(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_macc_ps
// CHECK: call {{.*}}<4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_macc_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m128(_mm_macc_ps((__m128){ 0.0f, 1.0f, -2.0f, -4.0f }, (__m128){ -0.0f, 4.0f, 2.0f, 1.0f }, (__m128){ -0.0f, -4.0f, 2.0f, 1.0f }), -0.0f, 0.0f, -2.0f, -3.0f));
__m128d test_mm_macc_pd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_macc_pd
// CHECK: call {{.*}}<2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
return _mm_macc_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m128d(_mm_macc_pd((__m128d){ 0.0, -4.0 }, (__m128d){ -0.0, 1.0 }, (__m128d){ -0.0, 1.0 }), -0.0, -3.0));
__m128 test_mm_macc_ss(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_macc_ss
@@ -44,6 +52,7 @@ __m128 test_mm_msub_ps(__m128 a, __m128 b, __m128 c) {
// CHECK: call {{.*}}<4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_msub_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m128(_mm_msub_ps((__m128){ 0.0f, 1.0f, -2.0f, -4.0f }, (__m128){ -0.0f, 4.0f, 2.0f, 1.0f }, (__m128){ -0.0f, -4.0f, 2.0f, 1.0f }), 0.0f, 8.0f, -6.0f, -5.0f));
__m128d test_mm_msub_pd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_msub_pd
@@ -51,6 +60,7 @@ __m128d test_mm_msub_pd(__m128d a, __m128d b, __m128d c) {
// CHECK: call {{.*}}<2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
return _mm_msub_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m128d(_mm_msub_pd((__m128d){ 0.0, -4.0 }, (__m128d){ -0.0, 1.0 }, (__m128d){ -0.0, 1.0 }), 0.0, -5.0));
__m128 test_mm_msub_ss(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_msub_ss
@@ -80,6 +90,7 @@ __m128 test_mm_nmacc_ps(__m128 a, __m128 b, __m128 c) {
// CHECK: call {{.*}}<4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_nmacc_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m128(_mm_nmacc_ps((__m128){ 0.0f, 1.0f, -2.0f, -4.0f }, (__m128){ -0.0f, 4.0f, 2.0f, 1.0f }, (__m128){ -0.0f, -4.0f, 2.0f, 1.0f }), 0.0f, -8.0f, 6.0f, 5.0f));
__m128d test_mm_nmacc_pd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_nmacc_pd
@@ -87,6 +98,7 @@ __m128d test_mm_nmacc_pd(__m128d a, __m128d b, __m128d c) {
// CHECK: call {{.*}}<2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
return _mm_nmacc_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m128d(_mm_nmacc_pd((__m128d){ 0.0, -4.0 }, (__m128d){ -0.0, 1.0 }, (__m128d){ -0.0, 1.0 }), 0.0, 5.0));
__m128 test_mm_nmacc_ss(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_nmacc_ss
@@ -117,6 +129,7 @@ __m128 test_mm_nmsub_ps(__m128 a, __m128 b, __m128 c) {
// CHECK: call {{.*}}<4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_nmsub_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m128(_mm_nmsub_ps((__m128){ 0.0f, 1.0f, -2.0f, -4.0f }, (__m128){ -0.0f, 4.0f, 2.0f, 1.0f }, (__m128){ -0.0f, -4.0f, 2.0f, 1.0f }), 0.0f, 0.0f, 2.0f, 3.0f));
__m128d test_mm_nmsub_pd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_nmsub_pd
@@ -125,6 +138,7 @@ __m128d test_mm_nmsub_pd(__m128d a, __m128d b, __m128d c) {
// CHECK: call {{.*}}<2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
return _mm_nmsub_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m128d(_mm_nmsub_pd((__m128d){ 0.0, -4.0 }, (__m128d){ -0.0, 1.0 }, (__m128d){ -0.0, 1.0 }), 0.0, 3.0));
__m128 test_mm_nmsub_ss(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_nmsub_ss
@@ -183,12 +197,14 @@ __m256 test_mm256_macc_ps(__m256 a, __m256 b, __m256 c) {
// CHECK: call {{.*}}<8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
return _mm256_macc_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m256(_mm256_macc_ps((__m256){ 0.0f, 1.0f, -2.0f, -4.0f, -8.0f, -16.0f, 32.0f, 64.0f }, (__m256){ -0.0f, 64.0f, 32.0f, 16.0f, -8.0f, -4.0f, -2.0f, -1.0f }, (__m256){ -0.0f, -4.0f, 2.0f, 1.0f, -8.0f, -16.0f, 32.0f, 64.0f }), -0.0f, 60.0f, -62.0f, -63.0f, +56.0f, +48.0f, -32.0f, 0.0f));
__m256d test_mm256_macc_pd(__m256d a, __m256d b, __m256d c) {
// CHECK-LABEL: test_mm256_macc_pd
// CHECK: call {{.*}}<4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
return _mm256_macc_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m256d(_mm256_macc_pd((__m256d){ 0.0, 1.0, -2.0, -4.0 }, (__m256d){ -0.0, 4.0, 2.0, 1.0 }, (__m256d){ -0.0, -4.0, 2.0, 1.0 }), -0.0, 0.0, -2.0, -3.0));
__m256 test_mm256_msub_ps(__m256 a, __m256 b, __m256 c) {
// CHECK-LABEL: test_mm256_msub_ps
@@ -196,6 +212,7 @@ __m256 test_mm256_msub_ps(__m256 a, __m256 b, __m256 c) {
// CHECK: call {{.*}}<8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
return _mm256_msub_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m256(_mm256_msub_ps((__m256){ 0.0f, 1.0f, -2.0f, -4.0f, -8.0f, -16.0f, 32.0f, 64.0f }, (__m256){ -0.0f, 64.0f, 32.0f, 16.0f, -8.0f, -4.0f, -2.0f, -1.0f }, (__m256){ -0.0f, -4.0f, 2.0f, 1.0f, -8.0f, -16.0f, 32.0f, 64.0f }), 0.0f, 68.0f, -66.0f, -65.0f, 72.0f, 80.0f, -96.0f, -128.0f));
__m256d test_mm256_msub_pd(__m256d a, __m256d b, __m256d c) {
// CHECK-LABEL: test_mm256_msub_pd
@@ -203,6 +220,7 @@ __m256d test_mm256_msub_pd(__m256d a, __m256d b, __m256d c) {
// CHECK: call {{.*}}<4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
return _mm256_msub_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m256d(_mm256_msub_pd((__m256d){ 0.0, 1.0, -2.0, -4.0 }, (__m256d){ -0.0, 4.0, 2.0, 1.0 }, (__m256d){ -0.0, -4.0, 2.0, 1.0 }), 0.0, 8.0, -6.0, -5.0));
__m256 test_mm256_nmacc_ps(__m256 a, __m256 b, __m256 c) {
// CHECK-LABEL: test_mm256_nmacc_ps
@@ -210,6 +228,7 @@ __m256 test_mm256_nmacc_ps(__m256 a, __m256 b, __m256 c) {
// CHECK: call {{.*}}<8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
return _mm256_nmacc_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m256(_mm256_nmacc_ps((__m256){ 0.0f, 1.0f, -2.0f, -4.0f, -8.0f, -16.0f, 32.0f, 64.0f }, (__m256){ -0.0f, 64.0f, 32.0f, 16.0f, -8.0f, -4.0f, -2.0f, -1.0f }, (__m256){ -0.0f, -4.0f, 2.0f, 1.0f, -8.0f, -16.0f, 32.0f, 64.0f }), 0.0f, -68.0f, 66.0f, 65.0f, -72.0f, -80.0f, 96.0f, 128.0f));
__m256d test_mm256_nmacc_pd(__m256d a, __m256d b, __m256d c) {
// CHECK-LABEL: test_mm256_nmacc_pd
@@ -217,6 +236,7 @@ __m256d test_mm256_nmacc_pd(__m256d a, __m256d b, __m256d c) {
// CHECK: call {{.*}}<4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
return _mm256_nmacc_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m256d(_mm256_nmacc_pd((__m256d){ 0.0, 1.0, -2.0, -4.0 }, (__m256d){ -0.0, 4.0, 2.0, 1.0 }, (__m256d){ -0.0, -4.0, 2.0, 1.0 }), 0.0, -8.0, 6.0, 5.0));
__m256 test_mm256_nmsub_ps(__m256 a, __m256 b, __m256 c) {
// CHECK-LABEL: test_mm256_nmsub_ps
@@ -225,6 +245,7 @@ __m256 test_mm256_nmsub_ps(__m256 a, __m256 b, __m256 c) {
// CHECK: call {{.*}}<8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
return _mm256_nmsub_ps(a, b, c);
}
+TEST_CONSTEXPR(match_m256(_mm256_nmsub_ps((__m256){ 0.0f, 1.0f, -2.0f, -4.0f, -8.0f, -16.0f, 32.0f, 64.0f }, (__m256){ -0.0f, 64.0f, 32.0f, 16.0f, -8.0f, -4.0f, -2.0f, -1.0f }, (__m256){ -0.0f, -4.0f, 2.0f, 1.0f, -8.0f, -16.0f, 32.0f, 64.0f }), 0.0f, -60.0f, 62.0f, 63.0f, -56.0f, -48.0f, 32.0f, 0.0f));
__m256d test_mm256_nmsub_pd(__m256d a, __m256d b, __m256d c) {
// CHECK-LABEL: test_mm256_nmsub_pd
@@ -233,6 +254,7 @@ __m256d test_mm256_nmsub_pd(__m256d a, __m256d b, __m256d c) {
// CHECK: call {{.*}}<4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
return _mm256_nmsub_pd(a, b, c);
}
+TEST_CONSTEXPR(match_m256d(_mm256_nmsub_pd((__m256d){ 0.0, 1.0, -2.0, -4.0 }, (__m256d){ -0.0, 4.0, 2.0, 1.0 }, (__m256d){ -0.0, -4.0, 2.0, 1.0 }), 0.0, 0.0, 2.0, 3.0));
__m256 test_mm256_maddsub_ps(__m256 a, __m256 b, __m256 c) {
// CHECK-LABEL: test_mm256_maddsub_ps
diff --git a/clang/test/CodeGen/X86/lzcnt-builtins.c b/clang/test/CodeGen/X86/lzcnt-builtins.c
index 212155f..eb02c11 100644
--- a/clang/test/CodeGen/X86/lzcnt-builtins.c
+++ b/clang/test/CodeGen/X86/lzcnt-builtins.c
@@ -1,59 +1,54 @@
// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding %s -triple=x86_64-apple-darwin -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding %s -triple=x86_64-apple-darwin -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <immintrin.h>
+#include "builtin_test_helpers.h"
unsigned short test__lzcnt16(unsigned short __X)
{
// CHECK: @llvm.ctlz.i16(i16 %{{.*}}, i1 false)
return __lzcnt16(__X);
}
+TEST_CONSTEXPR(__lzcnt16(0x0000) == 16);
+TEST_CONSTEXPR(__lzcnt16(0x8000) == 0);
+TEST_CONSTEXPR(__lzcnt16(0x0010) == 11);
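+// (Illustrative note) Unlike __builtin_clz, lzcnt is well defined at zero and
+// returns the operand width, hence the == 16 expectation above.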
unsigned int test_lzcnt32(unsigned int __X)
{
// CHECK: @llvm.ctlz.i32(i32 %{{.*}}, i1 false)
return __lzcnt32(__X);
}
+TEST_CONSTEXPR(__lzcnt32(0x00000000) == 32);
+TEST_CONSTEXPR(__lzcnt32(0x80000000) == 0);
+TEST_CONSTEXPR(__lzcnt32(0x00000010) == 27);
unsigned long long test__lzcnt64(unsigned long long __X)
{
// CHECK: @llvm.ctlz.i64(i64 %{{.*}}, i1 false)
return __lzcnt64(__X);
}
+TEST_CONSTEXPR(__lzcnt64(0x0000000000000000ULL) == 64);
+TEST_CONSTEXPR(__lzcnt64(0x8000000000000000ULL) == 0);
+TEST_CONSTEXPR(__lzcnt64(0x0000000100000000ULL) == 31);
unsigned int test_lzcnt_u32(unsigned int __X)
{
// CHECK: @llvm.ctlz.i32(i32 %{{.*}}, i1 false)
return _lzcnt_u32(__X);
}
+TEST_CONSTEXPR(_lzcnt_u32(0x00000000) == 32);
+TEST_CONSTEXPR(_lzcnt_u32(0x80000000) == 0);
+TEST_CONSTEXPR(_lzcnt_u32(0x00000010) == 27);
unsigned long long test__lzcnt_u64(unsigned long long __X)
{
// CHECK: @llvm.ctlz.i64(i64 %{{.*}}, i1 false)
return _lzcnt_u64(__X);
}
-
-
-// Test constexpr handling.
-#if defined(__cplusplus) && (__cplusplus >= 201103L)
-char lzcnt16_0[__lzcnt16(0x0000) == 16 ? 1 : -1];
-char lzcnt16_1[__lzcnt16(0x8000) == 0 ? 1 : -1];
-char lzcnt16_2[__lzcnt16(0x0010) == 11 ? 1 : -1];
-
-char lzcnt32_0[__lzcnt32(0x00000000) == 32 ? 1 : -1];
-char lzcnt32_1[__lzcnt32(0x80000000) == 0 ? 1 : -1];
-char lzcnt32_2[__lzcnt32(0x00000010) == 27 ? 1 : -1];
-
-char lzcnt64_0[__lzcnt64(0x0000000000000000ULL) == 64 ? 1 : -1];
-char lzcnt64_1[__lzcnt64(0x8000000000000000ULL) == 0 ? 1 : -1];
-char lzcnt64_2[__lzcnt64(0x0000000100000000ULL) == 31 ? 1 : -1];
-
-char lzcntu32_0[_lzcnt_u32(0x00000000) == 32 ? 1 : -1];
-char lzcntu32_1[_lzcnt_u32(0x80000000) == 0 ? 1 : -1];
-char lzcntu32_2[_lzcnt_u32(0x00000010) == 27 ? 1 : -1];
-
-char lzcntu64_0[_lzcnt_u64(0x0000000000000000ULL) == 64 ? 1 : -1];
-char lzcntu64_1[_lzcnt_u64(0x8000000000000000ULL) == 0 ? 1 : -1];
-char lzcntu64_2[_lzcnt_u64(0x0000000100000000ULL) == 31 ? 1 : -1];
-#endif
+TEST_CONSTEXPR(_lzcnt_u64(0x0000000000000000ULL) == 64);
+TEST_CONSTEXPR(_lzcnt_u64(0x8000000000000000ULL) == 0);
+TEST_CONSTEXPR(_lzcnt_u64(0x0000000100000000ULL) == 31);
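The deleted block used the classic negative-array-size idiom for compile-time checks,
where a false condition yields an ill-formed declaration:

    char lzcnt16_1[__lzcnt16(0x8000) == 0 ? 1 : -1];  // ill-formed if the check fails

TEST_CONSTEXPR expresses the same conditions as static_assert, and the added RUN lines
exercise them under the experimental constant interpreter as well.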
diff --git a/clang/test/CodeGen/X86/mmx-builtins.c b/clang/test/CodeGen/X86/mmx-builtins.c
index 52cbe45..7bd2475 100644
--- a/clang/test/CodeGen/X86/mmx-builtins.c
+++ b/clang/test/CodeGen/X86/mmx-builtins.c
@@ -7,6 +7,15 @@
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s --implicit-check-not=x86mmx
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s --implicit-check-not=x86mmx
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +ssse3 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --implicit-check-not=x86mmx
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +ssse3 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --implicit-check-not=x86mmx
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --implicit-check-not=x86mmx
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --implicit-check-not=x86mmx
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +ssse3 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --implicit-check-not=x86mmx
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +ssse3 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --implicit-check-not=x86mmx
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --implicit-check-not=x86mmx
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --implicit-check-not=x86mmx
+
#include <immintrin.h>
#include "builtin_test_helpers.h"
@@ -16,66 +25,77 @@ __m64 test_mm_abs_pi8(__m64 a) {
// CHECK: call <8 x i8> @llvm.abs.v8i8(
return _mm_abs_pi8(a);
}
+TEST_CONSTEXPR(match_v8qi(_mm_abs_pi8((__m64)(__v8qs){-3, +2, -1, 0, +1, -2, +3, -4}), 3, 2, 1, 0, 1, 2, 3, 4));
__m64 test_mm_abs_pi16(__m64 a) {
// CHECK-LABEL: test_mm_abs_pi16
// CHECK: call <4 x i16> @llvm.abs.v4i16(
return _mm_abs_pi16(a);
}
+TEST_CONSTEXPR(match_v4hi(_mm_abs_pi16((__m64)(__v4hi){+1, -2, +3, -4}), 1, 2, 3, 4));
__m64 test_mm_abs_pi32(__m64 a) {
// CHECK-LABEL: test_mm_abs_pi32
// CHECK: call <2 x i32> @llvm.abs.v2i32(
return _mm_abs_pi32(a);
}
+TEST_CONSTEXPR(match_v2si(_mm_abs_pi32((__m64)(__v2si){+5, -3}), 5, 3));
__m64 test_mm_add_pi8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_add_pi8
// CHECK: add <8 x i8> {{%.*}}, {{%.*}}
return _mm_add_pi8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_add_pi8((__m64)(__v8qs){-3, +2, -1, 0, +1, -2, +3, -4}, (__m64)(__v8qs){-18, +16, -14, +12, -10, +8, +6, -4}), -21, +18, -15, +12, -9, +6, +9, -8));
__m64 test_mm_add_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_add_pi16
// CHECK: add <4 x i16> {{%.*}}, {{%.*}}
return _mm_add_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_add_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-10, +8, +6, -4}), -9, +6, +9, -8));
__m64 test_mm_add_pi32(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_add_pi32
// CHECK: add <2 x i32> {{%.*}}, {{%.*}}
return _mm_add_pi32(a, b);
}
+TEST_CONSTEXPR(match_v2si(_mm_add_pi32((__m64)(__v2si){+5, -3}, (__m64)(__v2si){-9, +8}), -4, +5));
__m64 test_mm_add_si64(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_add_si64
// CHECK: add i64 {{%.*}}, {{%.*}}
return _mm_add_si64(a, b);
}
+TEST_CONSTEXPR(match_v1di(_mm_add_si64((__m64)(__v1di){+42}, (__m64)(__v1di){-100}), -58));
__m64 test_mm_adds_pi8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_adds_pi8
// CHECK: call <8 x i8> @llvm.sadd.sat.v8i8(
return _mm_adds_pi8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_adds_pi8((__m64)(__v8qs){+100, +50, -100, +20, +80, -50, +120, -20}, (__m64)(__v8qs){+50, +80, -50, +110, +60, -30, +20, -10}), +127, +127, -128, +127, +127, -80, +127, -30));
__m64 test_mm_adds_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_adds_pi16
// CHECK: call <4 x i16> @llvm.sadd.sat.v4i16(
return _mm_adds_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_adds_pi16((__m64)(__v4hi){+32000, -32000, +32000, -32000}, (__m64)(__v4hi){+800, -800, -800, +800}), +32767, -32768, +31200, -31200));
__m64 test_mm_adds_pu8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_adds_pu8
// CHECK: call <8 x i8> @llvm.uadd.sat.v8i8(
return _mm_adds_pu8(a, b);
}
+TEST_CONSTEXPR(match_v8qu(_mm_adds_pu8((__m64)(__v8qu){0, +1, +2, +3, +180, +150, +120, +200}, (__m64)(__v8qu){0, +1, +2, +3, +160, +30, +200, +10}), 0, +2, +4, +6, +255, +180, +255, +210));
__m64 test_mm_adds_pu16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_adds_pu16
// CHECK: call <4 x i16> @llvm.uadd.sat.v4i16(
return _mm_adds_pu16(a, b);
}
+TEST_CONSTEXPR(match_v4hu(_mm_adds_pu16((__m64)(__v4hu){+0, +1, +32000, +33000}, (__m64)(__v4hu){0, +1, +800, +33000}), 0, +2, +32800, +65535));
__m64 test_mm_alignr_pi8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_alignr_pi8
@@ -88,6 +108,7 @@ __m64 test_mm_and_si64(__m64 a, __m64 b) {
// CHECK: and <1 x i64> {{%.*}}, {{%.*}}
return _mm_and_si64(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_and_si64((__m64)(__v4hi){0, -1, 0, -1}, (__m64)(__v4hi){0, 0, -1, -1}), 0, 0, 0, -1));
__m64 test_mm_andnot_si64(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_andnot_si64
@@ -95,6 +116,7 @@ __m64 test_mm_andnot_si64(__m64 a, __m64 b) {
// CHECK: and <1 x i64> [[TMP]], {{%.*}}
return _mm_andnot_si64(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_andnot_si64((__m64)(__v4hi){0, -1, 0, -1}, (__m64)(__v4hi){0, 0, -1, -1}), 0, 0, -1, 0));
__m64 test_mm_avg_pu8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_avg_pu8
@@ -114,6 +136,7 @@ __m64 test_mm_cmpeq_pi8(__m64 a, __m64 b) {
// CHECK-NEXT: {{%.*}} = sext <8 x i1> [[CMP]] to <8 x i8>
return _mm_cmpeq_pi8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_cmpeq_pi8((__m64)(__v8qs){-3, +2, -1, 0, +1, -2, +3, -4}, (__m64)(__v8qs){-3, -2, +1, 0, -1, -2, -3, -4}), -1, 0, 0, -1, 0, -1, 0, -1));
__m64 test_mm_cmpeq_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_cmpeq_pi16
@@ -121,6 +144,7 @@ __m64 test_mm_cmpeq_pi16(__m64 a, __m64 b) {
// CHECK-NEXT: {{%.*}} = sext <4 x i1> [[CMP]] to <4 x i16>
return _mm_cmpeq_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_cmpeq_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-1, -1, +3, +4}), 0, 0, -1, 0));
__m64 test_mm_cmpeq_pi32(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_cmpeq_pi32
@@ -128,6 +152,7 @@ __m64 test_mm_cmpeq_pi32(__m64 a, __m64 b) {
// CHECK-NEXT: {{%.*}} = sext <2 x i1> [[CMP]] to <2 x i32>
return _mm_cmpeq_pi32(a, b);
}
+TEST_CONSTEXPR(match_v2si(_mm_cmpeq_pi32((__m64)(__v2si){+5, -3}, (__m64)(__v2si){-5, -3}), 0, -1));
__m64 test_mm_cmpgt_pi8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_cmpgt_pi8
@@ -135,6 +160,7 @@ __m64 test_mm_cmpgt_pi8(__m64 a, __m64 b) {
// CHECK-NEXT: {{%.*}} = sext <8 x i1> [[CMP]] to <8 x i8>
return _mm_cmpgt_pi8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_cmpgt_pi8((__m64)(__v8qs){-3, +2, -1, 0, +1, -2, +3, -4}, (__m64)(__v8qs){-3, -2, +1, 0, -1, -2, -3, -4}), 0, -1, 0, 0, -1, 0, -1, 0));
__m64 test_mm_cmpgt_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_cmpgt_pi16
@@ -142,6 +168,7 @@ __m64 test_mm_cmpgt_pi16(__m64 a, __m64 b) {
// CHECK-NEXT: {{%.*}} = sext <4 x i1> [[CMP]] to <4 x i16>
return _mm_cmpgt_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_cmpgt_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-1, -1, +3, +4}), -1, 0, 0, 0));
__m64 test_mm_cmpgt_pi32(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_cmpgt_pi32
@@ -149,12 +176,14 @@ __m64 test_mm_cmpgt_pi32(__m64 a, __m64 b) {
// CHECK-NEXT: {{%.*}} = sext <2 x i1> [[CMP]] to <2 x i32>
return _mm_cmpgt_pi32(a, b);
}
+TEST_CONSTEXPR(match_v2si(_mm_cmpgt_pi32((__m64)(__v2si){+5, -3}, (__m64)(__v2si){-5, -3}), -1, 0));
__m128 test_mm_cvt_pi2ps(__m128 a, __m64 b) {
// CHECK-LABEL: test_mm_cvt_pi2ps
// CHECK: sitofp <4 x i32> {{%.*}} to <4 x float>
return _mm_cvt_pi2ps(a, b);
}
+TEST_CONSTEXPR(match_m128(_mm_cvt_pi2ps((__m128){-5.0f, +7.0f, -9.0f, +11.0f}, (__m64)(__v2si){-2,+4}), -2.0f, +4.0f, -9.0f, +11.0f));
__m64 test_mm_cvt_ps2pi(__m128 a) {
// CHECK-LABEL: test_mm_cvt_ps2pi
@@ -168,29 +197,40 @@ __m64 test_mm_cvtpd_pi32(__m128d a) {
return _mm_cvtpd_pi32(a);
}
+__m128 test_mm_cvtpi8_ps(__m64 a) {
+ // CHECK-LABEL: test_mm_cvtpi8_ps
+ // CHECK: sitofp <4 x i8> {{%.*}} to <4 x float>
+ return _mm_cvtpi8_ps(a);
+}
+TEST_CONSTEXPR(match_m128(_mm_cvtpi8_ps((__m64)(__v8qi){1, 2, 3, 4, 5, 6, 7, 8}), +1.0f, +2.0f, +3.0f, +4.0f));
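+// (Illustrative note) Only the low four i8 lanes are converted; lanes 4..7 of the
+// __m64 input are ignored, which is why the expectation lists just four floats.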
+
__m128 test_mm_cvtpi16_ps(__m64 a) {
// CHECK-LABEL: test_mm_cvtpi16_ps
// CHECK: sitofp <4 x i16> {{%.*}} to <4 x float>
return _mm_cvtpi16_ps(a);
}
+TEST_CONSTEXPR(match_m128(_mm_cvtpi16_ps((__m64)(__v4hi){-3, +9, -8, +256}), -3.0f, +9.0f, -8.0f, +256.0f));
__m128d test_mm_cvtpi32_pd(__m64 a) {
// CHECK-LABEL: test_mm_cvtpi32_pd
// CHECK: sitofp <2 x i32> {{%.*}} to <2 x double>
return _mm_cvtpi32_pd(a);
}
+TEST_CONSTEXPR(match_m128d(_mm_cvtpi32_pd((__m64)(__v2si){-10,+17}), -10.0, +17.0));
__m128 test_mm_cvtpi32_ps(__m128 a, __m64 b) {
// CHECK-LABEL: test_mm_cvtpi32_ps
// CHECK: sitofp <4 x i32> {{%.*}} to <4 x float>
return _mm_cvtpi32_ps(a, b);
}
+TEST_CONSTEXPR(match_m128(_mm_cvtpi32_ps((__m128){+1.0f, -2.0f, +3.0f, +5.0f}, (__m64)(__v2si){+100,-200}), +100.0f, -200.0f, +3.0f, +5.0f));
__m128 test_mm_cvtpi32x2_ps(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_cvtpi32x2_ps
// CHECK: sitofp <4 x i32> {{%.*}} to <4 x float>
return _mm_cvtpi32x2_ps(a, b);
}
+TEST_CONSTEXPR(match_m128(_mm_cvtpi32x2_ps((__m64)(__v2si){-8,+7}, (__m64)(__v2si){+100,-200}), -8.0f, +7.0f, +100.0f, -200.0f));
__m64 test_mm_cvtps_pi16(__m128 a) {
// CHECK-LABEL: test_mm_cvtps_pi16
@@ -205,17 +245,33 @@ __m64 test_mm_cvtps_pi32(__m128 a) {
return _mm_cvtps_pi32(a);
}
+__m128 test_mm_cvtpu8_ps(__m64 a) {
+ // CHECK-LABEL: test_mm_cvtpu8_ps
+ // CHECK: uitofp <4 x i8> {{%.*}} to <4 x float>
+ return _mm_cvtpu8_ps(a);
+}
+TEST_CONSTEXPR(match_m128(_mm_cvtpu8_ps((__m64)(__v8qi){8, 7, 6, 5, 4, 3, 2, 1}), 8.0f, 7.0f, 6.0f, 5.0f));
+
+__m128 test_mm_cvtpu16_ps(__m64 a) {
+ // CHECK-LABEL: test_mm_cvtpu16_ps
+ // CHECK: uitofp <4 x i16> {{%.*}} to <4 x float>
+ return _mm_cvtpu16_ps(a);
+}
+TEST_CONSTEXPR(match_m128(_mm_cvtpu16_ps((__m64)(__v4hi){-3, +9, -8, +256}), 65533.0f, 9.0f, 65528.0f, 256.0f));
+
__m64 test_mm_cvtsi32_si64(int a) {
// CHECK-LABEL: test_mm_cvtsi32_si64
// CHECK: insertelement <2 x i32>
return _mm_cvtsi32_si64(a);
}
+TEST_CONSTEXPR(match_v2si(_mm_cvtsi32_si64(-127), -127, 0));
int test_mm_cvtsi64_si32(__m64 a) {
// CHECK-LABEL: test_mm_cvtsi64_si32
// CHECK: extractelement <2 x i32>
return _mm_cvtsi64_si32(a);
}
+TEST_CONSTEXPR(_mm_cvtsi64_si32((__m64)(__v4hi){-2, 0, -1, -1}) == 65534);
__m64 test_mm_cvttpd_pi32(__m128d a) {
// CHECK-LABEL: test_mm_cvttpd_pi32
@@ -240,11 +296,13 @@ __m64 test_m_from_int(int a) {
// CHECK: insertelement <2 x i32>
return _m_from_int(a);
}
+TEST_CONSTEXPR(match_v2si(_m_from_int(255), 255, 0));
__m64 test_m_from_int64(long long a) {
// CHECK-LABEL: test_m_from_int64
return _m_from_int64(a);
}
+TEST_CONSTEXPR(match_v1di(_m_from_int64(-65536), -65536LL));
__m64 test_mm_hadd_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_hadd_pi16
@@ -343,18 +401,21 @@ __m64 test_mm_mul_su32(__m64 a, __m64 b) {
// CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
return _mm_mul_su32(a, b);
}
+TEST_CONSTEXPR(match_m64(_mm_mul_su32((__m64)(__v2si){+1, -2}, (__m64)(__v2si){-10, +8}), 4294967286));
__m64 test_mm_mulhi_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_mulhi_pi16
// CHECK: call <8 x i16> @llvm.x86.sse2.pmulh.w(
return _mm_mulhi_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_mulhi_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-10, +8, +6, -4}), -1, -1, 0, 0));
__m64 test_mm_mulhi_pu16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_mulhi_pu16
// CHECK: call <8 x i16> @llvm.x86.sse2.pmulhu.w(
return _mm_mulhi_pu16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_mulhi_pu16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-10, +8, +6, -4}), 0, 7, 0, -8));
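+// (Illustrative note) The lanes are compared as i16 bit patterns: e.g. 0xFFFC *
+// 0xFFFC = 0xFFF80010 as unsigned, and the high half 0xFFF8 reads back as -8.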
__m64 test_mm_mulhrs_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_mulhrs_pi16
@@ -367,12 +428,14 @@ __m64 test_mm_mullo_pi16(__m64 a, __m64 b) {
// CHECK: mul <4 x i16> {{%.*}}, {{%.*}}
return _mm_mullo_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_mullo_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-10, +8, +6, -4}), -10, -16, +18, +16));
__m64 test_mm_or_si64(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_or_si64
// CHECK: or <1 x i64> {{%.*}}, {{%.*}}
return _mm_or_si64(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_or_si64((__m64)(__v4hi){0, -1, 0, -1}, (__m64)(__v4hi){0, 0, -1, -1}), 0, -1, -1, -1));
__m64 test_mm_packs_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_packs_pi16
@@ -554,18 +617,35 @@ __m64 test_mm_slli_pi16(__m64 a) {
// CHECK: call <8 x i16> @llvm.x86.sse2.pslli.w(
return _mm_slli_pi16(a, 3);
}
+TEST_CONSTEXPR(match_v4hi(_mm_slli_pi16((__m64)(__v4hi){0, 1, 2, 3}, 0), 0, 1, 2, 3));
+TEST_CONSTEXPR(match_v4hi(_mm_slli_pi16((__m64)(__v4hi){0, 1, 2, 3}, 1), 0, 0x2, 0x4, 0x6));
+TEST_CONSTEXPR(match_v4hi(_mm_slli_pi16((__m64)(__v4hi){0, 1, 2, 3}, 15), 0, 0x8000, 0x0, 0x8000));
+TEST_CONSTEXPR(match_v4hi(_mm_slli_pi16((__m64)(__v4hi){0, 1, 2, 3}, 16), 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v4hi(_mm_slli_pi16((__m64)(__v4hi){0, 1, 2, 3}, 17), 0, 0, 0, 0));
__m64 test_mm_slli_pi32(__m64 a) {
// CHECK-LABEL: test_mm_slli_pi32
// CHECK: call <4 x i32> @llvm.x86.sse2.pslli.d(
return _mm_slli_pi32(a, 3);
}
+TEST_CONSTEXPR(match_v2si(_mm_slli_pi32((__m64)(__v2si){0, 1}, 0), 0, 1));
+TEST_CONSTEXPR(match_v2si(_mm_slli_pi32((__m64)(__v2si){0, 1}, 1), 0, 0x2));
+TEST_CONSTEXPR(match_v2si(_mm_slli_pi32((__m64)(__v2si){1, 2}, 2), 0x4, 0x8));
+TEST_CONSTEXPR(match_v2su(_mm_slli_pi32((__m64)(__v2su){1, 1}, 31), 0x80000000, 0x80000000));
+TEST_CONSTEXPR(match_v2si(_mm_slli_pi32((__m64)(__v2si){1, 1}, 32), 0, 0));
+TEST_CONSTEXPR(match_v2si(_mm_slli_pi32((__m64)(__v2si){1, 1}, 33), 0, 0));
__m64 test_mm_slli_si64(__m64 a) {
// CHECK-LABEL: test_mm_slli_si64
// CHECK: call <2 x i64> @llvm.x86.sse2.pslli.q(
return _mm_slli_si64(a, 3);
}
+TEST_CONSTEXPR(match_v1di(_mm_slli_si64((__m64)(__v1di){0}, 0), 0));
+TEST_CONSTEXPR(match_v1di(_mm_slli_si64((__m64)(__v1di){1}, 1), 0x2));
+TEST_CONSTEXPR(match_v1di(_mm_slli_si64((__m64)(__v1di){2}, 2), 0x8));
+TEST_CONSTEXPR(match_v1du(_mm_slli_si64((__m64)(__v1du){1}, 63), 0x8000000000000000ULL));
+TEST_CONSTEXPR(match_v1di(_mm_slli_si64((__m64)(__v1di){1}, 64), 0));
+TEST_CONSTEXPR(match_v1di(_mm_slli_si64((__m64)(__v1di){1}, 65), 0));
__m64 test_mm_sra_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_sra_pi16
@@ -584,12 +664,25 @@ __m64 test_mm_srai_pi16(__m64 a) {
// CHECK: call <8 x i16> @llvm.x86.sse2.psrai.w(
return _mm_srai_pi16(a, 3);
}
+TEST_CONSTEXPR(match_v4hi(_mm_srai_pi16((__m64)(__v4hi){-1, 1, 2, 3}, 1), -1, 0, 1, 1));
+TEST_CONSTEXPR(match_v4hi(_mm_srai_pi16((__m64)(__v4hi){-32768, -1, 1, 2}, 15), -1, -1, 0, 0));
+TEST_CONSTEXPR(match_v4hi(_mm_srai_pi16((__m64)(__v4hi){-32768, -1, 2, 3}, 16), -1, -1, 0, 0));
+TEST_CONSTEXPR(match_v4hi(_mm_srai_pi16((__m64)(__v4hi){-1, 1, 2, 3}, 16), -1, 0, 0, 0));
+TEST_CONSTEXPR(match_v4hi(_mm_srai_pi16((__m64)(__v4hi){-1, 1, 2, 3}, 17), -1, 0, 0, 0));
+TEST_CONSTEXPR(match_v4hi(_mm_srai_pi16((__m64)(__v4hi){-1, 1, -42, -32768}, 100), -1, 0, -1, -1));
+TEST_CONSTEXPR(match_v4hi(_mm_srai_pi16((__m64)(__v4hi){-1, 1, 2, 3}, 200), -1, 0, 0, 0));
+TEST_CONSTEXPR(match_v4hi(_mm_srai_pi16((__m64)(__v4hi){-32768, 32767, -2, 0}, 1), -16384, 16383, -1, 0));
+TEST_CONSTEXPR(match_v4hi(_mm_srai_pi16((__m64)(__v4hi){-32768, 32767, -2, 0}, 15), -1, 0, -1, 0));
+TEST_CONSTEXPR(match_v4hi(_mm_srai_pi16((__m64)(__v4hi){-32768, 32767, -2, 0}, 30), -1, 0, -1, 0));
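+// (Illustrative note) The large-count expectations differ by shift kind: pslli and
+// psrli zero a lane once the count reaches the element width, while psrai saturates
+// the count, acting like v >> min(count, 15) per i16 lane; that is why
+// {-1, 1, -42, -32768} >> 100 stays {-1, 0, -1, -1}.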
__m64 test_mm_srai_pi32(__m64 a) {
// CHECK-LABEL: test_mm_srai_pi32
// CHECK: call <4 x i32> @llvm.x86.sse2.psrai.d(
return _mm_srai_pi32(a, 3);
}
+TEST_CONSTEXPR(match_v2si(_mm_srai_pi32((__m64)(__v2si){-32768, 32767}, 30), -1, 0));
+TEST_CONSTEXPR(match_v2si(_mm_srai_pi32((__m64)(__v2si){-2, 20}, 1), -1, 0xa));
+TEST_CONSTEXPR(match_v2si(_mm_srai_pi32((__m64)(__v2si){-1, 20}, 1), -1, 0xa));
__m64 test_mm_srl_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_srl_pi16
@@ -614,18 +707,25 @@ __m64 test_mm_srli_pi16(__m64 a) {
// CHECK: call <8 x i16> @llvm.x86.sse2.psrli.w(
return _mm_srli_pi16(a, 3);
}
+TEST_CONSTEXPR(match_v4hi(_mm_srli_pi16((__m64)(__v4hi){0, 1, 2, 3}, 1), 0, 0x0, 0x1, 0x1));
+TEST_CONSTEXPR(match_v4hi(_mm_srli_pi16((__m64)(__v4hi){-1, 0, 0, 0}, 1), 0x7fff, 0, 0, 0));
+TEST_CONSTEXPR(match_v4hi(_mm_srli_pi16((__m64)(__v4hi){0, 1, 2, 3}, 16), 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v4hi(_mm_srli_pi16((__m64)(__v4hi){0, 1, 2, 3}, 17), 0, 0, 0, 0));
__m64 test_mm_srli_pi32(__m64 a) {
// CHECK-LABEL: test_mm_srli_pi32
// CHECK: call <4 x i32> @llvm.x86.sse2.psrli.d(
return _mm_srli_pi32(a, 3);
}
+TEST_CONSTEXPR(match_v2si(_mm_srli_pi32((__m64)(__v2si){1, 1025}, 2), 0x0, 0x100));
__m64 test_mm_srli_si64(__m64 a) {
// CHECK-LABEL: test_mm_srli_si64
// CHECK: call <2 x i64> @llvm.x86.sse2.psrli.q(
return _mm_srli_si64(a, 3);
}
+TEST_CONSTEXPR(match_v1di(_mm_srli_si64((__m64)(__v1di){1025}, 2), 0x100));
void test_mm_stream_pi(__m64 *p, __m64 a) {
// CHECK-LABEL: test_mm_stream_pi
@@ -644,98 +744,115 @@ __m64 test_mm_sub_pi8(__m64 a, __m64 b) {
// CHECK: sub <8 x i8> {{%.*}}, {{%.*}}
return _mm_sub_pi8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_sub_pi8((__m64)(__v8qs){-3, +2, -1, 0, +1, -2, +3, -4}, (__m64)(__v8qs){-18, +16, -14, +12, -10, +8, +6, -4}), +15, -14, +13, -12, +11, -10, -3, 0));
__m64 test_mm_sub_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_sub_pi16
// CHECK: sub <4 x i16> {{%.*}}, {{%.*}}
return _mm_sub_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_sub_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-10, +8, +6, -4}), +11, -10, -3, 0));
__m64 test_mm_sub_pi32(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_sub_pi32
// CHECK: sub <2 x i32> {{%.*}}, {{%.*}}
return _mm_sub_pi32(a, b);
}
+TEST_CONSTEXPR(match_v2si(_mm_sub_pi32((__m64)(__v2si){+5, -3}, (__m64)(__v2si){-9, +8}), +14, -11));
__m64 test_mm_sub_si64(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_sub_si64
// CHECK: sub i64 {{%.*}}, {{%.*}}
return _mm_sub_si64(a, b);
}
+TEST_CONSTEXPR(match_v1di(_mm_sub_si64((__m64)(__v1di){+42}, (__m64)(__v1di){-100}), +142));
__m64 test_mm_subs_pi8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_subs_pi8
// CHECK: call <8 x i8> @llvm.ssub.sat.v8i8(
return _mm_subs_pi8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_subs_pi8((__m64)(__v8qs){+100, +50, -100, +20, +80, -50, +120, -20}, (__m64)(__v8qs){-50, -80, +50, -110, -60, +30, -20, +10}), +127, +127, -128, +127, +127, -80, +127, -30));
__m64 test_mm_subs_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_subs_pi16
// CHECK: call <4 x i16> @llvm.ssub.sat.v4i16(
return _mm_subs_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_subs_pi16((__m64)(__v4hi){+32000, -32000, +32000, -32000}, (__m64)(__v4hi){-800, +800, +800, -800}), +32767, -32768, +31200, -31200));
__m64 test_mm_subs_pu8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_subs_pu8
// CHECK: call <8 x i8> @llvm.usub.sat.v8i8(
return _mm_subs_pu8(a, b);
}
+TEST_CONSTEXPR(match_v8qu(_mm_subs_pu8((__m64)(__v8qu){0, +1, +2, +3, +180, +250, +120, +200}, (__m64)(__v8qu){0, +1, +1, +255, +200, +30, +200, +10}), 0, 0, +1, 0, 0, +220, 0, +190));
__m64 test_mm_subs_pu16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_subs_pu16
// CHECK: call <4 x i16> @llvm.usub.sat.v4i16(
return _mm_subs_pu16(a, b);
}
+TEST_CONSTEXPR(match_v4hu(_mm_subs_pu16((__m64)(__v4hu){+0, +1, +32000, +33000}, (__m64)(__v4hu){0, +65535, +800, +34000}), 0, 0, +31200, 0));
int test_m_to_int(__m64 a) {
// CHECK-LABEL: test_m_to_int
// CHECK: extractelement <2 x i32>
return _m_to_int(a);
}
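+// _m_to_int truncates to the low 32 bits: {0, -2} packs to 0xFFFE0000 == -131072.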
+TEST_CONSTEXPR(_m_to_int((__m64)(__v4hi){0, -2, -1, -1}) == -131072);
long long test_m_to_int64(__m64 a) {
// CHECK-LABEL: test_m_to_int64
return _m_to_int64(a);
}
+TEST_CONSTEXPR(_m_to_int64((__m64)(__v4hi){0, -2, 0, -1}) == -281470681874432LL);
__m64 test_mm_unpackhi_pi8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_unpackhi_pi8
// CHECK: shufflevector <8 x i8> {{%.*}}, <8 x i8> {{%.*}}, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
return _mm_unpackhi_pi8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_unpackhi_pi8((__m64)(__v8qi){0, 1, 2, 3, 4, 5, 6, 7}, (__m64)(__v8qi){8, 9, 10, 11, 12, 13, 14, 15}), 4, 12, 5, 13, 6, 14, 7, 15));
__m64 test_mm_unpackhi_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_unpackhi_pi16
// CHECK: shufflevector <4 x i16> {{%.*}}, <4 x i16> {{%.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
return _mm_unpackhi_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_unpackhi_pi16((__m64)(__v4hi){0, 1, 2, 3}, (__m64)(__v4hi){4, 5, 6, 7}), 2, 6, 3, 7));
__m64 test_mm_unpackhi_pi32(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_unpackhi_pi32
// CHECK: shufflevector <2 x i32> {{%.*}}, <2 x i32> {{%.*}}, <2 x i32> <i32 1, i32 3>
return _mm_unpackhi_pi32(a, b);
}
+TEST_CONSTEXPR(match_v2si(_mm_unpackhi_pi32((__m64)(__v2si){0, 1}, (__m64)(__v2si){2, 3}), 1, 3));
__m64 test_mm_unpacklo_pi8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_unpacklo_pi8
// CHECK: shufflevector <8 x i8> {{%.*}}, <8 x i8> {{%.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
return _mm_unpacklo_pi8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_unpacklo_pi8((__m64)(__v8qi){0, 1, 2, 3, 4, 5, 6, 7}, (__m64)(__v8qi){8, 9, 10, 11, 12, 13, 14, 15}), 0, 8, 1, 9, 2, 10, 3, 11));
__m64 test_mm_unpacklo_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_unpacklo_pi16
// CHECK: shufflevector <4 x i16> {{%.*}}, <4 x i16> {{%.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
return _mm_unpacklo_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_unpacklo_pi16((__m64)(__v4hi){0, 1, 2, 3}, (__m64)(__v4hi){4, 5, 6, 7}), 0, 4, 1, 5));
__m64 test_mm_unpacklo_pi32(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_unpacklo_pi32
// CHECK: shufflevector <2 x i32> {{%.*}}, <2 x i32> {{%.*}}, <2 x i32> <i32 0, i32 2>
return _mm_unpacklo_pi32(a, b);
}
+TEST_CONSTEXPR(match_v2si(_mm_unpacklo_pi32((__m64)(__v2si){0, 1}, (__m64)(__v2si){2, 3}), 0, 2));
__m64 test_mm_xor_si64(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_xor_si64
// CHECK: xor <1 x i64> {{%.*}}, {{%.*}}
return _mm_xor_si64(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_xor_si64((__m64)(__v4hi){0, -1, 0, -1}, (__m64)(__v4hi){0, 0, -1, -1}), 0, -1, -1, 0));
diff --git a/clang/test/CodeGen/X86/popcnt-builtins.c b/clang/test/CodeGen/X86/popcnt-builtins.c
index b27bc3f..fdd1a4c 100644
--- a/clang/test/CodeGen/X86/popcnt-builtins.c
+++ b/clang/test/CodeGen/X86/popcnt-builtins.c
@@ -3,24 +3,37 @@
// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -no-enable-noundef-analysis -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding %s -triple=x86_64-apple-darwin -no-enable-noundef-analysis -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +popcnt -no-enable-noundef-analysis -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,CHECK-POPCNT
+// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +popcnt -no-enable-noundef-analysis -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,CHECK-POPCNT
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -no-enable-noundef-analysis -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding %s -triple=x86_64-apple-darwin -no-enable-noundef-analysis -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <x86intrin.h>
+#include "builtin_test_helpers.h"
#ifdef __POPCNT__
int test_mm_popcnt_u32(unsigned int __X) {
//CHECK-POPCNT: call i32 @llvm.ctpop.i32
return _mm_popcnt_u32(__X);
}
+TEST_CONSTEXPR(_mm_popcnt_u32(0x00000000) == 0);
+TEST_CONSTEXPR(_mm_popcnt_u32(0x000000F0) == 4);
#endif
int test_popcnt32(unsigned int __X) {
//CHECK: call i32 @llvm.ctpop.i32
return _popcnt32(__X);
}
+TEST_CONSTEXPR(_popcnt32(0x00000000) == 0);
+TEST_CONSTEXPR(_popcnt32(0x100000F0) == 5);
int test__popcntd(unsigned int __X) {
//CHECK: call i32 @llvm.ctpop.i32
return __popcntd(__X);
}
+TEST_CONSTEXPR(__popcntd(0x00000000) == 0);
+TEST_CONSTEXPR(__popcntd(0x00F000F0) == 8);
#ifdef __x86_64__
#ifdef __POPCNT__
@@ -28,42 +41,21 @@ long long test_mm_popcnt_u64(unsigned long long __X) {
//CHECK-POPCNT: call i64 @llvm.ctpop.i64
return _mm_popcnt_u64(__X);
}
+TEST_CONSTEXPR(_mm_popcnt_u64(0x0000000000000000ULL) == 0);
+TEST_CONSTEXPR(_mm_popcnt_u64(0xF000000000000001ULL) == 5);
#endif
long long test_popcnt64(unsigned long long __X) {
//CHECK: call i64 @llvm.ctpop.i64
return _popcnt64(__X);
}
+TEST_CONSTEXPR(_popcnt64(0x0000000000000000ULL) == 0);
+TEST_CONSTEXPR(_popcnt64(0xF00000F000000001ULL) == 9);
long long test__popcntq(unsigned long long __X) {
//CHECK: call i64 @llvm.ctpop.i64
return __popcntq(__X);
}
-#endif
-
-// Test constexpr handling.
-#if defined(__cplusplus) && (__cplusplus >= 201103L)
-#if defined(__POPCNT__)
-char ctpop32_0[_mm_popcnt_u32(0x00000000) == 0 ? 1 : -1];
-char ctpop32_1[_mm_popcnt_u32(0x000000F0) == 4 ? 1 : -1];
-#endif
-
-char popcnt32_0[_popcnt32(0x00000000) == 0 ? 1 : -1];
-char popcnt32_1[_popcnt32(0x100000F0) == 5 ? 1 : -1];
-
-char popcntd_0[__popcntd(0x00000000) == 0 ? 1 : -1];
-char popcntd_1[__popcntd(0x00F000F0) == 8 ? 1 : -1];
-
-#ifdef __x86_64__
-#if defined(__POPCNT__)
-char ctpop64_0[_mm_popcnt_u64(0x0000000000000000ULL) == 0 ? 1 : -1];
-char ctpop64_1[_mm_popcnt_u64(0xF000000000000001ULL) == 5 ? 1 : -1];
-#endif
-
-char popcnt64_0[_popcnt64(0x0000000000000000ULL) == 0 ? 1 : -1];
-char popcnt64_1[_popcnt64(0xF00000F000000001ULL) == 9 ? 1 : -1];
-
-char popcntq_0[__popcntq(0x0000000000000000ULL) == 0 ? 1 : -1];
-char popcntq_1[__popcntq(0xF000010000300001ULL) == 8 ? 1 : -1];
-#endif
+TEST_CONSTEXPR(__popcntq(0x0000000000000000ULL) == 0);
+TEST_CONSTEXPR(__popcntq(0xF000010000300001ULL) == 8);
#endif
diff --git a/clang/test/CodeGen/X86/rot-intrinsics.c b/clang/test/CodeGen/X86/rot-intrinsics.c
index 5da300b..338b06fb 100644
--- a/clang/test/CodeGen/X86/rot-intrinsics.c
+++ b/clang/test/CodeGen/X86/rot-intrinsics.c
@@ -5,14 +5,22 @@
// RUN: %clang_cc1 -x c -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=i686-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
// RUN: %clang_cc1 -x c -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
-// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding -triple i686--linux -no-enable-noundef-analysis -emit-llvm %s -o - | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
-// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding -triple x86_64--linux -no-enable-noundef-analysis -emit-llvm %s -o - | FileCheck %s --check-prefixes CHECK,CHECK-64BIT-LONG
-// RUN: %clang_cc1 -x c++ -std=c++11 -fms-extensions -fms-compatibility -ffreestanding %s -triple=i686-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
-// RUN: %clang_cc1 -x c++ -std=c++11 -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
-// RUN: %clang_cc1 -x c++ -std=c++11 -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=i686-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
-// RUN: %clang_cc1 -x c++ -std=c++11 -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
+// RUN: %clang_cc1 -x c++ -ffreestanding -triple i686--linux -no-enable-noundef-analysis -emit-llvm %s -o - | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
+// RUN: %clang_cc1 -x c++ -ffreestanding -triple x86_64--linux -no-enable-noundef-analysis -emit-llvm %s -o - | FileCheck %s --check-prefixes CHECK,CHECK-64BIT-LONG
+// RUN: %clang_cc1 -x c++ -fms-extensions -fms-compatibility -ffreestanding %s -triple=i686-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
+// RUN: %clang_cc1 -x c++ -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
+// RUN: %clang_cc1 -x c++ -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=i686-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
+// RUN: %clang_cc1 -x c++ -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
+
+// RUN: %clang_cc1 -x c++ -ffreestanding -triple i686--linux -no-enable-noundef-analysis -emit-llvm %s -o - -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
+// RUN: %clang_cc1 -x c++ -ffreestanding -triple x86_64--linux -no-enable-noundef-analysis -emit-llvm %s -o - -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes CHECK,CHECK-64BIT-LONG
+// RUN: %clang_cc1 -x c++ -fms-extensions -fms-compatibility -ffreestanding %s -triple=i686-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
+// RUN: %clang_cc1 -x c++ -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
+// RUN: %clang_cc1 -x c++ -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=i686-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
+// RUN: %clang_cc1 -x c++ -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +sse2 -no-enable-noundef-analysis -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes CHECK,CHECK-32BIT-LONG
#include <x86intrin.h>
+#include "builtin_test_helpers.h"
unsigned char test__rolb(unsigned char value, int shift) {
// CHECK-LABEL: test__rolb
@@ -20,6 +28,7 @@ unsigned char test__rolb(unsigned char value, int shift) {
// CHECK: ret i8 [[R]]
return __rolb(value, shift);
}
+TEST_CONSTEXPR(__rolb(0x01, 5) == 0x20);
unsigned short test__rolw(unsigned short value, int shift) {
// CHECK-LABEL: test__rolw
@@ -27,6 +36,7 @@ unsigned short test__rolw(unsigned short value, int shift) {
// CHECK: ret i16 [[R]]
return __rolw(value, shift);
}
+TEST_CONSTEXPR(__rolw(0x3210, 11) == 0x8190);
unsigned int test__rold(unsigned int value, int shift) {
// CHECK-LABEL: test__rold
@@ -34,6 +44,7 @@ unsigned int test__rold(unsigned int value, int shift) {
// CHECK: ret i32 [[R]]
return __rold(value, shift);
}
+TEST_CONSTEXPR(__rold(0x76543210, 22) == 0x841D950C);
#if defined(__x86_64__)
unsigned long test__rolq(unsigned long value, int shift) {
@@ -42,6 +53,7 @@ unsigned long test__rolq(unsigned long value, int shift) {
// CHECK-LONG: ret i64 [[R]]
return __rolq(value, shift);
}
+TEST_CONSTEXPR(__rolq(0xFEDCBA9876543210ULL, 55) == 0x087F6E5D4C3B2A19ULL);
#endif
unsigned char test__rorb(unsigned char value, int shift) {
@@ -50,6 +62,7 @@ unsigned char test__rorb(unsigned char value, int shift) {
// CHECK: ret i8 [[R]]
return __rorb(value, shift);
}
+TEST_CONSTEXPR(__rorb(0x01, 5) == 0x08);
unsigned short test__rorw(unsigned short value, int shift) {
// CHECK-LABEL: test__rorw
@@ -57,6 +70,7 @@ unsigned short test__rorw(unsigned short value, int shift) {
// CHECK: ret i16 [[R]]
return __rorw(value, shift);
}
+TEST_CONSTEXPR(__rorw(0x3210, 11) == 0x4206);
unsigned int test__rord(unsigned int value, int shift) {
// CHECK-LABEL: test__rord
@@ -64,6 +78,7 @@ unsigned int test__rord(unsigned int value, int shift) {
// CHECK: ret i32 [[R]]
return __rord(value, shift);
}
+TEST_CONSTEXPR(__rord(0x76543210, 22) == 0x50C841D9);
#if defined(__x86_64__)
unsigned long test__rorq(unsigned long value, int shift) {
@@ -72,6 +87,7 @@ unsigned long test__rorq(unsigned long value, int shift) {
// CHECK-LONG: ret i64 [[R]]
return __rorq(value, shift);
}
+TEST_CONSTEXPR(__rorq(0xFEDCBA9876543210ULL, 55) == 0xB97530ECA86421FDULL);
#endif
unsigned short test_rotwl(unsigned short value, int shift) {
@@ -80,6 +96,7 @@ unsigned short test_rotwl(unsigned short value, int shift) {
// CHECK: ret i16 [[R]]
return _rotwl(value, shift);
}
+TEST_CONSTEXPR(_rotwl(0x3210, 4) == 0x2103);
unsigned int test_rotl(unsigned int value, int shift) {
// CHECK-LABEL: test_rotl
@@ -87,6 +104,7 @@ unsigned int test_rotl(unsigned int value, int shift) {
// CHECK: ret i32 [[R]]
return _rotl(value, shift);
}
+TEST_CONSTEXPR(_rotl(0x76543210, 8) == 0x54321076);
unsigned long test_lrotl(unsigned long value, int shift) {
// CHECK-32BIT-LONG-LABEL: test_lrotl
@@ -98,6 +116,11 @@ unsigned long test_lrotl(unsigned long value, int shift) {
// CHECK-64BIT-LONG: ret i64 [[R]]
return _lrotl(value, shift);
}
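+// 'long' is 64-bit only under LP64 (never on Windows), so the expected value depends on the target.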
+#if defined(__LP64__) && !defined(_MSC_VER)
+TEST_CONSTEXPR(_lrotl(0xFEDCBA9876543210ULL, 55) == 0x087F6E5D4C3B2A19ULL);
+#else
+TEST_CONSTEXPR(_lrotl(0x76543210, 22) == 0x841D950C);
+#endif
unsigned short test_rotwr(unsigned short value, int shift) {
@@ -106,6 +129,7 @@ unsigned short test_rotwr(unsigned short value, int shift) {
// CHECK: ret i16 [[R]]
return _rotwr(value, shift);
}
+TEST_CONSTEXPR(_rotwr(0x3210, 4) == 0x0321);
unsigned int test_rotr(unsigned int value, int shift) {
// CHECK-LABEL: test_rotr
@@ -113,6 +137,7 @@ unsigned int test_rotr(unsigned int value, int shift) {
// CHECK: ret i32 [[R]]
return _rotr(value, shift);
}
+TEST_CONSTEXPR(_rotr(0x76543210, 8) == 0x10765432);
unsigned long test_lrotr(unsigned long value, int shift) {
// CHECK-32BIT-LONG-LABEL: test_lrotr
@@ -124,34 +149,9 @@ unsigned long test_lrotr(unsigned long value, int shift) {
// CHECK-64BIT-LONG: ret i64 [[R]]
return _lrotr(value, shift);
}
-
-// Test constexpr handling.
-#if defined(__cplusplus) && (__cplusplus >= 201103L)
-
-char rolb_0[__rolb(0x01, 5) == 0x20 ? 1 : -1];
-char rolw_0[__rolw(0x3210, 11) == 0x8190 ? 1 : -1];
-char rold_0[__rold(0x76543210, 22) == 0x841D950C ? 1 : -1];
-
-char rorb_0[__rorb(0x01, 5) == 0x08 ? 1 : -1];
-char rorw_0[__rorw(0x3210, 11) == 0x4206 ? 1 : -1];
-char rord_0[__rord(0x76543210, 22) == 0x50C841D9 ? 1 : -1];
-
-#if defined(__x86_64__)
-char rolq_0[__rolq(0xFEDCBA9876543210ULL, 55) == 0x087F6E5D4C3B2A19ULL ? 1 : -1];
-char rorq_0[__rorq(0xFEDCBA9876543210ULL, 55) == 0xB97530ECA86421FDULL ? 1 : -1];
-#endif
-
-char rotwl_0[_rotwl(0x3210, 4) == 0x2103 ? 1 : -1];
-char rotwr_0[_rotwr(0x3210, 4) == 0x0321 ? 1 : -1];
-char rotl_0[_rotl(0x76543210, 8) == 0x54321076 ? 1 : -1];
-char rotr_0[_rotr(0x76543210, 8) == 0x10765432 ? 1 : -1];
-
#if defined(__LP64__) && !defined(_MSC_VER)
-char lrotl_0[_lrotl(0xFEDCBA9876543210ULL, 55) == 0x087F6E5D4C3B2A19ULL ? 1 : -1];
-char lrotr_0[_lrotr(0xFEDCBA9876543210ULL, 55) == 0xB97530ECA86421FDULL ? 1 : -1];
+TEST_CONSTEXPR(_lrotr(0xFEDCBA9876543210ULL, 55) == 0xB97530ECA86421FDULL);
#else
-char lrotl_0[_lrotl(0x76543210, 22) == 0x841D950C ? 1 : -1];
-char lrotr_0[_lrotr(0x76543210, 22) == 0x50C841D9 ? 1 : -1];
+TEST_CONSTEXPR(_lrotr(0x76543210, 22) == 0x50C841D9);
#endif
-#endif
diff --git a/clang/test/CodeGen/X86/sse-builtins.c b/clang/test/CodeGen/X86/sse-builtins.c
index 104bfea..3bad342 100644
--- a/clang/test/CodeGen/X86/sse-builtins.c
+++ b/clang/test/CodeGen/X86/sse-builtins.c
@@ -3,6 +3,11 @@
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse -emit-llvm -o - -Wall -Werror | FileCheck %s
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +sse -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +sse -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +sse -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <immintrin.h>
#include "builtin_test_helpers.h"
@@ -31,7 +36,7 @@ __m128 test_mm_and_ps(__m128 A, __m128 B) {
// CHECK: and <4 x i32>
return _mm_and_ps(A, B);
}
-TEST_CONSTEXPR(match_m128(_mm_and_ps((__m128){-4.0f, -5.0f, +6.0f, +7.0f}, (__m128){+0.0f, -0.0f, -0.0f, +7.0f}), -0.0f, -0.0f, +0.0f, +7.0f));
+TEST_CONSTEXPR(match_m128(_mm_and_ps((__m128){-4.0f, -5.0f, +6.0f, +7.0f}, (__m128){+0.0f, -0.0f, -0.0f, +7.0f}), +0.0f, -0.0f, +0.0f, +7.0f));
__m128 test_mm_andnot_ps(__m128 A, __m128 B) {
// CHECK-LABEL: test_mm_andnot_ps
@@ -39,7 +44,7 @@ __m128 test_mm_andnot_ps(__m128 A, __m128 B) {
// CHECK: and <4 x i32>
return _mm_andnot_ps(A, B);
}
-TEST_CONSTEXPR(match_m128(_mm_andnot_ps((__m128){-4.0f, -5.0f, +6.0f, +7.0f}, (__m128){+0.0f, -0.0f, -0.0f, +7.0f}), +0.0f, +0.0f, +0.0f, +0.0f));
+TEST_CONSTEXPR(match_m128(_mm_andnot_ps((__m128){-4.0f, -5.0f, +6.0f, +7.0f}, (__m128){+0.0f, -0.0f, -0.0f, +7.0f}), +0.0f, +0.0f, -0.0f, +0.0f));
__m128 test_mm_cmp_ps_eq_oq(__m128 a, __m128 b) {
// CHECK-LABEL: test_mm_cmp_ps_eq_oq
diff --git a/clang/test/CodeGen/X86/sse2-builtins.c b/clang/test/CodeGen/X86/sse2-builtins.c
index 04df59e..69a6d89 100644
--- a/clang/test/CodeGen/X86/sse2-builtins.c
+++ b/clang/test/CodeGen/X86/sse2-builtins.c
@@ -9,6 +9,17 @@
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse2 -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,X86
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +sse2 -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse2 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X86
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse2 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X86
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +sse2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse2 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X86
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse2 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X86
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +sse2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+
#include <immintrin.h>
#include "builtin_test_helpers.h"
@@ -63,12 +74,14 @@ __m128i test_mm_adds_epi8(__m128i A, __m128i B) {
// CHECK: call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
return _mm_adds_epi8(A, B);
}
+TEST_CONSTEXPR(match_v16qi(_mm_adds_epi8((__m128i)(__v16qs){+100, +50, -100, +20, +80, -50, +120, -20, -100, -50, +100, -20, -80, +50, -120, +20}, (__m128i)(__v16qs){+50, +80, -50, +110, +60, -30, +20, -10, +50, +80, -50, +110, +60, -30, +20, -10}), +127, +127, -128, +127, +127, -80, +127, -30, -50, +30, +50, +90, -20, +20, -100, +10));
__m128i test_mm_adds_epi16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_adds_epi16
// CHECK: call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_adds_epi16(A, B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_adds_epi16((__m128i)(__v8hi){+32000, -32000, +32000, -32000, +80, -50, +120, -20}, (__m128i)(__v8hi){+800, -800, -800, +800, +60, -30, +20, -10}), +32767, -32768, +31200, -31200, +140, -80, +140, -30));
__m128i test_mm_adds_epu8(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_adds_epu8
@@ -76,6 +89,7 @@ __m128i test_mm_adds_epu8(__m128i A, __m128i B) {
// CHECK: call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
return _mm_adds_epu8(A, B);
}
+TEST_CONSTEXPR(match_v16qu(_mm_adds_epu8((__m128i)(__v16qu){0, 0, 0, 0, +127, +127, +127, +127, +128, +128, +128, +128, +255, +255, +255, +255}, (__m128i)(__v16qu){0, +127, +128, +255, 0, +127, +128, +255, 0, +127, +128, +255, 0, +127, +128, +255}), 0, +127, +128, +255, +127, +254, +255, +255, +128, +255, +255, +255, +255, +255, +255, +255));
__m128i test_mm_adds_epu16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_adds_epu16
@@ -83,6 +97,7 @@ __m128i test_mm_adds_epu16(__m128i A, __m128i B) {
// CHECK: call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_adds_epu16(A, B);
}
+TEST_CONSTEXPR(match_v8hu(_mm_adds_epu16((__m128i)(__v8hu){0, 0, 0, +32767, +32767, +32767, +65535, +65535}, (__m128i)(__v8hu){0, +32767, +65535, 0, +32767, +65535, 0, +32767}), 0, +32767, +65535, +32767, +65534, +65535, +65535, +65535));
__m128d test_mm_and_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_and_pd
@@ -96,6 +111,7 @@ __m128i test_mm_and_si128(__m128i A, __m128i B) {
// CHECK: and <2 x i64>
return _mm_and_si128(A, B);
}
+TEST_CONSTEXPR(match_v4si(_mm_and_si128((__m128i)(__v4si){0, -1, 0, -1}, (__m128i)(__v4si){0, 0, -1, -1}), 0, 0, 0, -1));
__m128d test_mm_andnot_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_andnot_pd
@@ -103,7 +119,7 @@ __m128d test_mm_andnot_pd(__m128d A, __m128d B) {
// CHECK: and <2 x i64>
return _mm_andnot_pd(A, B);
}
-TEST_CONSTEXPR(match_m128d(_mm_andnot_pd((__m128d){+1.0, -3.0}, (__m128d){+0.0, -0.0}), +0.0, -0.0));
+TEST_CONSTEXPR(match_m128d(_mm_andnot_pd((__m128d){+1.0, -3.0}, (__m128d){-0.0, +0.0}), -0.0, +0.0));
__m128i test_mm_andnot_si128(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_andnot_si128
@@ -111,6 +127,7 @@ __m128i test_mm_andnot_si128(__m128i A, __m128i B) {
// CHECK: and <2 x i64>
return _mm_andnot_si128(A, B);
}
+TEST_CONSTEXPR(match_v4si(_mm_andnot_si128((__m128i)(__v4si){0, -1, 0, -1}, (__m128i)(__v4si){0, 0, -1, -1}), 0, 0, -1, 0));
__m128i test_mm_avg_epu8(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_avg_epu8
@@ -237,18 +254,21 @@ __m128i test_mm_cmpeq_epi8(__m128i A, __m128i B) {
// CHECK: icmp eq <16 x i8>
return _mm_cmpeq_epi8(A, B);
}
+TEST_CONSTEXPR(match_v16qi(_mm_cmpeq_epi8((__m128i)(__v16qs){1,-2,3,-4,-5,6,-7,8,-9,10,-11,12,-13,14,-15,16}, (__m128i)(__v16qs){10,-2,6,-4,-5,12,-14,8,-9,20,-22,12,-26,14,-30,16}), 0,-1,0,-1,-1,0,0,-1,-1,0,0,-1,0,-1,0,-1));
__m128i test_mm_cmpeq_epi16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_cmpeq_epi16
// CHECK: icmp eq <8 x i16>
return _mm_cmpeq_epi16(A, B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_cmpeq_epi16((__m128i)(__v8hi){+1, -2, +3, -4, +5, -6, +7, -8}, (__m128i)(__v8hi){-10, -2, +6, -4, +5, -12, +14, -8}), 0, -1, 0, -1, -1, 0, 0, -1));
__m128i test_mm_cmpeq_epi32(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_cmpeq_epi32
// CHECK: icmp eq <4 x i32>
return _mm_cmpeq_epi32(A, B);
}
+TEST_CONSTEXPR(match_v4si(_mm_cmpeq_epi32((__m128i)(__v4si){+1, -2, +3, -4}, (__m128i)(__v4si){-10, -2, +6, -4}), 0, -1, 0, -1));
__m128d test_mm_cmpeq_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_cmpeq_pd
@@ -287,18 +307,24 @@ __m128i test_mm_cmpgt_epi8(__m128i A, __m128i B) {
// CHECK: icmp sgt <16 x i8>
return _mm_cmpgt_epi8(A, B);
}
+TEST_CONSTEXPR(match_v16qi(_mm_cmpgt_epi8((__m128i)(__v16qs){15, -2, 8, -4, 12, 6, -20, 8, 25, -10, 30, 12, -35, 14, 40, -16}, (__m128i)(__v16qs){10, -2, 6, -4, 5, 12, -14, 8, 9, -20, 22, 12, -26, 14, 30, -16}), -1, 0, -1, 0, -1, 0, 0, 0, -1, -1, -1, 0, 0, 0, -1, 0));
__m128i test_mm_cmpgt_epi16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_cmpgt_epi16
// CHECK: icmp sgt <8 x i16>
return _mm_cmpgt_epi16(A, B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_cmpgt_epi16((__m128i)(__v8hi){15,2,8,4,12,6,20,8}, (__m128i)(__v8hi){10,2,6,4,5,12,14,8}), -1,0,-1,0,-1,0,-1,0));
__m128i test_mm_cmpgt_epi32(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_cmpgt_epi32
// CHECK: icmp sgt <4 x i32>
return _mm_cmpgt_epi32(A, B);
}
+TEST_CONSTEXPR(match_v4si(_mm_cmpgt_epi32((__m128i)(__v4si){15,2,8,4}, (__m128i)(__v4si){10,2,6,4}), -1,0,-1,0));
__m128d test_mm_cmpgt_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_cmpgt_pd
@@ -337,18 +363,24 @@ __m128i test_mm_cmplt_epi8(__m128i A, __m128i B) {
// CHECK: icmp sgt <16 x i8>
return _mm_cmplt_epi8(A, B);
}
+TEST_CONSTEXPR(match_v16qi(_mm_cmplt_epi8((__m128i)(__v16qs){15, -2, 8, -4, 12, 6, -20, 8, 25, -10, 30, 12, -35, 14, 40, -16}, (__m128i)(__v16qs){10, -2, 6, -4, 5, 12, -14, 8, 9, -20, 22, 12, -26, 14, 30, -16}), 0, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, 0, -1, 0, 0, 0));
__m128i test_mm_cmplt_epi16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_cmplt_epi16
// CHECK: icmp sgt <8 x i16>
return _mm_cmplt_epi16(A, B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_cmplt_epi16((__m128i)(__v8hi){5,2,3,4,1,6,7,8}, (__m128i)(__v8hi){10,2,6,4,5,12,14,8}), -1, 0, -1, 0, -1, -1, -1, 0));
__m128i test_mm_cmplt_epi32(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_cmplt_epi32
// CHECK: icmp sgt <4 x i32>
return _mm_cmplt_epi32(A, B);
}
+TEST_CONSTEXPR(match_v4si(_mm_cmplt_epi32((__m128i)(__v4si){5,2,3,4}, (__m128i)(__v4si){10,2,6,4}), -1,0,-1,0));
__m128d test_mm_cmplt_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_cmplt_pd
@@ -579,12 +611,14 @@ int test_mm_cvtsi128_si32(__m128i A) {
// CHECK: extractelement <4 x i32> %{{.*}}, i32 0
return _mm_cvtsi128_si32(A);
}
+TEST_CONSTEXPR(_mm_cvtsi128_si32((__m128i)(__v4si){+1, -2, -3, +4}) == +1);
long long test_mm_cvtsi128_si64(__m128i A) {
// CHECK-LABEL: test_mm_cvtsi128_si64
// CHECK: extractelement <2 x i64> %{{.*}}, i32 0
return _mm_cvtsi128_si64(A);
}
+TEST_CONSTEXPR(_mm_cvtsi128_si64((__m128i)(__v2di){42LL, -42LL}) == 42LL);
__m128d test_mm_cvtsi32_sd(__m128d A, int B) {
// CHECK-LABEL: test_mm_cvtsi32_sd
@@ -602,6 +636,7 @@ __m128i test_mm_cvtsi32_si128(int A) {
// CHECK: insertelement <4 x i32> %{{.*}}, i32 0, i32 3
return _mm_cvtsi32_si128(A);
}
+TEST_CONSTEXPR(match_v4si(_mm_cvtsi32_si128(55), 55, 0, 0, 0));
#ifdef __x86_64__
__m128d test_mm_cvtsi64_sd(__m128d A, long long B) {
@@ -619,6 +654,7 @@ __m128i test_mm_cvtsi64_si128(long long A) {
// CHECK: insertelement <2 x i64> %{{.*}}, i64 0, i32 1
return _mm_cvtsi64_si128(A);
}
+TEST_CONSTEXPR(match_v2di(_mm_cvtsi64_si128(-99LL), -99LL, 0LL));
__m128d test_mm_cvtss_sd(__m128d A, __m128 B) {
// CHECK-LABEL: test_mm_cvtss_sd
@@ -915,6 +951,7 @@ __m128i test_mm_mul_epu32(__m128i A, __m128i B) {
// CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
return _mm_mul_epu32(A, B);
}
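+// pmuludq multiplies the even (0 and 2) lanes as unsigned: 1 * 0xFFFFFFF0 == 4294967280.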
+TEST_CONSTEXPR(match_m128i(_mm_mul_epu32((__m128i)(__v4si){+1, -2, +3, -4}, (__m128i)(__v4si){-16, -14, +12, +10}), 4294967280, 36));
__m128d test_mm_mul_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_mul_pd
@@ -938,18 +975,21 @@ __m128i test_mm_mulhi_epi16(__m128i A, __m128i B) {
// CHECK: call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_mulhi_epi16(A, B);
}
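+// pmulhw keeps the high 16 bits of each 32-bit signed product.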
+TEST_CONSTEXPR(match_v8hi(_mm_mulhi_epi16((__m128i)(__v8hi){+1, -2, +3, -4, +5, -6, +7, -8}, (__m128i)(__v8hi){-16, -14, +12, +10, -8, +6, -4, +2}), -1, 0, 0, -1, -1, -1, -1, -1));
__m128i test_mm_mulhi_epu16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_mulhi_epu16
// CHECK: call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_mulhi_epu16(A, B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mulhi_epu16((__m128i)(__v8hi){+1, -2, +3, -4, +5, -6, +7, -8}, (__m128i)(__v8hi){-16, -14, +12, +10, -8, +6, -4, +2}), 0, -16, 0, 9, 4, 5, 6, 1));
__m128i test_mm_mullo_epi16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_mullo_epi16
// CHECK: mul <8 x i16> %{{.*}}, %{{.*}}
return _mm_mullo_epi16(A, B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mullo_epi16((__m128i)(__v8hi){+1, -2, +3, -4, +5, -6, +7, -8}, (__m128i)(__v8hi){-16, -14, +12, +10, -8, +6, -4, +2}), -16, 28, 36, -40, -40, -36, -28, -16));
__m128d test_mm_or_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_or_pd
@@ -963,6 +1003,7 @@ __m128i test_mm_or_si128(__m128i A, __m128i B) {
// CHECK: or <2 x i64> %{{.*}}, %{{.*}}
return _mm_or_si128(A, B);
}
+TEST_CONSTEXPR(match_v4si(_mm_or_si128((__m128i)(__v4si){0, -1, 0, -1}, (__m128i)(__v4si){0, 0, -1, -1}), 0, -1, -1, -1));
__m128i test_mm_packs_epi16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_packs_epi16
@@ -1281,6 +1322,10 @@ __m128i test_mm_slli_epi16(__m128i A) {
// CHECK: call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %{{.*}}, i32 %{{.*}})
return _mm_slli_epi16(A, 1);
}
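+// Covers identity (0), small (1), and mid-range (8) counts; a count of 16 clears every lane.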
+TEST_CONSTEXPR(match_v8hi(_mm_slli_epi16((__m128i)(__v8hi){0, 1, 2, 3, 4, 5, 6, 7}, 0), 0, 1, 2, 3, 4, 5, 6, 7));
+TEST_CONSTEXPR(match_v8hi(_mm_slli_epi16((__m128i)(__v8hi){0, 1, 2, 3, 4, 5, 6, 7}, 1), 0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe));
+TEST_CONSTEXPR(match_v8hi(_mm_slli_epi16((__m128i)(__v8hi){0, 8, 2, 3, 4, 5, 6, 7}, 8), 0, 0x800, 0x200, 0x300, 0x400, 0x500, 0x600, 0x700));
+TEST_CONSTEXPR(match_v8hi(_mm_slli_epi16((__m128i)(__v8hi){0, 8, 2, 3, 4, 5, 6, 7}, 16), 0, 0, 0, 0, 0, 0, 0, 0));
__m128i test_mm_slli_epi16_1(__m128i A) {
// CHECK-LABEL: test_mm_slli_epi16_1
@@ -1299,6 +1344,11 @@ __m128i test_mm_slli_epi32(__m128i A) {
// CHECK: call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %{{.*}}, i32 %{{.*}})
return _mm_slli_epi32(A, 1);
}
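+// A count of 31 leaves only the original bit 0 in the sign position; counts of 32 and above clear every lane.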
+TEST_CONSTEXPR(match_v4si(_mm_slli_epi32((__m128i)(__v4si){0, 1, 2, 3}, 0), 0, 1, 2, 3));
+TEST_CONSTEXPR(match_v4si(_mm_slli_epi32((__m128i)(__v4si){0, 1, 2, 3}, 1), 0, 0x2, 0x4, 0x6));
+TEST_CONSTEXPR(match_v4su(_mm_slli_epi32((__m128i)(__v4su){0, 1, 2, 3}, 31), 0, 0x80000000, 0x0, 0x80000000));
+TEST_CONSTEXPR(match_v4si(_mm_slli_epi32((__m128i)(__v4si){0, 1, 2, 3}, 32), 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v4si(_mm_slli_epi32((__m128i)(__v4si){0, 1, 2, 3}, 33), 0, 0, 0, 0));
__m128i test_mm_slli_epi32_1(__m128i A) {
// CHECK-LABEL: test_mm_slli_epi32_1
@@ -1317,6 +1367,12 @@ __m128i test_mm_slli_epi64(__m128i A) {
// CHECK: call {{.*}}<2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %{{.*}}, i32 %{{.*}})
return _mm_slli_epi64(A, 1);
}
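+// Counts of 64 and above clear every lane.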
+TEST_CONSTEXPR(match_v2di(_mm_slli_epi64((__m128i)(__v2di){0, 1}, 0), 0, 1));
+TEST_CONSTEXPR(match_v2di(_mm_slli_epi64((__m128i)(__v2di){0, 1}, 1), 0, 0x2));
+TEST_CONSTEXPR(match_v2di(_mm_slli_epi64((__m128i)(__v2di){5, 8}, 6), 0x140, 0x200));
+TEST_CONSTEXPR(match_v2du(_mm_slli_epi64((__m128i)(__v2du){0, 1}, 63), 0, 0x8000000000000000ULL));
+TEST_CONSTEXPR(match_v2di(_mm_slli_epi64((__m128i)(__v2di){0, 1}, 64), 0, 0));
+TEST_CONSTEXPR(match_v2di(_mm_slli_epi64((__m128i)(__v2di){0, 1}, 65), 0, 0));
__m128i test_mm_slli_epi64_1(__m128i A) {
// CHECK-LABEL: test_mm_slli_epi64_1
@@ -1373,6 +1429,8 @@ __m128i test_mm_srai_epi16(__m128i A) {
// CHECK: call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %{{.*}}, i32 %{{.*}})
return _mm_srai_epi16(A, 1);
}
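+// Arithmetic right shift replicates the sign bit: -32768 >> 1 == -16384 and -1 >> 1 == -1.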
+TEST_CONSTEXPR(match_v8hi(_mm_srai_epi16((__m128i)(__v8hi){-32768, 32767, -3, -2, -1, 0, 1, 2}, 1), -16384, 16383, -2, -1, -1, 0, 0, 1));
__m128i test_mm_srai_epi16_1(__m128i A) {
// CHECK-LABEL: test_mm_srai_epi16_1
@@ -1391,6 +1449,7 @@ __m128i test_mm_srai_epi32(__m128i A) {
// CHECK: call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %{{.*}}, i32 %{{.*}})
return _mm_srai_epi32(A, 1);
}
+TEST_CONSTEXPR(match_v4si(_mm_srai_epi32((__m128i)(__v4si){-32768, 32767, -3, 2}, 1), -16384, 16383, -2, 1));
__m128i test_mm_srai_epi32_1(__m128i A) {
// CHECK-LABEL: test_mm_srai_epi32_1
@@ -1427,6 +1486,7 @@ __m128i test_mm_srli_epi16(__m128i A) {
// CHECK: call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %{{.*}}, i32 %{{.*}})
return _mm_srli_epi16(A, 1);
}
+TEST_CONSTEXPR(match_v8hi(_mm_srli_epi16((__m128i)(__v8hi){0, 1, 2, 3, 4, 5, 6, 7}, 1), 0, 0x0, 0x1, 0x1, 0x2, 0x2, 0x3, 0x3));
__m128i test_mm_srli_epi16_1(__m128i A) {
// CHECK-LABEL: test_mm_srli_epi16_1
@@ -1445,6 +1505,7 @@ __m128i test_mm_srli_epi32(__m128i A) {
// CHECK: call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %{{.*}}, i32 %{{.*}})
return _mm_srli_epi32(A, 1);
}
+TEST_CONSTEXPR(match_v4si(_mm_srli_epi32((__m128i)(__v4si){0, 1, 2, 3}, 8), 0, 0x0, 0x0, 0x0));
__m128i test_mm_srli_epi32_1(__m128i A) {
// CHECK-LABEL: test_mm_srli_epi32_1
@@ -1463,6 +1524,7 @@ __m128i test_mm_srli_epi64(__m128i A) {
// CHECK: call {{.*}}<2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %{{.*}}, i32 %{{.*}})
return _mm_srli_epi64(A, 1);
}
+TEST_CONSTEXPR(match_v2di(_mm_srli_epi64((__m128i)(__v2di){100005, 100008}, 6), 0x61a, 0x61a));
__m128i test_mm_srli_epi64_1(__m128i A) {
// CHECK-LABEL: test_mm_srli_epi64_1
@@ -1685,12 +1747,14 @@ __m128i test_mm_subs_epi8(__m128i A, __m128i B) {
// CHECK: call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
return _mm_subs_epi8(A, B);
}
+TEST_CONSTEXPR(match_v16qi(_mm_subs_epi8((__m128i)(__v16qs){+100, +50, -100, +20, +80, -50, +120, -20, -100, -50, +100, -20, -80, +50, -120, +20}, (__m128i)(__v16qs){-50, -80, +50, -110, -60, +30, -20, +10, -50, -80, +50, -110, -60, +30, -20, +10}), +127, +127, -128, +127, +127, -80, +127, -30, -50, +30, +50, +90, -20, +20, -100, +10));
__m128i test_mm_subs_epi16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_subs_epi16
// CHECK: call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_subs_epi16(A, B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_subs_epi16((__m128i)(__v8hi){+32000, -32000, +32000, -32000, +80, -50, +120, -20}, (__m128i)(__v8hi){-800, +800, +800, -800, -60, +30, -20, +10}), +32767, -32768, +31200, -31200, +140, -80, +140, -30));
__m128i test_mm_subs_epu8(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_subs_epu8
@@ -1698,6 +1762,7 @@ __m128i test_mm_subs_epu8(__m128i A, __m128i B) {
// CHECK: call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
return _mm_subs_epu8(A, B);
}
+TEST_CONSTEXPR(match_v16qu(_mm_subs_epu8((__m128i)(__v16qu){0, 0, 0, 0, +127, +127, +127, +127, +128, +128, +128, +128, +255, +255, +255, +255}, (__m128i)(__v16qu){0, +127, +128, +255, 0, +127, +128, +255, 0, +127, +128, +255, 0, +127, +128, +255}), 0, 0, 0, 0, +127, 0, 0, 0, +128, +1, 0, 0, +255, +128, +127, 0));
__m128i test_mm_subs_epu16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_subs_epu16
@@ -1705,6 +1770,7 @@ __m128i test_mm_subs_epu16(__m128i A, __m128i B) {
// CHECK: call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_subs_epu16(A, B);
}
+TEST_CONSTEXPR(match_v8hu(_mm_subs_epu16((__m128i)(__v8hu){0, 0, 0, +32767, +32767, +32767, +65535, +65535}, (__m128i)(__v8hu){0, +32767, +65535, 0, +32767, +65535, 0, +32767}), 0, 0, 0, +32767, 0, 0, +65535, +32768));
int test_mm_ucomieq_sd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_ucomieq_sd
@@ -1762,24 +1828,28 @@ __m128i test_mm_unpackhi_epi8(__m128i A, __m128i B) {
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
return _mm_unpackhi_epi8(A, B);
}
+TEST_CONSTEXPR(match_v16qi(_mm_unpackhi_epi8((__m128i)(__v16qi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v16qi){16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}), 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31));
__m128i test_mm_unpackhi_epi16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_unpackhi_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
return _mm_unpackhi_epi16(A, B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_unpackhi_epi16((__m128i)(__v8hi){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v8hi){8, 9, 10, 11, 12, 13, 14, 15}), 4, 12, 5, 13, 6, 14, 7, 15));
__m128i test_mm_unpackhi_epi32(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_unpackhi_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
return _mm_unpackhi_epi32(A, B);
}
+TEST_CONSTEXPR(match_v4si(_mm_unpackhi_epi32((__m128i)(__v4si){0, 1, 2, 3}, (__m128i)(__v4si){4, 5, 6, 7}), 2, 6, 3, 7));
__m128i test_mm_unpackhi_epi64(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_unpackhi_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
return _mm_unpackhi_epi64(A, B);
}
+TEST_CONSTEXPR(match_v2di(_mm_unpackhi_epi64((__m128i)(__v2di){0, 1}, (__m128i)(__v2di){2, 3}), 1, 3));
__m128d test_mm_unpackhi_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_unpackhi_pd
@@ -1793,24 +1863,28 @@ __m128i test_mm_unpacklo_epi8(__m128i A, __m128i B) {
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
return _mm_unpacklo_epi8(A, B);
}
+TEST_CONSTEXPR(match_v16qi(_mm_unpacklo_epi8((__m128i)(__v16qi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, (__m128i)(__v16qi){16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}), 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23));
__m128i test_mm_unpacklo_epi16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_unpacklo_epi16
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
return _mm_unpacklo_epi16(A, B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_unpacklo_epi16((__m128i)(__v8hi){0, 1, 2, 3, 4, 5, 6, 7}, (__m128i)(__v8hi){8, 9, 10, 11, 12, 13, 14, 15}), 0, 8, 1, 9, 2, 10, 3, 11));
__m128i test_mm_unpacklo_epi32(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_unpacklo_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
return _mm_unpacklo_epi32(A, B);
}
+TEST_CONSTEXPR(match_v4si(_mm_unpacklo_epi32((__m128i)(__v4si){0, 1, 2, 3}, (__m128i)(__v4si){4, 5, 6, 7}), 0, 4, 1, 5));
__m128i test_mm_unpacklo_epi64(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_unpacklo_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
return _mm_unpacklo_epi64(A, B);
}
+TEST_CONSTEXPR(match_v2di(_mm_unpacklo_epi64((__m128i)(__v2di){0, 1}, (__m128i)(__v2di){2, 3}), 0, 2));
__m128d test_mm_unpacklo_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_unpacklo_pd
@@ -1831,3 +1905,4 @@ __m128i test_mm_xor_si128(__m128i A, __m128i B) {
// CHECK: xor <2 x i64> %{{.*}}, %{{.*}}
return _mm_xor_si128(A, B);
}
+TEST_CONSTEXPR(match_v4si(_mm_xor_si128((__m128i)(__v4si){0, -1, 0, -1}, (__m128i)(__v4si){0, 0, -1, -1}), 0, -1, -1, 0));
diff --git a/clang/test/CodeGen/X86/sse3-builtins.c b/clang/test/CodeGen/X86/sse3-builtins.c
index d47c19b..c53afc5 100644
--- a/clang/test/CodeGen/X86/sse3-builtins.c
+++ b/clang/test/CodeGen/X86/sse3-builtins.c
@@ -3,6 +3,11 @@
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse3 -emit-llvm -o - -Wall -Werror | FileCheck %s
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse3 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse3 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse3 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse3 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse3 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <immintrin.h>
#include "builtin_test_helpers.h"
diff --git a/clang/test/CodeGen/X86/sse41-builtins.c b/clang/test/CodeGen/X86/sse41-builtins.c
index d71a4b7..d3e4edc 100644
--- a/clang/test/CodeGen/X86/sse41-builtins.c
+++ b/clang/test/CodeGen/X86/sse41-builtins.c
@@ -7,8 +7,18 @@
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse4.1 -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse4.1 -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse4.1 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse4.1 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse4.1 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse4.1 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse4.1 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse4.1 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse4.1 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse4.1 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK
+
#include <immintrin.h>
+#include "builtin_test_helpers.h"
// NOTE: This should match the tests in llvm/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
@@ -78,6 +88,7 @@ __m128i test_mm_cmpeq_epi64(__m128i A, __m128i B) {
// CHECK: sext <2 x i1> %{{.*}} to <2 x i64>
return _mm_cmpeq_epi64(A, B);
}
+TEST_CONSTEXPR(match_v2di(_mm_cmpeq_epi64((__m128i)(__v2di){+1, -8}, (__m128i)(__v2di){-10, -8}), 0, -1));
__m128i test_mm_cvtepi8_epi16(__m128i a) {
// CHECK-LABEL: test_mm_cvtepi8_epi16
@@ -86,6 +97,8 @@ __m128i test_mm_cvtepi8_epi16(__m128i a) {
return _mm_cvtepi8_epi16(a);
}
+TEST_CONSTEXPR(match_v8hi(_mm_cvtepi8_epi16(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 0, 0, 0, 0, 0, 0, 0, 0)), -3, 2, -1, 0, 1, -2, 3, -4));
+
__m128i test_mm_cvtepi8_epi32(__m128i a) {
// CHECK-LABEL: test_mm_cvtepi8_epi32
// CHECK: shufflevector <16 x i8> {{.*}}, <16 x i8> {{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -93,6 +106,8 @@ __m128i test_mm_cvtepi8_epi32(__m128i a) {
return _mm_cvtepi8_epi32(a);
}
+TEST_CONSTEXPR(match_v4si(_mm_cvtepi8_epi32(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 0, 0, 0, 0, 0, 0, 0, 0)), -3, 2, -1, 0));
+
__m128i test_mm_cvtepi8_epi64(__m128i a) {
// CHECK-LABEL: test_mm_cvtepi8_epi64
// CHECK: shufflevector <16 x i8> {{.*}}, <16 x i8> {{.*}}, <2 x i32> <i32 0, i32 1>
@@ -100,6 +115,8 @@ __m128i test_mm_cvtepi8_epi64(__m128i a) {
return _mm_cvtepi8_epi64(a);
}
+TEST_CONSTEXPR(match_v2di(_mm_cvtepi8_epi64(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 0, 0, 0, 0, 0, 0, 0, 0)), -3, 2));
+
__m128i test_mm_cvtepi16_epi32(__m128i a) {
// CHECK-LABEL: test_mm_cvtepi16_epi32
// CHECK: shufflevector <8 x i16> {{.*}}, <8 x i16> {{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -107,6 +124,8 @@ __m128i test_mm_cvtepi16_epi32(__m128i a) {
return _mm_cvtepi16_epi32(a);
}
+TEST_CONSTEXPR(match_v4si(_mm_cvtepi16_epi32(_mm_setr_epi16(-300, 2, -1, 0, 1, -2, 3, -4)), -300, 2, -1, 0));
+
__m128i test_mm_cvtepi16_epi64(__m128i a) {
// CHECK-LABEL: test_mm_cvtepi16_epi64
// CHECK: shufflevector <8 x i16> {{.*}}, <8 x i16> {{.*}}, <2 x i32> <i32 0, i32 1>
@@ -114,6 +133,8 @@ __m128i test_mm_cvtepi16_epi64(__m128i a) {
return _mm_cvtepi16_epi64(a);
}
+TEST_CONSTEXPR(match_v2di(_mm_cvtepi16_epi64(_mm_setr_epi16(-300, 2, -1, 0, 1, -2, 3, -4)), -300, 2));
+
__m128i test_mm_cvtepi32_epi64(__m128i a) {
// CHECK-LABEL: test_mm_cvtepi32_epi64
// CHECK: shufflevector <4 x i32> {{.*}}, <4 x i32> {{.*}}, <2 x i32> <i32 0, i32 1>
@@ -121,6 +142,8 @@ __m128i test_mm_cvtepi32_epi64(__m128i a) {
return _mm_cvtepi32_epi64(a);
}
+TEST_CONSTEXPR(match_v2di(_mm_cvtepi32_epi64(_mm_setr_epi32(-70000, 2, -1, 0)), -70000, 2));
+
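+// The epu variants zero-extend, so negative bytes reinterpret as their unsigned values (e.g. -3 -> 253).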
__m128i test_mm_cvtepu8_epi16(__m128i a) {
// CHECK-LABEL: test_mm_cvtepu8_epi16
// CHECK: shufflevector <16 x i8> {{.*}}, <16 x i8> {{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -128,6 +151,8 @@ __m128i test_mm_cvtepu8_epi16(__m128i a) {
return _mm_cvtepu8_epi16(a);
}
+TEST_CONSTEXPR(match_v8hi(_mm_cvtepu8_epi16(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 0, 0, 0, 0, 0, 0, 0, 0)), 253, 2, 255, 0, 1, 254, 3, 252));
+
__m128i test_mm_cvtepu8_epi32(__m128i a) {
// CHECK-LABEL: test_mm_cvtepu8_epi32
// CHECK: shufflevector <16 x i8> {{.*}}, <16 x i8> {{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -135,6 +160,8 @@ __m128i test_mm_cvtepu8_epi32(__m128i a) {
return _mm_cvtepu8_epi32(a);
}
+TEST_CONSTEXPR(match_v4si(_mm_cvtepu8_epi32(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 0, 0, 0, 0, 0, 0, 0, 0)), 253, 2, 255, 0));
+
__m128i test_mm_cvtepu8_epi64(__m128i a) {
// CHECK-LABEL: test_mm_cvtepu8_epi64
// CHECK: shufflevector <16 x i8> {{.*}}, <16 x i8> {{.*}}, <2 x i32> <i32 0, i32 1>
@@ -142,6 +169,8 @@ __m128i test_mm_cvtepu8_epi64(__m128i a) {
return _mm_cvtepu8_epi64(a);
}
+TEST_CONSTEXPR(match_v2di(_mm_cvtepu8_epi64(_mm_setr_epi8(-3, 2, -1, 0, 1, -2, 3, -4, 0, 0, 0, 0, 0, 0, 0, 0)), 253, 2));
+
__m128i test_mm_cvtepu16_epi32(__m128i a) {
// CHECK-LABEL: test_mm_cvtepu16_epi32
// CHECK: shufflevector <8 x i16> {{.*}}, <8 x i16> {{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -149,6 +178,8 @@ __m128i test_mm_cvtepu16_epi32(__m128i a) {
return _mm_cvtepu16_epi32(a);
}
+TEST_CONSTEXPR(match_v4si(_mm_cvtepu16_epi32(_mm_setr_epi16(-300, 2, -1, 0, 1, -2, 3, -4)), 65236, 2, 65535, 0));
+
__m128i test_mm_cvtepu16_epi64(__m128i a) {
// CHECK-LABEL: test_mm_cvtepu16_epi64
// CHECK: shufflevector <8 x i16> {{.*}}, <8 x i16> {{.*}}, <2 x i32> <i32 0, i32 1>
@@ -156,6 +187,8 @@ __m128i test_mm_cvtepu16_epi64(__m128i a) {
return _mm_cvtepu16_epi64(a);
}
+TEST_CONSTEXPR(match_v2di(_mm_cvtepu16_epi64(_mm_setr_epi16(-300, 2, -1, 0, 1, -2, 3, -4)), 65236, 2));
+
__m128i test_mm_cvtepu32_epi64(__m128i a) {
// CHECK-LABEL: test_mm_cvtepu32_epi64
// CHECK: shufflevector <4 x i32> {{.*}}, <4 x i32> {{.*}}, <2 x i32> <i32 0, i32 1>
@@ -163,6 +196,8 @@ __m128i test_mm_cvtepu32_epi64(__m128i a) {
return _mm_cvtepu32_epi64(a);
}
+TEST_CONSTEXPR(match_v2di(_mm_cvtepu32_epi64(_mm_setr_epi32(-70000, 2, -1, 0)), 4294897296, 2));
+
__m128d test_mm_dp_pd(__m128d x, __m128d y) {
// CHECK-LABEL: test_mm_dp_pd
// CHECK: call {{.*}}<2 x double> @llvm.x86.sse41.dppd(<2 x double> {{.*}}, <2 x double> {{.*}}, i8 7)
@@ -319,6 +354,7 @@ __m128i test_mm_mul_epi32(__m128i x, __m128i y) {
// CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
return _mm_mul_epi32(x, y);
}
+TEST_CONSTEXPR(match_m128i(_mm_mul_epi32((__m128i)(__v4si){+1, -2, +3, -4}, (__m128i)(__v4si){-16, -14, +12, +10}), -16, 36));
__m128i test_mm_mullo_epi32(__m128i x, __m128i y) {
// CHECK-LABEL: test_mm_mullo_epi32
diff --git a/clang/test/CodeGen/X86/sse42-builtins.c b/clang/test/CodeGen/X86/sse42-builtins.c
index d0c0cce..3a1e8fc 100644
--- a/clang/test/CodeGen/X86/sse42-builtins.c
+++ b/clang/test/CodeGen/X86/sse42-builtins.c
@@ -7,8 +7,18 @@
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse4.2 -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse4.2 -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse4.2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse4.2 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse4.2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse4.2 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse4.2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse4.2 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse4.2 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +sse4.2 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK
+
#include <immintrin.h>
+#include "builtin_test_helpers.h"
// NOTE: This should match the tests in llvm/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll
@@ -59,6 +69,7 @@ __m128i test_mm_cmpgt_epi64(__m128i A, __m128i B) {
// CHECK: icmp sgt <2 x i64>
return _mm_cmpgt_epi64(A, B);
}
+TEST_CONSTEXPR(match_v2di(_mm_cmpgt_epi64((__m128i)(__v2di){+1, -8}, (__m128i)(__v2di){-10, -8}), -1, 0));
int test_mm_cmpistra(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_cmpistra
diff --git a/clang/test/CodeGen/X86/ssse3-builtins.c b/clang/test/CodeGen/X86/ssse3-builtins.c
index 982c74c..56ff73f0 100644
--- a/clang/test/CodeGen/X86/ssse3-builtins.c
+++ b/clang/test/CodeGen/X86/ssse3-builtins.c
@@ -1,10 +1,24 @@
// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +ssse3 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +ssse3 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +ssse3 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +ssse3 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +ssse3 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +ssse3 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +ssse3 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +ssse3 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +ssse3 -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
// NOTE: This should match the tests in llvm/test/CodeGen/X86/ssse3-intrinsics-fast-isel.ll
@@ -13,18 +27,21 @@ __m128i test_mm_abs_epi8(__m128i a) {
// CHECK: [[ABS:%.*]] = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %{{.*}}, i1 false)
return _mm_abs_epi8(a);
}
+TEST_CONSTEXPR(match_v16qi(_mm_abs_epi8((__m128i)(__v16qs){+100, +50, -100, +20, +80, -50, +120, -20, -100, -50, +100, -20, -80, +50, -120, +20}), 100, 50, 100, 20, 80, 50, 120, 20, 100, 50, 100, 20, 80, 50, 120, 20));
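// The trailing `i1 false` on llvm.abs means INT_MIN wraps instead of being
// poison, so abs(-128) in an 8-bit lane would stay -128. Inputs are spelled
// with the explicitly signed __v16qs element type, keeping the expected lanes
// independent of plain char signedness, which the -fno-signed-char RUN lines
// above deliberately vary.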
__m128i test_mm_abs_epi16(__m128i a) {
// CHECK-LABEL: test_mm_abs_epi16
// CHECK: [[ABS:%.*]] = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %{{.*}}, i1 false)
return _mm_abs_epi16(a);
}
+TEST_CONSTEXPR(match_v8hi(_mm_abs_epi16((__m128i)(__v8hi){+32000, -32000, +6, -60, +80, -50, +120, -20}), 32000, 32000, 6, 60, 80, 50, 120, 20));
__m128i test_mm_abs_epi32(__m128i a) {
// CHECK-LABEL: test_mm_abs_epi32
// CHECK: [[ABS:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %{{.*}}, i1 false)
return _mm_abs_epi32(a);
}
+TEST_CONSTEXPR(match_v4si(_mm_abs_epi32((__m128i)(__v4si){-5, -1, 0, 1}), 5, 1, 0, 1));
__m128i test_mm_alignr_epi8(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_alignr_epi8
diff --git a/clang/test/CodeGen/X86/x86-bswap.c b/clang/test/CodeGen/X86/x86-bswap.c
index 589dd83..8a5baac 100644
--- a/clang/test/CodeGen/X86/x86-bswap.c
+++ b/clang/test/CodeGen/X86/x86-bswap.c
@@ -1,45 +1,41 @@
// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -no-enable-noundef-analysis -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding %s -triple=x86_64-apple-darwin -no-enable-noundef-analysis -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -no-enable-noundef-analysis -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding %s -triple=x86_64-apple-darwin -no-enable-noundef-analysis -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <x86intrin.h>
+#include "builtin_test_helpers.h"
int test__bswapd(int X) {
// CHECK-LABEL: test__bswapd
// CHECK: call i32 @llvm.bswap.i32
return __bswapd(X);
}
+TEST_CONSTEXPR(__bswapd(0x00000000) == 0x00000000);
+TEST_CONSTEXPR(__bswapd(0x01020304) == 0x04030201);
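// Worked example: byte-reversing 0x01020304 swaps the four bytes
// 01 02 03 04 end for end, giving 0x04030201.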
int test_bswap(int X) {
// CHECK-LABEL: test_bswap
// CHECK: call i32 @llvm.bswap.i32
return _bswap(X);
}
+TEST_CONSTEXPR(_bswap(0x00000000) == 0x00000000);
+TEST_CONSTEXPR(_bswap(0x10203040) == 0x40302010);
long test__bswapq(long long X) {
// CHECK-LABEL: test__bswapq
// CHECK: call i64 @llvm.bswap.i64
return __bswapq(X);
}
+TEST_CONSTEXPR(__bswapq(0x0000000000000000ULL) == 0x0000000000000000);
+TEST_CONSTEXPR(__bswapq(0x0102030405060708ULL) == 0x0807060504030201);
long test_bswap64(long long X) {
// CHECK-LABEL: test_bswap64
// CHECK: call i64 @llvm.bswap.i64
return _bswap64(X);
}
-
-// Test constexpr handling.
-#if defined(__cplusplus) && (__cplusplus >= 201103L)
-
-char bswapd_0[__bswapd(0x00000000) == 0x00000000 ? 1 : -1];
-char bswapd_1[__bswapd(0x01020304) == 0x04030201 ? 1 : -1];
-
-char bswap_0[_bswap(0x00000000) == 0x00000000 ? 1 : -1];
-char bswap_1[_bswap(0x10203040) == 0x40302010 ? 1 : -1];
-
-char bswapq_0[__bswapq(0x0000000000000000ULL) == 0x0000000000000000 ? 1 : -1];
-char bswapq_1[__bswapq(0x0102030405060708ULL) == 0x0807060504030201 ? 1 : -1];
-
-char bswap64_0[_bswap64(0x0000000000000000ULL) == 0x0000000000000000 ? 1 : -1];
-char bswap64_1[_bswap64(0x1020304050607080ULL) == 0x8070605040302010 ? 1 : -1];
-
-#endif
+TEST_CONSTEXPR(_bswap64(0x0000000000000000ULL) == 0x0000000000000000);
+TEST_CONSTEXPR(_bswap64(0x1020304050607080ULL) == 0x8070605040302010);
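// The deleted lines above used the pre-C++11 negative-array-size idiom: a
// failed condition produced `char name[-1]`, which is ill-formed, so the check
// ran at compile time. TEST_CONSTEXPR expresses the same compile-time check
// directly (presumably via static_assert, per the sketch earlier) and also
// keeps the C RUN lines compiling, since it expands to nothing there.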
diff --git a/clang/test/CodeGen/X86/x86-builtins.c b/clang/test/CodeGen/X86/x86-builtins.c
index e503e47..37cfe3e 100644
--- a/clang/test/CodeGen/X86/x86-builtins.c
+++ b/clang/test/CodeGen/X86/x86-builtins.c
@@ -1,39 +1,40 @@
// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-unknown-unknown -emit-llvm -o - -Wall -Werror | FileCheck %s
// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386-unknown-unknown -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding %s -triple=x86_64-unknown-unknown -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding %s -triple=i386-unknown-unknown -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-unknown-unknown -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-unknown-unknown -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-unknown-unknown -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386-unknown-unknown -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-unknown-unknown -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-unknown-unknown -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
#include <x86intrin.h>
+#include "builtin_test_helpers.h"
unsigned int test_castf32_u32 (float __A){
// CHECK-LABEL: test_castf32_u32
// CHECK: %{{.*}} = load i32, ptr %{{.*}}, align 4
return _castf32_u32(__A);
}
+TEST_CONSTEXPR(_castf32_u32(-0.0f) == 0x80000000);
unsigned long long test_castf64_u64 (double __A){
// CHECK-LABEL: test_castf64_u64
// CHECK: %{{.*}} = load i64, ptr %{{.*}}, align 8
return _castf64_u64(__A);
}
+TEST_CONSTEXPR(_castf64_u64(-0.0) == 0x8000000000000000);
float test_castu32_f32 (unsigned int __A){
// CHECK-LABEL: test_castu32_f32
// CHECK: %{{.*}} = load float, ptr %{{.*}}, align 4
return _castu32_f32(__A);
}
+TEST_CONSTEXPR(_castu32_f32(0x3F800000) == +1.0f);
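// Bit-pattern check: IEEE-754 single precision packs 1.0f as sign 0, biased
// exponent 127 (0x7F), zero mantissa, i.e. 0x3F800000, and -0.0f as only the
// sign bit, 0x80000000. The _cast* intrinsics reinterpret those bits without
// any numeric conversion, hence the plain load in the CHECK lines.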
double test_castu64_f64 (unsigned long long __A){
// CHECK-LABEL: test_castu64_f64
// CHECK: %{{.*}} = load double, ptr %{{.*}}, align 8
return _castu64_f64(__A);
}
-
-// Test constexpr handling.
-#if defined(__cplusplus) && (__cplusplus >= 201103L)
-char cast_f32_u32_0[_castf32_u32(-0.0f) == 0x80000000 ? 1 : -1];
-char cast_u32_f32_0[_castu32_f32(0x3F800000) == +1.0f ? 1 : -1];
-
-char castf64_u64_0[_castf64_u64(-0.0) == 0x8000000000000000 ? 1 : -1];
-char castu64_f64_0[_castu64_f64(0xBFF0000000000000ULL) == -1.0 ? 1 : -1];
-#endif
+TEST_CONSTEXPR(_castu64_f64(0xBFF0000000000000ULL) == -1.0);
diff --git a/clang/test/CodeGen/X86/xop-builtins.c b/clang/test/CodeGen/X86/xop-builtins.c
index 8ba6b8b..994fc7b 100644
--- a/clang/test/CodeGen/X86/xop-builtins.c
+++ b/clang/test/CodeGen/X86/xop-builtins.c
@@ -1,10 +1,24 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +xop -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +xop -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +xop -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +xop -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +xop -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +xop -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +xop -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +xop -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +xop -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +xop -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +xop -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +xop -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +xop -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +xop -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +xop -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +xop -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +xop -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +xop -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +xop -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +xop -fno-signed-char -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
#include <x86intrin.h>
+#include "builtin_test_helpers.h"
// NOTE: This should match the tests in llvm/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
@@ -46,25 +60,25 @@ __m128i test_mm_macc_epi32(__m128i a, __m128i b, __m128i c) {
__m128i test_mm_maccslo_epi32(__m128i a, __m128i b, __m128i c) {
// CHECK-LABEL: test_mm_maccslo_epi32
- // CHECK: call <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
return _mm_maccslo_epi32(a, b, c);
}
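// The {{.*}} wildcard added to these CHECK lines lets FileCheck tolerate
// whatever may now appear between `call` and the vector return type, such as
// return attributes or fast-math flags in some of the added RUN
// configurations, while still pinning the exact intrinsic being called.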
__m128i test_mm_macclo_epi32(__m128i a, __m128i b, __m128i c) {
// CHECK-LABEL: test_mm_macclo_epi32
- // CHECK: call <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
return _mm_macclo_epi32(a, b, c);
}
__m128i test_mm_maccshi_epi32(__m128i a, __m128i b, __m128i c) {
// CHECK-LABEL: test_mm_maccshi_epi32
- // CHECK: call <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
return _mm_maccshi_epi32(a, b, c);
}
__m128i test_mm_macchi_epi32(__m128i a, __m128i b, __m128i c) {
// CHECK-LABEL: test_mm_macchi_epi32
- // CHECK: call <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
return _mm_macchi_epi32(a, b, c);
}
@@ -94,7 +108,7 @@ __m128i test_mm_haddd_epi8(__m128i a) {
__m128i test_mm_haddq_epi8(__m128i a) {
// CHECK-LABEL: test_mm_haddq_epi8
- // CHECK: call <2 x i64> @llvm.x86.xop.vphaddbq(<16 x i8> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.xop.vphaddbq(<16 x i8> %{{.*}})
return _mm_haddq_epi8(a);
}
@@ -106,13 +120,13 @@ __m128i test_mm_haddd_epi16(__m128i a) {
__m128i test_mm_haddq_epi16(__m128i a) {
// CHECK-LABEL: test_mm_haddq_epi16
- // CHECK: call <2 x i64> @llvm.x86.xop.vphaddwq(<8 x i16> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.xop.vphaddwq(<8 x i16> %{{.*}})
return _mm_haddq_epi16(a);
}
__m128i test_mm_haddq_epi32(__m128i a) {
// CHECK-LABEL: test_mm_haddq_epi32
- // CHECK: call <2 x i64> @llvm.x86.xop.vphadddq(<4 x i32> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.xop.vphadddq(<4 x i32> %{{.*}})
return _mm_haddq_epi32(a);
}
@@ -130,7 +144,7 @@ __m128i test_mm_haddd_epu8(__m128i a) {
__m128i test_mm_haddq_epu8(__m128i a) {
// CHECK-LABEL: test_mm_haddq_epu8
- // CHECK: call <2 x i64> @llvm.x86.xop.vphaddubq(<16 x i8> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.xop.vphaddubq(<16 x i8> %{{.*}})
return _mm_haddq_epu8(a);
}
@@ -142,13 +156,13 @@ __m128i test_mm_haddd_epu16(__m128i a) {
__m128i test_mm_haddq_epu16(__m128i a) {
// CHECK-LABEL: test_mm_haddq_epu16
- // CHECK: call <2 x i64> @llvm.x86.xop.vphadduwq(<8 x i16> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.xop.vphadduwq(<8 x i16> %{{.*}})
return _mm_haddq_epu16(a);
}
__m128i test_mm_haddq_epu32(__m128i a) {
// CHECK-LABEL: test_mm_haddq_epu32
- // CHECK: call <2 x i64> @llvm.x86.xop.vphaddudq(<4 x i32> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.xop.vphaddudq(<4 x i32> %{{.*}})
return _mm_haddq_epu32(a);
}
@@ -166,7 +180,7 @@ __m128i test_mm_hsubd_epi16(__m128i a) {
__m128i test_mm_hsubq_epi32(__m128i a) {
// CHECK-LABEL: test_mm_hsubq_epi32
- // CHECK: call <2 x i64> @llvm.x86.xop.vphsubdq(<4 x i32> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.xop.vphsubdq(<4 x i32> %{{.*}})
return _mm_hsubq_epi32(a);
}
@@ -178,6 +192,7 @@ __m128i test_mm_cmov_si128(__m128i a, __m128i b, __m128i c) {
// CHECK-NEXT: %{{.*}} = or <2 x i64> [[AND]], [[ANDN]]
return _mm_cmov_si128(a, b, c);
}
+TEST_CONSTEXPR(match_v4si(_mm_cmov_si128((__m128i)(__v4si){+1,+2,+3,+4}, (__m128i)(__v4si){-4,-3,-2,-1}, (__m128i)(__v4si){-1,0,0,-1}), +1, -3, -2, +4));
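// _mm_cmov_si128 is a bitwise select, (a & c) | (b & ~c), as the AND/ANDN/OR
// CHECK lines show: a -1 lane in c keeps a (lanes 0 and 3 stay +1 and +4) and
// a 0 lane takes b (lanes 1 and 2 become -3 and -2).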
__m256i test_mm256_cmov_si256(__m256i a, __m256i b, __m256i c) {
// CHECK-LABEL: test_mm256_cmov_si256
@@ -187,6 +202,7 @@ __m256i test_mm256_cmov_si256(__m256i a, __m256i b, __m256i c) {
// CHECK-NEXT: %{{.*}} = or <4 x i64> [[AND]], [[ANDN]]
return _mm256_cmov_si256(a, b, c);
}
+TEST_CONSTEXPR(match_v4di(_mm256_cmov_si256((__m256i)(__v4di){+1,+2,+3,+4}, (__m256i)(__v4di){-4,-3,-2,-1}, (__m256i)(__v4di){0,-1,0,-1}), -4, +2, -2, +4));
__m128i test_mm_perm_epi8(__m128i a, __m128i b, __m128i c) {
// CHECK-LABEL: test_mm_perm_epi8
@@ -214,7 +230,7 @@ __m128i test_mm_rot_epi32(__m128i a, __m128i b) {
__m128i test_mm_rot_epi64(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_rot_epi64
- // CHECK: call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_rot_epi64(a, b);
}
@@ -223,24 +239,28 @@ __m128i test_mm_roti_epi8(__m128i a) {
// CHECK: call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> splat (i8 1))
return _mm_roti_epi8(a, 1);
}
+TEST_CONSTEXPR(match_v16qi(_mm_roti_epi8(((__m128i)(__v16qs){0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15}), 3), 0, 8, -9, 24, -25, 40, -41, 56, -57, 72, -73, 88, -89, 104, -105, 120));
__m128i test_mm_roti_epi16(__m128i a) {
// CHECK-LABEL: test_mm_roti_epi16
// CHECK: call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> splat (i16 50))
return _mm_roti_epi16(a, 50);
}
+TEST_CONSTEXPR(match_v8hi(_mm_roti_epi16(((__m128i)(__v8hi){2, -3, 4, -5, 6, -7, 8, -9}), 1), 4, -5, 8, -9, 12, -13, 16, -17));
__m128i test_mm_roti_epi32(__m128i a) {
// CHECK-LABEL: test_mm_roti_epi32
// CHECK: call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> splat (i32 226))
return _mm_roti_epi32(a, -30);
}
+TEST_CONSTEXPR(match_v4si(_mm_roti_epi32(((__m128i)(__v4si){1, -2, 3, -4}), 5), 32, -33, 96, -97));
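// Rotate arithmetic for the epi32 case: rotl32(x, 5) = (x << 5) | (x >> 27),
// so 1 becomes 32 and -2 = 0xFFFFFFFE becomes 0xFFFFFFDF = -33, matching the
// expected lanes.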
__m128i test_mm_roti_epi64(__m128i a) {
// CHECK-LABEL: test_mm_roti_epi64
- // CHECK: call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 100))
+ // CHECK: call {{.*}}<2 x i64> @llvm.fshl.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> splat (i64 100))
return _mm_roti_epi64(a, 100);
}
+TEST_CONSTEXPR(match_v2di(_mm_roti_epi64(((__m128i)(__v2di){99, -55}), 19), 51904512, -28311553));
__m128i test_mm_shl_epi8(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_shl_epi8
@@ -262,7 +282,7 @@ __m128i test_mm_shl_epi32(__m128i a, __m128i b) {
__m128i test_mm_shl_epi64(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_shl_epi64
- // CHECK: call <2 x i64> @llvm.x86.xop.vpshlq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.xop.vpshlq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_shl_epi64(a, b);
}
@@ -286,7 +306,7 @@ __m128i test_mm_sha_epi32(__m128i a, __m128i b) {
__m128i test_mm_sha_epi64(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_sha_epi64
- // CHECK: call <2 x i64> @llvm.x86.xop.vpshaq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.xop.vpshaq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_sha_epi64(a, b);
}
@@ -372,36 +392,36 @@ __m256 test_mm256_permute2_ps(__m256 a, __m256 b, __m256i c) {
__m128 test_mm_frcz_ss(__m128 a) {
// CHECK-LABEL: test_mm_frcz_ss
- // CHECK: call <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float> %{{.*}})
+ // CHECK: call {{.*}}<4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float> %{{.*}})
return _mm_frcz_ss(a);
}
__m128d test_mm_frcz_sd(__m128d a) {
// CHECK-LABEL: test_mm_frcz_sd
- // CHECK: call <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double> %{{.*}})
+ // CHECK: call {{.*}}<2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double> %{{.*}})
return _mm_frcz_sd(a);
}
__m128 test_mm_frcz_ps(__m128 a) {
// CHECK-LABEL: test_mm_frcz_ps
- // CHECK: call <4 x float> @llvm.x86.xop.vfrcz.ps(<4 x float> %{{.*}})
+ // CHECK: call {{.*}}<4 x float> @llvm.x86.xop.vfrcz.ps(<4 x float> %{{.*}})
return _mm_frcz_ps(a);
}
__m128d test_mm_frcz_pd(__m128d a) {
// CHECK-LABEL: test_mm_frcz_pd
- // CHECK: call <2 x double> @llvm.x86.xop.vfrcz.pd(<2 x double> %{{.*}})
+ // CHECK: call {{.*}}<2 x double> @llvm.x86.xop.vfrcz.pd(<2 x double> %{{.*}})
return _mm_frcz_pd(a);
}
__m256 test_mm256_frcz_ps(__m256 a) {
// CHECK-LABEL: test_mm256_frcz_ps
- // CHECK: call <8 x float> @llvm.x86.xop.vfrcz.ps.256(<8 x float> %{{.*}})
+ // CHECK: call {{.*}}<8 x float> @llvm.x86.xop.vfrcz.ps.256(<8 x float> %{{.*}})
return _mm256_frcz_ps(a);
}
__m256d test_mm256_frcz_pd(__m256d a) {
// CHECK-LABEL: test_mm256_frcz_pd
- // CHECK: call <4 x double> @llvm.x86.xop.vfrcz.pd.256(<4 x double> %{{.*}})
+ // CHECK: call {{.*}}<4 x double> @llvm.x86.xop.vfrcz.pd.256(<4 x double> %{{.*}})
return _mm256_frcz_pd(a);
}
diff --git a/clang/test/CodeGen/afn-flag-test.c b/clang/test/CodeGen/afn-flag-test.c
index f948fc0..bc2ff5b 100644
--- a/clang/test/CodeGen/afn-flag-test.c
+++ b/clang/test/CodeGen/afn-flag-test.c
@@ -7,8 +7,6 @@ double afn_option_test(double x) {
// CHECK-LABEL: define{{.*}} double @afn_option_test(double %x) #0 {
// CHECK-AFN: %{{.*}} = call afn double @{{.*}}exp{{.*}}(double %{{.*}})
- // CHECK-AFN: attributes #0 ={{.*}} "approx-func-fp-math"="true" {{.*}}
// CHECK-NO-AFN: %{{.*}} = call double @{{.*}}exp{{.*}}(double %{{.*}})
- // CHECK-NO-AFN-NOT: attributes #0 ={{.*}} "approx-func-fp-math"="true" {{.*}}
}
diff --git a/clang/test/CodeGen/aggregate-assign-call.c b/clang/test/CodeGen/aggregate-assign-call.c
index 7d97239..f09e77032 100644
--- a/clang/test/CodeGen/aggregate-assign-call.c
+++ b/clang/test/CodeGen/aggregate-assign-call.c
@@ -24,23 +24,23 @@ struct S bar(void) {
// O1: %[[TMP2_ALLOCA:[^ ]+]] = alloca %struct.S
// O1: %[[TMP3_ALLOCA:[^ ]+]] = alloca %struct.S
- // O1: call void @llvm.lifetime.start.p0({{[^,]*}}, ptr %[[TMP1_ALLOCA]])
+ // O1: call void @llvm.lifetime.start.p0(ptr %[[TMP1_ALLOCA]])
// O1: call void @foo
r = foo();
// O1: memcpy
- // O1: call void @llvm.lifetime.end.p0({{[^,]*}}, ptr %[[TMP1_ALLOCA]])
+ // O1: call void @llvm.lifetime.end.p0(ptr %[[TMP1_ALLOCA]])
- // O1: call void @llvm.lifetime.start.p0({{[^,]*}}, ptr %[[TMP2_ALLOCA]])
+ // O1: call void @llvm.lifetime.start.p0(ptr %[[TMP2_ALLOCA]])
// O1: call void @foo
r = foo();
// O1: memcpy
- // O1: call void @llvm.lifetime.end.p0({{[^,]*}}, ptr %[[TMP2_ALLOCA]])
+ // O1: call void @llvm.lifetime.end.p0(ptr %[[TMP2_ALLOCA]])
- // O1: call void @llvm.lifetime.start.p0({{[^,]*}}, ptr %[[TMP3_ALLOCA]])
+ // O1: call void @llvm.lifetime.start.p0(ptr %[[TMP3_ALLOCA]])
// O1: call void @foo
r = foo();
// O1: memcpy
- // O1: call void @llvm.lifetime.end.p0({{[^,]*}}, ptr %[[TMP3_ALLOCA]])
+ // O1: call void @llvm.lifetime.end.p0(ptr %[[TMP3_ALLOCA]])
return r;
}
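// The updated lifetime CHECK lines drop the leading `{{[^,]*}}` operand:
// llvm.lifetime.start/end no longer take an explicit size argument, only the
// alloca pointer, so each call now has a single operand to match.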
@@ -59,17 +59,17 @@ struct S baz(int i, volatile int *j) {
// O1: %[[TMP2_ALLOCA:[^ ]+]] = alloca %struct.S
do {
- // O1: call void @llvm.lifetime.start.p0({{[^,]*}}, ptr %[[TMP1_ALLOCA]])
+ // O1: call void @llvm.lifetime.start.p0(ptr %[[TMP1_ALLOCA]])
//
- // O1: call void @llvm.lifetime.end.p0({{[^,]*}}, ptr %[[TMP1_ALLOCA]])
+ // O1: call void @llvm.lifetime.end.p0(ptr %[[TMP1_ALLOCA]])
//
// O1: call void @foo_int(ptr dead_on_unwind writable sret(%struct.S) align 4 %[[TMP1_ALLOCA]],
// O1: call void @llvm.memcpy
- // O1: call void @llvm.lifetime.end.p0({{[^,]*}}, ptr %[[TMP1_ALLOCA]])
- // O1: call void @llvm.lifetime.start.p0({{[^,]*}}, ptr %[[TMP2_ALLOCA]])
+ // O1: call void @llvm.lifetime.end.p0(ptr %[[TMP1_ALLOCA]])
+ // O1: call void @llvm.lifetime.start.p0(ptr %[[TMP2_ALLOCA]])
// O1: call void @foo_int(ptr dead_on_unwind writable sret(%struct.S) align 4 %[[TMP2_ALLOCA]],
// O1: call void @llvm.memcpy
- // O1: call void @llvm.lifetime.end.p0({{[^,]*}}, ptr %[[TMP2_ALLOCA]])
+ // O1: call void @llvm.lifetime.end.p0(ptr %[[TMP2_ALLOCA]])
r = foo_int(({
if (*j)
break;
diff --git a/clang/test/CodeGen/asan-unified-lto.ll b/clang/test/CodeGen/asan-unified-lto.ll
index 7b790d4..21734e1 100644
--- a/clang/test/CodeGen/asan-unified-lto.ll
+++ b/clang/test/CodeGen/asan-unified-lto.ll
@@ -5,6 +5,7 @@
; RUN: %clang_cc1 -emit-llvm-bc -O1 -flto -fsanitize=address -o - -x ir < %s | llvm-dis -o - | FileCheck %s
; RUN: %clang_cc1 -emit-llvm-bc -O1 -flto -funified-lto -fsanitize=address -o - -x ir < %s | llvm-dis -o - | FileCheck %s
+; RUN: %clang_cc1 -emit-llvm-bc -O1 -flto -fno-unified-lto -fsanitize=address -o - -x ir < %s | llvm-dis -o - | FileCheck %s
; CHECK: @anon.3ee0898e5200a57350fed5485ae5d237
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/clang/test/CodeGen/attr-counted-by-for-pointers.c b/clang/test/CodeGen/attr-counted-by-for-pointers.c
index e939e49..0d72b58 100644
--- a/clang/test/CodeGen/attr-counted-by-for-pointers.c
+++ b/clang/test/CodeGen/attr-counted-by-for-pointers.c
@@ -32,7 +32,7 @@ struct annotated_ptr {
// SANITIZE-WITH-ATTR-NEXT: entry:
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 4
+// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 8
// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META2:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp ult i64 [[IDXPROM]], [[TMP0]], !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP1]], label [[CONT10:%.*]], label [[HANDLER_OUT_OF_BOUNDS:%.*]], !prof [[PROF3:![0-9]+]], !nosanitize [[META2]]
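// The counted_by field sits at byte offset 16 of a struct that holds pointers
// and is therefore at least 8-byte aligned on this 64-bit target; since
// 16 % 8 == 0, the i32 load can carry the stronger `align 8` the new checks
// expect.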
@@ -85,7 +85,7 @@ void test1(struct annotated_ptr *p, int index, struct foo *value) {
// SANITIZE-WITH-ATTR-NEXT: entry:
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 4
+// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 8
// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp ult i64 [[IDXPROM]], [[TMP0]], !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP1]], label [[CONT10:%.*]], label [[HANDLER_OUT_OF_BOUNDS:%.*]], !prof [[PROF3]], !nosanitize [[META2]]
@@ -138,7 +138,7 @@ void test2(struct annotated_ptr *p, int index, struct foo *value) {
// SANITIZE-WITH-ATTR-NEXT: entry:
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 4
+// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 8
// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: [[DOTNOT:%.*]] = icmp ugt i64 [[IDXPROM]], [[TMP0]], !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: br i1 [[DOTNOT]], label [[HANDLER_OUT_OF_BOUNDS:%.*]], label [[CONT10:%.*]], !prof [[PROF15:![0-9]+]], !nosanitize [[META2]]
@@ -311,7 +311,7 @@ size_t test6(struct annotated_ptr *p, int index) {
// SANITIZE-WITH-ATTR-NEXT: entry:
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 4
+// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 8
// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp ult i64 [[IDXPROM]], [[TMP0]], !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP1]], label [[CONT10:%.*]], label [[HANDLER_OUT_OF_BOUNDS:%.*]], !prof [[PROF3]], !nosanitize [[META2]]
diff --git a/clang/test/CodeGen/attr-counted-by.c b/clang/test/CodeGen/attr-counted-by.c
index 9fb50c6..cb23efd 100644
--- a/clang/test/CodeGen/attr-counted-by.c
+++ b/clang/test/CodeGen/attr-counted-by.c
@@ -72,7 +72,7 @@ struct anon_struct {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont3:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store i32 [[VAL]], ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA4:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
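// Both GEP spellings compute the same address: indexing a zero-length
// [0 x i32] array at (0, idx) is base + 4*idx, which is exactly what the flat
// `getelementptr inbounds i32, ptr %array, i64 %idx` form now emitted says.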
@@ -81,7 +81,7 @@ struct anon_struct {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[VAL]], ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -90,7 +90,7 @@ struct anon_struct {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 [[VAL]], ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -99,7 +99,7 @@ struct anon_struct {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 [[VAL]], ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -120,7 +120,7 @@ void test1(struct annotated *p, int index, int val) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont6:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[INDEX]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.smax.i32(i32 [[COUNTED_BY_LOAD]], i32 0)
// SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = shl i32 [[TMP2]], 2
// SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA4]]
@@ -134,7 +134,7 @@ void test1(struct annotated *p, int index, int val) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_LOAD:%.*]] = load i32, ptr [[COUNTED_BY_GEP]], align 4
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.smax.i32(i32 [[COUNTED_BY_LOAD]], i32 0)
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = shl i32 [[TMP0]], 2
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[INDEX]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -142,7 +142,7 @@ void test1(struct annotated *p, int index, int val) {
// SANITIZE-WITHOUT-ATTR-SAME: ptr noundef [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[INDEX]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -150,7 +150,7 @@ void test1(struct annotated *p, int index, int val) {
// NO-SANITIZE-WITHOUT-ATTR-SAME: ptr noundef [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[INDEX]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -243,7 +243,7 @@ size_t test2_bdos_cast(struct annotated *p) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont3:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[INDEX]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
// SANITIZE-WITH-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA4]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -251,7 +251,7 @@ size_t test2_bdos_cast(struct annotated *p) {
// NO-SANITIZE-WITH-ATTR-SAME: ptr noundef [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[INDEX]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -259,7 +259,7 @@ size_t test2_bdos_cast(struct annotated *p) {
// SANITIZE-WITHOUT-ATTR-SAME: ptr noundef [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[INDEX]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -267,7 +267,7 @@ size_t test2_bdos_cast(struct annotated *p) {
// NO-SANITIZE-WITHOUT-ATTR-SAME: ptr noundef [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[INDEX]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -350,7 +350,7 @@ size_t test3_bdos_cast(struct annotated *p) {
// SANITIZE-WITH-ATTR-NEXT: [[RESULT:%.*]] = add i32 [[FLEXIBLE_ARRAY_MEMBER_SIZE]], 244
// SANITIZE-WITH-ATTR-NEXT: [[TMP4:%.*]] = and i32 [[RESULT]], 252
// SANITIZE-WITH-ATTR-NEXT: [[CONV2:%.*]] = select i1 [[TMP3]], i32 [[TMP4]], i32 0
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV2]], ptr [[ARRAYIDX10]], align 4, !tbaa [[TBAA4]]
// SANITIZE-WITH-ATTR-NEXT: [[DOTNOT81:%.*]] = icmp eq i32 [[DOTCOUNTED_BY_LOAD]], 3
// SANITIZE-WITH-ATTR-NEXT: br i1 [[DOTNOT81]], label [[HANDLER_OUT_OF_BOUNDS18:%.*]], label [[CONT19:%.*]], !prof [[PROF8:![0-9]+]], !nosanitize [[META2]]
@@ -370,7 +370,7 @@ size_t test3_bdos_cast(struct annotated *p) {
// SANITIZE-WITH-ATTR-NEXT: [[RESULT25:%.*]] = add i32 [[FLEXIBLE_ARRAY_MEMBER_SIZE]], 240
// SANITIZE-WITH-ATTR-NEXT: [[TMP7:%.*]] = and i32 [[RESULT25]], 252
// SANITIZE-WITH-ATTR-NEXT: [[CONV27:%.*]] = select i1 [[TMP6]], i32 [[TMP7]], i32 0
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM31]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[IDXPROM31]]
// SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV27]], ptr [[ARRAYIDX36]], align 4, !tbaa [[TBAA4]]
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM42:%.*]] = sext i32 [[FAM_IDX]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD44:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 4
@@ -389,7 +389,7 @@ size_t test3_bdos_cast(struct annotated *p) {
// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB10:[0-9]+]], i64 [[IDXPROM60]]) #[[ATTR8]], !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont67:
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX65:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM60]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX65:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[IDXPROM60]]
// SANITIZE-WITH-ATTR-NEXT: [[COUNT50:%.*]] = sext i32 [[DOTCOUNTED_BY_LOAD44]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[TMP10:%.*]] = sub nsw i64 [[COUNT50]], [[IDXPROM42]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP11:%.*]] = tail call i64 @llvm.smax.i64(i64 [[TMP10]], i64 0)
@@ -411,7 +411,7 @@ size_t test3_bdos_cast(struct annotated *p) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = and i32 [[RESULT]], 252
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV1:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 0
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV1]], ptr [[ARRAYIDX3]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_LOAD7:%.*]] = load i32, ptr [[COUNTED_BY_GEP]], align 4
// NO-SANITIZE-WITH-ATTR-NEXT: [[FLEXIBLE_ARRAY_MEMBER_SIZE9:%.*]] = shl i32 [[COUNTED_BY_LOAD7]], 2
@@ -419,9 +419,7 @@ size_t test3_bdos_cast(struct annotated *p) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[COUNTED_BY_LOAD7]], 3
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP3:%.*]] = and i32 [[RESULT10]], 252
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV12:%.*]] = select i1 [[TMP2]], i32 [[TMP3]], i32 0
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ADD:%.*]] = add nsw i32 [[INDEX]], 1
-// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM14:%.*]] = sext i32 [[ADD]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM14]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX15:%.*]] = getelementptr i8, ptr [[ARRAYIDX3]], i64 4
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV12]], ptr [[ARRAYIDX15]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM17:%.*]] = sext i32 [[FAM_IDX]] to i64
// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_LOAD20:%.*]] = load i32, ptr [[COUNTED_BY_GEP]], align 4
@@ -434,9 +432,7 @@ size_t test3_bdos_cast(struct annotated *p) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP8:%.*]] = shl i32 [[DOTTR]], 2
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP9:%.*]] = and i32 [[TMP8]], 252
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV26:%.*]] = select i1 [[TMP7]], i32 [[TMP9]], i32 0
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ADD28:%.*]] = add nsw i32 [[INDEX]], 2
-// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM29:%.*]] = sext i32 [[ADD28]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM29]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX30:%.*]] = getelementptr i8, ptr [[ARRAYIDX3]], i64 8
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV26]], ptr [[ARRAYIDX30]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -445,15 +441,11 @@ size_t test3_bdos_cast(struct annotated *p) {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX5:%.*]] = getelementptr i32, ptr [[ARRAY]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 255, ptr [[ARRAYIDX5]], align 4, !tbaa [[TBAA2]]
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ADD:%.*]] = add nsw i32 [[INDEX]], 1
-// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM17:%.*]] = sext i32 [[ADD]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM17]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX18:%.*]] = getelementptr i8, ptr [[ARRAYIDX5]], i64 4
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 255, ptr [[ARRAYIDX18]], align 4, !tbaa [[TBAA2]]
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ADD31:%.*]] = add nsw i32 [[INDEX]], 2
-// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM32:%.*]] = sext i32 [[ADD31]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM32]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX33:%.*]] = getelementptr i8, ptr [[ARRAYIDX5]], i64 8
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 255, ptr [[ARRAYIDX33]], align 4, !tbaa [[TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
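// With the flat form, the index+1 and index+2 stores fold into constant byte
// offsets from the first element's address: 4*(idx+1) = 4*idx + 4 and
// 4*(idx+2) = 4*idx + 8, hence `getelementptr i8, ptr %arrayidx, i64 4` and
// `i64 8` replacing the add/sext/gep sequences.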
@@ -462,15 +454,11 @@ size_t test3_bdos_cast(struct annotated *p) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 255, ptr [[ARRAYIDX3]], align 4, !tbaa [[TBAA2]]
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ADD:%.*]] = add nsw i32 [[INDEX]], 1
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM9:%.*]] = sext i32 [[ADD]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM9]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX10:%.*]] = getelementptr i8, ptr [[ARRAYIDX3]], i64 4
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 255, ptr [[ARRAYIDX10]], align 4, !tbaa [[TBAA2]]
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ADD17:%.*]] = add nsw i32 [[INDEX]], 2
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM18:%.*]] = sext i32 [[ADD17]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM18]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX19:%.*]] = getelementptr i8, ptr [[ARRAYIDX3]], i64 8
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 255, ptr [[ARRAYIDX19]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -632,7 +620,7 @@ size_t test4_bdos_cast2(struct annotated *p, int index) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont3:
// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA4]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -641,7 +629,7 @@ size_t test4_bdos_cast2(struct annotated *p, int index) {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -650,7 +638,7 @@ size_t test4_bdos_cast2(struct annotated *p, int index) {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -659,7 +647,7 @@ size_t test4_bdos_cast2(struct annotated *p, int index) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -704,7 +692,7 @@ size_t test5_bdos(struct anon_struct *p) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont6:
// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: [[FLEXIBLE_ARRAY_MEMBER_SIZE:%.*]] = shl nuw i64 [[COUNTED_BY_LOAD]], 2
// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.smax.i64(i64 [[FLEXIBLE_ARRAY_MEMBER_SIZE]], i64 0)
// SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = trunc i64 [[TMP2]] to i32
@@ -721,7 +709,7 @@ size_t test5_bdos(struct anon_struct *p) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.smax.i64(i64 [[FLEXIBLE_ARRAY_MEMBER_SIZE]], i64 0)
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -730,7 +718,7 @@ size_t test5_bdos(struct anon_struct *p) {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -739,7 +727,7 @@ size_t test5_bdos(struct anon_struct *p) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -793,7 +781,7 @@ size_t test6_bdos(struct anon_struct *p) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont7:
// SANITIZE-WITH-ATTR-NEXT: [[INTS:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 9
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i8], ptr [[INTS]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[INTS]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA9:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -802,7 +790,7 @@ size_t test6_bdos(struct anon_struct *p) {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[INTS:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 9
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[INTS]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[INTS]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -811,7 +799,7 @@ size_t test6_bdos(struct anon_struct *p) {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[INTS:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 9
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[INTS]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[INTS]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -820,7 +808,7 @@ size_t test6_bdos(struct anon_struct *p) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[INTS:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 9
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[INTS]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[INTS]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -866,7 +854,7 @@ size_t test7_bdos(struct union_of_fams *p) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont14:
// SANITIZE-WITH-ATTR-NEXT: [[INTS:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 9
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i8], ptr [[INTS]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[INTS]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store i8 [[COUNTED_BY_LOAD]], ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA9]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -877,7 +865,7 @@ size_t test7_bdos(struct union_of_fams *p) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[INTS:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 9
// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_LOAD:%.*]] = load i8, ptr [[TMP0]], align 4
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[INTS]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[INTS]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i8 [[COUNTED_BY_LOAD]], ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -886,7 +874,7 @@ size_t test7_bdos(struct union_of_fams *p) {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[INTS:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 9
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[INTS]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[INTS]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -895,7 +883,7 @@ size_t test7_bdos(struct union_of_fams *p) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[INTS:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 9
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[INTS]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[INTS]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -947,7 +935,7 @@ size_t test8_bdos(struct union_of_fams *p) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont7:
// SANITIZE-WITH-ATTR-NEXT: [[BYTES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i8], ptr [[BYTES]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[BYTES]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA9]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -956,7 +944,7 @@ size_t test8_bdos(struct union_of_fams *p) {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[BYTES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[BYTES]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BYTES]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -965,7 +953,7 @@ size_t test8_bdos(struct union_of_fams *p) {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[BYTES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[BYTES]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BYTES]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -974,7 +962,7 @@ size_t test8_bdos(struct union_of_fams *p) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[BYTES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[BYTES]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BYTES]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -1020,7 +1008,7 @@ size_t test9_bdos(struct union_of_fams *p) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont14:
// SANITIZE-WITH-ATTR-NEXT: [[BYTES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i8], ptr [[BYTES]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[BYTES]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: [[NARROW:%.*]] = tail call i32 @llvm.smax.i32(i32 [[COUNTED_BY_LOAD]], i32 0)
// SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = trunc i32 [[NARROW]] to i8
// SANITIZE-WITH-ATTR-NEXT: store i8 [[CONV]], ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA9]]
@@ -1035,7 +1023,7 @@ size_t test9_bdos(struct union_of_fams *p) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[NARROW:%.*]] = tail call i32 @llvm.smax.i32(i32 [[COUNTED_BY_LOAD]], i32 0)
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = trunc i32 [[NARROW]] to i8
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[BYTES]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BYTES]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i8 [[CONV]], ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -1044,7 +1032,7 @@ size_t test9_bdos(struct union_of_fams *p) {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[BYTES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[BYTES]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BYTES]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -1053,7 +1041,7 @@ size_t test9_bdos(struct union_of_fams *p) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[BYTES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[BYTES]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BYTES]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -1107,7 +1095,7 @@ size_t test10_bdos(struct union_of_fams *p) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont6:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[COUNTED_BY_LOAD]], -3
// SANITIZE-WITH-ATTR-NEXT: [[FLEXIBLE_ARRAY_MEMBER_SIZE:%.*]] = shl i32 [[COUNTED_BY_LOAD]], 2
// SANITIZE-WITH-ATTR-NEXT: [[RESULT:%.*]] = add i32 [[FLEXIBLE_ARRAY_MEMBER_SIZE]], 8
@@ -1126,7 +1114,7 @@ size_t test10_bdos(struct union_of_fams *p) {
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = select i1 [[TMP0]], i32 [[RESULT]], i32 0
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -1135,7 +1123,7 @@ size_t test10_bdos(struct union_of_fams *p) {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -1144,7 +1132,7 @@ size_t test10_bdos(struct union_of_fams *p) {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -1209,7 +1197,7 @@ int test12_a, test12_b;
// SANITIZE-WITH-ATTR-SAME: i32 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR3:[0-9]+]] {
// SANITIZE-WITH-ATTR-NEXT: entry:
// SANITIZE-WITH-ATTR-NEXT: [[BAZ:%.*]] = alloca [[STRUCT_HANG:%.*]], align 4
-// SANITIZE-WITH-ATTR-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[BAZ]]) #[[ATTR9:[0-9]+]]
+// SANITIZE-WITH-ATTR-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BAZ]]) #[[ATTR9:[0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(24) [[BAZ]], ptr noundef nonnull align 4 dereferenceable(24) @test12_bar, i64 24, i1 false), !tbaa.struct [[TBAA_STRUCT10:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = icmp ult i32 [[INDEX]], 6
// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = zext i32 [[INDEX]] to i64
@@ -1218,7 +1206,7 @@ int test12_a, test12_b;
// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB22:[0-9]+]], i64 [[TMP1]]) #[[ATTR8]], !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont:
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [6 x i32], ptr [[BAZ]], i64 0, i64 [[TMP1]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[BAZ]], i64 [[TMP1]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA4]]
// SANITIZE-WITH-ATTR-NEXT: store i32 [[TMP2]], ptr @test12_b, align 4, !tbaa [[TBAA4]]
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr @test12_foo, align 4
@@ -1235,10 +1223,10 @@ int test12_a, test12_b;
// NO-SANITIZE-WITH-ATTR-SAME: i32 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR4:[0-9]+]] {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[BAZ:%.*]] = alloca [[STRUCT_HANG:%.*]], align 4
-// NO-SANITIZE-WITH-ATTR-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[BAZ]]) #[[ATTR12:[0-9]+]]
+// NO-SANITIZE-WITH-ATTR-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BAZ]]) #[[ATTR12:[0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(24) [[BAZ]], ptr noundef nonnull align 4 dereferenceable(24) @test12_bar, i64 24, i1 false), !tbaa.struct [[TBAA_STRUCT7:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [6 x i32], ptr [[BAZ]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[BAZ]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[TMP0]], ptr @test12_b, align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr inbounds nuw (i8, ptr @test12_foo, i64 4), align 4, !tbaa [[TBAA2]]
@@ -1251,7 +1239,7 @@ int test12_a, test12_b;
// SANITIZE-WITHOUT-ATTR-SAME: i32 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR3:[0-9]+]] {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[BAZ:%.*]] = alloca [[STRUCT_HANG:%.*]], align 4
-// SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[BAZ]]) #[[ATTR7:[0-9]+]]
+// SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BAZ]]) #[[ATTR7:[0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(24) [[BAZ]], ptr noundef nonnull align 4 dereferenceable(24) @test12_bar, i64 24, i1 false), !tbaa.struct [[TBAA_STRUCT7:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = icmp ult i32 [[INDEX]], 6
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP1:%.*]] = zext i32 [[INDEX]] to i64
@@ -1260,7 +1248,7 @@ int test12_a, test12_b;
// SANITIZE-WITHOUT-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB2:[0-9]+]], i64 [[TMP1]]) #[[ATTR8:[0-9]+]], !nosanitize [[META9]]
// SANITIZE-WITHOUT-ATTR-NEXT: unreachable, !nosanitize [[META9]]
// SANITIZE-WITHOUT-ATTR: cont:
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [6 x i32], ptr [[BAZ]], i64 0, i64 [[TMP1]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[BAZ]], i64 [[TMP1]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 [[TMP2]], ptr @test12_b, align 4, !tbaa [[TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr @test12_foo, align 4
@@ -1277,10 +1265,10 @@ int test12_a, test12_b;
// NO-SANITIZE-WITHOUT-ATTR-SAME: i32 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR2:[0-9]+]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[BAZ:%.*]] = alloca [[STRUCT_HANG:%.*]], align 4
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[BAZ]]) #[[ATTR10:[0-9]+]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BAZ]]) #[[ATTR10:[0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(24) [[BAZ]], ptr noundef nonnull align 4 dereferenceable(24) @test12_bar, i64 24, i1 false), !tbaa.struct [[TBAA_STRUCT7:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [6 x i32], ptr [[BAZ]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[BAZ]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 [[TMP0]], ptr @test12_b, align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr inbounds nuw (i8, ptr @test12_foo, i64 4), align 4, !tbaa [[TBAA2]]
@@ -1322,7 +1310,7 @@ struct test13_bar {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont5:
// SANITIZE-WITH-ATTR-NEXT: [[REVMAP:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x ptr], ptr [[REVMAP]], i64 0, i64 [[INDEX]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw ptr, ptr [[REVMAP]], i64 [[INDEX]]
// SANITIZE-WITH-ATTR-NEXT: store ptr null, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA15:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: ret i32 0
//
@@ -1331,7 +1319,7 @@ struct test13_bar {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr @test13_f, align 8, !tbaa [[TBAA8:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[REVMAP:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x ptr], ptr [[REVMAP]], i64 0, i64 [[INDEX]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[REVMAP]], i64 [[INDEX]]
// NO-SANITIZE-WITH-ATTR-NEXT: store ptr null, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA12:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret i32 0
//
@@ -1349,7 +1337,7 @@ struct test13_bar {
// SANITIZE-WITHOUT-ATTR-NEXT: unreachable, !nosanitize [[META9]]
// SANITIZE-WITHOUT-ATTR: cont5:
// SANITIZE-WITHOUT-ATTR-NEXT: [[REVMAP:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x ptr], ptr [[REVMAP]], i64 0, i64 [[INDEX]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw ptr, ptr [[REVMAP]], i64 [[INDEX]]
// SANITIZE-WITHOUT-ATTR-NEXT: store ptr null, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA15:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret i32 0
//
@@ -1358,7 +1346,7 @@ struct test13_bar {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr @test13_f, align 8, !tbaa [[TBAA8:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[REVMAP:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x ptr], ptr [[REVMAP]], i64 0, i64 [[INDEX]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[REVMAP]], i64 [[INDEX]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store ptr null, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA12:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i32 0
//
@@ -1393,7 +1381,7 @@ struct test14_foo {
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 2, ptr [[Y]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[BLAH:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTCOMPOUNDLITERAL]], i64 8
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[BLAH]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[BLAH]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1418,7 +1406,7 @@ struct test14_foo {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 2, ptr [[Y]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[BLAH:%.*]] = getelementptr inbounds nuw i8, ptr [[DOTCOMPOUNDLITERAL]], i64 8
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[BLAH]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[BLAH]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1442,7 +1430,7 @@ int test14(int idx) {
// NO-SANITIZE-WITH-ATTR-SAME: i32 noundef [[IDX:%.*]]) local_unnamed_addr #[[ATTR3]] {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr getelementptr inbounds nuw (i8, ptr @__const.test15.foo, i64 8), i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @__const.test15.foo, i64 8), i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1462,7 +1450,7 @@ int test14(int idx) {
// NO-SANITIZE-WITHOUT-ATTR-SAME: i32 noundef [[IDX:%.*]]) local_unnamed_addr #[[ATTR1]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr getelementptr inbounds nuw (i8, ptr @__const.test15.foo, i64 8), i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds nuw (i8, ptr @__const.test15.foo, i64 8), i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1718,7 +1706,7 @@ struct test26_foo {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont5:
// SANITIZE-WITH-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[FOO]], i64 8
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARR]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARR]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA4]]
// SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP2]]
//
@@ -1727,7 +1715,7 @@ struct test26_foo {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[FOO]], i64 8
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[C]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARR]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1736,7 +1724,7 @@ struct test26_foo {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[FOO]], i64 8
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[C]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARR]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1745,7 +1733,7 @@ struct test26_foo {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[FOO]], i64 8
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[C]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARR]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1789,7 +1777,7 @@ struct test27_foo {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont3:
// SANITIZE-WITH-ATTR-NEXT: [[ENTRIES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 24
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x ptr], ptr [[ENTRIES]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw ptr, ptr [[ENTRIES]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA19:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM4:%.*]] = sext i32 [[J]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [[STRUCT_TEST27_BAR:%.*]], ptr [[TMP2]], i64 [[IDXPROM4]]
@@ -1800,7 +1788,7 @@ struct test27_foo {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[ENTRIES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 24
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x ptr], ptr [[ENTRIES]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[ENTRIES]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA16:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM1:%.*]] = sext i32 [[J]] to i64
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [[STRUCT_TEST27_BAR:%.*]], ptr [[TMP0]], i64 [[IDXPROM1]]
@@ -1811,7 +1799,7 @@ struct test27_foo {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRIES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 24
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x ptr], ptr [[ENTRIES]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[ENTRIES]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA19:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM3:%.*]] = sext i32 [[J]] to i64
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [[STRUCT_TEST27_BAR:%.*]], ptr [[TMP0]], i64 [[IDXPROM3]]
@@ -1822,7 +1810,7 @@ struct test27_foo {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ENTRIES:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 24
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x ptr], ptr [[ENTRIES]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[ENTRIES]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA16:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM1:%.*]] = sext i32 [[J]] to i64
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [[STRUCT_TEST27_BAR:%.*]], ptr [[TMP0]], i64 [[IDXPROM1]]
@@ -1855,7 +1843,7 @@ struct test28_foo {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont17:
// SANITIZE-WITH-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 12
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARR]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARR]], i64 [[IDXPROM]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA4]]
// SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP5]]
//
@@ -1867,7 +1855,7 @@ struct test28_foo {
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8, !tbaa [[TBAA18]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 12
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARR]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP3]]
//
@@ -1879,7 +1867,7 @@ struct test28_foo {
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8, !tbaa [[TBAA21]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 12
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARR]], i64 0, i64 [[IDXPROM]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP3]]
//
@@ -1891,7 +1879,7 @@ struct test28_foo {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8, !tbaa [[TBAA18]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARR:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARR]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP3]]
//
@@ -1916,7 +1904,7 @@ struct annotated_struct_array {
// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB41:[0-9]+]], i64 [[TMP1]]) #[[ATTR8]], !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont3:
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [10 x ptr], ptr [[ANN]], i64 0, i64 [[TMP1]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw ptr, ptr [[ANN]], i64 [[TMP1]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA23:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 8
// SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_LOAD:%.*]] = load i32, ptr [[COUNTED_BY_GEP]], align 4
@@ -1929,7 +1917,7 @@ struct annotated_struct_array {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont32:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 12
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM27]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[IDXPROM27]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.smax.i32(i32 [[COUNTED_BY_LOAD]], i32 0)
// SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = shl i32 [[TMP5]], 2
// SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX30]], align 4, !tbaa [[TBAA4]]
@@ -1939,7 +1927,7 @@ struct annotated_struct_array {
// NO-SANITIZE-WITH-ATTR-SAME: ptr noundef readonly captures(none) [[ANN:%.*]], i32 noundef [[IDX1:%.*]], i32 noundef [[IDX2:%.*]]) local_unnamed_addr #[[ATTR9:[0-9]+]] {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX1]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x ptr], ptr [[ANN]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[ANN]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA20:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 12
// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 8
@@ -1947,7 +1935,7 @@ struct annotated_struct_array {
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.smax.i32(i32 [[COUNTED_BY_LOAD]], i32 0)
// NO-SANITIZE-WITH-ATTR-NEXT: [[CONV:%.*]] = shl i32 [[TMP1]], 2
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM8:%.*]] = sext i32 [[IDX2]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM8]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM8]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX9]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -1961,11 +1949,11 @@ struct annotated_struct_array {
// SANITIZE-WITHOUT-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB13:[0-9]+]], i64 [[TMP1]]) #[[ATTR8]], !nosanitize [[META9]]
// SANITIZE-WITHOUT-ATTR-NEXT: unreachable, !nosanitize [[META9]]
// SANITIZE-WITHOUT-ATTR: cont21:
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [10 x ptr], ptr [[ANN]], i64 0, i64 [[TMP1]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw ptr, ptr [[ANN]], i64 [[TMP1]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA23:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 12
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM18:%.*]] = sext i32 [[IDX2]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM18]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM18]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX19]], align 4, !tbaa [[TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -1973,11 +1961,11 @@ struct annotated_struct_array {
// NO-SANITIZE-WITHOUT-ATTR-SAME: ptr noundef readonly captures(none) [[ANN:%.*]], i32 noundef [[IDX1:%.*]], i32 noundef [[IDX2:%.*]]) local_unnamed_addr #[[ATTR8:[0-9]+]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX1]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x ptr], ptr [[ANN]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[ANN]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA20:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM5:%.*]] = sext i32 [[IDX2]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [0 x i32], ptr [[ARRAY]], i64 0, i64 [[IDXPROM5]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[IDXPROM5]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 -1, ptr [[ARRAYIDX6]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -2008,7 +1996,7 @@ struct test30_struct {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[PCPU_REFCNT:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 12
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[PCPU_REFCNT]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[PCPU_REFCNT]], i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -2024,7 +2012,7 @@ struct test30_struct {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[PCPU_REFCNT:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 12
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i8], ptr [[PCPU_REFCNT]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[PCPU_REFCNT]], i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i8 -1, ptr [[ARRAYIDX]], align 1, !tbaa [[TBAA6]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -2091,7 +2079,7 @@ struct annotated_with_array {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont9:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 344
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [0 x i64], ptr [[ARRAY]], i64 0, i64 [[IDXPROM4]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw i64, ptr [[ARRAY]], i64 [[IDXPROM4]]
// SANITIZE-WITH-ATTR-NEXT: [[COUNT:%.*]] = sext i32 [[COUNTED_BY_LOAD]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[FLEXIBLE_ARRAY_MEMBER_SIZE:%.*]] = shl nsw i64 [[COUNT]], 3
// SANITIZE-WITH-ATTR-NEXT: [[TMP4:%.*]] = shl nuw nsw i32 [[IDX2]], 3
@@ -2118,7 +2106,7 @@ struct annotated_with_array {
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i64 [[RESULT]], i64 0
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 344
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM1:%.*]] = sext i32 [[IDX1]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [0 x i64], ptr [[ARRAY]], i64 0, i64 [[IDXPROM1]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[ARRAY]], i64 [[IDXPROM1]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i64 [[TMP4]], ptr [[ARRAYIDX2]], align 8, !tbaa [[TBAA22:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -2134,7 +2122,7 @@ struct annotated_with_array {
// SANITIZE-WITHOUT-ATTR: cont7:
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 344
// SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM4:%.*]] = sext i32 [[IDX1]] to i64
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [0 x i64], ptr [[ARRAY]], i64 0, i64 [[IDXPROM4]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i64, ptr [[ARRAY]], i64 [[IDXPROM4]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i64 -1, ptr [[ARRAYIDX5]], align 8, !tbaa [[TBAA25:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -2143,7 +2131,7 @@ struct annotated_with_array {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 344
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM1:%.*]] = sext i32 [[IDX1]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [0 x i64], ptr [[ARRAY]], i64 0, i64 [[IDXPROM1]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[ARRAY]], i64 [[IDXPROM1]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i64 -1, ptr [[ARRAYIDX2]], align 8, !tbaa [[TBAA22:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -2353,7 +2341,7 @@ size_t test34(struct multi_subscripts *ptr, int idx1, int idx2) {
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: cont3:
// SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[INDEX]]
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
// SANITIZE-WITH-ATTR-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA4]]
// SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -2361,7 +2349,7 @@ size_t test34(struct multi_subscripts *ptr, int idx1, int idx2) {
// NO-SANITIZE-WITH-ATTR-SAME: ptr noundef writeonly captures(none) [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[INDEX]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret void
//
@@ -2369,7 +2357,7 @@ size_t test34(struct multi_subscripts *ptr, int idx1, int idx2) {
// SANITIZE-WITHOUT-ATTR-SAME: ptr noundef [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[INDEX]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
// SANITIZE-WITHOUT-ATTR-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
@@ -2377,7 +2365,7 @@ size_t test34(struct multi_subscripts *ptr, int idx1, int idx2) {
// NO-SANITIZE-WITHOUT-ATTR-SAME: ptr noundef writeonly captures(none) [[P:%.*]], i64 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR0]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAY:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 12
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [0 x i32], ptr [[ARRAY]], i64 0, i64 [[INDEX]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret void
//
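The check updates in this file are mechanical: LLVM now prints indexing through a flexible array member as a plain element-typed getelementptr instead of a GEP over the zero-length array type [0 x T], and llvm.lifetime.start has dropped its explicit size operand. A minimal sketch of the C pattern these counted_by tests exercise; the struct and function names below are illustrative, not taken from the test file:

struct annotated {
  int dummy;
  int count;
  int array[] __attribute__((counted_by(count)));
};

void set_element(struct annotated *p, int index) {
  // Old form: getelementptr inbounds [0 x i32], ptr %array, i64 0, i64 %idx
  // New form: getelementptr inbounds i32, ptr %array, i64 %idx
  // Both address %array + 4 * %idx; only the printed GEP type changed.
  // With -fsanitize=array-bounds, a bounds check against p->count is
  // emitted before the store, as the SANITIZE-WITH-ATTR lines show.
  p->array[index] = -1;
}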
diff --git a/clang/test/CodeGen/builtin-bpf-btf-type-id.c b/clang/test/CodeGen/builtin-bpf-btf-type-id.c
index 4c6efd6..c8f29ee 100644
--- a/clang/test/CodeGen/builtin-bpf-btf-type-id.c
+++ b/clang/test/CodeGen/builtin-bpf-btf-type-id.c
@@ -21,5 +21,5 @@ unsigned test3() {
//
// CHECK: ![[INT]] = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed
// CHECK: ![[INT_POINTER]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: ![[INT]], size: 64
-// CHECK: ![[STRUCT_T1]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t1"
// CHECK: ![[TYPEDEF_T1]] = !DIDerivedType(tag: DW_TAG_typedef, name: "__t1"
+// CHECK: ![[STRUCT_T1]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t1"
diff --git a/clang/test/CodeGen/builtin-masked.c b/clang/test/CodeGen/builtin-masked.c
new file mode 100644
index 0000000..579cf5c
--- /dev/null
+++ b/clang/test/CodeGen/builtin-masked.c
@@ -0,0 +1,131 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
+
+typedef int v8i __attribute__((ext_vector_type(8)));
+typedef _Bool v8b __attribute__((ext_vector_type(8)));
+
+// CHECK-LABEL: define dso_local <8 x i32> @test_load(
+// CHECK-SAME: i8 noundef [[M_COERCE:%.*]], ptr noundef [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[M:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store i8 [[M_COERCE]], ptr [[M]], align 1
+// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[M]], align 1
+// CHECK-NEXT: [[M1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1>
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i1> [[M1]] to i8
+// CHECK-NEXT: store i8 [[TMP0]], ptr [[M_ADDR]], align 1
+// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
+// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P_ADDR]], align 8
+// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP2]], i32 32, <8 x i1> [[TMP1]], <8 x i32> poison)
+// CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]]
+//
+v8i test_load(v8b m, v8i *p) {
+ return __builtin_masked_load(m, p);
+}
+
+// CHECK-LABEL: define dso_local <8 x i32> @test_load_passthru(
+// CHECK-SAME: i8 noundef [[M_COERCE:%.*]], ptr noundef [[P:%.*]], ptr noundef byval(<8 x i32>) align 32 [[TMP0:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[M:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[T_ADDR:%.*]] = alloca <8 x i32>, align 32
+// CHECK-NEXT: store i8 [[M_COERCE]], ptr [[M]], align 1
+// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[M]], align 1
+// CHECK-NEXT: [[M1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1>
+// CHECK-NEXT: [[T:%.*]] = load <8 x i32>, ptr [[TMP0]], align 32
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[M1]] to i8
+// CHECK-NEXT: store i8 [[TMP1]], ptr [[M_ADDR]], align 1
+// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
+// CHECK-NEXT: store <8 x i32> [[T]], ptr [[T_ADDR]], align 32
+// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P_ADDR]], align 8
+// CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr [[T_ADDR]], align 32
+// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP3]], i32 32, <8 x i1> [[TMP2]], <8 x i32> [[TMP4]])
+// CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]]
+//
+v8i test_load_passthru(v8b m, v8i *p, v8i t) {
+ return __builtin_masked_load(m, p, t);
+}
+
+// CHECK-LABEL: define dso_local <8 x i32> @test_load_expand(
+// CHECK-SAME: i8 noundef [[M_COERCE:%.*]], ptr noundef [[P:%.*]], ptr noundef byval(<8 x i32>) align 32 [[TMP0:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[M:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[T_ADDR:%.*]] = alloca <8 x i32>, align 32
+// CHECK-NEXT: store i8 [[M_COERCE]], ptr [[M]], align 1
+// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[M]], align 1
+// CHECK-NEXT: [[M1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1>
+// CHECK-NEXT: [[T:%.*]] = load <8 x i32>, ptr [[TMP0]], align 32
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[M1]] to i8
+// CHECK-NEXT: store i8 [[TMP1]], ptr [[M_ADDR]], align 1
+// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
+// CHECK-NEXT: store <8 x i32> [[T]], ptr [[T_ADDR]], align 32
+// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P_ADDR]], align 8
+// CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr [[T_ADDR]], align 32
+// CHECK-NEXT: [[MASKED_EXPAND_LOAD:%.*]] = call <8 x i32> @llvm.masked.expandload.v8i32(ptr [[TMP3]], <8 x i1> [[TMP2]], <8 x i32> [[TMP4]])
+// CHECK-NEXT: ret <8 x i32> [[MASKED_EXPAND_LOAD]]
+//
+v8i test_load_expand(v8b m, v8i *p, v8i t) {
+ return __builtin_masked_expand_load(m, p, t);
+}
+
+// CHECK-LABEL: define dso_local void @test_store(
+// CHECK-SAME: i8 noundef [[M_COERCE:%.*]], ptr noundef byval(<8 x i32>) align 32 [[TMP0:%.*]], ptr noundef [[P:%.*]]) #[[ATTR3:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[M:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[V_ADDR:%.*]] = alloca <8 x i32>, align 32
+// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store i8 [[M_COERCE]], ptr [[M]], align 1
+// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[M]], align 1
+// CHECK-NEXT: [[M1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1>
+// CHECK-NEXT: [[V:%.*]] = load <8 x i32>, ptr [[TMP0]], align 32
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[M1]] to i8
+// CHECK-NEXT: store i8 [[TMP1]], ptr [[M_ADDR]], align 1
+// CHECK-NEXT: store <8 x i32> [[V]], ptr [[V_ADDR]], align 32
+// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
+// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
+// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[V_ADDR]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[P_ADDR]], align 8
+// CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP3]], ptr [[TMP4]], i32 32, <8 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_store(v8b m, v8i v, v8i *p) {
+ __builtin_masked_store(m, v, p);
+}
+
+// CHECK-LABEL: define dso_local void @test_compress_store(
+// CHECK-SAME: i8 noundef [[M_COERCE:%.*]], ptr noundef byval(<8 x i32>) align 32 [[TMP0:%.*]], ptr noundef [[P:%.*]]) #[[ATTR3]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[M:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[V_ADDR:%.*]] = alloca <8 x i32>, align 32
+// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store i8 [[M_COERCE]], ptr [[M]], align 1
+// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[M]], align 1
+// CHECK-NEXT: [[M1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1>
+// CHECK-NEXT: [[V:%.*]] = load <8 x i32>, ptr [[TMP0]], align 32
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[M1]] to i8
+// CHECK-NEXT: store i8 [[TMP1]], ptr [[M_ADDR]], align 1
+// CHECK-NEXT: store <8 x i32> [[V]], ptr [[V_ADDR]], align 32
+// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
+// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
+// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[V_ADDR]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[P_ADDR]], align 8
+// CHECK-NEXT: call void @llvm.masked.compressstore.v8i32(<8 x i32> [[TMP3]], ptr [[TMP4]], <8 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_compress_store(v8b m, v8i v, v8i *p) {
+ __builtin_masked_compress_store(m, v, p);
+}
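For reference, a hedged usage sketch of the masked builtins this new test introduces, assuming only the argument orders visible in the checks above (mask first, then pointer, then optional passthru or value); the function name is illustrative:

typedef int v8i __attribute__((ext_vector_type(8)));
typedef _Bool v8b __attribute__((ext_vector_type(8)));

// Lanes whose mask bit is false take their value from the fallback
// vector (lowered to llvm.masked.load's passthru operand); the masked
// store leaves unselected lanes of *dst untouched.
v8i copy_selected(v8b mask, v8i *src, v8i *dst, v8i fallback) {
  v8i v = __builtin_masked_load(mask, src, fallback);
  __builtin_masked_store(mask, v, dst);
  return v;
}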
diff --git a/clang/test/CodeGen/builtins-elementwise-math.c b/clang/test/CodeGen/builtins-elementwise-math.c
index ee8345f..188a6c3 100644
--- a/clang/test/CodeGen/builtins-elementwise-math.c
+++ b/clang/test/CodeGen/builtins-elementwise-math.c
@@ -66,7 +66,7 @@ void test_builtin_elementwise_abs(float f1, float f2, double d1, double d2,
// CHECK-NEXT: call i32 @llvm.abs.i32(i32 [[IA1]], i1 false)
b = __builtin_elementwise_abs(int_as_one);
- // CHECK: call i32 @llvm.abs.i32(i32 -10, i1 false)
+ // CHECK: store i32 %elt.abs11, ptr @b, align 4
b = __builtin_elementwise_abs(-10);
// CHECK: [[SI:%.+]] = load i16, ptr %si.addr, align 2
@@ -418,7 +418,7 @@ void test_builtin_elementwise_max(float f1, float f2, double d1, double d2,
// CHECK-NEXT: call i32 @llvm.smax.i32(i32 [[IAS1]], i32 [[B]])
int_as_one = __builtin_elementwise_max(int_as_one, b);
- // CHECK: call i32 @llvm.smax.i32(i32 1, i32 97)
+ // CHECK: store i64 97, ptr [[I1:%.+]], align 8
i1 = __builtin_elementwise_max(1, 'a');
}
@@ -508,6 +508,9 @@ void test_builtin_elementwise_min(float f1, float f2, double d1, double d2,
// CHECK-NEXT: [[B:%.+]] = load i32, ptr @b, align 4
// CHECK-NEXT: call i32 @llvm.smin.i32(i32 [[IAS1]], i32 [[B]])
int_as_one = __builtin_elementwise_min(int_as_one, b);
+
+ // CHECK: store i64 2, ptr [[I1:%.+]], align 8
+ i1 = __builtin_elementwise_min(2, 'b');
}
void test_builtin_elementwise_bitreverse(si8 vi1, si8 vi2,
@@ -1176,3 +1179,185 @@ void test_builtin_elementwise_fma(float f32, double f64,
half2 tmp2_v2f16 = __builtin_elementwise_fma(v2f16, v2f16, (half2)4.0);
}
+
+void test_builtin_elementwise_fshl(long long int i1, long long int i2,
+ long long int i3, unsigned short us1,
+ unsigned short us2, unsigned short us3,
+ char c1, char c2, char c3,
+ unsigned char uc1, unsigned char uc2,
+ unsigned char uc3, si8 vi1, si8 vi2,
+ si8 vi3, u4 vu1, u4 vu2, u4 vu3) {
+ // CHECK: [[I1:%.+]] = load i64, ptr %i1.addr
+ // CHECK-NEXT: [[I2:%.+]] = load i64, ptr %i2.addr
+ // CHECK-NEXT: [[I3:%.+]] = load i64, ptr %i3.addr
+ // CHECK-NEXT: [[I4:%.+]] = call i64 @llvm.fshl.i64(i64 [[I1]], i64 [[I2]], i64 [[I3]])
+ // CHECK-NEXT: store i64 [[I4]], ptr %tmp_lli_l
+ // CHECK-NEXT: [[I5:%.+]] = load i64, ptr %i1.addr
+ // CHECK-NEXT: [[I6:%.+]] = load i64, ptr %i2.addr
+ // CHECK-NEXT: [[I7:%.+]] = load i64, ptr %i3.addr
+ // CHECK-NEXT: [[I8:%.+]] = call i64 @llvm.fshr.i64(i64 [[I5]], i64 [[I6]], i64 [[I7]])
+ // CHECK-NEXT: store i64 [[I8]], ptr %tmp_lli_r
+ long long int tmp_lli_l = __builtin_elementwise_fshl(i1, i2, i3);
+ long long int tmp_lli_r = __builtin_elementwise_fshr(i1, i2, i3);
+
+ // CHECK: [[US1:%.+]] = load i16, ptr %us1.addr
+ // CHECK-NEXT: [[US2:%.+]] = load i16, ptr %us2.addr
+ // CHECK-NEXT: [[US3:%.+]] = load i16, ptr %us3.addr
+ // CHECK-NEXT: [[US4:%.+]] = call i16 @llvm.fshl.i16(i16 [[US1]], i16 [[US2]], i16 [[US3]])
+ // CHECK-NEXT: store i16 [[US4]], ptr %tmp_usi_l
+ // CHECK-NEXT: [[US5:%.+]] = load i16, ptr %us1.addr
+ // CHECK-NEXT: [[US6:%.+]] = load i16, ptr %us2.addr
+ // CHECK-NEXT: [[US7:%.+]] = load i16, ptr %us3.addr
+ // CHECK-NEXT: [[US8:%.+]] = call i16 @llvm.fshr.i16(i16 [[US5]], i16 [[US6]], i16 [[US7]])
+ // CHECK-NEXT: store i16 [[US8]], ptr %tmp_usi_r
+ unsigned short tmp_usi_l = __builtin_elementwise_fshl(us1, us2, us3);
+ unsigned short tmp_usi_r = __builtin_elementwise_fshr(us1, us2, us3);
+
+ // CHECK: [[C1:%.+]] = load i8, ptr %c1.addr
+ // CHECK-NEXT: [[C2:%.+]] = load i8, ptr %c2.addr
+ // CHECK-NEXT: [[C3:%.+]] = load i8, ptr %c3.addr
+ // CHECK-NEXT: [[C4:%.+]] = call i8 @llvm.fshl.i8(i8 [[C1]], i8 [[C2]], i8 [[C3]])
+ // CHECK-NEXT: store i8 [[C4]], ptr %tmp_c_l
+ // CHECK-NEXT: [[C5:%.+]] = load i8, ptr %c1.addr
+ // CHECK-NEXT: [[C6:%.+]] = load i8, ptr %c2.addr
+ // CHECK-NEXT: [[C7:%.+]] = load i8, ptr %c3.addr
+ // CHECK-NEXT: [[C8:%.+]] = call i8 @llvm.fshr.i8(i8 [[C5]], i8 [[C6]], i8 [[C7]])
+ // CHECK-NEXT: store i8 [[C8]], ptr %tmp_c_r
+ char tmp_c_l = __builtin_elementwise_fshl(c1, c2, c3);
+ char tmp_c_r = __builtin_elementwise_fshr(c1, c2, c3);
+
+ // CHECK: [[UC1:%.+]] = load i8, ptr %uc1.addr
+ // CHECK-NEXT: [[UC2:%.+]] = load i8, ptr %uc2.addr
+ // CHECK-NEXT: [[UC3:%.+]] = load i8, ptr %uc3.addr
+ // CHECK-NEXT: [[UC4:%.+]] = call i8 @llvm.fshl.i8(i8 [[UC1]], i8 [[UC2]], i8 [[UC3]])
+ // CHECK-NEXT: store i8 [[UC4]], ptr %tmp_uc_l
+ // CHECK-NEXT: [[UC5:%.+]] = load i8, ptr %uc1.addr
+ // CHECK-NEXT: [[UC6:%.+]] = load i8, ptr %uc2.addr
+ // CHECK-NEXT: [[UC7:%.+]] = load i8, ptr %uc3.addr
+ // CHECK-NEXT: [[UC8:%.+]] = call i8 @llvm.fshr.i8(i8 [[UC5]], i8 [[UC6]], i8 [[UC7]])
+ // CHECK-NEXT: store i8 [[UC8]], ptr %tmp_uc_r
+ unsigned char tmp_uc_l = __builtin_elementwise_fshl(uc1, uc2, uc3);
+ unsigned char tmp_uc_r = __builtin_elementwise_fshr(uc1, uc2, uc3);
+
+ // CHECK: [[VI1:%.+]] = load <8 x i16>, ptr %vi1.addr
+ // CHECK-NEXT: [[VI2:%.+]] = load <8 x i16>, ptr %vi2.addr
+ // CHECK-NEXT: [[VI3:%.+]] = load <8 x i16>, ptr %vi3.addr
+ // CHECK-NEXT: [[VI4:%.+]] = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> [[VI1]], <8 x i16> [[VI2]], <8 x i16> [[VI3]])
+ // CHECK-NEXT: store <8 x i16> [[VI4]], ptr %tmp_vi_l
+ // CHECK-NEXT: [[VI5:%.+]] = load <8 x i16>, ptr %vi1.addr
+ // CHECK-NEXT: [[VI6:%.+]] = load <8 x i16>, ptr %vi2.addr
+ // CHECK-NEXT: [[VI7:%.+]] = load <8 x i16>, ptr %vi3.addr
+ // CHECK-NEXT: [[VI8:%.+]] = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> [[VI5]], <8 x i16> [[VI6]], <8 x i16> [[VI7]])
+ // CHECK-NEXT: store <8 x i16> [[VI8]], ptr %tmp_vi_r
+ si8 tmp_vi_l = __builtin_elementwise_fshl(vi1, vi2, vi3);
+ si8 tmp_vi_r = __builtin_elementwise_fshr(vi1, vi2, vi3);
+
+ // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr
+ // CHECK-NEXT: [[VU2:%.+]] = load <4 x i32>, ptr %vu2.addr
+ // CHECK-NEXT: [[VU3:%.+]] = load <4 x i32>, ptr %vu3.addr
+ // CHECK-NEXT: [[VU4:%.+]] = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> [[VU1]], <4 x i32> [[VU2]], <4 x i32> [[VU3]])
+ // CHECK-NEXT: store <4 x i32> [[VU4]], ptr %tmp_vu_l
+ // CHECK-NEXT: [[VU5:%.+]] = load <4 x i32>, ptr %vu1.addr
+ // CHECK-NEXT: [[VU6:%.+]] = load <4 x i32>, ptr %vu2.addr
+ // CHECK-NEXT: [[VU7:%.+]] = load <4 x i32>, ptr %vu3.addr
+ // CHECK-NEXT: [[VU8:%.+]] = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> [[VU5]], <4 x i32> [[VU6]], <4 x i32> [[VU7]])
+ // CHECK-NEXT: store <4 x i32> [[VU8]], ptr %tmp_vu_r
+ u4 tmp_vu_l = __builtin_elementwise_fshl(vu1, vu2, vu3);
+ u4 tmp_vu_r = __builtin_elementwise_fshr(vu1, vu2, vu3);
+}
+
+void test_builtin_elementwise_ctlz(si8 vs1, si8 vs2, u4 vu1,
+ long long int lli, short si,
+ _BitInt(31) bi, int i,
+ char ci) {
+ // CHECK: [[V8S1:%.+]] = load <8 x i16>, ptr %vs1.addr
+ // CHECK-NEXT: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> [[V8S1]], i1 true)
+ vs1 = __builtin_elementwise_ctlz(vs1);
+
+ // CHECK: [[V8S1:%.+]] = load <8 x i16>, ptr %vs1.addr
+ // CHECK-NEXT: [[CLZ:%.+]] = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> [[V8S1]], i1 true)
+ // CHECK-NEXT: [[ISZERO:%.+]] = icmp eq <8 x i16> [[V8S1]], zeroinitializer
+ // CHECK-NEXT: [[V8S2:%.+]] = load <8 x i16>, ptr %vs2.addr
+ // CHECK-NEXT: select <8 x i1> [[ISZERO]], <8 x i16> [[V8S2]], <8 x i16> [[CLZ]]
+ vs1 = __builtin_elementwise_ctlz(vs1, vs2);
+
+ // CHECK: [[V4U1:%.+]] = load <4 x i32>, ptr %vu1.addr
+ // CHECK-NEXT: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> [[V4U1]], i1 true)
+ vu1 = __builtin_elementwise_ctlz(vu1);
+
+ // CHECK: [[LLI:%.+]] = load i64, ptr %lli.addr
+ // CHECK-NEXT: call i64 @llvm.ctlz.i64(i64 [[LLI]], i1 true)
+ lli = __builtin_elementwise_ctlz(lli);
+
+ // CHECK: [[SI:%.+]] = load i16, ptr %si.addr
+ // CHECK-NEXT: call i16 @llvm.ctlz.i16(i16 [[SI]], i1 true)
+ si = __builtin_elementwise_ctlz(si);
+
+ // CHECK: [[BI1:%.+]] = load i32, ptr %bi.addr
+ // CHECK-NEXT: [[BI2:%.+]] = trunc i32 [[BI1]] to i31
+ // CHECK-NEXT: call i31 @llvm.ctlz.i31(i31 [[BI2]], i1 true)
+ bi = __builtin_elementwise_ctlz(bi);
+
+ // CHECK: [[BI1:%.+]] = load i32, ptr %bi.addr
+ // CHECK-NEXT: [[BI2:%.+]] = trunc i32 [[BI1]] to i31
+ // CHECK-NEXT: [[CLZ:%.+]] = call i31 @llvm.ctlz.i31(i31 [[BI2]], i1 true)
+ // CHECK-NEXT: [[ISZERO:%.+]] = icmp eq i31 [[BI2]], 0
+ // CHECK-NEXT: select i1 [[ISZERO]], i31 1, i31 [[CLZ]]
+ bi = __builtin_elementwise_ctlz(bi, (_BitInt(31))1);
+
+ // CHECK: [[I:%.+]] = load i32, ptr %i.addr
+ // CHECK-NEXT: call i32 @llvm.ctlz.i32(i32 [[I]], i1 true)
+ i = __builtin_elementwise_ctlz(i);
+
+ // CHECK: [[CI:%.+]] = load i8, ptr %ci.addr
+ // CHECK-NEXT: call i8 @llvm.ctlz.i8(i8 [[CI]], i1 true)
+ ci = __builtin_elementwise_ctlz(ci);
+}
+
+void test_builtin_elementwise_cttz(si8 vs1, si8 vs2, u4 vu1,
+ long long int lli, short si,
+ _BitInt(31) bi, int i,
+ char ci) {
+ // CHECK: [[V8S1:%.+]] = load <8 x i16>, ptr %vs1.addr
+ // CHECK-NEXT: call <8 x i16> @llvm.cttz.v8i16(<8 x i16> [[V8S1]], i1 true)
+ vs1 = __builtin_elementwise_cttz(vs1);
+
+ // CHECK: [[V8S1:%.+]] = load <8 x i16>, ptr %vs1.addr
+ // CHECK-NEXT: [[ctz:%.+]] = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> [[V8S1]], i1 true)
+ // CHECK-NEXT: [[ISZERO:%.+]] = icmp eq <8 x i16> [[V8S1]], zeroinitializer
+ // CHECK-NEXT: [[V8S2:%.+]] = load <8 x i16>, ptr %vs2.addr
+ // CHECK-NEXT: select <8 x i1> [[ISZERO]], <8 x i16> [[V8S2]], <8 x i16> [[ctz]]
+ vs1 = __builtin_elementwise_cttz(vs1, vs2);
+
+ // CHECK: [[V4U1:%.+]] = load <4 x i32>, ptr %vu1.addr
+ // CHECK-NEXT: call <4 x i32> @llvm.cttz.v4i32(<4 x i32> [[V4U1]], i1 true)
+ vu1 = __builtin_elementwise_cttz(vu1);
+
+ // CHECK: [[LLI:%.+]] = load i64, ptr %lli.addr
+ // CHECK-NEXT: call i64 @llvm.cttz.i64(i64 [[LLI]], i1 true)
+ lli = __builtin_elementwise_cttz(lli);
+
+ // CHECK: [[SI:%.+]] = load i16, ptr %si.addr
+ // CHECK-NEXT: call i16 @llvm.cttz.i16(i16 [[SI]], i1 true)
+ si = __builtin_elementwise_cttz(si);
+
+ // CHECK: [[BI1:%.+]] = load i32, ptr %bi.addr
+ // CHECK-NEXT: [[BI2:%.+]] = trunc i32 [[BI1]] to i31
+ // CHECK-NEXT: call i31 @llvm.cttz.i31(i31 [[BI2]], i1 true)
+ bi = __builtin_elementwise_cttz(bi);
+
+ // CHECK: [[BI1:%.+]] = load i32, ptr %bi.addr
+ // CHECK-NEXT: [[BI2:%.+]] = trunc i32 [[BI1]] to i31
+ // CHECK-NEXT: [[CTZ:%.+]] = call i31 @llvm.cttz.i31(i31 [[BI2]], i1 true)
+ // CHECK-NEXT: [[ISZERO:%.+]] = icmp eq i31 [[BI2]], 0
+ // CHECK-NEXT: select i1 [[ISZERO]], i31 1, i31 [[CTZ]]
+ bi = __builtin_elementwise_cttz(bi, (_BitInt(31))1);
+
+ // CHECK: [[I:%.+]] = load i32, ptr %i.addr
+ // CHECK-NEXT: call i32 @llvm.cttz.i32(i32 [[I]], i1 true)
+ i = __builtin_elementwise_cttz(i);
+
+ // CHECK: [[CI:%.+]] = load i8, ptr %ci.addr
+ // CHECK-NEXT: call i8 @llvm.cttz.i8(i8 [[CI]], i1 true)
+ ci = __builtin_elementwise_cttz(ci);
+}
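
The checks above pin down both forms of the new builtins: the one-argument
form lowers straight to llvm.ctlz/llvm.cttz with "i1 true" (zero input is
undefined), while the two-argument form adds an icmp-against-zero and a
select so the second argument is returned for zero inputs. A minimal usage
sketch, assuming a compiler with this patch applied (the function name is
illustrative):

    // Count leading zeros; well-defined even for x == 0 because a
    // fallback value is supplied (selected when the input is zero).
    static unsigned clz_or(unsigned x, unsigned fallback) {
      return __builtin_elementwise_ctlz(x, fallback);
    }
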
diff --git a/clang/test/CodeGen/builtins-wasm.c b/clang/test/CodeGen/builtins-wasm.c
index f201dfe..375664b 100644
--- a/clang/test/CodeGen/builtins-wasm.c
+++ b/clang/test/CodeGen/builtins-wasm.c
@@ -751,24 +751,3 @@ void *tp (void) {
return __builtin_thread_pointer ();
// WEBASSEMBLY: call {{.*}} @llvm.thread.pointer.p0()
}
-
-typedef void (*Fvoid)(void);
-typedef float (*Ffloats)(float, double, int);
-typedef void (*Fpointers)(Fvoid, Ffloats, void*, int*, int***, char[5]);
-
-void use(int);
-
-void test_function_pointer_signature_void(Fvoid func) {
- // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison)
- use(__builtin_wasm_test_function_pointer_signature(func));
-}
-
-void test_function_pointer_signature_floats(Ffloats func) {
- // WEBASSEMBLY: tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, float 0.000000e+00, token poison, float 0.000000e+00, double 0.000000e+00, i32 0)
- use(__builtin_wasm_test_function_pointer_signature(func));
-}
-
-void test_function_pointer_signature_pointers(Fpointers func) {
- // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, ptr null, ptr null, ptr null, ptr null, ptr null, ptr null)
- use(__builtin_wasm_test_function_pointer_signature(func));
-}
diff --git a/clang/test/CodeGen/builtins-x86.c b/clang/test/CodeGen/builtins-x86.c
index c42c321..31f3097 100644
--- a/clang/test/CodeGen/builtins-x86.c
+++ b/clang/test/CodeGen/builtins-x86.c
@@ -22,6 +22,7 @@ typedef float V2f __attribute__((vector_size(8)));
// 128-bit
typedef char V16c __attribute__((vector_size(16)));
typedef signed short V8s __attribute__((vector_size(16)));
+typedef unsigned short V8u __attribute__((vector_size(16)));
typedef signed int V4i __attribute__((vector_size(16)));
#ifndef OPENCL
typedef signed long long V2LLi __attribute__((vector_size(16)));
@@ -99,6 +100,7 @@ void f0(void) {
// 128-bit
V16c tmp_V16c;
V8s tmp_V8s;
+ V8u tmp_V8u;
V4i tmp_V4i;
V2LLi tmp_V2LLi;
V4f tmp_V4f;
@@ -192,7 +194,7 @@ void f0(void) {
tmp_V16c = __builtin_ia32_packsswb128(tmp_V8s, tmp_V8s);
tmp_V8s = __builtin_ia32_packssdw128(tmp_V4i, tmp_V4i);
tmp_V16c = __builtin_ia32_packuswb128(tmp_V8s, tmp_V8s);
- tmp_V8s = __builtin_ia32_pmulhuw128(tmp_V8s, tmp_V8s);
+ tmp_V8u = __builtin_ia32_pmulhuw128(tmp_V8u, tmp_V8u);
tmp_V4f = __builtin_ia32_addsubps(tmp_V4f, tmp_V4f);
tmp_V2d = __builtin_ia32_addsubpd(tmp_V2d, tmp_V2d);
tmp_V4f = __builtin_ia32_haddps(tmp_V4f, tmp_V4f);
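
The switch from V8s to V8u reflects that __builtin_ia32_pmulhuw128 is now
typed over unsigned element vectors, matching its unsigned high-multiply
semantics. A hedged sketch, assuming an SSE2-enabled x86 target:

    typedef unsigned short V8u __attribute__((vector_size(16)));

    // Per element: the high 16 bits of the 32-bit unsigned product a*b.
    V8u mulhi_u16(V8u a, V8u b) {
      return __builtin_ia32_pmulhuw128(a, b);
    }
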
diff --git a/clang/test/CodeGen/builtins.c b/clang/test/CodeGen/builtins.c
index aa9965b..738814c 100644
--- a/clang/test/CodeGen/builtins.c
+++ b/clang/test/CodeGen/builtins.c
@@ -1,6 +1,7 @@
// RUN: %clang_cc1 -emit-llvm -o %t %s
// RUN: not grep __builtin %t
// RUN: %clang_cc1 -emit-llvm -triple x86_64-darwin-apple -o - %s | FileCheck %s
+// RUN: %clang_cc1 -emit-llvm -triple x86_64-darwin-apple -o - %s -fexperimental-new-constant-interpreter | FileCheck %s
int printf(const char *, ...);
@@ -991,247 +992,288 @@ void test_builtin_os_log_long_double(void *buf, long double ld) {
void test_builtin_popcountg(unsigned char uc, unsigned short us,
unsigned int ui, unsigned long ul,
unsigned long long ull, unsigned __int128 ui128,
- unsigned _BitInt(128) ubi128) {
+ unsigned _BitInt(128) ubi128,
+ _Bool __attribute__((ext_vector_type(8))) vb8) {
volatile int pop;
- pop = __builtin_popcountg(uc);
- // CHECK: %1 = load i8, ptr %uc.addr, align 1
- // CHECK-NEXT: %2 = call i8 @llvm.ctpop.i8(i8 %1)
- // CHECK-NEXT: %cast = zext i8 %2 to i32
+ // CHECK: %2 = load i8, ptr %uc.addr, align 1
+ // CHECK-NEXT: %3 = call i8 @llvm.ctpop.i8(i8 %2)
+ // CHECK-NEXT: %cast = zext i8 %3 to i32
// CHECK-NEXT: store volatile i32 %cast, ptr %pop, align 4
+ pop = __builtin_popcountg(uc);
+ // CHECK: %4 = load i16, ptr %us.addr, align 2
+ // CHECK-NEXT: %5 = call i16 @llvm.ctpop.i16(i16 %4)
+ // CHECK-NEXT: %cast2 = zext i16 %5 to i32
+ // CHECK-NEXT: store volatile i32 %cast2, ptr %pop, align 4
pop = __builtin_popcountg(us);
- // CHECK-NEXT: %3 = load i16, ptr %us.addr, align 2
- // CHECK-NEXT: %4 = call i16 @llvm.ctpop.i16(i16 %3)
- // CHECK-NEXT: %cast1 = zext i16 %4 to i32
- // CHECK-NEXT: store volatile i32 %cast1, ptr %pop, align 4
+ // CHECK: %6 = load i32, ptr %ui.addr, align 4
+ // CHECK-NEXT: %7 = call i32 @llvm.ctpop.i32(i32 %6)
+ // CHECK-NEXT: store volatile i32 %7, ptr %pop, align 4
pop = __builtin_popcountg(ui);
- // CHECK-NEXT: %5 = load i32, ptr %ui.addr, align 4
- // CHECK-NEXT: %6 = call i32 @llvm.ctpop.i32(i32 %5)
- // CHECK-NEXT: store volatile i32 %6, ptr %pop, align 4
+ // CHECK: %8 = load i64, ptr %ul.addr, align 8
+ // CHECK-NEXT: %9 = call i64 @llvm.ctpop.i64(i64 %8)
+ // CHECK-NEXT: %cast3 = trunc i64 %9 to i32
+ // CHECK-NEXT: store volatile i32 %cast3, ptr %pop, align 4
pop = __builtin_popcountg(ul);
- // CHECK-NEXT: %7 = load i64, ptr %ul.addr, align 8
- // CHECK-NEXT: %8 = call i64 @llvm.ctpop.i64(i64 %7)
- // CHECK-NEXT: %cast2 = trunc i64 %8 to i32
- // CHECK-NEXT: store volatile i32 %cast2, ptr %pop, align 4
+ // CHECK: %10 = load i64, ptr %ull.addr, align 8
+ // CHECK-NEXT: %11 = call i64 @llvm.ctpop.i64(i64 %10)
+ // CHECK-NEXT: %cast4 = trunc i64 %11 to i32
+ // CHECK-NEXT: store volatile i32 %cast4, ptr %pop, align 4
pop = __builtin_popcountg(ull);
- // CHECK-NEXT: %9 = load i64, ptr %ull.addr, align 8
- // CHECK-NEXT: %10 = call i64 @llvm.ctpop.i64(i64 %9)
- // CHECK-NEXT: %cast3 = trunc i64 %10 to i32
- // CHECK-NEXT: store volatile i32 %cast3, ptr %pop, align 4
+ // CHECK: %12 = load i128, ptr %ui128.addr, align 16
+ // CHECK-NEXT: %13 = call i128 @llvm.ctpop.i128(i128 %12)
+ // CHECK-NEXT: %cast5 = trunc i128 %13 to i32
+ // CHECK-NEXT: store volatile i32 %cast5, ptr %pop, align 4
pop = __builtin_popcountg(ui128);
- // CHECK-NEXT: %11 = load i128, ptr %ui128.addr, align 16
- // CHECK-NEXT: %12 = call i128 @llvm.ctpop.i128(i128 %11)
- // CHECK-NEXT: %cast4 = trunc i128 %12 to i32
- // CHECK-NEXT: store volatile i32 %cast4, ptr %pop, align 4
+ // CHECK: %14 = load i128, ptr %ubi128.addr, align 8
+ // CHECK-NEXT: %15 = call i128 @llvm.ctpop.i128(i128 %14)
+ // CHECK-NEXT: %cast6 = trunc i128 %15 to i32
+ // CHECK-NEXT: store volatile i32 %cast6, ptr %pop, align 4
pop = __builtin_popcountg(ubi128);
- // CHECK-NEXT: %13 = load i128, ptr %ubi128.addr, align 8
- // CHECK-NEXT: %14 = call i128 @llvm.ctpop.i128(i128 %13)
- // CHECK-NEXT: %cast5 = trunc i128 %14 to i32
- // CHECK-NEXT: store volatile i32 %cast5, ptr %pop, align 4
- // CHECK-NEXT: ret void
+ // CHECK: %load_bits7 = load i8, ptr %vb8.addr, align 1
+ // CHECK-NEXT: %16 = bitcast i8 %load_bits7 to <8 x i1>
+ // CHECK-NEXT: %17 = bitcast <8 x i1> %16 to i8
+ // CHECK-NEXT: %18 = call i8 @llvm.ctpop.i8(i8 %17)
+ // CHECK-NEXT: %cast8 = zext i8 %18 to i32
+ // CHECK-NEXT: store volatile i32 %cast8, ptr %pop, align 4
+ pop = __builtin_popcountg(vb8);
}
// CHECK-LABEL: define{{.*}} void @test_builtin_clzg
void test_builtin_clzg(unsigned char uc, unsigned short us, unsigned int ui,
unsigned long ul, unsigned long long ull,
unsigned __int128 ui128, unsigned _BitInt(128) ubi128,
- signed char sc, short s, int i) {
+ signed char sc, short s, int i,
+ _Bool __attribute__((ext_vector_type(8))) vb8) {
volatile int lz;
+ // CHECK: %2 = load i8, ptr %uc.addr, align 1
+ // CHECK-NEXT: %3 = call i8 @llvm.ctlz.i8(i8 %2, i1 true)
+ // CHECK-NEXT: %cast = zext i8 %3 to i32
+ // CHECK-NEXT: store volatile i32 %cast, ptr %lz, align 4
lz = __builtin_clzg(uc);
- // CHECK: %1 = load i8, ptr %uc.addr, align 1
- // CHECK-NEXT: %2 = call i8 @llvm.ctlz.i8(i8 %1, i1 true)
- // CHECK-NEXT: %cast = zext i8 %2 to i32
- // CHECK-NEXT: store volatile i32 %cast, ptr %lz, align 4
+ // CHECK-NEXT: %4 = load i16, ptr %us.addr, align 2
+ // CHECK-NEXT: %5 = call i16 @llvm.ctlz.i16(i16 %4, i1 true)
+ // CHECK-NEXT: %cast2 = zext i16 %5 to i32
+ // CHECK-NEXT: store volatile i32 %cast2, ptr %lz, align 4
lz = __builtin_clzg(us);
- // CHECK-NEXT: %3 = load i16, ptr %us.addr, align 2
- // CHECK-NEXT: %4 = call i16 @llvm.ctlz.i16(i16 %3, i1 true)
- // CHECK-NEXT: %cast1 = zext i16 %4 to i32
- // CHECK-NEXT: store volatile i32 %cast1, ptr %lz, align 4
+ // CHECK-NEXT: %6 = load i32, ptr %ui.addr, align 4
+ // CHECK-NEXT: %7 = call i32 @llvm.ctlz.i32(i32 %6, i1 true)
+ // CHECK-NEXT: store volatile i32 %7, ptr %lz, align 4
lz = __builtin_clzg(ui);
- // CHECK-NEXT: %5 = load i32, ptr %ui.addr, align 4
- // CHECK-NEXT: %6 = call i32 @llvm.ctlz.i32(i32 %5, i1 true)
- // CHECK-NEXT: store volatile i32 %6, ptr %lz, align 4
+ // CHECK-NEXT: %8 = load i64, ptr %ul.addr, align 8
+ // CHECK-NEXT: %9 = call i64 @llvm.ctlz.i64(i64 %8, i1 true)
+ // CHECK-NEXT: %cast3 = trunc i64 %9 to i32
+ // CHECK-NEXT: store volatile i32 %cast3, ptr %lz, align 4
lz = __builtin_clzg(ul);
- // CHECK-NEXT: %7 = load i64, ptr %ul.addr, align 8
- // CHECK-NEXT: %8 = call i64 @llvm.ctlz.i64(i64 %7, i1 true)
- // CHECK-NEXT: %cast2 = trunc i64 %8 to i32
- // CHECK-NEXT: store volatile i32 %cast2, ptr %lz, align 4
+ // CHECK-NEXT: %10 = load i64, ptr %ull.addr, align 8
+ // CHECK-NEXT: %11 = call i64 @llvm.ctlz.i64(i64 %10, i1 true)
+ // CHECK-NEXT: %cast4 = trunc i64 %11 to i32
+ // CHECK-NEXT: store volatile i32 %cast4, ptr %lz, align 4
lz = __builtin_clzg(ull);
- // CHECK-NEXT: %9 = load i64, ptr %ull.addr, align 8
- // CHECK-NEXT: %10 = call i64 @llvm.ctlz.i64(i64 %9, i1 true)
- // CHECK-NEXT: %cast3 = trunc i64 %10 to i32
- // CHECK-NEXT: store volatile i32 %cast3, ptr %lz, align 4
+ // CHECK-NEXT: %12 = load i128, ptr %ui128.addr, align 16
+ // CHECK-NEXT: %13 = call i128 @llvm.ctlz.i128(i128 %12, i1 true)
+ // CHECK-NEXT: %cast5 = trunc i128 %13 to i32
+ // CHECK-NEXT: store volatile i32 %cast5, ptr %lz, align 4
lz = __builtin_clzg(ui128);
- // CHECK-NEXT: %11 = load i128, ptr %ui128.addr, align 16
- // CHECK-NEXT: %12 = call i128 @llvm.ctlz.i128(i128 %11, i1 true)
- // CHECK-NEXT: %cast4 = trunc i128 %12 to i32
- // CHECK-NEXT: store volatile i32 %cast4, ptr %lz, align 4
+ // CHECK-NEXT: %14 = load i128, ptr %ubi128.addr, align 8
+ // CHECK-NEXT: %15 = call i128 @llvm.ctlz.i128(i128 %14, i1 true)
+ // CHECK-NEXT: %cast6 = trunc i128 %15 to i32
+ // CHECK-NEXT: store volatile i32 %cast6, ptr %lz, align 4
lz = __builtin_clzg(ubi128);
- // CHECK-NEXT: %13 = load i128, ptr %ubi128.addr, align 8
- // CHECK-NEXT: %14 = call i128 @llvm.ctlz.i128(i128 %13, i1 true)
- // CHECK-NEXT: %cast5 = trunc i128 %14 to i32
- // CHECK-NEXT: store volatile i32 %cast5, ptr %lz, align 4
+ // CHECK-NEXT: %load_bits7 = load i8, ptr %vb8.addr, align 1
+ // CHECK-NEXT: %16 = bitcast i8 %load_bits7 to <8 x i1>
+ // CHECK-NEXT: %17 = bitcast <8 x i1> %16 to i8
+ // CHECK-NEXT: %18 = call i8 @llvm.ctlz.i8(i8 %17, i1 true)
+ // CHECK-NEXT: %cast8 = zext i8 %18 to i32
+ // CHECK-NEXT: store volatile i32 %cast8, ptr %lz, align 4
+ lz = __builtin_clzg(vb8);
+ // CHECK-NEXT: %19 = load i8, ptr %uc.addr, align 1
+ // CHECK-NEXT: %20 = call i8 @llvm.ctlz.i8(i8 %19, i1 true)
+ // CHECK-NEXT: %cast9 = zext i8 %20 to i32
+ // CHECK-NEXT: %iszero = icmp eq i8 %19, 0
+ // CHECK-NEXT: %21 = load i8, ptr %sc.addr, align 1
+ // CHECK-NEXT: %conv = sext i8 %21 to i32
+ // CHECK-NEXT: %clzg = select i1 %iszero, i32 %conv, i32 %cast9
+ // CHECK-NEXT: store volatile i32 %clzg, ptr %lz, align 4
lz = __builtin_clzg(uc, sc);
- // CHECK-NEXT: %15 = load i8, ptr %uc.addr, align 1
- // CHECK-NEXT: %16 = call i8 @llvm.ctlz.i8(i8 %15, i1 true)
- // CHECK-NEXT: %cast6 = zext i8 %16 to i32
- // CHECK-NEXT: %iszero = icmp eq i8 %15, 0
- // CHECK-NEXT: %17 = load i8, ptr %sc.addr, align 1
- // CHECK-NEXT: %conv = sext i8 %17 to i32
- // CHECK-NEXT: %clzg = select i1 %iszero, i32 %conv, i32 %cast6
- // CHECK-NEXT: store volatile i32 %clzg, ptr %lz, align 4
+ // CHECK-NEXT: %22 = load i16, ptr %us.addr, align 2
+ // CHECK-NEXT: %23 = call i16 @llvm.ctlz.i16(i16 %22, i1 true)
+ // CHECK-NEXT: %cast10 = zext i16 %23 to i32
+ // CHECK-NEXT: %iszero11 = icmp eq i16 %22, 0
+ // CHECK-NEXT: %24 = load i8, ptr %uc.addr, align 1
+ // CHECK-NEXT: %conv12 = zext i8 %24 to i32
+ // CHECK-NEXT: %clzg13 = select i1 %iszero11, i32 %conv12, i32 %cast10
+ // CHECK-NEXT: store volatile i32 %clzg13, ptr %lz, align 4
lz = __builtin_clzg(us, uc);
- // CHECK-NEXT: %18 = load i16, ptr %us.addr, align 2
- // CHECK-NEXT: %19 = call i16 @llvm.ctlz.i16(i16 %18, i1 true)
- // CHECK-NEXT: %cast7 = zext i16 %19 to i32
- // CHECK-NEXT: %iszero8 = icmp eq i16 %18, 0
- // CHECK-NEXT: %20 = load i8, ptr %uc.addr, align 1
- // CHECK-NEXT: %conv9 = zext i8 %20 to i32
- // CHECK-NEXT: %clzg10 = select i1 %iszero8, i32 %conv9, i32 %cast7
- // CHECK-NEXT: store volatile i32 %clzg10, ptr %lz, align 4
+ // CHECK-NEXT: %25 = load i32, ptr %ui.addr, align 4
+ // CHECK-NEXT: %26 = call i32 @llvm.ctlz.i32(i32 %25, i1 true)
+ // CHECK-NEXT: %iszero14 = icmp eq i32 %25, 0
+ // CHECK-NEXT: %27 = load i16, ptr %s.addr, align 2
+ // CHECK-NEXT: %conv15 = sext i16 %27 to i32
+ // CHECK-NEXT: %clzg16 = select i1 %iszero14, i32 %conv15, i32 %26
+ // CHECK-NEXT: store volatile i32 %clzg16, ptr %lz, align 4
lz = __builtin_clzg(ui, s);
- // CHECK-NEXT: %21 = load i32, ptr %ui.addr, align 4
- // CHECK-NEXT: %22 = call i32 @llvm.ctlz.i32(i32 %21, i1 true)
- // CHECK-NEXT: %iszero11 = icmp eq i32 %21, 0
- // CHECK-NEXT: %23 = load i16, ptr %s.addr, align 2
- // CHECK-NEXT: %conv12 = sext i16 %23 to i32
- // CHECK-NEXT: %clzg13 = select i1 %iszero11, i32 %conv12, i32 %22
- // CHECK-NEXT: store volatile i32 %clzg13, ptr %lz, align 4
+ // CHECK-NEXT: %28 = load i64, ptr %ul.addr, align 8
+ // CHECK-NEXT: %29 = call i64 @llvm.ctlz.i64(i64 %28, i1 true)
+ // CHECK-NEXT: %cast17 = trunc i64 %29 to i32
+ // CHECK-NEXT: %iszero18 = icmp eq i64 %28, 0
+ // CHECK-NEXT: %30 = load i16, ptr %us.addr, align 2
+ // CHECK-NEXT: %conv19 = zext i16 %30 to i32
+ // CHECK-NEXT: %clzg20 = select i1 %iszero18, i32 %conv19, i32 %cast17
+ // CHECK-NEXT: store volatile i32 %clzg20, ptr %lz, align 4
lz = __builtin_clzg(ul, us);
- // CHECK-NEXT: %24 = load i64, ptr %ul.addr, align 8
- // CHECK-NEXT: %25 = call i64 @llvm.ctlz.i64(i64 %24, i1 true)
- // CHECK-NEXT: %cast14 = trunc i64 %25 to i32
- // CHECK-NEXT: %iszero15 = icmp eq i64 %24, 0
- // CHECK-NEXT: %26 = load i16, ptr %us.addr, align 2
- // CHECK-NEXT: %conv16 = zext i16 %26 to i32
- // CHECK-NEXT: %clzg17 = select i1 %iszero15, i32 %conv16, i32 %cast14
- // CHECK-NEXT: store volatile i32 %clzg17, ptr %lz, align 4
+ // CHECK-NEXT: %31 = load i64, ptr %ull.addr, align 8
+ // CHECK-NEXT: %32 = call i64 @llvm.ctlz.i64(i64 %31, i1 true)
+ // CHECK-NEXT: %cast21 = trunc i64 %32 to i32
+ // CHECK-NEXT: %iszero22 = icmp eq i64 %31, 0
+ // CHECK-NEXT: %33 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %clzg23 = select i1 %iszero22, i32 %33, i32 %cast21
+ // CHECK-NEXT: store volatile i32 %clzg23, ptr %lz, align 4
lz = __builtin_clzg(ull, i);
- // CHECK-NEXT: %27 = load i64, ptr %ull.addr, align 8
- // CHECK-NEXT: %28 = call i64 @llvm.ctlz.i64(i64 %27, i1 true)
- // CHECK-NEXT: %cast18 = trunc i64 %28 to i32
- // CHECK-NEXT: %iszero19 = icmp eq i64 %27, 0
- // CHECK-NEXT: %29 = load i32, ptr %i.addr, align 4
- // CHECK-NEXT: %clzg20 = select i1 %iszero19, i32 %29, i32 %cast18
- // CHECK-NEXT: store volatile i32 %clzg20, ptr %lz, align 4
+ // CHECK-NEXT: %34 = load i128, ptr %ui128.addr, align 16
+ // CHECK-NEXT: %35 = call i128 @llvm.ctlz.i128(i128 %34, i1 true)
+ // CHECK-NEXT: %cast24 = trunc i128 %35 to i32
+ // CHECK-NEXT: %iszero25 = icmp eq i128 %34, 0
+ // CHECK-NEXT: %36 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %clzg26 = select i1 %iszero25, i32 %36, i32 %cast24
+ // CHECK-NEXT: store volatile i32 %clzg26, ptr %lz, align 4
lz = __builtin_clzg(ui128, i);
- // CHECK-NEXT: %30 = load i128, ptr %ui128.addr, align 16
- // CHECK-NEXT: %31 = call i128 @llvm.ctlz.i128(i128 %30, i1 true)
- // CHECK-NEXT: %cast21 = trunc i128 %31 to i32
- // CHECK-NEXT: %iszero22 = icmp eq i128 %30, 0
- // CHECK-NEXT: %32 = load i32, ptr %i.addr, align 4
- // CHECK-NEXT: %clzg23 = select i1 %iszero22, i32 %32, i32 %cast21
- // CHECK-NEXT: store volatile i32 %clzg23, ptr %lz, align 4
+ // CHECK-NEXT: %37 = load i128, ptr %ubi128.addr, align 8
+ // CHECK-NEXT: %38 = call i128 @llvm.ctlz.i128(i128 %37, i1 true)
+ // CHECK-NEXT: %cast27 = trunc i128 %38 to i32
+ // CHECK-NEXT: %iszero28 = icmp eq i128 %37, 0
+ // CHECK-NEXT: %39 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %clzg29 = select i1 %iszero28, i32 %39, i32 %cast27
+ // CHECK-NEXT: store volatile i32 %clzg29, ptr %lz, align 4
lz = __builtin_clzg(ubi128, i);
- // CHECK-NEXT: %33 = load i128, ptr %ubi128.addr, align 8
- // CHECK-NEXT: %34 = call i128 @llvm.ctlz.i128(i128 %33, i1 true)
- // CHECK-NEXT: %cast24 = trunc i128 %34 to i32
- // CHECK-NEXT: %iszero25 = icmp eq i128 %33, 0
- // CHECK-NEXT: %35 = load i32, ptr %i.addr, align 4
- // CHECK-NEXT: %clzg26 = select i1 %iszero25, i32 %35, i32 %cast24
- // CHECK-NEXT: store volatile i32 %clzg26, ptr %lz, align 4
- // CHECK-NEXT: ret void
+ // CHECK-NEXT: %load_bits30 = load i8, ptr %vb8.addr, align 1
+ // CHECK-NEXT: %40 = bitcast i8 %load_bits30 to <8 x i1>
+ // CHECK-NEXT: %41 = bitcast <8 x i1> %40 to i8
+ // CHECK-NEXT: %42 = call i8 @llvm.ctlz.i8(i8 %41, i1 true)
+ // CHECK-NEXT: %cast31 = zext i8 %42 to i32
+ // CHECK-NEXT: %iszero32 = icmp eq i8 %41, 0
+ // CHECK-NEXT: %43 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %clzg33 = select i1 %iszero32, i32 %43, i32 %cast31
+ // CHECK-NEXT: store volatile i32 %clzg33, ptr %lz, align 4
+ lz = __builtin_clzg(vb8, i);
}
// CHECK-LABEL: define{{.*}} void @test_builtin_ctzg
void test_builtin_ctzg(unsigned char uc, unsigned short us, unsigned int ui,
unsigned long ul, unsigned long long ull,
unsigned __int128 ui128, unsigned _BitInt(128) ubi128,
- signed char sc, short s, int i) {
+ signed char sc, short s, int i,
+ _Bool __attribute__((ext_vector_type(8))) vb8) {
volatile int tz;
- tz = __builtin_ctzg(uc);
- // CHECK: %1 = load i8, ptr %uc.addr, align 1
- // CHECK-NEXT: %2 = call i8 @llvm.cttz.i8(i8 %1, i1 true)
- // CHECK-NEXT: %cast = zext i8 %2 to i32
+ // CHECK: %2 = load i8, ptr %uc.addr, align 1
+ // CHECK-NEXT: %3 = call i8 @llvm.cttz.i8(i8 %2, i1 true)
+ // CHECK-NEXT: %cast = zext i8 %3 to i32
// CHECK-NEXT: store volatile i32 %cast, ptr %tz, align 4
+ tz = __builtin_ctzg(uc);
+ // CHECK-NEXT: %4 = load i16, ptr %us.addr, align 2
+ // CHECK-NEXT: %5 = call i16 @llvm.cttz.i16(i16 %4, i1 true)
+ // CHECK-NEXT: %cast2 = zext i16 %5 to i32
+ // CHECK-NEXT: store volatile i32 %cast2, ptr %tz, align 4
tz = __builtin_ctzg(us);
- // CHECK-NEXT: %3 = load i16, ptr %us.addr, align 2
- // CHECK-NEXT: %4 = call i16 @llvm.cttz.i16(i16 %3, i1 true)
- // CHECK-NEXT: %cast1 = zext i16 %4 to i32
- // CHECK-NEXT: store volatile i32 %cast1, ptr %tz, align 4
+ // CHECK-NEXT: %6 = load i32, ptr %ui.addr, align 4
+ // CHECK-NEXT: %7 = call i32 @llvm.cttz.i32(i32 %6, i1 true)
+ // CHECK-NEXT: store volatile i32 %7, ptr %tz, align 4
tz = __builtin_ctzg(ui);
- // CHECK-NEXT: %5 = load i32, ptr %ui.addr, align 4
- // CHECK-NEXT: %6 = call i32 @llvm.cttz.i32(i32 %5, i1 true)
- // CHECK-NEXT: store volatile i32 %6, ptr %tz, align 4
+ // CHECK-NEXT: %8 = load i64, ptr %ul.addr, align 8
+ // CHECK-NEXT: %9 = call i64 @llvm.cttz.i64(i64 %8, i1 true)
+ // CHECK-NEXT: %cast3 = trunc i64 %9 to i32
+ // CHECK-NEXT: store volatile i32 %cast3, ptr %tz, align 4
tz = __builtin_ctzg(ul);
- // CHECK-NEXT: %7 = load i64, ptr %ul.addr, align 8
- // CHECK-NEXT: %8 = call i64 @llvm.cttz.i64(i64 %7, i1 true)
- // CHECK-NEXT: %cast2 = trunc i64 %8 to i32
- // CHECK-NEXT: store volatile i32 %cast2, ptr %tz, align 4
+ // CHECK-NEXT: %10 = load i64, ptr %ull.addr, align 8
+ // CHECK-NEXT: %11 = call i64 @llvm.cttz.i64(i64 %10, i1 true)
+ // CHECK-NEXT: %cast4 = trunc i64 %11 to i32
+ // CHECK-NEXT: store volatile i32 %cast4, ptr %tz, align 4
tz = __builtin_ctzg(ull);
- // CHECK-NEXT: %9 = load i64, ptr %ull.addr, align 8
- // CHECK-NEXT: %10 = call i64 @llvm.cttz.i64(i64 %9, i1 true)
- // CHECK-NEXT: %cast3 = trunc i64 %10 to i32
- // CHECK-NEXT: store volatile i32 %cast3, ptr %tz, align 4
+ // CHECK-NEXT: %12 = load i128, ptr %ui128.addr, align 16
+ // CHECK-NEXT: %13 = call i128 @llvm.cttz.i128(i128 %12, i1 true)
+ // CHECK-NEXT: %cast5 = trunc i128 %13 to i32
+ // CHECK-NEXT: store volatile i32 %cast5, ptr %tz, align 4
tz = __builtin_ctzg(ui128);
- // CHECK-NEXT: %11 = load i128, ptr %ui128.addr, align 16
- // CHECK-NEXT: %12 = call i128 @llvm.cttz.i128(i128 %11, i1 true)
- // CHECK-NEXT: %cast4 = trunc i128 %12 to i32
- // CHECK-NEXT: store volatile i32 %cast4, ptr %tz, align 4
+ // CHECK-NEXT: %14 = load i128, ptr %ubi128.addr, align 8
+ // CHECK-NEXT: %15 = call i128 @llvm.cttz.i128(i128 %14, i1 true)
+ // CHECK-NEXT: %cast6 = trunc i128 %15 to i32
+ // CHECK-NEXT: store volatile i32 %cast6, ptr %tz, align 4
tz = __builtin_ctzg(ubi128);
- // CHECK-NEXT: %13 = load i128, ptr %ubi128.addr, align 8
- // CHECK-NEXT: %14 = call i128 @llvm.cttz.i128(i128 %13, i1 true)
- // CHECK-NEXT: %cast5 = trunc i128 %14 to i32
- // CHECK-NEXT: store volatile i32 %cast5, ptr %tz, align 4
- tz = __builtin_ctzg(uc, sc);
- // CHECK-NEXT: %15 = load i8, ptr %uc.addr, align 1
- // CHECK-NEXT: %16 = call i8 @llvm.cttz.i8(i8 %15, i1 true)
- // CHECK-NEXT: %cast6 = zext i8 %16 to i32
- // CHECK-NEXT: %iszero = icmp eq i8 %15, 0
- // CHECK-NEXT: %17 = load i8, ptr %sc.addr, align 1
- // CHECK-NEXT: %conv = sext i8 %17 to i32
- // CHECK-NEXT: %ctzg = select i1 %iszero, i32 %conv, i32 %cast6
+ // CHECK-NEXT: %load_bits7 = load i8, ptr %vb8.addr, align 1
+ // CHECK-NEXT: %16 = bitcast i8 %load_bits7 to <8 x i1>
+ // CHECK-NEXT: %17 = bitcast <8 x i1> %16 to i8
+ // CHECK-NEXT: %18 = call i8 @llvm.cttz.i8(i8 %17, i1 true)
+ // CHECK-NEXT: %cast8 = zext i8 %18 to i32
+ // CHECK-NEXT: store volatile i32 %cast8, ptr %tz, align 4
+ tz = __builtin_ctzg(vb8);
+ // CHECK-NEXT: %19 = load i8, ptr %uc.addr, align 1
+ // CHECK-NEXT: %20 = call i8 @llvm.cttz.i8(i8 %19, i1 true)
+ // CHECK-NEXT: %cast9 = zext i8 %20 to i32
+ // CHECK-NEXT: %iszero = icmp eq i8 %19, 0
+ // CHECK-NEXT: %21 = load i8, ptr %sc.addr, align 1
+ // CHECK-NEXT: %conv = sext i8 %21 to i32
+ // CHECK-NEXT: %ctzg = select i1 %iszero, i32 %conv, i32 %cast9
// CHECK-NEXT: store volatile i32 %ctzg, ptr %tz, align 4
+ tz = __builtin_ctzg(uc, sc);
+ // CHECK-NEXT: %22 = load i16, ptr %us.addr, align 2
+ // CHECK-NEXT: %23 = call i16 @llvm.cttz.i16(i16 %22, i1 true)
+ // CHECK-NEXT: %cast10 = zext i16 %23 to i32
+ // CHECK-NEXT: %iszero11 = icmp eq i16 %22, 0
+ // CHECK-NEXT: %24 = load i8, ptr %uc.addr, align 1
+ // CHECK-NEXT: %conv12 = zext i8 %24 to i32
+ // CHECK-NEXT: %ctzg13 = select i1 %iszero11, i32 %conv12, i32 %cast10
+ // CHECK-NEXT: store volatile i32 %ctzg13, ptr %tz, align 4
tz = __builtin_ctzg(us, uc);
- // CHECK-NEXT: %18 = load i16, ptr %us.addr, align 2
- // CHECK-NEXT: %19 = call i16 @llvm.cttz.i16(i16 %18, i1 true)
- // CHECK-NEXT: %cast7 = zext i16 %19 to i32
- // CHECK-NEXT: %iszero8 = icmp eq i16 %18, 0
- // CHECK-NEXT: %20 = load i8, ptr %uc.addr, align 1
- // CHECK-NEXT: %conv9 = zext i8 %20 to i32
- // CHECK-NEXT: %ctzg10 = select i1 %iszero8, i32 %conv9, i32 %cast7
- // CHECK-NEXT: store volatile i32 %ctzg10, ptr %tz, align 4
+ // CHECK-NEXT: %25 = load i32, ptr %ui.addr, align 4
+ // CHECK-NEXT: %26 = call i32 @llvm.cttz.i32(i32 %25, i1 true)
+ // CHECK-NEXT: %iszero14 = icmp eq i32 %25, 0
+ // CHECK-NEXT: %27 = load i16, ptr %s.addr, align 2
+ // CHECK-NEXT: %conv15 = sext i16 %27 to i32
+ // CHECK-NEXT: %ctzg16 = select i1 %iszero14, i32 %conv15, i32 %26
+ // CHECK-NEXT: store volatile i32 %ctzg16, ptr %tz, align 4
tz = __builtin_ctzg(ui, s);
- // CHECK-NEXT: %21 = load i32, ptr %ui.addr, align 4
- // CHECK-NEXT: %22 = call i32 @llvm.cttz.i32(i32 %21, i1 true)
- // CHECK-NEXT: %iszero11 = icmp eq i32 %21, 0
- // CHECK-NEXT: %23 = load i16, ptr %s.addr, align 2
- // CHECK-NEXT: %conv12 = sext i16 %23 to i32
- // CHECK-NEXT: %ctzg13 = select i1 %iszero11, i32 %conv12, i32 %22
- // CHECK-NEXT: store volatile i32 %ctzg13, ptr %tz, align 4
+ // CHECK-NEXT: %28 = load i64, ptr %ul.addr, align 8
+ // CHECK-NEXT: %29 = call i64 @llvm.cttz.i64(i64 %28, i1 true)
+ // CHECK-NEXT: %cast17 = trunc i64 %29 to i32
+ // CHECK-NEXT: %iszero18 = icmp eq i64 %28, 0
+ // CHECK-NEXT: %30 = load i16, ptr %us.addr, align 2
+ // CHECK-NEXT: %conv19 = zext i16 %30 to i32
+ // CHECK-NEXT: %ctzg20 = select i1 %iszero18, i32 %conv19, i32 %cast17
+ // CHECK-NEXT: store volatile i32 %ctzg20, ptr %tz, align 4
tz = __builtin_ctzg(ul, us);
- // CHECK-NEXT: %24 = load i64, ptr %ul.addr, align 8
- // CHECK-NEXT: %25 = call i64 @llvm.cttz.i64(i64 %24, i1 true)
- // CHECK-NEXT: %cast14 = trunc i64 %25 to i32
- // CHECK-NEXT: %iszero15 = icmp eq i64 %24, 0
- // CHECK-NEXT: %26 = load i16, ptr %us.addr, align 2
- // CHECK-NEXT: %conv16 = zext i16 %26 to i32
- // CHECK-NEXT: %ctzg17 = select i1 %iszero15, i32 %conv16, i32 %cast14
- // CHECK-NEXT: store volatile i32 %ctzg17, ptr %tz, align 4
+ // CHECK-NEXT: %31 = load i64, ptr %ull.addr, align 8
+ // CHECK-NEXT: %32 = call i64 @llvm.cttz.i64(i64 %31, i1 true)
+ // CHECK-NEXT: %cast21 = trunc i64 %32 to i32
+ // CHECK-NEXT: %iszero22 = icmp eq i64 %31, 0
+ // CHECK-NEXT: %33 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %ctzg23 = select i1 %iszero22, i32 %33, i32 %cast21
+ // CHECK-NEXT: store volatile i32 %ctzg23, ptr %tz, align 4
tz = __builtin_ctzg(ull, i);
- // CHECK-NEXT: %27 = load i64, ptr %ull.addr, align 8
- // CHECK-NEXT: %28 = call i64 @llvm.cttz.i64(i64 %27, i1 true)
- // CHECK-NEXT: %cast18 = trunc i64 %28 to i32
- // CHECK-NEXT: %iszero19 = icmp eq i64 %27, 0
- // CHECK-NEXT: %29 = load i32, ptr %i.addr, align 4
- // CHECK-NEXT: %ctzg20 = select i1 %iszero19, i32 %29, i32 %cast18
- // CHECK-NEXT: store volatile i32 %ctzg20, ptr %tz, align 4
+ // CHECK-NEXT: %34 = load i128, ptr %ui128.addr, align 16
+ // CHECK-NEXT: %35 = call i128 @llvm.cttz.i128(i128 %34, i1 true)
+ // CHECK-NEXT: %cast24 = trunc i128 %35 to i32
+ // CHECK-NEXT: %iszero25 = icmp eq i128 %34, 0
+ // CHECK-NEXT: %36 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %ctzg26 = select i1 %iszero25, i32 %36, i32 %cast24
+ // CHECK-NEXT: store volatile i32 %ctzg26, ptr %tz, align 4
tz = __builtin_ctzg(ui128, i);
- // CHECK-NEXT: %30 = load i128, ptr %ui128.addr, align 16
- // CHECK-NEXT: %31 = call i128 @llvm.cttz.i128(i128 %30, i1 true)
- // CHECK-NEXT: %cast21 = trunc i128 %31 to i32
- // CHECK-NEXT: %iszero22 = icmp eq i128 %30, 0
- // CHECK-NEXT: %32 = load i32, ptr %i.addr, align 4
- // CHECK-NEXT: %ctzg23 = select i1 %iszero22, i32 %32, i32 %cast21
- // CHECK-NEXT: store volatile i32 %ctzg23, ptr %tz, align 4
+ // CHECK-NEXT: %37 = load i128, ptr %ubi128.addr, align 8
+ // CHECK-NEXT: %38 = call i128 @llvm.cttz.i128(i128 %37, i1 true)
+ // CHECK-NEXT: %cast27 = trunc i128 %38 to i32
+ // CHECK-NEXT: %iszero28 = icmp eq i128 %37, 0
+ // CHECK-NEXT: %39 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %ctzg29 = select i1 %iszero28, i32 %39, i32 %cast27
+ // CHECK-NEXT: store volatile i32 %ctzg29, ptr %tz, align 4
tz = __builtin_ctzg(ubi128, i);
- // CHECK-NEXT: %33 = load i128, ptr %ubi128.addr, align 8
- // CHECK-NEXT: %34 = call i128 @llvm.cttz.i128(i128 %33, i1 true)
- // CHECK-NEXT: %cast24 = trunc i128 %34 to i32
- // CHECK-NEXT: %iszero25 = icmp eq i128 %33, 0
- // CHECK-NEXT: %35 = load i32, ptr %i.addr, align 4
- // CHECK-NEXT: %ctzg26 = select i1 %iszero25, i32 %35, i32 %cast24
- // CHECK-NEXT: store volatile i32 %ctzg26, ptr %tz, align 4
- // CHECK-NEXT: ret void
+ // CHECK-NEXT: %load_bits30 = load i8, ptr %vb8.addr, align 1
+ // CHECK-NEXT: %40 = bitcast i8 %load_bits30 to <8 x i1>
+ // CHECK-NEXT: %41 = bitcast <8 x i1> %40 to i8
+ // CHECK-NEXT: %42 = call i8 @llvm.cttz.i8(i8 %41, i1 true)
+ // CHECK-NEXT: %cast31 = zext i8 %42 to i32
+ // CHECK-NEXT: %iszero32 = icmp eq i8 %41, 0
+ // CHECK-NEXT: %43 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %ctzg33 = select i1 %iszero32, i32 %43, i32 %cast31
+ // CHECK-NEXT: store volatile i32 %ctzg33, ptr %tz, align 4
+ tz = __builtin_ctzg(vb8, i);
}
#endif
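
The added vb8 cases show the generic bit-counting builtins accepting boolean
vectors: the <8 x i1> value is loaded in its i8 storage form, bitcast, and
handed to the ordinary i8 ctpop/ctlz/cttz intrinsic. A small sketch of what
this permits (names are illustrative):

    typedef _Bool bool8 __attribute__((ext_vector_type(8)));

    // Counts the set lanes of an 8-lane boolean vector; per the checks
    // above, this lowers to llvm.ctpop.i8 on the vector's i8 storage.
    static int count_true_lanes(bool8 m) {
      return __builtin_popcountg(m);
    }
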
diff --git a/clang/test/CodeGen/c-strings.c b/clang/test/CodeGen/c-strings.c
index 988deee..31c438f 100644
--- a/clang/test/CodeGen/c-strings.c
+++ b/clang/test/CodeGen/c-strings.c
@@ -1,4 +1,3 @@
-// XFAIL: target=aarch64-pc-windows-{{.*}}
// RUN: %clang_cc1 -triple %itanium_abi_triple -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK --check-prefix=ITANIUM
// RUN: %clang_cc1 -triple %ms_abi_triple -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK --check-prefix=MSABI
@@ -16,6 +15,11 @@
// MSABI: @f4.x = internal global %struct.s { ptr @"??_C@_05CJBACGMB@hello?$AA@" }
// CHECK: @x = {{(dso_local )?}}global [3 x i8] c"ola", align [[ALIGN]]
+// XFAIL: target=aarch64-{{.*}}-windows-msvc, target=arm64ec-{{.*}}-windows-msvc
+// Arm64 in MSVC mode aligns arrays to either 32-bit or 64-bit boundaries,
+// which causes various checks above to fail, since ALIGN is derived from the
+// alignment of a single i8, which is still 1.
+
// XFAIL: target=hexagon-{{.*}}
// Hexagon aligns arrays of size 8+ bytes to a 64-bit boundary, which
// fails the check for "@f3.x = ... align [ALIGN]", since ALIGN is derived
diff --git a/clang/test/CodeGen/cfi-salt.c b/clang/test/CodeGen/cfi-salt.c
new file mode 100644
index 0000000..7ba1e2f
--- /dev/null
+++ b/clang/test/CodeGen/cfi-salt.c
@@ -0,0 +1,188 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -fsanitize=kcfi -DORIG_ATTR_SYN -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -fsanitize=kcfi -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -fsanitize=kcfi -fpatchable-function-entry-offset=3 -DORIG_ATTR_SYN -o - %s | FileCheck %s --check-prefixes=CHECK,OFFSET
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -fsanitize=kcfi -fpatchable-function-entry-offset=3 -o - %s | FileCheck %s --check-prefixes=CHECK,OFFSET
+
+// Note that the interleaving of functions, which would normally appear in
+// sequence, is due to Clang emitting them in a non-sequential order.
+
+#if !__has_feature(kcfi)
+#error Missing kcfi?
+#endif
+
+#ifdef ORIG_ATTR_SYN
+#define __cfi_salt __attribute__((cfi_salt("pepper")))
+#define __cfi_salt_empty __attribute__((cfi_salt("")))
+#else
+#define __cfi_salt [[clang::cfi_salt("pepper")]]
+#define __cfi_salt_empty [[clang::cfi_salt("")]]
+#endif
+
+typedef int (*fn_t)(void);
+typedef int (* __cfi_salt fn_salt_t)(void);
+typedef int (* __cfi_salt_empty fn_salt_empty_t)(void);
+
+typedef unsigned int (*ufn_t)(void);
+typedef unsigned int (* __cfi_salt ufn_salt_t)(void);
+
+/// Must emit __kcfi_typeid symbols for address-taken function declarations
+// CHECK: module asm ".weak __kcfi_typeid_[[F4:[a-zA-Z0-9_]+]]"
+// CHECK: module asm ".set __kcfi_typeid_[[F4]], [[#%d,LOW_SODIUM_HASH:]]"
+// CHECK: module asm ".weak __kcfi_typeid_[[F4_SALT:[a-zA-Z0-9_]+]]"
+// CHECK: module asm ".set __kcfi_typeid_[[F4_SALT]], [[#%d,ASM_SALTY_HASH:]]"
+
+/// Must not emit __kcfi_typeid symbols for non-address-taken declarations
+// CHECK-NOT: module asm ".weak __kcfi_typeid_f6"
+
+int f1(void);
+int f1_salt(void) __cfi_salt;
+
+unsigned int f2(void);
+unsigned int f2_salt(void) __cfi_salt;
+
+static int f3(void);
+static int f3_salt(void) __cfi_salt;
+
+extern int f4(void);
+extern int f4_salt(void) __cfi_salt;
+
+static int f5(void);
+static int f5_salt(void) __cfi_salt;
+
+extern int f6(void);
+extern int f6_salt(void) __cfi_salt;
+
+int f8(void);
+int f8_salt_empty(void) __cfi_salt_empty;
+
+struct cfi_struct {
+ fn_t __cfi_salt fptr;
+ fn_salt_t td_fptr;
+ fn_salt_empty_t td_empty_fptr;
+};
+
+int f7_salt(struct cfi_struct *ptr);
+int f7_typedef_salt(struct cfi_struct *ptr);
+
+// CHECK-LABEL: @__call
+// CHECK: call{{.*}} i32
+// CHECK-NOT: "kcfi"
+// CHECK-SAME: ()
+__attribute__((__no_sanitize__("kcfi")))
+int __call(fn_t f) {
+ return f();
+}
+
+// CHECK-LABEL: @call
+// CHECK: call{{.*}} i32 %{{.}}(){{.*}} [ "kcfi"(i32 [[#LOW_SODIUM_HASH]]) ]
+// CHECK-LABEL: @call_salt
+// CHECK: call{{.*}} i32 %{{.}}(){{.*}} [ "kcfi"(i32 [[#%d,SALTY_HASH:]]) ]
+// CHECK-LABEL: @call_salt_ty
+// CHECK: call{{.*}} i32 %{{.}}(){{.*}} [ "kcfi"(i32 [[#SALTY_HASH]]) ]
+int call(fn_t f) { return f(); }
+int call_salt(fn_t __cfi_salt f) { return f(); }
+int call_salt_ty(fn_salt_t f) { return f(); }
+int call_salt_empty_ty(fn_salt_empty_t f) { return f(); }
+
+// CHECK-LABEL: @ucall
+// CHECK: call{{.*}} i32 %{{.}}(){{.*}} [ "kcfi"(i32 [[#%d,LOW_SODIUM_UHASH:]]) ]
+// CHECK-LABEL: @ucall_salt
+// CHECK: call{{.*}} i32 %{{.}}(){{.*}} [ "kcfi"(i32 [[#%d,SALTY_UHASH:]]) ]
+// CHECK-LABEL: @ucall_salt_ty
+// CHECK: call{{.*}} i32 %{{.}}(){{.*}} [ "kcfi"(i32 [[#SALTY_UHASH]]) ]
+unsigned int ucall(ufn_t f) { return f(); }
+unsigned int ucall_salt(ufn_t __cfi_salt f) { return f(); }
+unsigned int ucall_salt_ty(ufn_salt_t f) { return f(); }
+
+int test1(struct cfi_struct *ptr) {
+ return call(f1) +
+ call_salt(f1_salt) +
+ call_salt_ty(f1_salt) +
+
+ __call((fn_t)f2) +
+ __call((fn_t)f2_salt) +
+
+ ucall(f2) +
+ ucall_salt(f2_salt) +
+ ucall_salt_ty(f2_salt) +
+
+ call(f3) +
+ call_salt(f3_salt) +
+ call_salt_ty(f3_salt) +
+
+ call(f4) +
+ call_salt(f4_salt) +
+ call_salt_ty(f4_salt) +
+
+ f5() +
+ f5_salt() +
+
+ f6() +
+ f6_salt() +
+
+ f7_salt(ptr) +
+ f7_typedef_salt(ptr) +
+
+ f8() +
+ f8_salt_empty();
+}
+
+// CHECK-LABEL: define dso_local{{.*}} i32 @f1(){{.*}} !kcfi_type
+// CHECK-SAME: ![[#LOW_SODIUM_TYPE:]]
+// CHECK-LABEL: define dso_local{{.*}} i32 @f1_salt(){{.*}} !kcfi_type
+// CHECK-SAME: ![[#SALTY_TYPE:]]
+int f1(void) { return 0; }
+int f1_salt(void) __cfi_salt { return 0; }
+
+// CHECK-LABEL: define dso_local{{.*}} i32 @f2(){{.*}} !kcfi_type
+// CHECK-SAME: ![[#LOW_SODIUM_UTYPE:]]
+// CHECK: define dso_local{{.*}} i32 @f2_salt(){{.*}} !kcfi_type
+// CHECK-SAME: ![[#SALTY_UTYPE:]]
+unsigned int f2(void) { return 2; }
+unsigned int f2_salt(void) __cfi_salt { return 2; }
+
+// CHECK-LABEL: define internal{{.*}} i32 @f3(){{.*}} !kcfi_type
+// CHECK-SAME: ![[#LOW_SODIUM_TYPE]]
+// CHECK-LABEL: define internal{{.*}} i32 @f3_salt(){{.*}} !kcfi_type
+// CHECK-SAME: ![[#SALTY_TYPE]]
+static int f3(void) { return 1; }
+static int f3_salt(void) __cfi_salt { return 1; }
+
+// CHECK: declare !kcfi_type ![[#LOW_SODIUM_TYPE]]{{.*}} i32 @[[F4]]()
+// CHECK: declare !kcfi_type ![[#SALTY_TYPE]]{{.*}} i32 @[[F4_SALT]]()
+
+/// Must not emit !kcfi_type for non-address-taken local functions
+// CHECK-LABEL: define internal{{.*}} i32 @f5()
+// CHECK-NOT: !kcfi_type
+// CHECK-SAME: {
+// CHECK-LABEL: define internal{{.*}} i32 @f5_salt()
+// CHECK-NOT: !kcfi_type
+// CHECK-SAME: {
+static int f5(void) { return 2; }
+static int f5_salt(void) __cfi_salt { return 2; }
+
+// CHECK: declare !kcfi_type ![[#LOW_SODIUM_TYPE]]{{.*}} i32 @f6()
+// CHECK: declare !kcfi_type ![[#SALTY_TYPE]]{{.*}} i32 @f6_salt()
+
+// CHECK-LABEL: @f7_salt
+// CHECK: call{{.*}} i32 %{{.*}}() [ "kcfi"(i32 [[#SALTY_HASH]]) ]
+// CHECK-LABEL: @f7_typedef_salt
+// CHECK: call{{.*}} i32 %{{.*}}() [ "kcfi"(i32 [[#SALTY_HASH]]) ]
+int f7_salt(struct cfi_struct *ptr) { return ptr->fptr(); }
+int f7_typedef_salt(struct cfi_struct *ptr) { return ptr->td_fptr(); }
+
+// CHECK-LABEL: define dso_local{{.*}} i32 @f8(){{.*}} !kcfi_type
+// CHECK-SAME: ![[#LOW_SODIUM_TYPE]]
+// CHECK-LABEL: define dso_local{{.*}} i32 @f8_salt_empty(){{.*}} !kcfi_type
+// CHECK-SAME: ![[#LOW_SODIUM_TYPE]]
+int f8(void) { return 0; }
+int f8_salt_empty(void) __cfi_salt_empty { return 0; }
+
+// CHECK: ![[#]] = !{i32 4, !"kcfi", i32 1}
+// OFFSET: ![[#]] = !{i32 4, !"kcfi-offset", i32 3}
+//
+// CHECK: ![[#LOW_SODIUM_TYPE]] = !{i32 [[#LOW_SODIUM_HASH]]}
+// CHECK: ![[#SALTY_TYPE]] = !{i32 [[#SALTY_HASH]]}
+//
+// CHECK: ![[#LOW_SODIUM_UTYPE]] = !{i32 [[#LOW_SODIUM_UHASH]]}
+// CHECK: ![[#SALTY_UTYPE]] = !{i32 [[#SALTY_UHASH]]}
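
The test establishes the contract of the new attribute: salting changes the
KCFI type hash without changing the C type, so an indirect call through a
salted function pointer only passes the runtime check against targets
carrying the same salt string, and (per the f8 checks) an empty salt string
behaves like no salt. A minimal sketch of the intended usage, with
illustrative names:

    #define __cfi_salt __attribute__((cfi_salt("sensor")))

    typedef long (* __cfi_salt measure_fn)(void);

    static long read_temp(void) __cfi_salt { return 21; }

    // The kcfi operand bundle on this indirect call uses the
    // "sensor"-salted hash, so only identically salted functions of
    // this prototype pass the check.
    long sample(measure_fn f) { return f(); }

    long sample_temp(void) { return sample(read_temp); }
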
diff --git a/clang/test/CodeGen/cleanup-destslot-simple.c b/clang/test/CodeGen/cleanup-destslot-simple.c
index a02841a..8ace332 100644
--- a/clang/test/CodeGen/cleanup-destslot-simple.c
+++ b/clang/test/CodeGen/cleanup-destslot-simple.c
@@ -13,14 +13,14 @@
// CHECK-LIFETIME-NEXT: entry:
// CHECK-LIFETIME-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK-LIFETIME-NEXT: [[P:%.*]] = alloca ptr, align 8
-// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]]) #[[ATTR2:[0-9]+]], !dbg [[DBG9:![0-9]+]]
+// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) #[[ATTR2:[0-9]+]], !dbg [[DBG9:![0-9]+]]
// CHECK-LIFETIME-NEXT: store i32 3, ptr [[X]], align 4, !dbg [[DBG10:![0-9]+]], !tbaa [[TBAA11:![0-9]+]]
-// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[P]]), !dbg [[DBG15:![0-9]+]]
+// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[P]]), !dbg [[DBG15:![0-9]+]]
// CHECK-LIFETIME-NEXT: store volatile ptr [[X]], ptr [[P]], align 8, !dbg [[DBG16:![0-9]+]], !tbaa [[TBAA17:![0-9]+]]
// CHECK-LIFETIME-NEXT: [[P_0_P_0_P_0_P_0_:%.*]] = load volatile ptr, ptr [[P]], align 8, !dbg [[DBG19:![0-9]+]], !tbaa [[TBAA17]]
// CHECK-LIFETIME-NEXT: [[TMP0:%.*]] = load i32, ptr [[P_0_P_0_P_0_P_0_]], align 4, !dbg [[DBG20:![0-9]+]], !tbaa [[TBAA11]]
-// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[P]]), !dbg [[DBG21:![0-9]+]]
-// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG21]]
+// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[P]]), !dbg [[DBG21:![0-9]+]]
+// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG21]]
// CHECK-LIFETIME-NEXT: ret i32 [[TMP0]], !dbg [[DBG22:![0-9]+]]
//
// CHECK-OPTNONE-LABEL: @test(
@@ -37,13 +37,13 @@
// CHECK-MSAN-NEXT: entry:
// CHECK-MSAN-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK-MSAN-NEXT: [[P:%.*]] = alloca ptr, align 8
-// CHECK-MSAN-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]]) #[[ATTR2:[0-9]+]], !dbg [[DBG9:![0-9]+]]
+// CHECK-MSAN-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) #[[ATTR2:[0-9]+]], !dbg [[DBG9:![0-9]+]]
// CHECK-MSAN-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[X]] to i64, !dbg [[DBG9]]
// CHECK-MSAN-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG9]]
// CHECK-MSAN-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG9]]
// CHECK-MSAN-NEXT: store i32 0, ptr [[TMP2]], align 4, !dbg [[DBG10:![0-9]+]]
// CHECK-MSAN-NEXT: store i32 3, ptr [[X]], align 4, !dbg [[DBG10]], !tbaa [[TBAA11:![0-9]+]]
-// CHECK-MSAN-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[P]]), !dbg [[DBG15:![0-9]+]]
+// CHECK-MSAN-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[P]]), !dbg [[DBG15:![0-9]+]]
// CHECK-MSAN-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64, !dbg [[DBG15]]
// CHECK-MSAN-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080, !dbg [[DBG15]]
// CHECK-MSAN-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG15]]
@@ -62,8 +62,8 @@
// CHECK-MSAN-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080, !dbg [[DBG20]]
// CHECK-MSAN-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr, !dbg [[DBG20]]
// CHECK-MSAN-NEXT: [[_MSLD1:%.*]] = load i32, ptr [[TMP11]], align 4, !dbg [[DBG20]]
-// CHECK-MSAN-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[P]]), !dbg [[DBG22:![0-9]+]]
-// CHECK-MSAN-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG22]]
+// CHECK-MSAN-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[P]]), !dbg [[DBG22:![0-9]+]]
+// CHECK-MSAN-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG22]]
// CHECK-MSAN-NEXT: [[_MSCMP2_NOT:%.*]] = icmp eq i32 [[_MSLD1]], 0, !dbg [[DBG23:![0-9]+]]
// CHECK-MSAN-NEXT: br i1 [[_MSCMP2_NOT]], label [[TMP13:%.*]], label [[TMP12:%.*]], !dbg [[DBG23]], !prof [[PROF21]]
// CHECK-MSAN: 12:
@@ -77,13 +77,13 @@
// CHECK-KMSAN-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() #[[ATTR2:[0-9]+]]
// CHECK-KMSAN-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK-KMSAN-NEXT: [[P:%.*]] = alloca ptr, align 8
-// CHECK-KMSAN-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG9:![0-9]+]]
+// CHECK-KMSAN-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG9:![0-9]+]]
// CHECK-KMSAN-NEXT: call void @__msan_poison_alloca(ptr nonnull [[X]], i64 4, ptr nonnull @[[GLOB0:[0-9]+]]) #[[ATTR2]], !dbg [[DBG9]]
// CHECK-KMSAN-NEXT: [[TMP1:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_4(ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG10:![0-9]+]]
// CHECK-KMSAN-NEXT: [[TMP2:%.*]] = extractvalue { ptr, ptr } [[TMP1]], 0, !dbg [[DBG10]]
// CHECK-KMSAN-NEXT: store i32 0, ptr [[TMP2]], align 4, !dbg [[DBG10]]
// CHECK-KMSAN-NEXT: store i32 3, ptr [[X]], align 4, !dbg [[DBG10]], !tbaa [[TBAA11:![0-9]+]]
-// CHECK-KMSAN-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[P]]), !dbg [[DBG15:![0-9]+]]
+// CHECK-KMSAN-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[P]]), !dbg [[DBG15:![0-9]+]]
// CHECK-KMSAN-NEXT: call void @__msan_poison_alloca(ptr nonnull [[P]], i64 8, ptr nonnull @[[GLOB1:[0-9]+]]) #[[ATTR2]], !dbg [[DBG15]]
// CHECK-KMSAN-NEXT: [[TMP3:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_8(ptr nonnull [[P]]) #[[ATTR2]], !dbg [[DBG16:![0-9]+]]
// CHECK-KMSAN-NEXT: [[TMP4:%.*]] = extractvalue { ptr, ptr } [[TMP3]], 0, !dbg [[DBG16]]
@@ -109,8 +109,8 @@
// CHECK-KMSAN-NEXT: [[TMP14:%.*]] = extractvalue { ptr, ptr } [[TMP12]], 1, !dbg [[DBG20]]
// CHECK-KMSAN-NEXT: [[_MSLD1:%.*]] = load i32, ptr [[TMP13]], align 4, !dbg [[DBG20]]
// CHECK-KMSAN-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4, !dbg [[DBG20]]
-// CHECK-KMSAN-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[P]]), !dbg [[DBG22:![0-9]+]]
-// CHECK-KMSAN-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG22]]
+// CHECK-KMSAN-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[P]]), !dbg [[DBG22:![0-9]+]]
+// CHECK-KMSAN-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG22]]
// CHECK-KMSAN-NEXT: store i32 [[_MSLD1]], ptr [[RETVAL_SHADOW]], align 8, !dbg [[DBG23:![0-9]+]]
// CHECK-KMSAN-NEXT: store i32 [[TMP15]], ptr [[RETVAL_ORIGIN]], align 4, !dbg [[DBG23]]
// CHECK-KMSAN-NEXT: ret i32 [[TMP11]], !dbg [[DBG23]]
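
This file and the lifetime tests below change mechanically: the
llvm.lifetime.start/llvm.lifetime.end intrinsics dropped their explicit i64
size operand, and the size of the marked object is now taken from the alloca
itself, so every check simply loses its leading "i64 N,". The C pattern under
test is unchanged; a sketch:

    extern void use(char *p);

    // Clang brackets the alloca for `a` with lifetime markers; after
    // this change they are emitted as:
    //   call void @llvm.lifetime.start.p0(ptr %a)
    //   ...
    //   call void @llvm.lifetime.end.p0(ptr %a)
    void f(void) {
      char a;
      use(&a);
    }
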
diff --git a/clang/test/CodeGen/dominating-store-to-return.c b/clang/test/CodeGen/dominating-store-to-return.c
index 1c53e35..d095f3f 100644
--- a/clang/test/CodeGen/dominating-store-to-return.c
+++ b/clang/test/CodeGen/dominating-store-to-return.c
@@ -16,10 +16,10 @@
// LIFETIME-NEXT: [[FOO:%.*]] = alloca i32, align 4
// LIFETIME-NEXT: [[FOO2:%.*]] = alloca i32, align 4
// LIFETIME-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// LIFETIME-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[FOO]]) #[[ATTR2:[0-9]+]]
-// LIFETIME-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[FOO2]]) #[[ATTR2]]
-// LIFETIME-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[FOO2]]) #[[ATTR2]]
-// LIFETIME-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[FOO]]) #[[ATTR2]]
+// LIFETIME-NEXT: call void @llvm.lifetime.start.p0(ptr [[FOO]]) #[[ATTR2:[0-9]+]]
+// LIFETIME-NEXT: call void @llvm.lifetime.start.p0(ptr [[FOO2]]) #[[ATTR2]]
+// LIFETIME-NEXT: call void @llvm.lifetime.end.p0(ptr [[FOO2]]) #[[ATTR2]]
+// LIFETIME-NEXT: call void @llvm.lifetime.end.p0(ptr [[FOO]]) #[[ATTR2]]
// LIFETIME-NEXT: ret i32 0
//
int main() {
diff --git a/clang/test/CodeGen/func-attr.c b/clang/test/CodeGen/func-attr.c
index 96c3d91..1b36c51 100644
--- a/clang/test/CodeGen/func-attr.c
+++ b/clang/test/CodeGen/func-attr.c
@@ -22,7 +22,6 @@ float foo(float a, float b) {
// NOFINITEONLY: define{{.*}} float @foo(float noundef %{{.*}}, float noundef %{{.*}}){{.*}} [[ATTRS:#[0-9]+]]
// CHECK: attributes [[ATTRS]] = {
-// CHECK-SAME: "approx-func-fp-math"="true"
// CHECK-SAME: "no-signed-zeros-fp-math"="true"
// CHECK-SAME: "no-trapping-math"="true"
// CHECK-UNSAFE-SAME: "unsafe-fp-math"="true"
diff --git a/clang/test/CodeGen/issue155126.c b/clang/test/CodeGen/issue155126.c
new file mode 100644
index 0000000..56f00f8
--- /dev/null
+++ b/clang/test/CodeGen/issue155126.c
@@ -0,0 +1,17 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -std=c23 %s -triple x86_64 -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -std=c23 %s -triple x86_64 -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s
+
+enum e : bool { b = true };
+// CHECK-LABEL: define dso_local void @foo(
+// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[E1:%.*]] = alloca i8, align 1
+// CHECK-NEXT: store i8 0, ptr [[E1]], align 1
+// CHECK-NEXT: ret void
+//
+void foo ()
+{
+ enum e e1;
+ e1 = (bool) nullptr;
+}
diff --git a/clang/test/CodeGen/kcfi-generalize.c b/clang/test/CodeGen/kcfi-generalize.c
new file mode 100644
index 0000000..4e32f4f
--- /dev/null
+++ b/clang/test/CodeGen/kcfi-generalize.c
@@ -0,0 +1,33 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux -fsanitize=kcfi -fsanitize-trap=kcfi -emit-llvm -o - %s | FileCheck --check-prefix=CHECK --check-prefix=UNGENERALIZED %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux -fsanitize=kcfi -fsanitize-trap=kcfi -fsanitize-cfi-icall-generalize-pointers -emit-llvm -o - %s | FileCheck --check-prefix=CHECK --check-prefix=GENERALIZED %s
+
+// Test that const char* is generalized to const ptr and that char** is
+// generalized to ptr
+
+// CHECK: define{{.*}} ptr @f({{.*}} !kcfi_type [[TYPE:![0-9]+]]
+int** f(const char *a, const char **b) {
+ return (int**)0;
+}
+
+// GENERALIZED: define{{.*}} ptr @f2({{.*}} !kcfi_type [[TYPE]]
+// UNGENERALIZED: define{{.*}} ptr @f2({{.*}} !kcfi_type [[TYPE2:![0-9]+]]
+int** f2(const int *a, const int **b) {
+ return (int**)0;
+}
+
+// CHECK: define{{.*}} ptr @f3({{.*}} !kcfi_type [[TYPE3:![0-9]+]]
+int** f3(char *a, char **b) {
+ return (int**)0;
+}
+
+void g(int** (*fp)(const char *, const char **)) {
+ // UNGENERALIZED: call {{.*}} [ "kcfi"(i32 1296635908) ]
+ // GENERALIZED: call {{.*}} [ "kcfi"(i32 -49168686) ]
+ fp(0, 0);
+}
+
+// UNGENERALIZED: [[TYPE]] = !{i32 1296635908}
+// GENERALIZED: [[TYPE]] = !{i32 -49168686}
+
+// UNGENERALIZED: [[TYPE3]] = !{i32 874141567}
+// GENERALIZED: [[TYPE3]] = !{i32 954385378}
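
The f/f2 pairing makes the generalization rule concrete: with
-fsanitize-cfi-icall-generalize-pointers, pointer parameter and return types
are degraded before hashing (const char* and const int* both become
"const ptr"; char** and int** become "ptr"), so prototypes differing only in
pointee type share one KCFI hash. A hedged sketch of the consequence
(declarations are illustrative):

    // Under generalization these two indirect calls check against the
    // same hash; without it, they use distinct hashes.
    int probe(int **(*fp)(const char *, const char **),
              int **(*gp)(const int *, const int **)) {
      return (fp(0, 0) != 0) + (gp(0, 0) != 0);
    }
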
diff --git a/clang/test/CodeGen/lifetime-sanitizer.c b/clang/test/CodeGen/lifetime-sanitizer.c
index 68879fd..4d36bd7 100644
--- a/clang/test/CodeGen/lifetime-sanitizer.c
+++ b/clang/test/CodeGen/lifetime-sanitizer.c
@@ -18,14 +18,14 @@ extern int bar(char *A, int n);
// CHECK-O0-NOT: @llvm.lifetime.start
int foo(int n) {
if (n) {
- // LIFETIME: @llvm.lifetime.start.p0(i64 10, ptr {{.*}})
+ // LIFETIME: @llvm.lifetime.start.p0(ptr {{.*}})
char A[10];
return bar(A, 1);
- // LIFETIME: @llvm.lifetime.end.p0(i64 10, ptr {{.*}})
+ // LIFETIME: @llvm.lifetime.end.p0(ptr {{.*}})
} else {
- // LIFETIME: @llvm.lifetime.start.p0(i64 20, ptr {{.*}})
+ // LIFETIME: @llvm.lifetime.start.p0(ptr {{.*}})
char A[20];
return bar(A, 2);
- // LIFETIME: @llvm.lifetime.end.p0(i64 20, ptr {{.*}})
+ // LIFETIME: @llvm.lifetime.end.p0(ptr {{.*}})
}
}
diff --git a/clang/test/CodeGen/lifetime.c b/clang/test/CodeGen/lifetime.c
index 748a496..edb6867 100644
--- a/clang/test/CodeGen/lifetime.c
+++ b/clang/test/CodeGen/lifetime.c
@@ -8,14 +8,14 @@ extern void use(char *a);
// CHECK-LABEL: @helper_no_markers
__attribute__((always_inline)) void helper_no_markers(void) {
char a;
- // LIFETIME: call void @llvm.lifetime.start.p0(i64 1,
+ // LIFETIME: call void @llvm.lifetime.start.p0(
use(&a);
- // LIFETIME: call void @llvm.lifetime.end.p0(i64 1,
+ // LIFETIME: call void @llvm.lifetime.end.p0(
}
// CHECK-LABEL: @lifetime_test
void lifetime_test(void) {
-// LIFETIME: call void @llvm.lifetime.start.p0(i64 1,
+// LIFETIME: call void @llvm.lifetime.start.p0(
helper_no_markers();
-// LIFETIME: call void @llvm.lifetime.end.p0(i64 1,
+// LIFETIME: call void @llvm.lifetime.end.p0(
}
diff --git a/clang/test/CodeGen/lifetime2.c b/clang/test/CodeGen/lifetime2.c
index 88c35fc..560389f 100644
--- a/clang/test/CodeGen/lifetime2.c
+++ b/clang/test/CodeGen/lifetime2.c
@@ -7,21 +7,21 @@ extern int bar(char *A, int n);
// CHECK-LABEL: @foo
int foo (int n) {
if (n) {
-// O2: call void @llvm.lifetime.start.p0(i64 100,
+// O2: call void @llvm.lifetime.start.p0(
char A[100];
return bar(A, 1);
-// O2: call void @llvm.lifetime.end.p0(i64 100,
+// O2: call void @llvm.lifetime.end.p0(
} else {
-// O2: call void @llvm.lifetime.start.p0(i64 100,
+// O2: call void @llvm.lifetime.start.p0(
char A[100];
return bar(A, 2);
-// O2: call void @llvm.lifetime.end.p0(i64 100,
+// O2: call void @llvm.lifetime.end.p0(
}
}
// CHECK-LABEL: @no_goto_bypass
void no_goto_bypass(void) {
- // O2: call void @llvm.lifetime.start.p0(i64 1,
+ // O2: call void @llvm.lifetime.start.p0(
char x;
l1:
bar(&x, 1);
@@ -45,16 +45,16 @@ void goto_bypass(void) {
void no_switch_bypass(int n) {
switch (n) {
case 1: {
- // O2: call void @llvm.lifetime.start.p0(i64 1,
- // O2: call void @llvm.lifetime.end.p0(i64 1,
+ // O2: call void @llvm.lifetime.start.p0(
+ // O2: call void @llvm.lifetime.end.p0(
char x;
bar(&x, 1);
break;
}
case 2:
n = n;
- // O2: call void @llvm.lifetime.start.p0(i64 5,
- // O2: call void @llvm.lifetime.end.p0(i64 5,
+ // O2: call void @llvm.lifetime.start.p0(
+ // O2: call void @llvm.lifetime.end.p0(
char y[5];
bar(y, 5);
break;
diff --git a/clang/test/CodeGen/lifetime3.cpp b/clang/test/CodeGen/lifetime3.cpp
index 64a097c..476ca0d 100644
--- a/clang/test/CodeGen/lifetime3.cpp
+++ b/clang/test/CodeGen/lifetime3.cpp
@@ -6,30 +6,30 @@ extern int bar(char *A, int n);
// CHECK-LABEL: @no_switch_bypass
extern "C" void no_switch_bypass(int n) {
- // O2: call void @llvm.lifetime.start.p0(i64 4,
+ // O2: call void @llvm.lifetime.start.p0(
switch (n += 1; int b=n) {
case 1: {
- // O2: call void @llvm.lifetime.start.p0(i64 1,
- // O2: call void @llvm.lifetime.end.p0(i64 1,
+ // O2: call void @llvm.lifetime.start.p0(
+ // O2: call void @llvm.lifetime.end.p0(
char x;
bar(&x, 1);
break;
}
case 2:
n = n;
- // O2: call void @llvm.lifetime.start.p0(i64 5,
- // O2: call void @llvm.lifetime.end.p0(i64 5,
+ // O2: call void @llvm.lifetime.start.p0(
+ // O2: call void @llvm.lifetime.end.p0(
char y[5];
bar(y, 5);
break;
}
- // O2: call void @llvm.lifetime.end.p0(i64 4,
+ // O2: call void @llvm.lifetime.end.p0(
}
// CHECK-LABEL: @switch_bypass
extern "C" void switch_bypass(int n) {
- // O2: call void @llvm.lifetime.start.p0(i64 4,
- // O2: call void @llvm.lifetime.end.p0(i64 4,
+ // O2: call void @llvm.lifetime.start.p0(
+ // O2: call void @llvm.lifetime.end.p0(
switch (n += 1; int b=n) {
case 1:
n = n;
diff --git a/clang/test/CodeGen/math-libcalls-tbaa.c b/clang/test/CodeGen/math-libcalls-tbaa.c
index f4e81ea..b2f502e 100644
--- a/clang/test/CodeGen/math-libcalls-tbaa.c
+++ b/clang/test/CodeGen/math-libcalls-tbaa.c
@@ -82,12 +82,12 @@ double test_remainder (double num[], double a) {
// CHECK-SAME: ptr noundef readonly captures(none) [[NUM:%.*]]) local_unnamed_addr #[[ATTR5:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[E:%.*]] = alloca i32, align 4
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[E]]) #[[ATTR9]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[E]]) #[[ATTR9]]
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[NUM]], i64 16
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA8]]
// CHECK-NEXT: [[CALL:%.*]] = call double @frexp(double noundef [[TMP0]], ptr noundef nonnull [[E]]) #[[ATTR9]]
// CHECK-NEXT: [[MUL:%.*]] = fmul double [[TMP0]], [[CALL]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[E]]) #[[ATTR9]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[E]]) #[[ATTR9]]
// CHECK-NEXT: ret double [[MUL]]
//
double test_frexp (double num[]) {
@@ -105,8 +105,8 @@ double test_frexp (double num[]) {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[SIN:%.*]] = alloca float, align 4
// CHECK-NEXT: [[COS:%.*]] = alloca float, align 4
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[SIN]]) #[[ATTR9]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[COS]]) #[[ATTR9]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[SIN]]) #[[ATTR9]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[COS]]) #[[ATTR9]]
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[NUM]], i64 8
// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: call void @sincos(float noundef [[TMP0]], ptr noundef nonnull [[SIN]], ptr noundef nonnull [[COS]]) #[[ATTR9]]
@@ -115,8 +115,8 @@ double test_frexp (double num[]) {
// CHECK-NEXT: [[MUL:%.*]] = fmul float [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: [[ADD:%.*]] = fadd float [[MUL]], [[TMP3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[COS]]) #[[ATTR9]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[SIN]]) #[[ATTR9]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[COS]]) #[[ATTR9]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[SIN]]) #[[ATTR9]]
// CHECK-NEXT: ret float [[ADD]]
//
float test_sincos (float num[]) {
diff --git a/clang/test/CodeGen/nofpclass.c b/clang/test/CodeGen/nofpclass.c
index 8e61ae2..6cd7ec1 100644
--- a/clang/test/CodeGen/nofpclass.c
+++ b/clang/test/CodeGen/nofpclass.c
@@ -927,14 +927,14 @@ _Complex _Float16 defined_complex_func_f16_ret(_Complex _Float16 c) {
// CLFINITEONLY-NEXT: [[CF16_REAL:%.*]] = load half, ptr [[CF16]], align 8
// CLFINITEONLY-NEXT: [[CF16_IMAGP:%.*]] = getelementptr inbounds nuw i8, ptr [[CF16]], i64 2
// CLFINITEONLY-NEXT: [[CF16_IMAG:%.*]] = load half, ptr [[CF16_IMAGP]], align 2
-// CLFINITEONLY-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12:[0-9]+]]
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12:[0-9]+]]
// CLFINITEONLY-NEXT: [[BYVAL_TEMP_IMAGP:%.*]] = getelementptr inbounds nuw i8, ptr [[BYVAL_TEMP]], i64 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE0]], ptr [[BYVAL_TEMP]], align 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE1]], ptr [[BYVAL_TEMP_IMAGP]], align 8
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x half> poison, half [[CF16_REAL]], i64 0
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_2_VEC_INSERT:%.*]] = insertelement <2 x half> [[COERCE5_SROA_0_0_VEC_INSERT]], half [[CF16_IMAG]], i64 1
// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float (float, ...) @variadic(float noundef nofpclass(nan inf) [[F32]], double noundef nofpclass(nan inf) [[CONV]], double noundef nofpclass(nan inf) [[F64]], half noundef nofpclass(nan inf) [[F16]], double noundef nofpclass(nan inf) [[V2F32_COERCE]], <2 x double> noundef nofpclass(nan inf) [[V2F64]], i32 noundef [[V2F16_COERCE]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE]], ptr noundef nonnull byval({ double, double }) align 8 [[BYVAL_TEMP]], <2 x half> noundef nofpclass(nan inf) [[COERCE5_SROA_0_2_VEC_INSERT]]) #[[ATTR11]]
-// CLFINITEONLY-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12]]
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12]]
// CLFINITEONLY-NEXT: ret float [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -1180,14 +1180,14 @@ float call_variadic(float f32, double f64, _Float16 f16,
// CLFINITEONLY-NEXT: [[CF16_REAL:%.*]] = load half, ptr [[CF16]], align 8
// CLFINITEONLY-NEXT: [[CF16_IMAGP:%.*]] = getelementptr inbounds nuw i8, ptr [[CF16]], i64 2
// CLFINITEONLY-NEXT: [[CF16_IMAG:%.*]] = load half, ptr [[CF16_IMAGP]], align 2
-// CLFINITEONLY-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12]]
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12]]
// CLFINITEONLY-NEXT: [[BYVAL_TEMP_IMAGP:%.*]] = getelementptr inbounds nuw i8, ptr [[BYVAL_TEMP]], i64 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE0]], ptr [[BYVAL_TEMP]], align 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE1]], ptr [[BYVAL_TEMP_IMAGP]], align 8
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x half> poison, half [[CF16_REAL]], i64 0
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_2_VEC_INSERT:%.*]] = insertelement <2 x half> [[COERCE5_SROA_0_0_VEC_INSERT]], half [[CF16_IMAG]], i64 1
// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float (float, ...) [[FPTR]](float noundef nofpclass(nan inf) [[F32]], double noundef nofpclass(nan inf) [[CONV]], double noundef nofpclass(nan inf) [[F64]], half noundef nofpclass(nan inf) [[F16]], double noundef nofpclass(nan inf) [[V2F32_COERCE]], <2 x double> noundef nofpclass(nan inf) [[V2F64]], i32 noundef [[V2F16_COERCE]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE]], ptr noundef nonnull byval({ double, double }) align 8 [[BYVAL_TEMP]], <2 x half> noundef nofpclass(nan inf) [[COERCE5_SROA_0_2_VEC_INSERT]]) #[[ATTR11]]
-// CLFINITEONLY-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12]]
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12]]
// CLFINITEONLY-NEXT: ret float [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
diff --git a/clang/test/CodeGen/object-size.cpp b/clang/test/CodeGen/object-size.cpp
index e6ae3ae..39c0f35 100644
--- a/clang/test/CodeGen/object-size.cpp
+++ b/clang/test/CodeGen/object-size.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm -o - %s -fexperimental-new-constant-interpreter | FileCheck %s
// C++-specific tests for __builtin_object_size
diff --git a/clang/test/CodeGen/packed-arrays.c b/clang/test/CodeGen/packed-arrays.c
index 097fa7f..51629b6 100644
--- a/clang/test/CodeGen/packed-arrays.c
+++ b/clang/test/CodeGen/packed-arrays.c
@@ -55,7 +55,7 @@ int align3_x0 = __alignof(((struct s3*) 0)->x[0]);
// CHECK: load i32, ptr %{{.*}}, align 1
// CHECK: }
// CHECK-LABEL: define{{.*}} i32 @f0_b
-// CHECK: load i32, ptr %{{.*}}, align 4
+// CHECK: load i32, ptr %{{.*}}, align 1
// CHECK: }
int f0_a(struct s0 *a) {
return a->x[1];
@@ -100,7 +100,7 @@ int f1_d(struct s1 *a) {
// CHECK: load i32, ptr %{{.*}}, align 1
// CHECK: }
// CHECK-LABEL: define{{.*}} i32 @f2_b
-// CHECK: load i32, ptr %{{.*}}, align 4
+// CHECK: load i32, ptr %{{.*}}, align 1
// CHECK: }
// CHECK-LABEL: define{{.*}} i32 @f2_c
// CHECK: load i32, ptr %{{.*}}, align 1
@@ -125,7 +125,7 @@ int f2_d(struct s2 *a) {
// CHECK: load i32, ptr %{{.*}}, align 1
// CHECK: }
// CHECK-LABEL: define{{.*}} i32 @f3_b
-// CHECK: load i32, ptr %{{.*}}, align 4
+// CHECK: load i32, ptr %{{.*}}, align 1
// CHECK: }
// CHECK-LABEL: define{{.*}} i32 @f3_c
// CHECK: load i32, ptr %{{.*}}, align 1
diff --git a/clang/test/CodeGen/palignr.c b/clang/test/CodeGen/palignr.c
index 092937a..9b80cf9 100644
--- a/clang/test/CodeGen/palignr.c
+++ b/clang/test/CodeGen/palignr.c
@@ -1,16 +1,16 @@
// REQUIRES: x86-registered-target
-// RUN: %clang_cc1 %s -triple=i686-apple-darwin -target-feature +ssse3 -O1 -S -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple=i686-apple-darwin -target-feature +ssse3 -O1 -S -flax-vector-conversions=none -o - | FileCheck %s
#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
-typedef __attribute__((vector_size(16))) int int4;
+typedef char __v16qi __attribute__((__vector_size__(16)));
// CHECK: palignr $15, %xmm1, %xmm0
-int4 align1(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 15); }
+__v16qi align1(__v16qi a, __v16qi b) { return _mm_alignr_epi8(a, b, 15); }
// CHECK: ret
// CHECK: ret
// CHECK-NOT: palignr
-int4 align2(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 16); }
+__v16qi align2(__v16qi a, __v16qi b) { return _mm_alignr_epi8(a, b, 16); }
// CHECK: psrldq $1, %xmm0
-int4 align3(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 17); }
+__v16qi align3(__v16qi a, __v16qi b) { return _mm_alignr_epi8(a, b, 17); }
// CHECK: xor
-int4 align4(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 32); }
+__v16qi align4(__v16qi a, __v16qi b) { return _mm_alignr_epi8(a, b, 32); }
diff --git a/clang/test/CodeGen/pointer-arithmetic-align.c b/clang/test/CodeGen/pointer-arithmetic-align.c
new file mode 100644
index 0000000..745ab84
--- /dev/null
+++ b/clang/test/CodeGen/pointer-arithmetic-align.c
@@ -0,0 +1,83 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -O1 -triple=x86_64-unknown-linux %s -emit-llvm -o - | FileCheck %s
+
+typedef unsigned char uint8_t;
+typedef unsigned long long uint64_t;
+
+struct a {
+ uint64_t b;
+ uint8_t block[16];
+};
+
+// CHECK-LABEL: define dso_local void @ptradd_0(
+// CHECK-SAME: ptr noundef writeonly captures(none) initializes((8, 9)) [[CTX:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[BLOCK:%.*]] = getelementptr inbounds nuw i8, ptr [[CTX]], i64 8
+// CHECK-NEXT: store i8 0, ptr [[BLOCK]], align 8, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-NEXT: ret void
+//
+void ptradd_0(struct a *ctx) {
+ *(ctx->block + 0) = 0;
+}
+
+// CHECK-LABEL: define dso_local void @ptradd_4(
+// CHECK-SAME: ptr noundef writeonly captures(none) initializes((12, 13)) [[CTX:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[CTX]], i64 12
+// CHECK-NEXT: store i8 0, ptr [[ADD_PTR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+void ptradd_4(struct a *ctx) {
+ *(ctx->block + 4) = 0;
+}
+
+// CHECK-LABEL: define dso_local void @ptradd_8(
+// CHECK-SAME: ptr noundef writeonly captures(none) initializes((16, 17)) [[CTX:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[CTX]], i64 16
+// CHECK-NEXT: store i8 0, ptr [[ADD_PTR]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+void ptradd_8(struct a *ctx) {
+ *(ctx->block + 8) = 0;
+}
+
+// CHECK-LABEL: define dso_local void @ptradd_8_commuted(
+// CHECK-SAME: ptr noundef writeonly captures(none) initializes((16, 17)) [[CTX:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[CTX]], i64 16
+// CHECK-NEXT: store i8 0, ptr [[ADD_PTR]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+void ptradd_8_commuted(struct a *ctx) {
+ *(8 + ctx->block) = 0;
+}
+
+// CHECK-LABEL: define dso_local void @ptrsub_4(
+// CHECK-SAME: ptr noundef writeonly captures(none) initializes((8, 9)) [[CTX:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[CTX]], i64 8
+// CHECK-NEXT: store i8 0, ptr [[ADD_PTR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+void ptrsub_4(struct a *ctx) {
+ *(&ctx->block[4] - 4) = 0;
+}
+
+// CHECK-LABEL: define dso_local void @neg_ptradd_var_index(
+// CHECK-SAME: ptr noundef writeonly captures(none) [[CTX:%.*]], i8 noundef zeroext [[IDX:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[BLOCK:%.*]] = getelementptr inbounds nuw i8, ptr [[CTX]], i64 8
+// CHECK-NEXT: [[IDX_EXT:%.*]] = zext i8 [[IDX]] to i64
+// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[BLOCK]], i64 [[IDX_EXT]]
+// CHECK-NEXT: store i8 0, ptr [[ADD_PTR]], align 1, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+void neg_ptradd_var_index(struct a *ctx, uint8_t idx) {
+ *(ctx->block + idx) = 0;
+}
+//.
+// CHECK: [[TBAA2]] = !{[[META3:![0-9]+]], [[META3]], i64 0}
+// CHECK: [[META3]] = !{!"omnipotent char", [[META4:![0-9]+]], i64 0}
+// CHECK: [[META4]] = !{!"Simple C/C++ TBAA"}
+//.
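
The new test above pins down a simple alignment rule: struct a is 8-byte aligned because of its uint64_t member, so a store through ctx->block + N may use the largest power of two dividing the total byte offset 8 + N, while a variable index falls back to the element alignment of 1. A minimal sketch of that rule, using a hypothetical provable_align helper that is not part of the patch:

#include <assert.h>

/* Largest power-of-two alignment provable for base+off when the base
 * pointer is base_align-aligned and off is a compile-time constant. */
static unsigned provable_align(unsigned base_align, unsigned off) {
  unsigned a = base_align;
  while (off % a) /* halve until the offset is a multiple of a */
    a >>= 1;
  return a;
}

int main(void) {
  /* block lives at byte offset 8 inside the 8-aligned struct a. */
  assert(provable_align(8, 8 + 0) == 8); /* ptradd_0: store align 8 */
  assert(provable_align(8, 8 + 4) == 4); /* ptradd_4: store align 4 */
  assert(provable_align(8, 8 + 8) == 8); /* ptradd_8: store align 8 */
  return 0;
}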
diff --git a/clang/test/CodeGen/ptrauth-qualifier-blocks.c b/clang/test/CodeGen/ptrauth-qualifier-blocks.c
index 62da59c..f460da2 100644
--- a/clang/test/CodeGen/ptrauth-qualifier-blocks.c
+++ b/clang/test/CodeGen/ptrauth-qualifier-blocks.c
@@ -82,9 +82,15 @@ void test_block_address_byref_capture() {
// CHECK: store i32 33554432,
// CHECK: store i32 48,
// CHECK: [[COPY_HELPER_FIELD:%.*]] = getelementptr inbounds nuw [[BYREF_T]], ptr [[BYREF]], i32 0, i32 4
- // CHECK: store ptr @__Block_byref_object_copy_, ptr [[COPY_HELPER_FIELD]], align
+ // CHECK: [[T0:%.*]] = ptrtoint ptr [[COPY_HELPER_FIELD]] to i64
+ // CHECK: [[T1:%.*]] = call i64 @llvm.ptrauth.sign(i64 ptrtoint (ptr @__Block_byref_object_copy_ to i64), i32 0, i64 [[T0]])
+ // CHECK: [[T2:%.*]] = inttoptr i64 [[T1]] to ptr
+ // CHECK: store ptr [[T2]], ptr [[COPY_HELPER_FIELD]], align
// CHECK: [[DISPOSE_HELPER_FIELD:%.*]] = getelementptr inbounds nuw [[BYREF_T]], ptr [[BYREF]], i32 0, i32 5
- // CHECK: store ptr @__Block_byref_object_dispose_, ptr [[DISPOSE_HELPER_FIELD]], align
+ // CHECK: [[T0:%.*]] = ptrtoint ptr [[DISPOSE_HELPER_FIELD]] to i64
+ // CHECK: [[T1:%.*]] = call i64 @llvm.ptrauth.sign(i64 ptrtoint (ptr @__Block_byref_object_dispose_ to i64), i32 0, i64 [[T0]])
+ // CHECK: [[T2:%.*]] = inttoptr i64 [[T1]] to ptr
+ // CHECK: store ptr [[T2]], ptr [[DISPOSE_HELPER_FIELD]], align
// flags - copy/dispose required
// CHECK: store i32 1107296256, ptr
__block struct A * __ptrauth(1, 1, 60) ptr = createA();
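
The updated checks above assert that the __block byref copy/dispose helper pointers are now ptrauth-signed with key 0 and the address of their storage slot as the discriminator, rather than stored raw. A rough source-level analogue, assuming an arm64e-style target where clang's ptrauth builtins are available (the helper name is illustrative):

#include <stdint.h>

/* Sign a block-helper pointer the way the new IR checks expect:
 * key 0 (IA), discriminated by the slot the pointer is stored into. */
static void *sign_helper(void *helper, void **slot) {
  return __builtin_ptrauth_sign_unauthenticated(
      helper, /*key=*/0, /*discriminator=*/(uint64_t)slot);
}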
diff --git a/clang/test/CodeGen/rounding-math.cpp b/clang/test/CodeGen/rounding-math.cpp
index 264031d..5c44fd3 100644
--- a/clang/test/CodeGen/rounding-math.cpp
+++ b/clang/test/CodeGen/rounding-math.cpp
@@ -11,3 +11,55 @@ float V3 = func_01(1.0F, 2.0F);
// CHECK: @V1 = {{.*}}global float 1.000000e+00, align 4
// CHECK: @V2 = {{.*}}global float 1.000000e+00, align 4
// CHECK: @V3 = {{.*}}global float 3.000000e+00, align 4
+
+void test_builtin_elementwise_fma_round_upward() {
+ #pragma STDC FENV_ACCESS ON
+ #pragma STDC FENV_ROUND FE_UPWARD
+
+ // CHECK: store float 0x4018000100000000, ptr %f1
+ // CHECK: store float 0x4018000100000000, ptr %f2
+ constexpr float f1 = __builtin_elementwise_fma(2.0F, 3.000001F, 0.000001F);
+ constexpr float f2 = 2.0F * 3.000001F + 0.000001F;
+ static_assert(f1 == f2);
+ static_assert(f1 == 6.00000381F);
+ // CHECK: store double 0x40180000C9539B89, ptr %d1
+ // CHECK: store double 0x40180000C9539B89, ptr %d2
+ constexpr double d1 = __builtin_elementwise_fma(2.0, 3.000001, 0.000001);
+ constexpr double d2 = 2.0 * 3.000001 + 0.000001;
+ static_assert(d1 == d2);
+ static_assert(d1 == 6.0000030000000004);
+}
+
+void test_builtin_elementwise_fma_round_downward() {
+ #pragma STDC FENV_ACCESS ON
+ #pragma STDC FENV_ROUND FE_DOWNWARD
+
+ // CHECK: store float 0x40180000C0000000, ptr %f3
+ // CHECK: store float 0x40180000C0000000, ptr %f4
+ constexpr float f3 = __builtin_elementwise_fma(2.0F, 3.000001F, 0.000001F);
+ constexpr float f4 = 2.0F * 3.000001F + 0.000001F;
+ static_assert(f3 == f4);
+ // CHECK: store double 0x40180000C9539B87, ptr %d3
+ // CHECK: store double 0x40180000C9539B87, ptr %d4
+ constexpr double d3 = __builtin_elementwise_fma(2.0, 3.000001, 0.000001);
+ constexpr double d4 = 2.0 * 3.000001 + 0.000001;
+ static_assert(d3 == d4);
+}
+
+void test_builtin_elementwise_fma_round_nearest() {
+ #pragma STDC FENV_ACCESS ON
+ #pragma STDC FENV_ROUND FE_TONEAREST
+
+ // CHECK: store float 0x40180000C0000000, ptr %f5
+ // CHECK: store float 0x40180000C0000000, ptr %f6
+ constexpr float f5 = __builtin_elementwise_fma(2.0F, 3.000001F, 0.000001F);
+ constexpr float f6 = 2.0F * 3.000001F + 0.000001F;
+ static_assert(f5 == f6);
+ static_assert(f5 == 6.00000286F);
+ // CHECK: store double 0x40180000C9539B89, ptr %d5
+ // CHECK: store double 0x40180000C9539B89, ptr %d6
+ constexpr double d5 = __builtin_elementwise_fma(2.0, 3.000001, 0.000001);
+ constexpr double d6 = 2.0 * 3.000001 + 0.000001;
+ static_assert(d5 == d6);
+ static_assert(d5 == 6.0000030000000004);
+}
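
The constexpr evaluations added above hinge on a last-bits effect: 2.0F * 3.000001F + 0.000001F is roughly 6.000003, and switching between FE_UPWARD and FE_DOWNWARD changes only the final bits of the stored float (0x4018000100000000 versus 0x40180000C0000000 in the checks). A runtime analogue of the same contract, sketched with the standard fenv interface:

#include <fenv.h>
#include <math.h>
#include <stdio.h>

int main(void) {
#pragma STDC FENV_ACCESS ON
  /* fmaf rounds once, so the active rounding mode decides the result. */
  fesetround(FE_UPWARD);
  float up = fmaf(2.0F, 3.000001F, 0.000001F);
  fesetround(FE_DOWNWARD);
  float down = fmaf(2.0F, 3.000001F, 0.000001F);
  printf("up=%.9g down=%.9g\n", up, down); /* differ in the final bits */
  return 0;
}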
diff --git a/clang/test/CodeGen/target-builtin-noerror.c b/clang/test/CodeGen/target-builtin-noerror.c
index 0bbd8c3..5cf53b2 100644
--- a/clang/test/CodeGen/target-builtin-noerror.c
+++ b/clang/test/CodeGen/target-builtin-noerror.c
@@ -32,15 +32,15 @@ int qq(void) {
// Test that fma and fma4 are both separately and combined valid for an fma intrinsic.
__m128 __attribute__((target("fma"))) fma_1(__m128 a, __m128 b, __m128 c) {
- return __builtin_ia32_vfmaddps(a, b, c);
+ return __builtin_ia32_vfmaddsubps(a, b, c);
}
__m128 __attribute__((target("fma4"))) fma_2(__m128 a, __m128 b, __m128 c) {
- return __builtin_ia32_vfmaddps(a, b, c);
+ return __builtin_ia32_vfmaddsubps(a, b, c);
}
__m128 __attribute__((target("fma,fma4"))) fma_3(__m128 a, __m128 b, __m128 c) {
- return __builtin_ia32_vfmaddps(a, b, c);
+ return __builtin_ia32_vfmaddsubps(a, b, c);
}
void verifyfeaturestrings(void) {
diff --git a/clang/test/CodeGen/target-data.c b/clang/test/CodeGen/target-data.c
index 92fe3eb6..eecee69 100644
--- a/clang/test/CodeGen/target-data.c
+++ b/clang/test/CodeGen/target-data.c
@@ -144,11 +144,11 @@
// RUN: %clang_cc1 -triple nvptx-unknown -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=NVPTX
-// NVPTX: target datalayout = "e-p:32:32-p6:32:32-p7:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64"
+// NVPTX: target datalayout = "e-p:32:32-p6:32:32-p7:32:32-i64:64-i128:128-i256:256-v16:16-v32:32-n16:32:64"
// RUN: %clang_cc1 -triple nvptx64-unknown -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=NVPTX64
-// NVPTX64: target datalayout = "e-p6:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64"
+// NVPTX64: target datalayout = "e-p6:32:32-i64:64-i128:128-i256:256-v16:16-v32:32-n16:32:64"
// RUN: %clang_cc1 -triple r600-unknown -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=R600
diff --git a/clang/test/CodeGen/target-features-error-3.c b/clang/test/CodeGen/target-features-error-3.c
new file mode 100644
index 0000000..ff4866a
--- /dev/null
+++ b/clang/test/CodeGen/target-features-error-3.c
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 %s -triple=x86_64-linux-gnu -emit-llvm -verify -o /dev/null
+
+typedef double __v2df __attribute__((__vector_size__(16)));
+
+__v2df __attribute__((target("sse4.1"))) foo() {
+ __v2df v = {0.0, 0.0};
+ return __builtin_ia32_roundpd(v, 2);
+}
+
+__v2df __attribute__((flatten)) bar() {
+ return foo(); // expected-error {{flatten function 'bar' calls 'foo' which requires target feature 'sse4.1', but the caller is compiled without support for 'sse4.1'}}
+}
diff --git a/clang/test/CodeGen/target-features-error-4.c b/clang/test/CodeGen/target-features-error-4.c
new file mode 100644
index 0000000..fe4879e
--- /dev/null
+++ b/clang/test/CodeGen/target-features-error-4.c
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 %s -triple=x86_64-linux-gnu -emit-llvm -verify -o /dev/null
+
+typedef double __v2df __attribute__((__vector_size__(16)));
+
+__v2df __attribute__((target("sse4.1"))) foo() {
+ __v2df v = {0.0, 0.0};
+ return __builtin_ia32_roundpd(v, 2);
+}
+
+__v2df __attribute__((target("no-sse4.1"), flatten)) bar() {
+ return foo(); // expected-error {{flatten function 'bar' calls 'foo' which requires target feature 'sse4.1', but the caller is compiled without support for 'sse4.1'}}
+}
diff --git a/clang/test/CodeGen/target-features-error-5.c b/clang/test/CodeGen/target-features-error-5.c
new file mode 100644
index 0000000..8bc3ba1
--- /dev/null
+++ b/clang/test/CodeGen/target-features-error-5.c
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 %s -triple=x86_64-linux-gnu -target-feature +sse4.1 -emit-llvm -verify -o /dev/null
+
+typedef double __v2df __attribute__((__vector_size__(16)));
+
+__v2df foo() {
+ __v2df v = {0.0, 0.0};
+ return __builtin_ia32_roundpd(v, 2);
+}
+
+__v2df __attribute__((target("no-sse4.1"), flatten)) bar() {
+ return foo(); // expected-error {{flatten function 'bar' calls 'foo' which requires target feature 'sse4.1', but the caller is compiled without support for 'sse4.1'}}
+}
diff --git a/clang/test/CodeGen/target-features-no-error-2.c b/clang/test/CodeGen/target-features-no-error-2.c
new file mode 100644
index 0000000..eeec3f3
--- /dev/null
+++ b/clang/test/CodeGen/target-features-no-error-2.c
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 %s -triple=x86_64-linux-gnu -emit-llvm -verify -o /dev/null
+
+typedef double __v2df __attribute__((__vector_size__(16)));
+
+__v2df __attribute__((target("sse4.1"))) foo() {
+ __v2df v = {0.0, 0.0};
+ return __builtin_ia32_roundpd(v, 2);
+}
+
+__v2df __attribute__((target("sse4.1"), flatten)) bar() {
+ return foo(); // expected-no-diagnostics
+}
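
Taken together, the new target-features-error-{3,4,5}.c and target-features-no-error-2.c tests pin down one rule: flatten forces the entire call tree to be inlined, so every target feature a callee requires must also be enabled in the caller. A compilable sketch of the accepted shape on x86-64 (the rejected shape is left as a comment because it fails to compile by design):

typedef double v2df __attribute__((__vector_size__(16)));

__attribute__((target("sse4.1"))) static v2df callee(void) {
  v2df v = {0.0, 0.0};
  return __builtin_ia32_roundpd(v, 2);
}

/* Accepted: the caller advertises every feature the callee needs. */
__attribute__((target("sse4.1"), flatten)) v2df ok(void) {
  return callee();
}

/* Rejected, per the tests above: compiling the caller without sse4.1
 * support (either plain or with target("no-sse4.1")) makes the forced
 * inlining ill-formed, and clang reports that the flatten function
 * calls a function requiring target feature 'sse4.1'. */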
diff --git a/clang/test/CodeGen/temporary-lifetime-exceptions.cpp b/clang/test/CodeGen/temporary-lifetime-exceptions.cpp
index 50e4a0f..45400c2 100644
--- a/clang/test/CodeGen/temporary-lifetime-exceptions.cpp
+++ b/clang/test/CodeGen/temporary-lifetime-exceptions.cpp
@@ -8,15 +8,15 @@ A Baz(const A&);
void Test1() {
// CHECK-LABEL: @_Z5Test1v(
- // CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TMP:[^ ]+]])
- // CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TMP1:[^ ]+]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP:[^ ]+]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP1:[^ ]+]])
// Normal exit
- // CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TMP1]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TMP]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP1]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP]])
// Exception exit
- // CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TMP1]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TMP]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP1]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP]])
Baz(Baz(A()));
}
diff --git a/clang/test/CodeGen/temporary-lifetime.cpp b/clang/test/CodeGen/temporary-lifetime.cpp
index 9f085d4..0408729 100644
--- a/clang/test/CodeGen/temporary-lifetime.cpp
+++ b/clang/test/CodeGen/temporary-lifetime.cpp
@@ -21,27 +21,27 @@ T Baz();
void Test1() {
// CHECK-DTOR-LABEL: Test1
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooIRK1AEvOT_
// CHECK-DTOR: call void @_ZN1AD1Ev(ptr nonnull {{[^,]*}} %[[VAR]])
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR]])
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooIRK1AEvOT_
// CHECK-DTOR: call void @_ZN1AD1Ev(ptr nonnull {{[^,]*}} %[[VAR]])
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
// CHECK-DTOR: }
// CHECK-NO-DTOR-LABEL: Test1
- // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR:.+]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-NO-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR:[^ ]+]])
// CHECK-NO-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR]])
- // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR:.+]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-NO-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR:[^ ]+]])
// CHECK-NO-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
// CHECK-NO-DTOR: }
{
const A &a = A{};
@@ -55,27 +55,27 @@ void Test1() {
void Test2() {
// CHECK-DTOR-LABEL: Test2
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR1:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR1:.+]])
// CHECK-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR1:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR2:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR2:.+]])
// CHECK-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR2:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooIRK1AEvOT_
// CHECK-DTOR: call void @_ZN1AD1Ev(ptr nonnull {{[^,]*}} %[[VAR2]])
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR2]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR2]])
// CHECK-DTOR: call void @_ZN1AD1Ev(ptr nonnull {{[^,]*}} %[[VAR1]])
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR1]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR1]])
// CHECK-DTOR: }
// CHECK-NO-DTOR-LABEL: Test2
- // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR1:.+]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR1:.+]])
// CHECK-NO-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR1:[^ ]+]])
// CHECK-NO-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR2:.+]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR2:.+]])
// CHECK-NO-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR2:[^ ]+]])
// CHECK-NO-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR2]])
- // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR1]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR2]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR1]])
// CHECK-NO-DTOR: }
const A &a = A{};
Foo(a);
@@ -135,16 +135,16 @@ int Test5() {
void Test6() {
// CHECK-DTOR-LABEL: Test6
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 {{[0-9]+}}, ptr nonnull %[[ADDR:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-DTOR: call i32 @_Z3BazIiET_v()
// CHECK-DTOR: store
// CHECK-DTOR: call void @_Z3FooIiEvOT_
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 {{[0-9]+}}, ptr nonnull %[[ADDR]])
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 {{[0-9]+}}, ptr nonnull %[[ADDR:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-DTOR: call i32 @_Z3BazIiET_v()
// CHECK-DTOR: store
// CHECK-DTOR: call void @_Z3FooIiEvOT_
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 {{[0-9]+}}, ptr nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
// CHECK-DTOR: }
Foo(Baz<int>());
Foo(Baz<int>());
@@ -152,16 +152,16 @@ void Test6() {
void Test7() {
// CHECK-DTOR-LABEL: Test7
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-DTOR: call void @_Z3BazI1AET_v({{.*}} %[[SLOT:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooI1AEvOT_({{.*}} %[[SLOT]])
// CHECK-DTOR: call void @_ZN1AD1Ev(ptr nonnull {{[^,]*}} %[[SLOT]])
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR]])
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-DTOR: call void @_Z3BazI1AET_v({{.*}} %[[SLOT:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooI1AEvOT_({{.*}} %[[SLOT]])
// CHECK-DTOR: call void @_ZN1AD1Ev(ptr nonnull {{[^,]*}} %[[SLOT]])
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
// CHECK-DTOR: }
Foo(Baz<A>());
Foo(Baz<A>());
diff --git a/clang/test/CodeGen/ubsan-trap-reason-add-overflow.c b/clang/test/CodeGen/ubsan-trap-reason-add-overflow.c
deleted file mode 100644
index 225778d..0000000
--- a/clang/test/CodeGen/ubsan-trap-reason-add-overflow.c
+++ /dev/null
@@ -1,9 +0,0 @@
-// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
-// RUN: -fsanitize=signed-integer-overflow -fsanitize-trap=signed-integer-overflow -emit-llvm %s -o - | FileCheck %s
-
-int add_overflow(int a, int b) { return a + b; }
-
-// CHECK-LABEL: @add_overflow
-// CHECK: call void @llvm.ubsantrap(i8 0) {{.*}}!dbg [[LOC:![0-9]+]]
-// CHECK: [[LOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
-// CHECK: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$Integer addition overflowed"
diff --git a/clang/test/CodeGen/ubsan-trap-reason-flag.c b/clang/test/CodeGen/ubsan-trap-reason-flag.c
deleted file mode 100644
index 5cc16d1..0000000
--- a/clang/test/CodeGen/ubsan-trap-reason-flag.c
+++ /dev/null
@@ -1,22 +0,0 @@
-// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
-// RUN: -fsanitize=signed-integer-overflow -fsanitize-trap=signed-integer-overflow -emit-llvm %s -o - \
-// RUN: | FileCheck %s --check-prefix=ANNOTATE
-
-// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
-// RUN: -fsanitize=signed-integer-overflow -fsanitize-trap=signed-integer-overflow \
-// RUN: -fsanitize-debug-trap-reasons -emit-llvm %s -o - | FileCheck %s --check-prefix=ANNOTATE
-
-// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
-// RUN: -fsanitize=signed-integer-overflow -fsanitize-trap=signed-integer-overflow \
-// RUN: -fno-sanitize-debug-trap-reasons -emit-llvm %s -o - | FileCheck %s --check-prefix=NO-ANNOTATE
-
-int add_overflow(int a, int b) { return a + b; }
-
-// ANNOTATE-LABEL: @add_overflow
-// ANNOTATE: call void @llvm.ubsantrap(i8 0) {{.*}}!dbg [[LOC:![0-9]+]]
-// ANNOTATE: [[LOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
-// ANNOTATE: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$Integer addition overflowed"
-
-// NO-ANNOTATE-LABEL: @add_overflow
-// NO-ANNOTATE: call void @llvm.ubsantrap(i8 0) {{.*}}!dbg [[LOC:![0-9]+]]
-// NO-ANNOTATE-NOT: __clang_trap_msg
diff --git a/clang/test/CodeGen/ubsan-trap-reason-mul-overflow.c b/clang/test/CodeGen/ubsan-trap-reason-mul-overflow.c
deleted file mode 100644
index cf9a0b4..0000000
--- a/clang/test/CodeGen/ubsan-trap-reason-mul-overflow.c
+++ /dev/null
@@ -1,9 +0,0 @@
-// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
-// RUN: -fsanitize=signed-integer-overflow -fsanitize-trap=signed-integer-overflow -emit-llvm %s -o - | FileCheck %s
-
-int mul_overflow(int a, int b) { return a * b; }
-
-// CHECK-LABEL: @mul_overflow
-// CHECK: call void @llvm.ubsantrap(i8 12) {{.*}}!dbg [[LOC:![0-9]+]]
-// CHECK: [[LOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
-// CHECK: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$Integer multiplication overflowed"
diff --git a/clang/test/CodeGen/ubsan-trap-reason-sub-overflow.c b/clang/test/CodeGen/ubsan-trap-reason-sub-overflow.c
deleted file mode 100644
index 62aa7fc..0000000
--- a/clang/test/CodeGen/ubsan-trap-reason-sub-overflow.c
+++ /dev/null
@@ -1,9 +0,0 @@
-// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
-// RUN: -fsanitize=signed-integer-overflow -fsanitize-trap=signed-integer-overflow -emit-llvm %s -o - | FileCheck %s
-
-int sub_overflow(int a, int b) { return a - b; }
-
-// CHECK-LABEL: @sub_overflow
-// CHECK: call void @llvm.ubsantrap(i8 21) {{.*}}!dbg [[LOC:![0-9]+]]
-// CHECK: [[LOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
-// CHECK: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$Integer subtraction overflowed"
diff --git a/clang/test/CodeGen/unified-lto-pipeline.c b/clang/test/CodeGen/unified-lto-pipeline.c
index e13cd57..0e0c503 100644
--- a/clang/test/CodeGen/unified-lto-pipeline.c
+++ b/clang/test/CodeGen/unified-lto-pipeline.c
@@ -11,8 +11,10 @@
/// Check that pass pipelines for thin, thin-unified, full-unified all match.
// RUN: diff %t.0.txt %t.1.txt
// RUN: diff %t.0.txt %t.2.txt
-/// Pass pipeline for full is different.
-// RUN: not diff %t.0.txt %t.3.txt
+/// Pass pipeline for full is different. Unified uses the full Linux pipeline, except that it runs ThinLTOBitcodeWriterPass instead of BitcodeWriterPass.
+// RUN: not diff -u %t.0.txt %t.3.txt | FileCheck %s --check-prefix=DIFF --implicit-check-not="{{^[-+!<>] }}"
+// DIFF: -Running pass: ThinLTOBitcodeWriterPass
+// DIFF-NEXT: +Running pass: BitcodeWriterPass
int foo() {
return 2 + 2;
diff --git a/clang/test/CodeGen/union-tbaa1.c b/clang/test/CodeGen/union-tbaa1.c
index 1e2f384..9f2b0e9 100644
--- a/clang/test/CodeGen/union-tbaa1.c
+++ b/clang/test/CodeGen/union-tbaa1.c
@@ -1,3 +1,4 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 %s -triple hexagon-unknown-elf -O2 -emit-llvm -o - | FileCheck %s
typedef union __attribute__((aligned(4))) {
@@ -7,33 +8,33 @@ typedef union __attribute__((aligned(4))) {
void bar(vect32 p[][2]);
-// CHECK-LABEL: define dso_local void @fred
-// CHECK-SAME: (i32 noundef [[NUM:%.*]], ptr noundef writeonly captures(none) initializes((0, 8)) [[VEC:%.*]], ptr noundef readonly captures(none) [[INDEX:%.*]], ptr noundef readonly captures(none) [[ARR:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
-// CHECK-NEXT: entry:
+// CHECK-LABEL: define dso_local void @fred(
+// CHECK-SAME: i32 noundef [[NUM:%.*]], ptr noundef writeonly captures(none) initializes((0, 8)) [[VEC:%.*]], ptr noundef readonly captures(none) [[INDEX:%.*]], ptr noundef readonly captures(none) [[ARR:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP:%.*]] = alloca [4 x [2 x %union.vect32]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[TMP]]) #[[ATTR3:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP]]) #[[ATTR3:[0-9]+]]
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[INDEX]], align 4, !tbaa [[TBAA2:![0-9]+]]
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARR]], i32 [[TMP0]]
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TMP1]], [[NUM]]
-// CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP0]]
+// CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x %union.vect32], ptr [[TMP]], i32 [[TMP0]]
// CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 8, !tbaa [[TBAA6:![0-9]+]]
// CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARR]], i32 [[TMP0]], i32 1
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: [[MUL6:%.*]] = mul i32 [[TMP2]], [[NUM]]
-// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP0]], i32 1
+// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %union.vect32], ptr [[TMP]], i32 [[TMP0]], i32 1
// CHECK-NEXT: store i32 [[MUL6]], ptr [[ARRAYIDX8]], align 4, !tbaa [[TBAA6]]
// CHECK-NEXT: [[TMP3:%.*]] = lshr i32 [[MUL]], 16
// CHECK-NEXT: store i32 [[TMP3]], ptr [[VEC]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[INDEX]], align 4, !tbaa [[TBAA2]]
-// CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP4]], i32 1
+// CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [2 x %union.vect32], ptr [[TMP]], i32 [[TMP4]], i32 1
// CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX14]], i32 2
// CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX15]], align 2, !tbaa [[TBAA6]]
// CHECK-NEXT: [[CONV16:%.*]] = zext i16 [[TMP5]] to i32
// CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds nuw i8, ptr [[VEC]], i32 4
// CHECK-NEXT: store i32 [[CONV16]], ptr [[ARRAYIDX17]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: call void @bar(ptr noundef nonnull [[TMP]]) #[[ATTR3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[TMP]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP]]) #[[ATTR3]]
// CHECK-NEXT: ret void
//
void fred(unsigned Num, int Vec[2], int *Index, int Arr[4][2]) {
@@ -45,5 +46,10 @@ void fred(unsigned Num, int Vec[2], int *Index, int Arr[4][2]) {
bar(Tmp);
}
-// CHECK-DAG: [[CHAR:![0-9]+]] = !{!"omnipotent char"
-// CHECK-DAG: [[TBAA6]] = !{[[CHAR]], [[CHAR]], i64 0}
+//.
+// CHECK: [[TBAA2]] = !{[[META3:![0-9]+]], [[META3]], i64 0}
+// CHECK: [[META3]] = !{!"int", [[META4:![0-9]+]], i64 0}
+// CHECK: [[META4]] = !{!"omnipotent char", [[META5:![0-9]+]], i64 0}
+// CHECK: [[META5]] = !{!"Simple C/C++ TBAA"}
+// CHECK: [[TBAA6]] = !{[[META4]], [[META4]], i64 0}
+//.
diff --git a/clang/test/CodeGen/volatile-1.c b/clang/test/CodeGen/volatile-1.c
index 7116a4e..9d97d89 100644
--- a/clang/test/CodeGen/volatile-1.c
+++ b/clang/test/CodeGen/volatile-1.c
@@ -1,4 +1,3 @@
-// XFAIL: target=aarch64-pc-windows-msvc
// RUN: %clang_cc1 -Wno-return-type -Wno-unused-value -emit-llvm %s -w -o - | FileCheck %s
// CHECK: @i = {{(dso_local )?}}global [[INT:i[0-9]+]] 0
@@ -27,44 +26,44 @@ int printf(const char *, ...);
void test(void) {
// CHECK: load volatile [[INT]], ptr @i
i;
- // CHECK-NEXT: load volatile [[INT]], ptr @ci, align 4
- // CHECK-NEXT: load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align 4
+ // CHECK-NEXT: load volatile [[INT]], ptr @ci, align [[ALIGN:[0-9]+]]
+ // CHECK-NEXT: load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align [[ALIGN_GEP:[0-9]+]]
// CHECK-NEXT: sitofp [[INT]]
(float)(ci);
- // CHECK-NEXT: load volatile [[INT]], ptr @ci, align 4
- // CHECK-NEXT: load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align 4
+ // CHECK-NEXT: load volatile [[INT]], ptr @ci, align [[ALIGN]]
+ // CHECK-NEXT: load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align [[ALIGN_GEP]]
(void)ci;
// CHECK-NEXT: memcpy
(void)a;
- // CHECK-NEXT: [[R:%.*]] = load volatile [[INT]], ptr @ci, align 4
- // CHECK-NEXT: [[I:%.*]] = load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align 4
- // CHECK-NEXT: store volatile [[INT]] [[R]], ptr @ci, align 4
- // CHECK-NEXT: store volatile [[INT]] [[I]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align 4
+ // CHECK-NEXT: [[R:%.*]] = load volatile [[INT]], ptr @ci, align [[ALIGN]]
+ // CHECK-NEXT: [[I:%.*]] = load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align [[ALIGN_GEP]]
+ // CHECK-NEXT: store volatile [[INT]] [[R]], ptr @ci, align [[ALIGN]]
+ // CHECK-NEXT: store volatile [[INT]] [[I]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align [[ALIGN_GEP]]
(void)(ci=ci);
// CHECK-NEXT: [[T:%.*]] = load volatile [[INT]], ptr @j
// CHECK-NEXT: store volatile [[INT]] [[T]], ptr @i
(void)(i=j);
- // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]], ptr @ci, align 4
- // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align 4
- // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]], ptr @ci, align 4
- // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align 4
+ // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]], ptr @ci, align [[ALIGN]]
+ // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align [[ALIGN_GEP]]
+ // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]], ptr @ci, align [[ALIGN]]
+ // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align [[ALIGN_GEP]]
// Not sure why they're ordered this way.
// CHECK-NEXT: [[R:%.*]] = add [[INT]] [[R2]], [[R1]]
// CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]]
- // CHECK-NEXT: store volatile [[INT]] [[R]], ptr @ci, align 4
- // CHECK-NEXT: store volatile [[INT]] [[I]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align 4
+ // CHECK-NEXT: store volatile [[INT]] [[R]], ptr @ci, align [[ALIGN]]
+ // CHECK-NEXT: store volatile [[INT]] [[I]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align [[ALIGN_GEP]]
ci+=ci;
- // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]], ptr @ci, align 4
- // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align 4
- // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]], ptr @ci, align 4
- // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align 4
+ // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]], ptr @ci, align [[ALIGN]]
+ // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align [[ALIGN_GEP]]
+ // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]], ptr @ci, align [[ALIGN]]
+ // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align [[ALIGN_GEP]]
// CHECK-NEXT: [[R:%.*]] = add [[INT]] [[R2]], [[R1]]
// CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]]
- // CHECK-NEXT: store volatile [[INT]] [[R]], ptr @ci, align 4
- // CHECK-NEXT: store volatile [[INT]] [[I]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align 4
- // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]], ptr @ci, align 4
- // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align 4
+ // CHECK-NEXT: store volatile [[INT]] [[R]], ptr @ci, align [[ALIGN]]
+ // CHECK-NEXT: store volatile [[INT]] [[I]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align [[ALIGN_GEP]]
+ // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]], ptr @ci, align [[ALIGN]]
+ // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]], ptr getelementptr inbounds nuw ([[CINT]], ptr @ci, i32 0, i32 1), align [[ALIGN_GEP]]
// These additions can be elided
// CHECK-NEXT: add [[INT]] [[R]], [[R2]]
// CHECK-NEXT: add [[INT]] [[I]], [[I2]]
diff --git a/clang/test/CodeGenCUDA/grid-constant.cu b/clang/test/CodeGenCUDA/grid-constant.cu
index e7000ca..120b854 100644
--- a/clang/test/CodeGenCUDA/grid-constant.cu
+++ b/clang/test/CodeGenCUDA/grid-constant.cu
@@ -19,13 +19,9 @@ void foo() {
tkernel_const<S><<<1,1>>>({});
tkernel<const S><<<1,1>>>(1, {});
}
-//.
-//.
-// CHECK: [[META0:![0-9]+]] = !{ptr @_Z6kernel1Sii, !"grid_constant", [[META1:![0-9]+]]}
-// CHECK: [[META1]] = !{i32 1, i32 3}
-// CHECK: [[META2:![0-9]+]] = !{ptr @_Z13tkernel_constIK1SEvT_, !"grid_constant", [[META3:![0-9]+]]}
-// CHECK: [[META3]] = !{i32 1}
-// CHECK: [[META4:![0-9]+]] = !{ptr @_Z13tkernel_constI1SEvT_, !"grid_constant", [[META3]]}
-// CHECK: [[META5:![0-9]+]] = !{ptr @_Z7tkernelIK1SEviT_, !"grid_constant", [[META6:![0-9]+]]}
-// CHECK: [[META6]] = !{i32 2}
-//.
+
+// CHECK: define dso_local ptx_kernel void @_Z6kernel1Sii(ptr noundef byval(%struct.S) align 1 "nvvm.grid_constant" %gc_arg1, i32 noundef %arg2, i32 noundef "nvvm.grid_constant" %gc_arg3)
+// CHECK: define ptx_kernel void @_Z13tkernel_constIK1SEvT_(ptr noundef byval(%struct.S) align 1 "nvvm.grid_constant" %arg)
+// CHECK: define ptx_kernel void @_Z13tkernel_constI1SEvT_(ptr noundef byval(%struct.S) align 1 "nvvm.grid_constant" %arg)
+// CHECK: define ptx_kernel void @_Z7tkernelIK1SEviT_(i32 noundef %dummy, ptr noundef byval(%struct.S) align 1 "nvvm.grid_constant" %arg)
+
diff --git a/clang/test/CodeGenCXX/aarch64-sve-vector-conditional-op.cpp b/clang/test/CodeGenCXX/aarch64-sve-vector-conditional-op.cpp
index d6fa26b..5c99393 100644
--- a/clang/test/CodeGenCXX/aarch64-sve-vector-conditional-op.cpp
+++ b/clang/test/CodeGenCXX/aarch64-sve-vector-conditional-op.cpp
@@ -10,8 +10,7 @@
// CHECK-LABEL: @_Z9cond_boolu10__SVBool_tS_(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
-// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 16 x i1> [[CMP]], zeroinitializer
-// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 16 x i1> [[VECTOR_COND]], <vscale x 16 x i1> [[A]], <vscale x 16 x i1> [[B]]
+// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 16 x i1> [[CMP]], <vscale x 16 x i1> [[A]], <vscale x 16 x i1> [[B]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[VECTOR_SELECT]]
//
svbool_t cond_bool(svbool_t a, svbool_t b) {
diff --git a/clang/test/CodeGenCXX/amdgcn_declspec_get.cpp b/clang/test/CodeGenCXX/amdgcn_declspec_get.cpp
index 0ce8801..c784e60 100644
--- a/clang/test/CodeGenCXX/amdgcn_declspec_get.cpp
+++ b/clang/test/CodeGenCXX/amdgcn_declspec_get.cpp
@@ -15,10 +15,10 @@ extern const A a;
// CHECK-LABEL: define{{.*}} void @_Z4testv()
// CHECK: %i = alloca i32, align 4, addrspace(5)
// CHECK: %[[ii:.*]] = addrspacecast ptr addrspace(5) %i to ptr
-// CHECK: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %i)
+// CHECK: call void @llvm.lifetime.start.p5(ptr addrspace(5) %i)
// CHECK: %call = call noundef i32 @_ZN1A6_get_xEv()
// CHECK: store i32 %call, ptr %[[ii]]
-// CHECK: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %i)
+// CHECK: call void @llvm.lifetime.end.p5(ptr addrspace(5) %i)
void test()
{
int i = a.x;
diff --git a/clang/test/CodeGenCXX/attr-likelihood-iteration-stmt.cpp b/clang/test/CodeGenCXX/attr-likelihood-iteration-stmt.cpp
index fd9786d..151b77a 100644
--- a/clang/test/CodeGenCXX/attr-likelihood-iteration-stmt.cpp
+++ b/clang/test/CodeGenCXX/attr-likelihood-iteration-stmt.cpp
@@ -68,7 +68,7 @@ void w_branch_elided(unsigned e){
// CHECK-NEXT: [[E_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 [[E:%.*]], ptr [[E_ADDR]], align 4, !tbaa [[TBAA2]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR3:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR3:[0-9]+]]
// CHECK-NEXT: store i32 0, ptr [[I]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: br label [[FOR_COND:%.*]]
// CHECK: for.cond:
@@ -78,7 +78,7 @@ void w_branch_elided(unsigned e){
// CHECK-NEXT: [[CMP_EXPVAL:%.*]] = call i1 @llvm.expect.i1(i1 [[CMP]], i1 true)
// CHECK-NEXT: br i1 [[CMP_EXPVAL]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
// CHECK: for.cond.cleanup:
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: br label [[FOR_END:%.*]]
// CHECK: for.body:
// CHECK-NEXT: br label [[FOR_INC:%.*]]
@@ -100,7 +100,7 @@ void fl(unsigned e)
// CHECK-NEXT: [[E_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 [[E:%.*]], ptr [[E_ADDR]], align 4, !tbaa [[TBAA2]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: store i32 0, ptr [[I]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: br label [[FOR_COND:%.*]]
// CHECK: for.cond:
@@ -110,7 +110,7 @@ void fl(unsigned e)
// CHECK-NEXT: [[CMP_EXPVAL:%.*]] = call i1 @llvm.expect.i1(i1 [[CMP]], i1 false)
// CHECK-NEXT: br i1 [[CMP_EXPVAL]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
// CHECK: for.cond.cleanup:
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: br label [[FOR_END:%.*]]
// CHECK: for.body:
// CHECK-NEXT: br label [[FOR_INC:%.*]]
@@ -146,14 +146,14 @@ void f_branch_elided()
// CHECK-NEXT: [[__END1:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[E:%.*]], ptr [[E_ADDR]], align 8, !tbaa [[TBAA14:![0-9]+]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[__RANGE1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[__RANGE1]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: store ptr [[TMP0]], ptr [[__RANGE1]], align 8, !tbaa [[TBAA14]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[__BEGIN1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[__BEGIN1]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__RANGE1]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i32], ptr [[TMP1]], i64 0, i64 0
// CHECK-NEXT: store ptr [[ARRAYDECAY]], ptr [[__BEGIN1]], align 8, !tbaa [[TBAA14]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[__END1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[__END1]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__RANGE1]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [4 x i32], ptr [[TMP2]], i64 0, i64 0
// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAYDECAY1]], i64 4
@@ -166,16 +166,16 @@ void f_branch_elided()
// CHECK-NEXT: [[CMP_EXPVAL:%.*]] = call i1 @llvm.expect.i1(i1 [[CMP]], i1 true)
// CHECK-NEXT: br i1 [[CMP_EXPVAL]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
// CHECK: for.cond.cleanup:
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[__END1]]) #[[ATTR3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[__BEGIN1]]) #[[ATTR3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[__RANGE1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[__END1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[__BEGIN1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[__RANGE1]]) #[[ATTR3]]
// CHECK-NEXT: br label [[FOR_END:%.*]]
// CHECK: for.body:
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[__BEGIN1]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: store i32 [[TMP6]], ptr [[I]], align 4, !tbaa [[TBAA2]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: br label [[FOR_INC:%.*]]
// CHECK: for.inc:
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[__BEGIN1]], align 8, !tbaa [[TBAA14]]
@@ -198,14 +198,14 @@ void frl(int (&&e) [4])
// CHECK-NEXT: [[__END1:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[E:%.*]], ptr [[E_ADDR]], align 8, !tbaa [[TBAA14]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[__RANGE1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[__RANGE1]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: store ptr [[TMP0]], ptr [[__RANGE1]], align 8, !tbaa [[TBAA14]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[__BEGIN1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[__BEGIN1]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__RANGE1]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i32], ptr [[TMP1]], i64 0, i64 0
// CHECK-NEXT: store ptr [[ARRAYDECAY]], ptr [[__BEGIN1]], align 8, !tbaa [[TBAA14]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[__END1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[__END1]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__RANGE1]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [4 x i32], ptr [[TMP2]], i64 0, i64 0
// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAYDECAY1]], i64 4
@@ -218,16 +218,16 @@ void frl(int (&&e) [4])
// CHECK-NEXT: [[CMP_EXPVAL:%.*]] = call i1 @llvm.expect.i1(i1 [[CMP]], i1 false)
// CHECK-NEXT: br i1 [[CMP_EXPVAL]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
// CHECK: for.cond.cleanup:
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[__END1]]) #[[ATTR3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[__BEGIN1]]) #[[ATTR3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[__RANGE1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[__END1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[__BEGIN1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[__RANGE1]]) #[[ATTR3]]
// CHECK-NEXT: br label [[FOR_END:%.*]]
// CHECK: for.body:
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[__BEGIN1]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: store i32 [[TMP6]], ptr [[I]], align 4, !tbaa [[TBAA2]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: br label [[FOR_INC:%.*]]
// CHECK: for.inc:
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[__BEGIN1]], align 8, !tbaa [[TBAA14]]
diff --git a/clang/test/CodeGenCXX/cxx2b-deducing-this.cpp b/clang/test/CodeGenCXX/cxx2b-deducing-this.cpp
index 8a78463d..9664a86 100644
--- a/clang/test/CodeGenCXX/cxx2b-deducing-this.cpp
+++ b/clang/test/CodeGenCXX/cxx2b-deducing-this.cpp
@@ -264,3 +264,55 @@ void test() {
// CHECK: call void @_ZNH5P27971C1cERKS0_
// CHECK: call void @_ZN5P27971C1cEi
}
+
+// This used to crash because we weren't instantiating a dependent 'this'.
+namespace GH154054 {
+struct S {
+ int x;
+ auto byval() {
+ return [*this](this auto) { return this->x; };
+ }
+};
+
+// CHECK-LABEL: define {{.*}} void @_ZN8GH1540544mainEv
+void main() {
+ S s{ 42 };
+
+ // CHECK: call {{.*}} i32 @_ZZN8GH1540541S5byvalEvENHUlT_E_clIS2_EEDaS1_
+ if ( s.byval()() != 42)
+ __builtin_abort();
+}
+
+// CHECK-LABEL: define {{.*}} i32 @_ZZN8GH1540541S5byvalEvENHUlT_E_clIS2_EEDaS1_(i32 %.coerce)
+// CHECK: entry:
+// CHECK: %0 = alloca %class.anon.11, align 4
+// CHECK: %coerce.dive = getelementptr inbounds nuw %class.anon.11, ptr %0, i32 0, i32 0
+// CHECK: %coerce.dive1 = getelementptr inbounds nuw %"struct.GH154054::S", ptr %coerce.dive, i32 0, i32 0
+// CHECK: store i32 %.coerce, ptr %coerce.dive1, align 4
+// CHECK: %1 = getelementptr inbounds nuw %class.anon.11, ptr %0, i32 0, i32 0
+// CHECK: %x = getelementptr inbounds nuw %"struct.GH154054::S", ptr %1, i32 0, i32 0
+// CHECK: %2 = load i32, ptr %x, align 4
+// CHECK: ret i32 %2
+
+struct s {
+ int q;
+ auto f() {
+ return [*this](this auto) { return this; };
+ }
+};
+
+// CHECK-LABEL: define {{.*}} void @_ZN8GH1540541fEv
+void f() {
+ // CHECK: call {{.*}} ptr @_ZZN8GH1540541s1fEvENHUlT_E_clIS2_EEDaS1_
+ s{}.f()();
+}
+
+// CHECK-LABEL: define {{.*}} ptr @_ZZN8GH1540541s1fEvENHUlT_E_clIS2_EEDaS1_(i32 %.coerce)
+// CHECK: entry:
+// CHECK: %0 = alloca %class.anon.12, align 4
+// CHECK: %coerce.dive = getelementptr inbounds nuw %class.anon.12, ptr %0, i32 0, i32 0
+// CHECK: %coerce.dive1 = getelementptr inbounds nuw %"struct.GH154054::s", ptr %coerce.dive, i32 0, i32 0
+// CHECK: store i32 %.coerce, ptr %coerce.dive1, align 4
+// CHECK: %1 = getelementptr inbounds nuw %class.anon.12, ptr %0, i32 0, i32 0
+// CHECK: ret ptr %1
+}
diff --git a/clang/test/CodeGenCXX/debug-info-class-limited.test b/clang/test/CodeGenCXX/debug-info-class-limited.test
deleted file mode 100644
index c2e3328..0000000
--- a/clang/test/CodeGenCXX/debug-info-class-limited.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: %clang_cc1 -emit-llvm -debug-info-kind=limited %S/Inputs/debug-info-class-limited.cpp -o - | FileCheck %S/Inputs/debug-info-class-limited.cpp
diff --git a/clang/test/CodeGenCXX/destructors.cpp b/clang/test/CodeGenCXX/destructors.cpp
index 99c82ec..1bf8b16 100644
--- a/clang/test/CodeGenCXX/destructors.cpp
+++ b/clang/test/CodeGenCXX/destructors.cpp
@@ -308,7 +308,7 @@ namespace test5 {
// CHECK5: [[ELEMS:%.*]] = alloca [5 x [[A:%.*]]], align
// CHECK5v03-NEXT: [[EXN:%.*]] = alloca ptr
// CHECK5v03-NEXT: [[SEL:%.*]] = alloca i32
- // CHECK5-NEXT: call void @llvm.lifetime.start.p0(i64 5, ptr [[ELEMS]])
+ // CHECK5-NEXT: call void @llvm.lifetime.start.p0(ptr [[ELEMS]])
// CHECK5-NEXT: [[BEGIN:%.*]] = getelementptr inbounds [5 x [[A]]], ptr [[ELEMS]], i32 0, i32 0
// CHECK5-NEXT: [[END:%.*]] = getelementptr inbounds [[A]], ptr [[BEGIN]], i64 5
// CHECK5-NEXT: br label
@@ -482,24 +482,24 @@ namespace test11 {
// CHECK6: {{^}}invoke.cont
// CHECK6: call void @_ZN6test112S1D1Ev(ptr {{[^,]*}} [[T1]])
-// CHECK6: call void @llvm.lifetime.end.p0(i64 32, ptr [[T1]])
+// CHECK6: call void @llvm.lifetime.end.p0(ptr [[T1]])
// CHECK6: {{^}}lpad
// CHECK6: call void @_ZN6test112S1D1Ev(ptr {{[^,]*}} [[T1]])
-// CHECK6: call void @llvm.lifetime.end.p0(i64 32, ptr [[T1]])
+// CHECK6: call void @llvm.lifetime.end.p0(ptr [[T1]])
// CHECK6: {{^}}invoke.cont
// CHECK6: call void @_ZN6test112S1D1Ev(ptr {{[^,]*}} [[T2]])
-// CHECK6: call void @llvm.lifetime.end.p0(i64 32, ptr [[T2]])
+// CHECK6: call void @llvm.lifetime.end.p0(ptr [[T2]])
// CHECK6: {{^}}lpad
// CHECK6: call void @_ZN6test112S1D1Ev(ptr {{[^,]*}} [[T2]])
-// CHECK6: call void @llvm.lifetime.end.p0(i64 32, ptr [[T2]])
+// CHECK6: call void @llvm.lifetime.end.p0(ptr [[T2]])
// CHECK6: {{^}}invoke.cont
// CHECK6: call void @_ZN6test112S1D1Ev(ptr {{[^,]*}} [[T3]])
-// CHECK6: call void @llvm.lifetime.end.p0(i64 32, ptr [[T3]])
+// CHECK6: call void @llvm.lifetime.end.p0(ptr [[T3]])
// CHECK6: {{^}}lpad
// CHECK6: call void @_ZN6test112S1D1Ev(ptr {{[^,]*}} [[T3]])
-// CHECK6: call void @llvm.lifetime.end.p0(i64 32, ptr [[T3]])
+// CHECK6: call void @llvm.lifetime.end.p0(ptr [[T3]])
struct S1 {
~S1();
diff --git a/clang/test/CodeGenCXX/ext-vector-type-conditional.cpp b/clang/test/CodeGenCXX/ext-vector-type-conditional.cpp
index 4504000..8ef3fbb 100644
--- a/clang/test/CodeGenCXX/ext-vector-type-conditional.cpp
+++ b/clang/test/CodeGenCXX/ext-vector-type-conditional.cpp
@@ -1,3 +1,4 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 %s -triple x86_64-linux-gnu -Wno-unused -std=c++11 -emit-llvm -o - | FileCheck %s
using FourShorts = short __attribute__((ext_vector_type(4)));
@@ -11,6 +12,7 @@ using TwoFloats = float __attribute__((ext_vector_type(2)));
using FourFloats = float __attribute__((ext_vector_type(4)));
using TwoDoubles = double __attribute__((ext_vector_type(2)));
using FourDoubles = double __attribute__((ext_vector_type(4)));
+using TwoBools = bool __attribute__((ext_vector_type(2)));
FourShorts four_shorts;
TwoInts two_ints;
@@ -23,6 +25,7 @@ TwoFloats two_floats;
FourFloats four_floats;
TwoDoubles two_doubles;
FourDoubles four_doubles;
+TwoBools two_bools;
short some_short;
unsigned short some_ushort;
@@ -33,235 +36,186 @@ long long some_ll;
unsigned long long some_ull;
double some_double;
-// CHECK: TwoVectorOps
+// CHECK-LABEL: define dso_local void @_Z12TwoVectorOpsv(
+// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr @two_ints, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @two_ints, align 8
+// CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @two_ints, align 8
+// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp slt <2 x i32> [[TMP0]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <2 x i1> [[VECTOR_COND]], <2 x i32> [[TMP1]], <2 x i32> [[TMP2]]
+// CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr @two_ints, align 8
+// CHECK-NEXT: [[TMP4:%.*]] = load <2 x float>, ptr @two_floats, align 8
+// CHECK-NEXT: [[TMP5:%.*]] = load <2 x float>, ptr @two_floats, align 8
+// CHECK-NEXT: [[VECTOR_COND1:%.*]] = icmp slt <2 x i32> [[TMP3]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT2:%.*]] = select <2 x i1> [[VECTOR_COND1]], <2 x float> [[TMP4]], <2 x float> [[TMP5]]
+// CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @two_ll, align 16
+// CHECK-NEXT: [[TMP7:%.*]] = load <2 x double>, ptr @two_doubles, align 16
+// CHECK-NEXT: [[TMP8:%.*]] = load <2 x double>, ptr @two_doubles, align 16
+// CHECK-NEXT: [[VECTOR_COND3:%.*]] = icmp slt <2 x i64> [[TMP6]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT4:%.*]] = select <2 x i1> [[VECTOR_COND3]], <2 x double> [[TMP7]], <2 x double> [[TMP8]]
+// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr @two_bools, align 1
+// CHECK-NEXT: [[TMP9:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1>
+// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <8 x i1> [[TMP9]], <8 x i1> poison, <2 x i32> <i32 0, i32 1>
+// CHECK-NEXT: [[TMP10:%.*]] = load <2 x i32>, ptr @two_ints, align 8
+// CHECK-NEXT: [[TMP11:%.*]] = load <2 x i32>, ptr @two_ints, align 8
+// CHECK-NEXT: [[VECTOR_SELECT5:%.*]] = select <2 x i1> [[EXTRACTVEC]], <2 x i32> [[TMP10]], <2 x i32> [[TMP11]]
+// CHECK-NEXT: [[LOAD_BITS6:%.*]] = load i8, ptr @two_bools, align 1
+// CHECK-NEXT: [[TMP12:%.*]] = bitcast i8 [[LOAD_BITS6]] to <8 x i1>
+// CHECK-NEXT: [[EXTRACTVEC7:%.*]] = shufflevector <8 x i1> [[TMP12]], <8 x i1> poison, <2 x i32> <i32 0, i32 1>
+// CHECK-NEXT: [[TMP13:%.*]] = load <2 x double>, ptr @two_doubles, align 16
+// CHECK-NEXT: [[TMP14:%.*]] = load <2 x double>, ptr @two_doubles, align 16
+// CHECK-NEXT: [[VECTOR_SELECT8:%.*]] = select <2 x i1> [[EXTRACTVEC7]], <2 x double> [[TMP13]], <2 x double> [[TMP14]]
+// CHECK-NEXT: ret void
+//
void TwoVectorOps() {
two_ints ? two_ints : two_ints;
- // CHECK: [[COND:%.+]] = load <2 x i32>
- // CHECK: [[LHS:%.+]] = load <2 x i32>
- // CHECK: [[RHS:%.+]] = load <2 x i32>
- // CHECK: [[NEG:%.+]] = icmp slt <2 x i32> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <2 x i1> [[NEG]] to <2 x i32>
- // CHECK: [[XOR:%.+]] = xor <2 x i32> [[SEXT]], splat (i32 -1)
- // CHECK: [[RHS_AND:%.+]] = and <2 x i32> [[RHS]], [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <2 x i32> [[LHS]], [[SEXT]]
- // CHECK: = or <2 x i32> [[RHS_AND]], [[LHS_AND]]
two_ints ? two_floats : two_floats;
- // CHECK: [[COND:%.+]] = load <2 x i32>
- // CHECK: [[LHS:%.+]] = load <2 x float>
- // CHECK: [[RHS:%.+]] = load <2 x float>
- // CHECK: [[NEG:%.+]] = icmp slt <2 x i32> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <2 x i1> [[NEG]] to <2 x i32>
- // CHECK: [[XOR:%.+]] = xor <2 x i32> [[SEXT]], splat (i32 -1)
- // CHECK: [[RHS_EXT:%.+]] = bitcast <2 x float> [[RHS]] to <2 x i32>
- // CHECK: [[LHS_EXT:%.+]] = bitcast <2 x float> [[LHS]] to <2 x i32>
- // CHECK: [[RHS_AND:%.+]] = and <2 x i32> [[RHS_EXT]], [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <2 x i32> [[LHS_EXT]], [[SEXT]]
- // CHECK: [[OR:%.+]] = or <2 x i32> [[RHS_AND]], [[LHS_AND]]
- // CHECK: = bitcast <2 x i32> [[OR]] to <2 x float>
two_ll ? two_doubles : two_doubles;
- // CHECK: [[COND:%.+]] = load <2 x i64>
- // CHECK: [[LHS:%.+]] = load <2 x double>
- // CHECK: [[RHS:%.+]] = load <2 x double>
- // CHECK: [[NEG:%.+]] = icmp slt <2 x i64> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <2 x i1> [[NEG]] to <2 x i64>
- // CHECK: [[XOR:%.+]] = xor <2 x i64> [[SEXT]], splat (i64 -1)
- // CHECK: [[RHS_EXT:%.+]] = bitcast <2 x double> [[RHS]] to <2 x i64>
- // CHECK: [[LHS_EXT:%.+]] = bitcast <2 x double> [[LHS]] to <2 x i64>
- // CHECK: [[RHS_AND:%.+]] = and <2 x i64> [[RHS_EXT]], [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <2 x i64> [[LHS_EXT]], [[SEXT]]
- // CHECK: [[OR:%.+]] = or <2 x i64> [[RHS_AND]], [[LHS_AND]]
- // CHECK: = bitcast <2 x i64> [[OR]] to <2 x double>
+
+ two_bools ? two_ints : two_ints;
+
+ two_bools ? two_doubles : two_doubles;
}
-// CHECK: TwoScalarOps
+// CHECK-LABEL: define dso_local void @_Z12TwoScalarOpsv(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr @four_shorts, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @some_short, align 2
+// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[TMP1]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i16> [[SPLAT_SPLATINSERT]], <4 x i16> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr @some_short, align 2
+// CHECK-NEXT: [[SPLAT_SPLATINSERT1:%.*]] = insertelement <4 x i16> poison, i16 [[TMP2]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT2:%.*]] = shufflevector <4 x i16> [[SPLAT_SPLATINSERT1]], <4 x i16> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp slt <4 x i16> [[TMP0]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <4 x i1> [[VECTOR_COND]], <4 x i16> [[SPLAT_SPLAT]], <4 x i16> [[SPLAT_SPLAT2]]
+// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr @four_shorts, align 8
+// CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @some_ushort, align 2
+// CHECK-NEXT: [[SPLAT_SPLATINSERT3:%.*]] = insertelement <4 x i16> poison, i16 [[TMP4]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT4:%.*]] = shufflevector <4 x i16> [[SPLAT_SPLATINSERT3]], <4 x i16> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr @some_ushort, align 2
+// CHECK-NEXT: [[SPLAT_SPLATINSERT5:%.*]] = insertelement <4 x i16> poison, i16 [[TMP5]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT6:%.*]] = shufflevector <4 x i16> [[SPLAT_SPLATINSERT5]], <4 x i16> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[VECTOR_COND7:%.*]] = icmp slt <4 x i16> [[TMP3]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT8:%.*]] = select <4 x i1> [[VECTOR_COND7]], <4 x i16> [[SPLAT_SPLAT4]], <4 x i16> [[SPLAT_SPLAT6]]
+// CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @four_ints, align 16
+// CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr @some_ushort, align 2
+// CHECK-NEXT: [[CONV:%.*]] = zext i16 [[TMP7]] to i32
+// CHECK-NEXT: [[SPLAT_SPLATINSERT9:%.*]] = insertelement <4 x i32> poison, i32 [[CONV]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT10:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT9]], <4 x i32> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[TMP8:%.*]] = load i16, ptr @some_short, align 2
+// CHECK-NEXT: [[CONV11:%.*]] = sext i16 [[TMP8]] to i32
+// CHECK-NEXT: [[SPLAT_SPLATINSERT12:%.*]] = insertelement <4 x i32> poison, i32 [[CONV11]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT13:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT12]], <4 x i32> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[VECTOR_COND14:%.*]] = icmp slt <4 x i32> [[TMP6]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT15:%.*]] = select <4 x i1> [[VECTOR_COND14]], <4 x i32> [[SPLAT_SPLAT10]], <4 x i32> [[SPLAT_SPLAT13]]
+// CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @four_ints, align 16
+// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr @some_int, align 4
+// CHECK-NEXT: [[CONV16:%.*]] = sitofp i32 [[TMP10]] to float
+// CHECK-NEXT: [[SPLAT_SPLATINSERT17:%.*]] = insertelement <4 x float> poison, float [[CONV16]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT18:%.*]] = shufflevector <4 x float> [[SPLAT_SPLATINSERT17]], <4 x float> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[TMP11:%.*]] = load float, ptr @some_float, align 4
+// CHECK-NEXT: [[SPLAT_SPLATINSERT19:%.*]] = insertelement <4 x float> poison, float [[TMP11]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT20:%.*]] = shufflevector <4 x float> [[SPLAT_SPLATINSERT19]], <4 x float> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[VECTOR_COND21:%.*]] = icmp slt <4 x i32> [[TMP9]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT22:%.*]] = select <4 x i1> [[VECTOR_COND21]], <4 x float> [[SPLAT_SPLAT18]], <4 x float> [[SPLAT_SPLAT20]]
+// CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr @four_ll, align 32
+// CHECK-NEXT: [[TMP13:%.*]] = load double, ptr @some_double, align 8
+// CHECK-NEXT: [[SPLAT_SPLATINSERT23:%.*]] = insertelement <4 x double> poison, double [[TMP13]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT24:%.*]] = shufflevector <4 x double> [[SPLAT_SPLATINSERT23]], <4 x double> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr @some_ll, align 8
+// CHECK-NEXT: [[CONV25:%.*]] = sitofp i64 [[TMP14]] to double
+// CHECK-NEXT: [[SPLAT_SPLATINSERT26:%.*]] = insertelement <4 x double> poison, double [[CONV25]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT27:%.*]] = shufflevector <4 x double> [[SPLAT_SPLATINSERT26]], <4 x double> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[VECTOR_COND28:%.*]] = icmp slt <4 x i64> [[TMP12]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT29:%.*]] = select <4 x i1> [[VECTOR_COND28]], <4 x double> [[SPLAT_SPLAT24]], <4 x double> [[SPLAT_SPLAT27]]
+// CHECK-NEXT: [[TMP15:%.*]] = load <4 x i32>, ptr @four_ints, align 16
+// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr @some_int, align 4
+// CHECK-NEXT: [[SPLAT_SPLATINSERT30:%.*]] = insertelement <4 x i32> poison, i32 [[TMP16]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT31:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT30]], <4 x i32> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[TMP17:%.*]] = load i16, ptr @some_short, align 2
+// CHECK-NEXT: [[CONV32:%.*]] = sext i16 [[TMP17]] to i32
+// CHECK-NEXT: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> poison, i32 [[CONV32]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[VECTOR_COND35:%.*]] = icmp slt <4 x i32> [[TMP15]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT36:%.*]] = select <4 x i1> [[VECTOR_COND35]], <4 x i32> [[SPLAT_SPLAT31]], <4 x i32> [[SPLAT_SPLAT34]]
+// CHECK-NEXT: ret void
+//
void TwoScalarOps() {
four_shorts ? some_short : some_short;
- // CHECK: [[COND:%.+]] = load <4 x i16>
- // CHECK: [[LHS:%.+]] = load i16
- // CHECK: [[LHS_SPLAT_INSERT:%.+]] = insertelement <4 x i16> poison, i16 [[LHS]], i64 0
- // CHECK: [[LHS_SPLAT:%.+]] = shufflevector <4 x i16> [[LHS_SPLAT_INSERT]], <4 x i16> poison, <4 x i32> zeroinitializer
- // CHECK: [[RHS:%.+]] = load i16
- // CHECK: [[RHS_SPLAT_INSERT:%.+]] = insertelement <4 x i16> poison, i16 [[RHS]], i64 0
- // CHECK: [[RHS_SPLAT:%.+]] = shufflevector <4 x i16> [[RHS_SPLAT_INSERT]], <4 x i16> poison, <4 x i32> zeroinitializer
- // CHECK: [[NEG:%.+]] = icmp slt <4 x i16> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <4 x i1> [[NEG]] to <4 x i16>
- // CHECK: [[XOR:%.+]] = xor <4 x i16> [[SEXT]], splat (i16 -1)
- // CHECK: [[RHS_AND:%.+]] = and <4 x i16> [[RHS_SPLAT]], [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <4 x i16> [[LHS_SPLAT]], [[SEXT]]
- // CHECK: = or <4 x i16> [[RHS_AND]], [[LHS_AND]]
four_shorts ? some_ushort : some_ushort;
- // CHECK: [[COND:%.+]] = load <4 x i16>
- // CHECK: [[LHS:%.+]] = load i16
- // CHECK: [[LHS_SPLAT_INSERT:%.+]] = insertelement <4 x i16> poison, i16 [[LHS]], i64 0
- // CHECK: [[LHS_SPLAT:%.+]] = shufflevector <4 x i16> [[LHS_SPLAT_INSERT]], <4 x i16> poison, <4 x i32> zeroinitializer
- // CHECK: [[RHS:%.+]] = load i16
- // CHECK: [[RHS_SPLAT_INSERT:%.+]] = insertelement <4 x i16> poison, i16 [[RHS]], i64 0
- // CHECK: [[RHS_SPLAT:%.+]] = shufflevector <4 x i16> [[RHS_SPLAT_INSERT]], <4 x i16> poison, <4 x i32> zeroinitializer
- // CHECK: [[NEG:%.+]] = icmp slt <4 x i16> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <4 x i1> [[NEG]] to <4 x i16>
- // CHECK: [[XOR:%.+]] = xor <4 x i16> [[SEXT]], splat (i16 -1)
- // CHECK: [[RHS_AND:%.+]] = and <4 x i16> [[RHS_SPLAT]], [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <4 x i16> [[LHS_SPLAT]], [[SEXT]]
- // CHECK: = or <4 x i16> [[RHS_AND]], [[LHS_AND]]
four_ints ? some_ushort : some_short;
- // CHECK: [[COND:%.+]] = load <4 x i32>
- // CHECK: [[LHS:%.+]] = load i16
- // CHECK: [[LHS_ZEXT:%.+]] = zext i16 [[LHS]] to i32
- // CHECK: [[LHS_SPLAT_INSERT:%.+]] = insertelement <4 x i32> poison, i32 [[LHS_ZEXT]], i64 0
- // CHECK: [[LHS_SPLAT:%.+]] = shufflevector <4 x i32> [[LHS_SPLAT_INSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
- // CHECK: [[RHS:%.+]] = load i16
- // CHECK: [[RHS_SEXT:%.+]] = sext i16 [[RHS]] to i32
- // CHECK: [[RHS_SPLAT_INSERT:%.+]] = insertelement <4 x i32> poison, i32 [[RHS_SEXT]], i64 0
- // CHECK: [[RHS_SPLAT:%.+]] = shufflevector <4 x i32> [[RHS_SPLAT_INSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
- // CHECK: [[NEG:%.+]] = icmp slt <4 x i32> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <4 x i1> [[NEG]] to <4 x i32>
- // CHECK: [[XOR:%.+]] = xor <4 x i32> [[SEXT]], splat (i32 -1)
- // CHECK: [[RHS_AND:%.+]] = and <4 x i32> [[RHS_SPLAT]], [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <4 x i32> [[LHS_SPLAT]], [[SEXT]]
- // CHECK: = or <4 x i32> [[RHS_AND]], [[LHS_AND]]
four_ints ? some_int : some_float;
- // CHECK: [[COND:%.+]] = load <4 x i32>
- // CHECK: [[LHS:%.+]] = load i32
- // CHECK: [[LHS_CONV:%.+]] = sitofp i32 [[LHS]] to float
- // CHECK: [[LHS_SPLAT_INSERT:%.+]] = insertelement <4 x float> poison, float [[LHS_CONV]], i64 0
- // CHECK: [[LHS_SPLAT:%.+]] = shufflevector <4 x float> [[LHS_SPLAT_INSERT]], <4 x float> poison, <4 x i32> zeroinitializer
- // CHECK: [[RHS:%.+]] = load float
- // CHECK: [[RHS_SPLAT_INSERT:%.+]] = insertelement <4 x float> poison, float [[RHS]], i64 0
- // CHECK: [[RHS_SPLAT:%.+]] = shufflevector <4 x float> [[RHS_SPLAT_INSERT]], <4 x float> poison, <4 x i32> zeroinitializer
- // CHECK: [[NEG:%.+]] = icmp slt <4 x i32> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <4 x i1> [[NEG]] to <4 x i32>
- // CHECK: [[XOR:%.+]] = xor <4 x i32> [[SEXT]], splat (i32 -1)
- // CHECK: [[RHS_CAST:%.+]] = bitcast <4 x float> [[RHS_SPLAT]] to <4 x i32>
- // CHECK: [[LHS_CAST:%.+]] = bitcast <4 x float> [[LHS_SPLAT]] to <4 x i32>
- // CHECK: [[RHS_AND:%.+]] = and <4 x i32> [[RHS_CAST]], [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <4 x i32> [[LHS_CAST]], [[SEXT]]
- // CHECK: = or <4 x i32> [[RHS_AND]], [[LHS_AND]]
four_ll ? some_double : some_ll;
- // CHECK: [[COND:%.+]] = load <4 x i64>
- // CHECK: [[LHS:%.+]] = load double
- // CHECK: [[LHS_SPLAT_INSERT:%.+]] = insertelement <4 x double> poison, double [[LHS]], i64 0
- // CHECK: [[LHS_SPLAT:%.+]] = shufflevector <4 x double> [[LHS_SPLAT_INSERT]], <4 x double> poison, <4 x i32> zeroinitializer
- // CHECK: [[RHS:%.+]] = load i64
- // CHECK: [[RHS_CONV:%.+]] = sitofp i64 [[RHS]] to double
- // CHECK: [[RHS_SPLAT_INSERT:%.+]] = insertelement <4 x double> poison, double [[RHS_CONV]], i64 0
- // CHECK: [[RHS_SPLAT:%.+]] = shufflevector <4 x double> [[RHS_SPLAT_INSERT]], <4 x double> poison, <4 x i32> zeroinitializer
- // CHECK: [[NEG:%.+]] = icmp slt <4 x i64> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <4 x i1> [[NEG]] to <4 x i64>
- // CHECK: [[XOR:%.+]] = xor <4 x i64> [[SEXT]], splat (i64 -1)
- // CHECK: [[RHS_CAST:%.+]] = bitcast <4 x double> [[RHS_SPLAT]] to <4 x i64>
- // CHECK: [[LHS_CAST:%.+]] = bitcast <4 x double> [[LHS_SPLAT]] to <4 x i64>
- // CHECK: [[RHS_AND:%.+]] = and <4 x i64> [[RHS_CAST]], [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <4 x i64> [[LHS_CAST]], [[SEXT]]
- // CHECK: = or <4 x i64> [[RHS_AND]], [[LHS_AND]]
four_ints ? some_int : some_short;
- // CHECK: [[COND:%.+]] = load <4 x i32>
- // CHECK: [[LHS:%.+]] = load i32
- // CHECK: [[LHS_SPLAT_INSERT:%.+]] = insertelement <4 x i32> poison, i32 [[LHS]], i64 0
- // CHECK: [[LHS_SPLAT:%.+]] = shufflevector <4 x i32> [[LHS_SPLAT_INSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
- // CHECK: [[RHS:%.+]] = load i16
- // CHECK: [[RHS_SEXT:%.+]] = sext i16 [[RHS]] to i32
- // CHECK: [[RHS_SPLAT_INSERT:%.+]] = insertelement <4 x i32> poison, i32 [[RHS_SEXT]], i64 0
- // CHECK: [[RHS_SPLAT:%.+]] = shufflevector <4 x i32> [[RHS_SPLAT_INSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
- // CHECK: [[NEG:%.+]] = icmp slt <4 x i32> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <4 x i1> [[NEG]] to <4 x i32>
- // CHECK: [[XOR:%.+]] = xor <4 x i32> [[SEXT]], splat (i32 -1)
- // CHECK: [[RHS_AND:%.+]] = and <4 x i32> [[RHS_SPLAT]], [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <4 x i32> [[LHS_SPLAT]], [[SEXT]]
- // CHECK: = or <4 x i32> [[RHS_AND]], [[LHS_AND]]
}
-// CHECK: OneScalarOp
+// CHECK-LABEL: define dso_local void @_Z11OneScalarOpv(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @four_ints, align 16
+// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @four_ints, align 16
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @some_int, align 4
+// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP2]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp slt <4 x i32> [[TMP0]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <4 x i1> [[VECTOR_COND]], <4 x i32> [[TMP1]], <4 x i32> [[SPLAT_SPLAT]]
+// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @four_ints, align 16
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr @four_ints, align 16
+// CHECK-NEXT: [[VECTOR_COND1:%.*]] = icmp slt <4 x i32> [[TMP3]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT2:%.*]] = select <4 x i1> [[VECTOR_COND1]], <4 x i32> [[TMP4]], <4 x i32> splat (i32 5)
+// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @four_ints, align 16
+// CHECK-NEXT: [[TMP6:%.*]] = load <4 x float>, ptr @four_floats, align 16
+// CHECK-NEXT: [[TMP7:%.*]] = load float, ptr @some_float, align 4
+// CHECK-NEXT: [[SPLAT_SPLATINSERT3:%.*]] = insertelement <4 x float> poison, float [[TMP7]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT4:%.*]] = shufflevector <4 x float> [[SPLAT_SPLATINSERT3]], <4 x float> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[VECTOR_COND5:%.*]] = icmp slt <4 x i32> [[TMP5]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT6:%.*]] = select <4 x i1> [[VECTOR_COND5]], <4 x float> [[TMP6]], <4 x float> [[SPLAT_SPLAT4]]
+// CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @four_ll, align 32
+// CHECK-NEXT: [[TMP9:%.*]] = load <4 x double>, ptr @four_doubles, align 32
+// CHECK-NEXT: [[VECTOR_COND7:%.*]] = icmp slt <4 x i64> [[TMP8]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT8:%.*]] = select <4 x i1> [[VECTOR_COND7]], <4 x double> [[TMP9]], <4 x double> splat (double 6.000000e+00)
+// CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr @four_ll, align 32
+// CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr @four_ll, align 32
+// CHECK-NEXT: [[VECTOR_COND9:%.*]] = icmp slt <4 x i64> [[TMP10]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT10:%.*]] = select <4 x i1> [[VECTOR_COND9]], <4 x i64> [[TMP11]], <4 x i64> splat (i64 6)
+// CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr @four_ll, align 32
+// CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr @four_ll, align 32
+// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr @some_int, align 4
+// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP14]] to i64
+// CHECK-NEXT: [[SPLAT_SPLATINSERT11:%.*]] = insertelement <4 x i64> poison, i64 [[CONV]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT12:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT11]], <4 x i64> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[VECTOR_COND13:%.*]] = icmp slt <4 x i64> [[TMP12]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT14:%.*]] = select <4 x i1> [[VECTOR_COND13]], <4 x i64> [[TMP13]], <4 x i64> [[SPLAT_SPLAT12]]
+// CHECK-NEXT: [[TMP15:%.*]] = load <4 x i64>, ptr @four_ll, align 32
+// CHECK-NEXT: [[TMP16:%.*]] = load <4 x i64>, ptr @four_ll, align 32
+// CHECK-NEXT: [[TMP17:%.*]] = load i64, ptr @some_ll, align 8
+// CHECK-NEXT: [[SPLAT_SPLATINSERT15:%.*]] = insertelement <4 x i64> poison, i64 [[TMP17]], i64 0
+// CHECK-NEXT: [[SPLAT_SPLAT16:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT15]], <4 x i64> poison, <4 x i32> zeroinitializer
+// CHECK-NEXT: [[VECTOR_COND17:%.*]] = icmp slt <4 x i64> [[TMP15]], zeroinitializer
+// CHECK-NEXT: [[VECTOR_SELECT18:%.*]] = select <4 x i1> [[VECTOR_COND17]], <4 x i64> [[TMP16]], <4 x i64> [[SPLAT_SPLAT16]]
+// CHECK-NEXT: ret void
+//
void OneScalarOp() {
four_ints ? four_ints : some_int;
- // CHECK: [[COND:%.+]] = load <4 x i32>
- // CHECK: [[LHS:%.+]] = load <4 x i32>
- // CHECK: [[RHS:%.+]] = load i32
- // CHECK: [[RHS_SPLAT_INSERT:%.+]] = insertelement <4 x i32> poison, i32 [[RHS]], i64 0
- // CHECK: [[RHS_SPLAT:%.+]] = shufflevector <4 x i32> [[RHS_SPLAT_INSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
- // CHECK: [[NEG:%.+]] = icmp slt <4 x i32> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <4 x i1> [[NEG]] to <4 x i32>
- // CHECK: [[XOR:%.+]] = xor <4 x i32> [[SEXT]], splat (i32 -1)
- // CHECK: [[RHS_AND:%.+]] = and <4 x i32> [[RHS_SPLAT]], [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <4 x i32> [[LHS]], [[SEXT]]
- // CHECK: = or <4 x i32> [[RHS_AND]], [[LHS_AND]]
four_ints ? four_ints : 5;
- // CHECK: [[COND:%.+]] = load <4 x i32>
- // CHECK: [[LHS:%.+]] = load <4 x i32>
- // CHECK: [[NEG:%.+]] = icmp slt <4 x i32> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <4 x i1> [[NEG]] to <4 x i32>
- // CHECK: [[XOR:%.+]] = xor <4 x i32> [[SEXT]], splat (i32 -1)
- // CHECK: [[RHS_AND:%.+]] = and <4 x i32> splat (i32 5), [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <4 x i32> [[LHS]], [[SEXT]]
- // CHECK: = or <4 x i32> [[RHS_AND]], [[LHS_AND]]
four_ints ? four_floats : some_float;
- // CHECK: [[COND:%.+]] = load <4 x i32>
- // CHECK: [[LHS:%.+]] = load <4 x float>
- // CHECK: [[RHS:%.+]] = load float
- // CHECK: [[RHS_SPLAT_INSERT:%.+]] = insertelement <4 x float> poison, float [[RHS]], i64 0
- // CHECK: [[RHS_SPLAT:%.+]] = shufflevector <4 x float> [[RHS_SPLAT_INSERT]], <4 x float> poison, <4 x i32> zeroinitializer
- // CHECK: [[NEG:%.+]] = icmp slt <4 x i32> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <4 x i1> [[NEG]] to <4 x i32>
- // CHECK: [[XOR:%.+]] = xor <4 x i32> [[SEXT]], splat (i32 -1)
- // CHECK: [[RHS_CAST:%.+]] = bitcast <4 x float> [[RHS_SPLAT]] to <4 x i32>
- // CHECK: [[LHS_CAST:%.+]] = bitcast <4 x float> [[LHS]] to <4 x i32>
- // CHECK: [[RHS_AND:%.+]] = and <4 x i32> [[RHS_CAST]], [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <4 x i32> [[LHS_CAST]], [[SEXT]]
- // CHECK: = or <4 x i32> [[RHS_AND]], [[LHS_AND]]
four_ll ? four_doubles : 6.0;
- // CHECK: [[COND:%.+]] = load <4 x i64>
- // CHECK: [[LHS:%.+]] = load <4 x double>
- // CHECK: [[NEG:%.+]] = icmp slt <4 x i64> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <4 x i1> [[NEG]] to <4 x i64>
- // CHECK: [[XOR:%.+]] = xor <4 x i64> [[SEXT]], splat (i64 -1)
- // CHECK: [[LHS_CAST:%.+]] = bitcast <4 x double> [[LHS]] to <4 x i64>
- // CHECK: [[RHS_AND:%.+]] = and <4 x i64> splat (i64 4618441417868443648), [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <4 x i64> [[LHS_CAST]], [[SEXT]]
- // CHECK: = or <4 x i64> [[RHS_AND]], [[LHS_AND]]
four_ll ? four_ll : 6;
- // CHECK: [[COND:%.+]] = load <4 x i64>
- // CHECK: [[LHS:%.+]] = load <4 x i64>
- // CHECK: [[NEG:%.+]] = icmp slt <4 x i64> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <4 x i1> [[NEG]] to <4 x i64>
- // CHECK: [[XOR:%.+]] = xor <4 x i64> [[SEXT]], splat (i64 -1)
- // CHECK: [[RHS_AND:%.+]] = and <4 x i64> splat (i64 6), [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <4 x i64> [[LHS]], [[SEXT]]
- // CHECK: [[OR:%.+]] = or <4 x i64> [[RHS_AND]], [[LHS_AND]]
four_ll ? four_ll : some_int;
- // CHECK: [[COND:%.+]] = load <4 x i64>
- // CHECK: [[LHS:%.+]] = load <4 x i64>
- // CHECK: [[RHS:%.+]] = load i32
- // CHECK: [[RHS_CONV:%.+]] = sext i32 [[RHS]] to i64
- // CHECK: [[RHS_SPLAT_INSERT:%.+]] = insertelement <4 x i64> poison, i64 [[RHS_CONV]], i64 0
- // CHECK: [[RHS_SPLAT:%.+]] = shufflevector <4 x i64> [[RHS_SPLAT_INSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
- // CHECK: [[NEG:%.+]] = icmp slt <4 x i64> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <4 x i1> [[NEG]] to <4 x i64>
- // CHECK: [[XOR:%.+]] = xor <4 x i64> [[SEXT]], splat (i64 -1)
- // CHECK: [[RHS_AND:%.+]] = and <4 x i64> [[RHS_SPLAT]], [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <4 x i64> [[LHS]], [[SEXT]]
- // CHECK: [[OR:%.+]] = or <4 x i64> [[RHS_AND]], [[LHS_AND]]
four_ll ? four_ll : some_ll;
- // CHECK: [[COND:%.+]] = load <4 x i64>
- // CHECK: [[LHS:%.+]] = load <4 x i64>
- // CHECK: [[RHS:%.+]] = load i64
- // CHECK: [[RHS_SPLAT_INSERT:%.+]] = insertelement <4 x i64> poison, i64 [[RHS]], i64 0
- // CHECK: [[RHS_SPLAT:%.+]] = shufflevector <4 x i64> [[RHS_SPLAT_INSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
- // CHECK: [[NEG:%.+]] = icmp slt <4 x i64> [[COND]], zeroinitializer
- // CHECK: [[SEXT:%.+]] = sext <4 x i1> [[NEG]] to <4 x i64>
- // CHECK: [[XOR:%.+]] = xor <4 x i64> [[SEXT]], splat (i64 -1)
- // CHECK: [[RHS_AND:%.+]] = and <4 x i64> [[RHS_SPLAT]], [[XOR]]
- // CHECK: [[LHS_AND:%.+]] = and <4 x i64> [[LHS]], [[SEXT]]
- // CHECK: [[OR:%.+]] = or <4 x i64> [[RHS_AND]], [[LHS_AND]]
}
diff --git a/clang/test/CodeGenCXX/int64_uint64.cpp b/clang/test/CodeGenCXX/int64_uint64.cpp
index f4fd9ea..8046ea9 100644
--- a/clang/test/CodeGenCXX/int64_uint64.cpp
+++ b/clang/test/CodeGenCXX/int64_uint64.cpp
@@ -6,6 +6,14 @@
// RUN: -target-feature +neon \
// RUN: -emit-llvm -w -O1 -o - %s | FileCheck --check-prefix=CHECK-AARCH64 %s
+// RUN: %clang_cc1 -triple arm-linux-gnueabi \
+// RUN: -target-cpu cortex-a8 -fexperimental-new-constant-interpreter \
+// RUN: -emit-llvm -w -O1 -o - %s | FileCheck --check-prefix=CHECK-ARM %s
+
+// RUN: %clang_cc1 -triple arm64-linux-gnueabi \
+// RUN: -target-feature +neon -fexperimental-new-constant-interpreter \
+// RUN: -emit-llvm -w -O1 -o - %s | FileCheck --check-prefix=CHECK-AARCH64 %s
+
// REQUIRES: aarch64-registered-target || arm-registered-target
// Test if int64_t and uint64_t can be correctly mangled.
diff --git a/clang/test/CodeGenCXX/debug-info-lambda-this.cpp b/clang/test/CodeGenCXX/lambda-this-2.cpp
index e5acab1..e5acab1 100644
--- a/clang/test/CodeGenCXX/debug-info-lambda-this.cpp
+++ b/clang/test/CodeGenCXX/lambda-this-2.cpp
diff --git a/clang/test/CodeGenCXX/mangle-ms-cxx11.cpp b/clang/test/CodeGenCXX/mangle-ms-cxx11.cpp
index 312c70cc..44f4436 100644
--- a/clang/test/CodeGenCXX/mangle-ms-cxx11.cpp
+++ b/clang/test/CodeGenCXX/mangle-ms-cxx11.cpp
@@ -358,3 +358,42 @@ struct s { enum {}; enum {}; };
// DBG-DAG: DW_TAG_enumeration_type{{.*}}identifier: ".?AW4<unnamed-type-$S3>@s@pr37723@@"
s x;
}
+
+namespace InconsistentTagKinds {
+ namespace t1 {
+ class A;
+ struct A;
+ void f(A*) {}
+ // CHECK-DAG: @"?f@t1@InconsistentTagKinds@@YAXPAVA@12@@Z"
+ } // namespace t1
+ namespace t2 {
+ struct A;
+ class A;
+ void f(A*) {}
+ // CHECK-DAG: @"?f@t2@InconsistentTagKinds@@YAXPAUA@12@@Z"
+ } // namespace t2
+ namespace t3 {
+ class A {};
+ struct A;
+ void f(A*) {}
+ // CHECK-DAG: @"?f@t3@InconsistentTagKinds@@YAXPAVA@12@@Z"
+ } // namespace t3
+ namespace t4 {
+ struct A {};
+ class A;
+ void f(A*) {}
+ // CHECK-DAG: @"?f@t4@InconsistentTagKinds@@YAXPAUA@12@@Z"
+ } // namespace t4
+ namespace t5 {
+ class A;
+ struct A {};
+ void f(A*) {}
+ // CHECK-DAG: @"?f@t5@InconsistentTagKinds@@YAXPAUA@12@@Z"
+ } // namespace t5
+ namespace t6 {
+ struct A;
+ class A {};
+ void f(A*) {}
+ // CHECK-DAG: @"?f@t6@InconsistentTagKinds@@YAXPAVA@12@@Z"
+ } // namespace t6
+} // namespace InconsistentTagKinds
diff --git a/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp b/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp
index 5e2403b..a60781c 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp
+++ b/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp
@@ -277,13 +277,13 @@ void f() {
// WIN32-LIFETIME-LABEL: define dso_local void @"?f@lifetime_marker@@YAXXZ"()
// WIN32-LIFETIME: %[[c:.*]] = alloca %"struct.lifetime_marker::C"
-// WIN32-LIFETIME: call void @llvm.lifetime.start.p0(i64 1, ptr %c)
+// WIN32-LIFETIME: call void @llvm.lifetime.start.p0(ptr %c)
// WIN32-LIFETIME: invoke void @"?g@lifetime_marker@@YAXXZ"()
// WIN32-LIFETIME-NEXT: to label %[[cont:[^ ]*]] unwind label %[[lpad0:[^ ]*]]
//
// WIN32-LIFETIME: [[cont]]
// WIN32-LIFETIME: call x86_thiscallcc void @"??1C@lifetime_marker@@QAE@XZ"({{.*}})
-// WIN32-LIFETIME: call void @llvm.lifetime.end.p0(i64 1, ptr %[[c]])
+// WIN32-LIFETIME: call void @llvm.lifetime.end.p0(ptr %[[c]])
//
// WIN32-LIFETIME: [[lpad0]]
// WIN32-LIFETIME-NEXT: cleanuppad
@@ -292,7 +292,7 @@ void f() {
//
// WIN32-LIFETIME: [[lpad1]]
// WIN32-LIFETIME-NEXT: cleanuppad
-// WIN32-LIFETIME: call void @llvm.lifetime.end.p0(i64 1, ptr %[[c]])
+// WIN32-LIFETIME: call void @llvm.lifetime.end.p0(ptr %[[c]])
}
struct class_2 {
diff --git a/clang/test/CodeGenCXX/modules-vtable.cppm b/clang/test/CodeGenCXX/modules-vtable.cppm
index 6589b9f..75f7598 100644
--- a/clang/test/CodeGenCXX/modules-vtable.cppm
+++ b/clang/test/CodeGenCXX/modules-vtable.cppm
@@ -1,4 +1,4 @@
-// REQUIRES: !system-windows
+// REQUIRES: !system-windows, !system-cygwin
// RUN: rm -rf %t
// RUN: split-file %s %t
diff --git a/clang/test/CodeGenCXX/pr70585.cppm b/clang/test/CodeGenCXX/pr70585.cppm
index ad4e135..d44a4f4 100644
--- a/clang/test/CodeGenCXX/pr70585.cppm
+++ b/clang/test/CodeGenCXX/pr70585.cppm
@@ -1,4 +1,4 @@
-// REQUIRES: !system-windows
+// REQUIRES: !system-windows, !system-cygwin
// RUN: rm -rf %t
// RUN: split-file %s %t
diff --git a/clang/test/CodeGenCXX/ptrauth-explicit-vtable-pointer-control.cpp b/clang/test/CodeGenCXX/ptrauth-explicit-vtable-pointer-control.cpp
index 1b10371..e33525c 100644
--- a/clang/test/CodeGenCXX/ptrauth-explicit-vtable-pointer-control.cpp
+++ b/clang/test/CodeGenCXX/ptrauth-explicit-vtable-pointer-control.cpp
@@ -1,31 +1,31 @@
-// RUN: %clang_cc1 %s -x c++ -std=c++11 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics \
+// RUN: %clang_cc1 %s -x c++ -std=c++20 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics \
// RUN: -emit-llvm -o - | FileCheck --check-prefixes=CHECK,NODISC %s
-// RUN: %clang_cc1 %s -x c++ -std=c++11 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics \
+// RUN: %clang_cc1 %s -x c++ -std=c++20 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics \
// RUN: -fptrauth-vtable-pointer-type-discrimination \
// RUN: -emit-llvm -o - | FileCheck --check-prefixes=CHECK,TYPE %s
-// RUN: %clang_cc1 %s -x c++ -std=c++11 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics \
+// RUN: %clang_cc1 %s -x c++ -std=c++20 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics \
// RUN: -fptrauth-vtable-pointer-address-discrimination \
// RUN: -emit-llvm -o - | FileCheck --check-prefixes=CHECK,ADDR %s
-// RUN: %clang_cc1 %s -x c++ -std=c++11 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics \
+// RUN: %clang_cc1 %s -x c++ -std=c++20 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics \
// RUN: -fptrauth-vtable-pointer-type-discrimination \
// RUN: -fptrauth-vtable-pointer-address-discrimination \
// RUN: -emit-llvm -o - | FileCheck --check-prefixes=CHECK,BOTH %s
-// RUN: %clang_cc1 %s -x c++ -std=c++11 -triple aarch64-linux-gnu -fptrauth-calls -fptrauth-intrinsics \
+// RUN: %clang_cc1 %s -x c++ -std=c++20 -triple aarch64-linux-gnu -fptrauth-calls -fptrauth-intrinsics \
// RUN: -emit-llvm -o - | FileCheck --check-prefixes=CHECK,NODISC %s
-// RUN: %clang_cc1 %s -x c++ -std=c++11 -triple aarch64-linux-gnu -fptrauth-calls -fptrauth-intrinsics \
+// RUN: %clang_cc1 %s -x c++ -std=c++20 -triple aarch64-linux-gnu -fptrauth-calls -fptrauth-intrinsics \
// RUN: -fptrauth-vtable-pointer-type-discrimination \
// RUN: -emit-llvm -o - | FileCheck --check-prefixes=CHECK,TYPE %s
-// RUN: %clang_cc1 %s -x c++ -std=c++11 -triple aarch64-linux-gnu -fptrauth-calls -fptrauth-intrinsics \
+// RUN: %clang_cc1 %s -x c++ -std=c++20 -triple aarch64-linux-gnu -fptrauth-calls -fptrauth-intrinsics \
// RUN: -fptrauth-vtable-pointer-address-discrimination \
// RUN: -emit-llvm -o - | FileCheck --check-prefixes=CHECK,ADDR %s
-// RUN: %clang_cc1 %s -x c++ -std=c++11 -triple aarch64-linux-gnu -fptrauth-calls -fptrauth-intrinsics \
+// RUN: %clang_cc1 %s -x c++ -std=c++20 -triple aarch64-linux-gnu -fptrauth-calls -fptrauth-intrinsics \
// RUN: -fptrauth-vtable-pointer-type-discrimination \
// RUN: -fptrauth-vtable-pointer-address-discrimination \
// RUN: -emit-llvm -o - | FileCheck --check-prefixes=CHECK,BOTH %s
@@ -78,6 +78,27 @@ struct authenticated(default_key, default_address_discrimination, custom_discrim
virtual void g();
};
+// CHECK: @_ZTVN5test19ConstEvalE = external unnamed_addr constant { [3 x ptr] }, align 8
+// CHECK: @_ZN5test12ceE = global %{{.*}} { ptr ptrauth (ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTVN5test19ConstEvalE, i32 0, i32 0, i32 2), i32 2, i64 0, ptr @_ZN5test12ceE) }, align 8
+// CHECK: @_ZTVN5test116ConstEvalDerivedE = linkonce_odr unnamed_addr constant { [3 x ptr] } { [3 x ptr] [ptr null, ptr @_ZTIN5test116ConstEvalDerivedE, ptr ptrauth (ptr @_ZN5test19ConstEval1fEv, i32 0, i64 26259, ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTVN5test116ConstEvalDerivedE, i32 0, i32 0, i32 2))] },{{.*}}align 8
+// CHECK: @_ZN5test13cedE = global { ptr } { ptr ptrauth (ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTVN5test116ConstEvalDerivedE, i32 0, i32 0, i32 2), i32 2, i64 0, ptr @_ZN5test13cedE) }, align 8
+
+struct authenticated(default_key, address_discrimination, no_extra_discrimination) ConstEval {
+ consteval ConstEval() {}
+ virtual void f();
+};
+
+// clang used to bail out with the error message "could not emit constant value abstractly".
+ConstEval ce;
+
+struct ConstEvalDerived : public ConstEval {
+public:
+ consteval ConstEvalDerived() {}
+};
+
+// clang used to emit an undef initializer.
+ConstEvalDerived ced;
+
template <typename T>
struct SubClass : T {
virtual void g();
diff --git a/clang/test/CodeGenCXX/sret_cast_with_nonzero_alloca_as.cpp b/clang/test/CodeGenCXX/sret_cast_with_nonzero_alloca_as.cpp
index 320c712..a1a6ada 100644
--- a/clang/test/CodeGenCXX/sret_cast_with_nonzero_alloca_as.cpp
+++ b/clang/test/CodeGenCXX/sret_cast_with_nonzero_alloca_as.cpp
@@ -10,16 +10,15 @@ struct X { int z[17]; };
// CHECK-NEXT: [[Y_ADDR:%.*]] = alloca i8, align 1, addrspace(5)
// CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X_ADDR]] to ptr
// CHECK-NEXT: [[Y_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[Y_ADDR]] to ptr
+// CHECK-NEXT: [[AGG_RESULT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AGG_RESULT]] to ptr
// CHECK-NEXT: store i8 [[X]], ptr [[X_ADDR_ASCAST]], align 1
// CHECK-NEXT: store i8 [[Y]], ptr [[Y_ADDR_ASCAST]], align 1
// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[X_ADDR_ASCAST]], align 1
-// CHECK-NEXT: [[AGG_RESULT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AGG_RESULT]] to ptr
// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[AGG_RESULT_ASCAST]], i64 1
// CHECK-NEXT: store i8 [[TMP0]], ptr [[ADD_PTR]], align 1
// CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[Y_ADDR_ASCAST]], align 1
-// CHECK-NEXT: [[AGG_RESULT_ASCAST1:%.*]] = addrspacecast ptr addrspace(5) [[AGG_RESULT]] to ptr
-// CHECK-NEXT: [[ADD_PTR2:%.*]] = getelementptr inbounds i8, ptr [[AGG_RESULT_ASCAST1]], i64 2
-// CHECK-NEXT: store i8 [[TMP1]], ptr [[ADD_PTR2]], align 1
+// CHECK-NEXT: [[ADD_PTR1:%.*]] = getelementptr inbounds i8, ptr [[AGG_RESULT_ASCAST]], i64 2
+// CHECK-NEXT: store i8 [[TMP1]], ptr [[ADD_PTR1]], align 2
// CHECK-NEXT: ret void
//
X foo(char x, char y) {
diff --git a/clang/test/CodeGenCXX/stack-reuse-exceptions.cpp b/clang/test/CodeGenCXX/stack-reuse-exceptions.cpp
index e036bca..27123a6 100644
--- a/clang/test/CodeGenCXX/stack-reuse-exceptions.cpp
+++ b/clang/test/CodeGenCXX/stack-reuse-exceptions.cpp
@@ -20,32 +20,32 @@ struct NontrivialDtor {
// CHECK-LABEL: define{{.*}} void @_Z33cleanupsAreEmittedWithoutTryCatchv
void cleanupsAreEmittedWithoutTryCatch() {
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[CLEAN:.*]])
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T1:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[CLEAN:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T1:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CONT:[^ ]+]] unwind label %[[LPAD:[^ ]+]]
//
// CHECK: [[CONT]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T1]])
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T2:.*]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T1]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T2:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CONT2:[^ ]+]] unwind label %[[LPAD2:.+]]
//
// CHECK: [[CONT2]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T2]])
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[CLEAN]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T2]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[CLEAN]])
// CHECK: ret void
//
// CHECK: [[LPAD]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T1]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T1]])
// CHECK: br label %[[EHCLEANUP:.+]]
//
// CHECK: [[LPAD2]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T2]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T2]])
// CHECK: br label %[[EHCLEANUP]]
//
// CHECK: [[EHCLEANUP]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[CLEAN]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[CLEAN]])
NontrivialDtor clean;
@@ -55,27 +55,27 @@ void cleanupsAreEmittedWithoutTryCatch() {
// CHECK-LABEL: define{{.*}} void @_Z30cleanupsAreEmittedWithTryCatchv
void cleanupsAreEmittedWithTryCatch() {
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[CLEAN:.*]])
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T1:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[CLEAN:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T1:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CONT:[^ ]+]] unwind label %[[LPAD:[^ ]+]]
//
// CHECK: [[CONT]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T1]])
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T2:.*]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T1]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T2:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CONT2:[^ ]+]] unwind label %[[LPAD2:.+]]
//
// CHECK: [[CONT2]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T2]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T2]])
// CHECK: br label %[[TRY_CONT:.+]]
//
// CHECK: [[LPAD]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T1]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T1]])
// CHECK: br label %[[CATCH:.+]]
//
// CHECK: [[LPAD2]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T2]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T2]])
// CHECK: br label %[[CATCH]]
//
// CHECK: [[CATCH]]:
@@ -84,13 +84,13 @@ void cleanupsAreEmittedWithTryCatch() {
// CHECK-NEXT: to label %[[TRY_CONT]] unwind label %[[OUTER_LPAD:.+]]
//
// CHECK: [[TRY_CONT]]:
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T_OUTER:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T_OUTER:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[OUTER_CONT:[^ ]+]] unwind label %[[OUTER_LPAD2:.+]]
//
// CHECK: [[OUTER_CONT]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T_OUTER]])
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[CLEAN]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T_OUTER]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[CLEAN]])
// CHECK: ret void
//
// CHECK: [[OUTER_LPAD]]:
@@ -98,11 +98,11 @@ void cleanupsAreEmittedWithTryCatch() {
// CHECK: br label %[[EHCLEANUP:.+]]
//
// CHECK: [[OUTER_LPAD2]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T_OUTER]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T_OUTER]])
// CHECK: br label %[[EHCLEANUP]]
//
// CHECK: [[EHCLEANUP]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[CLEAN]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[CLEAN]])
NontrivialDtor clean;
@@ -116,44 +116,44 @@ void cleanupsAreEmittedWithTryCatch() {
// CHECK-LABEL: define{{.*}} void @_Z39cleanupInTryHappensBeforeCleanupInCatchv
void cleanupInTryHappensBeforeCleanupInCatch() {
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T1:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T1:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CONT:[^ ]+]] unwind label %[[LPAD:[^ ]+]]
//
// CHECK: [[CONT]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T1]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T1]])
// CHECK: br label %[[TRY_CONT]]
//
// CHECK: [[LPAD]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T1]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T1]])
// CHECK: br i1 {{[^,]+}}, label %[[CATCH_INT_MATCH:[^,]+]], label %[[CATCH_ALL:.+]]
//
// CHECK: [[CATCH_INT_MATCH]]:
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T2:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T2:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CATCH_INT_CONT:[^ ]+]] unwind label %[[CATCH_INT_LPAD:[^ ]+]]
//
// CHECK: [[CATCH_INT_CONT]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T2]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T2]])
// CHECK: br label %[[TRY_CONT]]
//
// CHECK: [[TRY_CONT]]:
// CHECK: ret void
//
// CHECK: [[CATCH_ALL]]:
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T3:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T3:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CATCH_ALL_CONT:[^ ]+]] unwind label %[[CATCH_ALL_LPAD:[^ ]+]]
//
// CHECK: [[CATCH_ALL_CONT]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T3]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T3]])
// CHECK: br label %[[TRY_CONT]]
//
// CHECK: [[CATCH_ALL_LPAD]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T3]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T3]])
//
// CHECK: [[CATCH_INT_LPAD]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T2]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T2]])
// CHECK-NOT: call void @llvm.lifetime
try {
diff --git a/clang/test/CodeGenCXX/stack-reuse-miscompile.cpp b/clang/test/CodeGenCXX/stack-reuse-miscompile.cpp
index 50a8d16..67fa9f9 100644
--- a/clang/test/CodeGenCXX/stack-reuse-miscompile.cpp
+++ b/clang/test/CodeGenCXX/stack-reuse-miscompile.cpp
@@ -28,12 +28,12 @@ const char * f(S s)
//
// FIXME: We could defer starting the lifetime of the return object of concat
// until the call.
-// CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr [[T1]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr [[T1]])
//
-// CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr [[T2]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr [[T2]])
// CHECK: [[T4:%.*]] = call noundef ptr @_ZN1TC1EPKc(ptr {{[^,]*}} [[T2]], ptr noundef @.str)
//
-// CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr [[T3]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr [[T3]])
// CHECK: [[T5:%.*]] = call noundef ptr @_ZN1TC1E1S(ptr {{[^,]*}} [[T3]], [2 x i32] %{{.*}})
//
// CHECK: call void @_ZNK1T6concatERKS_(ptr dead_on_unwind writable sret(%class.T) align 4 [[T1]], ptr {{[^,]*}} [[T2]], ptr noundef nonnull align 4 dereferenceable(16) [[T3]])
diff --git a/clang/test/CodeGenHIP/hip-cumode.hip b/clang/test/CodeGenHIP/hip-cumode.hip
index 1aa1ca7..61fd53c 100644
--- a/clang/test/CodeGenHIP/hip-cumode.hip
+++ b/clang/test/CodeGenHIP/hip-cumode.hip
@@ -5,14 +5,20 @@
// RUN: %clang -S -o - --offload-arch=gfx906 --cuda-device-only -nogpuinc -nogpulib -mcumode \
// RUN: %s 2>&1 | FileCheck --check-prefix=NOWGP %s
// RUN: %clang -S -o - --offload-arch=gfx906 --cuda-device-only -nogpuinc -nogpulib -mno-cumode \
-// RUN: %s 2>&1 | FileCheck --check-prefixes=NOWGP,WARN-CUMODE %s
+// RUN: %s 2>&1 | FileCheck -DOFFLOAD_ARCH=gfx906 --check-prefixes=NOWGP,WARN-CUMODE %s
// RUN: %clang -S -o - --offload-arch=gfx1030 --cuda-device-only -nogpuinc -nogpulib \
// RUN: %s 2>&1 | FileCheck --check-prefix=CUMODE-OFF %s
// RUN: %clang -S -o - --offload-arch=gfx1030 --cuda-device-only -nogpuinc -nogpulib -mcumode \
// RUN: %s 2>&1 | FileCheck --check-prefix=CUMODE-ON %s
// RUN: %clang -S -o - --offload-arch=gfx1030 --cuda-device-only -nogpuinc -nogpulib -mno-cumode \
// RUN: %s 2>&1 | FileCheck --check-prefix=CUMODE-OFF %s
-// WARN-CUMODE: warning: ignoring '-mno-cumode' option as it is not currently supported for processor 'gfx906' [-Woption-ignored]
+// RUN: %clang -S -o - --offload-arch=gfx1250 --cuda-device-only -nogpuinc -nogpulib \
+// RUN: %s 2>&1 | FileCheck --check-prefix=NOWGP %s
+// RUN: %clang -S -o - --offload-arch=gfx1250 --cuda-device-only -nogpuinc -nogpulib -mcumode \
+// RUN: %s 2>&1 | FileCheck --check-prefix=NOWGP %s
+// RUN: %clang -S -o - --offload-arch=gfx1250 --cuda-device-only -nogpuinc -nogpulib -mno-cumode \
+// RUN: %s 2>&1 | FileCheck -DOFFLOAD_ARCH=gfx1250 --check-prefixes=NOWGP,WARN-CUMODE %s
+// WARN-CUMODE: warning: ignoring '-mno-cumode' option as it is not currently supported for processor '[[OFFLOAD_ARCH]]' [-Woption-ignored]
// NOWGP-NOT: .amdhsa_workgroup_processor_mode
// CUMODE-ON: .amdhsa_workgroup_processor_mode 0
// CUMODE-OFF: .amdhsa_workgroup_processor_mode 1
diff --git a/clang/test/CodeGenHIP/store-addr-space.hip b/clang/test/CodeGenHIP/store-addr-space.hip
new file mode 100644
index 0000000..6103edb
--- /dev/null
+++ b/clang/test/CodeGenHIP/store-addr-space.hip
@@ -0,0 +1,46 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --functions "bar" --version 5
+// REQUIRES: amdgpu-registered-target
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -x hip -emit-llvm -fcuda-is-device \
+// RUN: -o - %s | FileCheck --check-prefix=AMDGCN --enable-var-scope %s
+
+struct Foo {
+ unsigned long long val;
+//
+ __attribute__((device)) inline Foo() { val = 0; }
+ __attribute__((device)) inline Foo(const Foo &src) { val = src.val; }
+ __attribute__((device)) inline Foo(const volatile Foo &src) { val = src.val; }
+};
+
+// AMDGCN-LABEL: define dso_local void @_Z3barPK3Foo(
+// AMDGCN-SAME: ptr addrspace(5) dead_on_unwind noalias writable sret([[STRUCT_FOO:%.*]]) align 8 [[AGG_RESULT:%.*]], ptr noundef [[SRC_PTR:%.*]]) #[[ATTR0:[0-9]+]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[RESULT_PTR:%.*]] = alloca ptr addrspace(5), align 4, addrspace(5)
+// AMDGCN-NEXT: [[SRC_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// AMDGCN-NEXT: [[DST:%.*]] = alloca [[UNION_ANON:%.*]], align 8, addrspace(5)
+// AMDGCN-NEXT: [[RESULT_PTR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT_PTR]] to ptr
+// AMDGCN-NEXT: [[SRC_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SRC_PTR_ADDR]] to ptr
+// AMDGCN-NEXT: [[AGG_RESULT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AGG_RESULT]] to ptr
+// AMDGCN-NEXT: [[DST_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DST]] to ptr
+// AMDGCN-NEXT: store ptr addrspace(5) [[AGG_RESULT]], ptr [[RESULT_PTR_ASCAST]], align 4
+// AMDGCN-NEXT: store ptr [[SRC_PTR]], ptr [[SRC_PTR_ADDR_ASCAST]], align 8
+// AMDGCN-NEXT: call void @_ZN3FooC1Ev(ptr noundef nonnull align 8 dereferenceable(8) [[AGG_RESULT_ASCAST]]) #[[ATTR1:[0-9]+]]
+// AMDGCN-NEXT: store ptr [[AGG_RESULT_ASCAST]], ptr [[DST_ASCAST]], align 8
+// AMDGCN-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SRC_PTR_ADDR_ASCAST]], align 8
+// AMDGCN-NEXT: [[VAL:%.*]] = getelementptr inbounds nuw [[STRUCT_FOO]], ptr [[TMP0]], i32 0, i32 0
+// AMDGCN-NEXT: [[TMP1:%.*]] = load i64, ptr [[VAL]], align 8
+// AMDGCN-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DST_ASCAST]], align 8
+// AMDGCN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 0
+// AMDGCN-NEXT: store i64 [[TMP1]], ptr [[ARRAYIDX]], align 8
+// AMDGCN-NEXT: ret void
+//
+__attribute__((device)) Foo bar(const Foo *const src_ptr) {
+ Foo result;
+
+ union {
+ Foo* const ptr;
+ unsigned long long * const ptr64;
+ } dst = {&result};
+
+ dst.ptr64[0] = src_ptr->val;
+ return result;
+}
diff --git a/clang/test/CodeGenHLSL/builtins/dot2add.hlsl b/clang/test/CodeGenHLSL/builtins/dot2add.hlsl
index c345e17..7c3a48e 100644
--- a/clang/test/CodeGenHLSL/builtins/dot2add.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/dot2add.hlsl
@@ -1,5 +1,5 @@
// RUN: %clang_cc1 -finclude-default-header -fnative-half-type -triple \
-// RUN: dxil-pc-shadermodel6.3-compute %s -emit-llvm -o - | \
+// RUN: dxil-pc-shadermodel6.4-compute %s -emit-llvm -o - | \
// RUN: FileCheck %s --check-prefixes=CHECK,CHECK-DXIL
// RUN: %clang_cc1 -finclude-default-header -fnative-half-type -triple \
// RUN: spirv-pc-vulkan-compute %s -emit-llvm -o - | \
diff --git a/clang/test/CodeGenHLSL/builtins/hlsl_resource_t.hlsl b/clang/test/CodeGenHLSL/builtins/hlsl_resource_t.hlsl
index 24114b1..75d9fb8 100644
--- a/clang/test/CodeGenHLSL/builtins/hlsl_resource_t.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/hlsl_resource_t.hlsl
@@ -2,23 +2,28 @@
using handle_float_t = __hlsl_resource_t [[hlsl::resource_class(UAV)]] [[hlsl::contained_type(float)]];
+struct CustomResource {
+ handle_float_t h;
+};
+
+// CHECK: %struct.CustomResource = type { target("dx.TypedBuffer", float, 1, 0, 0) }
// CHECK: %"class.hlsl::RWBuffer" = type { target("dx.TypedBuffer", <4 x float>, 1, 0, 0)
// CHECK: %"class.hlsl::StructuredBuffer" = type { target("dx.RawBuffer", %struct.MyStruct, 0, 0)
// CHECK: %struct.MyStruct = type <{ <4 x float>, <2 x i32> }>
-// CHECK: define hidden void @_Z2faU9_Res_u_CTfu17__hlsl_resource_t(target("dx.TypedBuffer", float, 1, 0, 0) %a)
-// CHECK: call void @_Z4foo1U9_Res_u_CTfu17__hlsl_resource_t(target("dx.TypedBuffer", float, 1, 0, 0) %0)
-// CHECK: declare hidden void @_Z4foo1U9_Res_u_CTfu17__hlsl_resource_t(target("dx.TypedBuffer", float, 1, 0, 0))
+// CHECK: define hidden void @_Z2fa14CustomResource(ptr noundef byval(%struct.CustomResource) align 1 %a)
+// CHECK: call void @_Z4foo114CustomResource(ptr noundef byval(%struct.CustomResource) align 1 %agg.tmp)
+// CHECK: declare hidden void @_Z4foo114CustomResource(ptr noundef byval(%struct.CustomResource) align 1)
-void foo1(handle_float_t res);
+void foo1(CustomResource res);
-void fa(handle_float_t a) {
+void fa(CustomResource a) {
foo1(a);
}
-// CHECK: define hidden void @_Z2fbU9_Res_u_CTfu17__hlsl_resource_t(target("dx.TypedBuffer", float, 1, 0, 0) %a)
-void fb(handle_float_t a) {
- handle_float_t b = a;
+// CHECK: define hidden void @_Z2fb14CustomResource(ptr noundef byval(%struct.CustomResource) align 1 %a)
+void fb(CustomResource a) {
+ CustomResource b = a;
}
// CHECK: define hidden void @_Z2fcN4hlsl8RWBufferIDv4_fEE(ptr noundef byval(%"class.hlsl::RWBuffer") align 4 %a)
diff --git a/clang/test/CodeGenHLSL/convergence/global_array.hlsl b/clang/test/CodeGenHLSL/convergence/global_array.hlsl
index 030ba48..c594e3a 100644
--- a/clang/test/CodeGenHLSL/convergence/global_array.hlsl
+++ b/clang/test/CodeGenHLSL/convergence/global_array.hlsl
@@ -6,9 +6,15 @@
// CHECK: [[loop_entry]]:
// CHECK: [[loop_token:%.*]] = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token [[entry_token]]) ]
-// CHECK: call void {{.*}} [ "convergencectrl"(token [[loop_token]]) ]
+// CHECK: call spir_func void {{.*}} [ "convergencectrl"(token [[loop_token]]) ]
// CHECK: br i1 {{%.*}} label {{%.*}} label %[[loop_entry]]
-RWBuffer<float> e[2];
+
+struct S {
+ int i;
+ S() { i = 10; }
+};
+
+static S s[2];
[numthreads(4,1,1)]
void main() {
diff --git a/clang/test/CodeGenHLSL/resources/ByteAddressBuffers-constructors.hlsl b/clang/test/CodeGenHLSL/resources/ByteAddressBuffers-constructors.hlsl
index 3a8d2c0..5db156e 100644
--- a/clang/test/CodeGenHLSL/resources/ByteAddressBuffers-constructors.hlsl
+++ b/clang/test/CodeGenHLSL/resources/ByteAddressBuffers-constructors.hlsl
@@ -70,7 +70,7 @@ export void foo() {
// CHECK: define linkonce_odr hidden void @_ZN4hlsl17ByteAddressBufferC2EjjijPKc(ptr noundef nonnull align 4 dereferenceable(4) %this,
// CHECK-SAME: i32 noundef %registerNo, i32 noundef %spaceNo, i32 noundef %range, i32 noundef %index, ptr noundef %name)
// CHECK-DXIL: %[[HANDLE:.*]] = call target("dx.RawBuffer", i8, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i8_0_0t(
-// CHECK-DXIL-SAME: i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 false, ptr %{{.*}})
+// CHECK-DXIL-SAME: i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, ptr %{{.*}})
// CHECK-NEXT: %__handle = getelementptr inbounds nuw %"class.hlsl::ByteAddressBuffer", ptr %{{.*}}, i32 0, i32 0
// CHECK-DXIL-NEXT: store target("dx.RawBuffer", i8, 0, 0) %[[HANDLE]], ptr %__handle, align 4
@@ -79,7 +79,7 @@ export void foo() {
// CHECK: define linkonce_odr hidden void @_ZN4hlsl19RWByteAddressBufferC2EjijjPKc(ptr noundef nonnull align 4 dereferenceable(4) %this,
// CHECK-SAME: i32 noundef %spaceNo, i32 noundef %range, i32 noundef %index, i32 noundef %orderId, ptr noundef %name)
// CHECK: %[[HANDLE:.*]] = call target("dx.RawBuffer", i8, 1, 0) @llvm.dx.resource.handlefromimplicitbinding.tdx.RawBuffer_i8_1_0t
-// CHECK-SAME: (i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 false, ptr %{{.*}})
+// CHECK-SAME: (i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, ptr %{{.*}})
// CHECK-NEXT: %__handle = getelementptr inbounds nuw %"class.hlsl::RWByteAddressBuffer", ptr %this1, i32 0, i32 0
// CHECK-NEXT: store target("dx.RawBuffer", i8, 1, 0) %[[HANDLE]], ptr %__handle, align 4
diff --git a/clang/test/CodeGenHLSL/resources/RWBuffer-constructor.hlsl b/clang/test/CodeGenHLSL/resources/RWBuffer-constructor.hlsl
index 1144689..1274984 100644
--- a/clang/test/CodeGenHLSL/resources/RWBuffer-constructor.hlsl
+++ b/clang/test/CodeGenHLSL/resources/RWBuffer-constructor.hlsl
@@ -63,11 +63,11 @@ export void foo() {
// CHECK: call void @_ZN4hlsl8RWBufferIiEC2Ev(ptr noundef nonnull align 4 dereferenceable(4) %{{.*}})
// Buf1 initialization part 3 - body of RWBuffer<float> C2 constructor with explicit binding that initializes
-// handle with @llvm.dx.resource.handlefrombinding
+// handle with @llvm.dx.resource.handlefrombinding
// CHECK: define linkonce_odr hidden void @_ZN4hlsl8RWBufferIfEC2EjjijPKc(ptr noundef nonnull align 4 dereferenceable(4) %this,
// CHECK-SAME: i32 noundef %registerNo, i32 noundef %spaceNo, i32 noundef %range, i32 noundef %index, ptr noundef %name)
// CHECK-DXIL: %[[HANDLE:.*]] = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.TypedBuffer_f32_1_0_0t(
-// CHECK-DXIL-SAME: i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 false, ptr %{{.*}})
+// CHECK-DXIL-SAME: i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, ptr %{{.*}})
// CHECK-NEXT: %__handle = getelementptr inbounds nuw %"class.hlsl::RWBuffer", ptr %{{.*}}, i32 0, i32 0
// CHECK-DXIL-NEXT: store target("dx.TypedBuffer", float, 1, 0, 0) %[[HANDLE]], ptr %__handle, align 4
@@ -76,7 +76,7 @@ export void foo() {
// CHECK: define linkonce_odr hidden void @_ZN4hlsl8RWBufferIdEC2EjijjPKc(ptr noundef nonnull align 4 dereferenceable(4) %this,
// CHECK-SAME: i32 noundef %spaceNo, i32 noundef %range, i32 noundef %index, i32 noundef %orderId, ptr noundef %name)
// CHECK: %[[HANDLE:.*]] = call target("dx.TypedBuffer", double, 1, 0, 0) @llvm.dx.resource.handlefromimplicitbinding.tdx.TypedBuffer_f64_1_0_0t
-// CHECK-SAME: (i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 false, ptr %{{.*}})
+// CHECK-SAME: (i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, ptr %{{.*}})
// CHECK-NEXT: %__handle = getelementptr inbounds nuw %"class.hlsl::RWBuffer.0", ptr %{{.*}}, i32 0, i32 0
// CHECK-NEXT: store target("dx.TypedBuffer", double, 1, 0, 0) %[[HANDLE]], ptr %__handle, align 4
diff --git a/clang/test/CodeGenHLSL/resources/RWBuffer-elementtype.hlsl b/clang/test/CodeGenHLSL/resources/RWBuffer-elementtype.hlsl
index 5512a65..f48521b 100644
--- a/clang/test/CodeGenHLSL/resources/RWBuffer-elementtype.hlsl
+++ b/clang/test/CodeGenHLSL/resources/RWBuffer-elementtype.hlsl
@@ -18,18 +18,18 @@
// SPIRV: %"class.hlsl::RWBuffer" = type { target("spirv.SignedImage", i16, 5, 2, 0, 0, 2, 0) }
// SPIRV: %"class.hlsl::RWBuffer.0" = type { target("spirv.Image", i16, 5, 2, 0, 0, 2, 0) }
-// SPIRV: %"class.hlsl::RWBuffer.1" = type { target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) }
-// SPIRV: %"class.hlsl::RWBuffer.2" = type { target("spirv.Image", i32, 5, 2, 0, 0, 2, 0) }
-// SPIRV: %"class.hlsl::RWBuffer.3" = type { target("spirv.SignedImage", i64, 5, 2, 0, 0, 2, 0) }
-// SPIRV: %"class.hlsl::RWBuffer.4" = type { target("spirv.Image", i64, 5, 2, 0, 0, 2, 0) }
+// SPIRV: %"class.hlsl::RWBuffer.1" = type { target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 24) }
+// SPIRV: %"class.hlsl::RWBuffer.2" = type { target("spirv.Image", i32, 5, 2, 0, 0, 2, 33) }
+// SPIRV: %"class.hlsl::RWBuffer.3" = type { target("spirv.SignedImage", i64, 5, 2, 0, 0, 2, 41) }
+// SPIRV: %"class.hlsl::RWBuffer.4" = type { target("spirv.Image", i64, 5, 2, 0, 0, 2, 40) }
// SPIRV: %"class.hlsl::RWBuffer.5" = type { target("spirv.Image", half, 5, 2, 0, 0, 2, 0) }
-// SPIRV: %"class.hlsl::RWBuffer.6" = type { target("spirv.Image", float, 5, 2, 0, 0, 2, 0) }
+// SPIRV: %"class.hlsl::RWBuffer.6" = type { target("spirv.Image", float, 5, 2, 0, 0, 2, 3) }
// SPIRV: %"class.hlsl::RWBuffer.7" = type { target("spirv.Image", double, 5, 2, 0, 0, 2, 0) }
// SPIRV: %"class.hlsl::RWBuffer.8" = type { target("spirv.SignedImage", i16, 5, 2, 0, 0, 2, 0) }
// SPIRV: %"class.hlsl::RWBuffer.9" = type { target("spirv.Image", i32, 5, 2, 0, 0, 2, 0) }
// SPIRV: %"class.hlsl::RWBuffer.10" = type { target("spirv.Image", half, 5, 2, 0, 0, 2, 0) }
// SPIRV: %"class.hlsl::RWBuffer.11" = type { target("spirv.Image", float, 5, 2, 0, 0, 2, 0) }
-// SPIRV: %"class.hlsl::RWBuffer.12" = type { target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) }
+// SPIRV: %"class.hlsl::RWBuffer.12" = type { target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 21) }
RWBuffer<int16_t> BufI16;
RWBuffer<uint16_t> BufU16;
diff --git a/clang/test/CodeGenHLSL/resources/RWBuffer-imageformat.hlsl b/clang/test/CodeGenHLSL/resources/RWBuffer-imageformat.hlsl
new file mode 100644
index 0000000..aebee89
--- /dev/null
+++ b/clang/test/CodeGenHLSL/resources/RWBuffer-imageformat.hlsl
@@ -0,0 +1,74 @@
+// RUN: %clang_cc1 -triple spirv-pc-vulkan-compute -finclude-default-header -fnative-half-type -emit-llvm -o - %s | FileCheck %s
+
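+// The last operand of the image target type encodes the SPIR-V ImageFormat
+// enum value: e.g. 24 = R32i, 25 = Rg32i, 21 = Rgba32i, 33 = R32ui,
+// 35 = Rg32ui, 30 = Rgba32ui, 41 = R64i, 40 = R64ui, 3 = R32f, 6 = Rg32f,
+// 1 = Rgba32f, and 0 = Unknown (used where no matching format exists, e.g.
+// for 3-element vectors, half, and double).
+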
+// Signed integers
+// CHECK: %"class.hlsl::RWBuffer" = type { target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 24) }
+RWBuffer<int> rwb_int;
+// CHECK: %"class.hlsl::RWBuffer.0" = type { target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 25) }
+RWBuffer<int2> rwb_int2;
+// CHECK: %"class.hlsl::RWBuffer.1" = type { target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) }
+RWBuffer<int3> rwb_int3;
+// CHECK: %"class.hlsl::RWBuffer.2" = type { target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 21) }
+RWBuffer<int4> rwb_int4;
+
+// Unsigned integers
+// CHECK: %"class.hlsl::RWBuffer.3" = type { target("spirv.Image", i32, 5, 2, 0, 0, 2, 33) }
+RWBuffer<uint> rwb_uint;
+// CHECK: %"class.hlsl::RWBuffer.4" = type { target("spirv.Image", i32, 5, 2, 0, 0, 2, 35) }
+RWBuffer<uint2> rwb_uint2;
+// CHECK: %"class.hlsl::RWBuffer.5" = type { target("spirv.Image", i32, 5, 2, 0, 0, 2, 0) }
+RWBuffer<uint3> rwb_uint3;
+// CHECK: %"class.hlsl::RWBuffer.6" = type { target("spirv.Image", i32, 5, 2, 0, 0, 2, 30) }
+RWBuffer<uint4> rwb_uint4;
+
+// 64-bit integers
+// CHECK: %"class.hlsl::RWBuffer.7" = type { target("spirv.SignedImage", i64, 5, 2, 0, 0, 2, 41) }
+RWBuffer<int64_t> rwb_i64;
+// CHECK: %"class.hlsl::RWBuffer.8" = type { target("spirv.SignedImage", i64, 5, 2, 0, 0, 2, 0) }
+RWBuffer<int64_t2> rwb_i64_2;
+// CHECK: %"class.hlsl::RWBuffer.9" = type { target("spirv.Image", i64, 5, 2, 0, 0, 2, 40) }
+RWBuffer<uint64_t> rwb_u64;
+// CHECK: %"class.hlsl::RWBuffer.10" = type { target("spirv.Image", i64, 5, 2, 0, 0, 2, 0) }
+RWBuffer<uint64_t2> rwb_u64_2;
+
+// Floats
+// CHECK: %"class.hlsl::RWBuffer.11" = type { target("spirv.Image", float, 5, 2, 0, 0, 2, 3) }
+RWBuffer<float> rwb_float;
+// CHECK: %"class.hlsl::RWBuffer.12" = type { target("spirv.Image", float, 5, 2, 0, 0, 2, 6) }
+RWBuffer<float2> rwb_float2;
+// CHECK: %"class.hlsl::RWBuffer.13" = type { target("spirv.Image", float, 5, 2, 0, 0, 2, 0) }
+RWBuffer<float3> rwb_float3;
+// CHECK: %"class.hlsl::RWBuffer.14" = type { target("spirv.Image", float, 5, 2, 0, 0, 2, 1) }
+RWBuffer<float4> rwb_float4;
+
+// Other types that should get Unknown format
+// CHECK: %"class.hlsl::RWBuffer.15" = type { target("spirv.Image", half, 5, 2, 0, 0, 2, 0) }
+RWBuffer<half> rwb_half;
+// CHECK: %"class.hlsl::RWBuffer.16" = type { target("spirv.Image", double, 5, 2, 0, 0, 2, 0) }
+RWBuffer<double> rwb_double;
+
+// Non-UAV resource
+// CHECK: %"class.hlsl::Buffer" = type { target("spirv.SignedImage", i32, 5, 2, 0, 0, 1, 0) }
+Buffer<int> b_int;
+
+[numthreads(1,1,1)]
+void main(int GI : SV_GroupIndex) {
+ rwb_int[GI] = 0;
+ rwb_int2[GI] = 0;
+ rwb_int3[GI] = 0;
+ rwb_int4[GI] = 0;
+ rwb_uint[GI] = 0;
+ rwb_uint2[GI] = 0;
+ rwb_uint3[GI] = 0;
+ rwb_uint4[GI] = 0;
+ rwb_i64[GI] = 0;
+ rwb_i64_2[GI] = 0;
+ rwb_u64[GI] = 0;
+ rwb_u64_2[GI] = 0;
+ rwb_float[GI] = 0;
+ rwb_float2[GI] = 0;
+ rwb_float3[GI] = 0;
+ rwb_float4[GI] = 0;
+ rwb_half[GI] = 0;
+ rwb_double[GI] = 0;
+ int val = b_int[GI];
+}
diff --git a/clang/test/CodeGenHLSL/resources/RWBuffer-subscript.hlsl b/clang/test/CodeGenHLSL/resources/RWBuffer-subscript.hlsl
index 63e3552..0de171c 100644
--- a/clang/test/CodeGenHLSL/resources/RWBuffer-subscript.hlsl
+++ b/clang/test/CodeGenHLSL/resources/RWBuffer-subscript.hlsl
@@ -1,5 +1,5 @@
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-compute -emit-llvm -o - -O0 %s | FileCheck %s --check-prefixes=DXC,CHECK
-// RUN: %clang_cc1 -triple spirv1.6-pc-vulkan1.3-compute -emit-llvm -o - -O0 %s | FileCheck %s --check-prefixes=SPIRV,CHECK
+// RUN: %clang_cc1 -triple spirv1.6-pc-vulkan1.3-compute -fspv-use-unknown-image-format -emit-llvm -o - -O0 %s | FileCheck %s --check-prefixes=SPIRV,CHECK
RWBuffer<int> In;
RWBuffer<int> Out;
diff --git a/clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl b/clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl
index 2884173..91410e6 100644
--- a/clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl
+++ b/clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl
@@ -71,7 +71,7 @@ export void foo() {
// CHECK: define linkonce_odr hidden void @_ZN4hlsl16StructuredBufferIfEC2EjjijPKc(ptr noundef nonnull align 4 dereferenceable(4) %this,
// CHECK-SAME: i32 noundef %registerNo, i32 noundef %spaceNo, i32 noundef %range, i32 noundef %index, ptr noundef %name)
// CHECK-DXIL: %[[HANDLE:.*]] = call target("dx.RawBuffer", float, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_f32_0_0t(
-// CHECK-SAME: i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 false, ptr %{{.*}})
+// CHECK-SAME: i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, ptr %{{.*}})
// CHECK-NEXT: %__handle = getelementptr inbounds nuw %"class.hlsl::StructuredBuffer", ptr %{{.*}}, i32 0, i32 0
// CHECK-DXIL-NEXT: store target("dx.RawBuffer", float, 0, 0) %[[HANDLE]], ptr %__handle, align 4
@@ -80,7 +80,7 @@ export void foo() {
// CHECK: define linkonce_odr hidden void @_ZN4hlsl18RWStructuredBufferIfEC2EjijjPKc(ptr noundef nonnull align 4 dereferenceable(4) %this,
// CHECK-SAME: i32 noundef %spaceNo, i32 noundef %range, i32 noundef %index, i32 noundef %orderId, ptr noundef %name)
// CHECK: %[[HANDLE:.*]] = call target("dx.RawBuffer", float, 1, 0) @llvm.dx.resource.handlefromimplicitbinding.tdx.RawBuffer_f32_1_0t
-// CHECK-SAME: (i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 false, ptr %{{.*}})
+// CHECK-SAME: (i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, ptr %{{.*}})
// CHECK-NEXT: %__handle = getelementptr inbounds nuw %"class.hlsl::RWStructuredBuffer", ptr %{{.*}}, i32 0, i32 0
// CHECK-NEXT: store target("dx.RawBuffer", float, 1, 0) %[[HANDLE]], ptr %__handle, align 4
diff --git a/clang/test/CodeGenHLSL/resources/cbuffer.hlsl b/clang/test/CodeGenHLSL/resources/cbuffer.hlsl
index b58a49b..8dcff5d 100644
--- a/clang/test/CodeGenHLSL/resources/cbuffer.hlsl
+++ b/clang/test/CodeGenHLSL/resources/cbuffer.hlsl
@@ -276,61 +276,61 @@ cbuffer CB_C {
// CHECK: define internal void @_init_buffer_CBScalars.cb()
// CHECK-NEXT: entry:
// CHECK-NEXT: %CBScalars.cb_h = call target("dx.CBuffer", target("dx.Layout", %__cblayout_CBScalars, 56, 0, 8, 16, 24, 32, 36, 40, 48))
-// CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBScalarss_56_0_8_16_24_32_36_40_48tt(i32 5, i32 1, i32 1, i32 0, i1 false, ptr @CBScalars.str)
+// CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBScalarss_56_0_8_16_24_32_36_40_48tt(i32 5, i32 1, i32 1, i32 0, ptr @CBScalars.str)
// CHECK-NEXT: store target("dx.CBuffer", target("dx.Layout", %__cblayout_CBScalars, 56, 0, 8, 16, 24, 32, 36, 40, 48)) %CBScalars.cb_h, ptr @CBScalars.cb, align 4
// CHECK: define internal void @_init_buffer_CBVectors.cb()
// CHECK-NEXT: entry:
// CHECK-NEXT: %CBVectors.cb_h = call target("dx.CBuffer", target("dx.Layout", %__cblayout_CBVectors, 136, 0, 16, 40, 48, 80, 96, 112))
-// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBVectorss_136_0_16_40_48_80_96_112tt(i32 0, i32 0, i32 1, i32 0, i1 false, ptr @CBVectors.str)
+// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBVectorss_136_0_16_40_48_80_96_112tt(i32 0, i32 0, i32 1, i32 0, ptr @CBVectors.str)
// CHECK-NEXT: store target("dx.CBuffer", target("dx.Layout", %__cblayout_CBVectors, 136, 0, 16, 40, 48, 80, 96, 112)) %CBVectors.cb_h, ptr @CBVectors.cb, align 4
// CHECK: define internal void @_init_buffer_CBArrays.cb()
// CHECK-NEXT: entry:
// CHECK-NEXT: %CBArrays.cb_h = call target("dx.CBuffer", target("dx.Layout", %__cblayout_CBArrays, 708, 0, 48, 112, 176, 224, 608, 624, 656))
-// CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBArrayss_708_0_48_112_176_224_608_624_656tt(i32 0, i32 2, i32 1, i32 0, i1 false, ptr @CBArrays.str)
+// CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBArrayss_708_0_48_112_176_224_608_624_656tt(i32 0, i32 2, i32 1, i32 0, ptr @CBArrays.str)
// CHECK-NEXT: store target("dx.CBuffer", target("dx.Layout", %__cblayout_CBArrays, 708, 0, 48, 112, 176, 224, 608, 624, 656)) %CBArrays.cb_h, ptr @CBArrays.cb, align 4
// CHECK: define internal void @_init_buffer_CBTypedefArray.cb()
// CHECK-NEXT: entry:
// CHECK-NEXT: %CBTypedefArray.cb_h = call target("dx.CBuffer", target("dx.Layout", %__cblayout_CBTypedefArray, 128, 0, 64))
-// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBTypedefArrays_128_0_64tt(i32 1, i32 2, i32 1, i32 0, i1 false, ptr @CBTypedefArray.str)
+// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBTypedefArrays_128_0_64tt(i32 1, i32 2, i32 1, i32 0, ptr @CBTypedefArray.str)
// CHECK-NEXT: store target("dx.CBuffer", target("dx.Layout", %__cblayout_CBTypedefArray, 128, 0, 64)) %CBTypedefArray.cb_h, ptr @CBTypedefArray.cb, align 4
// CHECK: define internal void @_init_buffer_CBStructs.cb()
// CHECK-NEXT: entry:
// CHECK-NEXT: %CBStructs.cb_h = call target("dx.CBuffer", target("dx.Layout", %__cblayout_CBStructs, 246, 0, 16, 32, 64, 144, 238, 240))
-// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBStructss_246_0_16_32_64_144_238_240tt(i32 2, i32 0, i32 1, i32 0, i1 false, ptr @CBStructs.str)
+// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBStructss_246_0_16_32_64_144_238_240tt(i32 2, i32 0, i32 1, i32 0, ptr @CBStructs.str)
// CHECK-NEXT: store target("dx.CBuffer", target("dx.Layout", %__cblayout_CBStructs, 246, 0, 16, 32, 64, 144, 238, 240)) %CBStructs.cb_h, ptr @CBStructs.cb, align 4
// CHECK: define internal void @_init_buffer_CBClasses.cb()
// CHECK-NEXT: entry:
// CHECK-NEXT: %CBClasses.cb_h = call target("dx.CBuffer", target("dx.Layout", %__cblayout_CBClasses, 260, 0, 16, 32, 112))
-// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBClassess_260_0_16_32_112tt(i32 3, i32 0, i32 1, i32 0, i1 false, ptr @CBClasses.str)
+// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBClassess_260_0_16_32_112tt(i32 3, i32 0, i32 1, i32 0, ptr @CBClasses.str)
// CHECK-NEXT: store target("dx.CBuffer", target("dx.Layout", %__cblayout_CBClasses, 260, 0, 16, 32, 112)) %CBClasses.cb_h, ptr @CBClasses.cb, align 4
// CHECK: define internal void @_init_buffer_CBMix.cb()
// CHECK-NEXT: entry:
// CHECK-NEXT: %CBMix.cb_h = call target("dx.CBuffer", target("dx.Layout", %__cblayout_CBMix, 170, 0, 24, 32, 120, 128, 136, 144, 152, 160, 168))
-// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBMixs_170_0_24_32_120_128_136_144_152_160_168tt(i32 4, i32 0, i32 1, i32 0, i1 false, ptr @CBMix.str)
+// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBMixs_170_0_24_32_120_128_136_144_152_160_168tt(i32 4, i32 0, i32 1, i32 0, ptr @CBMix.str)
// CHECK-NEXT: store target("dx.CBuffer", target("dx.Layout", %__cblayout_CBMix, 170, 0, 24, 32, 120, 128, 136, 144, 152, 160, 168)) %CBMix.cb_h, ptr @CBMix.cb, align 4
// CHECK: define internal void @_init_buffer_CB_A.cb()
// CHECK-NEXT: entry:
// CHECK-NEXT: %CB_A.cb_h = call target("dx.CBuffer", target("dx.Layout", %__cblayout_CB_A, 188, 0, 32, 76, 80, 120, 128, 144, 160, 182))
-// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CB_As_188_0_32_76_80_120_128_144_160_182tt(i32 5, i32 0, i32 1, i32 0, i1 false, ptr @CB_A.str)
+// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CB_As_188_0_32_76_80_120_128_144_160_182tt(i32 5, i32 0, i32 1, i32 0, ptr @CB_A.str)
// CHECK-NEXT: store target("dx.CBuffer", target("dx.Layout", %__cblayout_CB_A, 188, 0, 32, 76, 80, 120, 128, 144, 160, 182)) %CB_A.cb_h, ptr @CB_A.cb, align 4
// CHECK: define internal void @_init_buffer_CB_B.cb()
// CHECK-NEXT: entry:
// CHECK-NEXT: %CB_B.cb_h = call target("dx.CBuffer", target("dx.Layout", %__cblayout_CB_B, 94, 0, 88))
-// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CB_Bs_94_0_88tt(i32 6, i32 0, i32 1, i32 0, i1 false, ptr @CB_B.str)
+// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CB_Bs_94_0_88tt(i32 6, i32 0, i32 1, i32 0, ptr @CB_B.str)
// CHECK-NEXT: store target("dx.CBuffer", target("dx.Layout", %__cblayout_CB_B, 94, 0, 88)) %CB_B.cb_h, ptr @CB_B.cb, align 4
// CHECK: define internal void @_init_buffer_CB_C.cb()
// CHECK-NEXT: entry:
// CHECK-NEXT: %CB_C.cb_h = call target("dx.CBuffer", target("dx.Layout", %__cblayout_CB_C, 400, 0, 16, 112, 128, 392))
-// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CB_Cs_400_0_16_112_128_392tt(i32 7, i32 0, i32 1, i32 0, i1 false, ptr @CB_C.str)
+// CHECK-SAME: @llvm.dx.resource.handlefromimplicitbinding.tdx.CBuffer_tdx.Layout_s___cblayout_CB_Cs_400_0_16_112_128_392tt(i32 7, i32 0, i32 1, i32 0, ptr @CB_C.str)
// CHECK-NEXT: store target("dx.CBuffer", target("dx.Layout", %__cblayout_CB_C, 400, 0, 16, 112, 128, 392)) %CB_C.cb_h, ptr @CB_C.cb, align 4
RWBuffer<float> Buf;
diff --git a/clang/test/CodeGenHLSL/resources/cbuffer_with_packoffset.hlsl b/clang/test/CodeGenHLSL/resources/cbuffer_with_packoffset.hlsl
index 16d22a5..7bedd63 100644
--- a/clang/test/CodeGenHLSL/resources/cbuffer_with_packoffset.hlsl
+++ b/clang/test/CodeGenHLSL/resources/cbuffer_with_packoffset.hlsl
@@ -31,7 +31,7 @@ cbuffer CB : register(b0) {
// CHECK: define internal void @_init_buffer_CB.cb()
// CHECK-NEXT: entry:
// CHECK-NEXT: %CB.cb_h = call target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 176, 16, 168, 88))
-// CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBs_176_16_168_88tt(i32 3, i32 1, i32 1, i32 0, i1 false, ptr @CB.str)
+// CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.CBuffer_tdx.Layout_s___cblayout_CBs_176_16_168_88tt(i32 3, i32 1, i32 1, i32 0, ptr @CB.str)
float foo() {
// CHECK: load float, ptr addrspace(2) @a, align 4
diff --git a/clang/test/CodeGenHLSL/resources/res-array-global-dyn-index.hlsl b/clang/test/CodeGenHLSL/resources/res-array-global-dyn-index.hlsl
new file mode 100644
index 0000000..5b62452
--- /dev/null
+++ b/clang/test/CodeGenHLSL/resources/res-array-global-dyn-index.hlsl
@@ -0,0 +1,29 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.6-compute -finclude-default-header \
+// RUN: -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s
+
+// CHECK: @[[BufA:.*]] = private unnamed_addr constant [2 x i8] c"A\00", align 1
+
+RWBuffer<float> A[4][3] : register(u2);
+RWStructuredBuffer<float> Out;
+
+// Make sure A[GI.x][GI.y] is translated to a RWBuffer<float> constructor call with range 12 and a dynamically calculated index
+
+// NOTE:
+// Constructor call for explicit binding has "jjij" in the mangled name and the arguments are (register, space, range_size, index, name).
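+//
+// A worked example of the values checked below: A is declared as A[4][3] at
+// register(u2), so the flattened range is 4 * 3 = 12 and the index for
+// A[GI.x][GI.y] is GI.x * 3 + GI.y, which is the mul/add sequence the CHECK
+// lines verify.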
+
+// CHECK: define internal void @_Z4mainDv3_j(<3 x i32> noundef %GI)
+// CHECK: %[[GI_alloca:.*]] = alloca <3 x i32>, align 16
+// CHECK: %[[Tmp0:.*]] = alloca %"class.hlsl::RWBuffer
+// CHECK: store <3 x i32> %GI, ptr %[[GI_alloca]]
+
+// CHECK: %[[GI:.*]] = load <3 x i32>, ptr %[[GI_alloca]], align 16
+// CHECK: %[[GI_y:.*]] = extractelement <3 x i32> %[[GI]], i32 1
+// CHECK: %[[GI:.*]] = load <3 x i32>, ptr %[[GI_alloca]], align 16
+// CHECK: %[[GI_x:.*]] = extractelement <3 x i32> %[[GI]], i32 0
+// CHECK: %[[Tmp1:.*]] = mul i32 %[[GI_x]], 3
+// CHECK: %[[Index:.*]] = add i32 %[[GI_y]], %[[Tmp1]]
+// CHECK: call void @_ZN4hlsl8RWBufferIfEC1EjjijPKc(ptr {{.*}} %[[Tmp0]], i32 noundef 2, i32 noundef 0, i32 noundef 12, i32 noundef %[[Index]], ptr noundef @A.str)
+[numthreads(4,1,1)]
+void main(uint3 GI : SV_GroupThreadID) {
+ Out[0] = A[GI.x][GI.y][0];
+}
diff --git a/clang/test/CodeGenHLSL/resources/res-array-global-multi-dim.hlsl b/clang/test/CodeGenHLSL/resources/res-array-global-multi-dim.hlsl
new file mode 100644
index 0000000..8d664da
--- /dev/null
+++ b/clang/test/CodeGenHLSL/resources/res-array-global-multi-dim.hlsl
@@ -0,0 +1,46 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.6-compute -finclude-default-header \
+// RUN: -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s
+// RUN: %clang_cc1 -finclude-default-header -triple spirv-unknown-vulkan-compute \
+// RUN: -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s
+
+// CHECK: @[[BufB:.*]] = private unnamed_addr constant [2 x i8] c"B\00", align 1
+// CHECK: @[[BufC:.*]] = private unnamed_addr constant [2 x i8] c"C\00", align 1
+// CHECK: @[[BufD:.*]] = private unnamed_addr constant [2 x i8] c"D\00", align 1
+
+RWBuffer<float> B[4][4] : register(u2);
+RWBuffer<int> C[2][2][5] : register(u10, space1);
+
+typedef RWBuffer<uint> RWBufferArrayTenByFive[10][5]; // test typedef for the resource array type
+RWBufferArrayTenByFive D; // implicit binding -> u18, space0
+
+RWStructuredBuffer<float> Out;
+
+[numthreads(4,1,1)]
+void main() {
+ // CHECK: define internal{{.*}} void @_Z4mainv()
+ // CHECK: %[[Tmp0:.*]] = alloca %"class.hlsl::RWBuffer
+ // CHECK: %[[Tmp1:.*]] = alloca %"class.hlsl::RWBuffer
+ // CHECK: %[[Tmp2:.*]] = alloca %"class.hlsl::RWBuffer
+ // CHECK: %[[Tmp3:.*]] = alloca %"class.hlsl::RWBuffer
+
+ // NOTE:
+ // Constructor call for explicit binding has "jjij" in the mangled name and the arguments are (register, space, range_size, index, name).
+ // For implicit binding the constructor has "jijj" in the mangled name and the arguments are (space, range_size, index, order_id, name).
+ // The range_size can be -1 for unbounded arrays, and that is the only signed int in the signature.
+ // The order_id argument is a sequential number that is assigned to resources with implicit binding and corresponds to the order in which
+ // the resources were declared. It is needed because implicit bindings are assigned later on in an LLVM pass that needs to know the order
+ // of the resource declarations.
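+ //
+ // A worked example of the row-major index arithmetic checked below:
+ //   B[3][2]    in B[4][4]    -> range 4 * 4 = 16,     index 3 * 4 + 2 = 14
+ //   C[1][0][3] in C[2][2][5] -> range 2 * 2 * 5 = 20, index (1 * 2 + 0) * 5 + 3 = 13
+ //   D[9][2]    in D[10][5]   -> range 10 * 5 = 50,    index 9 * 5 + 2 = 47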
+
+ // Make sure that B[3][2] is translated to a RWBuffer<float> constructor call for explicit binding (u2, space0) with range 16 and index 14
+ // CHECK: call void @_ZN4hlsl8RWBufferIfEC1EjjijPKc(ptr {{.*}} %[[Tmp0]], i32 noundef 2, i32 noundef 0, i32 noundef 16, i32 noundef 14, ptr noundef @[[BufB]])
+
+ // Make sure that C[1][0][3] is translated to a RWBuffer<int> constructor call for explicit binding (u10, space1) with range 20 and index 13
+ // CHECK: call void @_ZN4hlsl8RWBufferIiEC1EjjijPKc(ptr {{.*}} %[[Tmp1]], i32 noundef 10, i32 noundef 1, i32 noundef 20, i32 noundef 13, ptr noundef @[[BufC]])
+
+ // Make sure that D[9][2] is translated to a RWBuffer<uint> constructor call for implicit binding (u18, space0) with range 50 and index 47
+ // CHECK: call void @_ZN4hlsl8RWBufferIjEC1EjijjPKc(ptr {{.*}} %[[Tmp2]], i32 noundef 0, i32 noundef 50, i32 noundef 47, i32 noundef 0, ptr noundef @[[BufD]])
+
+ // Make sure that the second B[3][2] is translated to the same RWBuffer<float> constructor call as the first B[3][2] subscript
+ // CHECK: call void @_ZN4hlsl8RWBufferIfEC1EjjijPKc(ptr {{.*}} %[[Tmp3]], i32 noundef 2, i32 noundef 0, i32 noundef 16, i32 noundef 14, ptr noundef @[[BufB]])
+ Out[0] = B[3][2][0] + (float)C[1][0][3][0] + (float)D[9][2][0] + B[3][2][1];
+}
diff --git a/clang/test/CodeGenHLSL/resources/res-array-global.hlsl b/clang/test/CodeGenHLSL/resources/res-array-global.hlsl
new file mode 100644
index 0000000..595ea81
--- /dev/null
+++ b/clang/test/CodeGenHLSL/resources/res-array-global.hlsl
@@ -0,0 +1,75 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.6-compute -finclude-default-header \
+// RUN: -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s -check-prefixes=CHECK,DXIL
+// RUN: %clang_cc1 -finclude-default-header -triple spirv-unknown-vulkan-compute \
+// RUN: -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s -check-prefixes=CHECK,SPV
+
+// CHECK: @[[BufA:.*]] = private unnamed_addr constant [2 x i8] c"A\00", align 1
+// CHECK: @[[BufB:.*]] = private unnamed_addr constant [2 x i8] c"B\00", align 1
+// CHECK: @[[BufC:.*]] = private unnamed_addr constant [2 x i8] c"C\00", align 1
+// CHECK: @[[BufD:.*]] = private unnamed_addr constant [2 x i8] c"D\00", align 1
+// CHECK: @[[BufE:.*]] = private unnamed_addr constant [2 x i8] c"E\00", align 1
+
+// different explicit binding for DXIL and SPIR-V
+[[vk::binding(12, 2)]]
+RWBuffer<float> A[4] : register(u10, space1);
+
+[[vk::binding(13)]] // SPIR-V explicit binding 13, set 0
+RWBuffer<int> B[5]; // DXIL implicit binding in space0
+
+// same explicit binding for both DXIL and SPIR-V
+// (SPIR-V takes the binding from the register annotation if there is no vk::binding attribute)
+RWBuffer<int> C[3] : register(u2);
+
+// implicit binding for both DXIL and SPIR-V in space/set 0
+RWBuffer<double> D[10];
+
+// implicit binding for both DXIL and SPIR-V with specified space/set 2
+RWBuffer<uint> E[15] : register(space2);
+
+RWStructuredBuffer<float> Out;
+
+[numthreads(4,1,1)]
+void main() {
+ // CHECK: define internal{{.*}} void @_Z4mainv()
+ // CHECK: %[[Tmp0:.*]] = alloca %"class.hlsl::RWBuffer
+ // CHECK: %[[Tmp1:.*]] = alloca %"class.hlsl::RWBuffer
+ // CHECK: %[[Tmp2:.*]] = alloca %"class.hlsl::RWBuffer
+ // CHECK: %[[Tmp3:.*]] = alloca %"class.hlsl::RWBuffer
+ // CHECK: %[[Tmp4:.*]] = alloca %"class.hlsl::RWBuffer
+
+ // NOTE:
+ // Constructor call for explicit binding has "jjij" in the mangled name and the arguments are (register, space, range_size, index, name).
+ // For implicit binding the constructor has "jijj" in the mangled name and the arguments are (space, range_size, index, order_id, name).
+ // The range_size can be -1 for unbounded arrays, and that is the only signed int in the signature.
+ // The order_id argument is a sequential number that is assigned to resources with implicit binding and corresponds to the order in which
+ // the resources were declared. It is needed because implicit bindings are assigned later on in an LLVM pass that needs to know the order
+ // of the resource declarations.
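+ //
+ // For the order_id values below: on DXIL the implicitly bound resources are
+ // B, D, and E in declaration order, so they get order ids 0, 1, and 2; on
+ // SPIR-V, B is explicitly bound via vk::binding, so only D and E are
+ // implicit and get order ids 0 and 1.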
+
+ // Make sure A[2] is translated to a RWBuffer<float> constructor call with range 4 and index 2
+ // and DXIL explicit binding (u10, space1)
+ // and SPIR-V explicit binding (binding 12, set 2)
+ // DXIL: call void @_ZN4hlsl8RWBufferIfEC1EjjijPKc(ptr {{.*}} %[[Tmp0]], i32 noundef 10, i32 noundef 1, i32 noundef 4, i32 noundef 2, ptr noundef @[[BufA]])
+ // SPV: call void @_ZN4hlsl8RWBufferIfEC1EjjijPKc(ptr {{.*}} %[[Tmp0]], i32 noundef 12, i32 noundef 2, i32 noundef 4, i32 noundef 2, ptr noundef @[[BufA]])
+
+ // Make sure B[3] is translated to a RWBuffer<int> constructor call with range 5 and index 3
+ // and DXIL for implicit binding in space0, order id 0
+ // and SPIR-V explicit binding (binding 13, set 0)
+ // DXIL: call void @_ZN4hlsl8RWBufferIiEC1EjijjPKc(ptr {{.*}} %[[Tmp1]], i32 noundef 0, i32 noundef 5, i32 noundef 3, i32 noundef 0, ptr noundef @[[BufB]])
+ // SPV: call void @_ZN4hlsl8RWBufferIiEC1EjjijPKc(ptr {{.*}} %[[Tmp1]], i32 noundef 13, i32 noundef 0, i32 noundef 5, i32 noundef 3, ptr noundef @[[BufB]])
+
+ // Make sure C[1] is translated to a RWBuffer<int> constructor call with range 3 and index 1
+ // and DXIL explicit binding (u2, space0)
+ // and SPIR-V explicit binding (binding 2, set 0)
+ // DXIL: call void @_ZN4hlsl8RWBufferIiEC1EjjijPKc(ptr {{.*}} %[[Tmp2]], i32 noundef 2, i32 noundef 0, i32 noundef 3, i32 noundef 1, ptr noundef @[[BufC]])
+ // SPV: call void @_ZN4hlsl8RWBufferIiEC1EjjijPKc(ptr {{.*}} %[[Tmp2]], i32 noundef 2, i32 noundef 0, i32 noundef 3, i32 noundef 1, ptr noundef @[[BufC]])
+
+ // Make sure D[7] is translated to a RWBuffer<double> constructor call with implicit binding
+ // for both DXIL and SPIR-V
+ // DXIL: call void @_ZN4hlsl8RWBufferIdEC1EjijjPKc(ptr {{.*}} %[[Tmp3]], i32 noundef 0, i32 noundef 10, i32 noundef 7, i32 noundef 1, ptr noundef @[[BufD]])
+ // SPV: call void @_ZN4hlsl8RWBufferIdEC1EjijjPKc(ptr {{.*}} %[[Tmp3]], i32 noundef 0, i32 noundef 10, i32 noundef 7, i32 noundef 0, ptr noundef @[[BufD]])
+
+ // Make sure E[5][0] is translated to a RWBuffer<uint> constructor call with implicit binding and specified space/set 2
+ // DXIL: call void @_ZN4hlsl8RWBufferIjEC1EjijjPKc(ptr {{.*}} %[[Tmp4]], i32 noundef 2, i32 noundef 15, i32 noundef 5, i32 noundef 2, ptr noundef @[[BufE]])
+ // SPV: call void @_ZN4hlsl8RWBufferIjEC1EjijjPKc(ptr {{.*}} %[[Tmp4]], i32 noundef 2, i32 noundef 15, i32 noundef 5, i32 noundef 1, ptr noundef @[[BufE]])
+ Out[0] = A[2][0] + (float)B[3][0] + (float)C[1][0] + (float)D[7][0] + (float)E[5][0];
+}
diff --git a/clang/test/CodeGenHLSL/resources/res-array-local-multi-dim.hlsl b/clang/test/CodeGenHLSL/resources/res-array-local-multi-dim.hlsl
new file mode 100644
index 0000000..d803882
--- /dev/null
+++ b/clang/test/CodeGenHLSL/resources/res-array-local-multi-dim.hlsl
@@ -0,0 +1,49 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.6-compute -finclude-default-header \
+// RUN: -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s
+
+// This test verifies handling of multi-dimensional local arrays of resources
+// when used as function arguments and local variables.
+
+// CHECK: @_ZL1A = internal global %"class.hlsl::RWBuffer" poison, align 4
+// CHECK: @_ZL1B = internal global %"class.hlsl::RWBuffer" poison, align 4
+
+RWBuffer<float> A : register(u10);
+RWBuffer<float> B : register(u20);
+RWStructuredBuffer<float> Out;
+
+// NOTE: _ZN4hlsl8RWBufferIfEixEj is the subscript operator for RWBuffer<float> and
+// _ZN4hlsl18RWStructuredBufferIfEixEj is the subscript operator for RWStructuredBuffer<float>
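+// (Demangled, these are hlsl::RWBuffer<float>::operator[](unsigned int) and
+// hlsl::RWStructuredBuffer<float>::operator[](unsigned int), respectively.)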
+
+// CHECK: define {{.*}} float @_Z3fooA2_A2_N4hlsl8RWBufferIfEE(ptr noundef byval([2 x [2 x %"class.hlsl::RWBuffer"]]) align 4 %Arr)
+// CHECK-NEXT: entry:
+float foo(RWBuffer<float> Arr[2][2]) {
+// CHECK-NEXT: %[[Arr_1_Ptr:.*]] = getelementptr inbounds [2 x [2 x %"class.hlsl::RWBuffer"]], ptr %Arr, i32 0, i32 1
+// CHECK-NEXT: %[[Arr_1_1_Ptr:.*]] = getelementptr inbounds [2 x %"class.hlsl::RWBuffer"], ptr %[[Arr_1_Ptr]], i32 0, i32 1
+// CHECK-NEXT: %[[BufPtr:.*]] = call {{.*}} ptr @_ZN4hlsl8RWBufferIfEixEj(ptr {{.*}} %[[Arr_1_1_Ptr]], i32 noundef 0)
+// CHECK-NEXT: %[[Value:.*]] = load float, ptr %[[BufPtr]], align 4
+// CHECK-NEXT: ret float %[[Value]]
+ return Arr[1][1][0];
+}
+
+// CHECK: define internal void @_Z4mainv()
+// CHECK-NEXT: entry:
+[numthreads(4,1,1)]
+void main() {
+// CHECK-NEXT: %L = alloca [2 x [2 x %"class.hlsl::RWBuffer"]], align 4
+// CHECK-NEXT: %[[Tmp:.*]] = alloca [2 x [2 x %"class.hlsl::RWBuffer"]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %L, ptr align 4 @_ZL1A, i32 4, i1 false)
+// CHECK-NEXT: %[[Ptr1:.*]] = getelementptr inbounds %"class.hlsl::RWBuffer", ptr %L, i32 1
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %[[Ptr1]], ptr align 4 @_ZL1B, i32 4, i1 false)
+// CHECK-NEXT: %[[Ptr2:.*]] = getelementptr inbounds [2 x %"class.hlsl::RWBuffer"], ptr %L, i32 1
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %[[Ptr2]], ptr align 4 @_ZL1A, i32 4, i1 false)
+// CHECK-NEXT: %[[Ptr3:.*]] = getelementptr inbounds %"class.hlsl::RWBuffer", ptr %[[Ptr2]], i32 1
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %[[Ptr3]], ptr align 4 @_ZL1B, i32 4, i1 false)
+ RWBuffer<float> L[2][2] = { { A, B }, { A, B } };
+
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %[[Tmp]], ptr align 4 %L, i32 16, i1 false)
+// CHECK-NEXT: %[[ReturnedValue:.*]] = call {{.*}}float @_Z3fooA2_A2_N4hlsl8RWBufferIfEE(ptr noundef byval([2 x [2 x %"class.hlsl::RWBuffer"]]) align 4 %[[Tmp]])
+// CHECK-NEXT: %[[OutBufPtr:.*]] = call {{.*}} ptr @_ZN4hlsl18RWStructuredBufferIfEixEj(ptr {{.*}} @_ZL3Out, i32 noundef 0)
+// CHECK-NEXT: store float %[[ReturnedValue]], ptr %[[OutBufPtr]], align 4
+// CHECK-NEXT: ret void
+ Out[0] = foo(L);
+}
diff --git a/clang/test/CodeGenHLSL/resources/res-array-local1.hlsl b/clang/test/CodeGenHLSL/resources/res-array-local1.hlsl
new file mode 100644
index 0000000..c0d508b
--- /dev/null
+++ b/clang/test/CodeGenHLSL/resources/res-array-local1.hlsl
@@ -0,0 +1,64 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.6-compute -finclude-default-header \
+// RUN: -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s
+
+// This test verifies local arrays of resources in HLSL.
+
+// CHECK: @_ZL1A = internal global %"class.hlsl::RWBuffer" poison, align 4
+// CHECK: @_ZL1B = internal global %"class.hlsl::RWBuffer" poison, align 4
+// CHECK: @_ZL1C = internal global %"class.hlsl::RWBuffer" poison, align 4
+
+RWBuffer<float> A : register(u1);
+RWBuffer<float> B : register(u2);
+RWBuffer<float> C : register(u3);
+RWStructuredBuffer<float> Out : register(u0);
+
+// CHECK: define internal void @_Z4mainv()
+// CHECK-NEXT: entry:
+[numthreads(4,1,1)]
+void main() {
+// CHECK-NEXT: %First = alloca [3 x %"class.hlsl::RWBuffer"], align 4
+// CHECK-NEXT: %Second = alloca [4 x %"class.hlsl::RWBuffer"], align 4
+ RWBuffer<float> First[3] = { A, B, C };
+ RWBuffer<float> Second[4];
+
+// Verify initialization of First array from an initialization list
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %First, ptr align 4 @_ZL1A, i32 4, i1 false)
+// CHECK-NEXT: %[[Ptr1:.*]] = getelementptr inbounds %"class.hlsl::RWBuffer", ptr %First, i32 1
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %[[Ptr1]], ptr align 4 @_ZL1B, i32 4, i1 false)
+// CHECK-NEXT: %[[Ptr2:.*]] = getelementptr inbounds %"class.hlsl::RWBuffer", ptr %First, i32 2
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %[[Ptr2]], ptr align 4 @_ZL1C, i32 4, i1 false)
+
+// Verify default initialization of the Second array: a loop iterates over
+// the array elements and calls the default constructor for each one.
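+// In rough pseudocode, the emitted loop is:
+//   RWBuffer<float> *p = &Second[0], *end = p + 4;
+//   do { construct *p with RWBuffer<float>(); ++p; } while (p != end);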
+// CHECK-NEXT: %[[ArrayBeginPtr:.*]] = getelementptr inbounds [4 x %"class.hlsl::RWBuffer"], ptr %Second, i32 0, i32 0
+// CHECK-NEXT: %[[ArrayEndPtr:.*]] = getelementptr inbounds %"class.hlsl::RWBuffer", ptr %[[ArrayBeginPtr]], i32 4
+// CHECK-NEXT: br label %[[ArrayInitLoop:.*]]
+// CHECK: [[ArrayInitLoop]]:
+// CHECK-NEXT: %[[ArrayCurPtr:.*]] = phi ptr [ %[[ArrayBeginPtr]], %entry ], [ %[[ArrayNextPtr:.*]], %[[ArrayInitLoop]] ]
+// CHECK-NEXT: call void @_ZN4hlsl8RWBufferIfEC1Ev(ptr {{.*}} %[[ArrayCurPtr]])
+// CHECK-NEXT: %[[ArrayNextPtr]] = getelementptr inbounds %"class.hlsl::RWBuffer", ptr %[[ArrayCurPtr]], i32 1
+// CHECK-NEXT: %[[ArrayInitDone:.*]] = icmp eq ptr %[[ArrayNextPtr]], %[[ArrayEndPtr]]
+// CHECK-NEXT: br i1 %[[ArrayInitDone]], label %[[AfterArrayInit:.*]], label %[[ArrayInitLoop]]
+// CHECK: [[AfterArrayInit]]:
+
+// Assign C to Second[2]
+// CHECK: %[[Ptr3:.*]] = getelementptr inbounds [4 x %"class.hlsl::RWBuffer"], ptr %Second, i32 0, i32 2
+// CHECK: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %[[Ptr3]], ptr align 4 @_ZL1C, i32 4, i1 false)
+ Second[2] = C;
+
+ // NOTE: _ZN4hlsl8RWBufferIfEixEj is the subscript operator for RWBuffer<float>
+
+// get First[1][0] value
+// CHECK: %[[First_1_Ptr:.*]] = getelementptr inbounds [3 x %"class.hlsl::RWBuffer"], ptr %First, i32 0, i32 1
+// CHECK: %[[BufPtr1:.*]] = call {{.*}} ptr @_ZN4hlsl8RWBufferIfEixEj(ptr {{.*}} %[[First_1_Ptr]], i32 noundef 0)
+// CHECK: %[[Value1:.*]] = load float, ptr %[[BufPtr1]], align 4
+
+// get Second[2][0] value
+// CHECK: %[[Second_2_Ptr:.*]] = getelementptr inbounds [4 x %"class.hlsl::RWBuffer"], ptr %Second, i32 0, i32 2
+// CHECK: %[[BufPtr2:.*]] = call {{.*}} ptr @_ZN4hlsl8RWBufferIfEixEj(ptr {{.*}} %[[Second_2_Ptr]], i32 noundef 0)
+// CHECK: %[[Value2:.*]] = load float, ptr %[[BufPtr2]], align 4
+
+// add them
+// CHECK: %{{.*}} = fadd {{.*}} float %[[Value1]], %[[Value2]]
+ Out[0] = First[1][0] + Second[2][0];
+}
diff --git a/clang/test/CodeGenHLSL/resources/res-array-local2.hlsl b/clang/test/CodeGenHLSL/resources/res-array-local2.hlsl
new file mode 100644
index 0000000..39f3aeb
--- /dev/null
+++ b/clang/test/CodeGenHLSL/resources/res-array-local2.hlsl
@@ -0,0 +1,37 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.6-compute -finclude-default-header \
+// RUN: -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s
+
+// This test verifies handling of local arrays of resources when used as a function argument.
+
+// CHECK: @_ZL1A = internal global [3 x %"class.hlsl::RWBuffer"] poison, align 4
+
+RWBuffer<float> A[3] : register(u0);
+RWStructuredBuffer<float> Out : register(u0);
+
+// NOTE: _ZN4hlsl8RWBufferIfEixEj is the subscript operator for RWBuffer<float> and
+// _ZN4hlsl18RWStructuredBufferIfEixEj is the subscript operator for RWStructuredBuffer<float>
+
+// CHECK: define {{.*}} float @_Z3fooA3_N4hlsl8RWBufferIfEE(ptr noundef byval([3 x %"class.hlsl::RWBuffer"]) align 4 %LocalA)
+// CHECK-NEXT: entry:
+float foo(RWBuffer<float> LocalA[3]) {
+// CHECK-NEXT: %[[LocalA_2_Ptr:.*]] = getelementptr inbounds [3 x %"class.hlsl::RWBuffer"], ptr %LocalA, i32 0, i32 2
+// CHECK-NEXT: %[[BufPtr:.*]] = call {{.*}} ptr @_ZN4hlsl8RWBufferIfEixEj(ptr {{.*}} %[[LocalA_2_Ptr]], i32 noundef 0)
+// CHECK-NEXT: %[[Value:.*]] = load float, ptr %[[BufPtr]], align 4
+// CHECK-NEXT: ret float %[[Value]]
+ return LocalA[2][0];
+}
+
+// CHECK: define internal void @_Z4mainv()
+// CHECK-NEXT: entry:
+[numthreads(4,1,1)]
+void main() {
+// Check that the `main` function calls `foo` with a local copy of the array
+// CHECK-NEXT: %[[Tmp:.*]] = alloca [3 x %"class.hlsl::RWBuffer"], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %[[Tmp]], ptr align 4 @_ZL1A, i32 12, i1 false)
+
+// CHECK-NEXT: %[[ReturnedValue:.*]] = call {{.*}} float @_Z3fooA3_N4hlsl8RWBufferIfEE(ptr noundef byval([3 x %"class.hlsl::RWBuffer"]) align 4 %[[Tmp]])
+// CHECK-NEXT: %[[OutBufPtr:.*]] = call {{.*}} ptr @_ZN4hlsl18RWStructuredBufferIfEixEj(ptr {{.*}} @_ZL3Out, i32 noundef 0)
+// CHECK-NEXT: store float %[[ReturnedValue]], ptr %[[OutBufPtr]], align 4
+// CHECK-NEXT: ret void
+ Out[0] = foo(A);
+}
diff --git a/clang/test/CodeGenHLSL/resources/res-array-local3.hlsl b/clang/test/CodeGenHLSL/resources/res-array-local3.hlsl
new file mode 100644
index 0000000..e5bcdc6
--- /dev/null
+++ b/clang/test/CodeGenHLSL/resources/res-array-local3.hlsl
@@ -0,0 +1,62 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.6-compute -finclude-default-header \
+// RUN: -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s
+
+// This test verifies handling of local arrays of resources when used
+// as a function argument that is modified inside the function.
+
+// CHECK: @_ZL1X = internal global %"class.hlsl::RWBuffer" poison, align 4
+// CHECK: @_ZL1Y = internal global %"class.hlsl::RWBuffer" poison, align 4
+
+RWBuffer<int> X : register(u0);
+RWBuffer<int> Y : register(u1);
+
+// CHECK: define {{.*}} @_Z6SomeFnA2_N4hlsl8RWBufferIiEEji(
+// CHECK-SAME: ptr noundef byval([2 x %"class.hlsl::RWBuffer"]) align 4 %B, i32 noundef %Idx, i32 noundef %Val0)
+// CHECK-NEXT: entry:
+// CHECK-NEXT: %[[Idx_addr:.*]] = alloca i32, align 4
+// CHECK-NEXT: %[[Val0_addr:.*]] = alloca i32, align 4
+// CHECK-NEXT: store i32 %Idx, ptr %[[Idx_addr]], align 4
+// CHECK-NEXT: store i32 %Val0, ptr %[[Val0_addr]], align 4
+void SomeFn(RWBuffer<int> B[2], uint Idx, int Val0) {
+
+// CHECK-NEXT: %[[B_0_Ptr:.*]] = getelementptr inbounds [2 x %"class.hlsl::RWBuffer"], ptr %B, i32 0, i32 0
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %[[B_0_Ptr]], ptr align 4 @_ZL1Y, i32 4, i1 false)
+ B[0] = Y;
+
+// NOTE: _ZN4hlsl8RWBufferIiEixEj is the subscript operator for RWBuffer<int>
+
+// CHECK-NEXT: %[[Val0:.*]] = load i32, ptr %[[Val0_addr]], align 4
+// CHECK-NEXT: %[[B_0_Ptr:.*]] = getelementptr inbounds [2 x %"class.hlsl::RWBuffer"], ptr %B, i32 0, i32 0
+// CHECK-NEXT: %[[Idx:.*]] = load i32, ptr %[[Idx_addr]], align 4
+// CHECK-NEXT: %[[BufPtr:.*]] = call {{.*}} ptr @_ZN4hlsl8RWBufferIiEixEj(ptr {{.*}} %[[B_0_Ptr]], i32 noundef %[[Idx]])
+// CHECK-NEXT: store i32 %[[Val0]], ptr %[[BufPtr]], align 4
+ B[0][Idx] = Val0;
+}
+
+// CHECK: define {{.*}} void @_Z4mainj(i32 noundef %GI)
+// CHECK-NEXT: entry:
+// CHECK-NEXT: %[[GI_addr:.*]] = alloca i32, align 4
+[numthreads(4,1,1)]
+void main(uint GI : SV_GroupIndex) {
+// CHECK-NEXT: %A = alloca [2 x %"class.hlsl::RWBuffer"], align 4
+// CHECK-NEXT: %[[Tmp:.*]] = alloca [2 x %"class.hlsl::RWBuffer"], align 4
+// CHECK-NEXT: store i32 %GI, ptr %GI.addr, align 4
+
+// Initialization of array A with resources X and Y
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %A, ptr align 4 @_ZL1X, i32 4, i1 false)
+// CHECK-NEXT: %[[A_1_Ptr:.*]] = getelementptr inbounds %"class.hlsl::RWBuffer", ptr %A, i32 1
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %[[A_1_Ptr]], ptr align 4 @_ZL1Y, i32 4, i1 false)
+ RWBuffer<int> A[2] = {X, Y};
+
+// Verify that SomeFn is called with a local copy of the array A
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 %[[Tmp]], ptr align 4 %A, i32 8, i1 false)
+// CHECK-NEXT: %[[GI:.*]] = load i32, ptr %[[GI_addr]], align 4
+// CHECK-NEXT: call void @_Z6SomeFnA2_N4hlsl8RWBufferIiEEji(ptr noundef byval([2 x %"class.hlsl::RWBuffer"]) align 4 %[[Tmp]], i32 noundef %[[GI]], i32 noundef 1)
+ SomeFn(A, GI, 1);
+
+// CHECK-NEXT: %[[A_0_Ptr:.*]] = getelementptr inbounds [2 x %"class.hlsl::RWBuffer"], ptr %A, i32 0, i32 0
+// CHECK-NEXT: %[[GI:.*]] = load i32, ptr %[[GI_addr]], align 4
+// CHECK-NEXT: %[[BufPtr:.*]] = call {{.*}} ptr @_ZN4hlsl8RWBufferIiEixEj(ptr {{.*}} %[[A_0_Ptr]], i32 noundef %[[GI]])
+// CHECK-NEXT: store i32 2, ptr %[[BufPtr]], align 4
+ A[0][GI] = 2;
+}
diff --git a/clang/test/CodeGenHLSL/resources/resource-bindings.hlsl b/clang/test/CodeGenHLSL/resources/resource-bindings.hlsl
index 0a30134..27af47e 100644
--- a/clang/test/CodeGenHLSL/resources/resource-bindings.hlsl
+++ b/clang/test/CodeGenHLSL/resources/resource-bindings.hlsl
@@ -4,36 +4,38 @@
// CHECK: %"class.hlsl::RWBuffer.0" = type { target("dx.TypedBuffer", float, 1, 0, 0) }
// CHECK: %"class.hlsl::StructuredBuffer" = type { target("dx.RawBuffer", i32, 0, 0) }
// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", %struct.S, 1, 0) }
+// CHECK: %"class.hlsl::RWBuffer.1" = type { target("dx.TypedBuffer", double, 1, 0, 0) }
// CHECK: @_ZL4U0S0 = internal global %"class.hlsl::RWBuffer" poison, align 4
// CHECK: @_ZL4U5S3 = internal global %"class.hlsl::RWBuffer.0" poison, align 4
// CHECK: @_ZL4T2S2 = internal global %"class.hlsl::StructuredBuffer" poison, align 4
// CHECK: @_ZL4T3S0 = internal global %"class.hlsl::RWStructuredBuffer" poison, align 4
+// CHECK: @_ZL5Array = internal global [10 x %"class.hlsl::RWBuffer.1"] poison, align 4
// CHECK: %[[HANDLE:.*]] = call target("dx.TypedBuffer", <4 x float>, 1, 0, 0)
// CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.TypedBuffer_v4f32_1_0_0t(
-// CHECK-SAME: i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i1 false, ptr %{{.*}})
+// CHECK-SAME: i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, ptr %{{.*}})
// CHECK: %[[HANDLE_PTR:.*]] = getelementptr inbounds nuw %"class.hlsl::RWBuffer", ptr %this{{[0-9]*}}, i32 0, i32 0
// CHECK: store target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %[[HANDLE]], ptr %[[HANDLE_PTR]], align 4
RWBuffer<float4> U0S0 : register(u0);
// CHECK: %[[HANDLE:.*]] = call target("dx.TypedBuffer", float, 1, 0, 0)
// CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.TypedBuffer_f32_1_0_0t(
-// CHECK-SAME: i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i1 false, ptr %{{.*}})
+// CHECK-SAME: i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, ptr %{{.*}})
// CHECK: %[[HANDLE_PTR:.*]] = getelementptr inbounds nuw %"class.hlsl::RWBuffer.0", ptr %this{{[0-9]*}}, i32 0, i32 0
// CHECK: store target("dx.TypedBuffer", float, 1, 0, 0) %[[HANDLE]], ptr %[[HANDLE_PTR]], align 4
RWBuffer<float> U5S3 : register(u5, space3);
// CHECK: %[[HANDLE:.*]] = call target("dx.RawBuffer", i32, 0, 0)
// CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i32_0_0t(
-// CHECK-SAME: i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i1 false, ptr %{{.*}})
+// CHECK-SAME: i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, ptr %{{.*}})
// CHECK: %[[HANDLE_PTR:.*]] = getelementptr inbounds nuw %"class.hlsl::StructuredBuffer", ptr %this{{[0-9]*}}, i32 0, i32 0
// CHECK: store target("dx.RawBuffer", i32, 0, 0) %[[HANDLE]], ptr %[[HANDLE_PTR]], align 4
StructuredBuffer<int> T2S2 : register(t2, space2);
// CHECK: %[[HANDLE:.*]] = call target("dx.RawBuffer", %struct.S, 1, 0)
// CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_s_struct.Ss_1_0t(
-// CHECK-SAME: i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i1 false, ptr %{{.*}})
+// CHECK-SAME: i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, ptr %{{.*}})
// CHECK: %[[HANDLE_PTR:.*]] = getelementptr inbounds nuw %"class.hlsl::RWStructuredBuffer", ptr %this{{[0-9]*}}, i32 0, i32 0
// CHECK: store target("dx.RawBuffer", %struct.S, 1, 0) %[[HANDLE]], ptr %[[HANDLE_PTR]], align 4
struct S {
@@ -42,5 +44,15 @@ struct S {
};
RWStructuredBuffer<S> T3S0 : register(u3);
+// Resource array elements are initialized on access; make sure there is no call
+// to initialize RWBuffer<double>.
+// CHECK-NOT: call target("dx.TypedBuffer", double, 1, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.TypedBuffer_f64_1_0_0t(
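+// (Indexing an element of Array would instead emit a constructor call with
+// range 10 and the accessed index, as the res-array-global tests exercise.)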
+RWBuffer<double> Array[10] : register(u4, space0);
+
[numthreads(4,1,1)]
-void main() {}
+void main() {
+ // Reference Array so that it is emitted and we can verify it is initialized
+ // to poison, without indexing it.
+ // Non-array resources are always emitted because they have a constructor initializer.
+ (void)Array;
+}
diff --git a/clang/test/CodeGenHLSL/static-local-ctor.hlsl b/clang/test/CodeGenHLSL/static-local-ctor.hlsl
index 87f49b8..9a4bf66 100644
--- a/clang/test/CodeGenHLSL/static-local-ctor.hlsl
+++ b/clang/test/CodeGenHLSL/static-local-ctor.hlsl
@@ -2,7 +2,7 @@
// Verify that no per variable _Init_thread instructions are emitted for non-trivial static locals
// These would normally be emitted by the MicrosoftCXXABI, but the DirectX backend should exclude them
-// Instead, check for the guardvar oparations that should protect the constructor initialization should
+// Instead, check that the guardvar operations that protect the constructor initialization
// only take place once.
RWBuffer<int> buf[10];
@@ -15,13 +15,14 @@ void InitBuf(RWBuffer<int> buf) {
// CHECK-NOT: _Init_thread_epoch
// CHECK: define internal void @_Z4mainv
// CHECK-NEXT: entry:
+// CHECK-NEXT: [[Tmp0:%.*]] = alloca %"class.hlsl::RWBuffer"
// CHECK-NEXT: [[Tmp1:%.*]] = alloca %"class.hlsl::RWBuffer"
// CHECK-NEXT: [[Tmp2:%.*]] = load i8, ptr @_ZGVZ4mainvE5mybuf
// CHECK-NEXT: [[Tmp3:%.*]] = icmp eq i8 [[Tmp2]], 0
// CHECK-NEXT: br i1 [[Tmp3]]
// CHECK-NOT: _Init_thread_header
// CHECK: init.check:
-// CHECK-NEXT: call void @_ZN4hlsl8RWBufferIiEC1Ejijj
+// CHECK-NEXT: call void @_ZN4hlsl8RWBufferIiEC1EjijjPKc(
// CHECK-NEXT: store i8 1, ptr @_ZGVZ4mainvE5mybuf
// CHECK-NOT: _Init_thread_footer
diff --git a/clang/test/CodeGenHLSL/vk-features/SpirvType.hlsl b/clang/test/CodeGenHLSL/vk-features/SpirvType.hlsl
index 7149be0..0cebac1 100644
--- a/clang/test/CodeGenHLSL/vk-features/SpirvType.hlsl
+++ b/clang/test/CodeGenHLSL/vk-features/SpirvType.hlsl
@@ -1,6 +1,6 @@
// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
// RUN: spirv-unknown-vulkan-compute %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s
+// RUN: -fspv-use-unknown-image-format -o - | FileCheck %s
template<class T, uint64_t Size>
using Array = vk::SpirvOpaqueType</* OpTypeArray */ 28, T, vk::integral_constant<uint64_t, Size>>;
diff --git a/clang/test/CodeGenObjC/arc-blocks.m b/clang/test/CodeGenObjC/arc-blocks.m
index 72bf35c..605dda7 100644
--- a/clang/test/CodeGenObjC/arc-blocks.m
+++ b/clang/test/CodeGenObjC/arc-blocks.m
@@ -73,7 +73,7 @@ void test3(void (^sink)(id*)) {
// CHECK-NEXT: [[TEMP:%.*]] = alloca ptr
// CHECK-NEXT: call ptr @llvm.objc.retain(
// CHECK-NEXT: store ptr {{%.*}}, ptr [[SINK]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[STRONG]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[STRONG]])
// CHECK-NEXT: store ptr null, ptr [[STRONG]]
// CHECK-NEXT: [[BLOCK:%.*]] = load ptr, ptr [[SINK]]
@@ -91,7 +91,7 @@ void test3(void (^sink)(id*)) {
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[STRONG]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[STRONG]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[STRONG]])
// CHECK-NEXT: load ptr, ptr [[SINK]]
// CHECK-NEXT: call void @llvm.objc.release
@@ -161,7 +161,7 @@ void test5(void) {
// CHECK-LABEL: define{{.*}} void @test5()
// CHECK: [[VAR:%.*]] = alloca ptr
// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VAR]])
// CHECK: [[T1:%.*]] = call ptr @test5_source() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T1]])
// CHECK-NEXT: store ptr [[T1]], ptr [[VAR]],
@@ -172,7 +172,7 @@ void test5(void) {
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[VAR]]
// CHECK-NEXT: store ptr [[T0]], ptr [[CAPTURE]]
// CHECK: call void @test5_helper
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VAR]])
// CHECK-NEXT: ret void
}
@@ -185,7 +185,7 @@ void test6(void) {
// CHECK-LABEL: define{{.*}} void @test6()
// CHECK: [[VAR:%.*]] = alloca [[BYREF_T:%.*]],
// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 48, ptr [[VAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VAR]])
// CHECK: [[T0:%.*]] = getelementptr inbounds nuw [[BYREF_T]], ptr [[VAR]], i32 0, i32 2
// 0x02000000 - has copy/dispose helpers weak
// CHECK-NEXT: store i32 1107296256, ptr [[T0]]
@@ -203,7 +203,7 @@ void test6(void) {
// CHECK: call void @test6_helper(
// CHECK: call void @_Block_object_dispose(ptr [[VAR]], i32 8)
// CHECK-NEXT: call void @llvm.objc.destroyWeak(ptr [[SLOT]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 48, ptr [[VAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VAR]])
// CHECK-NEXT: ret void
// CHECK-LABEL: define internal void @__Block_byref_object_copy_.{{[0-9]+}}(ptr noundef %0, ptr noundef %1) #{{[0-9]+}} {
@@ -449,7 +449,7 @@ void test13(id x) {
// CHECK-NEXT: [[CLEANUP_ACTIVE:%.*]] = alloca i1
// CHECK-NEXT: [[T0:%.*]] = call ptr @llvm.objc.retain(ptr {{%.*}})
// CHECK-NEXT: store ptr [[T0]], ptr [[X]], align 8
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[B]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-NEXT: [[T1:%.*]] = icmp ne ptr [[T0]], null
// CHECK-NEXT: store i1 false, ptr [[CLEANUP_ACTIVE]]
@@ -479,7 +479,7 @@ void test13(id x) {
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
// CHECK-NEXT: br label
- // CHECK: call void @llvm.lifetime.end.p0(i64 8, ptr [[B]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr [[B]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
// CHECK-NEXT: ret void
@@ -501,7 +501,7 @@ void test16(void) {
// CHECK-LABEL: define{{.*}} void @test16(
// CHECK: [[BLKVAR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[BLKVAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[BLKVAR]])
// CHECK-NEXT: store ptr null, ptr [[BLKVAR]], align 8
}
diff --git a/clang/test/CodeGenObjC/arc-precise-lifetime.m b/clang/test/CodeGenObjC/arc-precise-lifetime.m
index 473c0b0..ac761ba 100644
--- a/clang/test/CodeGenObjC/arc-precise-lifetime.m
+++ b/clang/test/CodeGenObjC/arc-precise-lifetime.m
@@ -7,7 +7,7 @@ void test0(void) {
PRECISE_LIFETIME id x = test0_helper();
x = 0;
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[CALL:%.*]] = call ptr @test0_helper()
// CHECK-NEXT: store ptr [[CALL]], ptr [[X]]
@@ -20,7 +20,7 @@ void test0(void) {
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T1]]) [[NUW:#[0-9]+]]
// CHECK-NOT: clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -37,20 +37,20 @@ extern Test1 *test1_helper(void);
void test1a_message(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[C:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[T2:%.*]] = call ptr @llvm.objc.retainAutorelease(ptr [[T0]])
// CHECK-NEXT: [[T4:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[T6:%.*]] = call ptr
// CHECK-NEXT: store ptr [[T6]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
Test1 *ptr = test1_helper();
char *c = [(ptr) interior];
@@ -61,20 +61,20 @@ void test1a_message(void) {
void test1a_property(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[C:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[T2:%.*]] = call ptr @llvm.objc.retainAutorelease(ptr [[T0]])
// CHECK-NEXT: [[T4:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[T6:%.*]] = call ptr
// CHECK-NEXT: store ptr [[T6]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
Test1 *ptr = test1_helper();
char *c = ptr.interior;
@@ -85,20 +85,20 @@ void test1a_property(void) {
void test1b_message(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[C:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[T3:%.*]] = call ptr
// CHECK-NEXT: store ptr [[T3]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]]
// CHECK-NOT: clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
PRECISE_LIFETIME Test1 *ptr = test1_helper();
char *c = [ptr interior];
@@ -108,20 +108,20 @@ void test1b_message(void) {
void test1b_property(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[C:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[T3:%.*]] = call ptr
// CHECK-NEXT: store ptr [[T3]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]]
// CHECK-NOT: clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
PRECISE_LIFETIME Test1 *ptr = test1_helper();
char *c = ptr.interior;
@@ -131,20 +131,20 @@ void test1b_property(void) {
void test1c_message(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[PC:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PC]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[T2:%.*]] = call ptr @llvm.objc.retainAutorelease(ptr [[T0]])
// CHECK-NEXT: [[T4:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[T6:%.*]] = call ptr
// CHECK-NEXT: store ptr [[T6]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PC]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
Test1 *ptr = test1_helper();
char *pc = [ptr PropertyReturnsInnerPointer];
@@ -154,20 +154,20 @@ void test1c_message(void) {
void test1c_property(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[PC:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PC]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[T2:%.*]] = call ptr @llvm.objc.retainAutorelease(ptr [[T0]])
// CHECK-NEXT: [[T4:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[T6:%.*]] = call ptr
// CHECK-NEXT: store ptr [[T6]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PC]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
Test1 *ptr = test1_helper();
char *pc = ptr.PropertyReturnsInnerPointer;
@@ -177,19 +177,19 @@ void test1c_property(void) {
void test1d_message(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[PC:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PC]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[SEL:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[CALL1:%.*]] = call ptr @objc_msgSend(ptr noundef [[T0]], ptr noundef [[SEL]])
// CHECK-NEXT: store ptr [[CALL1]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PC]])
// CHECK-NEXT: [[NINE:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[NINE]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
PRECISE_LIFETIME Test1 *ptr = test1_helper();
char *pc = [ptr PropertyReturnsInnerPointer];
@@ -199,19 +199,19 @@ void test1d_message(void) {
void test1d_property(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[PC:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PC]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[SEL:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[CALL1:%.*]] = call ptr @objc_msgSend(ptr noundef [[T0]], ptr noundef [[SEL]])
// CHECK-NEXT: store ptr [[CALL1]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PC]])
// CHECK-NEXT: [[NINE:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[NINE]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
PRECISE_LIFETIME Test1 *ptr = test1_helper();
char *pc = ptr.PropertyReturnsInnerPointer;
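The hunks above (and the many like them below) are a mechanical update of FileCheck expectations: llvm.lifetime.start and llvm.lifetime.end no longer take a leading i64 size operand and are now called on the alloca pointer alone, with the object size derived from the alloca itself. A minimal before/after sketch, assuming a plain 4-byte local (the function and variable names are illustrative):

void lifetime_sketch(void) {
  int x = 0;   /* lifetime of x begins at its alloca */
  (void)x;
}              /* lifetime of x ends at scope exit */

/* Old expectation: call void @llvm.lifetime.start.p0(i64 4, ptr %x)
 * New expectation: call void @llvm.lifetime.start.p0(ptr %x)
 * The same change applies to the address-space-mangled variants, e.g.
 * @llvm.lifetime.start.p5(ptr addrspace(5) ...) in the AMDGPU tests below. */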
diff --git a/clang/test/CodeGenObjC/arc-ternary-op.m b/clang/test/CodeGenObjC/arc-ternary-op.m
index 4a3c00c..46529b4 100644
--- a/clang/test/CodeGenObjC/arc-ternary-op.m
+++ b/clang/test/CodeGenObjC/arc-ternary-op.m
@@ -12,7 +12,7 @@ void test0(_Bool cond) {
// CHECK-NEXT: [[RELCOND:%.*]] = alloca i1
// CHECK-NEXT: zext
// CHECK-NEXT: store
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[T0:%.*]] = load i8, ptr [[COND]]
// CHECK-NEXT: [[T1:%.*]] = trunc i8 [[T0]] to i1
// CHECK-NEXT: store i1 false, ptr [[RELCOND]]
@@ -32,7 +32,7 @@ void test0(_Bool cond) {
// CHECK-NEXT: br label
// CHECK: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]]
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
id x = (cond ? 0 : test0_helper());
}
@@ -53,9 +53,9 @@ void test1(int cond) {
// CHECK-NEXT: [[CONDCLEANUPSAVE:%.*]] = alloca ptr
// CHECK-NEXT: [[CONDCLEANUP:%.*]] = alloca i1
// CHECK-NEXT: store i32
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[STRONG]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[STRONG]])
// CHECK-NEXT: store ptr null, ptr [[STRONG]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[WEAK]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[WEAK]])
// CHECK-NEXT: call ptr @llvm.objc.initWeak(ptr [[WEAK]], ptr null)
// CHECK-NEXT: [[T0:%.*]] = load i32, ptr [[COND]]
@@ -99,8 +99,8 @@ void test1(int cond) {
// CHECK-NEXT: br label
// CHECK: call void @llvm.objc.destroyWeak(ptr [[WEAK]])
- // CHECK: call void @llvm.lifetime.end.p0(i64 8, ptr [[WEAK]])
- // CHECK: call void @llvm.lifetime.end.p0(i64 8, ptr [[STRONG]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr [[WEAK]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr [[STRONG]])
// CHECK: ret void
}
diff --git a/clang/test/CodeGenObjC/arc.m b/clang/test/CodeGenObjC/arc.m
index 57afe9c..2d860c1 100644
--- a/clang/test/CodeGenObjC/arc.m
+++ b/clang/test/CodeGenObjC/arc.m
@@ -48,13 +48,13 @@ id test1(id x) {
// CHECK-NEXT: [[Y:%.*]] = alloca ptr
// CHECK-NEXT: [[PARM:%.*]] = call ptr @llvm.objc.retain(ptr {{%.*}})
// CHECK-NEXT: store ptr [[PARM]], ptr [[X]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[Y]])
// CHECK-NEXT: store ptr null, ptr [[Y]]
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[Y]]
// CHECK-NEXT: [[RET:%.*]] = call ptr @llvm.objc.retain(ptr [[T0]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[Y]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Y]])
// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T1]])
// CHECK-NEXT: [[T1:%.*]] = tail call ptr @llvm.objc.autoreleaseReturnValue(ptr [[RET]])
@@ -99,7 +99,7 @@ void test3_unelided(void) {
extern void test3_helper(void);
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]], align
Test3 *x;
@@ -118,14 +118,14 @@ void test3_unelided(void) {
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]]
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
// CHECK-LABEL: define{{.*}} void @test3()
void test3(void) {
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
id x = [[Test3 alloc] initWith: 5];
@@ -155,7 +155,7 @@ void test3(void) {
// Cleanup for x.
// CHECK-NEXT: [[TMP:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[TMP]]) [[NUW]]
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -225,12 +225,12 @@ id test6_helper(void) __attribute__((ns_returns_retained));
// CHECK-LABEL: define{{.*}} void @test6()
void test6(void) {
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[CALL:%.*]] = call ptr @test6_helper()
// CHECK-NEXT: store ptr [[CALL]], ptr [[X]]
// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T1]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
id x = test6_helper();
}
@@ -239,14 +239,14 @@ void test7_helper(id __attribute__((ns_consumed)));
// CHECK-LABEL: define{{.*}} void @test7()
void test7(void) {
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]]
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: [[T1:%.*]] = call ptr @llvm.objc.retain(ptr [[T0]]) [[NUW]]
// CHECK-NEXT: call void @test7_helper(ptr noundef [[T1]])
// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T1]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
id x;
test7_helper(x);
@@ -256,11 +256,11 @@ id test8_helper(void) __attribute__((ns_returns_retained));
void test8(void) {
__unsafe_unretained id x = test8_helper();
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[T0:%.*]] = call ptr @test8_helper()
// CHECK-NEXT: store ptr [[T0]], ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -274,9 +274,9 @@ void test10(void) {
// CHECK-LABEL: define{{.*}} void @test10()
// CHECK: [[X:%.*]] = alloca ptr, align
// CHECK-NEXT: [[Y:%.*]] = alloca ptr, align
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[Y]])
// CHECK-NEXT: load ptr, ptr [[X]], align
// CHECK-NEXT: load ptr, ptr @OBJC_SELECTOR_REFERENCES_{{[0-9]*}}
// CHECK-NEXT: [[V:%.*]] = call ptr @objc_msgSend{{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
@@ -288,10 +288,10 @@ void test10(void) {
// CHECK-NEXT: call void @llvm.objc.release(ptr [[V]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[Y]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: void @llvm.lifetime.end.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: void @llvm.lifetime.end.p0(ptr [[Y]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -300,13 +300,13 @@ void test11(id (*f)(void) __attribute__((ns_returns_retained))) {
// CHECK: [[F:%.*]] = alloca ptr, align
// CHECK-NEXT: [[X:%.*]] = alloca ptr, align
// CHECK-NEXT: store ptr {{%.*}}, ptr [[F]], align
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[F]], align
// CHECK-NEXT: [[T1:%.*]] = call ptr [[T0]]()
// CHECK-NEXT: store ptr [[T1]], ptr [[X]], align
// CHECK-NEXT: [[T3:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T3]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
id x = f();
}
@@ -319,7 +319,7 @@ void test12(void) {
// CHECK-NEXT: [[Y:%.*]] = alloca ptr, align
__weak id x = test12_helper();
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[T1:%.*]] = call ptr @test12_helper(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T1]])
// CHECK-NEXT: call ptr @llvm.objc.initWeak(ptr [[X]], ptr [[T1]])
@@ -332,15 +332,15 @@ void test12(void) {
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T1]])
id y = x;
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[Y]])
// CHECK-NEXT: [[T2:%.*]] = call ptr @llvm.objc.loadWeakRetained(ptr [[X]])
// CHECK-NEXT: store ptr [[T2]], ptr [[Y]], align
// CHECK-NEXT: [[T4:%.*]] = load ptr, ptr [[Y]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T4]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: void @llvm.lifetime.end.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: void @llvm.lifetime.end.p0(ptr [[Y]])
// CHECK-NEXT: call void @llvm.objc.destroyWeak(ptr [[X]])
- // CHECK-NEXT: void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK: ret void
}
@@ -348,7 +348,7 @@ void test12(void) {
void test13(void) {
// CHECK-LABEL: define{{.*}} void @test13()
// CHECK: [[X:%.*]] = alloca ptr, align
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]], align
id x;
@@ -371,7 +371,7 @@ void test13(void) {
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]]
- // CHECK-NEXT: void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -886,7 +886,7 @@ void test37(void) {
// CHECK-LABEL: define{{.*}} void @test37()
// CHECK: [[VAR:%.*]] = alloca ptr,
// CHECK-NEXT: [[TEMP:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VAR]])
// CHECK-NEXT: store ptr null, ptr [[VAR]]
// CHECK-NEXT: [[W0:%.*]] = load ptr, ptr [[VAR]]
@@ -901,7 +901,7 @@ void test37(void) {
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[VAR]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VAR]])
// CHECK-NEXT: ret void
}
@@ -956,7 +956,7 @@ void test47(void) {
// CHECK-LABEL: define{{.*}} void @test47()
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]]
// CHECK-NEXT: [[T0:%.*]] = call ptr @test47_helper(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
@@ -969,7 +969,7 @@ void test47(void) {
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T3]])
// CHECK-NEXT: [[T4:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T4]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -978,7 +978,7 @@ void test48(void) {
__weak id x = x = test48_helper();
// CHECK-LABEL: define{{.*}} void @test48()
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[T0:%.*]] = call ptr @llvm.objc.initWeak(ptr [[X]], ptr null)
// CHECK-NEXT: [[T2:%.*]] = call ptr @test48_helper(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T2]])
@@ -986,7 +986,7 @@ void test48(void) {
// CHECK-NEXT: [[T4:%.*]] = call ptr @llvm.objc.storeWeak(ptr [[X]], ptr [[T3]])
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T2]])
// CHECK-NEXT: call void @llvm.objc.destroyWeak(ptr [[X]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -995,7 +995,7 @@ void test49(void) {
__autoreleasing id x = x = test49_helper();
// CHECK-LABEL: define{{.*}} void @test49()
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]]
// CHECK-NEXT: [[T0:%.*]] = call ptr @test49_helper(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
@@ -1003,7 +1003,7 @@ void test49(void) {
// CHECK-NEXT: store ptr [[T1]], ptr [[X]]
// CHECK-NEXT: [[T3:%.*]] = call ptr @llvm.objc.retainAutorelease(ptr [[T1]])
// CHECK-NEXT: store ptr [[T3]], ptr [[X]]
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -1035,12 +1035,12 @@ id test52(void) {
// CHECK-LABEL: define{{.*}} ptr @test52()
// CHECK: [[X:%.*]] = alloca i32
// CHECK-NEXT: [[TMPALLOCA:%.*]] = alloca ptr
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[X]])
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store i32 5, ptr [[X]],
// CHECK-NEXT: [[T0:%.*]] = load i32, ptr [[X]],
// CHECK-NEXT: [[T1:%.*]] = call ptr @test52_helper(i32 noundef [[T0]])
// CHECK-NEXT: store ptr [[T1]], ptr [[TMPALLOCA]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[X]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: [[T2:%.*]] = load ptr, ptr [[TMPALLOCA]]
// CHECK-NEXT: [[T3:%.*]] = tail call ptr @llvm.objc.autoreleaseReturnValue(ptr [[T2]])
// CHECK-NEXT: ret ptr [[T3]]
@@ -1054,8 +1054,8 @@ void test53(void) {
// CHECK: [[X:%.*]] = alloca ptr,
// CHECK-NEXT: [[Y:%.*]] = alloca ptr,
// CHECK-NEXT: [[TMPALLOCA:%.*]] = alloca ptr,
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[Y]])
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[Y]])
// CHECK-NEXT: [[T1:%.*]] = call ptr @test53_helper(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T1]])
// CHECK-NEXT: store ptr [[T1]], ptr [[Y]],
@@ -1064,13 +1064,13 @@ void test53(void) {
// CHECK-NEXT: store ptr [[T1]], ptr [[TMPALLOCA]]
// CHECK-NEXT: [[T2:%.*]] = load ptr, ptr [[Y]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T2]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[Y]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Y]])
// CHECK-NEXT: [[T3:%.*]] = load ptr, ptr [[TMPALLOCA]]
// CHECK-NEXT: store ptr [[T3]], ptr [[X]],
// CHECK-NEXT: load ptr, ptr [[X]],
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -1115,12 +1115,12 @@ void test56_test(void) {
id x = [Test56 make];
// CHECK-LABEL: define{{.*}} void @test56_test()
// CHECK: [[X:%.*]] = alloca ptr, align 8
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK: [[T0:%.*]] = call ptr @objc_msgSend(
// CHECK-NEXT: store ptr [[T0]], ptr [[X]]
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -1188,7 +1188,7 @@ void test61(void) {
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T1]])
[test61_make() performSelector: @selector(test61_void)];
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[Y]])
// CHECK-NEXT: [[T1:%.*]] = call ptr @test61_make(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T1]])
// CHECK-NEXT: [[T2:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
@@ -1201,7 +1201,7 @@ void test61(void) {
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[Y]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Y]])
// CHECK-NEXT: ret void
}
@@ -1213,7 +1213,7 @@ void test62(void) {
extern id test62_make(void);
extern void test62_body(void);
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]])
// CHECK-NEXT: store i32 0, ptr [[I]], align 4
// CHECK-NEXT: br label
@@ -1300,10 +1300,10 @@ void test67(void) {
}
// CHECK-LABEL: define{{.*}} void @test67()
// CHECK: [[CL:%.*]] = alloca ptr, align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[CL]])
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[CL]])
// CHECK-NEXT: [[T0:%.*]] = call ptr @test67_helper()
// CHECK-NEXT: store ptr [[T0]], ptr [[CL]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[CL]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[CL]])
// CHECK-NEXT: ret void
Class test68_helper(void);
@@ -1312,13 +1312,13 @@ void test68(void) {
}
// CHECK-LABEL: define{{.*}} void @test68()
// CHECK: [[CL:%.*]] = alloca ptr, align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[CL]])
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[CL]])
// CHECK-NEXT: [[T1:%.*]] = call ptr @test67_helper(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T1]])
// CHECK-NEXT: store ptr [[T1]], ptr [[CL]], align 8
// CHECK-NEXT: [[T2:%.*]] = load ptr, ptr [[CL]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T2]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[CL]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[CL]])
// CHECK-NEXT: ret void
@interface Test69 @end
@@ -1351,14 +1351,14 @@ struct AggDtor getAggDtor(void);
// CHECK-LABEL: define{{.*}} void @test71
void test71(void) {
- // CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr %[[T:.*]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr %[[T:.*]])
// CHECK: call void @getAggDtor(ptr dead_on_unwind writable sret(%struct.AggDtor) align 8 %[[T]])
// CHECK: call void @__destructor_8_s40(ptr %[[T]])
- // CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr %[[T]])
- // CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr %[[T2:.*]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr %[[T]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr %[[T2:.*]])
// CHECK: call void @getAggDtor(ptr dead_on_unwind writable sret(%struct.AggDtor) align 8 %[[T2]])
// CHECK: call void @__destructor_8_s40(ptr %[[T2]])
- // CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr %[[T2]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr %[[T2]])
getAggDtor();
getAggDtor();
}
diff --git a/clang/test/CodeGenObjC/exceptions.m b/clang/test/CodeGenObjC/exceptions.m
index 832d3a45..66dc051 100644
--- a/clang/test/CodeGenObjC/exceptions.m
+++ b/clang/test/CodeGenObjC/exceptions.m
@@ -79,7 +79,7 @@ void f3(void) {
extern void f3_helper(int, int*);
// CHECK: [[X:%.*]] = alloca i32
- // CHECK: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
// CHECK: store i32 0, ptr [[X]]
int x = 0;
@@ -120,7 +120,7 @@ void f3(void) {
}
// CHECK: call void @f3_helper(i32 noundef 4, ptr noundef nonnull [[X]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
// CHECK-NEXT: ret void
f3_helper(4, &x);
}
diff --git a/clang/test/CodeGenObjC/ptrauth-block-descriptor-pointer.m b/clang/test/CodeGenObjC/ptrauth-block-descriptor-pointer.m
new file mode 100644
index 0000000..b51670f
--- /dev/null
+++ b/clang/test/CodeGenObjC/ptrauth-block-descriptor-pointer.m
@@ -0,0 +1,39 @@
+// RUN: %clang_cc1 -fobjc-arc -fblocks -fptrauth-calls -fptrauth-block-descriptor-pointers -triple arm64e-apple-ios -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fobjc-arc -fblocks -fptrauth-calls -triple arm64e-apple-ios -DNO_BLOCK_DESC_AUTH -emit-llvm -o - %s | FileCheck %s --check-prefix=NODESCRIPTORAUTH
+
+#ifndef NO_BLOCK_DESC_AUTH
+_Static_assert(__has_feature(ptrauth_signed_block_descriptors), "-fptrauth-block-descriptor-pointers should set ptrauth_signed_block_descriptors");
+#else
+_Static_assert(!__has_feature(ptrauth_signed_block_descriptors), "-fptrauth-block-descriptor-pointers should not be enabled by default");
+#endif
+
+void a() {
+ // Test out a global block.
+ void (^blk)(void) = ^{};
+}
+
+// CHECK: [[BLOCK_DESCRIPTOR_NAME:@"__block_descriptor_.*"]] = linkonce_odr hidden unnamed_addr constant { i64, i64, ptr, ptr } { i64 0, i64 32, ptr @.str, ptr null }
+// CHECK: @__block_literal_global = internal constant { ptr, i32, i32, ptr, ptr } { ptr @_NSConcreteGlobalBlock, i32 1342177280, i32 0, ptr ptrauth (ptr @__a_block_invoke, i32 0, i64 0, ptr getelementptr inbounds ({ ptr, i32, i32, ptr, ptr }, ptr @__block_literal_global, i32 0, i32 3)), ptr ptrauth (ptr [[BLOCK_DESCRIPTOR_NAME]], i32 2, i64 49339, ptr getelementptr inbounds ({ ptr, i32, i32, ptr, ptr }, ptr @__block_literal_global, i32 0, i32 4)) }
+
+// NODESCRIPTORAUTH: [[BLOCK_DESCRIPTOR_NAME:@"__block_descriptor_.*"]] = linkonce_odr hidden unnamed_addr constant { i64, i64, ptr, ptr } { i64 0, i64 32, ptr @.str, ptr null }
+// NODESCRIPTORAUTH: @__block_literal_global = internal constant { ptr, i32, i32, ptr, ptr } { ptr @_NSConcreteGlobalBlock, i32 1342177280, i32 0, ptr ptrauth (ptr @__a_block_invoke, i32 0, i64 0, ptr getelementptr inbounds ({ ptr, i32, i32, ptr, ptr }, ptr @__block_literal_global, i32 0, i32 3)), ptr [[BLOCK_DESCRIPTOR_NAME]] }
+
+
+void b(int p) {
+ // CHECK-LABEL: define void @b
+
+ // Test out a stack block.
+ void (^blk)(void) = ^{(void)p;};
+
+ // CHECK: [[BLOCK:%.*]] = alloca <{ ptr, i32, i32, ptr, ptr, i32 }>
+ // CHECK: [[BLOCK_DESCRIPTOR_REF:%.*]] = getelementptr inbounds nuw <{ {{.*}} }>, ptr [[BLOCK]], i32 0, i32 4
+ // CHECK: [[BLOCK_DESCRIPTOR_REF_INT:%.*]] = ptrtoint ptr [[BLOCK_DESCRIPTOR_REF]] to i64
+ // CHECK: [[BLENDED:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[BLOCK_DESCRIPTOR_REF_INT]], i64 49339)
+ // CHECK: [[SIGNED_REF:%.*]] = call i64 @llvm.ptrauth.sign(i64 ptrtoint (ptr @"__block_descriptor_36_e5_v8\01?0l" to i64), i32 2, i64 [[BLENDED]])
+ // CHECK: [[SIGNED_REF_PTR:%.*]] = inttoptr i64 [[SIGNED_REF]] to ptr
+ // CHECK: store ptr [[SIGNED_REF_PTR]], ptr [[BLOCK_DESCRIPTOR_REF]]
+
+ // NODESCRIPTORAUTH: [[BLOCK:%.*]] = alloca <{ ptr, i32, i32, ptr, ptr, i32 }>
+ // NODESCRIPTORAUTH: [[BLOCK_DESCRIPTOR_REF:%.*]] = getelementptr inbounds nuw <{ {{.*}} }>, ptr [[BLOCK]], i32 0, i32 4
+ // NODESCRIPTORAUTH: store ptr @"__block_descriptor_36_e5_v8\01?0l", ptr [[BLOCK_DESCRIPTOR_REF]]
+}
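The new test pins down the descriptor-signing schema under -fptrauth-block-descriptor-pointers: the descriptor pointer is signed with key 2 and a discriminator that blends the descriptor field's own address with the constant 49339, which is exactly the llvm.ptrauth.blend / llvm.ptrauth.sign pair matched for the stack block and the ptrauth constant matched for the global block. A hedged source-level sketch of the same schema using the <ptrauth.h> intrinsics (the helper name and parameters are illustrative, not part of the patch):

#include <ptrauth.h>
#include <stdint.h>

/* Sign a raw block-descriptor pointer for storage into 'field', mirroring
 * the schema the CHECK lines match: key 2 (ptrauth_key_asda), address-
 * discriminated against the field, blended with the constant 49339. */
static void *sign_block_descriptor(void **field, void *raw_descriptor) {
  uintptr_t disc = ptrauth_blend_discriminator(field, 49339);
  return ptrauth_sign_unauthenticated(raw_descriptor, ptrauth_key_asda, disc);
}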
diff --git a/clang/test/CodeGenObjC/ptrauth-block-isa.m b/clang/test/CodeGenObjC/ptrauth-block-isa.m
index c1e98c6..248e577 100644
--- a/clang/test/CodeGenObjC/ptrauth-block-isa.m
+++ b/clang/test/CodeGenObjC/ptrauth-block-isa.m
@@ -1,7 +1,8 @@
-// RUN: %clang_cc1 -fptrauth-calls -fptrauth-objc-isa -fobjc-arc -fblocks -triple arm64e -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fptrauth-calls -fptrauth-objc-isa -fobjc-arc -fblocks -triple arm64e -emit-llvm %s -o - | FileCheck %s
void (^globalblock)(void) = ^{};
-// CHECK: [[GLOBAL_BLOCK:@.*]] = internal constant { ptr, i32, i32, ptr, ptr } { ptr ptrauth (ptr @_NSConcreteGlobalBlock, i32 2, i64 27361, ptr [[GLOBAL_BLOCK]]), i32 1342177280, i32 0, ptr @globalblock_block_invoke, ptr @"__block_descriptor_32_e5_v8\01?0l" }, align 8 #0
+// CHECK: [[BLOCK_DESCRIPTOR_NAME:@"__block_descriptor_.*"]] = linkonce_odr hidden unnamed_addr constant { i64, i64, ptr, ptr } { i64 0, i64 32, ptr @.str, ptr null }, comdat, align 8
+// CHECK: @__block_literal_global = internal constant { ptr, i32, i32, ptr, ptr } { ptr ptrauth (ptr @_NSConcreteGlobalBlock, i32 2, i64 27361, ptr @__block_literal_global), i32 1342177280, i32 0, ptr ptrauth (ptr @globalblock_block_invoke, i32 0, i64 0, ptr getelementptr inbounds ({ ptr, i32, i32, ptr, ptr }, ptr @__block_literal_global, i32 0, i32 3)), ptr [[BLOCK_DESCRIPTOR_NAME]] }
@interface A
- (int) count;
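The updated expectations reflect that the global block literal is now emitted as @__block_literal_global with a signed invoke pointer, while its isa keeps the existing schema: key 2, address-discriminated against the literal itself, with extra discriminator 27361. Expressed as a __ptrauth-qualified field for illustration (the struct is a sketch, not the compiler's internal block layout):

#include <ptrauth.h>

/* Illustrative only: key 2 (ptrauth_key_asda), address discrimination
 * enabled (1), extra discriminator 27361 -- the schema in the CHECK line. */
struct block_literal_sketch {
  void *__ptrauth(ptrauth_key_asda, 1, 27361) isa;
};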
diff --git a/clang/test/CodeGenObjCXX/arc-move.mm b/clang/test/CodeGenObjCXX/arc-move.mm
index 9d25a6a..bda03c1 100644
--- a/clang/test/CodeGenObjCXX/arc-move.mm
+++ b/clang/test/CodeGenObjCXX/arc-move.mm
@@ -48,7 +48,7 @@ void library_move(__strong id &x, __strong id &y) {
void library_move(__strong id &y) {
// CHECK: [[X:%x]] = alloca ptr, align 8
// CHECK: [[I:%.*]] = alloca i32, align 4
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK: [[Y:%[a-zA-Z0-9]+]] = call noundef nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) ptr @_Z4moveIRU8__strongP11objc_objectEON16remove_referenceIT_E4typeEOS5_
// Load the object
// CHECK-NEXT: [[OBJ:%[a-zA-Z0-9]+]] = load ptr, ptr [[Y]]
@@ -58,13 +58,13 @@ void library_move(__strong id &y) {
// CHECK-NEXT: store ptr [[OBJ]], ptr [[X:%[a-zA-Z0-9]+]]
id x = move(y);
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]])
// CHECK-NEXT: store i32 17
int i = 17;
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]])
// CHECK-NEXT: [[OBJ:%[a-zA-Z0-9]+]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[OBJ]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
diff --git a/clang/test/CodeGenObjCXX/arc-references.mm b/clang/test/CodeGenObjCXX/arc-references.mm
index 273e339..5fad327 100644
--- a/clang/test/CodeGenObjCXX/arc-references.mm
+++ b/clang/test/CodeGenObjCXX/arc-references.mm
@@ -45,7 +45,7 @@ void test3() {
// CHECK-NEXT: call void @_Z6calleev()
callee();
// CHECK-NEXT: call void @llvm.objc.destroyWeak
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF]])
// CHECK-NEXT: ret void
}
@@ -71,10 +71,10 @@ void test5(__strong id &x) {
sink(x);
// CHECK-NEXT: [[OBJ_A:%[a-zA-Z0-9]+]] = load ptr, ptr [[REFTMP]]
// CHECK-NEXT: call void @llvm.objc.release
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]])
// CHECK-NEXT: store i32 17, ptr
int i = 17;
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]])
// CHECK-NEXT: ret void
}
diff --git a/clang/test/CodeGenObjCXX/arc.mm b/clang/test/CodeGenObjCXX/arc.mm
index 7883378..20f1e37 100644
--- a/clang/test/CodeGenObjCXX/arc.mm
+++ b/clang/test/CodeGenObjCXX/arc.mm
@@ -61,9 +61,9 @@ void test34(int cond) {
// CHECK-NEXT: [[CONDCLEANUPSAVE:%.*]] = alloca ptr
// CHECK-NEXT: [[CONDCLEANUP:%.*]] = alloca i1
// CHECK-NEXT: store i32
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[STRONG]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[STRONG]])
// CHECK-NEXT: store ptr null, ptr [[STRONG]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[WEAK]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[WEAK]])
// CHECK-NEXT: call ptr @llvm.objc.initWeak(ptr [[WEAK]], ptr null)
// CHECK-NEXT: [[T0:%.*]] = load i32, ptr [[COND]]
@@ -293,7 +293,7 @@ template void test40_helper<int>();
// CHECK-LABEL: define weak_odr void @_Z13test40_helperIiEvv()
// CHECK: [[X:%.*]] = alloca ptr
// CHECK-NEXT: [[TEMP:%.*]] = alloca ptr
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]]
// CHECK: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: store ptr [[T0]], ptr [[TEMP]]
diff --git a/clang/test/CodeGenObjCXX/literals.mm b/clang/test/CodeGenObjCXX/literals.mm
index 737aa9e..b7938ccc 100644
--- a/clang/test/CodeGenObjCXX/literals.mm
+++ b/clang/test/CodeGenObjCXX/literals.mm
@@ -22,16 +22,16 @@ void test_array() {
// CHECK: [[TMPY:%[a-zA-Z0-9.]+]] = alloca %
// Initializing first element
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[ARR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[ARR]])
// CHECK: [[ELEMENT0:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x ptr], ptr [[OBJECTS]], i64 0, i64 0
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMPX]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMPX]])
// CHECK-NEXT: call void @_ZN1XC1Ev({{.*}} [[TMPX]])
// CHECK-NEXT: [[OBJECT0:%[a-zA-Z0-9.]+]] = invoke noundef ptr @_ZNK1XcvP11objc_objectEv{{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK: store ptr [[OBJECT0]], ptr [[ELEMENT0]]
// Initializing the second element
// CHECK: [[ELEMENT1:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x ptr], ptr [[OBJECTS]], i64 0, i64 1
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMPY]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMPY]])
// CHECK-NEXT: invoke void @_ZN1YC1Ev({{.*}} [[TMPY]])
// CHECK: [[OBJECT1:%[a-zA-Z0-9.]+]] = invoke noundef ptr @_ZNK1YcvP11objc_objectEv{{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK: store ptr [[OBJECT1]], ptr [[ELEMENT1]]
@@ -50,7 +50,7 @@ void test_array() {
// CHECK-NEXT: call void @_ZN1XD1Ev
// CHECK-NOT: ret void
// CHECK: call void @llvm.objc.release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ARR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
// CHECK-NEXT: ret void
// Check cleanups
@@ -71,7 +71,7 @@ void test_array_instantiation() {
// CHECK: [[OBJECTS:%[a-zA-Z0-9.]+]] = alloca [2 x ptr]
// Initializing first element
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[ARR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[ARR]])
// CHECK: [[ELEMENT0:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x ptr], ptr [[OBJECTS]], i64 0, i64 0
// CHECK: call void @_ZN1XC1Ev
// CHECK-NEXT: [[OBJECT0:%[a-zA-Z0-9.]+]] = invoke noundef ptr @_ZNK1XcvP11objc_objectEv{{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
@@ -97,7 +97,7 @@ void test_array_instantiation() {
// CHECK-NEXT: call void @_ZN1XD1Ev
// CHECK-NOT: ret void
// CHECK: call void @llvm.objc.release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ARR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
// CHECK-NEXT: ret void
// Check cleanups
diff --git a/clang/test/CodeGenOpenCL/addr-space-struct-arg.cl b/clang/test/CodeGenOpenCL/addr-space-struct-arg.cl
index a70e9af..85157bd 100644
--- a/clang/test/CodeGenOpenCL/addr-space-struct-arg.cl
+++ b/clang/test/CodeGenOpenCL/addr-space-struct-arg.cl
@@ -647,6 +647,7 @@ kernel void KernelLargeTwoMember(struct LargeStructTwoMember u) {
// AMDGCN20-NEXT: [[IN:%.*]] = alloca [[STRUCT_MAT3X3:%.*]], align 4, addrspace(5)
// AMDGCN20-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// AMDGCN20-NEXT: [[IN1:%.*]] = addrspacecast ptr addrspace(5) [[IN]] to ptr
+// AMDGCN20-NEXT: [[RETVAL_ASCAST_ASCAST:%.*]] = addrspacecast ptr [[RETVAL_ASCAST]] to ptr addrspace(5)
// AMDGCN20-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_MAT3X3]], ptr [[IN1]], i32 0, i32 0
// AMDGCN20-NEXT: store [9 x i32] [[IN_COERCE]], ptr [[COERCE_DIVE]], align 4
// AMDGCN20-NEXT: [[TMP0:%.*]] = load [[STRUCT_MAT4X4]], ptr [[RETVAL_ASCAST]], align 4
diff --git a/clang/test/CodeGenOpenCL/amdgpu-abi-struct-arg-byref.cl b/clang/test/CodeGenOpenCL/amdgpu-abi-struct-arg-byref.cl
index a1a114e..bc65788 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-abi-struct-arg-byref.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-abi-struct-arg-byref.cl
@@ -121,6 +121,7 @@ kernel void KernelLargeTwoMember(struct LargeStructTwoMember u) {
// AMDGCN-NEXT: [[IN:%.*]] = alloca [[STRUCT_MAT3X3:%.*]], align 4, addrspace(5)
// AMDGCN-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// AMDGCN-NEXT: [[IN1:%.*]] = addrspacecast ptr addrspace(5) [[IN]] to ptr
+// AMDGCN-NEXT: [[RETVAL_ASCAST_ASCAST:%.*]] = addrspacecast ptr [[RETVAL_ASCAST]] to ptr addrspace(5)
// AMDGCN-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_MAT3X3]], ptr [[IN1]], i32 0, i32 0
// AMDGCN-NEXT: store [9 x i32] [[IN_COERCE]], ptr [[COERCE_DIVE]], align 4
// AMDGCN-NEXT: [[TMP0:%.*]] = load [[STRUCT_MAT4X4]], ptr [[RETVAL_ASCAST]], align 4
diff --git a/clang/test/CodeGenOpenCL/amdgpu-enqueue-kernel.cl b/clang/test/CodeGenOpenCL/amdgpu-enqueue-kernel.cl
index bfbed79..d71c898 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-enqueue-kernel.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-enqueue-kernel.cl
@@ -523,10 +523,10 @@ kernel void test_target_features_kernel(global int *i) {
// GFX900-NEXT: store i8 [[B]], ptr [[B_ADDR_ASCAST]], align 1, !tbaa [[TBAA16]]
// GFX900-NEXT: store ptr addrspace(1) [[C]], ptr [[C_ADDR_ASCAST]], align 8, !tbaa [[TBAA7]]
// GFX900-NEXT: store i64 [[D]], ptr [[D_ADDR_ASCAST]], align 8, !tbaa [[TBAA3]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9:[0-9]+]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9:[0-9]+]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
// GFX900-NEXT: store i32 0, ptr addrspace(5) [[FLAGS]], align 4, !tbaa [[TBAA17:![0-9]+]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
// GFX900-NEXT: [[TMP0:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[DEFAULT_QUEUE]], align 8, !tbaa [[TBAA19:![0-9]+]]
// GFX900-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(5) [[FLAGS]], align 4, !tbaa [[TBAA17]]
// GFX900-NEXT: call void @llvm.memcpy.p0.p5.i64(ptr align 4 [[TMP_ASCAST]], ptr addrspace(5) align 4 [[NDRANGE]], i64 4, i1 false), !tbaa.struct [[TBAA_STRUCT21:![0-9]+]]
@@ -586,12 +586,12 @@ kernel void test_target_features_kernel(global int *i) {
// GFX900-NEXT: [[BLOCK_CAPTURED19:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr, ptr addrspace(1), ptr addrspace(1), i64, i8 }>, ptr [[BLOCK12_ASCAST]], i32 0, i32 5
// GFX900-NEXT: [[TMP17:%.*]] = load i64, ptr [[D_ADDR_ASCAST]], align 8, !tbaa [[TBAA3]]
// GFX900-NEXT: store i64 [[TMP17]], ptr [[BLOCK_CAPTURED19]], align 8, !tbaa [[TBAA3]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[BLOCK_SIZES]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[BLOCK_SIZES]]) #[[ATTR9]]
// GFX900-NEXT: [[TMP18:%.*]] = getelementptr [1 x i64], ptr addrspace(5) [[BLOCK_SIZES]], i32 0, i32 0
// GFX900-NEXT: store i64 100, ptr addrspace(5) [[TMP18]], align 8
// GFX900-NEXT: [[TMP19:%.*]] = call i32 @__enqueue_kernel_varargs(ptr addrspace(1) [[TMP12]], i32 [[TMP13]], ptr addrspace(5) [[VARTMP11]], ptr addrspacecast (ptr addrspace(1) @__test_block_invoke_3_kernel.runtime.handle to ptr), ptr [[BLOCK12_ASCAST]], i32 1, ptr addrspace(5) [[TMP18]])
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[BLOCK_SIZES]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[BLOCK20]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[BLOCK_SIZES]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[BLOCK20]]) #[[ATTR9]]
// GFX900-NEXT: [[BLOCK_SIZE22:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr, i64, ptr addrspace(1) }>, ptr [[BLOCK21_ASCAST]], i32 0, i32 0
// GFX900-NEXT: store i32 32, ptr [[BLOCK_SIZE22]], align 8
// GFX900-NEXT: [[BLOCK_ALIGN23:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr, i64, ptr addrspace(1) }>, ptr [[BLOCK21_ASCAST]], i32 0, i32 1
@@ -610,10 +610,10 @@ kernel void test_target_features_kernel(global int *i) {
// GFX900-NEXT: call void @llvm.memcpy.p0.p5.i64(ptr align 4 [[TMP27_ASCAST]], ptr addrspace(5) align 4 [[NDRANGE]], i64 4, i1 false), !tbaa.struct [[TBAA_STRUCT21]]
// GFX900-NEXT: [[TMP24:%.*]] = load ptr, ptr addrspace(5) [[BLOCK20]], align 8, !tbaa [[TBAA16]]
// GFX900-NEXT: [[TMP25:%.*]] = call i32 @__enqueue_kernel_basic(ptr addrspace(1) [[TMP22]], i32 [[TMP23]], ptr addrspace(5) [[VARTMP27]], ptr addrspacecast (ptr addrspace(1) @__test_block_invoke_4_kernel.runtime.handle to ptr), ptr [[BLOCK21_ASCAST]])
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[BLOCK20]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[BLOCK20]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9]]
// GFX900-NEXT: ret void
//
//
@@ -641,18 +641,18 @@ kernel void test_target_features_kernel(global int *i) {
// GFX900-NEXT: [[I_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I_ADDR]] to ptr
// GFX900-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
// GFX900-NEXT: store ptr addrspace(1) [[I]], ptr [[I_ADDR_ASCAST]], align 8, !tbaa [[TBAA26]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
// GFX900-NEXT: store i32 0, ptr addrspace(5) [[FLAGS]], align 4, !tbaa [[TBAA17]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
// GFX900-NEXT: [[TMP0:%.*]] = call i64 @llvm.amdgcn.s.memtime()
// GFX900-NEXT: [[TMP1:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[DEFAULT_QUEUE]], align 8, !tbaa [[TBAA19]]
// GFX900-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(5) [[FLAGS]], align 4, !tbaa [[TBAA17]]
// GFX900-NEXT: call void @llvm.memcpy.p0.p5.i64(ptr align 4 [[TMP_ASCAST]], ptr addrspace(5) align 4 [[NDRANGE]], i64 4, i1 false), !tbaa.struct [[TBAA_STRUCT21]]
// GFX900-NEXT: [[TMP3:%.*]] = call i32 @__enqueue_kernel_basic(ptr addrspace(1) [[TMP1]], i32 [[TMP2]], ptr addrspace(5) [[TMP]], ptr addrspacecast (ptr addrspace(1) @__test_target_features_kernel_block_invoke_kernel.runtime.handle to ptr), ptr addrspacecast (ptr addrspace(1) @__block_literal_global to ptr))
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9]]
// GFX900-NEXT: ret void
//
//
diff --git a/clang/test/CodeGenOpenCL/amdgpu-features-illegal.cl b/clang/test/CodeGenOpenCL/amdgpu-features-illegal.cl
index 4e2f7f8..04de5dc 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-features-illegal.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-features-illegal.cl
@@ -1,8 +1,10 @@
// RUN: not %clang_cc1 -triple amdgcn -target-feature +wavefrontsize32 -target-feature +wavefrontsize64 -o /dev/null %s 2>&1 | FileCheck %s
// RUN: not %clang_cc1 -triple amdgcn -target-cpu gfx1103 -target-feature +wavefrontsize32 -target-feature +wavefrontsize64 -o /dev/null %s 2>&1 | FileCheck %s
// RUN: not %clang_cc1 -triple amdgcn -target-cpu gfx900 -target-feature +wavefrontsize32 -o /dev/null %s 2>&1 | FileCheck %s --check-prefix=GFX9
+// RUN: not %clang_cc1 -triple amdgcn -target-cpu gfx1250 -target-feature +wavefrontsize64 -o /dev/null %s 2>&1 | FileCheck %s --check-prefix=GFX1250
// CHECK: error: invalid feature combination: 'wavefrontsize32' and 'wavefrontsize64' are mutually exclusive
// GFX9: error: option 'wavefrontsize32' cannot be specified on this target
+// GFX1250: error: option 'wavefrontsize64' cannot be specified on this target
kernel void test() {}
diff --git a/clang/test/CodeGenOpenCL/amdgpu-printf.cl b/clang/test/CodeGenOpenCL/amdgpu-printf.cl
index 33fee66..b9e2517 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-printf.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-printf.cl
@@ -65,12 +65,12 @@ __kernel void test_printf_str_int(int i) {
// CHECK-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[S:%.*]] = alloca [4 x i8], align 1, addrspace(5)
// CHECK-NEXT: store i32 [[I]], ptr addrspace(5) [[I_ADDR]], align 4, !tbaa [[TBAA9]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[S]]) #[[ATTR7:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[S]]) #[[ATTR7:[0-9]+]]
// CHECK-NEXT: call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) align 1 [[S]], ptr addrspace(4) align 1 @__const.test_printf_str_int.s, i64 4, i1 false)
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(5) [[S]], i64 0, i64 0
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[I_ADDR]], align 4, !tbaa [[TBAA9]]
// CHECK-NEXT: [[CALL:%.*]] = call i32 (ptr addrspace(4), ...) @printf(ptr addrspace(4) noundef @.str.2, ptr addrspace(5) noundef [[ARRAYDECAY]], i32 noundef [[TMP0]]) #[[ATTR6]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[S]]) #[[ATTR7]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[S]]) #[[ATTR7]]
// CHECK-NEXT: ret void
//
//.
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-fp8.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-fp8.cl
index f300b05f..cdfe9fc 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-fp8.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-fp8.cl
@@ -1,6 +1,7 @@
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx942 -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1200 -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1250 -emit-llvm -o - %s | FileCheck %s
typedef float v2f __attribute__((ext_vector_type(2)));
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12-param-err.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12-param-err.cl
index 5d86a9b..1a50433 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12-param-err.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12-param-err.cl
@@ -23,6 +23,13 @@ kernel void builtins_amdgcn_s_barrier_signal_isfirst_err(global int* in, global
*out = *in;
}
+kernel void builtins_amdgcn_s_barrier_leave_err(global int* in, global int* out, int barrier) {
+
+ __builtin_amdgcn_s_barrier_signal(-1);
+ __builtin_amdgcn_s_barrier_leave(barrier); // expected-error {{'__builtin_amdgcn_s_barrier_leave' must be a constant integer}}
+ *out = *in;
+}
+
void test_s_buffer_prefetch_data(__amdgpu_buffer_rsrc_t rsrc, unsigned int off)
{
__builtin_amdgcn_s_buffer_prefetch_data(rsrc, off, 31); // expected-error {{'__builtin_amdgcn_s_buffer_prefetch_data' must be a constant integer}}
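The new negative test locks in the same constraint for __builtin_amdgcn_s_barrier_leave that the surrounding tests already verify for other builtins: the barrier-id operand must be an integer constant expression. A minimal sketch of what is and is not accepted (function names are illustrative):

void leave_constant_ok(void) {
  __builtin_amdgcn_s_barrier_leave(1);      /* accepted: literal constant */
}

void leave_variable_bad(int id) {
  /* __builtin_amdgcn_s_barrier_leave(id);     rejected: 'id' is not a
     compile-time constant, as the expected-error above verifies */
}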
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12.cl
index f764128..8c02616 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12.cl
@@ -139,6 +139,50 @@ void test_s_barrier_signal_isfirst(int* a, int* b, int *c)
__builtin_amdgcn_s_barrier_wait(1);
}
+// CHECK-LABEL: @test_s_barrier_init(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[BAR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[BAR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[BAR_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: store ptr [[BAR:%.*]], ptr [[BAR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BAR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[TMP0]] to ptr addrspace(3)
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: call void @llvm.amdgcn.s.barrier.init(ptr addrspace(3) [[TMP1]], i32 [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_s_barrier_init(void *bar, int a)
+{
+ __builtin_amdgcn_s_barrier_init(bar, a);
+}
+
+// CHECK-LABEL: @test_s_barrier_join(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[BAR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT: [[BAR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[BAR_ADDR]] to ptr
+// CHECK-NEXT: store ptr [[BAR:%.*]], ptr [[BAR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BAR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[TMP0]] to ptr addrspace(3)
+// CHECK-NEXT: call void @llvm.amdgcn.s.barrier.join(ptr addrspace(3) [[TMP1]])
+// CHECK-NEXT: ret void
+//
+void test_s_barrier_join(void *bar)
+{
+ __builtin_amdgcn_s_barrier_join(bar);
+}
+
+// CHECK-LABEL: @test_s_barrier_leave(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: call void @llvm.amdgcn.s.barrier.leave(i16 1)
+// CHECK-NEXT: ret void
+//
+void test_s_barrier_leave()
+{
+ __builtin_amdgcn_s_barrier_leave(1);
+}
+
// CHECK-LABEL: @test_s_get_barrier_state(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4, addrspace(5)
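Together the new gfx12 tests cover the split-barrier lifecycle around the existing signal/wait builtins. A hedged usage sketch combining them (signatures as in the test functions above; the LDS barrier object, member count, and barrier id are illustrative assumptions):

/* 'lds_barrier' is assumed to point at a barrier object in LDS. */
void split_barrier_sketch(void *lds_barrier) {
  __builtin_amdgcn_s_barrier_init(lds_barrier, 4); /* set member count */
  __builtin_amdgcn_s_barrier_join(lds_barrier);    /* join the barrier */
  __builtin_amdgcn_s_barrier_signal(-1);           /* signal arrival */
  __builtin_amdgcn_s_barrier_wait(1);              /* wait for the rest */
  __builtin_amdgcn_s_barrier_leave(1);             /* leave; constant id */
}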
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
index 4ff0571..23af19d 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
@@ -58,6 +58,58 @@ void test_s_wait_tensorcnt() {
__builtin_amdgcn_s_wait_tensorcnt(0);
}
+// CHECK-LABEL: @test_bitop3_b32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[C_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// CHECK-NEXT: [[C_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[C_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[C:%.*]], ptr [[C_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[C_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.amdgcn.bitop3.i32(i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 1)
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP3]], ptr addrspace(1) [[TMP4]], align 4
+// CHECK-NEXT: ret void
+//
+void test_bitop3_b32(global uint* out, uint a, uint b, uint c) {
+ *out = __builtin_amdgcn_bitop3_b32(a, b, c, 1);
+}
+
+// CHECK-LABEL: @test_bitop3_b16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i16, align 2, addrspace(5)
+// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i16, align 2, addrspace(5)
+// CHECK-NEXT: [[C_ADDR:%.*]] = alloca i16, align 2, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// CHECK-NEXT: [[C_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[C_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i16 [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 2
+// CHECK-NEXT: store i16 [[B:%.*]], ptr [[B_ADDR_ASCAST]], align 2
+// CHECK-NEXT: store i16 [[C:%.*]], ptr [[C_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[A_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[B_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[C_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[TMP3:%.*]] = call i16 @llvm.amdgcn.bitop3.i16(i16 [[TMP0]], i16 [[TMP1]], i16 [[TMP2]], i32 1)
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i16 [[TMP3]], ptr addrspace(1) [[TMP4]], align 2
+// CHECK-NEXT: ret void
+//
+void test_bitop3_b16(global ushort* out, ushort a, ushort b, ushort c) {
+ *out = __builtin_amdgcn_bitop3_b16(a, b, c, 1);
+}
+
// CHECK-LABEL: @test_prng_b32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
@@ -1258,6 +1310,145 @@ void test_prefetch(generic void *fptr, global void *gptr) {
__builtin_amdgcn_global_prefetch(gptr, 8);
}
+// CHECK-LABEL: @test_global_add_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[ADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ADDR_ADDR]] to ptr
+// CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[ADDR:%.*]], ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store float [[X:%.*]], ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(1), ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr addrspace(1) [[TMP0]], float [[TMP1]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4:![0-9]+]], !amdgpu.ignore.denormal.mode [[META4]]
+// CHECK-NEXT: ret float [[TMP2]]
+//
+float test_global_add_f32(global float *addr, float x) {
+ return __builtin_amdgcn_global_atomic_fadd_f32(addr, x);
+}
+
+// CHECK-LABEL: @test_global_add_half2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <2 x half>, align 4, addrspace(5)
+// CHECK-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca <2 x half>, align 4, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[ADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ADDR_ADDR]] to ptr
+// CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[ADDR:%.*]], ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x half> [[X:%.*]], ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(1), ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load <2 x half>, ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr addrspace(1) [[TMP0]], <2 x half> [[TMP1]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]]
+// CHECK-NEXT: ret <2 x half> [[TMP2]]
+//
+half2 test_global_add_half2(global half2 *addr, half2 x) {
+ return __builtin_amdgcn_global_atomic_fadd_v2f16(addr, x);
+}
+
+// CHECK-LABEL: @test_flat_add_2f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <2 x half>, align 4, addrspace(5)
+// CHECK-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca <2 x half>, align 4, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[ADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ADDR_ADDR]] to ptr
+// CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X_ADDR]] to ptr
+// CHECK-NEXT: store ptr [[ADDR:%.*]], ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x half> [[X:%.*]], ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load <2 x half>, ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], <2 x half> [[TMP1]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]]
+// CHECK-NEXT: ret <2 x half> [[TMP2]]
+//
+half2 test_flat_add_2f16(generic half2 *addr, half2 x) {
+ return __builtin_amdgcn_flat_atomic_fadd_v2f16(addr, x);
+}
+
+// CHECK-LABEL: @test_flat_add_2bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <2 x i16>, align 4, addrspace(5)
+// CHECK-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca <2 x i16>, align 4, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[ADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ADDR_ADDR]] to ptr
+// CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X_ADDR]] to ptr
+// CHECK-NEXT: store ptr [[ADDR:%.*]], ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x i16> [[X:%.*]], ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i16> [[TMP1]] to <2 x bfloat>
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw fadd ptr [[TMP0]], <2 x bfloat> [[TMP2]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]]
+// CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x bfloat> [[TMP3]] to <2 x i16>
+// CHECK-NEXT: ret <2 x i16> [[TMP4]]
+//
+short2 test_flat_add_2bf16(generic short2 *addr, short2 x) {
+ return __builtin_amdgcn_flat_atomic_fadd_v2bf16(addr, x);
+}
+
+// CHECK-LABEL: @test_global_add_2bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <2 x i16>, align 4, addrspace(5)
+// CHECK-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca <2 x i16>, align 4, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[ADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ADDR_ADDR]] to ptr
+// CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[ADDR:%.*]], ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x i16> [[X:%.*]], ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(1), ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i16> [[TMP1]] to <2 x bfloat>
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw fadd ptr addrspace(1) [[TMP0]], <2 x bfloat> [[TMP2]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]]
+// CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x bfloat> [[TMP3]] to <2 x i16>
+// CHECK-NEXT: ret <2 x i16> [[TMP4]]
+//
+short2 test_global_add_2bf16(global short2 *addr, short2 x) {
+ return __builtin_amdgcn_global_atomic_fadd_v2bf16(addr, x);
+}
+
+// CHECK-LABEL: @test_local_add_2bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <2 x i16>, align 4, addrspace(5)
+// CHECK-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr addrspace(3), align 4, addrspace(5)
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca <2 x i16>, align 4, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[ADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ADDR_ADDR]] to ptr
+// CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(3) [[ADDR:%.*]], ptr [[ADDR_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store <2 x i16> [[X:%.*]], ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(3), ptr [[ADDR_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i16> [[TMP1]] to <2 x bfloat>
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw fadd ptr addrspace(3) [[TMP0]], <2 x bfloat> [[TMP2]] syncscope("agent") monotonic, align 4
+// CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x bfloat> [[TMP3]] to <2 x i16>
+// CHECK-NEXT: ret <2 x i16> [[TMP4]]
+//
+short2 test_local_add_2bf16(local short2 *addr, short2 x) {
+ return __builtin_amdgcn_ds_atomic_fadd_v2bf16(addr, x);
+}
+
+// CHECK-LABEL: @test_local_add_2f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <2 x half>, align 4, addrspace(5)
+// CHECK-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr addrspace(3), align 4, addrspace(5)
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca <2 x half>, align 4, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[ADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ADDR_ADDR]] to ptr
+// CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(3) [[ADDR:%.*]], ptr [[ADDR_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store <2 x half> [[X:%.*]], ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(3), ptr [[ADDR_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load <2 x half>, ptr [[X_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr addrspace(3) [[TMP0]], <2 x half> [[TMP1]] syncscope("agent") monotonic, align 4
+// CHECK-NEXT: ret <2 x half> [[TMP2]]
+//
+half2 test_local_add_2f16(local half2 *addr, half2 x) {
+ return __builtin_amdgcn_ds_atomic_fadd_v2f16(addr, x);
+}
+
// CHECK-LABEL: @test_cvt_pk_fp8_f32_e5m3(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
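An aside on the new bitop3 tests above: the trailing integer operand of __builtin_amdgcn_bitop3_b32/_b16 is consumed as an 8-bit truth table over the three inputs, in the same spirit as x86's vpternlog. A minimal host-side reference model is sketched below under that assumption (bitop3_ref and the exact index ordering are illustrative, not quoted from the ISA manual); note that for table == 1, as used in the tests, any index ordering gives the same answer, ~(a | b | c).

  // Hypothetical reference model: result bit i is table[(a_i << 2) | (b_i << 1) | c_i].
  static unsigned bitop3_ref(unsigned a, unsigned b, unsigned c, unsigned table) {
    unsigned r = 0;
    for (int i = 0; i < 32; ++i) {
      unsigned idx = (((a >> i) & 1u) << 2) | (((b >> i) & 1u) << 1) | ((c >> i) & 1u);
      r |= ((table >> idx) & 1u) << i; // look the 3-bit pattern up in the 8-entry table
    }
    return r;
  }
  // table == 1 sets only the a = b = c = 0 row, so bitop3_ref(a, b, c, 1) == ~(a | b | c).
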
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-wave32.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-wave32.cl
index 5e587cb..d390418 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-wave32.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-wave32.cl
@@ -24,6 +24,13 @@ void test_ballot_wave32_target_attr(global uint* out, int a, int b)
*out = __builtin_amdgcn_ballot_w32(a == b);
}
+// CHECK-LABEL: @test_inverse_ballot_wave32(
+// CHECK: call i1 @llvm.amdgcn.inverse.ballot.i32(i32 %{{.+}})
+void test_inverse_ballot_wave32(global bool* out, int a)
+{
+ *out = __builtin_amdgcn_inverse_ballot_w32(a);
+}
+
// CHECK-LABEL: @test_read_exec(
// CHECK: call i32 @llvm.amdgcn.ballot.i32(i1 true)
void test_read_exec(global uint* out) {
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-wave64.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-wave64.cl
index 1fc2ac0..d851ec7 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-wave64.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-wave64.cl
@@ -23,6 +23,13 @@ void test_ballot_wave64_target_attr(global ulong* out, int a, int b)
*out = __builtin_amdgcn_ballot_w64(a == b);
}
+// CHECK-LABEL: @test_inverse_ballot_wave64(
+// CHECK: call i1 @llvm.amdgcn.inverse.ballot.i64(i64 %{{.+}})
+void test_inverse_ballot_wave64(global bool* out, ulong a)
+{
+ *out = __builtin_amdgcn_inverse_ballot_w64(a);
+}
+
// CHECK-LABEL: @test_read_exec(
// CHECK: call i64 @llvm.amdgcn.ballot.i64(i1 true)
void test_read_exec(global ulong* out) {
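For context on the two inverse-ballot tests just added: __builtin_amdgcn_inverse_ballot_wN is the inverse of ballot — every lane reads the bit of the given mask that corresponds to its own lane id. A round-trip sketch in the same OpenCL style as these tests (wave32 flavor; the kernel and parameter names are illustrative):

  // ballot packs each active lane's predicate into one bit of a mask;
  // inverse_ballot hands each lane its own bit back.
  kernel void ballot_roundtrip(global uint *mask_out, global int *bit_out, int a, int b) {
    uint mask = __builtin_amdgcn_ballot_w32(a == b);
    *mask_out = mask;
    *bit_out = __builtin_amdgcn_inverse_ballot_w32(mask); // equals (a == b) for this lane
  }
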
diff --git a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl
index 6c85e73..b0dbf60 100644
--- a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl
+++ b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl
@@ -128,9 +128,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// SPIR: [[DEF_Q:%[0-9]+]] = load target("spirv.Queue"), ptr %default_queue
// X86: [[DEF_Q:%[0-9]+]] = load ptr, ptr %default_queue
// COMMON: [[FLAGS:%[0-9]+]] = load i32, ptr %flags
- // CHECK-LIFETIMES: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %[[BLOCK_SIZES1]])
+ // CHECK-LIFETIMES: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES1]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %[[BLOCK_SIZES1]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES1]])
// B32: %[[TMP:.*]] = getelementptr [1 x i32], ptr %[[BLOCK_SIZES1]], i32 0, i32 0
// B32: store i32 256, ptr %[[TMP]], align 4
// B64: %[[TMP:.*]] = getelementptr [1 x i64], ptr %[[BLOCK_SIZES1]], i32 0, i32 0
@@ -153,9 +153,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// SPIR: [[DEF_Q:%[0-9]+]] = load target("spirv.Queue"), ptr %default_queue
// X86: [[DEF_Q:%[0-9]+]] = load ptr, ptr %default_queue
// COMMON: [[FLAGS:%[0-9]+]] = load i32, ptr %flags
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %[[BLOCK_SIZES2]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES2]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %[[BLOCK_SIZES2]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES2]])
// B32: %[[TMP:.*]] = getelementptr [1 x i32], ptr %[[BLOCK_SIZES2]], i32 0, i32 0
// B32: store i32 %{{.*}}, ptr %[[TMP]], align 4
// B64: %[[TMP:.*]] = getelementptr [1 x i64], ptr %[[BLOCK_SIZES2]], i32 0, i32 0
@@ -181,9 +181,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// X86: [[AD:%arraydecay[0-9]*]] = getelementptr inbounds [1 x ptr], ptr %event_wait_list2, i{{32|64}} 0, i{{32|64}} 0
// COMMON: [[WAIT_EVNT:%[0-9]+]] ={{.*}} addrspacecast ptr [[AD]] to ptr addrspace(4)
// COMMON: [[EVNT:%[0-9]+]] ={{.*}} addrspacecast ptr %clk_event to ptr addrspace(4)
- // CHECK-LIFETIMES: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %[[BLOCK_SIZES3]])
+ // CHECK-LIFETIMES: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES3]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_events_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %[[BLOCK_SIZES3]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES3]])
// B32: %[[TMP:.*]] = getelementptr [1 x i32], ptr %[[BLOCK_SIZES3]], i32 0, i32 0
// B32: store i32 256, ptr %[[TMP]], align 4
// B64: %[[TMP:.*]] = getelementptr [1 x i64], ptr %[[BLOCK_SIZES3]], i32 0, i32 0
@@ -209,9 +209,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// X86: [[AD:%arraydecay[0-9]*]] = getelementptr inbounds [1 x ptr], ptr %event_wait_list2, i{{32|64}} 0, i{{32|64}} 0
// COMMON: [[WAIT_EVNT:%[0-9]+]] ={{.*}} addrspacecast ptr [[AD]] to ptr addrspace(4)
// COMMON: [[EVNT:%[0-9]+]] ={{.*}} addrspacecast ptr %clk_event to ptr addrspace(4)
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %[[BLOCK_SIZES4]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES4]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_events_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %[[BLOCK_SIZES4]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES4]])
// B32: %[[TMP:.*]] = getelementptr [1 x i32], ptr %[[BLOCK_SIZES4]], i32 0, i32 0
// B32: store i32 %{{.*}}, ptr %[[TMP]], align 4
// B64: %[[TMP:.*]] = getelementptr [1 x i64], ptr %[[BLOCK_SIZES4]], i32 0, i32 0
@@ -234,9 +234,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// SPIR: [[DEF_Q:%[0-9]+]] = load target("spirv.Queue"), ptr %default_queue
// X86: [[DEF_Q:%[0-9]+]] = load ptr, ptr %default_queue
// COMMON: [[FLAGS:%[0-9]+]] = load i32, ptr %flags
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %[[BLOCK_SIZES5]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES5]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %[[BLOCK_SIZES5]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES5]])
// B32: %[[TMP:.*]] = getelementptr [1 x i32], ptr %[[BLOCK_SIZES5]], i32 0, i32 0
// B32: store i32 %{{.*}}, ptr %[[TMP]], align 4
// B64: %[[TMP:.*]] = getelementptr [1 x i64], ptr %[[BLOCK_SIZES5]], i32 0, i32 0
@@ -258,9 +258,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// SPIR: [[DEF_Q:%[0-9]+]] = load target("spirv.Queue"), ptr %default_queue
// X86: [[DEF_Q:%[0-9]+]] = load ptr, ptr %default_queue
// COMMON: [[FLAGS:%[0-9]+]] = load i32, ptr %flags
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %[[BLOCK_SIZES6]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES6]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %[[BLOCK_SIZES6]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES6]])
// B32: %[[TMP:.*]] = getelementptr [3 x i32], ptr %[[BLOCK_SIZES6]], i32 0, i32 0
// B32: store i32 1, ptr %[[TMP]], align 4
// B32: %[[BLOCK_SIZES62:.*]] = getelementptr [3 x i32], ptr %[[BLOCK_SIZES6]], i32 0, i32 1
@@ -290,9 +290,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// SPIR: [[DEF_Q:%[0-9]+]] = load target("spirv.Queue"), ptr %default_queue
// X86: [[DEF_Q:%[0-9]+]] = load ptr, ptr %default_queue
// COMMON: [[FLAGS:%[0-9]+]] = load i32, ptr %flags
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %[[BLOCK_SIZES7]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES7]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %[[BLOCK_SIZES7]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES7]])
// B32: %[[TMP:.*]] = getelementptr [1 x i32], ptr %[[BLOCK_SIZES7]], i32 0, i32 0
// B32: store i32 0, ptr %[[TMP]], align 4
// B64: %[[TMP:.*]] = getelementptr [1 x i64], ptr %[[BLOCK_SIZES7]], i32 0, i32 0
diff --git a/clang/test/CodeGenOpenCL/implicit-addrspacecast-function-parameter.cl b/clang/test/CodeGenOpenCL/implicit-addrspacecast-function-parameter.cl
index 8845ffe..4e40073 100644
--- a/clang/test/CodeGenOpenCL/implicit-addrspacecast-function-parameter.cl
+++ b/clang/test/CodeGenOpenCL/implicit-addrspacecast-function-parameter.cl
@@ -32,12 +32,12 @@ __kernel void use_of_local_var()
// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[X:%.*]] = alloca i32, align 4, addrspace(5)
-// CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[X]]) #[[ATTR5:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[X]]) #[[ATTR5:[0-9]+]]
// CHECK-NEXT: store i32 0, ptr addrspace(5) [[X]], align 4, !tbaa [[TBAA4:![0-9]+]]
// CHECK-NEXT: call void @private_ptr(ptr addrspace(5) noundef [[X]]) #[[ATTR6:[0-9]+]]
// CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X]] to ptr
// CHECK-NEXT: call void @generic_ptr(ptr noundef [[X_ASCAST]]) #[[ATTR6]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[X]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[X]]) #[[ATTR5]]
// CHECK-NEXT: ret void
//
//
diff --git a/clang/test/CodeGenOpenCL/preserve_vec3.cl b/clang/test/CodeGenOpenCL/preserve_vec3.cl
index 49ebae6..e73657e 100644
--- a/clang/test/CodeGenOpenCL/preserve_vec3.cl
+++ b/clang/test/CodeGenOpenCL/preserve_vec3.cl
@@ -11,8 +11,8 @@ typedef float float4 __attribute__((ext_vector_type(4)));
// CHECK-LABEL: define dso_local spir_kernel void @foo(
// CHECK-SAME: ptr addrspace(1) noundef readonly align 16 captures(none) [[A:%.*]], ptr addrspace(1) noundef writeonly align 16 captures(none) initializes((0, 16)) [[B:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] !kernel_arg_addr_space [[META3:![0-9]+]] !kernel_arg_access_qual [[META4:![0-9]+]] !kernel_arg_type [[META5:![0-9]+]] !kernel_arg_base_type [[META6:![0-9]+]] !kernel_arg_type_qual [[META7:![0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[LOADVECN:%.*]] = load <4 x float>, ptr addrspace(1) [[A]], align 16
-// CHECK-NEXT: [[EXTRACTVEC1:%.*]] = shufflevector <4 x float> [[LOADVECN]], <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+// CHECK-NEXT: [[TMP0:%.*]] = load <3 x float>, ptr addrspace(1) [[A]], align 16
+// CHECK-NEXT: [[EXTRACTVEC1:%.*]] = shufflevector <3 x float> [[TMP0]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
// CHECK-NEXT: store <4 x float> [[EXTRACTVEC1]], ptr addrspace(1) [[B]], align 16, !tbaa [[TBAA8:![0-9]+]]
// CHECK-NEXT: ret void
//
@@ -23,8 +23,8 @@ void kernel foo(global float3 *a, global float3 *b) {
// CHECK-LABEL: define dso_local spir_kernel void @float4_to_float3(
// CHECK-SAME: ptr addrspace(1) noundef writeonly align 16 captures(none) initializes((0, 16)) [[A:%.*]], ptr addrspace(1) noundef readonly align 16 captures(none) [[B:%.*]]) local_unnamed_addr #[[ATTR0]] !kernel_arg_addr_space [[META3]] !kernel_arg_access_qual [[META4]] !kernel_arg_type [[META11:![0-9]+]] !kernel_arg_base_type [[META12:![0-9]+]] !kernel_arg_type_qual [[META7]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr addrspace(1) [[B]], align 16, !tbaa [[TBAA8]]
-// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+// CHECK-NEXT: [[TMP0:%.*]] = load <3 x float>, ptr addrspace(1) [[B]], align 16, !tbaa [[TBAA8]]
+// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x float> [[TMP0]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
// CHECK-NEXT: store <4 x float> [[EXTRACTVEC]], ptr addrspace(1) [[A]], align 16, !tbaa [[TBAA8]]
// CHECK-NEXT: ret void
//
@@ -35,8 +35,8 @@ void kernel float4_to_float3(global float3 *a, global float4 *b) {
// CHECK-LABEL: define dso_local spir_kernel void @float3_to_float4(
// CHECK-SAME: ptr addrspace(1) noundef readonly align 16 captures(none) [[A:%.*]], ptr addrspace(1) noundef writeonly align 16 captures(none) initializes((0, 16)) [[B:%.*]]) local_unnamed_addr #[[ATTR0]] !kernel_arg_addr_space [[META3]] !kernel_arg_access_qual [[META4]] !kernel_arg_type [[META11]] !kernel_arg_base_type [[META12]] !kernel_arg_type_qual [[META7]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[LOADVECN:%.*]] = load <4 x float>, ptr addrspace(1) [[A]], align 16
-// CHECK-NEXT: [[ASTYPE:%.*]] = shufflevector <4 x float> [[LOADVECN]], <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+// CHECK-NEXT: [[TMP0:%.*]] = load <3 x float>, ptr addrspace(1) [[A]], align 16
+// CHECK-NEXT: [[ASTYPE:%.*]] = shufflevector <3 x float> [[TMP0]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
// CHECK-NEXT: store <4 x float> [[ASTYPE]], ptr addrspace(1) [[B]], align 16, !tbaa [[TBAA8]]
// CHECK-NEXT: ret void
//
@@ -47,9 +47,9 @@ void kernel float3_to_float4(global float3 *a, global float4 *b) {
// CHECK-LABEL: define dso_local spir_kernel void @float3_to_double2(
// CHECK-SAME: ptr addrspace(1) noundef readonly align 16 captures(none) [[A:%.*]], ptr addrspace(1) noundef writeonly align 16 captures(none) initializes((0, 16)) [[B:%.*]]) local_unnamed_addr #[[ATTR0]] !kernel_arg_addr_space [[META3]] !kernel_arg_access_qual [[META4]] !kernel_arg_type [[META13:![0-9]+]] !kernel_arg_base_type [[META14:![0-9]+]] !kernel_arg_type_qual [[META7]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[LOADVECN:%.*]] = load <4 x float>, ptr addrspace(1) [[A]], align 16
-// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x float> [[LOADVECN]], <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
-// CHECK-NEXT: store <4 x float> [[TMP0]], ptr addrspace(1) [[B]], align 16, !tbaa [[TBAA8]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <3 x float>, ptr addrspace(1) [[A]], align 16
+// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <3 x float> [[TMP0]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+// CHECK-NEXT: store <4 x float> [[TMP1]], ptr addrspace(1) [[B]], align 16, !tbaa [[TBAA8]]
// CHECK-NEXT: ret void
//
void kernel float3_to_double2(global float3 *a, global double2 *b) {
@@ -59,8 +59,8 @@ void kernel float3_to_double2(global float3 *a, global double2 *b) {
// CHECK-LABEL: define dso_local spir_kernel void @char8_to_short3(
// CHECK-SAME: ptr addrspace(1) noundef writeonly align 8 captures(none) initializes((0, 8)) [[A:%.*]], ptr addrspace(1) noundef readonly align 8 captures(none) [[B:%.*]]) local_unnamed_addr #[[ATTR0]] !kernel_arg_addr_space [[META3]] !kernel_arg_access_qual [[META4]] !kernel_arg_type [[META15:![0-9]+]] !kernel_arg_base_type [[META16:![0-9]+]] !kernel_arg_type_qual [[META7]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr addrspace(1) [[B]], align 8, !tbaa [[TBAA8]]
-// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <4 x i16> [[TMP0]], <4 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+// CHECK-NEXT: [[TMP0:%.*]] = load <3 x i16>, ptr addrspace(1) [[B]], align 8, !tbaa [[TBAA8]]
+// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x i16> [[TMP0]], <3 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
// CHECK-NEXT: store <4 x i16> [[EXTRACTVEC]], ptr addrspace(1) [[A]], align 8, !tbaa [[TBAA8]]
// CHECK-NEXT: ret void
//
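The preserve_vec3 updates above track a codegen change rather than a source change: a 3-element vector load is now emitted as a narrow <3 x float> (or <3 x i16>) load and widened to four lanes by a shufflevector with a poison lane, instead of loading the full 16- or 8-byte slot as a 4-element vector. At the source level the affected kernels are plain conversions of this shape (a sketch mirroring the kernels checked above):

  // sizeof(float3) is 16, but only 12 bytes carry data; the 4th IR lane
  // stays poison after the shuffle seen in the CHECK lines.
  void kernel copy_float3(global float3 *dst, global float3 *src) {
    *dst = *src; // load <3 x float>, widen via shufflevector, store <4 x float>
  }
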
diff --git a/clang/test/CodeGenSPIRV/Builtins/generic_cast_to_ptr_explicit.c b/clang/test/CodeGenSPIRV/Builtins/generic_cast_to_ptr_explicit.c
index 8cfe650..30f4ecb 100644
--- a/clang/test/CodeGenSPIRV/Builtins/generic_cast_to_ptr_explicit.c
+++ b/clang/test/CodeGenSPIRV/Builtins/generic_cast_to_ptr_explicit.c
@@ -1,33 +1,36 @@
-// RUN: %clang_cc1 -O1 -triple spirv64 -fsycl-is-device %s -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -O1 -triple spirv64 -fsycl-is-device -x c++ %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -O1 -triple spirv64 -cl-std=CL3.0 -x cl %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -O1 -triple spirv32 -cl-std=CL3.0 -x cl %s -emit-llvm -o - | FileCheck %s
-// CHECK: spir_func noundef ptr @test_cast_to_private(
-// CHECK-SAME: ptr addrspace(4) noundef readnone [[P:%.*]]
+#ifdef __SYCL_DEVICE_ONLY__
+#define SYCL_EXTERNAL [[clang::sycl_external]]
+#else
+#define SYCL_EXTERNAL
+#endif
+
+// CHECK: spir_func noundef ptr @{{.*}}test_cast_to_private{{.*}}(ptr addrspace(4) noundef readnone [[P:%.*]]
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[SPV_CAST:%.*]] = tail call noundef ptr @llvm.spv.generic.cast.to.ptr.explicit.p0(ptr addrspace(4) %p)
// CHECK-NEXT: ret ptr [[SPV_CAST]]
//
-__attribute__((opencl_private)) int* test_cast_to_private(int* p) {
+SYCL_EXTERNAL __attribute__((opencl_private)) int* test_cast_to_private(int* p) {
return __builtin_spirv_generic_cast_to_ptr_explicit(p, 7);
}
-// CHECK: spir_func noundef ptr addrspace(1) @test_cast_to_global(
-// CHECK-SAME: ptr addrspace(4) noundef readnone [[P:%.*]]
+// CHECK: spir_func noundef ptr addrspace(1) @{{.*}}test_cast_to_global{{.*}}(ptr addrspace(4) noundef readnone [[P:%.*]]
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[SPV_CAST:%.*]] = tail call noundef ptr addrspace(1) @llvm.spv.generic.cast.to.ptr.explicit.p1(ptr addrspace(4) %p)
// CHECK-NEXT: ret ptr addrspace(1) [[SPV_CAST]]
//
-__attribute__((opencl_global)) int* test_cast_to_global(int* p) {
+SYCL_EXTERNAL __attribute__((opencl_global)) int* test_cast_to_global(int* p) {
return __builtin_spirv_generic_cast_to_ptr_explicit(p, 5);
}
-// CHECK: spir_func noundef ptr addrspace(3) @test_cast_to_local(
-// CHECK-SAME: ptr addrspace(4) noundef readnone [[P:%.*]]
+// CHECK: spir_func noundef ptr addrspace(3) @{{.*}}test_cast_to_local{{.*}}(ptr addrspace(4) noundef readnone [[P:%.*]]
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[SPV_CAST:%.*]] = tail call noundef ptr addrspace(3) @llvm.spv.generic.cast.to.ptr.explicit.p3(ptr addrspace(4) %p)
// CHECK-NEXT: ret ptr addrspace(3) [[SPV_CAST]]
//
-__attribute__((opencl_local)) int* test_cast_to_local(int* p) {
+SYCL_EXTERNAL __attribute__((opencl_local)) int* test_cast_to_local(int* p) {
return __builtin_spirv_generic_cast_to_ptr_explicit(p, 4);
}
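The magic numbers 7, 5 and 4 passed to __builtin_spirv_generic_cast_to_ptr_explicit in this test are SPIR-V StorageClass values; under the standard enumeration they correspond to Function (OpenCL private), CrossWorkgroup (global) and Workgroup (local). A small set of named constants makes the calls self-describing (the enum below is a local convenience for this sketch, not a published header):

  // SPIR-V StorageClass values used by the test above (standard enumeration).
  enum spv_storage_class {
    SPV_STORAGE_WORKGROUP      = 4, /* OpenCL local   */
    SPV_STORAGE_CROSSWORKGROUP = 5, /* OpenCL global  */
    SPV_STORAGE_FUNCTION       = 7, /* OpenCL private */
  };
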
diff --git a/clang/test/CodeGenSPIRV/Builtins/ids_and_ranges.c b/clang/test/CodeGenSPIRV/Builtins/ids_and_ranges.c
index f71af77..bff850b 100644
--- a/clang/test/CodeGenSPIRV/Builtins/ids_and_ranges.c
+++ b/clang/test/CodeGenSPIRV/Builtins/ids_and_ranges.c
@@ -1,106 +1,106 @@
-// RUN: %clang_cc1 -O1 -triple spirv64 -fsycl-is-device %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,CHECK64
+// RUN: %clang_cc1 -O1 -triple spirv64 -fsycl-is-device -x c++ %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,CHECK64
// RUN: %clang_cc1 -O1 -triple spirv64 -cl-std=CL3.0 -x cl %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,CHECK64
// RUN: %clang_cc1 -O1 -triple spirv32 -cl-std=CL3.0 -x cl %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,CHECK32
-// CHECK: @test_num_workgroups(
+// CHECK: @{{.*}}test_num_workgroups{{.*}}(
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK64-NEXT: tail call i64 @llvm.spv.num.workgroups.i64(i32 0)
// CHECK32-NEXT: tail call i32 @llvm.spv.num.workgroups.i32(i32 0)
//
-unsigned int test_num_workgroups() {
+[[clang::sycl_external]] unsigned int test_num_workgroups() {
return __builtin_spirv_num_workgroups(0);
}
-// CHECK: @test_workgroup_size(
+// CHECK: @{{.*}}test_workgroup_size{{.*}}(
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK64-NEXT: tail call i64 @llvm.spv.workgroup.size.i64(i32 0)
// CHECK32-NEXT: tail call i32 @llvm.spv.workgroup.size.i32(i32 0)
//
-unsigned int test_workgroup_size() {
+[[clang::sycl_external]] unsigned int test_workgroup_size() {
return __builtin_spirv_workgroup_size(0);
}
-// CHECK: @test_workgroup_id(
+// CHECK: @{{.*}}test_workgroup_id{{.*}}(
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK64-NEXT: tail call i64 @llvm.spv.group.id.i64(i32 0)
// CHECK32-NEXT: tail call i32 @llvm.spv.group.id.i32(i32 0)
//
-unsigned int test_workgroup_id() {
+[[clang::sycl_external]] unsigned int test_workgroup_id() {
return __builtin_spirv_workgroup_id(0);
}
-// CHECK: @test_local_invocation_id(
+// CHECK: @{{.*}}test_local_invocation_id{{.*}}(
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK64-NEXT: tail call i64 @llvm.spv.thread.id.in.group.i64(i32 0)
// CHECK32-NEXT: tail call i32 @llvm.spv.thread.id.in.group.i32(i32 0)
//
-unsigned int test_local_invocation_id() {
+[[clang::sycl_external]] unsigned int test_local_invocation_id() {
return __builtin_spirv_local_invocation_id(0);
}
-// CHECK: @test_global_invocation_id(
+// CHECK: @{{.*}}test_global_invocation_id{{.*}}(
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK64-NEXT: tail call i64 @llvm.spv.thread.id.i64(i32 0)
// CHECK32-NEXT: tail call i32 @llvm.spv.thread.id.i32(i32 0)
//
-unsigned int test_global_invocation_id() {
+[[clang::sycl_external]] unsigned int test_global_invocation_id() {
return __builtin_spirv_global_invocation_id(0);
}
-// CHECK: @test_global_size(
+// CHECK: @{{.*}}test_global_size{{.*}}(
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK64-NEXT: tail call i64 @llvm.spv.global.size.i64(i32 0)
// CHECK32-NEXT: tail call i32 @llvm.spv.global.size.i32(i32 0)
//
-unsigned int test_global_size() {
+[[clang::sycl_external]] unsigned int test_global_size() {
return __builtin_spirv_global_size(0);
}
-// CHECK: @test_global_offset(
+// CHECK: @{{.*}}test_global_offset{{.*}}(
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK64-NEXT: tail call i64 @llvm.spv.global.offset.i64(i32 0)
// CHECK32-NEXT: tail call i32 @llvm.spv.global.offset.i32(i32 0)
//
-unsigned int test_global_offset() {
+[[clang::sycl_external]] unsigned int test_global_offset() {
return __builtin_spirv_global_offset(0);
}
-// CHECK: @test_subgroup_size(
+// CHECK: @{{.*}}test_subgroup_size{{.*}}(
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: tail call i32 @llvm.spv.subgroup.size()
//
-unsigned int test_subgroup_size() {
+[[clang::sycl_external]] unsigned int test_subgroup_size() {
return __builtin_spirv_subgroup_size();
}
-// CHECK: @test_subgroup_max_size(
+// CHECK: @{{.*}}test_subgroup_max_size{{.*}}(
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: tail call i32 @llvm.spv.subgroup.max.size()
//
-unsigned int test_subgroup_max_size() {
+[[clang::sycl_external]] unsigned int test_subgroup_max_size() {
return __builtin_spirv_subgroup_max_size();
}
-// CHECK: @test_num_subgroups(
+// CHECK: @{{.*}}test_num_subgroups{{.*}}(
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: tail call i32 @llvm.spv.num.subgroups()
//
-unsigned int test_num_subgroups() {
+[[clang::sycl_external]] unsigned int test_num_subgroups() {
return __builtin_spirv_num_subgroups();
}
-// CHECK: @test_subgroup_id(
+// CHECK: @{{.*}}test_subgroup_id{{.*}}(
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: tail call i32 @llvm.spv.subgroup.id()
//
-unsigned int test_subgroup_id() {
+[[clang::sycl_external]] unsigned int test_subgroup_id() {
return __builtin_spirv_subgroup_id();
}
-// CHECK: @test_subgroup_local_invocation_id(
+// CHECK: @{{.*}}test_subgroup_local_invocation_id{{.*}}(
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: tail call i32 @llvm.spv.subgroup.local.invocation.id()
//
-unsigned int test_subgroup_local_invocation_id() {
+[[clang::sycl_external]] unsigned int test_subgroup_local_invocation_id() {
return __builtin_spirv_subgroup_local_invocation_id();
}
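As a reading aid for the block above: the dimension argument selects the x/y/z axis, and on spirv64 the id/range builtins return 64-bit values (i32 on spirv32, hence the CHECK64/CHECK32 split). They pair up with the classic OpenCL work-item queries roughly as follows (an illustrative pairing, not a normative mapping):

  kernel void id_probe(global ulong *out) {
    out[0] = __builtin_spirv_global_invocation_id(0); // ~ get_global_id(0)
    out[1] = __builtin_spirv_workgroup_id(0);         // ~ get_group_id(0)
    out[2] = __builtin_spirv_local_invocation_id(0);  // ~ get_local_id(0)
    out[3] = __builtin_spirv_workgroup_size(0);       // ~ get_local_size(0)
  }
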
diff --git a/clang/test/CodeGenSPIRV/spirv-intel.c b/clang/test/CodeGenSPIRV/spirv-intel.c
new file mode 100644
index 0000000..3cfe09f
--- /dev/null
+++ b/clang/test/CodeGenSPIRV/spirv-intel.c
@@ -0,0 +1,9 @@
+// RUN: %clang_cc1 -triple spirv64-intel %s -emit-llvm -o - | FileCheck -check-prefix=CHECK-WITH %s
+// RUN: %clang_cc1 -triple spirv32-intel %s -emit-llvm -o - | FileCheck -check-prefix=CHECK-WITH %s
+// RUN: %clang_cc1 -triple spir-intel %s -emit-llvm -o - | FileCheck -check-prefix=CHECK-WITHOUT %s
+// RUN: %clang_cc1 -triple spir64-intel %s -emit-llvm -o - | FileCheck -check-prefix=CHECK-WITHOUT %s
+
+// CHECK-WITH: spir_func void @foo(ptr addrspace(4) noundef %param) #0 {
+// CHECK-WITHOUT: spir_func void @foo(ptr noundef %param) #0 {
+void foo(int *param) {
+}
diff --git a/clang/test/CodeGenSYCL/address-space-conversions.cpp b/clang/test/CodeGenSYCL/address-space-conversions.cpp
index ee3183b..fa7acb0 100644
--- a/clang/test/CodeGenSYCL/address-space-conversions.cpp
+++ b/clang/test/CodeGenSYCL/address-space-conversions.cpp
@@ -1,143 +1,143 @@
// RUN: %clang_cc1 -triple spir64 -fsycl-is-device -disable-llvm-passes -emit-llvm %s -o - | FileCheck %s
void bar(int &Data) {}
-// CHECK: define{{.*}} spir_func void @[[RAW_REF:[a-zA-Z0-9_]+]](ptr addrspace(4) noundef align 4 dereferenceable(4) %
+// CHECK-DAG: define{{.*}} spir_func void @[[RAW_REF:[a-zA-Z0-9_]+]](ptr addrspace(4) noundef align 4 dereferenceable(4) %
void bar2(int &Data) {}
-// CHECK: define{{.*}} spir_func void @[[RAW_REF2:[a-zA-Z0-9_]+]](ptr addrspace(4) noundef align 4 dereferenceable(4) %
+// CHECK-DAG: define{{.*}} spir_func void @[[RAW_REF2:[a-zA-Z0-9_]+]](ptr addrspace(4) noundef align 4 dereferenceable(4) %
void bar(__attribute__((opencl_local)) int &Data) {}
-// CHECK: define{{.*}} spir_func void [[LOC_REF:@[a-zA-Z0-9_]+]](ptr addrspace(3) noundef align 4 dereferenceable(4) %
+// CHECK-DAG: define{{.*}} spir_func void [[LOC_REF:@[a-zA-Z0-9_]+]](ptr addrspace(3) noundef align 4 dereferenceable(4) %
void foo(int *Data) {}
-// CHECK: define{{.*}} spir_func void @[[RAW_PTR:[a-zA-Z0-9_]+]](ptr addrspace(4) noundef %
+// CHECK-DAG: define{{.*}} spir_func void @[[RAW_PTR:[a-zA-Z0-9_]+]](ptr addrspace(4) noundef %
void foo2(int *Data) {}
-// CHECK: define{{.*}} spir_func void @[[RAW_PTR2:[a-zA-Z0-9_]+]](ptr addrspace(4) noundef %
+// CHECK-DAG: define{{.*}} spir_func void @[[RAW_PTR2:[a-zA-Z0-9_]+]](ptr addrspace(4) noundef %
void foo(__attribute__((opencl_local)) int *Data) {}
-// CHECK: define{{.*}} spir_func void [[LOC_PTR:@[a-zA-Z0-9_]+]](ptr addrspace(3) noundef %
+// CHECK-DAG: define{{.*}} spir_func void [[LOC_PTR:@[a-zA-Z0-9_]+]](ptr addrspace(3) noundef %
template <typename T>
void tmpl(T t) {}
// See Check Lines below.
-void usages() {
+[[clang::sycl_external]] void usages() {
int *NoAS;
- // CHECK: [[NoAS:%[a-zA-Z0-9]+]] = alloca ptr addrspace(4)
+ // CHECK-DAG: [[NoAS:%[a-zA-Z0-9]+]] = alloca ptr addrspace(4)
__attribute__((opencl_global)) int *GLOB;
- // CHECK: [[GLOB:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1)
+ // CHECK-DAG: [[GLOB:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1)
__attribute__((opencl_local)) int *LOC;
- // CHECK: [[LOC:%[a-zA-Z0-9]+]] = alloca ptr addrspace(3)
+ // CHECK-DAG: [[LOC:%[a-zA-Z0-9]+]] = alloca ptr addrspace(3)
__attribute__((opencl_private)) int *PRIV;
- // CHECK: [[PRIV:%[a-zA-Z0-9]+]] = alloca ptr
+ // CHECK-DAG: [[PRIV:%[a-zA-Z0-9]+]] = alloca ptr
__attribute__((opencl_global_device)) int *GLOBDEVICE;
- // CHECK: [[GLOB_DEVICE:%[a-zA-Z0-9]+]] = alloca ptr addrspace(5)
+ // CHECK-DAG: [[GLOB_DEVICE:%[a-zA-Z0-9]+]] = alloca ptr addrspace(5)
__attribute__((opencl_global_host)) int *GLOBHOST;
- // CHECK: [[GLOB_HOST:%[a-zA-Z0-9]+]] = alloca ptr addrspace(6)
+ // CHECK-DAG: [[GLOB_HOST:%[a-zA-Z0-9]+]] = alloca ptr addrspace(6)
- // CHECK: [[NoAS]].ascast = addrspacecast ptr [[NoAS]] to ptr addrspace(4)
- // CHECK: [[GLOB]].ascast = addrspacecast ptr [[GLOB]] to ptr addrspace(4)
- // CHECK: [[LOC]].ascast = addrspacecast ptr [[LOC]] to ptr addrspace(4)
- // CHECK: [[PRIV]].ascast = addrspacecast ptr [[PRIV]] to ptr addrspace(4)
+ // CHECK-DAG: [[NoAS]].ascast = addrspacecast ptr [[NoAS]] to ptr addrspace(4)
+ // CHECK-DAG: [[GLOB]].ascast = addrspacecast ptr [[GLOB]] to ptr addrspace(4)
+ // CHECK-DAG: [[LOC]].ascast = addrspacecast ptr [[LOC]] to ptr addrspace(4)
+ // CHECK-DAG: [[PRIV]].ascast = addrspacecast ptr [[PRIV]] to ptr addrspace(4)
LOC = nullptr;
- // CHECK: store ptr addrspace(3) null, ptr addrspace(4) [[LOC]].ascast, align 8
+ // CHECK-DAG: store ptr addrspace(3) null, ptr addrspace(4) [[LOC]].ascast, align 8
GLOB = nullptr;
- // CHECK: store ptr addrspace(1) null, ptr addrspace(4) [[GLOB]].ascast, align 8
+ // CHECK-DAG: store ptr addrspace(1) null, ptr addrspace(4) [[GLOB]].ascast, align 8
// Explicit conversions
// From named address spaces to default address space
- // CHECK: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr addrspace(4) [[GLOB]].ascast
- // CHECK: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr addrspace(4)
- // CHECK: store ptr addrspace(4) [[GLOB_CAST]], ptr addrspace(4) [[NoAS]].ascast
+ // CHECK-DAG: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr addrspace(4) [[GLOB]].ascast
+ // CHECK-DAG: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr addrspace(4)
+ // CHECK-DAG: store ptr addrspace(4) [[GLOB_CAST]], ptr addrspace(4) [[NoAS]].ascast
NoAS = (int *)GLOB;
- // CHECK: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr addrspace(4) [[LOC]].ascast
- // CHECK: [[LOC_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD]] to ptr addrspace(4)
- // CHECK: store ptr addrspace(4) [[LOC_CAST]], ptr addrspace(4) [[NoAS]].ascast
+ // CHECK-DAG: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr addrspace(4) [[LOC]].ascast
+ // CHECK-DAG: [[LOC_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD]] to ptr addrspace(4)
+ // CHECK-DAG: store ptr addrspace(4) [[LOC_CAST]], ptr addrspace(4) [[NoAS]].ascast
NoAS = (int *)LOC;
- // CHECK: [[PRIV_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr addrspace(4) [[PRIV]].ascast
- // CHECK: [[PRIV_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr [[PRIV_LOAD]] to ptr addrspace(4)
- // CHECK: store ptr addrspace(4) [[PRIV_CAST]], ptr addrspace(4) [[NoAS]].ascast
+ // CHECK-DAG: [[PRIV_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr addrspace(4) [[PRIV]].ascast
+ // CHECK-DAG: [[PRIV_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr [[PRIV_LOAD]] to ptr addrspace(4)
+ // CHECK-DAG: store ptr addrspace(4) [[PRIV_CAST]], ptr addrspace(4) [[NoAS]].ascast
NoAS = (int *)PRIV;
// From default address space to named address space
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
- // CHECK: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(4) [[NoAS_LOAD]] to ptr addrspace(1)
- // CHECK: store ptr addrspace(1) [[NoAS_CAST]], ptr addrspace(4) [[GLOB]].ascast
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
+ // CHECK-DAG: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(4) [[NoAS_LOAD]] to ptr addrspace(1)
+ // CHECK-DAG: store ptr addrspace(1) [[NoAS_CAST]], ptr addrspace(4) [[GLOB]].ascast
GLOB = (__attribute__((opencl_global)) int *)NoAS;
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
- // CHECK: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(4) [[NoAS_LOAD]] to ptr addrspace(3)
- // CHECK: store ptr addrspace(3) [[NoAS_CAST]], ptr addrspace(4) [[LOC]].ascast
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
+ // CHECK-DAG: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(4) [[NoAS_LOAD]] to ptr addrspace(3)
+ // CHECK-DAG: store ptr addrspace(3) [[NoAS_CAST]], ptr addrspace(4) [[LOC]].ascast
LOC = (__attribute__((opencl_local)) int *)NoAS;
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
- // CHECK: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(4) [[NoAS_LOAD]] to ptr
- // CHECK: store ptr [[NoAS_CAST]], ptr addrspace(4) [[PRIV]].ascast
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
+ // CHECK-DAG: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(4) [[NoAS_LOAD]] to ptr
+ // CHECK-DAG: store ptr [[NoAS_CAST]], ptr addrspace(4) [[PRIV]].ascast
PRIV = (__attribute__((opencl_private)) int *)NoAS;
// From opencl_global_[host/device] address spaces to opencl_global
- // CHECK: [[GLOBDEVICE_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(5), ptr addrspace(4) [[GLOB_DEVICE]].ascast
- // CHECK: [[GLOBDEVICE_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(5) [[GLOBDEVICE_LOAD]] to ptr addrspace(1)
- // CHECK: store ptr addrspace(1) [[GLOBDEVICE_CAST]], ptr addrspace(4) [[GLOB]].ascast
+ // CHECK-DAG: [[GLOBDEVICE_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(5), ptr addrspace(4) [[GLOB_DEVICE]].ascast
+ // CHECK-DAG: [[GLOBDEVICE_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(5) [[GLOBDEVICE_LOAD]] to ptr addrspace(1)
+ // CHECK-DAG: store ptr addrspace(1) [[GLOBDEVICE_CAST]], ptr addrspace(4) [[GLOB]].ascast
GLOB = (__attribute__((opencl_global)) int *)GLOBDEVICE;
- // CHECK: [[GLOBHOST_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(6), ptr addrspace(4) [[GLOB_HOST]].ascast
- // CHECK: [[GLOBHOST_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(6) [[GLOBHOST_LOAD]] to ptr addrspace(1)
- // CHECK: store ptr addrspace(1) [[GLOBHOST_CAST]], ptr addrspace(4) [[GLOB]].ascast
+ // CHECK-DAG: [[GLOBHOST_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(6), ptr addrspace(4) [[GLOB_HOST]].ascast
+ // CHECK-DAG: [[GLOBHOST_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(6) [[GLOBHOST_LOAD]] to ptr addrspace(1)
+ // CHECK-DAG: store ptr addrspace(1) [[GLOBHOST_CAST]], ptr addrspace(4) [[GLOB]].ascast
GLOB = (__attribute__((opencl_global)) int *)GLOBHOST;
bar(*GLOB);
- // CHECK: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr addrspace(4) [[GLOB]].ascast
- // CHECK: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr addrspace(4)
- // CHECK: call spir_func void @[[RAW_REF]](ptr addrspace(4) noundef align 4 dereferenceable(4) [[GLOB_CAST]])
+ // CHECK-DAG: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr addrspace(4) [[GLOB]].ascast
+ // CHECK-DAG: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr addrspace(4)
+ // CHECK-DAG: call spir_func void @[[RAW_REF]](ptr addrspace(4) noundef align 4 dereferenceable(4) [[GLOB_CAST]])
bar2(*GLOB);
- // CHECK: [[GLOB_LOAD2:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr addrspace(4) [[GLOB]].ascast
- // CHECK: [[GLOB_CAST2:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD2]] to ptr addrspace(4)
- // CHECK: call spir_func void @[[RAW_REF2]](ptr addrspace(4) noundef align 4 dereferenceable(4) [[GLOB_CAST2]])
+ // CHECK-DAG: [[GLOB_LOAD2:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr addrspace(4) [[GLOB]].ascast
+ // CHECK-DAG: [[GLOB_CAST2:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD2]] to ptr addrspace(4)
+ // CHECK-DAG: call spir_func void @[[RAW_REF2]](ptr addrspace(4) noundef align 4 dereferenceable(4) [[GLOB_CAST2]])
bar(*LOC);
- // CHECK: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr addrspace(4) [[LOC]].ascast
- // CHECK: call spir_func void [[LOC_REF]](ptr addrspace(3) noundef align 4 dereferenceable(4) [[LOC_LOAD]])
+ // CHECK-DAG: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr addrspace(4) [[LOC]].ascast
+ // CHECK-DAG: call spir_func void [[LOC_REF]](ptr addrspace(3) noundef align 4 dereferenceable(4) [[LOC_LOAD]])
bar2(*LOC);
- // CHECK: [[LOC_LOAD2:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr addrspace(4) [[LOC]].ascast
- // CHECK: [[LOC_CAST2:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD2]] to ptr addrspace(4)
- // CHECK: call spir_func void @[[RAW_REF2]](ptr addrspace(4) noundef align 4 dereferenceable(4) [[LOC_CAST2]])
+ // CHECK-DAG: [[LOC_LOAD2:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr addrspace(4) [[LOC]].ascast
+ // CHECK-DAG: [[LOC_CAST2:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD2]] to ptr addrspace(4)
+ // CHECK-DAG: call spir_func void @[[RAW_REF2]](ptr addrspace(4) noundef align 4 dereferenceable(4) [[LOC_CAST2]])
bar(*NoAS);
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
- // CHECK: call spir_func void @[[RAW_REF]](ptr addrspace(4) noundef align 4 dereferenceable(4) [[NoAS_LOAD]])
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
+ // CHECK-DAG: call spir_func void @[[RAW_REF]](ptr addrspace(4) noundef align 4 dereferenceable(4) [[NoAS_LOAD]])
bar2(*NoAS);
- // CHECK: [[NoAS_LOAD2:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
- // CHECK: call spir_func void @[[RAW_REF2]](ptr addrspace(4) noundef align 4 dereferenceable(4) [[NoAS_LOAD2]])
+ // CHECK-DAG: [[NoAS_LOAD2:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
+ // CHECK-DAG: call spir_func void @[[RAW_REF2]](ptr addrspace(4) noundef align 4 dereferenceable(4) [[NoAS_LOAD2]])
foo(GLOB);
- // CHECK: [[GLOB_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr addrspace(4) [[GLOB]].ascast
- // CHECK: [[GLOB_CAST3:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD3]] to ptr addrspace(4)
- // CHECK: call spir_func void @[[RAW_PTR]](ptr addrspace(4) noundef [[GLOB_CAST3]])
+ // CHECK-DAG: [[GLOB_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr addrspace(4) [[GLOB]].ascast
+ // CHECK-DAG: [[GLOB_CAST3:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD3]] to ptr addrspace(4)
+ // CHECK-DAG: call spir_func void @[[RAW_PTR]](ptr addrspace(4) noundef [[GLOB_CAST3]])
foo2(GLOB);
- // CHECK: [[GLOB_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr addrspace(4) [[GLOB]].ascast
- // CHECK: [[GLOB_CAST4:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD4]] to ptr addrspace(4)
- // CHECK: call spir_func void @[[RAW_PTR2]](ptr addrspace(4) noundef [[GLOB_CAST4]])
+ // CHECK-DAG: [[GLOB_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr addrspace(4) [[GLOB]].ascast
+ // CHECK-DAG: [[GLOB_CAST4:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD4]] to ptr addrspace(4)
+ // CHECK-DAG: call spir_func void @[[RAW_PTR2]](ptr addrspace(4) noundef [[GLOB_CAST4]])
foo(LOC);
- // CHECK: [[LOC_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr addrspace(4) [[LOC]].ascast
- // CHECK: call spir_func void [[LOC_PTR]](ptr addrspace(3) noundef [[LOC_LOAD3]])
+ // CHECK-DAG: [[LOC_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr addrspace(4) [[LOC]].ascast
+ // CHECK-DAG: call spir_func void [[LOC_PTR]](ptr addrspace(3) noundef [[LOC_LOAD3]])
foo2(LOC);
- // CHECK: [[LOC_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr addrspace(4) [[LOC]].ascast
- // CHECK: [[LOC_CAST4:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD4]] to ptr addrspace(4)
- // CHECK: call spir_func void @[[RAW_PTR2]](ptr addrspace(4) noundef [[LOC_CAST4]])
+ // CHECK-DAG: [[LOC_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr addrspace(4) [[LOC]].ascast
+ // CHECK-DAG: [[LOC_CAST4:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD4]] to ptr addrspace(4)
+ // CHECK-DAG: call spir_func void @[[RAW_PTR2]](ptr addrspace(4) noundef [[LOC_CAST4]])
foo(NoAS);
- // CHECK: [[NoAS_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
- // CHECK: call spir_func void @[[RAW_PTR]](ptr addrspace(4) noundef [[NoAS_LOAD3]])
+ // CHECK-DAG: [[NoAS_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
+ // CHECK-DAG: call spir_func void @[[RAW_PTR]](ptr addrspace(4) noundef [[NoAS_LOAD3]])
foo2(NoAS);
- // CHECK: [[NoAS_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
- // CHECK: call spir_func void @[[RAW_PTR2]](ptr addrspace(4) noundef [[NoAS_LOAD4]])
+ // CHECK-DAG: [[NoAS_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
+ // CHECK-DAG: call spir_func void @[[RAW_PTR2]](ptr addrspace(4) noundef [[NoAS_LOAD4]])
// Ensure that we still get 3 different template instantiations.
tmpl(GLOB);
- // CHECK: [[GLOB_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr addrspace(4) [[GLOB]].ascast
- // CHECK: call spir_func void @_Z4tmplIPU3AS1iEvT_(ptr addrspace(1) noundef [[GLOB_LOAD4]])
+ // CHECK-DAG: [[GLOB_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr addrspace(4) [[GLOB]].ascast
+ // CHECK-DAG: call spir_func void @_Z4tmplIPU3AS1iEvT_(ptr addrspace(1) noundef [[GLOB_LOAD4]])
tmpl(LOC);
- // CHECK: [[LOC_LOAD5:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr addrspace(4) [[LOC]].ascast
- // CHECK: call spir_func void @_Z4tmplIPU3AS3iEvT_(ptr addrspace(3) noundef [[LOC_LOAD5]])
+ // CHECK-DAG: [[LOC_LOAD5:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr addrspace(4) [[LOC]].ascast
+ // CHECK-DAG: call spir_func void @_Z4tmplIPU3AS3iEvT_(ptr addrspace(3) noundef [[LOC_LOAD5]])
tmpl(PRIV);
- // CHECK: [[PRIV_LOAD5:%[a-zA-Z0-9]+]] = load ptr, ptr addrspace(4) [[PRIV]].ascast
- // CHECK: call spir_func void @_Z4tmplIPU3AS0iEvT_(ptr noundef [[PRIV_LOAD5]])
+ // CHECK-DAG: [[PRIV_LOAD5:%[a-zA-Z0-9]+]] = load ptr, ptr addrspace(4) [[PRIV]].ascast
+ // CHECK-DAG: call spir_func void @_Z4tmplIPU3AS0iEvT_(ptr noundef [[PRIV_LOAD5]])
tmpl(NoAS);
- // CHECK: [[NoAS_LOAD5:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
- // CHECK: call spir_func void @_Z4tmplIPiEvT_(ptr addrspace(4) noundef [[NoAS_LOAD5]])
+ // CHECK-DAG: [[NoAS_LOAD5:%[a-zA-Z0-9]+]] = load ptr addrspace(4), ptr addrspace(4) [[NoAS]].ascast
+ // CHECK-DAG: call spir_func void @_Z4tmplIPiEvT_(ptr addrspace(4) noundef [[NoAS_LOAD5]])
}
-// CHECK: define linkonce_odr spir_func void @_Z4tmplIPU3AS1iEvT_(ptr addrspace(1) noundef %
-// CHECK: define linkonce_odr spir_func void @_Z4tmplIPU3AS3iEvT_(ptr addrspace(3) noundef %
-// CHECK: define linkonce_odr spir_func void @_Z4tmplIPU3AS0iEvT_(ptr noundef %
-// CHECK: define linkonce_odr spir_func void @_Z4tmplIPiEvT_(ptr addrspace(4) noundef %
+// CHECK-DAG: define linkonce_odr spir_func void @_Z4tmplIPU3AS1iEvT_(ptr addrspace(1) noundef %
+// CHECK-DAG: define linkonce_odr spir_func void @_Z4tmplIPU3AS3iEvT_(ptr addrspace(3) noundef %
+// CHECK-DAG: define linkonce_odr spir_func void @_Z4tmplIPU3AS0iEvT_(ptr noundef %
+// CHECK-DAG: define linkonce_odr spir_func void @_Z4tmplIPiEvT_(ptr addrspace(4) noundef %
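A pattern worth calling out in this and the following SYCL tests: every free function under test gains [[clang::sycl_external]]. In device compilation (-fsycl-is-device), functions that are neither sycl_external nor reachable from a device kernel may not be emitted at all, and the emission order of what remains can change — hence both the attribute and the switch from CHECK to order-independent CHECK-DAG. A minimal illustration (hypothetical translation unit, not one of the tests above):

  // Compiled with -fsycl-is-device:
  [[clang::sycl_external]] void kept() {}  // guaranteed to appear in device IR
  void dropped_unless_used() {}            // may be omitted from device IR
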
diff --git a/clang/test/CodeGenSYCL/address-space-deduction.cpp b/clang/test/CodeGenSYCL/address-space-deduction.cpp
index 5910ec3..0fb5c41 100644
--- a/clang/test/CodeGenSYCL/address-space-deduction.cpp
+++ b/clang/test/CodeGenSYCL/address-space-deduction.cpp
@@ -85,7 +85,7 @@
// CHECK-NEXT: store ptr addrspace(4) addrspacecast (ptr addrspace(1) @.str.1 to ptr addrspace(4)), ptr addrspace(4) [[SELECT_STR_TRIVIAL2_ASCAST]], align 8
// CHECK-NEXT: ret void
//
-void test() {
+[[clang::sycl_external]] void test() {
static const int foo = 0x42;
diff --git a/clang/test/CodeGenSYCL/address-space-mangling.cpp b/clang/test/CodeGenSYCL/address-space-mangling.cpp
index 868bf8cc..ecc2d4b 100644
--- a/clang/test/CodeGenSYCL/address-space-mangling.cpp
+++ b/clang/test/CodeGenSYCL/address-space-mangling.cpp
@@ -18,7 +18,7 @@ void foo(int *);
// X86: declare void @_Z3fooPU9SYprivatei(ptr noundef) #1
// X86: declare void @_Z3fooPi(ptr noundef) #1
-void test() {
+[[clang::sycl_external]] void test() {
__attribute__((opencl_global)) int *glob;
__attribute__((opencl_local)) int *loc;
__attribute__((opencl_private)) int *priv;
diff --git a/clang/test/CodeGenSYCL/amd-address-space-conversions.cpp b/clang/test/CodeGenSYCL/amd-address-space-conversions.cpp
index d316f22..17a9819 100644
--- a/clang/test/CodeGenSYCL/amd-address-space-conversions.cpp
+++ b/clang/test/CodeGenSYCL/amd-address-space-conversions.cpp
@@ -1,128 +1,128 @@
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fsycl-is-device -disable-llvm-passes -emit-llvm %s -o - | FileCheck %s
void bar(int &Data) {}
-// CHECK: define dso_local void @[[RAW_REF:[a-zA-Z0-9_]+]](ptr noundef nonnull align 4 dereferenceable(4) %
+// CHECK-DAG: define {{.*}} void @[[RAW_REF:[a-zA-Z0-9_]+]](ptr noundef nonnull align 4 dereferenceable(4) %
void bar2(int &Data) {}
-// CHECK: define dso_local void @[[RAW_REF2:[a-zA-Z0-9_]+]](ptr noundef nonnull align 4 dereferenceable(4) %
+// CHECK-DAG: define {{.*}} void @[[RAW_REF2:[a-zA-Z0-9_]+]](ptr noundef nonnull align 4 dereferenceable(4) %
void bar(__attribute__((opencl_local)) int &Data) {}
-// CHECK: define dso_local void @[[LOCAL_REF:[a-zA-Z0-9_]+]](ptr addrspace(3) noundef align 4 dereferenceable(4) %
+// CHECK-DAG: define {{.*}} void @[[LOCAL_REF:[a-zA-Z0-9_]+]](ptr addrspace(3) noundef align 4 dereferenceable(4) %
void foo(int *Data) {}
-// CHECK: define dso_local void @[[RAW_PTR:[a-zA-Z0-9_]+]](ptr noundef %
+// CHECK-DAG: define {{.*}} void @[[RAW_PTR:[a-zA-Z0-9_]+]](ptr noundef %
void foo2(int *Data) {}
-// CHECK: define dso_local void @[[RAW_PTR2:[a-zA-Z0-9_]+]](ptr noundef %
+// CHECK-DAG: define {{.*}} void @[[RAW_PTR2:[a-zA-Z0-9_]+]](ptr noundef %
void foo(__attribute__((opencl_local)) int *Data) {}
-// CHECK: define dso_local void @[[LOC_PTR:[a-zA-Z0-9_]+]](ptr addrspace(3) noundef %
+// CHECK-DAG: define {{.*}} void @[[LOC_PTR:[a-zA-Z0-9_]+]](ptr addrspace(3) noundef %
template <typename T>
-void tmpl(T t);
+void tmpl(T t) {}
// See Check Lines below.
-void usages() {
+[[clang::sycl_external]] void usages() {
int *NoAS;
- // CHECK: [[NoAS:%[a-zA-Z0-9]+]] = alloca ptr, align 8, addrspace(5)
+ // CHECK-DAG: [[NoAS:%[a-zA-Z0-9]+]] = alloca ptr, align 8, addrspace(5)
__attribute__((opencl_global)) int *GLOB;
- // CHECK: [[GLOB:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1), align 8, addrspace(5)
+ // CHECK-DAG: [[GLOB:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1), align 8, addrspace(5)
__attribute__((opencl_local)) int *LOC;
- // CHECK: [[LOC:%[a-zA-Z0-9]+]] = alloca ptr addrspace(3), align 4, addrspace(5)
+ // CHECK-DAG: [[LOC:%[a-zA-Z0-9]+]] = alloca ptr addrspace(3), align 4, addrspace(5)
__attribute__((opencl_private)) int *PRIV;
- // CHECK: [[PRIV:%[a-zA-Z0-9]+]] = alloca ptr addrspace(5), align 4, addrspace(5)
+ // CHECK-DAG: [[PRIV:%[a-zA-Z0-9]+]] = alloca ptr addrspace(5), align 4, addrspace(5)
__attribute__((opencl_global_device)) int *GLOBDEVICE;
- // CHECK: [[GLOB_DEVICE:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1), align 8, addrspace(5)
+ // CHECK-DAG: [[GLOB_DEVICE:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1), align 8, addrspace(5)
__attribute__((opencl_global_host)) int *GLOBHOST;
- // CHECK: [[GLOB_HOST:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1), align 8, addrspace(5)
+ // CHECK-DAG: [[GLOB_HOST:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1), align 8, addrspace(5)
LOC = nullptr;
- // CHECK: store ptr addrspace(3) addrspacecast (ptr null to ptr addrspace(3)), ptr [[LOC]].ascast, align 4
+ // CHECK-DAG: store ptr addrspace(3) addrspacecast (ptr null to ptr addrspace(3)), ptr [[LOC]].ascast, align 4
GLOB = nullptr;
- // CHECK: store ptr addrspace(1) null, ptr [[GLOB]].ascast, align 8
+ // CHECK-DAG: store ptr addrspace(1) null, ptr [[GLOB]].ascast, align 8
NoAS = (int *)GLOB;
- // CHECK: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]].ascast, align 8
- // CHECK: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr
- // CHECK: store ptr [[GLOB_CAST]], ptr [[NoAS]].ascast, align 8
+ // CHECK-DAG: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]].ascast, align 8
+ // CHECK-DAG: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr
+ // CHECK-DAG: store ptr [[GLOB_CAST]], ptr [[NoAS]].ascast, align 8
NoAS = (int *)LOC;
- // CHECK: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]].ascast, align 4
- // CHECK: [[LOC_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD]] to ptr
- // CHECK: store ptr [[LOC_CAST]], ptr [[NoAS]].ascast, align 8
+ // CHECK-DAG: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]].ascast, align 4
+ // CHECK-DAG: [[LOC_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD]] to ptr
+ // CHECK-DAG: store ptr [[LOC_CAST]], ptr [[NoAS]].ascast, align 8
NoAS = (int *)PRIV;
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(5), ptr [[PRIV]].ascast, align 4
- // CHECK: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(5) [[NoAS_LOAD]] to ptr
- // CHECK: store ptr %5, ptr [[NoAS]].ascast, align 8
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(5), ptr [[PRIV]].ascast, align 4
+ // CHECK-DAG: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(5) [[NoAS_LOAD]] to ptr
+  // CHECK-DAG: store ptr [[NoAS_CAST]], ptr [[NoAS]].ascast, align 8
GLOB = (__attribute__((opencl_global)) int *)NoAS;
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
- // CHECK: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr %6 to ptr addrspace(1)
- // CHECK: store ptr addrspace(1) %7, ptr [[GLOB]].ascast, align 8
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
+  // CHECK-DAG: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr [[NoAS_LOAD]] to ptr addrspace(1)
+  // CHECK-DAG: store ptr addrspace(1) [[NoAS_CAST]], ptr [[GLOB]].ascast, align 8
LOC = (__attribute__((opencl_local)) int *)NoAS;
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
- // CHECK: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr [[NoAS_LOAD]] to ptr addrspace(3)
- // CHECK: store ptr addrspace(3) %9, ptr [[LOC]].ascast, align 4
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
+ // CHECK-DAG: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr [[NoAS_LOAD]] to ptr addrspace(3)
+  // CHECK-DAG: store ptr addrspace(3) [[NoAS_CAST]], ptr [[LOC]].ascast, align 4
PRIV = (__attribute__((opencl_private)) int *)NoAS;
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
- // CHECK: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr [[NoAS_LOAD]] to ptr addrspace(5)
- // CHECK: store ptr addrspace(5) [[NoAS_CAST]], ptr [[PRIV]].ascast, align 4
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
+ // CHECK-DAG: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr [[NoAS_LOAD]] to ptr addrspace(5)
+ // CHECK-DAG: store ptr addrspace(5) [[NoAS_CAST]], ptr [[PRIV]].ascast, align 4
GLOB = (__attribute__((opencl_global)) int *)GLOBDEVICE;
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]]DEVICE.ascast, align 8
- // CHECK: store ptr addrspace(1) [[NoAS_LOAD]], ptr [[GLOB]].ascast, align 8
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]]DEVICE.ascast, align 8
+ // CHECK-DAG: store ptr addrspace(1) [[NoAS_LOAD]], ptr [[GLOB]].ascast, align 8
GLOB = (__attribute__((opencl_global)) int *)GLOBHOST;
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]]HOST.ascast, align 8
- // CHECK: tore ptr addrspace(1) [[NoAS_LOAD]], ptr [[GLOB]].ascast, align 8
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]]HOST.ascast, align 8
+  // CHECK-DAG: store ptr addrspace(1) [[NoAS_LOAD]], ptr [[GLOB]].ascast, align 8
bar(*GLOB);
- // CHECK: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]].ascast, align 8
- // CHECK: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr
- // CHECK: call void @[[RAW_REF]](ptr noundef nonnull align 4 dereferenceable(4) [[GLOB_CAST]])
+ // CHECK-DAG: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]].ascast, align 8
+ // CHECK-DAG: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr
+ // CHECK-DAG: call void @[[RAW_REF]](ptr noundef nonnull align 4 dereferenceable(4) [[GLOB_CAST]])
bar2(*GLOB);
- // CHECK: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]].ascast, align 8
- // CHECK: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr
- // CHECK: call void @[[RAW_REF2]](ptr noundef nonnull align 4 dereferenceable(4) [[GLOB_CAST]])
+ // CHECK-DAG: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]].ascast, align 8
+ // CHECK-DAG: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr
+ // CHECK-DAG: call void @[[RAW_REF2]](ptr noundef nonnull align 4 dereferenceable(4) [[GLOB_CAST]])
bar(*LOC);
- // CHECK: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]].ascast, align 4
- // CHECK: call void @_Z3barRU3AS3i(ptr addrspace(3) noundef align 4 dereferenceable(4) [[LOC_LOAD]])
+ // CHECK-DAG: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]].ascast, align 4
+ // CHECK-DAG: call void @_Z3barRU3AS3i(ptr addrspace(3) noundef align 4 dereferenceable(4) [[LOC_LOAD]])
bar2(*LOC);
- // CHECK: [[LOC_LOAD2:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]].ascast, align 4
- // CHECK: [[LOC_CAST2:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD2]] to ptr
- // CHECK: call void @_Z4bar2Ri(ptr noundef nonnull align 4 dereferenceable(4) [[LOC_CAST2]])
+ // CHECK-DAG: [[LOC_LOAD2:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]].ascast, align 4
+ // CHECK-DAG: [[LOC_CAST2:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD2]] to ptr
+ // CHECK-DAG: call void @_Z4bar2Ri(ptr noundef nonnull align 4 dereferenceable(4) [[LOC_CAST2]])
bar(*NoAS);
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
- // CHECK: call void @_Z3barRi(ptr noundef nonnull align 4 dereferenceable(4) [[NoAS_LOAD]])
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
+ // CHECK-DAG: call void @_Z3barRi(ptr noundef nonnull align 4 dereferenceable(4) [[NoAS_LOAD]])
bar2(*NoAS);
- // CHECK: [[NoAS_LOAD2:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
- // CHECK: call void @_Z4bar2Ri(ptr noundef nonnull align 4 dereferenceable(4) [[NoAS_LOAD2]])
+ // CHECK-DAG: [[NoAS_LOAD2:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
+ // CHECK-DAG: call void @_Z4bar2Ri(ptr noundef nonnull align 4 dereferenceable(4) [[NoAS_LOAD2]])
foo(GLOB);
- // CHECK: [[GLOB_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]].ascast, align 8
- // CHECK: [[GLOB_CAST3:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD3]] to ptr
- // CHECK: call void @[[RAW_PTR]](ptr noundef [[GLOB_CAST3]])
+ // CHECK-DAG: [[GLOB_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]].ascast, align 8
+ // CHECK-DAG: [[GLOB_CAST3:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD3]] to ptr
+ // CHECK-DAG: call void @[[RAW_PTR]](ptr noundef [[GLOB_CAST3]])
foo2(GLOB);
- // CHECK: [[GLOB_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]].ascast, align 8
- // CHECK: [[GLOB_CAST4:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD4]] to ptr
- // CHECK: call void @[[RAW_PTR2]](ptr noundef [[GLOB_CAST4]])
+ // CHECK-DAG: [[GLOB_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]].ascast, align 8
+ // CHECK-DAG: [[GLOB_CAST4:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD4]] to ptr
+ // CHECK-DAG: call void @[[RAW_PTR2]](ptr noundef [[GLOB_CAST4]])
foo(LOC);
- // CHECK: [[LOC_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]].ascast, align 4
- // CHECK: call void @[[LOC_PTR]](ptr addrspace(3) noundef [[LOC_LOAD3]])
+ // CHECK-DAG: [[LOC_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]].ascast, align 4
+ // CHECK-DAG: call void @[[LOC_PTR]](ptr addrspace(3) noundef [[LOC_LOAD3]])
foo2(LOC);
- // CHECK: [[LOC_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]].ascast, align 4
- // CHECK: [[LOC_CAST4:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD4]] to ptr
- // CHECK: call void @[[RAW_PTR2]](ptr noundef [[LOC_CAST4]])
+ // CHECK-DAG: [[LOC_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]].ascast, align 4
+ // CHECK-DAG: [[LOC_CAST4:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD4]] to ptr
+ // CHECK-DAG: call void @[[RAW_PTR2]](ptr noundef [[LOC_CAST4]])
foo(NoAS);
- // CHECK: [[NoAS_LOAD3:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
- // CHECK: call void @[[RAW_PTR]](ptr noundef [[NoAS_LOAD3]])
+ // CHECK-DAG: [[NoAS_LOAD3:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
+ // CHECK-DAG: call void @[[RAW_PTR]](ptr noundef [[NoAS_LOAD3]])
foo2(NoAS);
- // CHECK: [[NoAS_LOAD4:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
- // CHECK: call void @[[RAW_PTR2]](ptr noundef [[NoAS_LOAD4]])
+ // CHECK-DAG: [[NoAS_LOAD4:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
+ // CHECK-DAG: call void @[[RAW_PTR2]](ptr noundef [[NoAS_LOAD4]])
// Ensure that we still get 3 different template instantiations.
tmpl(GLOB);
- // CHECK: [[GLOB_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]].ascast, align 8
- // CHECK: call void @_Z4tmplIPU3AS1iEvT_(ptr addrspace(1) noundef [[GLOB_LOAD4]])
+ // CHECK-DAG: [[GLOB_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]].ascast, align 8
+ // CHECK-DAG: call void @_Z4tmplIPU3AS1iEvT_(ptr addrspace(1) noundef [[GLOB_LOAD4]])
tmpl(LOC);
- // CHECK: [[LOC_LOAD5:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]].ascast, align 4
- // CHECK: call void @_Z4tmplIPU3AS3iEvT_(ptr addrspace(3) noundef [[LOC_LOAD5]])
+ // CHECK-DAG: [[LOC_LOAD5:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]].ascast, align 4
+ // CHECK-DAG: call void @_Z4tmplIPU3AS3iEvT_(ptr addrspace(3) noundef [[LOC_LOAD5]])
tmpl(PRIV);
- // CHECK: [[PRIV_LOAD5:%[a-zA-Z0-9]+]] = load ptr addrspace(5), ptr [[PRIV]].ascast, align 4
- // CHECK: call void @_Z4tmplIPU3AS5iEvT_(ptr addrspace(5) noundef [[PRIV_LOAD5]])
+ // CHECK-DAG: [[PRIV_LOAD5:%[a-zA-Z0-9]+]] = load ptr addrspace(5), ptr [[PRIV]].ascast, align 4
+ // CHECK-DAG: call void @_Z4tmplIPU3AS5iEvT_(ptr addrspace(5) noundef [[PRIV_LOAD5]])
tmpl(NoAS);
- // CHECK: [[NoAS_LOAD5:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
- // CHECK: call void @_Z4tmplIPiEvT_(ptr noundef [[NoAS_LOAD5]])
+ // CHECK-DAG: [[NoAS_LOAD5:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]].ascast, align 8
+ // CHECK-DAG: call void @_Z4tmplIPiEvT_(ptr noundef [[NoAS_LOAD5]])
}
-// CHECK: declare void @_Z4tmplIPU3AS1iEvT_(ptr addrspace(1) noundef)
-// CHECK: declare void @_Z4tmplIPU3AS3iEvT_(ptr addrspace(3) noundef)
-// CHECK: declare void @_Z4tmplIPU3AS5iEvT_(ptr addrspace(5) noundef)
-// CHECK: declare void @_Z4tmplIPiEvT_(ptr noundef)
+// CHECK-DAG: define linkonce_odr void @_Z4tmplIPU3AS1iEvT_(ptr addrspace(1) noundef %
+// CHECK-DAG: define linkonce_odr void @_Z4tmplIPU3AS3iEvT_(ptr addrspace(3) noundef %
+// CHECK-DAG: define linkonce_odr void @_Z4tmplIPU3AS5iEvT_(ptr addrspace(5) noundef %
+// CHECK-DAG: define linkonce_odr void @_Z4tmplIPiEvT_(ptr noundef %
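Note on the CHECK -> CHECK-DAG rewrite above: plain CHECK lines must match in output order, while a group of CHECK-DAG lines may match in any order. Once tmpl is a real definition and usages is an ordinary [[clang::sycl_external]] function, the order in which these symbols are emitted into the module is no longer guaranteed, so the order-independent form is required. A minimal illustration of the difference, using hypothetical IR that is not part of this test:
// CHECK: define void @a()        ; @a must precede @b in the output
// CHECK: define void @b()
// CHECK-DAG: define void @b()    ; @a and @b may appear in either order
// CHECK-DAG: define void @a()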
diff --git a/clang/test/CodeGenSYCL/cuda-address-space-conversions.cpp b/clang/test/CodeGenSYCL/cuda-address-space-conversions.cpp
index 1875029..ffb601e 100644
--- a/clang/test/CodeGenSYCL/cuda-address-space-conversions.cpp
+++ b/clang/test/CodeGenSYCL/cuda-address-space-conversions.cpp
@@ -1,122 +1,122 @@
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fsycl-is-device -disable-llvm-passes -emit-llvm %s -o - | FileCheck %s
void bar(int &Data) {}
-// CHECK: define dso_local void @[[RAW_REF:[a-zA-Z0-9_]+]](ptr noundef nonnull align 4 dereferenceable(4) %
+// CHECK-DAG: define {{.*}} void @[[RAW_REF:[a-zA-Z0-9_]+]](ptr noundef nonnull align 4 dereferenceable(4) %
void bar2(int &Data) {}
-// CHECK: define dso_local void @[[RAW_REF2:[a-zA-Z0-9_]+]](ptr noundef nonnull align 4 dereferenceable(4) %
+// CHECK-DAG: define {{.*}} void @[[RAW_REF2:[a-zA-Z0-9_]+]](ptr noundef nonnull align 4 dereferenceable(4) %
void bar(__attribute__((opencl_local)) int &Data) {}
-// CHECK: define dso_local void @[[LOCAL_REF:[a-zA-Z0-9_]+]](ptr addrspace(3) noundef align 4 dereferenceable(4) %
+// CHECK-DAG: define {{.*}} void @[[LOCAL_REF:[a-zA-Z0-9_]+]](ptr addrspace(3) noundef align 4 dereferenceable(4) %
void foo(int *Data) {}
-// CHECK: define dso_local void @[[RAW_PTR:[a-zA-Z0-9_]+]](ptr noundef %
+// CHECK-DAG: define {{.*}} void @[[RAW_PTR:[a-zA-Z0-9_]+]](ptr noundef %
void foo2(int *Data) {}
-// CHECK: define dso_local void @[[RAW_PTR2:[a-zA-Z0-9_]+]](ptr noundef %
+// CHECK-DAG: define {{.*}} void @[[RAW_PTR2:[a-zA-Z0-9_]+]](ptr noundef %
void foo(__attribute__((opencl_local)) int *Data) {}
-// CHECK: define dso_local void @[[LOC_PTR:[a-zA-Z0-9_]+]](ptr addrspace(3) noundef %
+// CHECK-DAG: define {{.*}} void @[[LOC_PTR:[a-zA-Z0-9_]+]](ptr addrspace(3) noundef %
template <typename T>
void tmpl(T t);
// See Check Lines below.
-void usages() {
+[[clang::sycl_external]] void usages() {
int *NoAS;
- // CHECK: [[NoAS:%[a-zA-Z0-9]+]] = alloca ptr, align 8
+ // CHECK-DAG: [[NoAS:%[a-zA-Z0-9]+]] = alloca ptr, align 8
__attribute__((opencl_global)) int *GLOB;
- // CHECK: [[GLOB:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1), align 8
+ // CHECK-DAG: [[GLOB:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1), align 8
__attribute__((opencl_local)) int *LOC;
- // CHECK: [[LOC:%[a-zA-Z0-9]+]] = alloca ptr addrspace(3), align 8
+ // CHECK-DAG: [[LOC:%[a-zA-Z0-9]+]] = alloca ptr addrspace(3), align 8
__attribute__((opencl_private)) int *PRIV;
- // CHECK: [[PRIV:%[a-zA-Z0-9]+]] = alloca ptr, align 8
+ // CHECK-DAG: [[PRIV:%[a-zA-Z0-9]+]] = alloca ptr, align 8
__attribute__((opencl_global_device)) int *GLOBDEVICE;
- // CHECK: [[GLOB_DEVICE:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1), align 8
+ // CHECK-DAG: [[GLOB_DEVICE:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1), align 8
__attribute__((opencl_global_host)) int *GLOBHOST;
- // CHECK: [[GLOB_HOST:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1), align 8
+ // CHECK-DAG: [[GLOB_HOST:%[a-zA-Z0-9]+]] = alloca ptr addrspace(1), align 8
LOC = nullptr;
- // CHECK: store ptr addrspace(3) addrspacecast (ptr null to ptr addrspace(3)), ptr [[LOC]], align 8
+ // CHECK-DAG: store ptr addrspace(3) addrspacecast (ptr null to ptr addrspace(3)), ptr [[LOC]], align 8
GLOB = nullptr;
- // CHECK: store ptr addrspace(1) null, ptr [[GLOB]], align 8
+ // CHECK-DAG: store ptr addrspace(1) null, ptr [[GLOB]], align 8
NoAS = (int *)GLOB;
- // CHECK: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]], align 8
- // CHECK: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr
- // CHECK: store ptr [[GLOB_CAST]], ptr [[NoAS]], align 8
+ // CHECK-DAG: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]], align 8
+ // CHECK-DAG: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr
+ // CHECK-DAG: store ptr [[GLOB_CAST]], ptr [[NoAS]], align 8
NoAS = (int *)LOC;
- // CHECK: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]], align 8
- // CHECK: [[LOC_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD]] to ptr
- // CHECK: store ptr [[LOC_CAST]], ptr [[NoAS]], align 8
+ // CHECK-DAG: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]], align 8
+ // CHECK-DAG: [[LOC_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD]] to ptr
+ // CHECK-DAG: store ptr [[LOC_CAST]], ptr [[NoAS]], align 8
NoAS = (int *)PRIV;
- // CHECK: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[PRIV]], align 8
- // CHECK: store ptr [[LOC_LOAD]], ptr [[NoAS]], align 8
+ // CHECK-DAG: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[PRIV]], align 8
+ // CHECK-DAG: store ptr [[LOC_LOAD]], ptr [[NoAS]], align 8
GLOB = (__attribute__((opencl_global)) int *)NoAS;
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
- // CHECK: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr [[NoAS_LOAD]] to ptr addrspace(1)
- // CHECK: store ptr addrspace(1) [[NoAS_CAST]], ptr [[GLOB]], align 8
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
+ // CHECK-DAG: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr [[NoAS_LOAD]] to ptr addrspace(1)
+ // CHECK-DAG: store ptr addrspace(1) [[NoAS_CAST]], ptr [[GLOB]], align 8
LOC = (__attribute__((opencl_local)) int *)NoAS;
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
- // CHECK: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr [[NoAS_LOAD]] to ptr addrspace(3)
- // CHECK: store ptr addrspace(3) [[NoAS_CAST]], ptr [[LOC]], align 8
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
+ // CHECK-DAG: [[NoAS_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr [[NoAS_LOAD]] to ptr addrspace(3)
+ // CHECK-DAG: store ptr addrspace(3) [[NoAS_CAST]], ptr [[LOC]], align 8
PRIV = (__attribute__((opencl_private)) int *)NoAS;
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
- // CHECK: store ptr [[NoAS_LOAD]], ptr [[PRIV]], align 8
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
+ // CHECK-DAG: store ptr [[NoAS_LOAD]], ptr [[PRIV]], align 8
GLOB = (__attribute__((opencl_global)) int *)GLOBDEVICE;
- // CHECK: [[GLOBDEVICE_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB_DEVICE]], align 8
- // CHECK: store ptr addrspace(1) [[GLOBDEVICE_LOAD]], ptr %GLOB, align 8
+ // CHECK-DAG: [[GLOBDEVICE_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB_DEVICE]], align 8
+  // CHECK-DAG: store ptr addrspace(1) [[GLOBDEVICE_LOAD]], ptr [[GLOB]], align 8
GLOB = (__attribute__((opencl_global)) int *)GLOBHOST;
- // CHECK: [[GLOB_HOST_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB_HOST]], align 8
- // CHECK: store ptr addrspace(1) [[GLOB_HOST_LOAD]], ptr [[GLOB]], align 8
+ // CHECK-DAG: [[GLOB_HOST_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB_HOST]], align 8
+ // CHECK-DAG: store ptr addrspace(1) [[GLOB_HOST_LOAD]], ptr [[GLOB]], align 8
bar(*GLOB);
- // CHECK: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]], align 8
- // CHECK: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr
- // CHECK: call void @[[RAW_REF]](ptr noundef nonnull align 4 dereferenceable(4) [[GLOB_CAST]])
+ // CHECK-DAG: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]], align 8
+ // CHECK-DAG: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr
+ // CHECK-DAG: call void @[[RAW_REF]](ptr noundef nonnull align 4 dereferenceable(4) [[GLOB_CAST]])
bar2(*GLOB);
- // CHECK: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]], align 8
- // CHECK: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr
- // CHECK: call void @[[RAW_REF2]](ptr noundef nonnull align 4 dereferenceable(4) [[GLOB_CAST]])
+ // CHECK-DAG: [[GLOB_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]], align 8
+ // CHECK-DAG: [[GLOB_CAST:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD]] to ptr
+ // CHECK-DAG: call void @[[RAW_REF2]](ptr noundef nonnull align 4 dereferenceable(4) [[GLOB_CAST]])
bar(*LOC);
- // CHECK: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]], align 8
- // CHECK: call void @[[LOCAL_REF]](ptr addrspace(3) noundef align 4 dereferenceable(4) [[LOC_LOAD]])
+ // CHECK-DAG: [[LOC_LOAD:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]], align 8
+ // CHECK-DAG: call void @[[LOCAL_REF]](ptr addrspace(3) noundef align 4 dereferenceable(4) [[LOC_LOAD]])
bar2(*LOC);
- // CHECK: [[LOC_LOAD2:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]], align 8
- // CHECK: [[LOC_CAST2:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD2]] to ptr
- // CHECK: call void @[[RAW_REF2]](ptr noundef nonnull align 4 dereferenceable(4) [[LOC_CAST2]])
+ // CHECK-DAG: [[LOC_LOAD2:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]], align 8
+ // CHECK-DAG: [[LOC_CAST2:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD2]] to ptr
+ // CHECK-DAG: call void @[[RAW_REF2]](ptr noundef nonnull align 4 dereferenceable(4) [[LOC_CAST2]])
bar(*NoAS);
- // CHECK: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
- // CHECK: call void @[[RAW_REF]](ptr noundef nonnull align 4 dereferenceable(4) [[NoAS_LOAD]])
+ // CHECK-DAG: [[NoAS_LOAD:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
+ // CHECK-DAG: call void @[[RAW_REF]](ptr noundef nonnull align 4 dereferenceable(4) [[NoAS_LOAD]])
bar2(*NoAS);
- // CHECK: [[NoAS_LOAD2:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
- // CHECK: call void @[[RAW_REF2]](ptr noundef nonnull align 4 dereferenceable(4) [[NoAS_LOAD2]])
+ // CHECK-DAG: [[NoAS_LOAD2:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
+ // CHECK-DAG: call void @[[RAW_REF2]](ptr noundef nonnull align 4 dereferenceable(4) [[NoAS_LOAD2]])
foo(GLOB);
- // CHECK: [[GLOB_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]], align 8
- // CHECK: [[GLOB_CAST3:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD3]] to ptr
- // CHECK: call void @[[RAW_PTR]](ptr noundef [[GLOB_CAST3]])
+ // CHECK-DAG: [[GLOB_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]], align 8
+ // CHECK-DAG: [[GLOB_CAST3:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD3]] to ptr
+ // CHECK-DAG: call void @[[RAW_PTR]](ptr noundef [[GLOB_CAST3]])
foo2(GLOB);
- // CHECK: [[GLOB_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]], align 8
- // CHECK: [[GLOB_CAST4:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD4]] to ptr
- // CHECK: call void @[[RAW_PTR2]](ptr noundef [[GLOB_CAST4]])
+ // CHECK-DAG: [[GLOB_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]], align 8
+ // CHECK-DAG: [[GLOB_CAST4:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(1) [[GLOB_LOAD4]] to ptr
+ // CHECK-DAG: call void @[[RAW_PTR2]](ptr noundef [[GLOB_CAST4]])
foo(LOC);
- // CHECK: [[LOC_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]], align 8
- // CHECK: call void @[[LOC_PTR]](ptr addrspace(3) noundef [[LOC_LOAD3]])
+ // CHECK-DAG: [[LOC_LOAD3:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]], align 8
+ // CHECK-DAG: call void @[[LOC_PTR]](ptr addrspace(3) noundef [[LOC_LOAD3]])
foo2(LOC);
- // CHECK: [[LOC_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]], align 8
- // CHECK: [[LOC_CAST4:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD4]] to ptr
- // CHECK: call void @[[RAW_PTR2]](ptr noundef [[LOC_CAST4]])
+ // CHECK-DAG: [[LOC_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]], align 8
+ // CHECK-DAG: [[LOC_CAST4:%[a-zA-Z0-9]+]] = addrspacecast ptr addrspace(3) [[LOC_LOAD4]] to ptr
+ // CHECK-DAG: call void @[[RAW_PTR2]](ptr noundef [[LOC_CAST4]])
foo(NoAS);
- // CHECK: [[NoAS_LOAD3:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
- // CHECK: call void @[[RAW_PTR]](ptr noundef [[NoAS_LOAD3]])
+ // CHECK-DAG: [[NoAS_LOAD3:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
+ // CHECK-DAG: call void @[[RAW_PTR]](ptr noundef [[NoAS_LOAD3]])
foo2(NoAS);
- // CHECK: [[NoAS_LOAD4:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
- // CHECK: call void @[[RAW_PTR2]](ptr noundef [[NoAS_LOAD4]])
+ // CHECK-DAG: [[NoAS_LOAD4:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
+ // CHECK-DAG: call void @[[RAW_PTR2]](ptr noundef [[NoAS_LOAD4]])
tmpl(GLOB);
- // CHECK: [[GLOB_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]], align 8
- // CHECK: call void @_Z4tmplIPU3AS1iEvT_(ptr addrspace(1) noundef [[GLOB_LOAD4]])
+ // CHECK-DAG: [[GLOB_LOAD4:%[a-zA-Z0-9]+]] = load ptr addrspace(1), ptr [[GLOB]], align 8
+ // CHECK-DAG: call void @_Z4tmplIPU3AS1iEvT_(ptr addrspace(1) noundef [[GLOB_LOAD4]])
tmpl(LOC);
- // CHECK: [[LOC_LOAD5:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]], align 8
- // CHECK: call void @_Z4tmplIPU3AS3iEvT_(ptr addrspace(3) noundef [[LOC_LOAD5]])
+ // CHECK-DAG: [[LOC_LOAD5:%[a-zA-Z0-9]+]] = load ptr addrspace(3), ptr [[LOC]], align 8
+ // CHECK-DAG: call void @_Z4tmplIPU3AS3iEvT_(ptr addrspace(3) noundef [[LOC_LOAD5]])
tmpl(PRIV);
- // CHECK: [[PRIV_LOAD5:%[a-zA-Z0-9]+]] = load ptr, ptr [[PRIV]], align 8
- // CHECK: call void @_Z4tmplIPiEvT_(ptr noundef [[PRIV_LOAD5]])
+ // CHECK-DAG: [[PRIV_LOAD5:%[a-zA-Z0-9]+]] = load ptr, ptr [[PRIV]], align 8
+ // CHECK-DAG: call void @_Z4tmplIPiEvT_(ptr noundef [[PRIV_LOAD5]])
tmpl(NoAS);
-// CHECK: %33 = load ptr, ptr %NoAS, align 8
-// CHECK: call void @_Z4tmplIPiEvT_(ptr noundef %33)
+  // CHECK-DAG: [[NoAS_LOAD5:%[a-zA-Z0-9]+]] = load ptr, ptr [[NoAS]], align 8
+  // CHECK-DAG: call void @_Z4tmplIPiEvT_(ptr noundef [[NoAS_LOAD5]])
}
-// CHECK: declare void @_Z4tmplIPU3AS1iEvT_(ptr addrspace(1) noundef)
-// CHECK: declare void @_Z4tmplIPU3AS3iEvT_(ptr addrspace(3) noundef)
-// CHECK: declare void @_Z4tmplIPiEvT_(ptr noundef)
+// CHECK-DAG: void @_Z4tmplIPU3AS1iEvT_(ptr addrspace(1) noundef
+// CHECK-DAG: void @_Z4tmplIPU3AS3iEvT_(ptr addrspace(3) noundef
+// CHECK-DAG: void @_Z4tmplIPiEvT_(ptr noundef
diff --git a/clang/test/CodeGenSYCL/debug-info-kernel-variables.cpp b/clang/test/CodeGenSYCL/debug-info-kernel-variables.cpp
index 96c0dcf..551c4e7 100644
--- a/clang/test/CodeGenSYCL/debug-info-kernel-variables.cpp
+++ b/clang/test/CodeGenSYCL/debug-info-kernel-variables.cpp
@@ -18,7 +18,7 @@ KERNEL void parallel_for(const KernelType &KernelFunc) {
KernelFunc();
}
-void my_kernel(int my_param) {
+[[clang::sycl_external]] void my_kernel(int my_param) {
int my_local = 0;
my_local = my_param;
}
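The [[clang::sycl_external]] annotation being added across these tests marks a function as part of the SYCL device interface, so its definition is emitted for the device even when no kernel in the translation unit reaches it. A minimal sketch of the effect under -fsycl-is-device (function names here are illustrative, not taken from the tests):
[[clang::sycl_external]] void emitted(int *p) { *p = 0; }  // definition emitted for the device target
void host_only(int *p) { *p = 0; }                         // skipped unless reachable from device code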
diff --git a/clang/test/CodeGenSYCL/field-annotate-addr-space.cpp b/clang/test/CodeGenSYCL/field-annotate-addr-space.cpp
index 26bfda8..fe7a160 100644
--- a/clang/test/CodeGenSYCL/field-annotate-addr-space.cpp
+++ b/clang/test/CodeGenSYCL/field-annotate-addr-space.cpp
@@ -9,7 +9,7 @@ struct HasField {
int *a;
};
-void foo(int *b) {
+[[clang::sycl_external]] void foo(int *b) {
struct HasField f;
// CHECK: %[[A:.+]] = getelementptr inbounds nuw %struct.HasField, ptr addrspace(4) %{{.+}}
// CHECK: %[[CALL:.+]] = call ptr addrspace(4) @llvm.ptr.annotation.p4.p1(ptr addrspace(4) %[[A]], ptr addrspace(1) [[ANNOT]]
diff --git a/clang/test/CodeGenSYCL/function-attrs.cpp b/clang/test/CodeGenSYCL/function-attrs.cpp
index 83a77a6..81f8936 100644
--- a/clang/test/CodeGenSYCL/function-attrs.cpp
+++ b/clang/test/CodeGenSYCL/function-attrs.cpp
@@ -5,11 +5,11 @@
int foo();
// CHECK-LABEL: define dso_local spir_func void @_Z3barv(
-// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+// CHECK-SAME: ) #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[A_ASCAST:%.*]] = addrspacecast ptr [[A]] to ptr addrspace(4)
-// CHECK-NEXT: [[CALL:%.*]] = call spir_func noundef i32 @_Z3foov() #[[ATTR1:[0-9]+]]
+// CHECK-NEXT: [[CALL:%.*]] = call spir_func noundef i32 @_Z3foov() #[[ATTR3:[0-9]+]]
// CHECK-NEXT: store i32 [[CALL]], ptr addrspace(4) [[A_ASCAST]], align 4
// CHECK-NEXT: ret void
//
@@ -18,7 +18,7 @@ void bar() {
}
// CHECK-LABEL: define dso_local spir_func noundef i32 @_Z3foov(
-// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-SAME: ) #[[ATTR2]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr [[RETVAL]] to ptr addrspace(4)
@@ -29,21 +29,10 @@ int foo() {
}
template <typename Name, typename Func>
-__attribute__((sycl_kernel)) void kernel_single_task(const Func &kernelFunc) {
+[[clang::sycl_kernel_entry_point(Name)]] void kernel_single_task(const Func &kernelFunc) {
kernelFunc();
}
-// CHECK-LABEL: define dso_local noundef i32 @main(
-// CHECK-SAME: ) #[[ATTR0]] {
-// CHECK-NEXT: entry:
-// CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
-// CHECK-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
-// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr [[RETVAL]] to ptr addrspace(4)
-// CHECK-NEXT: [[REF_TMP_ASCAST:%.*]] = addrspacecast ptr [[REF_TMP]] to ptr addrspace(4)
-// CHECK-NEXT: store i32 0, ptr addrspace(4) [[RETVAL_ASCAST]], align 4
-// CHECK-NEXT: call spir_func void @_Z18kernel_single_taskIZ4mainE11fake_kernelZ4mainEUlvE_EvRKT0_(ptr addrspace(4) noundef align 1 dereferenceable(1) [[REF_TMP_ASCAST]]) #[[ATTR1]]
-// CHECK-NEXT: ret i32 0
-//
int main() {
kernel_single_task<class fake_kernel>([] { bar(); });
return 0;
@@ -52,5 +41,5 @@ int main() {
// CHECK: attributes #0 = { convergent mustprogress noinline norecurse nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
// CHECK: attributes #1 = { convergent nounwind }
//.
-// CHECK: !0 = !{i32 1, !"wchar_size", i32 4}
+// CHECK: !{{[0-9]+}} = !{i32 1, !"wchar_size", i32 4}
//.
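Hard-coded metadata indices are brittle: the number assigned to the wchar_size node depends on how many metadata nodes precede it in the module, so the relaxed pattern accepts any index. Either of these hypothetical outputs now satisfies the check:
!0 = !{i32 1, !"wchar_size", i32 4}
!7 = !{i32 1, !"wchar_size", i32 4}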
diff --git a/clang/test/CodeGenSYCL/functionptr-addrspace.cpp b/clang/test/CodeGenSYCL/functionptr-addrspace.cpp
index a477b4c..060104a 100644
--- a/clang/test/CodeGenSYCL/functionptr-addrspace.cpp
+++ b/clang/test/CodeGenSYCL/functionptr-addrspace.cpp
@@ -8,7 +8,7 @@ __attribute__((sycl_kernel)) void kernel_single_task(const Func &kernelFunc) {
}
// CHECK: define dso_local spir_func{{.*}}invoke_function{{.*}}(ptr noundef %fptr, ptr addrspace(4) noundef %ptr)
-void invoke_function(int (*fptr)(), int *ptr) {}
+[[clang::sycl_external]] void invoke_function(int (*fptr)(), int *ptr) {}
int f() { return 0; }
diff --git a/clang/test/CodeGenSYCL/kernel-caller-entry-point.cpp b/clang/test/CodeGenSYCL/kernel-caller-entry-point.cpp
index b568752..cd1d4d80 100644
--- a/clang/test/CodeGenSYCL/kernel-caller-entry-point.cpp
+++ b/clang/test/CodeGenSYCL/kernel-caller-entry-point.cpp
@@ -100,11 +100,8 @@ int main() {
// Verify that SYCL kernel caller functions are emitted for each device target.
//
-// FIXME: The following set of matches are used to skip over the declaration of
-// main(). main() shouldn't be emitted in device code, but that pruning isn't
-// performed yet.
-// CHECK-DEVICE: Function Attrs: convergent mustprogress noinline norecurse nounwind optnone
-// CHECK-DEVICE-NEXT: define {{[a-z_ ]*}}noundef i32 @main() #0
+// main() shouldn't be emitted in device code.
+// CHECK-NOT: @main()
// IR for the SYCL kernel caller function generated for
// single_purpose_kernel_task with single_purpose_kernel_name as the SYCL kernel
diff --git a/clang/test/CodeGenSYCL/sycl-external-attr.cpp b/clang/test/CodeGenSYCL/sycl-external-attr.cpp
new file mode 100644
index 0000000..2c22e65
--- /dev/null
+++ b/clang/test/CodeGenSYCL/sycl-external-attr.cpp
@@ -0,0 +1,85 @@
+// RUN: %clang_cc1 -fsycl-is-device -triple spir64-unknown-unknown -disable-llvm-passes -emit-llvm %s -o - | FileCheck %s
+
+// This tests code generation when the sycl_external attribute is used.
+
+// Function defined and not used - symbols emitted
+[[clang::sycl_external]] int square(int x) { return x*x; }
+// CHECK: define dso_local spir_func noundef i32 @_Z6squarei
+
+// Function defined and used - symbols emitted
+[[clang::sycl_external]] int squareUsed(int x) { return x*x; }
+// CHECK: define dso_local spir_func noundef i32 @_Z10squareUsedi
+
+// FIXME: Constexpr function defined and not used - symbols emitted
+[[clang::sycl_external]] constexpr int squareInlined(int x) { return x*x; }
+// CHECK: define linkonce_odr spir_func noundef i32 @_Z13squareInlinedi
+
+// Function declared but not defined or used - no symbols emitted
+[[clang::sycl_external]] int declOnly();
+// CHECK-NOT: define {{.*}} i32 @_Z8declOnlyv
+// CHECK-NOT: declare {{.*}} i32 @_Z8declOnlyv
+
+// Function declared and used in host but not defined - no symbols emitted
+[[clang::sycl_external]] void declUsedInHost(int y);
+
+// Function declared and used in device but not defined - external declaration emitted
+[[clang::sycl_external]] void declUsedInDevice(int y);
+// CHECK: define dso_local spir_func void @_Z9deviceUsev
+[[clang::sycl_external]] void deviceUse() { declUsedInDevice(3); }
+// CHECK: declare spir_func void @_Z16declUsedInDevicei
+
+// Function declared with the attribute and later defined - definition emitted
+[[clang::sycl_external]] int func1(int arg);
+int func1(int arg) { return arg; }
+// CHECK: define dso_local spir_func noundef i32 @_Z5func1i
+
+class A {
+// Unused defaulted special member functions - no symbols emitted
+ [[clang::sycl_external]] A& operator=(A& a) = default;
+};
+
+class B {
+ [[clang::sycl_external]] virtual void BFunc1WithAttr() { int i = 1; }
+// CHECK: define linkonce_odr spir_func void @_ZN1B14BFunc1WithAttrEv
+ virtual void BFunc2NoAttr() { int i = 2; }
+};
+
+class C {
+// Special member function defined - definition emitted
+ [[clang::sycl_external]] ~C() {}
+// CHECK: define linkonce_odr spir_func void @_ZN1CD1Ev
+};
+
+// Function reachable from an unused function - definition emitted
+int ret1() { return 1; }
+[[clang::sycl_external]] int withAttr() { return ret1(); }
+// CHECK: define dso_local spir_func noundef i32 @_Z8withAttrv
+// CHECK: define dso_local spir_func noundef i32 @_Z4ret1v
+
+template <typename T>
+[[clang::sycl_external]] void tFunc1(T arg) {}
+// Explicit specialization defined - symbols emitted
+template<>
+[[clang::sycl_external]] void tFunc1<int>(int arg) {}
+// CHECK: define dso_local spir_func void @_Z6tFunc1IiEvT_
+
+template <typename T>
+[[clang::sycl_external]] void tFunc2(T arg) {}
+template void tFunc2<int>(int arg);
+// CHECK: define weak_odr spir_func void @_Z6tFunc2IiEvT_
+template<> void tFunc2<char>(char arg) {}
+// CHECK: define dso_local spir_func void @_Z6tFunc2IcEvT_
+template<> [[clang::sycl_external]] void tFunc2<long>(long arg) {}
+// CHECK: define dso_local spir_func void @_Z6tFunc2IlEvT_
+
+// Functions defined without the sycl_external attribute that are used
+// in host code but not in device code are not emitted.
+int squareNoAttr(int x) { return x*x; }
+// CHECK-NOT: define {{.*}} i32 @_Z12squareNoAttri
+
+int main() {
+ declUsedInHost(4);
+ int i = squareUsed(5);
+ int j = squareNoAttr(6);
+ return 0;
+}
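Taken together, the checks above show that the linkage of a device symbol emitted for [[clang::sycl_external]] follows the ordinary C++ rules rather than anything attribute-specific. A rough summary of the cases exercised (comments only, not additional checks):
// plain definition (square, withAttr)                     -> define dso_local
// constexpr, in-class, or implicitly inline definition
//   (squareInlined, B::BFunc1WithAttr, C::~C)             -> define linkonce_odr
// explicit instantiation definition (tFunc2<int>)         -> define weak_odr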
diff --git a/clang/test/CodeGenSYCL/unique_stable_name.cpp b/clang/test/CodeGenSYCL/unique_stable_name.cpp
index cc9dd61..3ab7e3b8 100644
--- a/clang/test/CodeGenSYCL/unique_stable_name.cpp
+++ b/clang/test/CodeGenSYCL/unique_stable_name.cpp
@@ -1,22 +1,22 @@
-// RUN: %clang_cc1 -triple spir64-unknown-unknown-sycldevice -fsycl-is-device -disable-llvm-passes -emit-llvm %s -o - | FileCheck %s
-// CHECK: @[[LAMBDA_KERNEL3:[^\w]+]] = private unnamed_addr addrspace(1) constant [[LAMBDA_K3_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZ4mainEUlPZ4mainEUlvE_E_\00"
-// CHECK: @[[INT1:[^\w]+]] = private unnamed_addr addrspace(1) constant [[INT_SIZE:\[[0-9]+ x i8\]]] c"_ZTSi\00"
-// CHECK: @[[STRING:[^\w]+]] = private unnamed_addr addrspace(1) constant [[STRING_SIZE:\[[0-9]+ x i8\]]] c"_ZTSAppL_ZZ4mainE1jE_i\00",
-// CHECK: @[[INT2:[^\w]+]] = private unnamed_addr addrspace(1) constant [[INT_SIZE]] c"_ZTSi\00"
-// CHECK: @[[LAMBDA_X:[^\w]+]] = private unnamed_addr addrspace(1) constant [[LAMBDA_X_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE_\00"
-// CHECK: @[[MACRO_X:[^\w]+]] = private unnamed_addr addrspace(1) constant [[MACRO_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE0_\00"
-// CHECK: @[[MACRO_Y:[^\w]+]] = private unnamed_addr addrspace(1) constant [[MACRO_SIZE]] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE1_\00"
-// CHECK: @{{.*}} = private unnamed_addr addrspace(1) constant [32 x i8] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE2_\00", align 1
-// CHECK: @{{.*}} = private unnamed_addr addrspace(1) constant [32 x i8] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE3_\00", align 1
-// CHECK: @[[MACRO_MACRO_X:[^\w]+]] = private unnamed_addr addrspace(1) constant [[MACRO_MACRO_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE4_\00"
-// CHECK: @[[MACRO_MACRO_Y:[^\w]+]] = private unnamed_addr addrspace(1) constant [[MACRO_MACRO_SIZE]] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE5_\00"
-// CHECK: @[[INT3:[^\w]+]] = private unnamed_addr addrspace(1) constant [[INT_SIZE]] c"_ZTSi\00"
-// CHECK: @[[LAMBDA:[^\w]+]] = private unnamed_addr addrspace(1) constant [[LAMBDA_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE_\00"
-// CHECK: @[[LAMBDA_IN_DEP_INT:[^\w]+]] = private unnamed_addr addrspace(1) constant [[DEP_INT_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZ28lambda_in_dependent_functionIiEvvEUlvE_\00",
-// CHECK: @[[LAMBDA_IN_DEP_X:[^\w]+]] = private unnamed_addr addrspace(1) constant [[DEP_LAMBDA_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZ28lambda_in_dependent_functionIZZ4mainENKUlvE0_clEvEUlvE_EvvEUlvE_\00",
-// CHECK: @[[LAMBDA_NO_DEP:[^\w]+]] = private unnamed_addr addrspace(1) constant [[NO_DEP_LAMBDA_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZ13lambda_no_depIidEvT_T0_EUlidE_\00",
-// CHECK: @[[LAMBDA_TWO_DEP:[^\w]+]] = private unnamed_addr addrspace(1) constant [[DEP_LAMBDA1_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZ14lambda_two_depIZZ4mainENKUlvE0_clEvEUliE_ZZ4mainENKS0_clEvEUldE_EvvEUlvE_\00",
-// CHECK: @[[LAMBDA_TWO_DEP2:[^\w]+]] = private unnamed_addr addrspace(1) constant [[DEP_LAMBDA2_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZ14lambda_two_depIZZ4mainENKUlvE0_clEvEUldE_ZZ4mainENKS0_clEvEUliE_EvvEUlvE_\00",
+// RUN: %clang_cc1 -triple x86_64-linux-pc -fsycl-is-host -disable-llvm-passes -emit-llvm %s -o - | FileCheck %s
+// CHECK: @[[LAMBDA_KERNEL3:[^\w]+]] = private unnamed_addr constant [[LAMBDA_K3_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZ4mainEUlPZ4mainEUlvE_E_\00"
+// CHECK: @[[INT1:[^\w]+]] = private unnamed_addr constant [[INT_SIZE:\[[0-9]+ x i8\]]] c"_ZTSi\00"
+// CHECK: @[[STRING:[^\w]+]] = private unnamed_addr constant [[STRING_SIZE:\[[0-9]+ x i8\]]] c"_ZTSAppL_ZZ4mainE1jE_i\00",
+// CHECK: @[[INT2:[^\w]+]] = private unnamed_addr constant [[INT_SIZE]] c"_ZTSi\00"
+// CHECK: @[[LAMBDA_X:[^\w]+]] = private unnamed_addr constant [[LAMBDA_X_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE_\00"
+// CHECK: @[[MACRO_X:[^\w]+]] = private unnamed_addr constant [[MACRO_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE0_\00"
+// CHECK: @[[MACRO_Y:[^\w]+]] = private unnamed_addr constant [[MACRO_SIZE]] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE1_\00"
+// CHECK: @{{.*}} = private unnamed_addr constant [32 x i8] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE2_\00", align 1
+// CHECK: @{{.*}} = private unnamed_addr constant [32 x i8] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE3_\00", align 1
+// CHECK: @[[MACRO_MACRO_X:[^\w]+]] = private unnamed_addr constant [[MACRO_MACRO_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE4_\00"
+// CHECK: @[[MACRO_MACRO_Y:[^\w]+]] = private unnamed_addr constant [[MACRO_MACRO_SIZE]] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE5_\00"
+// CHECK: @[[INT3:[^\w]+]] = private unnamed_addr constant [[INT_SIZE]] c"_ZTSi\00"
+// CHECK: @[[LAMBDA:[^\w]+]] = private unnamed_addr constant [[LAMBDA_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZZ4mainENKUlvE0_clEvEUlvE_\00"
+// CHECK: @[[LAMBDA_IN_DEP_INT:[^\w]+]] = private unnamed_addr constant [[DEP_INT_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZ28lambda_in_dependent_functionIiEvvEUlvE_\00",
+// CHECK: @[[LAMBDA_IN_DEP_X:[^\w]+]] = private unnamed_addr constant [[DEP_LAMBDA_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZ28lambda_in_dependent_functionIZZ4mainENKUlvE0_clEvEUlvE_EvvEUlvE_\00",
+// CHECK: @[[LAMBDA_NO_DEP:[^\w]+]] = private unnamed_addr constant [[NO_DEP_LAMBDA_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZ13lambda_no_depIidEvT_T0_EUlidE_\00",
+// CHECK: @[[LAMBDA_TWO_DEP:[^\w]+]] = private unnamed_addr constant [[DEP_LAMBDA1_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZ14lambda_two_depIZZ4mainENKUlvE0_clEvEUliE_ZZ4mainENKS0_clEvEUldE_EvvEUlvE_\00",
+// CHECK: @[[LAMBDA_TWO_DEP2:[^\w]+]] = private unnamed_addr constant [[DEP_LAMBDA2_SIZE:\[[0-9]+ x i8\]]] c"_ZTSZ14lambda_two_depIZZ4mainENKUlvE0_clEvEUldE_ZZ4mainENKS0_clEvEUliE_EvvEUlvE_\00",
extern "C" void puts(const char *) {}
@@ -65,95 +65,105 @@ template <typename KernelName, typename KernelType>
kernelFunc();
}
+template<typename KernelType>
+void unnamed_kernel_single_task(KernelType kernelFunc) {
+ kernel_single_task<KernelType>(kernelFunc);
+}
+
+template <typename KernelName, typename KernelType>
+void not_kernel_single_task(KernelType kernelFunc) {
+ kernelFunc();
+}
+
int main() {
- kernel_single_task<class kernel2>(func<Derp>);
- // CHECK: call spir_func void @_Z18kernel_single_taskIZ4mainE7kernel2PFPKcvEEvT0_(ptr noundef @_Z4funcI4DerpEDTu33__builtin_sycl_unique_stable_nameDtsrT_3strEEEv)
+ not_kernel_single_task<class kernel2>(func<Derp>);
+ // CHECK: call void @_Z22not_kernel_single_taskIZ4mainE7kernel2PFPKcvEEvT0_(ptr noundef @_Z4funcI4DerpEDTu33__builtin_sycl_unique_stable_nameDtsrT_3strEEEv)
auto l1 = []() { return 1; };
auto l2 = [](decltype(l1) *l = nullptr) { return 2; };
- kernel_single_task<class kernel3>(l2);
+ kernel_single_task<decltype(l2)>(l2);
puts(__builtin_sycl_unique_stable_name(decltype(l2)));
- // CHECK: call spir_func void @_Z18kernel_single_taskIZ4mainE7kernel3Z4mainEUlPZ4mainEUlvE_E_EvT0_
- // CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[LAMBDA_KERNEL3]] to ptr addrspace(4)))
+ // CHECK: call void @_Z18kernel_single_taskIZ4mainEUlPZ4mainEUlvE_E_S2_EvT0_
+ // CHECK: call void @puts(ptr noundef @[[LAMBDA_KERNEL3]])
constexpr const char str[] = "lalala";
static_assert(__builtin_strcmp(__builtin_sycl_unique_stable_name(decltype(str)), "_ZTSA7_Kc\0") == 0, "unexpected mangling");
int i = 0;
puts(__builtin_sycl_unique_stable_name(decltype(i++)));
- // CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[INT1]] to ptr addrspace(4)))
+ // CHECK: call void @puts(ptr noundef @[[INT1]])
// FIXME: Ensure that j is incremented because VLAs are terrible.
int j = 55;
puts(__builtin_sycl_unique_stable_name(int[++j]));
- // CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[STRING]] to ptr addrspace(4)))
+ // CHECK: call void @puts(ptr noundef @[[STRING]])
- // CHECK: define internal spir_func void @_Z18kernel_single_taskIZ4mainE7kernel2PFPKcvEEvT0_
- // CHECK: declare spir_func noundef ptr addrspace(4) @_Z4funcI4DerpEDTu33__builtin_sycl_unique_stable_nameDtsrT_3strEEEv
- // CHECK: define internal spir_func void @_Z18kernel_single_taskIZ4mainE7kernel3Z4mainEUlPZ4mainEUlvE_E_EvT0_
- // CHECK: define internal spir_func void @_Z18kernel_single_taskIZ4mainE6kernelZ4mainEUlvE0_EvT0_
+ // CHECK: define internal void @_Z22not_kernel_single_taskIZ4mainE7kernel2PFPKcvEEvT0_
+ // CHECK: declare noundef ptr @_Z4funcI4DerpEDTu33__builtin_sycl_unique_stable_nameDtsrT_3strEEEv
+ // CHECK: define internal void @_Z18kernel_single_taskIZ4mainEUlPZ4mainEUlvE_E_S2_EvT0_
+ // CHECK: define internal void @_Z18kernel_single_taskIZ4mainEUlvE0_S0_EvT0_
- kernel_single_task<class kernel>(
+ unnamed_kernel_single_task(
[]() {
puts(__builtin_sycl_unique_stable_name(int));
- // CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[INT2]] to ptr addrspace(4)))
+ // CHECK: call void @puts(ptr noundef @[[INT2]])
auto x = []() {};
puts(__builtin_sycl_unique_stable_name(decltype(x)));
- // CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[LAMBDA_X]] to ptr addrspace(4)))
+ // CHECK: call void @puts(ptr noundef @[[LAMBDA_X]])
DEF_IN_MACRO();
- // CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[MACRO_X]] to ptr addrspace(4)))
- // CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[MACRO_Y]] to ptr addrspace(4)))
+ // CHECK: call void @puts(ptr noundef @[[MACRO_X]])
+ // CHECK: call void @puts(ptr noundef @[[MACRO_Y]])
MACRO_CALLS_MACRO();
- // CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[MACRO_MACRO_X]] to ptr addrspace(4)))
- // CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[MACRO_MACRO_Y]] to ptr addrspace(4)))
+ // CHECK: call void @puts(ptr noundef @[[MACRO_MACRO_X]])
+ // CHECK: call void @puts(ptr noundef @[[MACRO_MACRO_Y]])
template_param<int>();
- // CHECK: call spir_func void @_Z14template_paramIiEvv
+ // CHECK: call void @_Z14template_paramIiEvv
template_param<decltype(x)>();
- // CHECK: call spir_func void @_Z14template_paramIZZ4mainENKUlvE0_clEvEUlvE_Evv
+ // CHECK: call void @_Z14template_paramIZZ4mainENKUlvE0_clEvEUlvE_Evv
lambda_in_dependent_function<int>();
- // CHECK: call spir_func void @_Z28lambda_in_dependent_functionIiEvv
+ // CHECK: call void @_Z28lambda_in_dependent_functionIiEvv
lambda_in_dependent_function<decltype(x)>();
- // CHECK: call spir_func void @_Z28lambda_in_dependent_functionIZZ4mainENKUlvE0_clEvEUlvE_Evv
+ // CHECK: call void @_Z28lambda_in_dependent_functionIZZ4mainENKUlvE0_clEvEUlvE_Evv
lambda_no_dep<int, double>(3, 5.5);
- // CHECK: call spir_func void @_Z13lambda_no_depIidEvT_T0_(i32 noundef 3, double noundef 5.500000e+00)
+ // CHECK: call void @_Z13lambda_no_depIidEvT_T0_(i32 noundef 3, double noundef 5.500000e+00)
int a = 5;
double b = 10.7;
auto y = [](int a) { return a; };
auto z = [](double b) { return b; };
lambda_two_dep<decltype(y), decltype(z)>();
- // CHECK: call spir_func void @_Z14lambda_two_depIZZ4mainENKUlvE0_clEvEUliE_ZZ4mainENKS0_clEvEUldE_Evv
+ // CHECK: call void @_Z14lambda_two_depIZZ4mainENKUlvE0_clEvEUliE_ZZ4mainENKS0_clEvEUldE_Evv
lambda_two_dep<decltype(z), decltype(y)>();
- // CHECK: call spir_func void @_Z14lambda_two_depIZZ4mainENKUlvE0_clEvEUldE_ZZ4mainENKS0_clEvEUliE_Evv
+ // CHECK: call void @_Z14lambda_two_depIZZ4mainENKUlvE0_clEvEUldE_ZZ4mainENKS0_clEvEUliE_Evv
});
}
-// CHECK: define linkonce_odr spir_func void @_Z14template_paramIiEvv
-// CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[INT3]] to ptr addrspace(4)))
+// CHECK: define linkonce_odr void @_Z14template_paramIiEvv
+// CHECK: call void @puts(ptr noundef @[[INT3]])
-// CHECK: define internal spir_func void @_Z14template_paramIZZ4mainENKUlvE0_clEvEUlvE_Evv
-// CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[LAMBDA]] to ptr addrspace(4)))
+// CHECK: define internal void @_Z14template_paramIZZ4mainENKUlvE0_clEvEUlvE_Evv
+// CHECK: call void @puts(ptr noundef @[[LAMBDA]])
-// CHECK: define linkonce_odr spir_func void @_Z28lambda_in_dependent_functionIiEvv
-// CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[LAMBDA_IN_DEP_INT]] to ptr addrspace(4)))
+// CHECK: define linkonce_odr void @_Z28lambda_in_dependent_functionIiEvv
+// CHECK: call void @puts(ptr noundef @[[LAMBDA_IN_DEP_INT]])
-// CHECK: define internal spir_func void @_Z28lambda_in_dependent_functionIZZ4mainENKUlvE0_clEvEUlvE_Evv
-// CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[LAMBDA_IN_DEP_X]] to ptr addrspace(4)))
+// CHECK: define internal void @_Z28lambda_in_dependent_functionIZZ4mainENKUlvE0_clEvEUlvE_Evv
+// CHECK: call void @puts(ptr noundef @[[LAMBDA_IN_DEP_X]])
-// CHECK: define linkonce_odr spir_func void @_Z13lambda_no_depIidEvT_T0_(i32 noundef %a, double noundef %b)
-// CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[LAMBDA_NO_DEP]] to ptr addrspace(4)))
+// CHECK: define linkonce_odr void @_Z13lambda_no_depIidEvT_T0_(i32 noundef %a, double noundef %b)
+// CHECK: call void @puts(ptr noundef @[[LAMBDA_NO_DEP]])
-// CHECK: define internal spir_func void @_Z14lambda_two_depIZZ4mainENKUlvE0_clEvEUliE_ZZ4mainENKS0_clEvEUldE_Evv
-// CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[LAMBDA_TWO_DEP]] to ptr addrspace(4)))
+// CHECK: define internal void @_Z14lambda_two_depIZZ4mainENKUlvE0_clEvEUliE_ZZ4mainENKS0_clEvEUldE_Evv
+// CHECK: call void @puts(ptr noundef @[[LAMBDA_TWO_DEP]])
-// CHECK: define internal spir_func void @_Z14lambda_two_depIZZ4mainENKUlvE0_clEvEUldE_ZZ4mainENKS0_clEvEUliE_Evv
-// CHECK: call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @[[LAMBDA_TWO_DEP2]] to ptr addrspace(4)))
+// CHECK: define internal void @_Z14lambda_two_depIZZ4mainENKUlvE0_clEvEUldE_ZZ4mainENKS0_clEvEUliE_Evv
+// CHECK: call void @puts(ptr noundef @[[LAMBDA_TWO_DEP2]])
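The rewrite above follows from compiling for the host instead of a SPIR device: on spir64 the strings produced by __builtin_sycl_unique_stable_name live in addrspace(1) and each use goes through an addrspacecast to the generic addrspace(4), while on x86_64 they are ordinary pointers and the spir_func convention disappears. Sketch of the two shapes (hypothetical IR, condensed from the check lines):
call spir_func void @puts(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @str to ptr addrspace(4)))  ; device
call void @puts(ptr noundef @str)                                                                              ; host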
diff --git a/clang/test/CoverageMapping/logical.cpp b/clang/test/CoverageMapping/logical.cpp
index 2a22d6c..caa773c 100644
--- a/clang/test/CoverageMapping/logical.cpp
+++ b/clang/test/CoverageMapping/logical.cpp
@@ -1,27 +1,31 @@
// RUN: %clang_cc1 -mllvm -emptyline-comment-coverage=false -fprofile-instrument=clang -fcoverage-mapping -dump-coverage-mapping -emit-llvm-only -main-file-name logical.cpp %s | FileCheck %s
// RUN: %clang_cc1 -mllvm -emptyline-comment-coverage=false -fcoverage-mcdc -fprofile-instrument=clang -fcoverage-mapping -dump-coverage-mapping -emit-llvm-only -main-file-name logical.cpp %s | FileCheck %s -check-prefix=MCDC
-int main() { // CHECK: File 0, [[@LINE]]:12 -> [[@LINE+23]]:2 = #0
+int main() { // CHECK: File 0, [[@LINE]]:12 -> [[@LINE+27]]:2 = #0
bool bt = true;
bool bf = false; // MCDC: Decision,File 0, [[@LINE+1]]:12 -> [[@LINE+1]]:20 = M:3, C:2
bool a = bt && bf; // CHECK-NEXT: File 0, [[@LINE]]:12 -> [[@LINE]]:14 = #0
// CHECK-NEXT: Branch,File 0, [[@LINE-1]]:12 -> [[@LINE-1]]:14 = #1, (#0 - #1)
- // CHECK-NEXT: File 0, [[@LINE-2]]:18 -> [[@LINE-2]]:20 = #1
- // CHECK-NEXT: Branch,File 0, [[@LINE-3]]:18 -> [[@LINE-3]]:20 = #2, (#1 - #2)
+ // CHECK-NEXT: Gap,File 0, [[@LINE-2]]:14 -> [[@LINE-2]]:18 = #1
+ // CHECK-NEXT: File 0, [[@LINE-3]]:18 -> [[@LINE-3]]:20 = #1
+ // CHECK-NEXT: Branch,File 0, [[@LINE-4]]:18 -> [[@LINE-4]]:20 = #2, (#1 - #2)
// MCDC: Decision,File 0, [[@LINE+1]]:7 -> [[@LINE+2]]:9 = M:6, C:2
a = bt && // CHECK-NEXT: File 0, [[@LINE]]:7 -> [[@LINE]]:9 = #0
bf; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:7 -> [[@LINE-1]]:9 = #3, (#0 - #3)
- // CHECK-NEXT: File 0, [[@LINE-1]]:7 -> [[@LINE-1]]:9 = #3
- // CHECK-NEXT: Branch,File 0, [[@LINE-2]]:7 -> [[@LINE-2]]:9 = #4, (#3 - #4)
+ // CHECK-NEXT: Gap,File 0, [[@LINE-2]]:9 -> [[@LINE-1]]:7 = #3
+ // CHECK-NEXT: File 0, [[@LINE-2]]:7 -> [[@LINE-2]]:9 = #3
+ // CHECK-NEXT: Branch,File 0, [[@LINE-3]]:7 -> [[@LINE-3]]:9 = #4, (#3 - #4)
// MCDC: Decision,File 0, [[@LINE+1]]:7 -> [[@LINE+1]]:15 = M:9, C:2
a = bf || bt; // CHECK-NEXT: File 0, [[@LINE]]:7 -> [[@LINE]]:9 = #0
// CHECK-NEXT: Branch,File 0, [[@LINE-1]]:7 -> [[@LINE-1]]:9 = (#0 - #5), #5
- // CHECK-NEXT: File 0, [[@LINE-2]]:13 -> [[@LINE-2]]:15 = #5
- // CHECK-NEXT: Branch,File 0, [[@LINE-3]]:13 -> [[@LINE-3]]:15 = (#5 - #6), #6
+ // CHECK-NEXT: Gap,File 0, [[@LINE-2]]:9 -> [[@LINE-2]]:13 = #5
+ // CHECK-NEXT: File 0, [[@LINE-3]]:13 -> [[@LINE-3]]:15 = #5
+ // CHECK-NEXT: Branch,File 0, [[@LINE-4]]:13 -> [[@LINE-4]]:15 = (#5 - #6), #6
// MCDC: Decision,File 0, [[@LINE+1]]:7 -> [[@LINE+2]]:9 = M:12, C:2
a = bf || // CHECK-NEXT: File 0, [[@LINE]]:7 -> [[@LINE]]:9 = #0
bt; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:7 -> [[@LINE-1]]:9 = (#0 - #7), #7
- // CHECK-NEXT: File 0, [[@LINE-1]]:7 -> [[@LINE-1]]:9 = #7
- // CHECK-NEXT: Branch,File 0, [[@LINE-2]]:7 -> [[@LINE-2]]:9 = (#7 - #8), #8
+ // CHECK-NEXT: Gap,File 0, [[@LINE-2]]:9 -> [[@LINE-1]]:7 = #7
+ // CHECK-NEXT: File 0, [[@LINE-2]]:7 -> [[@LINE-2]]:9 = #7
+ // CHECK-NEXT: Branch,File 0, [[@LINE-3]]:7 -> [[@LINE-3]]:9 = (#7 - #8), #8
return 0;
}
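Worked example of the new Gap regions, inferred from the first check group above for `bool a = bt && bf;` (1-based columns):
bool a = bt && bf;
// File 0, cols 12-14 -> the LHS `bt`, counted under the enclosing region #0
// Gap,    cols 14-18 -> the ` && ` between the operands, carrying #1, the count of RHS evaluations
// File 0, cols 18-20 -> the RHS `bf`, also counted as #1
The same pattern repeats for the multi-line `&&`/`||` forms, where the gap spans from the end of the left operand to the start of the right one.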
diff --git a/clang/test/CodeGen/AArch64/debug-sve-vector-types.c b/clang/test/DebugInfo/AArch64/sve-vector-types.c
index ca592b1..ca592b1 100644
--- a/clang/test/CodeGen/AArch64/debug-sve-vector-types.c
+++ b/clang/test/DebugInfo/AArch64/sve-vector-types.c
diff --git a/clang/test/CodeGen/AArch64/debug-sve-vectorx2-types.c b/clang/test/DebugInfo/AArch64/sve-vectorx2-types.c
index 9884b40..9884b40 100644
--- a/clang/test/CodeGen/AArch64/debug-sve-vectorx2-types.c
+++ b/clang/test/DebugInfo/AArch64/sve-vectorx2-types.c
diff --git a/clang/test/CodeGen/AArch64/debug-sve-vectorx3-types.c b/clang/test/DebugInfo/AArch64/sve-vectorx3-types.c
index 9855f85..9855f85 100644
--- a/clang/test/CodeGen/AArch64/debug-sve-vectorx3-types.c
+++ b/clang/test/DebugInfo/AArch64/sve-vectorx3-types.c
diff --git a/clang/test/CodeGen/AArch64/debug-sve-vectorx4-types.c b/clang/test/DebugInfo/AArch64/sve-vectorx4-types.c
index 6f302d3..6f302d3 100644
--- a/clang/test/CodeGen/AArch64/debug-sve-vectorx4-types.c
+++ b/clang/test/DebugInfo/AArch64/sve-vectorx4-types.c
diff --git a/clang/test/CodeGen/AArch64/debug-types.c b/clang/test/DebugInfo/AArch64/types.c
index 19de216..19de216 100644
--- a/clang/test/CodeGen/AArch64/debug-types.c
+++ b/clang/test/DebugInfo/AArch64/types.c
diff --git a/clang/test/CodeGen/assignment-tracking/assignment-tracking.cpp b/clang/test/DebugInfo/AssignmentTracking/assignment-tracking.cpp
index b7309af..b7309af 100644
--- a/clang/test/CodeGen/assignment-tracking/assignment-tracking.cpp
+++ b/clang/test/DebugInfo/AssignmentTracking/assignment-tracking.cpp
diff --git a/clang/test/CodeGen/assignment-tracking/flag.cpp b/clang/test/DebugInfo/AssignmentTracking/flag.cpp
index 3bd974f..3bd974f 100644
--- a/clang/test/CodeGen/assignment-tracking/flag.cpp
+++ b/clang/test/DebugInfo/AssignmentTracking/flag.cpp
diff --git a/clang/test/CodeGen/assignment-tracking/memcpy-fragment.cpp b/clang/test/DebugInfo/AssignmentTracking/memcpy-fragment.cpp
index 34fc804..34fc804 100644
--- a/clang/test/CodeGen/assignment-tracking/memcpy-fragment.cpp
+++ b/clang/test/DebugInfo/AssignmentTracking/memcpy-fragment.cpp
diff --git a/clang/test/CodeGen/assignment-tracking/nested-scope.cpp b/clang/test/DebugInfo/AssignmentTracking/nested-scope.cpp
index 7d91882..7d91882 100644
--- a/clang/test/CodeGen/assignment-tracking/nested-scope.cpp
+++ b/clang/test/DebugInfo/AssignmentTracking/nested-scope.cpp
diff --git a/clang/test/CodeGen/attr-btf_tag-typedef.c b/clang/test/DebugInfo/BPF/attr-btf_tag-typedef.c
index 1f00246..1f00246 100644
--- a/clang/test/CodeGen/attr-btf_tag-typedef.c
+++ b/clang/test/DebugInfo/BPF/attr-btf_tag-typedef.c
diff --git a/clang/test/CodeGen/attr-btf_type_tag-func.c b/clang/test/DebugInfo/BPF/attr-btf_type_tag-func.c
index dbb8864..dbb8864 100644
--- a/clang/test/CodeGen/attr-btf_type_tag-func.c
+++ b/clang/test/DebugInfo/BPF/attr-btf_type_tag-func.c
diff --git a/clang/test/CodeGen/bpf-attr-type-tag-atomic.c b/clang/test/DebugInfo/BPF/bpf-attr-type-tag-atomic.c
index a10a45d..a10a45d 100644
--- a/clang/test/CodeGen/bpf-attr-type-tag-atomic.c
+++ b/clang/test/DebugInfo/BPF/bpf-attr-type-tag-atomic.c
diff --git a/clang/test/CodeGen/bpf-debug-info-extern-func.c b/clang/test/DebugInfo/BPF/bpf-debug-info-extern-func.c
index e87c8be..e87c8be 100644
--- a/clang/test/CodeGen/bpf-debug-info-extern-func.c
+++ b/clang/test/DebugInfo/BPF/bpf-debug-info-extern-func.c
diff --git a/clang/test/CodeGen/builtin-preserve-access-index-nonptr.c b/clang/test/DebugInfo/BPF/builtin-preserve-access-index-nonptr.c
index 319498c..319498c 100644
--- a/clang/test/CodeGen/builtin-preserve-access-index-nonptr.c
+++ b/clang/test/DebugInfo/BPF/builtin-preserve-access-index-nonptr.c
diff --git a/clang/test/CodeGen/builtins-bpf-preserve-field-info-1.c b/clang/test/DebugInfo/BPF/builtins-bpf-preserve-field-info-1.c
index e0b289d..e0b289d 100644
--- a/clang/test/CodeGen/builtins-bpf-preserve-field-info-1.c
+++ b/clang/test/DebugInfo/BPF/builtins-bpf-preserve-field-info-1.c
diff --git a/clang/test/CodeGen/builtins-bpf-preserve-field-info-2.c b/clang/test/DebugInfo/BPF/builtins-bpf-preserve-field-info-2.c
index ad4f64a..ad4f64a 100644
--- a/clang/test/CodeGen/builtins-bpf-preserve-field-info-2.c
+++ b/clang/test/DebugInfo/BPF/builtins-bpf-preserve-field-info-2.c
diff --git a/clang/test/CodeGen/builtins-bpf-preserve-field-info-3.c b/clang/test/DebugInfo/BPF/builtins-bpf-preserve-field-info-3.c
index 53b4489..a8cc073 100644
--- a/clang/test/CodeGen/builtins-bpf-preserve-field-info-3.c
+++ b/clang/test/DebugInfo/BPF/builtins-bpf-preserve-field-info-3.c
@@ -37,5 +37,5 @@ unsigned unit3() {
// CHECK: call i32 @llvm.bpf.preserve.type.info(i32 5, i64 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[ENUM_AA]]
// CHECK: ![[ENUM_AA]] = !DICompositeType(tag: DW_TAG_enumeration_type, name: "AA"
-// CHECK: ![[STRUCT_S]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "s"
// CHECK: ![[TYPEDEF_INT]] = !DIDerivedType(tag: DW_TAG_typedef, name: "__int"
+// CHECK: ![[STRUCT_S]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "s"
diff --git a/clang/test/CodeGen/builtins-bpf-preserve-field-info-4.c b/clang/test/DebugInfo/BPF/builtins-bpf-preserve-field-info-4.c
index 993f94f..993f94f 100644
--- a/clang/test/CodeGen/builtins-bpf-preserve-field-info-4.c
+++ b/clang/test/DebugInfo/BPF/builtins-bpf-preserve-field-info-4.c
diff --git a/clang/test/CodeGenCXX/2006-11-20-GlobalSymbols.cpp b/clang/test/DebugInfo/CXX/2006-11-20-GlobalSymbols.cpp
index 74a7fb6..74a7fb6 100644
--- a/clang/test/CodeGenCXX/2006-11-20-GlobalSymbols.cpp
+++ b/clang/test/DebugInfo/CXX/2006-11-20-GlobalSymbols.cpp
diff --git a/clang/test/CodeGenCXX/2007-01-02-UnboundedArray.cpp b/clang/test/DebugInfo/CXX/2007-01-02-UnboundedArray.cpp
index c76b7ef..c76b7ef 100644
--- a/clang/test/CodeGenCXX/2007-01-02-UnboundedArray.cpp
+++ b/clang/test/DebugInfo/CXX/2007-01-02-UnboundedArray.cpp
diff --git a/clang/test/CodeGenCXX/2009-03-17-dbg.cpp b/clang/test/DebugInfo/CXX/2009-03-17.cpp
index 22d9059..22d9059 100644
--- a/clang/test/CodeGenCXX/2009-03-17-dbg.cpp
+++ b/clang/test/DebugInfo/CXX/2009-03-17.cpp
diff --git a/clang/test/CodeGenCXX/2009-06-16-DebugInfoCrash.cpp b/clang/test/DebugInfo/CXX/2009-06-16-Crash.cpp
index 870e15c..870e15c 100644
--- a/clang/test/CodeGenCXX/2009-06-16-DebugInfoCrash.cpp
+++ b/clang/test/DebugInfo/CXX/2009-06-16-Crash.cpp
diff --git a/clang/test/CodeGenCXX/2010-05-10-Var-DbgInfo.cpp b/clang/test/DebugInfo/CXX/2010-05-10-Var.cpp
index 2b39e7d..2b39e7d 100644
--- a/clang/test/CodeGenCXX/2010-05-10-Var-DbgInfo.cpp
+++ b/clang/test/DebugInfo/CXX/2010-05-10-Var.cpp
diff --git a/clang/test/CodeGenCXX/2010-05-12-PtrToMember-Dbg.cpp b/clang/test/DebugInfo/CXX/2010-05-12-PtrToMember.cpp
index 355c3c9..355c3c9 100644
--- a/clang/test/CodeGenCXX/2010-05-12-PtrToMember-Dbg.cpp
+++ b/clang/test/DebugInfo/CXX/2010-05-12-PtrToMember.cpp
diff --git a/clang/test/CodeGenCXX/2010-06-21-LocalVarDbg.cpp b/clang/test/DebugInfo/CXX/2010-06-21-LocalVarDbg.cpp
index c0c8bf6..c0c8bf6 100644
--- a/clang/test/CodeGenCXX/2010-06-21-LocalVarDbg.cpp
+++ b/clang/test/DebugInfo/CXX/2010-06-21-LocalVarDbg.cpp
diff --git a/clang/test/CodeGenCXX/2010-07-23-DeclLoc.cpp b/clang/test/DebugInfo/CXX/2010-07-23-DeclLoc.cpp
index ef589e0..ef589e0 100644
--- a/clang/test/CodeGenCXX/2010-07-23-DeclLoc.cpp
+++ b/clang/test/DebugInfo/CXX/2010-07-23-DeclLoc.cpp
diff --git a/clang/test/CodeGenCXX/Inputs/debug-info-class-limited.cpp b/clang/test/DebugInfo/CXX/Inputs/class-limited.cpp
index 34a1cfa..34a1cfa 100644
--- a/clang/test/CodeGenCXX/Inputs/debug-info-class-limited.cpp
+++ b/clang/test/DebugInfo/CXX/Inputs/class-limited.cpp
diff --git a/clang/test/CodeGenCXX/PR20038.cpp b/clang/test/DebugInfo/CXX/PR20038.cpp
index b6d12f6..b6d12f6 100644
--- a/clang/test/CodeGenCXX/PR20038.cpp
+++ b/clang/test/DebugInfo/CXX/PR20038.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-access.cpp b/clang/test/DebugInfo/CXX/access.cpp
index 9f2c044..9f2c044 100644
--- a/clang/test/CodeGenCXX/debug-info-access.cpp
+++ b/clang/test/DebugInfo/CXX/access.cpp
diff --git a/clang/test/CodeGenCXX/aix-static-init-debug-info.cpp b/clang/test/DebugInfo/CXX/aix-static-init.cpp
index 6453e43..6453e43 100644
--- a/clang/test/CodeGenCXX/aix-static-init-debug-info.cpp
+++ b/clang/test/DebugInfo/CXX/aix-static-init.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-alias.cpp b/clang/test/DebugInfo/CXX/alias.cpp
index fbb2ad6..fbb2ad6 100644
--- a/clang/test/CodeGenCXX/debug-info-alias.cpp
+++ b/clang/test/DebugInfo/CXX/alias.cpp
diff --git a/clang/test/CodeGenCXX/dbg-info-all-calls-described.cpp b/clang/test/DebugInfo/CXX/all-calls-described.cpp
index e64e07c..e64e07c 100644
--- a/clang/test/CodeGenCXX/dbg-info-all-calls-described.cpp
+++ b/clang/test/DebugInfo/CXX/all-calls-described.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-anon-namespace.cpp b/clang/test/DebugInfo/CXX/anon-namespace.cpp
index 56c8528..56c8528 100644
--- a/clang/test/CodeGenCXX/debug-info-anon-namespace.cpp
+++ b/clang/test/DebugInfo/CXX/anon-namespace.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-anon-union-vars.cpp b/clang/test/DebugInfo/CXX/anon-union-vars.cpp
index 61b3c7c..3aca4e1 100644
--- a/clang/test/CodeGenCXX/debug-info-anon-union-vars.cpp
+++ b/clang/test/DebugInfo/CXX/anon-union-vars.cpp
@@ -46,7 +46,7 @@ void instantiate(int x) {
// CHECK: !DIGlobalVariable(name: "c",{{.*}} file: [[FILE:.*]], line: 6,{{.*}} isLocal: true, isDefinition: true
// CHECK: !DIGlobalVariable(name: "d",{{.*}} file: [[FILE]], line: 6,{{.*}} isLocal: true, isDefinition: true
-// CHECK: [[FILE]] = !DIFile(filename: "{{.*}}debug-info-anon-union-vars.cpp",
+// CHECK: [[FILE]] = !DIFile(filename: "{{.*}}anon-union-vars.cpp",
// CHECK: !DIGlobalVariable(name: "a",{{.*}} file: [[FILE]], line: 6,{{.*}} isLocal: true, isDefinition: true
// CHECK: !DIGlobalVariable(name: "b",{{.*}} file: [[FILE]], line: 6,{{.*}} isLocal: true, isDefinition: true
// CHECK: !DIGlobalVariable(name: "result", {{.*}} isLocal: false, isDefinition: true
diff --git a/clang/test/CodeGenCXX/debug-info-artificial-arg.cpp b/clang/test/DebugInfo/CXX/artificial-arg.cpp
index a0cf131..a0cf131 100644
--- a/clang/test/CodeGenCXX/debug-info-artificial-arg.cpp
+++ b/clang/test/DebugInfo/CXX/artificial-arg.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-atexit-stub.cpp b/clang/test/DebugInfo/CXX/atexit-stub.cpp
index ca9bc3a..ca9bc3a 100644
--- a/clang/test/CodeGenCXX/debug-info-atexit-stub.cpp
+++ b/clang/test/DebugInfo/CXX/atexit-stub.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-auto-return.cpp b/clang/test/DebugInfo/CXX/auto-return.cpp
index c7a97ba1..c7a97ba1 100644
--- a/clang/test/CodeGenCXX/debug-info-auto-return.cpp
+++ b/clang/test/DebugInfo/CXX/auto-return.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-block-invocation-linkage-name.cpp b/clang/test/DebugInfo/CXX/block-invocation-linkage-name.cpp
index 5fadae9..5fadae9 100644
--- a/clang/test/CodeGenCXX/debug-info-block-invocation-linkage-name.cpp
+++ b/clang/test/DebugInfo/CXX/block-invocation-linkage-name.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-blocks.cpp b/clang/test/DebugInfo/CXX/blocks.cpp
index e05e2ba..e05e2ba 100644
--- a/clang/test/CodeGenCXX/debug-info-blocks.cpp
+++ b/clang/test/DebugInfo/CXX/blocks.cpp
diff --git a/clang/test/CodeGenCXX/bpf-debug-structors.cpp b/clang/test/DebugInfo/CXX/bpf-structors.cpp
index c4c9848..c4c9848 100644
--- a/clang/test/CodeGenCXX/bpf-debug-structors.cpp
+++ b/clang/test/DebugInfo/CXX/bpf-structors.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-byval.cpp b/clang/test/DebugInfo/CXX/byval.cpp
index ac122ec..ac122ec 100644
--- a/clang/test/CodeGenCXX/debug-info-byval.cpp
+++ b/clang/test/DebugInfo/CXX/byval.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-calling-conventions.cpp b/clang/test/DebugInfo/CXX/calling-conventions.cpp
index db7fbd2..db7fbd2 100644
--- a/clang/test/CodeGenCXX/debug-info-calling-conventions.cpp
+++ b/clang/test/DebugInfo/CXX/calling-conventions.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-char16.cpp b/clang/test/DebugInfo/CXX/char16.cpp
index 83ffea6..83ffea6 100644
--- a/clang/test/CodeGenCXX/debug-info-char16.cpp
+++ b/clang/test/DebugInfo/CXX/char16.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-class-limited-plugin.test b/clang/test/DebugInfo/CXX/class-limited-plugin.test
index 17248d5..1a2cc22 100644
--- a/clang/test/CodeGenCXX/debug-info-class-limited-plugin.test
+++ b/clang/test/DebugInfo/CXX/class-limited-plugin.test
@@ -1,2 +1,2 @@
-RUN: %clang_cc1 -emit-llvm -debug-info-kind=limited -o - -load %llvmshlibdir/PrintFunctionNames%pluginext -add-plugin print-fns %S/Inputs/debug-info-class-limited.cpp 2>&1 | FileCheck %S/Inputs/debug-info-class-limited.cpp
+RUN: %clang_cc1 -emit-llvm -debug-info-kind=limited -o - -load %llvmshlibdir/PrintFunctionNames%pluginext -add-plugin print-fns %S/Inputs/class-limited.cpp 2>&1 | FileCheck %S/Inputs/class-limited.cpp
REQUIRES: plugins, examples
diff --git a/clang/test/DebugInfo/CXX/class-limited.test b/clang/test/DebugInfo/CXX/class-limited.test
new file mode 100644
index 0000000..b1279d9
--- /dev/null
+++ b/clang/test/DebugInfo/CXX/class-limited.test
@@ -0,0 +1 @@
+RUN: %clang_cc1 -emit-llvm -debug-info-kind=limited %S/Inputs/class-limited.cpp -o - | FileCheck %S/Inputs/class-limited.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-class-nolimit.cpp b/clang/test/DebugInfo/CXX/class-nolimit.cpp
index b184b9e..b184b9e 100644
--- a/clang/test/CodeGenCXX/debug-info-class-nolimit.cpp
+++ b/clang/test/DebugInfo/CXX/class-nolimit.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-class-optzns.cpp b/clang/test/DebugInfo/CXX/class-optzns.cpp
index cf44570..cf44570 100644
--- a/clang/test/CodeGenCXX/debug-info-class-optzns.cpp
+++ b/clang/test/DebugInfo/CXX/class-optzns.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-class.cpp b/clang/test/DebugInfo/CXX/class.cpp
index aa24a63..aa24a63 100644
--- a/clang/test/CodeGenCXX/debug-info-class.cpp
+++ b/clang/test/DebugInfo/CXX/class.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-codeview-display-name.cpp b/clang/test/DebugInfo/CXX/codeview-display-name.cpp
index 8200a65..8200a65 100644
--- a/clang/test/CodeGenCXX/debug-info-codeview-display-name.cpp
+++ b/clang/test/DebugInfo/CXX/codeview-display-name.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-codeview-heapallocsite.cpp b/clang/test/DebugInfo/CXX/codeview-heapallocsite.cpp
index 6468b9f..6468b9f 100644
--- a/clang/test/CodeGenCXX/debug-info-codeview-heapallocsite.cpp
+++ b/clang/test/DebugInfo/CXX/codeview-heapallocsite.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-codeview-injected-class.cpp b/clang/test/DebugInfo/CXX/codeview-injected-class.cpp
index b421b2b..b421b2b 100644
--- a/clang/test/CodeGenCXX/debug-info-codeview-injected-class.cpp
+++ b/clang/test/DebugInfo/CXX/codeview-injected-class.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-codeview-nested-types.cpp b/clang/test/DebugInfo/CXX/codeview-nested-types.cpp
index 8caf177..8caf177 100644
--- a/clang/test/CodeGenCXX/debug-info-codeview-nested-types.cpp
+++ b/clang/test/DebugInfo/CXX/codeview-nested-types.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-codeview-nodebug.cpp b/clang/test/DebugInfo/CXX/codeview-nodebug.cpp
index c57133d..c57133d 100644
--- a/clang/test/CodeGenCXX/debug-info-codeview-nodebug.cpp
+++ b/clang/test/DebugInfo/CXX/codeview-nodebug.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-codeview-template-literal.cpp b/clang/test/DebugInfo/CXX/codeview-template-literal.cpp
index 49827f7..49827f7 100644
--- a/clang/test/CodeGenCXX/debug-info-codeview-template-literal.cpp
+++ b/clang/test/DebugInfo/CXX/codeview-template-literal.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-codeview-template-type.cpp b/clang/test/DebugInfo/CXX/codeview-template-type.cpp
index be37190..be37190 100644
--- a/clang/test/CodeGenCXX/debug-info-codeview-template-type.cpp
+++ b/clang/test/DebugInfo/CXX/codeview-template-type.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-codeview-unnamed.cpp b/clang/test/DebugInfo/CXX/codeview-unnamed.cpp
index 30815bd..30815bd 100644
--- a/clang/test/CodeGenCXX/debug-info-codeview-unnamed.cpp
+++ b/clang/test/DebugInfo/CXX/codeview-unnamed.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-codeview-var-templates.cpp b/clang/test/DebugInfo/CXX/codeview-var-templates.cpp
index dec4c01..dec4c01 100644
--- a/clang/test/CodeGenCXX/debug-info-codeview-var-templates.cpp
+++ b/clang/test/DebugInfo/CXX/codeview-var-templates.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-coff.cpp b/clang/test/DebugInfo/CXX/coff.cpp
index 2535c5c..2535c5c 100644
--- a/clang/test/CodeGenCXX/debug-info-coff.cpp
+++ b/clang/test/DebugInfo/CXX/coff.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-composite-cc.cpp b/clang/test/DebugInfo/CXX/composite-cc.cpp
index d4d4046..d4d4046 100644
--- a/clang/test/CodeGenCXX/debug-info-composite-cc.cpp
+++ b/clang/test/DebugInfo/CXX/composite-cc.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-composite-triviality-fwd-decl.cpp b/clang/test/DebugInfo/CXX/composite-triviality-fwd-decl.cpp
index 812e3ed..812e3ed 100644
--- a/clang/test/CodeGenCXX/debug-info-composite-triviality-fwd-decl.cpp
+++ b/clang/test/DebugInfo/CXX/composite-triviality-fwd-decl.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-composite-triviality.cpp b/clang/test/DebugInfo/CXX/composite-triviality.cpp
index 962b827..962b827 100644
--- a/clang/test/CodeGenCXX/debug-info-composite-triviality.cpp
+++ b/clang/test/DebugInfo/CXX/composite-triviality.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-context.cpp b/clang/test/DebugInfo/CXX/context.cpp
index 1f7fa04..1f7fa04 100644
--- a/clang/test/CodeGenCXX/debug-info-context.cpp
+++ b/clang/test/DebugInfo/CXX/context.cpp
diff --git a/clang/test/CodeGenCXX/cp-blocks-linetables.cpp b/clang/test/DebugInfo/CXX/cp-blocks-linetables.cpp
index ca8cba8..ca8cba8 100644
--- a/clang/test/CodeGenCXX/cp-blocks-linetables.cpp
+++ b/clang/test/DebugInfo/CXX/cp-blocks-linetables.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-ctor-homing-flag.cpp b/clang/test/DebugInfo/CXX/ctor-homing-flag.cpp
index 4398fa3..4398fa3 100644
--- a/clang/test/CodeGenCXX/debug-info-ctor-homing-flag.cpp
+++ b/clang/test/DebugInfo/CXX/ctor-homing-flag.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-ctor.cpp b/clang/test/DebugInfo/CXX/ctor.cpp
index 16bc54d..16bc54d 100644
--- a/clang/test/CodeGenCXX/debug-info-ctor.cpp
+++ b/clang/test/DebugInfo/CXX/ctor.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-ctor2.cpp b/clang/test/DebugInfo/CXX/ctor2.cpp
index 95b0608..95b0608 100644
--- a/clang/test/CodeGenCXX/debug-info-ctor2.cpp
+++ b/clang/test/DebugInfo/CXX/ctor2.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-cxx0x.cpp b/clang/test/DebugInfo/CXX/cxx0x.cpp
index c21a0bb..c21a0bb 100644
--- a/clang/test/CodeGenCXX/debug-info-cxx0x.cpp
+++ b/clang/test/DebugInfo/CXX/cxx0x.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-cxx1y.cpp b/clang/test/DebugInfo/CXX/cxx1y.cpp
index 012eb38..012eb38 100644
--- a/clang/test/CodeGenCXX/debug-info-cxx1y.cpp
+++ b/clang/test/DebugInfo/CXX/cxx1y.cpp
diff --git a/clang/test/CodeGenCXX/debug-info.cpp b/clang/test/DebugInfo/CXX/debug-info.cpp
index 9cf26ba..9cf26ba 100644
--- a/clang/test/CodeGenCXX/debug-info.cpp
+++ b/clang/test/DebugInfo/CXX/debug-info.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-decl-nested.cpp b/clang/test/DebugInfo/CXX/decl-nested.cpp
index 8d89f41..8d89f41 100644
--- a/clang/test/CodeGenCXX/debug-info-decl-nested.cpp
+++ b/clang/test/DebugInfo/CXX/decl-nested.cpp
diff --git a/clang/test/CodeGenCXX/defaulted-template-alias.cpp b/clang/test/DebugInfo/CXX/defaulted-template-alias.cpp
index a038aa0..a038aa0 100644
--- a/clang/test/CodeGenCXX/defaulted-template-alias.cpp
+++ b/clang/test/DebugInfo/CXX/defaulted-template-alias.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-deleted.cpp b/clang/test/DebugInfo/CXX/deleted.cpp
index 564f7ff..564f7ff 100644
--- a/clang/test/CodeGenCXX/debug-info-deleted.cpp
+++ b/clang/test/DebugInfo/CXX/deleted.cpp
diff --git a/clang/test/CodeGenCXX/dependent-template-alias.cpp b/clang/test/DebugInfo/CXX/dependent-template-alias.cpp
index 324b16f..324b16f 100644
--- a/clang/test/CodeGenCXX/dependent-template-alias.cpp
+++ b/clang/test/DebugInfo/CXX/dependent-template-alias.cpp
diff --git a/clang/test/DebugInfo/CXX/dependent-template-type-scope.cpp b/clang/test/DebugInfo/CXX/dependent-template-type-scope.cpp
new file mode 100644
index 0000000..25a4d87
--- /dev/null
+++ b/clang/test/DebugInfo/CXX/dependent-template-type-scope.cpp
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -debug-info-kind=standalone -o - %s | FileCheck %s
+
+template <typename T = int>
+struct Y {
+ typedef int outside;
+ outside o;
+};
+
+Y<> y;
+
+// CHECK: ![[Y:.*]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Y<int>", {{.*}}identifier: "_ZTS1YIiE")
+// CHECK: !DIDerivedType(tag: DW_TAG_typedef, name: "outside", scope: ![[Y]],
diff --git a/clang/test/CodeGenCXX/debug-info-destroy-helper.cpp b/clang/test/DebugInfo/CXX/destroy-helper.cpp
index d685e61..d685e61 100644
--- a/clang/test/CodeGenCXX/debug-info-destroy-helper.cpp
+++ b/clang/test/DebugInfo/CXX/destroy-helper.cpp
diff --git a/clang/test/CodeGenCXX/destructor-debug-info.cpp b/clang/test/DebugInfo/CXX/destructor.cpp
index d30c6c3..d30c6c3 100644
--- a/clang/test/CodeGenCXX/destructor-debug-info.cpp
+++ b/clang/test/DebugInfo/CXX/destructor.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-determinism.cpp b/clang/test/DebugInfo/CXX/determinism.cpp
index c0a70a0..c0a70a0 100644
--- a/clang/test/CodeGenCXX/debug-info-determinism.cpp
+++ b/clang/test/DebugInfo/CXX/determinism.cpp
diff --git a/clang/test/CodeGenCXX/difile_entry.cpp b/clang/test/DebugInfo/CXX/difile_entry.cpp
index 5fcd56e..5fcd56e 100644
--- a/clang/test/CodeGenCXX/difile_entry.cpp
+++ b/clang/test/DebugInfo/CXX/difile_entry.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-dllimport-base-class.cpp b/clang/test/DebugInfo/CXX/dllimport-base-class.cpp
index 855ecaa..855ecaa 100644
--- a/clang/test/CodeGenCXX/debug-info-dllimport-base-class.cpp
+++ b/clang/test/DebugInfo/CXX/dllimport-base-class.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-dtor-implicit-args.cpp b/clang/test/DebugInfo/CXX/dtor-implicit-args.cpp
index 4bb51dc..4bb51dc 100644
--- a/clang/test/CodeGenCXX/debug-info-dtor-implicit-args.cpp
+++ b/clang/test/DebugInfo/CXX/dtor-implicit-args.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-dup-fwd-decl.cpp b/clang/test/DebugInfo/CXX/dup-fwd-decl.cpp
index 3b23ebf..3b23ebf 100644
--- a/clang/test/CodeGenCXX/debug-info-dup-fwd-decl.cpp
+++ b/clang/test/DebugInfo/CXX/dup-fwd-decl.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-enum-class.cpp b/clang/test/DebugInfo/CXX/enum-class.cpp
index 4860894..4860894 100644
--- a/clang/test/CodeGenCXX/debug-info-enum-class.cpp
+++ b/clang/test/DebugInfo/CXX/enum-class.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-enum-i128.cpp b/clang/test/DebugInfo/CXX/enum-i128.cpp
index 22aaf03..22aaf03 100644
--- a/clang/test/CodeGenCXX/debug-info-enum-i128.cpp
+++ b/clang/test/DebugInfo/CXX/enum-i128.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-enum-metadata-collision.cpp b/clang/test/DebugInfo/CXX/enum-metadata-collision.cpp
index dd27acd..dd27acd 100644
--- a/clang/test/CodeGenCXX/debug-info-enum-metadata-collision.cpp
+++ b/clang/test/DebugInfo/CXX/enum-metadata-collision.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-enum.cpp b/clang/test/DebugInfo/CXX/enum.cpp
index 48162b5..48162b5 100644
--- a/clang/test/CodeGenCXX/debug-info-enum.cpp
+++ b/clang/test/DebugInfo/CXX/enum.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-explicit-cast.cpp b/clang/test/DebugInfo/CXX/explicit-cast.cpp
index 028a776..028a776 100644
--- a/clang/test/CodeGenCXX/debug-info-explicit-cast.cpp
+++ b/clang/test/DebugInfo/CXX/explicit-cast.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-explicit-this.cpp b/clang/test/DebugInfo/CXX/explicit-this.cpp
index 45ab2a0..45ab2a0 100644
--- a/clang/test/CodeGenCXX/debug-info-explicit-this.cpp
+++ b/clang/test/DebugInfo/CXX/explicit-this.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-export_symbols.cpp b/clang/test/DebugInfo/CXX/export_symbols.cpp
index 19697be..19697be 100644
--- a/clang/test/CodeGenCXX/debug-info-export_symbols.cpp
+++ b/clang/test/DebugInfo/CXX/export_symbols.cpp
diff --git a/clang/test/CodeGenCXX/fdebug-info-for-profiling.cpp b/clang/test/DebugInfo/CXX/fdebug-info-for-profiling.cpp
index e468a80..e468a80 100644
--- a/clang/test/CodeGenCXX/fdebug-info-for-profiling.cpp
+++ b/clang/test/DebugInfo/CXX/fdebug-info-for-profiling.cpp
diff --git a/clang/test/CodeGenCXX/field-access-debug-info.cpp b/clang/test/DebugInfo/CXX/field-access.cpp
index 38c06f1..38c06f1 100644
--- a/clang/test/CodeGenCXX/field-access-debug-info.cpp
+++ b/clang/test/DebugInfo/CXX/field-access.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-fn-template.cpp b/clang/test/DebugInfo/CXX/fn-template.cpp
index 2aed4be..2aed4be 100644
--- a/clang/test/CodeGenCXX/debug-info-fn-template.cpp
+++ b/clang/test/DebugInfo/CXX/fn-template.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-friend.cpp b/clang/test/DebugInfo/CXX/friend.cpp
index b103b14..b103b14 100644
--- a/clang/test/CodeGenCXX/debug-info-friend.cpp
+++ b/clang/test/DebugInfo/CXX/friend.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-function-context.cpp b/clang/test/DebugInfo/CXX/function-context.cpp
index 29c87b6..da5fa5a 100644
--- a/clang/test/CodeGenCXX/debug-info-function-context.cpp
+++ b/clang/test/DebugInfo/CXX/function-context.cpp
@@ -1,5 +1,5 @@
// RUN: %clang_cc1 -emit-llvm -debug-info-kind=limited -triple x86_64-pc-linux-gnu %s -fdebug-compilation-dir=%S \
-// RUN: -dwarf-version=5 -main-file-name debug-info-function-context.cpp -o - | FileCheck %s
+// RUN: -dwarf-version=5 -main-file-name function-context.cpp -o - | FileCheck %s

struct C {
void member_function();
diff --git a/clang/test/CodeGenCXX/debug-info-fwd-ref.cpp b/clang/test/DebugInfo/CXX/fwd-ref.cpp
index 219e796..219e796 100644
--- a/clang/test/CodeGenCXX/debug-info-fwd-ref.cpp
+++ b/clang/test/DebugInfo/CXX/fwd-ref.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-fwd-template-param.cpp b/clang/test/DebugInfo/CXX/fwd-template-param.cpp
index 2983f84..2983f84 100644
--- a/clang/test/CodeGenCXX/debug-info-fwd-template-param.cpp
+++ b/clang/test/DebugInfo/CXX/fwd-template-param.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-gline-tables-only-codeview.cpp b/clang/test/DebugInfo/CXX/gline-tables-only-codeview.cpp
index 6b9c9a2..6b9c9a2 100644
--- a/clang/test/CodeGenCXX/debug-info-gline-tables-only-codeview.cpp
+++ b/clang/test/DebugInfo/CXX/gline-tables-only-codeview.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-gline-tables-only.cpp b/clang/test/DebugInfo/CXX/gline-tables-only.cpp
index 192169b..192169b 100644
--- a/clang/test/CodeGenCXX/debug-info-gline-tables-only.cpp
+++ b/clang/test/DebugInfo/CXX/gline-tables-only.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-global-ctor-dtor.cpp b/clang/test/DebugInfo/CXX/global-ctor-dtor.cpp
index 6264761..6264761 100644
--- a/clang/test/CodeGenCXX/debug-info-global-ctor-dtor.cpp
+++ b/clang/test/DebugInfo/CXX/global-ctor-dtor.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-global.cpp b/clang/test/DebugInfo/CXX/global.cpp
index 5abc050..5abc050 100644
--- a/clang/test/CodeGenCXX/debug-info-global.cpp
+++ b/clang/test/DebugInfo/CXX/global.cpp
diff --git a/clang/test/CodeGenCXX/globalinit-loc.cpp b/clang/test/DebugInfo/CXX/globalinit-loc.cpp
index fb482b6..fb482b6 100644
--- a/clang/test/CodeGenCXX/globalinit-loc.cpp
+++ b/clang/test/DebugInfo/CXX/globalinit-loc.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-globalinit.cpp b/clang/test/DebugInfo/CXX/globalinit.cpp
index bcadc6b..bcadc6b 100644
--- a/clang/test/CodeGenCXX/debug-info-globalinit.cpp
+++ b/clang/test/DebugInfo/CXX/globalinit.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-hotpatch-aarch64.cpp b/clang/test/DebugInfo/CXX/hotpatch-aarch64.cpp
index ff2dfc1..ff2dfc1 100644
--- a/clang/test/CodeGenCXX/debug-info-hotpatch-aarch64.cpp
+++ b/clang/test/DebugInfo/CXX/hotpatch-aarch64.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-hotpatch-arm.cpp b/clang/test/DebugInfo/CXX/hotpatch-arm.cpp
index e31c762..e31c762 100644
--- a/clang/test/CodeGenCXX/debug-info-hotpatch-arm.cpp
+++ b/clang/test/DebugInfo/CXX/hotpatch-arm.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-hotpatch.cpp b/clang/test/DebugInfo/CXX/hotpatch.cpp
index e005c9c..e005c9c 100644
--- a/clang/test/CodeGenCXX/debug-info-hotpatch.cpp
+++ b/clang/test/DebugInfo/CXX/hotpatch.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-incomplete-types.cpp b/clang/test/DebugInfo/CXX/incomplete-types.cpp
index 0bf5923..0bf5923 100644
--- a/clang/test/CodeGenCXX/debug-info-incomplete-types.cpp
+++ b/clang/test/DebugInfo/CXX/incomplete-types.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-indirect-field-decl.cpp b/clang/test/DebugInfo/CXX/indirect-field-decl.cpp
index 1b857d8..1b857d8 100644
--- a/clang/test/CodeGenCXX/debug-info-indirect-field-decl.cpp
+++ b/clang/test/DebugInfo/CXX/indirect-field-decl.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-inheriting-constructor.cpp b/clang/test/DebugInfo/CXX/inheriting-constructor.cpp
index 7918387..7918387 100644
--- a/clang/test/CodeGenCXX/debug-info-inheriting-constructor.cpp
+++ b/clang/test/DebugInfo/CXX/inheriting-constructor.cpp
diff --git a/clang/test/CodeGenCXX/inline-dllexport-member.cpp b/clang/test/DebugInfo/CXX/inline-dllexport-member.cpp
index d6b004d..d6b004d 100644
--- a/clang/test/CodeGenCXX/inline-dllexport-member.cpp
+++ b/clang/test/DebugInfo/CXX/inline-dllexport-member.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-inlined.cpp b/clang/test/DebugInfo/CXX/inlined.cpp
index d73a8a6..d73a8a6 100644
--- a/clang/test/CodeGenCXX/debug-info-inlined.cpp
+++ b/clang/test/DebugInfo/CXX/inlined.cpp
diff --git a/clang/test/CodeGenCXX/debug-lambda-expressions.cpp b/clang/test/DebugInfo/CXX/lambda-expressions.cpp
index 324c092..1604b90 100644
--- a/clang/test/CodeGenCXX/debug-lambda-expressions.cpp
+++ b/clang/test/DebugInfo/CXX/lambda-expressions.cpp
@@ -21,7 +21,7 @@ int d(int x) { D y[10]; return [x,y] { return y[x].x; }(); }
// CHECK-SAME: line: [[VAR_LINE:[0-9]+]]
// CHECK-SAME: type: ![[VAR_T:[0-9]+]]
-// CHECK: [[FILE:.*]] = !DIFile(filename: "{{.*}}debug-lambda-expressions.cpp",
+// CHECK: [[FILE:.*]] = !DIFile(filename: "{{.*}}lambda-expressions.cpp",
// CVAR:
// CHECK: !DIGlobalVariable(name: "cvar"
diff --git a/clang/test/CodeGenCXX/debug-lambda-this.cpp b/clang/test/DebugInfo/CXX/lambda-this.cpp
index 019d09c..019d09c 100644
--- a/clang/test/CodeGenCXX/debug-lambda-this.cpp
+++ b/clang/test/DebugInfo/CXX/lambda-this.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-lambda.cpp b/clang/test/DebugInfo/CXX/lambda.cpp
index 2c266d6..2c266d6 100644
--- a/clang/test/CodeGenCXX/debug-info-lambda.cpp
+++ b/clang/test/DebugInfo/CXX/lambda.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-large-constant.cpp b/clang/test/DebugInfo/CXX/large-constant.cpp
index 5a0d4d2..5a0d4d2 100644
--- a/clang/test/CodeGenCXX/debug-info-large-constant.cpp
+++ b/clang/test/DebugInfo/CXX/large-constant.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-limited-ctor.cpp b/clang/test/DebugInfo/CXX/limited-ctor.cpp
index 18adfde..18adfde 100644
--- a/clang/test/CodeGenCXX/debug-info-limited-ctor.cpp
+++ b/clang/test/DebugInfo/CXX/limited-ctor.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-limited.cpp b/clang/test/DebugInfo/CXX/limited.cpp
index 4467d20..4467d20 100644
--- a/clang/test/CodeGenCXX/debug-info-limited.cpp
+++ b/clang/test/DebugInfo/CXX/limited.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-line-if-2.cpp b/clang/test/DebugInfo/CXX/line-if-2.cpp
index 8ab96a7..8ab96a7 100644
--- a/clang/test/CodeGenCXX/debug-info-line-if-2.cpp
+++ b/clang/test/DebugInfo/CXX/line-if-2.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-line-if.cpp b/clang/test/DebugInfo/CXX/line-if.cpp
index 8f52428..8f52428 100644
--- a/clang/test/CodeGenCXX/debug-info-line-if.cpp
+++ b/clang/test/DebugInfo/CXX/line-if.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-line.cpp b/clang/test/DebugInfo/CXX/line.cpp
index 8ef0e02..8ef0e02 100644
--- a/clang/test/CodeGenCXX/debug-info-line.cpp
+++ b/clang/test/DebugInfo/CXX/line.cpp
diff --git a/clang/test/CodeGenCXX/linetable-cleanup.cpp b/clang/test/DebugInfo/CXX/linetable-cleanup.cpp
index 7e4ad9a..7e4ad9a 100644
--- a/clang/test/CodeGenCXX/linetable-cleanup.cpp
+++ b/clang/test/DebugInfo/CXX/linetable-cleanup.cpp
diff --git a/clang/test/CodeGenCXX/linetable-eh.cpp b/clang/test/DebugInfo/CXX/linetable-eh.cpp
index 362c626..362c626 100644
--- a/clang/test/CodeGenCXX/linetable-eh.cpp
+++ b/clang/test/DebugInfo/CXX/linetable-eh.cpp
diff --git a/clang/test/CodeGenCXX/linetable-fnbegin.cpp b/clang/test/DebugInfo/CXX/linetable-fnbegin.cpp
index d64b5a6..d64b5a6 100644
--- a/clang/test/CodeGenCXX/linetable-fnbegin.cpp
+++ b/clang/test/DebugInfo/CXX/linetable-fnbegin.cpp
diff --git a/clang/test/CodeGenCXX/linetable-virtual-variadic.cpp b/clang/test/DebugInfo/CXX/linetable-virtual-variadic.cpp
index 9d7cd3e..9d7cd3e 100644
--- a/clang/test/CodeGenCXX/linetable-virtual-variadic.cpp
+++ b/clang/test/DebugInfo/CXX/linetable-virtual-variadic.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-loops.cpp b/clang/test/DebugInfo/CXX/loops.cpp
index b46acb2..b46acb2 100644
--- a/clang/test/CodeGenCXX/debug-info-loops.cpp
+++ b/clang/test/DebugInfo/CXX/loops.cpp
diff --git a/clang/test/CodeGenCXX/lpad-linetable.cpp b/clang/test/DebugInfo/CXX/lpad-linetable.cpp
index d32aadf..d32aadf 100644
--- a/clang/test/CodeGenCXX/lpad-linetable.cpp
+++ b/clang/test/DebugInfo/CXX/lpad-linetable.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-member-call.cpp b/clang/test/DebugInfo/CXX/member-call.cpp
index 2b60de8..2b60de8 100644
--- a/clang/test/CodeGenCXX/debug-info-member-call.cpp
+++ b/clang/test/DebugInfo/CXX/member-call.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-member.cpp b/clang/test/DebugInfo/CXX/member.cpp
index 68d0252..68d0252 100644
--- a/clang/test/CodeGenCXX/debug-info-member.cpp
+++ b/clang/test/DebugInfo/CXX/member.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-method-nodebug.cpp b/clang/test/DebugInfo/CXX/method-nodebug.cpp
index 0301e2f..0301e2f 100644
--- a/clang/test/CodeGenCXX/debug-info-method-nodebug.cpp
+++ b/clang/test/DebugInfo/CXX/method-nodebug.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-method-spec.cpp b/clang/test/DebugInfo/CXX/method-spec.cpp
index a58e8de..a58e8de 100644
--- a/clang/test/CodeGenCXX/debug-info-method-spec.cpp
+++ b/clang/test/DebugInfo/CXX/method-spec.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-method.cpp b/clang/test/DebugInfo/CXX/method.cpp
index af7103e..af7103e 100644
--- a/clang/test/CodeGenCXX/debug-info-method.cpp
+++ b/clang/test/DebugInfo/CXX/method.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-method2.cpp b/clang/test/DebugInfo/CXX/method2.cpp
index cc19184..cc19184 100644
--- a/clang/test/CodeGenCXX/debug-info-method2.cpp
+++ b/clang/test/DebugInfo/CXX/method2.cpp
diff --git a/clang/test/CodeGenCXX/microsoft-abi-member-pointers-debug-info.cpp b/clang/test/DebugInfo/CXX/microsoft-abi-member-pointers.cpp
index 34be555..34be555 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-member-pointers-debug-info.cpp
+++ b/clang/test/DebugInfo/CXX/microsoft-abi-member-pointers.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-ms-abi.cpp b/clang/test/DebugInfo/CXX/ms-abi.cpp
index 0ce13a02..0ce13a02 100644
--- a/clang/test/CodeGenCXX/debug-info-ms-abi.cpp
+++ b/clang/test/DebugInfo/CXX/ms-abi.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-ms-anonymous-tag.cpp b/clang/test/DebugInfo/CXX/ms-anonymous-tag.cpp
index 5e2cb21..5e2cb21 100644
--- a/clang/test/CodeGenCXX/debug-info-ms-anonymous-tag.cpp
+++ b/clang/test/DebugInfo/CXX/ms-anonymous-tag.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-ms-bitfields.cpp b/clang/test/DebugInfo/CXX/ms-bitfields.cpp
index e423301..e423301 100644
--- a/clang/test/CodeGenCXX/debug-info-ms-bitfields.cpp
+++ b/clang/test/DebugInfo/CXX/ms-bitfields.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-ms-dtor-thunks.cpp b/clang/test/DebugInfo/CXX/ms-dtor-thunks.cpp
index 7018745..7018745 100644
--- a/clang/test/CodeGenCXX/debug-info-ms-dtor-thunks.cpp
+++ b/clang/test/DebugInfo/CXX/ms-dtor-thunks.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-ms-novtable.cpp b/clang/test/DebugInfo/CXX/ms-novtable.cpp
index d880060..d880060 100644
--- a/clang/test/CodeGenCXX/debug-info-ms-novtable.cpp
+++ b/clang/test/DebugInfo/CXX/ms-novtable.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-ms-ptr-to-member.cpp b/clang/test/DebugInfo/CXX/ms-ptr-to-member.cpp
index 1f4c904..1f4c904 100644
--- a/clang/test/CodeGenCXX/debug-info-ms-ptr-to-member.cpp
+++ b/clang/test/DebugInfo/CXX/ms-ptr-to-member.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-ms-vbase.cpp b/clang/test/DebugInfo/CXX/ms-vbase.cpp
index 04c9a6d..04c9a6d 100644
--- a/clang/test/CodeGenCXX/debug-info-ms-vbase.cpp
+++ b/clang/test/DebugInfo/CXX/ms-vbase.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-namespace.cpp b/clang/test/DebugInfo/CXX/namespace.cpp
index 7885883..7885883 100644
--- a/clang/test/CodeGenCXX/debug-info-namespace.cpp
+++ b/clang/test/DebugInfo/CXX/namespace.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-nested-exprs.cpp b/clang/test/DebugInfo/CXX/nested-exprs.cpp
index 8f07508..8f07508 100644
--- a/clang/test/CodeGenCXX/debug-info-nested-exprs.cpp
+++ b/clang/test/DebugInfo/CXX/nested-exprs.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-nodebug.cpp b/clang/test/DebugInfo/CXX/nodebug.cpp
index 1962b8c..1962b8c 100644
--- a/clang/test/CodeGenCXX/debug-info-nodebug.cpp
+++ b/clang/test/DebugInfo/CXX/nodebug.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-noreturn.cpp b/clang/test/DebugInfo/CXX/noreturn.cpp
index 85b8132..85b8132 100644
--- a/clang/test/CodeGenCXX/debug-info-noreturn.cpp
+++ b/clang/test/DebugInfo/CXX/noreturn.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-nrvo.cpp b/clang/test/DebugInfo/CXX/nrvo.cpp
index b36e371..b36e371 100644
--- a/clang/test/CodeGenCXX/debug-info-nrvo.cpp
+++ b/clang/test/DebugInfo/CXX/nrvo.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-nullptr.cpp b/clang/test/DebugInfo/CXX/nullptr.cpp
index 3054ef8..3054ef8 100644
--- a/clang/test/CodeGenCXX/debug-info-nullptr.cpp
+++ b/clang/test/DebugInfo/CXX/nullptr.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-object-pointer.cpp b/clang/test/DebugInfo/CXX/object-pointer.cpp
index 49079f5..49079f5 100644
--- a/clang/test/CodeGenCXX/debug-info-object-pointer.cpp
+++ b/clang/test/DebugInfo/CXX/object-pointer.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-objname.cpp b/clang/test/DebugInfo/CXX/objname.cpp
index 73d3bb4..73d3bb4 100644
--- a/clang/test/CodeGenCXX/debug-info-objname.cpp
+++ b/clang/test/DebugInfo/CXX/objname.cpp
diff --git a/clang/test/CodeGenCXX/debug-prefix-map-lambda.cpp b/clang/test/DebugInfo/CXX/prefix-map-lambda.cpp
index f0fb1a3..f0fb1a3 100644
--- a/clang/test/CodeGenCXX/debug-prefix-map-lambda.cpp
+++ b/clang/test/DebugInfo/CXX/prefix-map-lambda.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-programming-language.cpp b/clang/test/DebugInfo/CXX/programming-language.cpp
index 6953266..6953266 100644
--- a/clang/test/CodeGenCXX/debug-info-programming-language.cpp
+++ b/clang/test/DebugInfo/CXX/programming-language.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-ptr-to-member-function.cpp b/clang/test/DebugInfo/CXX/ptr-to-member-function.cpp
index a7e02e4..a7e02e4 100644
--- a/clang/test/CodeGenCXX/debug-info-ptr-to-member-function.cpp
+++ b/clang/test/DebugInfo/CXX/ptr-to-member-function.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-ptr-to-ptr.cpp b/clang/test/DebugInfo/CXX/ptr-to-ptr.cpp
index 12e50a5..12e50a5 100644
--- a/clang/test/CodeGenCXX/debug-info-ptr-to-ptr.cpp
+++ b/clang/test/DebugInfo/CXX/ptr-to-ptr.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-qualifiers.cpp b/clang/test/DebugInfo/CXX/qualifiers.cpp
index c48c9b5..c48c9b5 100644
--- a/clang/test/CodeGenCXX/debug-info-qualifiers.cpp
+++ b/clang/test/DebugInfo/CXX/qualifiers.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-range-for-var-names.cpp b/clang/test/DebugInfo/CXX/range-for-var-names.cpp
index 1cc13e1..1cc13e1 100644
--- a/clang/test/CodeGenCXX/debug-info-range-for-var-names.cpp
+++ b/clang/test/DebugInfo/CXX/range-for-var-names.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-rvalue-ref.cpp b/clang/test/DebugInfo/CXX/rvalue-ref.cpp
index c9500ee..c9500ee 100644
--- a/clang/test/CodeGenCXX/debug-info-rvalue-ref.cpp
+++ b/clang/test/DebugInfo/CXX/rvalue-ref.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-scope.cpp b/clang/test/DebugInfo/CXX/scope.cpp
index a90ad83..a90ad83 100644
--- a/clang/test/CodeGenCXX/debug-info-scope.cpp
+++ b/clang/test/DebugInfo/CXX/scope.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-scoped-class.cpp b/clang/test/DebugInfo/CXX/scoped-class.cpp
index 7424487..7424487 100644
--- a/clang/test/CodeGenCXX/debug-info-scoped-class.cpp
+++ b/clang/test/DebugInfo/CXX/scoped-class.cpp
diff --git a/clang/test/CodeGenCXX/scoped-enums-debug-info.cpp b/clang/test/DebugInfo/CXX/scoped-enums.cpp
index 131e31b..131e31b 100644
--- a/clang/test/CodeGenCXX/scoped-enums-debug-info.cpp
+++ b/clang/test/DebugInfo/CXX/scoped-enums.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-simple-template-names.cpp b/clang/test/DebugInfo/CXX/simple-template-names.cpp
index 98faa0f..5a5d706 100644
--- a/clang/test/CodeGenCXX/debug-info-simple-template-names.cpp
+++ b/clang/test/DebugInfo/CXX/simple-template-names.cpp
@@ -70,18 +70,18 @@ void f() {
// anything other than another unnamed class/struct.
auto Lambda = [] {};
f1<decltype(Lambda)>();
- // CHECK: !DISubprogram(name: "f1<(lambda at {{.*}}debug-info-simple-template-names.cpp:[[# @LINE - 2]]:17)>",
+ // CHECK: !DISubprogram(name: "f1<(lambda at {{.*}}simple-template-names.cpp:[[# @LINE - 2]]:17)>",
f1<t1<t1<decltype(Lambda)>>>();
// CHECK: !DISubprogram(name: "f1<t1<t1<(lambda at {{.*}}> > >",
struct {
} unnamed_struct;
f1<decltype(unnamed_struct)>();
- // CHECK: !DISubprogram(name: "f1<(unnamed struct at {{.*}}debug-info-simple-template-names.cpp:[[# @LINE - 3]]:3)>",
+ // CHECK: !DISubprogram(name: "f1<(unnamed struct at {{.*}}simple-template-names.cpp:[[# @LINE - 3]]:3)>",
f1<void (decltype(unnamed_struct))>();
- // CHECK: !DISubprogram(name: "f1<void ((unnamed struct at {{.*}}debug-info-simple-template-names.cpp:[[# @LINE - 5]]:3))>",
+ // CHECK: !DISubprogram(name: "f1<void ((unnamed struct at {{.*}}simple-template-names.cpp:[[# @LINE - 5]]:3))>",
enum {} unnamed_enum;
f1<decltype(unnamed_enum)>();
- // CHECK: !DISubprogram(name: "f1<(unnamed enum at {{.*}}debug-info-simple-template-names.cpp:[[# @LINE - 2]]:3)>",
+ // CHECK: !DISubprogram(name: "f1<(unnamed enum at {{.*}}simple-template-names.cpp:[[# @LINE - 2]]:3)>",
// Declarations can't readily be reversed as the value in the DWARF only
// contains the address of the value - we'd have to do symbol lookup to find
diff --git a/clang/test/CodeGenCXX/standalone-debug-attribute.cpp b/clang/test/DebugInfo/CXX/standalone-debug-attribute.cpp
index a814e6f..a814e6f 100644
--- a/clang/test/CodeGenCXX/standalone-debug-attribute.cpp
+++ b/clang/test/DebugInfo/CXX/standalone-debug-attribute.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-static-fns.cpp b/clang/test/DebugInfo/CXX/static-fns.cpp
index 0ce3cb7..0ce3cb7 100644
--- a/clang/test/CodeGenCXX/debug-info-static-fns.cpp
+++ b/clang/test/DebugInfo/CXX/static-fns.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-static-member.cpp b/clang/test/DebugInfo/CXX/static-member.cpp
index 972ca62..972ca62 100644
--- a/clang/test/CodeGenCXX/debug-info-static-member.cpp
+++ b/clang/test/DebugInfo/CXX/static-member.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-struct-align.cpp b/clang/test/DebugInfo/CXX/struct-align.cpp
index cd91f4c..cd91f4c 100644
--- a/clang/test/CodeGenCXX/debug-info-struct-align.cpp
+++ b/clang/test/DebugInfo/CXX/struct-align.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-structured-binding-bitfield.cpp b/clang/test/DebugInfo/CXX/structured-binding-bitfield.cpp
index b7aad6a..b7aad6a 100644
--- a/clang/test/CodeGenCXX/debug-info-structured-binding-bitfield.cpp
+++ b/clang/test/DebugInfo/CXX/structured-binding-bitfield.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-structured-binding.cpp b/clang/test/DebugInfo/CXX/structured-binding.cpp
index 5fbd54c..5fbd54c 100644
--- a/clang/test/CodeGenCXX/debug-info-structured-binding.cpp
+++ b/clang/test/DebugInfo/CXX/structured-binding.cpp
diff --git a/clang/test/CodeGenCXX/template-alias.cpp b/clang/test/DebugInfo/CXX/template-alias.cpp
index 256ed69..256ed69 100644
--- a/clang/test/CodeGenCXX/template-alias.cpp
+++ b/clang/test/DebugInfo/CXX/template-alias.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-template-align.cpp b/clang/test/DebugInfo/CXX/template-align.cpp
index 42fdb26..42fdb26 100644
--- a/clang/test/CodeGenCXX/debug-info-template-align.cpp
+++ b/clang/test/DebugInfo/CXX/template-align.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-template-array.cpp b/clang/test/DebugInfo/CXX/template-array.cpp
index 305327b..305327b 100644
--- a/clang/test/CodeGenCXX/debug-info-template-array.cpp
+++ b/clang/test/DebugInfo/CXX/template-array.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-template-deduction-guide.cpp b/clang/test/DebugInfo/CXX/template-deduction-guide.cpp
index 036deb0..036deb0 100644
--- a/clang/test/CodeGenCXX/debug-info-template-deduction-guide.cpp
+++ b/clang/test/DebugInfo/CXX/template-deduction-guide.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-template-explicit-specialization.cpp b/clang/test/DebugInfo/CXX/template-explicit-specialization.cpp
index b756674..b756674 100644
--- a/clang/test/CodeGenCXX/debug-info-template-explicit-specialization.cpp
+++ b/clang/test/DebugInfo/CXX/template-explicit-specialization.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-template-fwd.cpp b/clang/test/DebugInfo/CXX/template-fwd.cpp
index b6c6aa1..b6c6aa1 100644
--- a/clang/test/CodeGenCXX/debug-info-template-fwd.cpp
+++ b/clang/test/DebugInfo/CXX/template-fwd.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-template-limit.cpp b/clang/test/DebugInfo/CXX/template-limit.cpp
index 172ab94..172ab94 100644
--- a/clang/test/CodeGenCXX/debug-info-template-limit.cpp
+++ b/clang/test/DebugInfo/CXX/template-limit.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-template-member.cpp b/clang/test/DebugInfo/CXX/template-member.cpp
index bb947c2..bb947c2 100644
--- a/clang/test/CodeGenCXX/debug-info-template-member.cpp
+++ b/clang/test/DebugInfo/CXX/template-member.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-template-parameter.cpp b/clang/test/DebugInfo/CXX/template-parameter.cpp
index b2ca54a..b2ca54a 100644
--- a/clang/test/CodeGenCXX/debug-info-template-parameter.cpp
+++ b/clang/test/DebugInfo/CXX/template-parameter.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-template-partial-specialization.cpp b/clang/test/DebugInfo/CXX/template-partial-specialization.cpp
index 1595bf8..1595bf8 100644
--- a/clang/test/CodeGenCXX/debug-info-template-partial-specialization.cpp
+++ b/clang/test/DebugInfo/CXX/template-partial-specialization.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-template-quals.cpp b/clang/test/DebugInfo/CXX/template-quals.cpp
index cfee78d..cfee78d 100644
--- a/clang/test/CodeGenCXX/debug-info-template-quals.cpp
+++ b/clang/test/DebugInfo/CXX/template-quals.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-template-recursive.cpp b/clang/test/DebugInfo/CXX/template-recursive.cpp
index 9693b38..9693b38 100644
--- a/clang/test/CodeGenCXX/debug-info-template-recursive.cpp
+++ b/clang/test/DebugInfo/CXX/template-recursive.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-template.cpp b/clang/test/DebugInfo/CXX/template.cpp
index a5a17c9..a5a17c9 100644
--- a/clang/test/CodeGenCXX/debug-info-template.cpp
+++ b/clang/test/DebugInfo/CXX/template.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-this.cpp b/clang/test/DebugInfo/CXX/this.cpp
index f1cd4aa..f1cd4aa 100644
--- a/clang/test/CodeGenCXX/debug-info-this.cpp
+++ b/clang/test/DebugInfo/CXX/this.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-thunk-msabi.cpp b/clang/test/DebugInfo/CXX/thunk-msabi.cpp
index cc59f0b..cc59f0b 100644
--- a/clang/test/CodeGenCXX/debug-info-thunk-msabi.cpp
+++ b/clang/test/DebugInfo/CXX/thunk-msabi.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-thunk.cpp b/clang/test/DebugInfo/CXX/thunk.cpp
index f48adce..f48adce 100644
--- a/clang/test/CodeGenCXX/debug-info-thunk.cpp
+++ b/clang/test/DebugInfo/CXX/thunk.cpp
diff --git a/clang/test/CodeGenCXX/trivial_abi_debuginfo.cpp b/clang/test/DebugInfo/CXX/trivial_abi.cpp
index 3e48614..3e48614 100644
--- a/clang/test/CodeGenCXX/trivial_abi_debuginfo.cpp
+++ b/clang/test/DebugInfo/CXX/trivial_abi.cpp
diff --git a/clang/test/CodeGenCXX/ubsan-check-debuglocs.cpp b/clang/test/DebugInfo/CXX/ubsan-check-debuglocs.cpp
index 81db5c4..81db5c4 100644
--- a/clang/test/CodeGenCXX/ubsan-check-debuglocs.cpp
+++ b/clang/test/DebugInfo/CXX/ubsan-check-debuglocs.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-union-template.cpp b/clang/test/DebugInfo/CXX/union-template.cpp
index d9219fc..d9219fc 100644
--- a/clang/test/CodeGenCXX/debug-info-union-template.cpp
+++ b/clang/test/DebugInfo/CXX/union-template.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-union.cpp b/clang/test/DebugInfo/CXX/union.cpp
index cffe6e9..cffe6e9 100644
--- a/clang/test/CodeGenCXX/debug-info-union.cpp
+++ b/clang/test/DebugInfo/CXX/union.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-use-after-free.cpp b/clang/test/DebugInfo/CXX/use-after-free.cpp
index 1001248..1001248 100644
--- a/clang/test/CodeGenCXX/debug-info-use-after-free.cpp
+++ b/clang/test/DebugInfo/CXX/use-after-free.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-uuid.cpp b/clang/test/DebugInfo/CXX/uuid.cpp
index 09279f2..09279f2 100644
--- a/clang/test/CodeGenCXX/debug-info-uuid.cpp
+++ b/clang/test/DebugInfo/CXX/uuid.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-var-template-partial-spec.cpp b/clang/test/DebugInfo/CXX/var-template-partial-spec.cpp
index 141ff58..141ff58 100644
--- a/clang/test/CodeGenCXX/debug-info-var-template-partial-spec.cpp
+++ b/clang/test/DebugInfo/CXX/var-template-partial-spec.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-var-template-partial.cpp b/clang/test/DebugInfo/CXX/var-template-partial.cpp
index 21ea03b..21ea03b 100644
--- a/clang/test/CodeGenCXX/debug-info-var-template-partial.cpp
+++ b/clang/test/DebugInfo/CXX/var-template-partial.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-varargs.cpp b/clang/test/DebugInfo/CXX/varargs.cpp
index 7afbcd2..7afbcd2 100644
--- a/clang/test/CodeGenCXX/debug-info-varargs.cpp
+++ b/clang/test/DebugInfo/CXX/varargs.cpp
diff --git a/clang/test/CodeGenCXX/variadic-template-alias.cpp b/clang/test/DebugInfo/CXX/variadic-template-alias.cpp
index b4340d6..b4340d6 100644
--- a/clang/test/CodeGenCXX/variadic-template-alias.cpp
+++ b/clang/test/DebugInfo/CXX/variadic-template-alias.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-verbose-trap.cpp b/clang/test/DebugInfo/CXX/verbose-trap.cpp
index f492698..4a88df9 100644
--- a/clang/test/CodeGenCXX/debug-info-verbose-trap.cpp
+++ b/clang/test/DebugInfo/CXX/verbose-trap.cpp
@@ -17,7 +17,7 @@
// CHECK: attributes #[[ATTR1]] = { cold {{.*}}}
-// CHECK: ![[FILESCOPE:.*]] = !DIFile(filename: "{{.*}}debug-info-verbose-trap.cpp"
+// CHECK: ![[FILESCOPE:.*]] = !DIFile(filename: "{{.*}}verbose-trap.cpp"
char const constCat[] = "category2";
char const constMsg[] = "hello";
diff --git a/clang/test/CodeGenCXX/debug-info-vla.cpp b/clang/test/DebugInfo/CXX/vla.cpp
index 73bdaf0..73bdaf0 100644
--- a/clang/test/CodeGenCXX/debug-info-vla.cpp
+++ b/clang/test/DebugInfo/CXX/vla.cpp
diff --git a/clang/test/CodeGenCXX/vtable-holder-self-reference.cpp b/clang/test/DebugInfo/CXX/vtable-holder-self-reference.cpp
index 727de7a..727de7a 100644
--- a/clang/test/CodeGenCXX/vtable-holder-self-reference.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-holder-self-reference.cpp
diff --git a/clang/test/CodeGenCXX/vtable-debug-info-inheritance-diamond.cpp b/clang/test/DebugInfo/CXX/vtable-inheritance-diamond.cpp
index 5ed1353..5ed1353 100644
--- a/clang/test/CodeGenCXX/vtable-debug-info-inheritance-diamond.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-inheritance-diamond.cpp
diff --git a/clang/test/CodeGenCXX/vtable-debug-info-inheritance-multiple.cpp b/clang/test/DebugInfo/CXX/vtable-inheritance-multiple.cpp
index 23973a3..23973a3 100644
--- a/clang/test/CodeGenCXX/vtable-debug-info-inheritance-multiple.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-inheritance-multiple.cpp
diff --git a/clang/test/CodeGenCXX/vtable-debug-info-inheritance-simple-main.cpp b/clang/test/DebugInfo/CXX/vtable-inheritance-simple-main.cpp
index d64e711..d64e711 100644
--- a/clang/test/CodeGenCXX/vtable-debug-info-inheritance-simple-main.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-inheritance-simple-main.cpp
diff --git a/clang/test/CodeGenCXX/vtable-debug-info-inheritance-simple.cpp b/clang/test/DebugInfo/CXX/vtable-inheritance-simple.cpp
index b24ece1..b24ece1 100644
--- a/clang/test/CodeGenCXX/vtable-debug-info-inheritance-simple.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-inheritance-simple.cpp
diff --git a/clang/test/CodeGenCXX/vtable-debug-info-inheritance-virtual.cpp b/clang/test/DebugInfo/CXX/vtable-inheritance-virtual.cpp
index b01f156..b01f156 100644
--- a/clang/test/CodeGenCXX/vtable-debug-info-inheritance-virtual.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-inheritance-virtual.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-vtable-optzn.cpp b/clang/test/DebugInfo/CXX/vtable-optzn.cpp
index 8b49e95..8b49e95 100644
--- a/clang/test/CodeGenCXX/debug-info-vtable-optzn.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-optzn.cpp
diff --git a/clang/test/CodeGenCXX/vtable-debug-info.cpp b/clang/test/DebugInfo/CXX/vtable.cpp
index 932b440..932b440 100644
--- a/clang/test/CodeGenCXX/vtable-debug-info.cpp
+++ b/clang/test/DebugInfo/CXX/vtable.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-wchar.cpp b/clang/test/DebugInfo/CXX/wchar.cpp
index 1ecdd56..1ecdd56 100644
--- a/clang/test/CodeGenCXX/debug-info-wchar.cpp
+++ b/clang/test/DebugInfo/CXX/wchar.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-windows-dtor.cpp b/clang/test/DebugInfo/CXX/windows-dtor.cpp
index beea56c..beea56c 100644
--- a/clang/test/CodeGenCXX/debug-info-windows-dtor.cpp
+++ b/clang/test/DebugInfo/CXX/windows-dtor.cpp
diff --git a/clang/test/CodeGenCXX/debug-info-zero-length-arrays.cpp b/clang/test/DebugInfo/CXX/zero-length-arrays.cpp
index 0b3fd93..0b3fd93 100644
--- a/clang/test/CodeGenCXX/debug-info-zero-length-arrays.cpp
+++ b/clang/test/DebugInfo/CXX/zero-length-arrays.cpp
diff --git a/clang/test/CodeGen/2007-05-11-str-const.c b/clang/test/DebugInfo/Generic/2007-05-11-str-const.c
index 5c3039c..5c3039c 100644
--- a/clang/test/CodeGen/2007-05-11-str-const.c
+++ b/clang/test/DebugInfo/Generic/2007-05-11-str-const.c
diff --git a/clang/test/CodeGen/2009-03-13-dbg.c b/clang/test/DebugInfo/Generic/2009-03-13-dbg.c
index 3effacb..3effacb 100644
--- a/clang/test/CodeGen/2009-03-13-dbg.c
+++ b/clang/test/DebugInfo/Generic/2009-03-13-dbg.c
diff --git a/clang/test/CodeGen/2009-04-23-dbg.c b/clang/test/DebugInfo/Generic/2009-04-23-dbg.c
index 356400c..356400c 100644
--- a/clang/test/CodeGen/2009-04-23-dbg.c
+++ b/clang/test/DebugInfo/Generic/2009-04-23-dbg.c
diff --git a/clang/test/CodeGen/2009-07-31-DbgDeclare.c b/clang/test/DebugInfo/Generic/2009-07-31-DbgDeclare.c
index dabb389..dabb389 100644
--- a/clang/test/CodeGen/2009-07-31-DbgDeclare.c
+++ b/clang/test/DebugInfo/Generic/2009-07-31-DbgDeclare.c
diff --git a/clang/test/CodeGen/2010-01-14-FnType-DebugInfo.c b/clang/test/DebugInfo/Generic/2010-01-14-FnType-DebugInfo.c
index 5cb0015..5cb0015 100644
--- a/clang/test/CodeGen/2010-01-14-FnType-DebugInfo.c
+++ b/clang/test/DebugInfo/Generic/2010-01-14-FnType-DebugInfo.c
diff --git a/clang/test/CodeGen/2010-01-18-Inlined-Debug.c b/clang/test/DebugInfo/Generic/2010-01-18-Inlined-Debug.c
index d763744d..d763744d 100644
--- a/clang/test/CodeGen/2010-01-18-Inlined-Debug.c
+++ b/clang/test/DebugInfo/Generic/2010-01-18-Inlined-Debug.c
diff --git a/clang/test/CodeGen/2010-02-10-PointerName.c b/clang/test/DebugInfo/Generic/2010-02-10-PointerName.c
index 2dc8e19..2dc8e19 100644
--- a/clang/test/CodeGen/2010-02-10-PointerName.c
+++ b/clang/test/DebugInfo/Generic/2010-02-10-PointerName.c
diff --git a/clang/test/CodeGen/2010-02-15-DbgStaticVar.c b/clang/test/DebugInfo/Generic/2010-02-15-DbgStaticVar.c
index 07e5bb0..07e5bb0 100644
--- a/clang/test/CodeGen/2010-02-15-DbgStaticVar.c
+++ b/clang/test/DebugInfo/Generic/2010-02-15-DbgStaticVar.c
diff --git a/clang/test/CodeGen/2010-02-16-DbgScopes.c b/clang/test/DebugInfo/Generic/2010-02-16-DbgScopes.c
index 8b962c6..8b962c6 100644
--- a/clang/test/CodeGen/2010-02-16-DbgScopes.c
+++ b/clang/test/DebugInfo/Generic/2010-02-16-DbgScopes.c
diff --git a/clang/test/CodeGen/2010-03-5-LexicalScope.c b/clang/test/DebugInfo/Generic/2010-03-5-LexicalScope.c
index c0da9f0..c0da9f0 100644
--- a/clang/test/CodeGen/2010-03-5-LexicalScope.c
+++ b/clang/test/DebugInfo/Generic/2010-03-5-LexicalScope.c
diff --git a/clang/test/CodeGen/2010-07-08-DeclDebugLineNo.c b/clang/test/DebugInfo/Generic/2010-07-08-DeclDebugLineNo.c
index e6b7aa0..e6b7aa0 100644
--- a/clang/test/CodeGen/2010-07-08-DeclDebugLineNo.c
+++ b/clang/test/DebugInfo/Generic/2010-07-08-DeclDebugLineNo.c
diff --git a/clang/test/CodeGen/2010-08-10-DbgConstant.c b/clang/test/DebugInfo/Generic/2010-08-10-DbgConstant.c
index 7220f3f..7220f3f 100644
--- a/clang/test/CodeGen/2010-08-10-DbgConstant.c
+++ b/clang/test/DebugInfo/Generic/2010-08-10-DbgConstant.c
diff --git a/clang/test/CodeGen/debug-info-257-args.c b/clang/test/DebugInfo/Generic/257-args.c
index ce8d093..ce8d093 100644
--- a/clang/test/CodeGen/debug-info-257-args.c
+++ b/clang/test/DebugInfo/Generic/257-args.c
diff --git a/clang/test/CodeGen/Inputs/debug-info-embed-source.c b/clang/test/DebugInfo/Generic/Inputs/debug-info-embed-source.c
index 335ccc9..335ccc9 100644
--- a/clang/test/CodeGen/Inputs/debug-info-embed-source.c
+++ b/clang/test/DebugInfo/Generic/Inputs/debug-info-embed-source.c
diff --git a/clang/test/CodeGen/Inputs/debug-info-file-checksum-line.cpp b/clang/test/DebugInfo/Generic/Inputs/debug-info-file-checksum-line.cpp
index 2e98a11..2e98a11 100644
--- a/clang/test/CodeGen/Inputs/debug-info-file-checksum-line.cpp
+++ b/clang/test/DebugInfo/Generic/Inputs/debug-info-file-checksum-line.cpp
diff --git a/clang/test/CodeGen/Inputs/debug-info-file-checksum-pre.cpp b/clang/test/DebugInfo/Generic/Inputs/debug-info-file-checksum-pre.cpp
index ade9f16..ade9f16 100644
--- a/clang/test/CodeGen/Inputs/debug-info-file-checksum-pre.cpp
+++ b/clang/test/DebugInfo/Generic/Inputs/debug-info-file-checksum-pre.cpp
diff --git a/clang/test/CodeGen/Inputs/debug-info-file-checksum.c b/clang/test/DebugInfo/Generic/Inputs/debug-info-file-checksum.c
index 081f6f0..081f6f0 100644
--- a/clang/test/CodeGen/Inputs/debug-info-file-checksum.c
+++ b/clang/test/DebugInfo/Generic/Inputs/debug-info-file-checksum.c
diff --git a/clang/test/CodeGen/Inputs/debug-info-macro.h b/clang/test/DebugInfo/Generic/Inputs/debug-info-macro.h
index f71d5c3..f71d5c3 100644
--- a/clang/test/CodeGen/Inputs/debug-info-macro.h
+++ b/clang/test/DebugInfo/Generic/Inputs/debug-info-macro.h
diff --git a/clang/test/CodeGen/Inputs/debug-info-slash.cpp b/clang/test/DebugInfo/Generic/Inputs/debug-info-slash.cpp
index 563077ed..563077ed 100644
--- a/clang/test/CodeGen/Inputs/debug-info-slash.cpp
+++ b/clang/test/DebugInfo/Generic/Inputs/debug-info-slash.cpp
diff --git a/clang/test/CodeGen/Inputs/debug-info-slash.h b/clang/test/DebugInfo/Generic/Inputs/debug-info-slash.h
index 9092f4a..9092f4a 100644
--- a/clang/test/CodeGen/Inputs/debug-info-slash.h
+++ b/clang/test/DebugInfo/Generic/Inputs/debug-info-slash.h
diff --git a/clang/test/DebugInfo/Generic/Inputs/stdio.h b/clang/test/DebugInfo/Generic/Inputs/stdio.h
new file mode 100644
index 0000000..fc49fd8
--- /dev/null
+++ b/clang/test/DebugInfo/Generic/Inputs/stdio.h
@@ -0,0 +1,9 @@
+struct FILE;
+extern int vfprintf(struct FILE *s, const char *format, __builtin_va_list arg);
+extern int vprintf(const char *format, __builtin_va_list arg);
+
+extern __inline __attribute__((gnu_inline,always_inline)) int
+vprintf(const char *x, __builtin_va_list y)
+{
+ return vfprintf (0, 0, y);
+}
diff --git a/clang/test/CodeGen/debug-info-abspath.c b/clang/test/DebugInfo/Generic/abspath.c
index 193a72c..95bed45 100644
--- a/clang/test/CodeGen/debug-info-abspath.c
+++ b/clang/test/DebugInfo/Generic/abspath.c
@@ -1,9 +1,9 @@
// RUN: mkdir -p %t/UNIQUEISH_SENTINEL
-// RUN: cp %s %t/UNIQUEISH_SENTINEL/debug-info-abspath.c
+// RUN: cp %s %t/UNIQUEISH_SENTINEL/abspath.c

// RUN: %clang_cc1 -debug-info-kind=limited -triple %itanium_abi_triple \
-// RUN: -fdebug-compilation-dir=%t/UNIQUEISH_SENTINEL/debug-info-abspath.c \
-// RUN: %t/UNIQUEISH_SENTINEL/debug-info-abspath.c -emit-llvm -o - \
+// RUN: -fdebug-compilation-dir=%t/UNIQUEISH_SENTINEL/abspath.c \
+// RUN: %t/UNIQUEISH_SENTINEL/abspath.c -emit-llvm -o - \
// RUN: | FileCheck %s

// RUN: cp %s %t.c
@@ -18,8 +18,8 @@ void foo(void) {}
// CHECK: = distinct !DISubprogram({{.*}}file: ![[SPFILE:[0-9]+]]
// CHECK: ![[SPFILE]] = !DIFile(filename: "{{.*}}UNIQUEISH_SENTINEL
-// CHECK-SAME: debug-info-abspath.c"
+// CHECK-SAME: abspath.c"
// CHECK-NOT: directory: "{{.*}}UNIQUEISH_SENTINEL
// INTREE: = distinct !DISubprogram({{.*}}![[SPFILE:[0-9]+]]
-// INTREE: DIFile({{.*}}directory: "{{.+}}CodeGen{{.*}}")
+// INTREE: DIFile({{.*}}directory: "{{.+}}Generic{{.*}}")
diff --git a/clang/test/CodeGen/debug-info-alias-pointer.c b/clang/test/DebugInfo/Generic/alias-pointer.c
index 507a101f..507a101f 100644
--- a/clang/test/CodeGen/debug-info-alias-pointer.c
+++ b/clang/test/DebugInfo/Generic/alias-pointer.c
diff --git a/clang/test/CodeGen/debug-info-alias.c b/clang/test/DebugInfo/Generic/alias.c
index 1f5e477..1f5e477 100644
--- a/clang/test/CodeGen/debug-info-alias.c
+++ b/clang/test/DebugInfo/Generic/alias.c
diff --git a/clang/test/CodeGen/amdgpu-barrier-type-debug-info.c b/clang/test/DebugInfo/Generic/amdgpu-barrier-type-debug-info.c
index 4eafbba..4eafbba 100644
--- a/clang/test/CodeGen/amdgpu-barrier-type-debug-info.c
+++ b/clang/test/DebugInfo/Generic/amdgpu-barrier-type-debug-info.c
diff --git a/clang/test/CodeGen/amdgpu-buffer-rsrc-type-debug-info.c b/clang/test/DebugInfo/Generic/amdgpu-buffer-rsrc-type-debug-info.c
index c266fa8..c266fa8 100644
--- a/clang/test/CodeGen/amdgpu-buffer-rsrc-type-debug-info.c
+++ b/clang/test/DebugInfo/Generic/amdgpu-buffer-rsrc-type-debug-info.c
diff --git a/clang/test/CodeGen/debug-info-args.c b/clang/test/DebugInfo/Generic/args.c
index ce21e7c..ce21e7c 100644
--- a/clang/test/CodeGen/debug-info-args.c
+++ b/clang/test/DebugInfo/Generic/args.c
diff --git a/clang/test/CodeGen/artificial.c b/clang/test/DebugInfo/Generic/artificial.c
index 5c433c8..5c433c8 100644
--- a/clang/test/CodeGen/artificial.c
+++ b/clang/test/DebugInfo/Generic/artificial.c
diff --git a/clang/test/CodeGen/debug-info-atomic.c b/clang/test/DebugInfo/Generic/atomic.c
index 3de0d35..3de0d35 100644
--- a/clang/test/CodeGen/debug-info-atomic.c
+++ b/clang/test/DebugInfo/Generic/atomic.c
diff --git a/clang/test/CodeGen/attr-btf_type_tag-func-ptr.c b/clang/test/DebugInfo/Generic/attr-btf_type_tag-func-ptr.c
index 26935c8..26935c8 100644
--- a/clang/test/CodeGen/attr-btf_type_tag-func-ptr.c
+++ b/clang/test/DebugInfo/Generic/attr-btf_type_tag-func-ptr.c
diff --git a/clang/test/CodeGen/attr-btf_type_tag-similar-type.c b/clang/test/DebugInfo/Generic/attr-btf_type_tag-similar-type.c
index ba9cd2e..ba9cd2e 100644
--- a/clang/test/CodeGen/attr-btf_type_tag-similar-type.c
+++ b/clang/test/DebugInfo/Generic/attr-btf_type_tag-similar-type.c
diff --git a/clang/test/CodeGen/attr-btf_type_tag-typedef-field.c b/clang/test/DebugInfo/Generic/attr-btf_type_tag-typedef-field.c
index 0c02336..0c02336 100644
--- a/clang/test/CodeGen/attr-btf_type_tag-typedef-field.c
+++ b/clang/test/DebugInfo/Generic/attr-btf_type_tag-typedef-field.c
diff --git a/clang/test/CodeGen/attr-btf_type_tag-var.c b/clang/test/DebugInfo/Generic/attr-btf_type_tag-var.c
index ed729e2..ed729e2 100644
--- a/clang/test/CodeGen/attr-btf_type_tag-var.c
+++ b/clang/test/DebugInfo/Generic/attr-btf_type_tag-var.c
diff --git a/clang/test/CodeGen/attr-counted-by-debug-info.c b/clang/test/DebugInfo/Generic/attr-counted-by-debug-info.c
index a6c2b13..a6c2b13 100644
--- a/clang/test/CodeGen/attr-counted-by-debug-info.c
+++ b/clang/test/DebugInfo/Generic/attr-counted-by-debug-info.c
diff --git a/clang/test/CodeGen/attr-nodebug.c b/clang/test/DebugInfo/Generic/attr-nodebug.c
index 75b4089..75b4089 100644
--- a/clang/test/CodeGen/attr-nodebug.c
+++ b/clang/test/DebugInfo/Generic/attr-nodebug.c
diff --git a/clang/test/CodeGen/attr-nodebug2.c b/clang/test/DebugInfo/Generic/attr-nodebug2.c
index a17e1e6..a17e1e6 100644
--- a/clang/test/CodeGen/attr-nodebug2.c
+++ b/clang/test/DebugInfo/Generic/attr-nodebug2.c
diff --git a/clang/test/CodeGen/debug-info-attributed-stmt.c b/clang/test/DebugInfo/Generic/attributed-stmt.c
index b60aaf6..b60aaf6 100644
--- a/clang/test/CodeGen/debug-info-attributed-stmt.c
+++ b/clang/test/DebugInfo/Generic/attributed-stmt.c
diff --git a/clang/test/CodeGen/debug-info-bitfield-0-struct.c b/clang/test/DebugInfo/Generic/bitfield-0-struct.c
index 9fadf89..9fadf89 100644
--- a/clang/test/CodeGen/debug-info-bitfield-0-struct.c
+++ b/clang/test/DebugInfo/Generic/bitfield-0-struct.c
diff --git a/clang/test/CodeGen/debug-info-block-decl.c b/clang/test/DebugInfo/Generic/block-decl.c
index 6e95ecc..6e95ecc 100644
--- a/clang/test/CodeGen/debug-info-block-decl.c
+++ b/clang/test/DebugInfo/Generic/block-decl.c
diff --git a/clang/test/CodeGen/debug-info-block-expr.c b/clang/test/DebugInfo/Generic/block-expr.c
index 712158f..712158f 100644
--- a/clang/test/CodeGen/debug-info-block-expr.c
+++ b/clang/test/DebugInfo/Generic/block-expr.c
diff --git a/clang/test/CodeGen/debug-info-block-out-return.c b/clang/test/DebugInfo/Generic/block-out-return.c
index cae1688..cae1688 100644
--- a/clang/test/CodeGen/debug-info-block-out-return.c
+++ b/clang/test/DebugInfo/Generic/block-out-return.c
diff --git a/clang/test/CodeGen/debug-info-block-vars.c b/clang/test/DebugInfo/Generic/block-vars.c
index 90d1d4b..90d1d4b 100644
--- a/clang/test/CodeGen/debug-info-block-vars.c
+++ b/clang/test/DebugInfo/Generic/block-vars.c
diff --git a/clang/test/CodeGen/debug-info-block.c b/clang/test/DebugInfo/Generic/block.c
index a4b8fde..a4b8fde 100644
--- a/clang/test/CodeGen/debug-info-block.c
+++ b/clang/test/DebugInfo/Generic/block.c
diff --git a/clang/test/CodeGen/bounds-checking-debuginfo.c b/clang/test/DebugInfo/Generic/bounds-checking-debuginfo.c
index bd7aedd..bd7aedd 100644
--- a/clang/test/CodeGen/bounds-checking-debuginfo.c
+++ b/clang/test/DebugInfo/Generic/bounds-checking-debuginfo.c
diff --git a/clang/test/CodeGen/bpf-debug-info-unref.c b/clang/test/DebugInfo/Generic/bpf-debug-info-unref.c
index 91f761e..91f761e 100644
--- a/clang/test/CodeGen/bpf-debug-info-unref.c
+++ b/clang/test/DebugInfo/Generic/bpf-debug-info-unref.c
diff --git a/clang/test/CodeGen/builtin-preserve-access-index-array.c b/clang/test/DebugInfo/Generic/builtin-preserve-access-index-array.c
index 3ac5409..3ac5409 100644
--- a/clang/test/CodeGen/builtin-preserve-access-index-array.c
+++ b/clang/test/DebugInfo/Generic/builtin-preserve-access-index-array.c
diff --git a/clang/test/CodeGen/debug-info-cc.c b/clang/test/DebugInfo/Generic/cc.c
index 2bfb1c2..2bfb1c2 100644
--- a/clang/test/CodeGen/debug-info-cc.c
+++ b/clang/test/DebugInfo/Generic/cc.c
diff --git a/clang/test/CodeGen/cfi-check-fail-debuginfo.c b/clang/test/DebugInfo/Generic/cfi-check-fail-debuginfo.c
index 74ed556..74ed556 100644
--- a/clang/test/CodeGen/cfi-check-fail-debuginfo.c
+++ b/clang/test/DebugInfo/Generic/cfi-check-fail-debuginfo.c
diff --git a/clang/test/CodeGen/cfi-icall-generalize-debuginfo.c b/clang/test/DebugInfo/Generic/cfi-icall-generalize-debuginfo.c
index 0ffc2b9..0ffc2b9 100644
--- a/clang/test/CodeGen/cfi-icall-generalize-debuginfo.c
+++ b/clang/test/DebugInfo/Generic/cfi-icall-generalize-debuginfo.c
diff --git a/clang/test/CodeGen/cfi-icall-normalize2-debuginfo.c b/clang/test/DebugInfo/Generic/cfi-icall-normalize2-debuginfo.c
index 258c3bf..258c3bf 100644
--- a/clang/test/CodeGen/cfi-icall-normalize2-debuginfo.c
+++ b/clang/test/DebugInfo/Generic/cfi-icall-normalize2-debuginfo.c
diff --git a/clang/test/CodeGen/debug-info-codeview-buildinfo.c b/clang/test/DebugInfo/Generic/codeview-buildinfo.c
index 98d249b..ee6f772 100644
--- a/clang/test/CodeGen/debug-info-codeview-buildinfo.c
+++ b/clang/test/DebugInfo/Generic/codeview-buildinfo.c
@@ -17,7 +17,7 @@ int main(void) { return 42; }
// CHECK: Types (.debug$T)
// CHECK: ============================================================
// CHECK: 0x[[PWD:.+]] | LF_STRING_ID [size = {{.+}}] ID: <no type>, String: [[PWDVAL:.+]]
-// CHECK: 0x[[FILEPATH:.+]] | LF_STRING_ID [size = {{.+}}] ID: <no type>, String: [[FILEPATHVAL:.+[\\/]debug-info-codeview-buildinfo.c]]
+// CHECK: 0x[[FILEPATH:.+]] | LF_STRING_ID [size = {{.+}}] ID: <no type>, String: [[FILEPATHVAL:.+[\\/]codeview-buildinfo.c]]
// CHECK: 0x[[ZIPDB:.+]] | LF_STRING_ID [size = {{.+}}] ID: <no type>, String:
// CHECK: 0x[[TOOL:.+]] | LF_STRING_ID [size = {{.+}}] ID: <no type>, String: [[TOOLVAL:.+[\\/][clang|llvm].*]]
// CHECK: 0x[[CMDLINE:.+]] | LF_STRING_ID [size = {{.+}}] ID: <no type>, String: "-cc1
diff --git a/clang/test/CodeGen/debug-info-codeview-heapallocsite.c b/clang/test/DebugInfo/Generic/codeview-heapallocsite.c
index 95d4cc9..95d4cc9 100644
--- a/clang/test/CodeGen/debug-info-codeview-heapallocsite.c
+++ b/clang/test/DebugInfo/Generic/codeview-heapallocsite.c
diff --git a/clang/test/CodeGen/debug-info-codeview-unnamed.c b/clang/test/DebugInfo/Generic/codeview-unnamed.c
index 0df6e1a..0df6e1a 100644
--- a/clang/test/CodeGen/debug-info-codeview-unnamed.c
+++ b/clang/test/DebugInfo/Generic/codeview-unnamed.c
diff --git a/clang/test/CodeGen/debug-info-compilation-dir.c b/clang/test/DebugInfo/Generic/compilation-dir.c
index 5f5542c..d2c19f2 100644
--- a/clang/test/CodeGen/debug-info-compilation-dir.c
+++ b/clang/test/DebugInfo/Generic/compilation-dir.c
@@ -5,12 +5,12 @@
// CHECK-NONSENSE: nonsense
// RUN: %clang_cc1 -emit-llvm -debug-info-kind=limited %s -o - | FileCheck -check-prefix=CHECK-DIR %s
-// CHECK-DIR: CodeGen
+// CHECK-DIR: Generic
/// Test path remapping.
// RUN: %clang_cc1 -fdebug-compilation-dir=%S -main-file-name %s -emit-llvm -debug-info-kind=limited %s -o - | FileCheck -check-prefix=CHECK-ABS %s
-// CHECK-ABS: DIFile(filename: "{{.*}}debug-info-compilation-dir.c", directory: "{{.*}}CodeGen")
+// CHECK-ABS: DIFile(filename: "{{.*}}compilation-dir.c", directory: "{{.*}}Generic")
// RUN: %clang_cc1 -main-file-name %s -emit-llvm -debug-info-kind=limited %s -o - | FileCheck -check-prefix=CHECK-NOMAP %s
-// CHECK-NOMAP: DIFile(filename: "{{.*}}debug-info-compilation-dir.c", directory: "")
+// CHECK-NOMAP: DIFile(filename: "{{.*}}compilation-dir.c", directory: "")
diff --git a/clang/test/CodeGen/debug-info-crash.c b/clang/test/DebugInfo/Generic/crash.c
index e3a8f81..e3a8f81 100644
--- a/clang/test/CodeGen/debug-info-crash.c
+++ b/clang/test/DebugInfo/Generic/crash.c
diff --git a/clang/test/CodeGen/dbg-const-int128.c b/clang/test/DebugInfo/Generic/dbg-const-int128.c
index 7f8aaef..7f8aaef 100644
--- a/clang/test/CodeGen/dbg-const-int128.c
+++ b/clang/test/DebugInfo/Generic/dbg-const-int128.c
diff --git a/clang/test/CodeGen/dbg-info-all-calls-described.c b/clang/test/DebugInfo/Generic/dbg-info-all-calls-described.c
index 3ca3aaa..3ca3aaa 100644
--- a/clang/test/CodeGen/dbg-info-all-calls-described.c
+++ b/clang/test/DebugInfo/Generic/dbg-info-all-calls-described.c
diff --git a/clang/test/CodeGen/debug-info.c b/clang/test/DebugInfo/Generic/debug-info.c
index 024e957..024e957 100644
--- a/clang/test/CodeGen/debug-info.c
+++ b/clang/test/DebugInfo/Generic/debug-info.c
diff --git a/clang/test/CodeGen/debug-label-inline.c b/clang/test/DebugInfo/Generic/debug-label-inline.c
index 9d92ffb..9d92ffb 100644
--- a/clang/test/CodeGen/debug-label-inline.c
+++ b/clang/test/DebugInfo/Generic/debug-label-inline.c
diff --git a/clang/test/CodeGen/debug-line-1.c b/clang/test/DebugInfo/Generic/debug-line-1.c
index 56f447e..56f447e 100644
--- a/clang/test/CodeGen/debug-line-1.c
+++ b/clang/test/DebugInfo/Generic/debug-line-1.c
diff --git a/clang/test/CodeGen/debug-prefix-map.c b/clang/test/DebugInfo/Generic/debug-prefix-map.c
index e58909f..e58909f 100644
--- a/clang/test/CodeGen/debug-prefix-map.c
+++ b/clang/test/DebugInfo/Generic/debug-prefix-map.c
diff --git a/clang/test/CodeGen/debug-prefix-map.cpp b/clang/test/DebugInfo/Generic/debug-prefix-map.cpp
index 174bef5..174bef5 100644
--- a/clang/test/CodeGen/debug-prefix-map.cpp
+++ b/clang/test/DebugInfo/Generic/debug-prefix-map.cpp
diff --git a/clang/test/CodeGen/debug-info-embed-source.c b/clang/test/DebugInfo/Generic/embed-source.c
index daa6a6c..daa6a6c 100644
--- a/clang/test/CodeGen/debug-info-embed-source.c
+++ b/clang/test/DebugInfo/Generic/embed-source.c
diff --git a/clang/test/CodeGen/debug-info-enum-case-val.c b/clang/test/DebugInfo/Generic/enum-case-val.c
index f39de0d..f39de0d 100644
--- a/clang/test/CodeGen/debug-info-enum-case-val.c
+++ b/clang/test/DebugInfo/Generic/enum-case-val.c
diff --git a/clang/test/CodeGen/debug-info-enum-extensibility.c b/clang/test/DebugInfo/Generic/enum-extensibility.c
index 4f8a42b..4f8a42b 100644
--- a/clang/test/CodeGen/debug-info-enum-extensibility.c
+++ b/clang/test/DebugInfo/Generic/enum-extensibility.c
diff --git a/clang/test/CodeGen/debug-info-enum.c b/clang/test/DebugInfo/Generic/enum.c
index 5454eb5..5454eb5 100644
--- a/clang/test/CodeGen/debug-info-enum.c
+++ b/clang/test/DebugInfo/Generic/enum.c
diff --git a/clang/test/CodeGen/debug-info-enum.cpp b/clang/test/DebugInfo/Generic/enum.cpp
index 4d83fdc..4d83fdc 100644
--- a/clang/test/CodeGen/debug-info-enum.cpp
+++ b/clang/test/DebugInfo/Generic/enum.cpp
diff --git a/clang/test/CodeGen/enum2.c b/clang/test/DebugInfo/Generic/enum2.c
index fcf6381..fcf6381 100644
--- a/clang/test/CodeGen/enum2.c
+++ b/clang/test/DebugInfo/Generic/enum2.c
diff --git a/clang/test/CodeGen/debug-info-extern-basic.c b/clang/test/DebugInfo/Generic/extern-basic.c
index 13b8811..13b8811 100644
--- a/clang/test/CodeGen/debug-info-extern-basic.c
+++ b/clang/test/DebugInfo/Generic/extern-basic.c
diff --git a/clang/test/CodeGen/debug-info-extern-basic.cpp b/clang/test/DebugInfo/Generic/extern-basic.cpp
index 1756690..1756690 100644
--- a/clang/test/CodeGen/debug-info-extern-basic.cpp
+++ b/clang/test/DebugInfo/Generic/extern-basic.cpp
diff --git a/clang/test/CodeGen/debug-info-extern-call.c b/clang/test/DebugInfo/Generic/extern-call.c
index 0d18dc4..0d18dc4 100644
--- a/clang/test/CodeGen/debug-info-extern-call.c
+++ b/clang/test/DebugInfo/Generic/extern-call.c
diff --git a/clang/test/CodeGen/debug-info-extern-callback.c b/clang/test/DebugInfo/Generic/extern-callback.c
index a9f210c..a9f210c 100644
--- a/clang/test/CodeGen/debug-info-extern-callback.c
+++ b/clang/test/DebugInfo/Generic/extern-callback.c
diff --git a/clang/test/CodeGen/debug-info-extern-duplicate.c b/clang/test/DebugInfo/Generic/extern-duplicate.c
index 597865c..597865c 100644
--- a/clang/test/CodeGen/debug-info-extern-duplicate.c
+++ b/clang/test/DebugInfo/Generic/extern-duplicate.c
diff --git a/clang/test/CodeGen/debug-info-extern-multi.c b/clang/test/DebugInfo/Generic/extern-multi.c
index c8a355b..c8a355b 100644
--- a/clang/test/CodeGen/debug-info-extern-multi.c
+++ b/clang/test/DebugInfo/Generic/extern-multi.c
diff --git a/clang/test/CodeGen/debug-info-extern-unused.c b/clang/test/DebugInfo/Generic/extern-unused.c
index 52b3b1b..52b3b1b 100644
--- a/clang/test/CodeGen/debug-info-extern-unused.c
+++ b/clang/test/DebugInfo/Generic/extern-unused.c
diff --git a/clang/test/CodeGen/fake-use-return-line.c b/clang/test/DebugInfo/Generic/fake-use-return-line.c
index 50d5885..50d5885 100644
--- a/clang/test/CodeGen/fake-use-return-line.c
+++ b/clang/test/DebugInfo/Generic/fake-use-return-line.c
diff --git a/clang/test/CodeGen/debug-info-file-change.c b/clang/test/DebugInfo/Generic/file-change.c
index 94e3c9d..94e3c9d 100644
--- a/clang/test/CodeGen/debug-info-file-change.c
+++ b/clang/test/DebugInfo/Generic/file-change.c
diff --git a/clang/test/CodeGen/debug-info-file-checksum.c b/clang/test/DebugInfo/Generic/file-checksum.c
index 30138e7..30138e7 100644
--- a/clang/test/CodeGen/debug-info-file-checksum.c
+++ b/clang/test/DebugInfo/Generic/file-checksum.c
diff --git a/clang/test/CodeGen/debug-info-gline-tables-only.c b/clang/test/DebugInfo/Generic/gline-tables-only.c
index 6321edd..6321edd 100644
--- a/clang/test/CodeGen/debug-info-gline-tables-only.c
+++ b/clang/test/DebugInfo/Generic/gline-tables-only.c
diff --git a/clang/test/CodeGen/debug-info-gline-tables-only2.c b/clang/test/DebugInfo/Generic/gline-tables-only2.c
index 204fd6d..204fd6d 100644
--- a/clang/test/CodeGen/debug-info-gline-tables-only2.c
+++ b/clang/test/DebugInfo/Generic/gline-tables-only2.c
diff --git a/clang/test/CodeGen/global-blocks-lines.c b/clang/test/DebugInfo/Generic/global-blocks-lines.c
index 9466173..9466173 100644
--- a/clang/test/CodeGen/global-blocks-lines.c
+++ b/clang/test/DebugInfo/Generic/global-blocks-lines.c
diff --git a/clang/test/CodeGen/debug-info-global-constant.c b/clang/test/DebugInfo/Generic/global-constant.c
index 80e4651..80e4651 100644
--- a/clang/test/CodeGen/debug-info-global-constant.c
+++ b/clang/test/DebugInfo/Generic/global-constant.c
diff --git a/clang/test/CodeGen/debug-info-imported-entity.cpp b/clang/test/DebugInfo/Generic/imported-entity.cpp
index 398536e..f4763c1 100644
--- a/clang/test/CodeGen/debug-info-imported-entity.cpp
+++ b/clang/test/DebugInfo/Generic/imported-entity.cpp
@@ -7,4 +7,4 @@ using std::A; using ::A;
// CHECK: [[Imports]] = !{[[ImportedEntity:![0-9]+]]}
// CHECK: [[ImportedEntity]] = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: [[CompileUnit]], entity: [[STDA:![0-9]+]], file: [[FILE:![0-9]+]], line: 4)
// CHECK: [[STDA]] = !DICompositeType(tag: DW_TAG_class_type, name: "A",
-// CHECK: [[FILE]] = !DIFile(filename: {{.*}}debug-info-imported-entity.cpp
+// CHECK: [[FILE]] = !DIFile(filename: {{.*}}imported-entity.cpp
diff --git a/clang/test/CodeGen/debug-info-inline-for.c b/clang/test/DebugInfo/Generic/inline-for.c
index 55066b2..55066b2 100644
--- a/clang/test/CodeGen/debug-info-inline-for.c
+++ b/clang/test/DebugInfo/Generic/inline-for.c
diff --git a/clang/test/CodeGen/debug-label.c b/clang/test/DebugInfo/Generic/label.c
index 308d664..308d664 100644
--- a/clang/test/CodeGen/debug-label.c
+++ b/clang/test/DebugInfo/Generic/label.c
diff --git a/clang/test/CodeGen/lifetime-debuginfo-1.c b/clang/test/DebugInfo/Generic/lifetime-debuginfo-1.c
index 10b00f1..10b00f1 100644
--- a/clang/test/CodeGen/lifetime-debuginfo-1.c
+++ b/clang/test/DebugInfo/Generic/lifetime-debuginfo-1.c
diff --git a/clang/test/CodeGen/lifetime-debuginfo-2.c b/clang/test/DebugInfo/Generic/lifetime-debuginfo-2.c
index 17d3d0b..17d3d0b 100644
--- a/clang/test/CodeGen/lifetime-debuginfo-2.c
+++ b/clang/test/DebugInfo/Generic/lifetime-debuginfo-2.c
diff --git a/clang/test/CodeGen/debug-info-limited.c b/clang/test/DebugInfo/Generic/limited.c
index 7c20ca4..7c20ca4 100644
--- a/clang/test/CodeGen/debug-info-limited.c
+++ b/clang/test/DebugInfo/Generic/limited.c
diff --git a/clang/test/CodeGen/debug-info-line.c b/clang/test/DebugInfo/Generic/line.c
index 13cab2e..13cab2e 100644
--- a/clang/test/CodeGen/debug-info-line.c
+++ b/clang/test/DebugInfo/Generic/line.c
diff --git a/clang/test/CodeGen/debug-info-line2.c b/clang/test/DebugInfo/Generic/line2.c
index 085fd97..085fd97 100644
--- a/clang/test/CodeGen/debug-info-line2.c
+++ b/clang/test/DebugInfo/Generic/line2.c
diff --git a/clang/test/CodeGen/debug-info-line3.c b/clang/test/DebugInfo/Generic/line3.c
index b2da4ef..b2da4ef 100644
--- a/clang/test/CodeGen/debug-info-line3.c
+++ b/clang/test/DebugInfo/Generic/line3.c
diff --git a/clang/test/CodeGen/debug-info-line4.c b/clang/test/DebugInfo/Generic/line4.c
index 3c99fc5..3c99fc5 100644
--- a/clang/test/CodeGen/debug-info-line4.c
+++ b/clang/test/DebugInfo/Generic/line4.c
diff --git a/clang/test/CodeGen/lineno-dbginfo.c b/clang/test/DebugInfo/Generic/lineno-dbginfo.c
index 82f54d0..82f54d0 100644
--- a/clang/test/CodeGen/lineno-dbginfo.c
+++ b/clang/test/DebugInfo/Generic/lineno-dbginfo.c
diff --git a/clang/test/CodeGen/linetable-endscope.c b/clang/test/DebugInfo/Generic/linetable-endscope.c
index 03291a2..03291a2 100644
--- a/clang/test/CodeGen/linetable-endscope.c
+++ b/clang/test/DebugInfo/Generic/linetable-endscope.c
diff --git a/clang/test/CodeGen/debug-info-lto.c b/clang/test/DebugInfo/Generic/lto.c
index 5dab0a1..5dab0a1 100644
--- a/clang/test/CodeGen/debug-info-lto.c
+++ b/clang/test/DebugInfo/Generic/lto.c
diff --git a/clang/test/CodeGen/debug-info-macro.c b/clang/test/DebugInfo/Generic/macro.c
index 23fd675..de8d1ab 100644
--- a/clang/test/CodeGen/debug-info-macro.c
+++ b/clang/test/DebugInfo/Generic/macro.c
@@ -30,7 +30,7 @@
// PCH: [[Macros]] = !{[[MainMacroFile:![0-9]+]], [[DefineC1:![0-9]+]], [[DefineA:![0-9]+]], [[UndefC1:![0-9]+]]}
// CHECK: [[MainMacroFile]] = !DIMacroFile(file: [[MainFile:![0-9]+]], nodes: [[N1:![0-9]+]])
-// CHECK: [[MainFile]] = !DIFile(filename: "{{.*}}debug-info-macro.c"
+// CHECK: [[MainFile]] = !DIFile(filename: "{{.*}}macro.c"
// CHECK: [[N1]] = !{[[CommandLineInclude:![0-9]+]], [[DefineD1:![0-9]+]], [[FileInclude1:![0-9]+]], [[UndefD1:![0-9]+]], [[DefineD2:![0-9]+]], [[FileInclude2:![0-9]+]], [[UndefD2:![0-9]+]]}
// CHECK: [[CommandLineInclude]] = !DIMacroFile(file: [[HeaderFile:![0-9]+]], nodes: [[N2:![0-9]+]])
diff --git a/clang/test/CodeGen/debug-info-matrix-types.c b/clang/test/DebugInfo/Generic/matrix-types.c
index c16e35e..c16e35e 100644
--- a/clang/test/CodeGen/debug-info-matrix-types.c
+++ b/clang/test/DebugInfo/Generic/matrix-types.c
diff --git a/clang/test/CodeGen/debug-info-member.c b/clang/test/DebugInfo/Generic/member.c
index 87d4a74..87d4a74 100644
--- a/clang/test/CodeGen/debug-info-member.c
+++ b/clang/test/DebugInfo/Generic/member.c
diff --git a/clang/test/CodeGen/mips-debug-info-bitfield.c b/clang/test/DebugInfo/Generic/mips-debug-info-bitfield.c
index a0e2ed9c..a0e2ed9c 100644
--- a/clang/test/CodeGen/mips-debug-info-bitfield.c
+++ b/clang/test/DebugInfo/Generic/mips-debug-info-bitfield.c
diff --git a/clang/test/CodeGen/debug-info-names.c b/clang/test/DebugInfo/Generic/names.c
index a807fb8..a807fb8 100644
--- a/clang/test/CodeGen/debug-info-names.c
+++ b/clang/test/DebugInfo/Generic/names.c
diff --git a/clang/test/CodeGen/debug-info-no-inline-line-tables.c b/clang/test/DebugInfo/Generic/no-inline-line-tables.c
index f6c19fd..f6c19fd 100644
--- a/clang/test/CodeGen/debug-info-no-inline-line-tables.c
+++ b/clang/test/DebugInfo/Generic/no-inline-line-tables.c
diff --git a/clang/test/CodeGen/nodebug-attr.c b/clang/test/DebugInfo/Generic/nodebug-attr.c
index ee19a4c..ee19a4c 100644
--- a/clang/test/CodeGen/nodebug-attr.c
+++ b/clang/test/DebugInfo/Generic/nodebug-attr.c
diff --git a/clang/test/CodeGen/null-sanitizer-debug-info-regression.cpp b/clang/test/DebugInfo/Generic/null-sanitizer-debug-info-regression.cpp
index 0b62f24..0b62f24 100644
--- a/clang/test/CodeGen/null-sanitizer-debug-info-regression.cpp
+++ b/clang/test/DebugInfo/Generic/null-sanitizer-debug-info-regression.cpp
diff --git a/clang/test/CodeGen/debug-nvptx.c b/clang/test/DebugInfo/Generic/nvptx.c
index 3b6ef70..3b6ef70 100644
--- a/clang/test/CodeGen/debug-nvptx.c
+++ b/clang/test/DebugInfo/Generic/nvptx.c
diff --git a/clang/test/CodeGen/openmp-prefix-map.c b/clang/test/DebugInfo/Generic/openmp-prefix-map.c
index be3429c..be3429c 100644
--- a/clang/test/CodeGen/openmp-prefix-map.c
+++ b/clang/test/DebugInfo/Generic/openmp-prefix-map.c
diff --git a/clang/test/CodeGen/debug-info-oslog.c b/clang/test/DebugInfo/Generic/oslog.c
index 49c361a..49c361a 100644
--- a/clang/test/CodeGen/debug-info-oslog.c
+++ b/clang/test/DebugInfo/Generic/oslog.c
diff --git a/clang/test/CodeGen/overloadable-debug.c b/clang/test/DebugInfo/Generic/overloadable-debug.c
index c742f74f..c742f74f 100644
--- a/clang/test/CodeGen/overloadable-debug.c
+++ b/clang/test/DebugInfo/Generic/overloadable-debug.c
diff --git a/clang/test/CodeGen/debug-info-packed-struct.c b/clang/test/DebugInfo/Generic/packed-struct.c
index 676cdb3..676cdb3 100644
--- a/clang/test/CodeGen/debug-info-packed-struct.c
+++ b/clang/test/DebugInfo/Generic/packed-struct.c
diff --git a/clang/test/CodeGen/pr52782-stdcall-func-decl.cpp b/clang/test/DebugInfo/Generic/pr52782-stdcall-func-decl.cpp
index c3c94ec..c3c94ec 100644
--- a/clang/test/CodeGen/pr52782-stdcall-func-decl.cpp
+++ b/clang/test/DebugInfo/Generic/pr52782-stdcall-func-decl.cpp
diff --git a/clang/test/CodeGen/debug-info-preferred-type.cpp b/clang/test/DebugInfo/Generic/preferred-type.cpp
index 6406657..6406657 100644
--- a/clang/test/CodeGen/debug-info-preferred-type.cpp
+++ b/clang/test/DebugInfo/Generic/preferred-type.cpp
diff --git a/clang/test/CodeGen/preferred_name-chain.cpp b/clang/test/DebugInfo/Generic/preferred_name-chain.cpp
index 3108cae..3108cae 100644
--- a/clang/test/CodeGen/preferred_name-chain.cpp
+++ b/clang/test/DebugInfo/Generic/preferred_name-chain.cpp
diff --git a/clang/test/CodeGen/preferred_name.cpp b/clang/test/DebugInfo/Generic/preferred_name.cpp
index 9b3f532..9b3f532 100644
--- a/clang/test/CodeGen/preferred_name.cpp
+++ b/clang/test/DebugInfo/Generic/preferred_name.cpp
diff --git a/clang/test/CodeGen/debug-info-preprocessed-file.i b/clang/test/DebugInfo/Generic/preprocessed-file.i
index c8a2307..c8a2307 100644
--- a/clang/test/CodeGen/debug-info-preprocessed-file.i
+++ b/clang/test/DebugInfo/Generic/preprocessed-file.i
diff --git a/clang/test/CodeGen/debug-info-programming-language.c b/clang/test/DebugInfo/Generic/programming-language.c
index 02e45d8..02e45d8 100644
--- a/clang/test/CodeGen/debug-info-programming-language.c
+++ b/clang/test/DebugInfo/Generic/programming-language.c
diff --git a/clang/test/CodeGen/debug-info-pseudo-probe.cpp b/clang/test/DebugInfo/Generic/pseudo-probe.cpp
index 75c1084..75c1084 100644
--- a/clang/test/CodeGen/debug-info-pseudo-probe.cpp
+++ b/clang/test/DebugInfo/Generic/pseudo-probe.cpp
diff --git a/clang/test/CodeGen/ptrauth-debuginfo.c b/clang/test/DebugInfo/Generic/ptrauth-debuginfo.c
index b76baff..b76baff 100644
--- a/clang/test/CodeGen/ptrauth-debuginfo.c
+++ b/clang/test/DebugInfo/Generic/ptrauth-debuginfo.c
diff --git a/clang/test/CodeGen/debug-info-ranges-base-address.c b/clang/test/DebugInfo/Generic/ranges-base-address.c
index 9f6ffa0..9f6ffa0 100644
--- a/clang/test/CodeGen/debug-info-ranges-base-address.c
+++ b/clang/test/DebugInfo/Generic/ranges-base-address.c
diff --git a/clang/test/CodeGen/debug-info-same-line.c b/clang/test/DebugInfo/Generic/same-line.c
index a791222..a791222 100644
--- a/clang/test/CodeGen/debug-info-same-line.c
+++ b/clang/test/DebugInfo/Generic/same-line.c
diff --git a/clang/test/CodeGen/debug-info-scope-file.c b/clang/test/DebugInfo/Generic/scope-file.c
index 9ba8a65..9ba8a65 100644
--- a/clang/test/CodeGen/debug-info-scope-file.c
+++ b/clang/test/DebugInfo/Generic/scope-file.c
diff --git a/clang/test/CodeGen/debug-info-scope.c b/clang/test/DebugInfo/Generic/scope.c
index 26d98d0..26d98d0 100644
--- a/clang/test/CodeGen/debug-info-scope.c
+++ b/clang/test/DebugInfo/Generic/scope.c
diff --git a/clang/test/CodeGen/debug-info-slash.c b/clang/test/DebugInfo/Generic/slash.c
index 56e51c0..02187d0 100644
--- a/clang/test/CodeGen/debug-info-slash.c
+++ b/clang/test/DebugInfo/Generic/slash.c
@@ -2,5 +2,5 @@
// RUN: %clang -target x86_64-linux-gnu -ffile-reproducible -emit-llvm -S -g %s -o - | FileCheck --check-prefix=LINUX %s
int main() { return 0; }
-// WIN: !DIFile(filename: "{{.*}}\\debug-info-slash.c"
-// LINUX: !DIFile(filename: "{{.*}}/debug-info-slash.c"
+// WIN: !DIFile(filename: "{{.*}}\\slash.c"
+// LINUX: !DIFile(filename: "{{.*}}/slash.c"
diff --git a/clang/test/CodeGen/debug-info-slash.test b/clang/test/DebugInfo/Generic/slash.test
index 0e42912..0e42912 100644
--- a/clang/test/CodeGen/debug-info-slash.test
+++ b/clang/test/DebugInfo/Generic/slash.test
diff --git a/clang/test/CodeGen/split-debug-filename.c b/clang/test/DebugInfo/Generic/split-debug-filename.c
index ea710cc..ea710cc 100644
--- a/clang/test/CodeGen/split-debug-filename.c
+++ b/clang/test/DebugInfo/Generic/split-debug-filename.c
diff --git a/clang/test/CodeGen/split-debug-inlining.c b/clang/test/DebugInfo/Generic/split-debug-inlining.c
index b1c9814..b1c9814 100644
--- a/clang/test/CodeGen/split-debug-inlining.c
+++ b/clang/test/DebugInfo/Generic/split-debug-inlining.c
diff --git a/clang/test/CodeGen/split-debug-output.c b/clang/test/DebugInfo/Generic/split-debug-output.c
index 94cc141..94cc141 100644
--- a/clang/test/CodeGen/split-debug-output.c
+++ b/clang/test/DebugInfo/Generic/split-debug-output.c
diff --git a/clang/test/CodeGen/split-debug-single-file.c b/clang/test/DebugInfo/Generic/split-debug-single-file.c
index 0c38d0f..0c38d0f 100644
--- a/clang/test/CodeGen/split-debug-single-file.c
+++ b/clang/test/DebugInfo/Generic/split-debug-single-file.c
diff --git a/clang/test/CodeGen/debug-info-static-const-fp.c b/clang/test/DebugInfo/Generic/static-const-fp.c
index be4b51d..be4b51d 100644
--- a/clang/test/CodeGen/debug-info-static-const-fp.c
+++ b/clang/test/DebugInfo/Generic/static-const-fp.c
diff --git a/clang/test/CodeGen/debug-info-static.c b/clang/test/DebugInfo/Generic/static.c
index d6ade2ae..d6ade2ae 100644
--- a/clang/test/CodeGen/debug-info-static.c
+++ b/clang/test/DebugInfo/Generic/static.c
diff --git a/clang/test/CodeGen/debug-info-switch-fallthrough.c b/clang/test/DebugInfo/Generic/switch-fallthrough.c
index 36ac371d..36ac371d 100644
--- a/clang/test/CodeGen/debug-info-switch-fallthrough.c
+++ b/clang/test/DebugInfo/Generic/switch-fallthrough.c
diff --git a/clang/test/CodeGen/debug-info-sysroot-sdk.c b/clang/test/DebugInfo/Generic/sysroot-sdk.c
index b52d2e1d..b52d2e1d 100644
--- a/clang/test/CodeGen/debug-info-sysroot-sdk.c
+++ b/clang/test/DebugInfo/Generic/sysroot-sdk.c
diff --git a/clang/test/CodeGen/thinlto-split-dwarf.c b/clang/test/DebugInfo/Generic/thinlto-split-dwarf.c
index b1db029..b1db029 100644
--- a/clang/test/CodeGen/thinlto-split-dwarf.c
+++ b/clang/test/DebugInfo/Generic/thinlto-split-dwarf.c
diff --git a/clang/test/CodeGen/debug-info-typedef.c b/clang/test/DebugInfo/Generic/typedef.c
index ea3b549..ea3b549 100644
--- a/clang/test/CodeGen/debug-info-typedef.c
+++ b/clang/test/DebugInfo/Generic/typedef.c
diff --git a/clang/test/CodeGen/ubsan-debuglog-return.c b/clang/test/DebugInfo/Generic/ubsan-debuglog-return.c
index 0a96465..0a96465 100644
--- a/clang/test/CodeGen/ubsan-debuglog-return.c
+++ b/clang/test/DebugInfo/Generic/ubsan-debuglog-return.c
diff --git a/clang/test/CodeGen/ubsan-function-debuginfo.c b/clang/test/DebugInfo/Generic/ubsan-function-debuginfo.c
index 106aece..106aece 100644
--- a/clang/test/CodeGen/ubsan-function-debuginfo.c
+++ b/clang/test/DebugInfo/Generic/ubsan-function-debuginfo.c
diff --git a/clang/test/CodeGen/ubsan-trap-debugloc.c b/clang/test/DebugInfo/Generic/ubsan-trap-debugloc.c
index 2f5258a..2f5258a 100644
--- a/clang/test/CodeGen/ubsan-trap-debugloc.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-debugloc.c
diff --git a/clang/test/DebugInfo/Generic/ubsan-trap-reason-add-overflow.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-add-overflow.c
new file mode 100644
index 0000000..862d434
--- /dev/null
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-add-overflow.c
@@ -0,0 +1,30 @@
+// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: -fsanitize=signed-integer-overflow,unsigned-integer-overflow \
+// RUN: -fsanitize-trap=signed-integer-overflow,unsigned-integer-overflow \
+// RUN: -emit-llvm %s -o - | FileCheck --check-prefixes=CHECK,DETAILED %s
+
+// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: -fsanitize=signed-integer-overflow,unsigned-integer-overflow \
+// RUN: -fsanitize-trap=signed-integer-overflow,unsigned-integer-overflow \
+// RUN: -fsanitize-debug-trap-reasons=basic \
+// RUN: -emit-llvm %s -o - | FileCheck --check-prefixes=CHECK,BASIC %s
+
+int sadd_overflow(int a, int b) { return a + b; }
+
+unsigned add_overflow(unsigned c, unsigned d) { return c + d; }
+
+// CHECK-LABEL: @sadd_overflow
+// CHECK: call void @llvm.ubsantrap(i8 0) {{.*}}!dbg [[SLOC:![0-9]+]]
+
+// CHECK-LABEL: @add_overflow
+// CHECK: call void @llvm.ubsantrap(i8 0) {{.*}}!dbg [[LOC:![0-9]+]]
+
+// DETAILED: [[SLOC]] = !DILocation(line: 0, scope: [[SMSG:![0-9]+]], {{.+}})
+// DETAILED: [[SMSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$signed integer addition overflow in 'a + b'"
+// DETAILED: [[LOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
+// DETAILED: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$unsigned integer addition overflow in 'c + d'"
+
+// In "Basic" mode the trap reason is shared by both functions.
+// BASIC: [[SLOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
+// BASIC: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$Integer addition overflowed"
+// BASIC: [[LOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
diff --git a/clang/test/CodeGen/ubsan-trap-reason-alignment-assumption.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-alignment-assumption.c
index 3247ceb..3247ceb 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-alignment-assumption.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-alignment-assumption.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-builtin-unreachable.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-builtin-unreachable.c
index 97bd690..97bd690 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-builtin-unreachable.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-builtin-unreachable.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-cfi-check-fail.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-cfi-check-fail.c
index 9304f51..9304f51 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-cfi-check-fail.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-cfi-check-fail.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-crash.cpp b/clang/test/DebugInfo/Generic/ubsan-trap-reason-crash.cpp
index 6add9bf..6add9bf 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-crash.cpp
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-crash.cpp
diff --git a/clang/test/CodeGen/ubsan-trap-reason-div-rem-overflow.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-div-rem-overflow.c
index d0b21dd..d0b21dd 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-div-rem-overflow.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-div-rem-overflow.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-dynamic-type-cache-miss.cpp b/clang/test/DebugInfo/Generic/ubsan-trap-reason-dynamic-type-cache-miss.cpp
index f89fbdcf..f89fbdcf 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-dynamic-type-cache-miss.cpp
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-dynamic-type-cache-miss.cpp
diff --git a/clang/test/DebugInfo/Generic/ubsan-trap-reason-flag.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-flag.c
new file mode 100644
index 0000000..2968e6b
--- /dev/null
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-flag.c
@@ -0,0 +1,47 @@
+// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: -fsanitize=signed-integer-overflow -fsanitize-trap=signed-integer-overflow -emit-llvm %s -o - \
+// RUN: | FileCheck %s --check-prefix=ANNOTATE
+
+//==============================================================================
+// Detailed trap reasons
+//==============================================================================
+
+// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: -fsanitize=signed-integer-overflow -fsanitize-trap=signed-integer-overflow \
+// RUN: -fsanitize-debug-trap-reasons -emit-llvm %s -o - | FileCheck %s --check-prefixes=ANNOTATE,DETAILED
+
+// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: -fsanitize=signed-integer-overflow -fsanitize-trap=signed-integer-overflow \
+// RUN: -fsanitize-debug-trap-reasons=detailed -emit-llvm %s -o - | FileCheck %s --check-prefixes=ANNOTATE,DETAILED
+
+//==============================================================================
+// Basic trap reasons
+//==============================================================================
+
+// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: -fsanitize=signed-integer-overflow -fsanitize-trap=signed-integer-overflow \
+// RUN: -fsanitize-debug-trap-reasons=basic -emit-llvm %s -o - | FileCheck %s --check-prefixes=ANNOTATE,BASIC
+
+//==============================================================================
+// No trap reasons
+//==============================================================================
+
+// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: -fsanitize=signed-integer-overflow -fsanitize-trap=signed-integer-overflow \
+// RUN: -fno-sanitize-debug-trap-reasons -emit-llvm %s -o - | FileCheck %s --check-prefix=NO-ANNOTATE
+
+// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: -fsanitize=signed-integer-overflow -fsanitize-trap=signed-integer-overflow \
+// RUN: -fsanitize-debug-trap-reasons=none -emit-llvm %s -o - | FileCheck %s --check-prefix=NO-ANNOTATE
+
+int add_overflow(int a, int b) { return a + b; }
+
+// ANNOTATE-LABEL: @add_overflow
+// ANNOTATE: call void @llvm.ubsantrap(i8 0) {{.*}}!dbg [[LOC:![0-9]+]]
+// ANNOTATE: [[LOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
+// DETAILED: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$signed integer addition overflow in 'a + b'"
+// BASIC: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$Integer addition overflowed"
+
+// NO-ANNOTATE-LABEL: @add_overflow
+// NO-ANNOTATE: call void @llvm.ubsantrap(i8 0) {{.*}}!dbg [[LOC:![0-9]+]]
+// NO-ANNOTATE-NOT: __clang_trap_msg
diff --git a/clang/test/CodeGen/ubsan-trap-reason-float-cast-overflow.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-float-cast-overflow.c
index 079a191e..079a191e 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-float-cast-overflow.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-float-cast-overflow.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-function-type-mismatch.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-function-type-mismatch.c
index 1727f9c..1727f9c 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-function-type-mismatch.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-function-type-mismatch.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-implicit-conversion.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-implicit-conversion.c
index 43c091d..43c091d 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-implicit-conversion.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-implicit-conversion.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-invalid-builtin.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-invalid-builtin.c
index 56cf674..56cf674 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-invalid-builtin.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-invalid-builtin.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-invalid-objc-cast.m b/clang/test/DebugInfo/Generic/ubsan-trap-reason-invalid-objc-cast.m
index ed2d5ff..ed2d5ff 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-invalid-objc-cast.m
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-invalid-objc-cast.m
diff --git a/clang/test/CodeGen/ubsan-trap-reason-load-invalid-value.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-load-invalid-value.c
index 4aad832..4aad832 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-load-invalid-value.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-load-invalid-value.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-missing-return.cpp b/clang/test/DebugInfo/Generic/ubsan-trap-reason-missing-return.cpp
index 2818d9d..2818d9d 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-missing-return.cpp
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-missing-return.cpp
diff --git a/clang/test/DebugInfo/Generic/ubsan-trap-reason-mul-overflow.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-mul-overflow.c
new file mode 100644
index 0000000..ba3928d
--- /dev/null
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-mul-overflow.c
@@ -0,0 +1,30 @@
+// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: -fsanitize=signed-integer-overflow,unsigned-integer-overflow \
+// RUN: -fsanitize-trap=signed-integer-overflow,unsigned-integer-overflow \
+// RUN: -emit-llvm %s -o - | FileCheck --check-prefixes=CHECK,DETAILED %s
+
+// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: -fsanitize=signed-integer-overflow,unsigned-integer-overflow \
+// RUN: -fsanitize-trap=signed-integer-overflow,unsigned-integer-overflow \
+// RUN: -fsanitize-debug-trap-reasons=basic \
+// RUN: -emit-llvm %s -o - | FileCheck --check-prefixes=CHECK,BASIC %s
+
+int smul_overflow(int a, int b) { return a * b; }
+
+unsigned mul_overflow(unsigned c, unsigned d) { return c * d; }
+
+// CHECK-LABEL: @smul_overflow
+// CHECK: call void @llvm.ubsantrap(i8 12) {{.*}}!dbg [[SLOC:![0-9]+]]
+
+// CHECK-LABEL: @mul_overflow
+// CHECK: call void @llvm.ubsantrap(i8 12) {{.*}}!dbg [[LOC:![0-9]+]]
+
+// DETAILED: [[SLOC]] = !DILocation(line: 0, scope: [[SMSG:![0-9]+]], {{.+}})
+// DETAILED: [[SMSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$signed integer multiplication overflow in 'a * b'"
+// DETAILED: [[LOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
+// DETAILED: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$unsigned integer multiplication overflow in 'c * d'"
+
+// In "Basic" mode the trap reason is shared by both functions.
+// BASIC: [[SLOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
+// BASIC: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$Integer multiplication overflowed"
+// BASIC: [[LOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
diff --git a/clang/test/CodeGen/ubsan-trap-reason-negate-overflow.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-negate-overflow.c
index 5534679..5534679 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-negate-overflow.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-negate-overflow.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-nonnull-arg.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-nonnull-arg.c
index 1f0f450..1f0f450 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-nonnull-arg.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-nonnull-arg.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-nonnull-return.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-nonnull-return.c
index 1197b4d..1197b4d 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-nonnull-return.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-nonnull-return.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-nullability-arg.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-nullability-arg.c
index 2bc71de..2bc71de 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-nullability-arg.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-nullability-arg.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-nullability-return.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-nullability-return.c
index 3d64c5a..3d64c5a 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-nullability-return.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-nullability-return.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-out-of-bounds.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-out-of-bounds.c
index 979886d..979886d 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-out-of-bounds.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-out-of-bounds.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-pointer-overflow.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-pointer-overflow.c
index 41cb487..41cb487 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-pointer-overflow.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-pointer-overflow.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-shift-out-of-bounds.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-shift-out-of-bounds.c
index 1a7465d..1a7465d 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-shift-out-of-bounds.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-shift-out-of-bounds.c
diff --git a/clang/test/DebugInfo/Generic/ubsan-trap-reason-sub-overflow.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-sub-overflow.c
new file mode 100644
index 0000000..596d777
--- /dev/null
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-sub-overflow.c
@@ -0,0 +1,30 @@
+// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: -fsanitize=signed-integer-overflow,unsigned-integer-overflow \
+// RUN: -fsanitize-trap=signed-integer-overflow,unsigned-integer-overflow \
+// RUN: -emit-llvm %s -o - | FileCheck --check-prefixes=CHECK,DETAILED %s
+
+// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -O0 -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: -fsanitize=signed-integer-overflow,unsigned-integer-overflow \
+// RUN: -fsanitize-trap=signed-integer-overflow,unsigned-integer-overflow \
+// RUN: -fsanitize-debug-trap-reasons=basic \
+// RUN: -emit-llvm %s -o - | FileCheck --check-prefixes=CHECK,BASIC %s
+
+int ssub_overflow(int a, int b) { return a - b; }
+
+unsigned sub_overflow(unsigned c, unsigned d) { return c - d; }
+
+// CHECK-LABEL: @ssub_overflow
+// CHECK: call void @llvm.ubsantrap(i8 21) {{.*}}!dbg [[SLOC:![0-9]+]]
+
+// CHECK-LABEL: @sub_overflow
+// CHECK: call void @llvm.ubsantrap(i8 21) {{.*}}!dbg [[LOC:![0-9]+]]
+
+// DETAILED: [[SLOC]] = !DILocation(line: 0, scope: [[SMSG:![0-9]+]], {{.+}})
+// DETAILED: [[SMSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$signed integer subtraction overflow in 'a - b'"
+// DETAILED: [[LOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
+// DETAILED: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$unsigned integer subtraction overflow in 'c - d'"
+
+// In "Basic" mode the trap reason is shared by both functions.
+// BASIC: [[SLOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
+// BASIC: [[MSG]] = distinct !DISubprogram(name: "__clang_trap_msg$Undefined Behavior Sanitizer$Integer subtraction overflowed"
+// BASIC: [[LOC]] = !DILocation(line: 0, scope: [[MSG:![0-9]+]], {{.+}})
diff --git a/clang/test/CodeGen/ubsan-trap-reason-type-mismatch.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-type-mismatch.c
index 802ec91..802ec91 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-type-mismatch.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-type-mismatch.c
diff --git a/clang/test/CodeGen/ubsan-trap-reason-vla-bound-not-positive.c b/clang/test/DebugInfo/Generic/ubsan-trap-reason-vla-bound-not-positive.c
index ad9c408..ad9c408 100644
--- a/clang/test/CodeGen/ubsan-trap-reason-vla-bound-not-positive.c
+++ b/clang/test/DebugInfo/Generic/ubsan-trap-reason-vla-bound-not-positive.c
diff --git a/clang/test/CodeGen/unique-internal-linkage-names-dwarf.c b/clang/test/DebugInfo/Generic/unique-internal-linkage-names-dwarf.c
index 5a40118..5a40118 100644
--- a/clang/test/CodeGen/unique-internal-linkage-names-dwarf.c
+++ b/clang/test/DebugInfo/Generic/unique-internal-linkage-names-dwarf.c
diff --git a/clang/test/CodeGen/unique-internal-linkage-names-dwarf.cpp b/clang/test/DebugInfo/Generic/unique-internal-linkage-names-dwarf.cpp
index cdd7062..cdd7062 100644
--- a/clang/test/CodeGen/unique-internal-linkage-names-dwarf.cpp
+++ b/clang/test/DebugInfo/Generic/unique-internal-linkage-names-dwarf.cpp
diff --git a/clang/test/CodeGen/unsigned-promotion-debuginfo.c b/clang/test/DebugInfo/Generic/unsigned-promotion-debuginfo.c
index 88e691d..88e691d 100644
--- a/clang/test/CodeGen/unsigned-promotion-debuginfo.c
+++ b/clang/test/DebugInfo/Generic/unsigned-promotion-debuginfo.c
diff --git a/clang/test/CodeGen/debug-info-unused-types.c b/clang/test/DebugInfo/Generic/unused-types.c
index 3e9f7b0..3e9f7b0 100644
--- a/clang/test/CodeGen/debug-info-unused-types.c
+++ b/clang/test/DebugInfo/Generic/unused-types.c
diff --git a/clang/test/CodeGen/debug-info-unused-types.cpp b/clang/test/DebugInfo/Generic/unused-types.cpp
index 023cac1..023cac1 100644
--- a/clang/test/CodeGen/debug-info-unused-types.cpp
+++ b/clang/test/DebugInfo/Generic/unused-types.cpp
diff --git a/clang/test/CodeGen/unused_nested_enump.cpp b/clang/test/DebugInfo/Generic/unused_nested_enump.cpp
index 7689b5b..7689b5b 100644
--- a/clang/test/CodeGen/unused_nested_enump.cpp
+++ b/clang/test/DebugInfo/Generic/unused_nested_enump.cpp
diff --git a/clang/test/CodeGen/debug-info-variables.c b/clang/test/DebugInfo/Generic/variables.c
index 9fbace7..9fbace7 100644
--- a/clang/test/CodeGen/debug-info-variables.c
+++ b/clang/test/DebugInfo/Generic/variables.c
diff --git a/clang/test/CodeGen/debug-info-vector-bool.c b/clang/test/DebugInfo/Generic/vector-bool.c
index e1dfd2c..e1dfd2c 100644
--- a/clang/test/CodeGen/debug-info-vector-bool.c
+++ b/clang/test/DebugInfo/Generic/vector-bool.c
diff --git a/clang/test/CodeGen/debug-info-vector.c b/clang/test/DebugInfo/Generic/vector.c
index 93e6392..93e6392 100644
--- a/clang/test/CodeGen/debug-info-vector.c
+++ b/clang/test/DebugInfo/Generic/vector.c
diff --git a/clang/test/CodeGen/debug-info-version-coff.c b/clang/test/DebugInfo/Generic/version-coff.c
index 6497a58..6497a58 100644
--- a/clang/test/CodeGen/debug-info-version-coff.c
+++ b/clang/test/DebugInfo/Generic/version-coff.c
diff --git a/clang/test/CodeGen/debug-info-version.c b/clang/test/DebugInfo/Generic/version.c
index c7c2bb9..485b80e 100644
--- a/clang/test/CodeGen/debug-info-version.c
+++ b/clang/test/DebugInfo/Generic/version.c
@@ -1,4 +1,4 @@
-// REQUIRES: !system-windows
+// REQUIRES: !system-windows, !system-cygwin
// RUN: %clang -g -S -emit-llvm -o - %s | FileCheck %s
// RUN: %clang -S -emit-llvm -o - %s | FileCheck %s --check-prefix=NO_DEBUG
int main (void) {
diff --git a/clang/test/CodeGen/debug-info-vla.c b/clang/test/DebugInfo/Generic/vla.c
index e949411..e949411 100644
--- a/clang/test/CodeGen/debug-info-vla.c
+++ b/clang/test/DebugInfo/Generic/vla.c
diff --git a/clang/test/DebugInfo/KeyInstructions/flag.cpp b/clang/test/DebugInfo/KeyInstructions/flag.cpp
index e34faa6c..a5cd855 100644
--- a/clang/test/DebugInfo/KeyInstructions/flag.cpp
+++ b/clang/test/DebugInfo/KeyInstructions/flag.cpp
@@ -1,7 +1,5 @@
// RUN: %clang -### -target x86_64 -c -gdwarf -gkey-instructions %s 2>&1 | FileCheck %s --check-prefixes=KEY-INSTRUCTIONS
// RUN: %clang -### -target x86_64 -c -gdwarf -gno-key-instructions %s 2>&1 | FileCheck %s --check-prefixes=NO-KEY-INSTRUCTIONS
-//// Default: Off.
-// RUN: %clang -### -target x86_64 -c -gdwarf %s 2>&1 | FileCheck %s --check-prefixes=NO-KEY-INSTRUCTIONS
//// Help.
// RUN %clang --help | FileCheck %s --check-prefix=HELP
@@ -23,3 +21,43 @@ void f() {}
// RUN: %clang_cc1 %s -triple x86_64-linux-gnu -gkey-instructions -debug-info-kind=line-tables-only -emit-llvm -o - | FileCheck %s --check-prefix=SMOKETEST-ON
// SMOKETEST-ON: keyInstructions: true
// SMOKETEST-ON: atomGroup: 1
+
+//// Enable Key Instructions by default if optimisations are enabled and we're
+//// emitting DWARF.
+////
+//// | opt level | -gkey-instructions | feature |
+//// | 0 | no | off |
+//// | 0 | yes | on |
+//// | >=1 | no | on |
+//// | >=1 | yes | on |
+//// | >=1 | no & no -g flags | off |
+//// | >=1 | no & emit codeview | off |
+//
+// RUN: %clang %s -target x86_64 -gdwarf -gmlt -### 2>&1 | FileCheck %s --check-prefix=NO-KEY-INSTRUCTIONS
+// RUN: %clang %s -target x86_64 -gdwarf -gmlt -gkey-instructions -### 2>&1 | FileCheck %s --check-prefix=KEY-INSTRUCTIONS
+// RUN: %clang %s -O0 -target x86_64 -gdwarf -gmlt -### 2>&1 | FileCheck %s --check-prefix=NO-KEY-INSTRUCTIONS
+// RUN: %clang %s -O0 -target x86_64 -gdwarf -gmlt -gkey-instructions -### 2>&1 | FileCheck %s --check-prefix=KEY-INSTRUCTIONS
+// RUN: %clang %s -O1 -target x86_64 -gdwarf -gmlt -### 2>&1 | FileCheck %s --check-prefix=KEY-INSTRUCTIONS
+// RUN: %clang %s -O1 -target x86_64 -gdwarf -gmlt -gkey-instructions -### 2>&1 | FileCheck %s --check-prefix=KEY-INSTRUCTIONS
+// RUN: %clang %s -O1 -target x86_64 -### 2>&1 | FileCheck %s --check-prefixes=NO-KEY-INSTRUCTIONS
+// RUN: %clang %s -O1 -target x86_64 -gcodeview -gmlt -### 2>&1 | FileCheck %s --check-prefixes=NO-KEY-INSTRUCTIONS
+//
+// RUN: %clang %s -target x86_64 -gdwarf -gmlt -S -emit-llvm -o - | FileCheck %s --check-prefix=SMOKETEST-OFF
+// RUN: %clang %s -target x86_64 -gdwarf -gmlt -gkey-instructions -S -emit-llvm -o - | FileCheck %s --check-prefix=SMOKETEST-ON
+// RUN: %clang %s -O0 -target x86_64 -gdwarf -gmlt -S -emit-llvm -o - | FileCheck %s --check-prefix=SMOKETEST-OFF
+// RUN: %clang %s -O0 -target x86_64 -gdwarf -gmlt -gkey-instructions -S -emit-llvm -o - | FileCheck %s --check-prefix=SMOKETEST-ON
+// RUN: %clang %s -O1 -target x86_64 -gdwarf -gmlt -S -emit-llvm -o - | FileCheck %s --check-prefix=SMOKETEST-ON
+// RUN: %clang %s -O1 -target x86_64 -gdwarf -gmlt -gkey-instructions -S -emit-llvm -o - | FileCheck %s --check-prefix=SMOKETEST-ON
+// RUN: %clang %s -O1 -target x86_64 -S -emit-llvm -o - | FileCheck %s --check-prefixes=SMOKETEST-OFF,SMOKETEST-NO-DEBUG
+// RUN: %clang %s -O1 -target x86_64 -gcodeview -gmlt -S -emit-llvm -o - | FileCheck %s --check-prefixes=SMOKETEST-OFF
+// SMOKETEST-NO-DEBUG: llvm.module.flags
+// SMOKETEST-NO-DEBUG-NOT: DICompileUnit
+
+//// Check that only "plain" C/C++ turns on Key Instructions by default.
+// RUN: %clang -x c %s -O1 -target x86_64 -gdwarf -gmlt -### 2>&1 | FileCheck %s --check-prefix=KEY-INSTRUCTIONS
+// RUN: %clang -x c++ %s -O1 -target x86_64 -gdwarf -gmlt -### 2>&1 | FileCheck %s --check-prefix=KEY-INSTRUCTIONS
+// RUN: %clang -x cuda -nocudalib -nocudainc %s -O1 -target x86_64 -gdwarf -gmlt -### 2>&1 | FileCheck %s --check-prefix=NO-KEY-INSTRUCTIONS
+// RUN: %clang -x hip -nogpulib -nogpuinc %s -O1 -target x86_64 -gdwarf -gmlt -### 2>&1 | FileCheck %s --check-prefix=NO-KEY-INSTRUCTIONS
+// RUN: %clang -x cl %s -O1 -target x86_64 -gdwarf -gmlt -### 2>&1 | FileCheck %s --check-prefix=NO-KEY-INSTRUCTIONS
+// RUN: %clang -x objective-c %s -O1 -target x86_64 -gdwarf -gmlt -### 2>&1 | FileCheck %s --check-prefix=NO-KEY-INSTRUCTIONS
+// RUN: %clang -x objective-c++ %s -O1 -target x86_64 -gdwarf -gmlt -### 2>&1 | FileCheck %s --check-prefix=NO-KEY-INSTRUCTIONS
diff --git a/clang/test/DebugInfo/KeyInstructions/lit.local.cfg b/clang/test/DebugInfo/KeyInstructions/lit.local.cfg
deleted file mode 100644
index 482bd5c..0000000
--- a/clang/test/DebugInfo/KeyInstructions/lit.local.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-if not config.has_key_instructions:
- config.unsupported = True
diff --git a/clang/test/CodeGenObjC/2009-01-21-invalid-debug-info.m b/clang/test/DebugInfo/ObjC/2009-01-21-invalid.m
index 8278708..8278708 100644
--- a/clang/test/CodeGenObjC/2009-01-21-invalid-debug-info.m
+++ b/clang/test/DebugInfo/ObjC/2009-01-21-invalid.m
diff --git a/clang/test/CodeGenObjC/2010-02-09-DbgSelf.m b/clang/test/DebugInfo/ObjC/2010-02-09-Self.m
index 7696163..7696163 100644
--- a/clang/test/CodeGenObjC/2010-02-09-DbgSelf.m
+++ b/clang/test/DebugInfo/ObjC/2010-02-09-Self.m
diff --git a/clang/test/CodeGenObjC/2010-02-15-Dbg-MethodStart.m b/clang/test/DebugInfo/ObjC/2010-02-15-MethodStart.m
index 31d4e69..31d4e69 100644
--- a/clang/test/CodeGenObjC/2010-02-15-Dbg-MethodStart.m
+++ b/clang/test/DebugInfo/ObjC/2010-02-15-MethodStart.m
diff --git a/clang/test/CodeGenObjC/2010-02-23-DbgInheritance.m b/clang/test/DebugInfo/ObjC/2010-02-23-Inheritance.m
index cb8b5f8..cb8b5f8 100644
--- a/clang/test/CodeGenObjC/2010-02-23-DbgInheritance.m
+++ b/clang/test/DebugInfo/ObjC/2010-02-23-Inheritance.m
diff --git a/clang/test/CodeGenObjC/arc-linetable-autorelease.m b/clang/test/DebugInfo/ObjC/arc-linetable-autorelease.m
index 93eeaca..93eeaca 100644
--- a/clang/test/CodeGenObjC/arc-linetable-autorelease.m
+++ b/clang/test/DebugInfo/ObjC/arc-linetable-autorelease.m
diff --git a/clang/test/CodeGenObjC/arc-linetable.m b/clang/test/DebugInfo/ObjC/arc-linetable.m
index 60e0e0f..60e0e0f 100644
--- a/clang/test/CodeGenObjC/arc-linetable.m
+++ b/clang/test/DebugInfo/ObjC/arc-linetable.m
diff --git a/clang/test/CodeGenObjC/block-byref-debuginfo.m b/clang/test/DebugInfo/ObjC/block-byref.m
index d815d8d..d815d8d 100644
--- a/clang/test/CodeGenObjC/block-byref-debuginfo.m
+++ b/clang/test/DebugInfo/ObjC/block-byref.m
diff --git a/clang/test/CodeGenObjC/debug-info-block-helper.m b/clang/test/DebugInfo/ObjC/block-helper.m
index ac4ba39..ac4ba39 100644
--- a/clang/test/CodeGenObjC/debug-info-block-helper.m
+++ b/clang/test/DebugInfo/ObjC/block-helper.m
diff --git a/clang/test/CodeGenObjC/debug-info-block-line.m b/clang/test/DebugInfo/ObjC/block-line.m
index 35afbb6..35afbb6 100644
--- a/clang/test/CodeGenObjC/debug-info-block-line.m
+++ b/clang/test/DebugInfo/ObjC/block-line.m
diff --git a/clang/test/CodeGenObjC/debug-info-block-type.m b/clang/test/DebugInfo/ObjC/block-type.m
index 943dfd5..943dfd5 100644
--- a/clang/test/CodeGenObjC/debug-info-block-type.m
+++ b/clang/test/DebugInfo/ObjC/block-type.m
diff --git a/clang/test/CodeGenObjC/blocks-ivar-debug.m b/clang/test/DebugInfo/ObjC/blocks-ivar.m
index 4d1ca0d..4d1ca0d 100644
--- a/clang/test/CodeGenObjC/blocks-ivar-debug.m
+++ b/clang/test/DebugInfo/ObjC/blocks-ivar.m
diff --git a/clang/test/CodeGenObjC/debug-info-blocks.m b/clang/test/DebugInfo/ObjC/blocks.m
index de4eec4f..de4eec4f 100644
--- a/clang/test/CodeGenObjC/debug-info-blocks.m
+++ b/clang/test/DebugInfo/ObjC/blocks.m
diff --git a/clang/test/CodeGenObjC/catch-lexical-block.m b/clang/test/DebugInfo/ObjC/catch-lexical-block.m
index a7f5b36..a7f5b36 100644
--- a/clang/test/CodeGenObjC/catch-lexical-block.m
+++ b/clang/test/DebugInfo/ObjC/catch-lexical-block.m
diff --git a/clang/test/CodeGenObjC/debug-info-category.m b/clang/test/DebugInfo/ObjC/category.m
index 4cd71f2..4cd71f2 100644
--- a/clang/test/CodeGenObjC/debug-info-category.m
+++ b/clang/test/DebugInfo/ObjC/category.m
diff --git a/clang/test/CodeGenObjC/debug-info-class-extension.m b/clang/test/DebugInfo/ObjC/class-extension.m
index db654e6..db654e6 100644
--- a/clang/test/CodeGenObjC/debug-info-class-extension.m
+++ b/clang/test/DebugInfo/ObjC/class-extension.m
diff --git a/clang/test/CodeGenObjC/debug-info-class-extension2.m b/clang/test/DebugInfo/ObjC/class-extension2.m
index ea7865b..ea7865b 100644
--- a/clang/test/CodeGenObjC/debug-info-class-extension2.m
+++ b/clang/test/DebugInfo/ObjC/class-extension2.m
diff --git a/clang/test/CodeGenObjC/debug-info-class-extension3.m b/clang/test/DebugInfo/ObjC/class-extension3.m
index f81445b..f81445b 100644
--- a/clang/test/CodeGenObjC/debug-info-class-extension3.m
+++ b/clang/test/DebugInfo/ObjC/class-extension3.m
diff --git a/clang/test/CodeGenObjC/debug-info-crash-2.m b/clang/test/DebugInfo/ObjC/crash-2.m
index e464cc7..e464cc7 100644
--- a/clang/test/CodeGenObjC/debug-info-crash-2.m
+++ b/clang/test/DebugInfo/ObjC/crash-2.m
diff --git a/clang/test/CodeGenObjC/debug-info-crash.m b/clang/test/DebugInfo/ObjC/crash.m
index 845a65e..845a65e 100644
--- a/clang/test/CodeGenObjC/debug-info-crash.m
+++ b/clang/test/DebugInfo/ObjC/crash.m
diff --git a/clang/test/CodeGenObjC/debug-info-default-synth-ivar.m b/clang/test/DebugInfo/ObjC/default-synth-ivar.m
index a0ce783..a0ce783 100644
--- a/clang/test/CodeGenObjC/debug-info-default-synth-ivar.m
+++ b/clang/test/DebugInfo/ObjC/default-synth-ivar.m
diff --git a/clang/test/CodeGenObjC/debug-info-direct-method.m b/clang/test/DebugInfo/ObjC/direct-method.m
index e5e2939..e5e2939 100644
--- a/clang/test/CodeGenObjC/debug-info-direct-method.m
+++ b/clang/test/DebugInfo/ObjC/direct-method.m
diff --git a/clang/test/CodeGenObjC/debug-info-fwddecl.m b/clang/test/DebugInfo/ObjC/fwddecl.m
index 8f419de..8f419de 100644
--- a/clang/test/CodeGenObjC/debug-info-fwddecl.m
+++ b/clang/test/DebugInfo/ObjC/fwddecl.m
diff --git a/clang/test/CodeGenObjC/debug-info-getter-name.m b/clang/test/DebugInfo/ObjC/getter-name.m
index 6ee113b..6ee113b 100644
--- a/clang/test/CodeGenObjC/debug-info-getter-name.m
+++ b/clang/test/DebugInfo/ObjC/getter-name.m
diff --git a/clang/test/CodeGenObjC/debug-info-id-with-protocol.m b/clang/test/DebugInfo/ObjC/id-with-protocol.m
index 38f69ef..38f69ef 100644
--- a/clang/test/CodeGenObjC/debug-info-id-with-protocol.m
+++ b/clang/test/DebugInfo/ObjC/id-with-protocol.m
diff --git a/clang/test/CodeGenObjC/debug-info-impl.m b/clang/test/DebugInfo/ObjC/impl.m
index 0e08295..0e08295 100644
--- a/clang/test/CodeGenObjC/debug-info-impl.m
+++ b/clang/test/DebugInfo/ObjC/impl.m
diff --git a/clang/test/CodeGenObjC/debug-info-instancetype.m b/clang/test/DebugInfo/ObjC/instancetype.m
index c4c857c..c4c857c 100644
--- a/clang/test/CodeGenObjC/debug-info-instancetype.m
+++ b/clang/test/DebugInfo/ObjC/instancetype.m
diff --git a/clang/test/CodeGenObjC/debug-info-ivars-extension.m b/clang/test/DebugInfo/ObjC/ivars-extension.m
index 2091b56..2091b56 100644
--- a/clang/test/CodeGenObjC/debug-info-ivars-extension.m
+++ b/clang/test/DebugInfo/ObjC/ivars-extension.m
diff --git a/clang/test/CodeGenObjC/debug-info-ivars-indirect.m b/clang/test/DebugInfo/ObjC/ivars-indirect.m
index 6b13fbf..6b13fbf 100644
--- a/clang/test/CodeGenObjC/debug-info-ivars-indirect.m
+++ b/clang/test/DebugInfo/ObjC/ivars-indirect.m
diff --git a/clang/test/CodeGenObjC/debug-info-ivars-private.m b/clang/test/DebugInfo/ObjC/ivars-private.m
index 583868e..583868e 100644
--- a/clang/test/CodeGenObjC/debug-info-ivars-private.m
+++ b/clang/test/DebugInfo/ObjC/ivars-private.m
diff --git a/clang/test/CodeGenObjC/debug-info-ivars.m b/clang/test/DebugInfo/ObjC/ivars.m
index dc253b2..dc253b2 100644
--- a/clang/test/CodeGenObjC/debug-info-ivars.m
+++ b/clang/test/DebugInfo/ObjC/ivars.m
diff --git a/clang/test/CodeGenObjC/layout-bitfield-crash.m b/clang/test/DebugInfo/ObjC/layout-bitfield-crash.m
index 6f0943e..6f0943e 100644
--- a/clang/test/CodeGenObjC/layout-bitfield-crash.m
+++ b/clang/test/DebugInfo/ObjC/layout-bitfield-crash.m
diff --git a/clang/test/CodeGenObjC/debug-info-lifetime-crash.m b/clang/test/DebugInfo/ObjC/lifetime-crash.m
index 2c3b7f0..2c3b7f0 100644
--- a/clang/test/CodeGenObjC/debug-info-lifetime-crash.m
+++ b/clang/test/DebugInfo/ObjC/lifetime-crash.m
diff --git a/clang/test/CodeGenObjC/debug-info-linkagename.m b/clang/test/DebugInfo/ObjC/linkagename.m
index 94d438a..94d438a 100644
--- a/clang/test/CodeGenObjC/debug-info-linkagename.m
+++ b/clang/test/DebugInfo/ObjC/linkagename.m
diff --git a/clang/test/DebugInfo/ObjC/lit.local.cfg b/clang/test/DebugInfo/ObjC/lit.local.cfg
new file mode 100644
index 0000000..b48f68b
--- /dev/null
+++ b/clang/test/DebugInfo/ObjC/lit.local.cfg
@@ -0,0 +1,7 @@
+# Objective-C is not supported on AIX and z/OS.
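+# Setting config.unsupported makes lit report every test in this directory as
+# unsupported instead of running it.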
+unsupported_platforms = [ "system-aix", "system-zos" ]
+
+if any(up in config.available_features for up in unsupported_platforms):
+    config.unsupported = True
diff --git a/clang/test/CodeGenObjC/debug-info-nested-blocks.m b/clang/test/DebugInfo/ObjC/nested-blocks.m
index de30eed..de30eed 100644
--- a/clang/test/CodeGenObjC/debug-info-nested-blocks.m
+++ b/clang/test/DebugInfo/ObjC/nested-blocks.m
diff --git a/clang/test/CodeGenObjC/debug-info-nodebug.m b/clang/test/DebugInfo/ObjC/nodebug.m
index 42d630b..42d630b 100644
--- a/clang/test/CodeGenObjC/debug-info-nodebug.m
+++ b/clang/test/DebugInfo/ObjC/nodebug.m
diff --git a/clang/test/CodeGenObjC/nontrivial-c-struct-exception.m b/clang/test/DebugInfo/ObjC/nontrivial-c-struct-exception.m
index ba8a04b..ba8a04b 100644
--- a/clang/test/CodeGenObjC/nontrivial-c-struct-exception.m
+++ b/clang/test/DebugInfo/ObjC/nontrivial-c-struct-exception.m
diff --git a/clang/test/CodeGenObjC/objc-fixed-enum.m b/clang/test/DebugInfo/ObjC/objc-fixed-enum.m
index 6ac2ae6..6ac2ae6 100644
--- a/clang/test/CodeGenObjC/objc-fixed-enum.m
+++ b/clang/test/DebugInfo/ObjC/objc-fixed-enum.m
diff --git a/clang/test/CodeGenObjC/debug-info-objc-property-dwarf5.m b/clang/test/DebugInfo/ObjC/objc-property-dwarf5.m
index 2b3a86f..2b3a86f 100644
--- a/clang/test/CodeGenObjC/debug-info-objc-property-dwarf5.m
+++ b/clang/test/DebugInfo/ObjC/objc-property-dwarf5.m
diff --git a/clang/test/CodeGenObjC/objc2-weak-ivar-debug.m b/clang/test/DebugInfo/ObjC/objc2-weak-ivar.m
index f2096dc..f2096dc 100644
--- a/clang/test/CodeGenObjC/objc2-weak-ivar-debug.m
+++ b/clang/test/DebugInfo/ObjC/objc2-weak-ivar.m
diff --git a/clang/test/CodeGenObjC/debuginfo-properties.m b/clang/test/DebugInfo/ObjC/properties.m
index 3c9d8f8..3c9d8f8 100644
--- a/clang/test/CodeGenObjC/debuginfo-properties.m
+++ b/clang/test/DebugInfo/ObjC/properties.m
diff --git a/clang/test/CodeGenObjC/property-dbg.m b/clang/test/DebugInfo/ObjC/property-2.m
index f152131..f152131 100644
--- a/clang/test/CodeGenObjC/property-dbg.m
+++ b/clang/test/DebugInfo/ObjC/property-2.m
diff --git a/clang/test/CodeGenObjC/debug-info-property-accessors.m b/clang/test/DebugInfo/ObjC/property-accessors.m
index 22e5491..22e5491 100644
--- a/clang/test/CodeGenObjC/debug-info-property-accessors.m
+++ b/clang/test/DebugInfo/ObjC/property-accessors.m
diff --git a/clang/test/CodeGenObjC/debug-info-property-class-extension.m b/clang/test/DebugInfo/ObjC/property-class-extension.m
index 58f72f5..58f72f5 100644
--- a/clang/test/CodeGenObjC/debug-info-property-class-extension.m
+++ b/clang/test/DebugInfo/ObjC/property-class-extension.m
diff --git a/clang/test/CodeGenObjC/debug-info-property-class-instance-same-name.m b/clang/test/DebugInfo/ObjC/property-class-instance-same-name.m
index 930544e..930544e 100644
--- a/clang/test/CodeGenObjC/debug-info-property-class-instance-same-name.m
+++ b/clang/test/DebugInfo/ObjC/property-class-instance-same-name.m
diff --git a/clang/test/CodeGenObjC/debug-property-synth.m b/clang/test/DebugInfo/ObjC/property-synth.m
index e320516..e320516 100644
--- a/clang/test/CodeGenObjC/debug-property-synth.m
+++ b/clang/test/DebugInfo/ObjC/property-synth.m
diff --git a/clang/test/CodeGenObjC/debug-info-property.m b/clang/test/DebugInfo/ObjC/property.m
index ca013b2..ca013b2 100644
--- a/clang/test/CodeGenObjC/debug-info-property.m
+++ b/clang/test/DebugInfo/ObjC/property.m
diff --git a/clang/test/CodeGenObjC/debug-info-property2.m b/clang/test/DebugInfo/ObjC/property2.m
index 7e0a5e9..7e0a5e9 100644
--- a/clang/test/CodeGenObjC/debug-info-property2.m
+++ b/clang/test/DebugInfo/ObjC/property2.m
diff --git a/clang/test/CodeGenObjC/debug-info-property3.m b/clang/test/DebugInfo/ObjC/property3.m
index d76988d..d76988d 100644
--- a/clang/test/CodeGenObjC/debug-info-property3.m
+++ b/clang/test/DebugInfo/ObjC/property3.m
diff --git a/clang/test/CodeGenObjC/debug-info-property4.m b/clang/test/DebugInfo/ObjC/property4.m
index 1f489f2..1f489f2 100644
--- a/clang/test/CodeGenObjC/debug-info-property4.m
+++ b/clang/test/DebugInfo/ObjC/property4.m
diff --git a/clang/test/CodeGenObjC/debug-info-property5.m b/clang/test/DebugInfo/ObjC/property5.m
index 8b70f1f..8b70f1f 100644
--- a/clang/test/CodeGenObjC/debug-info-property5.m
+++ b/clang/test/DebugInfo/ObjC/property5.m
diff --git a/clang/test/CodeGenObjC/debug-info-pubtypes.m b/clang/test/DebugInfo/ObjC/pubtypes.m
index ce3896f..ce3896f 100644
--- a/clang/test/CodeGenObjC/debug-info-pubtypes.m
+++ b/clang/test/DebugInfo/ObjC/pubtypes.m
diff --git a/clang/test/CodeGenObjC/debug-info-selector.m b/clang/test/DebugInfo/ObjC/selector.m
index 9946f3d..9946f3d 100644
--- a/clang/test/CodeGenObjC/debug-info-selector.m
+++ b/clang/test/DebugInfo/ObjC/selector.m
diff --git a/clang/test/CodeGenObjC/debug-info-self.m b/clang/test/DebugInfo/ObjC/self.m
index 0391ac4..0391ac4 100644
--- a/clang/test/CodeGenObjC/debug-info-self.m
+++ b/clang/test/DebugInfo/ObjC/self.m
diff --git a/clang/test/CodeGenObjC/debug-info-static-var.m b/clang/test/DebugInfo/ObjC/static-var.m
index f98f454..f98f454 100644
--- a/clang/test/CodeGenObjC/debug-info-static-var.m
+++ b/clang/test/DebugInfo/ObjC/static-var.m
diff --git a/clang/test/CodeGenObjC/debug-info-synthesis.m b/clang/test/DebugInfo/ObjC/synthesis.m
index 7fbbc6d..7fbbc6d 100644
--- a/clang/test/CodeGenObjC/debug-info-synthesis.m
+++ b/clang/test/DebugInfo/ObjC/synthesis.m
diff --git a/clang/test/CodeGenObjC/ubsan-check-debuglocs.m b/clang/test/DebugInfo/ObjC/ubsan-check-debuglocs.m
index d85c8e8..d85c8e8 100644
--- a/clang/test/CodeGenObjC/ubsan-check-debuglocs.m
+++ b/clang/test/DebugInfo/ObjC/ubsan-check-debuglocs.m
diff --git a/clang/test/CodeGenObjC/debug-info-variadic-method.m b/clang/test/DebugInfo/ObjC/variadic-method.m
index d570198..d570198 100644
--- a/clang/test/CodeGenObjC/debug-info-variadic-method.m
+++ b/clang/test/DebugInfo/ObjC/variadic-method.m
diff --git a/clang/test/CodeGenObjCXX/debug-info-block-capture-this.mm b/clang/test/DebugInfo/ObjCXX/block-capture-this.mm
index 9464222..9464222 100644
--- a/clang/test/CodeGenObjCXX/debug-info-block-capture-this.mm
+++ b/clang/test/DebugInfo/ObjCXX/block-capture-this.mm
diff --git a/clang/test/CodeGenObjCXX/debug-info-cyclic.mm b/clang/test/DebugInfo/ObjCXX/cyclic.mm
index 2fb1611..2fb1611 100644
--- a/clang/test/CodeGenObjCXX/debug-info-cyclic.mm
+++ b/clang/test/DebugInfo/ObjCXX/cyclic.mm
diff --git a/clang/test/CodeGenObjCXX/debug-info.mm b/clang/test/DebugInfo/ObjCXX/debug-info.mm
index ea5bb62..ea5bb62 100644
--- a/clang/test/CodeGenObjCXX/debug-info.mm
+++ b/clang/test/DebugInfo/ObjCXX/debug-info.mm
diff --git a/clang/test/CodeGenObjCXX/debug-info-line.mm b/clang/test/DebugInfo/ObjCXX/line.mm
index 9f543db..9f543db 100644
--- a/clang/test/CodeGenObjCXX/debug-info-line.mm
+++ b/clang/test/DebugInfo/ObjCXX/line.mm
diff --git a/clang/test/CodeGenObjCXX/nested-ehlocation.mm b/clang/test/DebugInfo/ObjCXX/nested-ehlocation.mm
index 030bc7c..030bc7c 100644
--- a/clang/test/CodeGenObjCXX/nested-ehlocation.mm
+++ b/clang/test/DebugInfo/ObjCXX/nested-ehlocation.mm
diff --git a/clang/test/CodeGenObjCXX/pr14474-gline-tables-only.mm b/clang/test/DebugInfo/ObjCXX/pr14474-gline-tables-only.mm
index 01fa98b..01fa98b 100644
--- a/clang/test/CodeGenObjCXX/pr14474-gline-tables-only.mm
+++ b/clang/test/DebugInfo/ObjCXX/pr14474-gline-tables-only.mm
diff --git a/clang/test/CodeGen/RISCV/riscv-v-debuginfo.c b/clang/test/DebugInfo/RISCV/riscv-v-debuginfo.c
index fb0ee98..fb0ee98 100644
--- a/clang/test/CodeGen/RISCV/riscv-v-debuginfo.c
+++ b/clang/test/DebugInfo/RISCV/riscv-v-debuginfo.c
diff --git a/clang/test/CodeGen/X86/i128-debuginfo.c b/clang/test/DebugInfo/X86/i128-debuginfo.c
index 4b865c1..4b865c1 100644
--- a/clang/test/CodeGen/X86/i128-debuginfo.c
+++ b/clang/test/DebugInfo/X86/i128-debuginfo.c
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/10/crtbegin.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/10/crtbegin.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/10/crtbegin.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/10/crtend.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/10/crtend.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/10/crtend.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/10/include/c++/.keep b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/10/include/c++/.keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/10/include/c++/.keep
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/11/crtbegin.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/11/crtbegin.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/11/crtbegin.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/11/crtend.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/11/crtend.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/11/crtend.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/11/include/.keep b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/11/include/.keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/11/include/.keep
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/12/crtbegin.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/12/crtbegin.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/12/crtbegin.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/12/crtend.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/12/crtend.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/12/crtend.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/12/include/.keep b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/12/include/.keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu/12/include/.keep
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/10/crtbegin.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/10/crtbegin.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/10/crtbegin.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/10/crtend.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/10/crtend.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/10/crtend.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/10/include/.keep b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/10/include/.keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/10/include/.keep
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/11/crtbegin.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/11/crtbegin.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/11/crtbegin.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/11/crtend.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/11/crtend.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/11/crtend.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/11/include/c++/.keep b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/11/include/c++/.keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/11/include/c++/.keep
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/12/crtbegin.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/12/crtbegin.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/12/crtbegin.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/12/crtend.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/12/crtend.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/12/crtend.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/12/include/.keep b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/12/include/.keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu/12/include/.keep
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/10/crtbegin.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/10/crtbegin.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/10/crtbegin.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/10/crtend.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/10/crtend.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/10/crtend.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/10/include/.keep b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/10/include/.keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/10/include/.keep
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/11/crtbegin.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/11/crtbegin.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/11/crtbegin.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/11/crtend.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/11/crtend.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/11/crtend.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/11/include/c++/.keep b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/11/include/c++/.keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/11/include/c++/.keep
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/12/crtbegin.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/12/crtbegin.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/12/crtbegin.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/12/crtend.o b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/12/crtend.o
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/12/crtend.o
diff --git a/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/12/include/c++/.keep b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/12/include/c++/.keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clang/test/Driver/Inputs/gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu/12/include/c++/.keep
diff --git a/clang/test/Driver/aarch64-cpu-defaults-appleos26.c b/clang/test/Driver/aarch64-cpu-defaults-appleos26.c
new file mode 100644
index 0000000..9917605
--- /dev/null
+++ b/clang/test/Driver/aarch64-cpu-defaults-appleos26.c
@@ -0,0 +1,22 @@
+/// iOS 26 and watchOS 26 bump the default arm64 CPU targets.
+
+/// arm64 iOS 26 defaults to apple-a12. arm64e already did.
+// RUN: %clang -target arm64-apple-ios26 -### -c %s 2>&1 | FileCheck %s --check-prefix=A12
+// RUN: %clang -target arm64e-apple-ios26 -### -c %s 2>&1 | FileCheck %s --check-prefix=A12
+
+/// arm64e/arm64_32 watchOS 26 default to apple-s6.
+// RUN: %clang -target arm64e-apple-watchos26 -### -c %s 2>&1 | FileCheck %s --check-prefix=S6
+// RUN: %clang -target arm64_32-apple-watchos26 -### -c %s 2>&1 | FileCheck %s --check-prefix=S6
+
+/// arm64 is new in watchOS 26, and defaults to apple-s6.
+// RUN: %clang -target arm64-apple-watchos26 -### -c %s 2>&1 | FileCheck %s --check-prefix=S6
+
+/// LLVM usually treats tvOS like iOS, but tvOS runs on different hardware.
+// RUN: %clang -target arm64-apple-tvos26 -### -c %s 2>&1 | FileCheck %s --check-prefix=A7
+// RUN: %clang -target arm64e-apple-tvos26 -### -c %s 2>&1 | FileCheck %s --check-prefix=A12
+
+/// Simulators are tested with other Mac-like targets in aarch64-mac-cpus.c.
+
+// A12: "-target-cpu" "apple-a12"
+// S6: "-target-cpu" "apple-s6"
+// A7: "-target-cpu" "apple-a7"
diff --git a/clang/test/Driver/aarch64-features.c b/clang/test/Driver/aarch64-features.c
index d2075c9..05dd3bf 100644
--- a/clang/test/Driver/aarch64-features.c
+++ b/clang/test/Driver/aarch64-features.c
@@ -45,7 +45,7 @@
// RUN: -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-OUTLINE-ATOMICS-OFF %s
// RUN: %clang --target=aarch64-unknown-openbsd -rtlib=compiler-rt \
-// RUN: -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-OUTLINE-ATOMICS-OFF %s
+// RUN: -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-OUTLINE-ATOMICS-ON %s
// RUN: %clang --target=aarch64-linux-gnu -rtlib=libgcc \
// RUN: --gcc-toolchain=%S/Inputs/aarch64-linux-gnu-tree/gcc-10 \
diff --git a/clang/test/Driver/aarch64-mac-cpus.c b/clang/test/Driver/aarch64-mac-cpus.c
index 8d23ad8..1aef1ae 100644
--- a/clang/test/Driver/aarch64-mac-cpus.c
+++ b/clang/test/Driver/aarch64-mac-cpus.c
@@ -1,4 +1,4 @@
-// arm64 Mac-based targets default to Apple A13.
+// arm64/arm64e Mac-based targets default to Apple M1.
// RUN: %clang --target=arm64-apple-macos -### -c %s 2>&1 | FileCheck %s
// RUN: %clang --target=arm64-apple-ios-macabi -### -c %s 2>&1 | FileCheck %s
diff --git a/clang/test/Driver/aarch64-toolchain.c b/clang/test/Driver/aarch64-toolchain.c
index 512b5a8..4fddcce 100644
--- a/clang/test/Driver/aarch64-toolchain.c
+++ b/clang/test/Driver/aarch64-toolchain.c
@@ -157,3 +157,15 @@
// AARCH64-BAREMETAL-UNWINDLIB: "{{.*}}clang_rt.crtbegin.o"
// AARCH64-BAREMETAL-UNWINDLIB: "--start-group" "{{.*}}libclang_rt.builtins{{.*}}.a" "--as-needed" "-lunwind" "--no-as-needed" "-lc" "-lgloss" "--end-group"
// AARCH64-BAREMETAL-UNWINDLIB: "{{.*}}clang_rt.crtend.o"
+
+// RUN: %clang -static-pie -### %s -fuse-ld= \
+// RUN: --target=aarch64-none-elf --rtlib=libgcc --unwindlib=platform \
+// RUN: --gcc-toolchain=%S/Inputs/basic_aarch64_gcc_tree \
+// RUN: --sysroot=%S/Inputs/basic_arm_gcc_tree/aarch64-none-elf 2>&1 \
+// RUN: | FileCheck -check-prefix=C-ARM-STATIC-PIE %s
+
+// C-ARM-STATIC-PIE: "-Bstatic" "-pie" "--no-dynamic-linker" "-z" "text" "-m" "aarch64elf" "-EL"
+// C-ARM-STATIC-PIE: "{{.*}}rcrt1.o"
+// C-ARM-STATIC-PIE: "{{.*}}crtbeginS.o"
+// C-ARM-STATIC-PIE: "--start-group" "-lgcc" "-lgcc_eh" "-lc" "-lgloss" "--end-group"
+// C-ARM-STATIC-PIE: "{{.*}}crtendS.o"
diff --git a/clang/test/Driver/arch-arm64e.c b/clang/test/Driver/arch-arm64e.c
index 0fb12d4..39006d2 100644
--- a/clang/test/Driver/arch-arm64e.c
+++ b/clang/test/Driver/arch-arm64e.c
@@ -2,11 +2,20 @@
// RUN: %clang -target arm64-apple-darwin -c %s -### 2>&1 | FileCheck %s --check-prefix NONE
// NONE: "-cc1"
-// NONE-NOT: "-fptrauth-intrinsics"
+
// NONE-NOT: "-fptrauth-calls"
// NONE-NOT: "-fptrauth-returns"
+// NONE-NOT: "-fptrauth-intrinsics"
// NONE-NOT: "-fptrauth-indirect-gotos"
// NONE-NOT: "-fptrauth-auth-traps"
+// NONE-NOT: "-fptrauth-vtable-pointer-address-discrimination"
+// NONE-NOT: "-fptrauth-vtable-pointer-type-discrimination"
+// NONE-NOT: "-fptrauth-objc-isa"
+// NONE-NOT: "-fptrauth-objc-class-ro"
+// NONE-NOT: "-fptrauth-objc-interface-sel"
+
+// Final catch-all in case any new flags are added.
+// NONE-NOT: "-fptrauth"
// RUN: %clang -target arm64-apple-darwin -fptrauth-calls -c %s -### 2>&1 | FileCheck %s --check-prefix CALL
// CALL: "-cc1"{{.*}} {{.*}} "-fptrauth-calls"
@@ -23,39 +32,39 @@
// RUN: %clang -target arm64-apple-darwin -fptrauth-auth-traps -c %s -### 2>&1 | FileCheck %s --check-prefix TRAPS
// TRAPS: "-cc1"{{.*}} {{.*}} "-fptrauth-auth-traps"
-
// Check the arm64e defaults.
// RUN: %clang -target arm64e-apple-ios -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULT
+// RUN: %clang -target arm64e-apple-macos -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULTMAC
+// RUN: %if system-darwin && target={{.*}}-{{darwin|macos}}{{.*}} %{ %clang -target arm64e-apple-macos -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULTARCH %}
// RUN: %clang -mkernel -target arm64e-apple-ios -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULT
// RUN: %clang -fapple-kext -target arm64e-apple-ios -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULT
-// DEFAULT: "-fptrauth-returns" "-fptrauth-intrinsics" "-fptrauth-calls" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" {{.*}}"-target-cpu" "apple-a12"{{.*}}
+// DEFAULT: "-fptrauth-calls" "-fptrauth-returns" "-fptrauth-intrinsics" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" "-fptrauth-vtable-pointer-address-discrimination" "-fptrauth-vtable-pointer-type-discrimination" "-fptrauth-objc-isa" "-fptrauth-objc-class-ro" "-fptrauth-objc-interface-sel" {{.*}}"-target-cpu" "apple-a12"{{.*}}
+// DEFAULTMAC: "-fptrauth-calls" "-fptrauth-returns" "-fptrauth-intrinsics" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" "-fptrauth-vtable-pointer-address-discrimination" "-fptrauth-vtable-pointer-type-discrimination" "-fptrauth-objc-isa" "-fptrauth-objc-class-ro" "-fptrauth-objc-interface-sel" {{.*}}"-target-cpu" "apple-m1"{{.*}}
+// DEFAULTARCH: "-fptrauth-calls" "-fptrauth-returns" "-fptrauth-intrinsics" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" "-fptrauth-vtable-pointer-address-discrimination" "-fptrauth-vtable-pointer-type-discrimination" "-fptrauth-objc-isa" "-fptrauth-objc-class-ro" "-fptrauth-objc-interface-sel"
// RUN: %clang -target arm64e-apple-none-macho -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULT-MACHO
-// DEFAULT-MACHO: "-fptrauth-returns" "-fptrauth-intrinsics" "-fptrauth-calls" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" {{.*}}"-target-cpu" "apple-a12"{{.*}}
+// DEFAULT-MACHO: "-fptrauth-calls" "-fptrauth-returns" "-fptrauth-intrinsics" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" "-fptrauth-vtable-pointer-address-discrimination" "-fptrauth-vtable-pointer-type-discrimination" "-fptrauth-objc-isa" "-fptrauth-objc-class-ro" "-fptrauth-objc-interface-sel" {{.*}}"-target-cpu" "apple-a12"{{.*}}
// RUN: %clang -target arm64e-apple-ios -fno-ptrauth-calls -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULT-NOCALL
-// RUN: %clang -mkernel -target arm64e-apple-ios -fno-ptrauth-calls -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULT-NOCALL
-// RUN: %clang -fapple-kext -target arm64e-apple-ios -fno-ptrauth-calls -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULT-NOCALL
// DEFAULT-NOCALL-NOT: "-fptrauth-calls"
-// DEFAULT-NOCALL: "-fptrauth-returns" "-fptrauth-intrinsics" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" {{.*}}"-target-cpu" "apple-a12"
+// DEFAULT-NOCALL: "-fptrauth-returns" "-fptrauth-intrinsics" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" "-fptrauth-vtable-pointer-address-discrimination" "-fptrauth-vtable-pointer-type-discrimination" "-fptrauth-objc-isa" "-fptrauth-objc-class-ro" "-fptrauth-objc-interface-sel" {{.*}}"-target-cpu" "apple-a12"
// RUN: %clang -target arm64e-apple-ios -fno-ptrauth-returns -c %s -### 2>&1 | FileCheck %s --check-prefix NORET
// NORET-NOT: "-fptrauth-returns"
-// NORET: "-fptrauth-intrinsics" "-fptrauth-calls" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" {{.*}}"-target-cpu" "apple-a12"
+// NORET: "-fptrauth-calls" "-fptrauth-intrinsics" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" "-fptrauth-vtable-pointer-address-discrimination" "-fptrauth-vtable-pointer-type-discrimination" "-fptrauth-objc-isa" "-fptrauth-objc-class-ro" "-fptrauth-objc-interface-sel" {{.*}}"-target-cpu" "apple-a12"
// RUN: %clang -target arm64e-apple-ios -fno-ptrauth-intrinsics -c %s -### 2>&1 | FileCheck %s --check-prefix NOINTRIN
-// NOINTRIN: "-fptrauth-returns"
// NOINTRIN-NOT: "-fptrauth-intrinsics"
-// NOINTRIN: "-fptrauth-calls" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" {{.*}}"-target-cpu" "apple-a12"{{.*}}
+// NOINTRIN: "-fptrauth-calls" "-fptrauth-returns" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" "-fptrauth-vtable-pointer-address-discrimination" "-fptrauth-vtable-pointer-type-discrimination" "-fptrauth-objc-isa" "-fptrauth-objc-class-ro" "-fptrauth-objc-interface-sel" {{.*}}"-target-cpu" "apple-a12"
// RUN: %clang -target arm64e-apple-ios -fno-ptrauth-auth-traps -c %s -### 2>&1 | FileCheck %s --check-prefix NOTRAP
-// NOTRAP: "-fptrauth-returns" "-fptrauth-intrinsics" "-fptrauth-calls" "-fptrauth-indirect-gotos" {{.*}}"-target-cpu" "apple-a12"
+// NOTRAP: "-fptrauth-calls" "-fptrauth-returns" "-fptrauth-intrinsics" "-fptrauth-indirect-gotos" "-fptrauth-vtable-pointer-address-discrimination" "-fptrauth-vtable-pointer-type-discrimination" "-fptrauth-objc-isa" "-fptrauth-objc-class-ro" "-fptrauth-objc-interface-sel" {{.*}}"-target-cpu" "apple-a12"
// Check the CPU defaults and overrides.
diff --git a/clang/test/Driver/arm-toolchain.c b/clang/test/Driver/arm-toolchain.c
index 9005992f..7ee65a4 100644
--- a/clang/test/Driver/arm-toolchain.c
+++ b/clang/test/Driver/arm-toolchain.c
@@ -158,3 +158,15 @@
// ARM-BAREMETAL-UNWINDLIB: "{{.*}}clang_rt.crtbegin.o"
// ARM-BAREMETAL-UNWINDLIB: "--start-group" "{{.*}}libclang_rt.builtins.a" "--as-needed" "-lunwind" "--no-as-needed" "-lc" "-lgloss" "--end-group"
// ARM-BAREMETAL-UNWINDLIB: "{{.*}}clang_rt.crtend.o"
+
+// RUN: %clang -static-pie -### %s -fuse-ld= \
+// RUN: --target=armv6m-none-eabi --rtlib=libgcc --unwindlib=platform \
+// RUN: --gcc-toolchain=%S/Inputs/basic_arm_gcc_tree \
+// RUN: --sysroot=%S/Inputs/basic_arm_gcc_tree/armv6m-none-eabi 2>&1 \
+// RUN: | FileCheck -check-prefix=C-ARM-STATIC-PIE %s
+
+// C-ARM-STATIC-PIE: "-Bstatic" "-pie" "--no-dynamic-linker" "-z" "text" "-m" "armelf" "-EL"
+// C-ARM-STATIC-PIE: "{{.*}}rcrt1.o"
+// C-ARM-STATIC-PIE: "{{.*}}crtbeginS.o"
+// C-ARM-STATIC-PIE: "--start-group" "-lgcc" "-lgcc_eh" "-lc" "-lgloss" "--end-group"
+// C-ARM-STATIC-PIE: "{{.*}}crtendS.o"
diff --git a/clang/test/Driver/cl-options.c b/clang/test/Driver/cl-options.c
index 57e16e8..e970868 100644
--- a/clang/test/Driver/cl-options.c
+++ b/clang/test/Driver/cl-options.c
@@ -728,6 +728,7 @@
// RUN: -fno-profile-instr-use \
// RUN: -fcs-profile-generate \
// RUN: -fcs-profile-generate=dir \
+// RUN: -fpseudo-probe-for-profiling \
// RUN: -ftime-trace \
// RUN: -fmodules \
// RUN: -fno-modules \
diff --git a/clang/test/Driver/cl-x86-flags.c b/clang/test/Driver/cl-x86-flags.c
index 1e64182..8952674 100644
--- a/clang/test/Driver/cl-x86-flags.c
+++ b/clang/test/Driver/cl-x86-flags.c
@@ -145,6 +145,7 @@ void f(void) {
// RUN: %clang_cl --target=x86_64-pc-windows -mapxf -### -- 2>&1 %s | FileCheck -check-prefix=APXF %s
// RUN: %clang_cl --target=x86_64-pc-windows -mapxf -mno-apxf -### -- 2>&1 %s | FileCheck -check-prefix=NO-APXF %s
-// RUN: %clang_cl --target=x86_64-pc-windows -mapx-features=egpr,push2pop2,ppx,ndd,ccmp,nf,cf,zu -### -- 2>&1 %s | FileCheck -check-prefix=APXF %s
-// APXF: "-target-feature" "+egpr" "-target-feature" "+push2pop2" "-target-feature" "+ppx" "-target-feature" "+ndd" "-target-feature" "+ccmp" "-target-feature" "+nf" "-target-feature" "+cf" "-target-feature" "+zu"
-// NO-APXF: "-target-feature" "-egpr" "-target-feature" "-push2pop2" "-target-feature" "-ppx" "-target-feature" "-ndd" "-target-feature" "-ccmp" "-target-feature" "-nf" "-target-feature" "-cf" "-target-feature" "-zu"
+// RUN: %clang_cl --target=x86_64-pc-windows -mapx-features=egpr,push2pop2,ppx,ndd,ccmp,nf,cf,zu -### -- 2>&1 %s | FileCheck -check-prefix=APXALL %s
+// APXF: "-target-feature" "+egpr" "-target-feature" "+push2pop2" "-target-feature" "+ppx" "-target-feature" "+ndd" "-target-feature" "+ccmp" "-target-feature" "+nf" "-target-feature" "+zu"
+// NO-APXF: "-target-feature" "-egpr" "-target-feature" "-push2pop2" "-target-feature" "-ppx" "-target-feature" "-ndd" "-target-feature" "-ccmp" "-target-feature" "-nf" "-target-feature" "-zu"
+// APXALL: "-target-feature" "+egpr" "-target-feature" "+push2pop2" "-target-feature" "+ppx" "-target-feature" "+ndd" "-target-feature" "+ccmp" "-target-feature" "+nf" "-target-feature" "+cf" "-target-feature" "+zu"
diff --git a/clang/test/Driver/clang-offload-bundler-zlib.c b/clang/test/Driver/clang-offload-bundler-zlib.c
index b026e2e..211601c 100644
--- a/clang/test/Driver/clang-offload-bundler-zlib.c
+++ b/clang/test/Driver/clang-offload-bundler-zlib.c
@@ -66,6 +66,30 @@
// NOHOST-V3-DAG: hip-amdgcn-amd-amdhsa--gfx900
// NOHOST-V3-DAG: hip-amdgcn-amd-amdhsa--gfx906
+// Check compression/decompression of an offload bundle using the version 2 format.
+//
+// RUN: env OFFLOAD_BUNDLER_COMPRESS=1 OFFLOAD_BUNDLER_VERBOSE=1 COMPRESSED_BUNDLE_FORMAT_VERSION=2 \
+// RUN: clang-offload-bundler -type=bc -targets=hip-amdgcn-amd-amdhsa--gfx900,hip-amdgcn-amd-amdhsa--gfx906 \
+// RUN: -input=%t.tgt1 -input=%t.tgt2 -output=%t.hip.bundle.bc 2>&1 | \
+// RUN: FileCheck -check-prefix=COMPRESS-V2 %s
+// RUN: clang-offload-bundler -type=bc -list -input=%t.hip.bundle.bc | FileCheck -check-prefix=NOHOST-V2 %s
+// RUN: env OFFLOAD_BUNDLER_VERBOSE=1 \
+// RUN: clang-offload-bundler -type=bc -targets=hip-amdgcn-amd-amdhsa--gfx900,hip-amdgcn-amd-amdhsa--gfx906 \
+// RUN: -output=%t.res.tgt1 -output=%t.res.tgt2 -input=%t.hip.bundle.bc -unbundle 2>&1 | \
+// RUN: FileCheck -check-prefix=DECOMPRESS-V2 %s
+// RUN: diff %t.tgt1 %t.res.tgt1
+// RUN: diff %t.tgt2 %t.res.tgt2
+//
+// COMPRESS-V2: Compressed bundle format version: 2
+// COMPRESS-V2: Compression method used: zlib
+// COMPRESS-V2: Compression level: 6
+// DECOMPRESS-V2: Compressed bundle format version: 2
+// DECOMPRESS-V2: Decompression method: zlib
+// DECOMPRESS-V2: Hashes match: Yes
+// NOHOST-V2-NOT: host-
+// NOHOST-V2-DAG: hip-amdgcn-amd-amdhsa--gfx900
+// NOHOST-V2-DAG: hip-amdgcn-amd-amdhsa--gfx906
+
// Check -compression-level= option
// RUN: clang-offload-bundler -type=bc -targets=hip-amdgcn-amd-amdhsa--gfx900,hip-amdgcn-amd-amdhsa--gfx906 \
diff --git a/clang/test/Driver/clang-offload-bundler-zstd.c b/clang/test/Driver/clang-offload-bundler-zstd.c
index 667d955..c1123ae 100644
--- a/clang/test/Driver/clang-offload-bundler-zstd.c
+++ b/clang/test/Driver/clang-offload-bundler-zstd.c
@@ -29,11 +29,11 @@
// RUN: diff %t.tgt1 %t.res.tgt1
// RUN: diff %t.tgt2 %t.res.tgt2
//
-// CHECK: Compressed bundle format version: 2
+// CHECK: Compressed bundle format version: 3
// CHECK: Total file size (including headers): [[SIZE:[0-9]*]] bytes
// CHECK: Compression method used: zstd
// CHECK: Compression level: 3
-// CHECK: Compressed bundle format version: 2
+// CHECK: Compressed bundle format version: 3
// CHECK: Total file size (from header): [[SIZE]] bytes
// CHECK: Decompression method: zstd
// CHECK: Hashes match: Yes
diff --git a/clang/test/Driver/cuda-bad-arch.cu b/clang/test/Driver/cuda-bad-arch.cu
index 85231a5..6ac7229 100644
--- a/clang/test/Driver/cuda-bad-arch.cu
+++ b/clang/test/Driver/cuda-bad-arch.cu
@@ -25,6 +25,8 @@
// RUN: | FileCheck -check-prefix OK %s
// RUN: %clang -### -x hip --target=x86_64-linux-gnu -nogpulib -nogpuinc --cuda-gpu-arch=gfx942 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix OK %s
+// RUN: %clang -### -x hip --target=x86_64-linux-gnu -nogpulib -nogpuinc --cuda-gpu-arch=gfx1250 -c %s 2>&1 \
+// RUN: | FileCheck -check-prefix OK %s
// We don't allow using NVPTX/AMDGCN for host compilation.
// RUN: not %clang -### --no-offload-new-driver --cuda-host-only --target=nvptx-nvidia-cuda -nogpulib -nogpuinc -c %s 2>&1 \
diff --git a/clang/test/Driver/cuda-cross-compiling.c b/clang/test/Driver/cuda-cross-compiling.c
index 1df231e..6717ae8 100644
--- a/clang/test/Driver/cuda-cross-compiling.c
+++ b/clang/test/Driver/cuda-cross-compiling.c
@@ -104,3 +104,11 @@
// RUN: -nogpulib -nogpuinc -### %s 2>&1 | FileCheck -check-prefix=STARTUP %s
// STARTUP: clang-nvlink-wrapper{{.*}}"-lc" "-lm" "{{.*}}crt1.o"
+
+//
+// Test CUDA path handling.
+//
+// RUN: %clang -target nvptx64-nvidia-cuda -march=sm_52 --cuda-path=%S/Inputs/CUDA/usr/local/cuda \
+// RUN: -nogpulib -nogpuinc -### %s 2>&1 | FileCheck -check-prefix=PATH %s
+
+// PATH: clang-nvlink-wrapper{{.*}}"--cuda-path={{.*}}/Inputs/CUDA/usr/local/cuda"
diff --git a/clang/test/Driver/cuda-detect-path.cu b/clang/test/Driver/cuda-detect-path.cu
index 8d249bd..ce42ed7 100644
--- a/clang/test/Driver/cuda-detect-path.cu
+++ b/clang/test/Driver/cuda-detect-path.cu
@@ -1,5 +1,5 @@
// This test uses the PATH environment variable.
-// REQUIRES: !system-windows
+// REQUIRES: !system-windows, !system-cygwin
// RUN: env PATH=%S/Inputs/CUDA/usr/local/cuda/bin \
// RUN: %clang -v --target=i386-unknown-linux --sysroot=%S/no-cuda-there \
diff --git a/clang/test/Driver/dxc_fspv_extension.hlsl b/clang/test/Driver/dxc_fspv_extension.hlsl
index f09f21e..aeba9f5 100644
--- a/clang/test/Driver/dxc_fspv_extension.hlsl
+++ b/clang/test/Driver/dxc_fspv_extension.hlsl
@@ -14,6 +14,14 @@
// RUN: %clang_dxc -spirv -Tlib_6_7 -### %s -fspv-extension=SPV_TEST1 -fspv-extension=KHR -fspv-extension=SPV_TEST2 2>&1 | FileCheck %s -check-prefix=TEST3
// TEST3: "-spirv-ext=+SPV_TEST1,KHR,+SPV_TEST2"
+// Merge KHR with other extensions.
+// RUN: %clang_dxc -spirv -Tlib_6_7 -### %s -fspv-extension=KHR -fspv-extension=SPV_TEST2 2>&1 | FileCheck %s -check-prefix=TEST4
+// TEST4: "-spirv-ext=KHR,+SPV_TEST2"
+
+// The DXC value expands to DXC's default list of SPV extensions.
+// RUN: %clang_dxc -spirv -Tlib_6_7 -### %s -fspv-extension=DXC 2>&1 | FileCheck %s -check-prefix=TEST5
+// TEST5: "-spirv-ext={{(\+SPV_[a-zA-Z0-9_]+,?)+}}"
+
// Check for the error message if the extension name is not properly formed.
// RUN: not %clang_dxc -spirv -Tlib_6_7 -### %s -fspv-extension=KHR_BAD -fspv-extension=TEST1 -fspv-extension=SPV_GOOD -fspv-extension=TEST2 2>&1 | FileCheck %s -check-prefix=FAIL
// FAIL: invalid value 'KHR_BAD' in '-fspv-extension'
diff --git a/clang/test/Driver/dxc_rootsig-define.hlsl b/clang/test/Driver/dxc_rootsig-define.hlsl
new file mode 100644
index 0000000..40c3e12
--- /dev/null
+++ b/clang/test/Driver/dxc_rootsig-define.hlsl
@@ -0,0 +1,33 @@
+// RUN: %clang_dxc -T cs_6_0 -fcgl %s | FileCheck %s --check-prefixes=CHECK,REG
+// RUN: %clang_dxc -T cs_6_0 -fcgl -rootsig-define EmptyRS %s | FileCheck %s --check-prefixes=CHECK,EMPTY
+// RUN: %clang_dxc -T cs_6_0 -fcgl -rootsig-define CmdRS -D CmdRS='"SRV(t0)"' %s | FileCheck %s --check-prefixes=CHECK,CMD
+
+// Equivalent clang checks:
+// RUN: %clang -target dxil-unknown-shadermodel6.0-compute -S -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefixes=CHECK,REG
+
+// RUN: %clang -target dxil-unknown-shadermodel6.0-compute -S -emit-llvm -o - %s \
+// RUN: -fdx-rootsignature-define=EmptyRS \
+// RUN: | FileCheck %s --check-prefixes=CHECK,EMPTY
+
+// RUN: %clang -target dxil-unknown-shadermodel6.0-compute -S -emit-llvm -o - %s \
+// RUN: -fdx-rootsignature-define=CmdRS -D CmdRS='"SRV(t0)"' \
+// RUN: | FileCheck %s --check-prefixes=CHECK,CMD
+
+#define EmptyRS ""
+#define NotEmptyRS "CBV(b0)"
+
+// CHECK: !dx.rootsignatures = !{![[#ENTRY:]]}
+// CHECK: ![[#ENTRY]] = !{ptr @main, ![[#RS:]], i32 2}
+
+// REG: ![[#RS]] = !{![[#CBV:]]}
+// REG: ![[#CBV]] = !{!"RootCBV"
+
+// EMPTY: ![[#RS]] = !{}
+
+// CMD: ![[#RS]] = !{![[#SRV:]]}
+// CMD: ![[#SRV]] = !{!"RootSRV"
+
+[shader("compute"), RootSignature(NotEmptyRS)]
+[numthreads(1,1,1)]
+void main() {}
diff --git a/clang/test/Driver/dxc_strip_rootsignature.hlsl b/clang/test/Driver/dxc_strip_rootsignature.hlsl
new file mode 100644
index 0000000..79d5ef2
--- /dev/null
+++ b/clang/test/Driver/dxc_strip_rootsignature.hlsl
@@ -0,0 +1,17 @@
+// Create a dummy dxv to run.
+// RUN: mkdir -p %t.dir
+// RUN: echo "dxv" > %t.dir/dxv && chmod 754 %t.dir/dxv
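+// (chmod 754 makes the stub executable so the driver's --dxv-path lookup can
+// find it; with -### the commands are only printed, so the stub never runs.)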
+
+// RUN: %clang_dxc -Qstrip-rootsignature --dxv-path=%t.dir -T cs_6_0 /Fo %t.dxo -### %s 2>&1 | FileCheck %s
+
+// Test to demonstrate that we strip the root signature with the
+// -Qstrip-rootsignature option and that the strip occurs before DXV runs.
+
+// CHECK: "{{.*}}llvm-objcopy{{(.exe)?}}" "{{.*}}.obj" "{{.*}}.obj" "--remove-section=RTS0"
+// CHECK: "{{.*}}dxv{{(.exe)?}}" "{{.*}}.obj" "-o" "{{.*}}.dxo"
+
+[shader("compute"), RootSignature("")]
+[numthreads(1,1,1)]
+void EmptyEntry() {}
diff --git a/clang/test/Driver/fsanitize-debug-trap-reasons.c b/clang/test/Driver/fsanitize-debug-trap-reasons.c
new file mode 100644
index 0000000..5a0ccde
--- /dev/null
+++ b/clang/test/Driver/fsanitize-debug-trap-reasons.c
@@ -0,0 +1,57 @@
+// =============================================================================
+// No Trap Reasons
+// =============================================================================
+
+// RUN: %clang -fsanitize=undefined -fsanitize-trap=undefined \
+// RUN: -fsanitize-debug-trap-reasons=none %s -### 2>&1 | \
+// RUN: FileCheck --check-prefix=NONE %s
+
+// RUN: %clang -fsanitize=undefined -fsanitize-trap=undefined \
+// RUN: -fno-sanitize-debug-trap-reasons %s -### 2>&1 | \
+// RUN: FileCheck --check-prefix=NONE %s
+
+// NONE: -fsanitize-debug-trap-reasons=none
+
+// =============================================================================
+// Basic Trap Reasons
+// =============================================================================
+
+// RUN: %clang -fsanitize=undefined -fsanitize-trap=undefined \
+// RUN: -fsanitize-debug-trap-reasons=basic %s -### 2>&1 | \
+// RUN: FileCheck --check-prefix=BASIC %s
+// BASIC: -fsanitize-debug-trap-reasons=basic
+
+// =============================================================================
+// Detailed Trap Reasons
+// =============================================================================
+
+// RUN: %clang -fsanitize=undefined -fsanitize-trap=undefined \
+// RUN: -fsanitize-debug-trap-reasons=detailed %s -### 2>&1 | \
+// RUN: FileCheck --check-prefix=DETAILED %s
+
+// RUN: %clang -fsanitize=undefined -fsanitize-trap=undefined \
+// RUN: -fsanitize-debug-trap-reasons %s -### 2>&1 | \
+// RUN: FileCheck --check-prefix=DETAILED %s
+
+// DETAILED: -fsanitize-debug-trap-reasons=detailed
+
+// =============================================================================
+// Other cases
+// =============================================================================
+
+// By default the driver doesn't pass along any value, so the effective default
+// is whatever CodeGenOptions specifies.
+// RUN: %clang %s -### 2>&1 | FileCheck --check-prefix=DEFAULT %s
+// DEFAULT-NOT: -fsanitize-debug-trap-reasons
+
+// Warning when not using UBSan
+// RUN: %clang -fsanitize-debug-trap-reasons=none %s -### 2>&1 | \
+// RUN: FileCheck --check-prefix=WARN %s
+// WARN: warning: argument unused during compilation: '-fsanitize-debug-trap-reasons=none'
+
+// Bad flag arguments are just passed along to the Frontend, which handles
+// rejecting invalid values. See `clang/test/Frontend/fsanitize-debug-trap-reasons.c`.
+// RUN: %clang -fsanitize=undefined -fsanitize-trap=undefined \
+// RUN: -fsanitize-debug-trap-reasons=bad_value %s -### 2>&1 | \
+// RUN: FileCheck --check-prefix=BAD_VALUE %s
+// BAD_VALUE: -fsanitize-debug-trap-reasons=bad_value
diff --git a/clang/test/Driver/fsanitize.c b/clang/test/Driver/fsanitize.c
index fbe1fd7..263301a 100644
--- a/clang/test/Driver/fsanitize.c
+++ b/clang/test/Driver/fsanitize.c
@@ -794,6 +794,11 @@
// RUN: not %clang --target=x86_64-linux-gnu -fsanitize=cfi-icall -fsanitize-cfi-icall-generalize-pointers -fsanitize-cfi-cross-dso -fvisibility=hidden -flto -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CFI-GENERALIZE-AND-CROSS-DSO
// CHECK-CFI-GENERALIZE-AND-CROSS-DSO: error: invalid argument '-fsanitize-cfi-cross-dso' not allowed with '-fsanitize-cfi-icall-generalize-pointers'
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=kcfi -fsanitize-cfi-icall-generalize-pointers -fvisibility=hidden -flto -c -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-KCFI-GENERALIZE-POINTERS
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=kcfi -fvisibility=hidden -flto -c -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-KCFI-GENERALIZE-POINTERS
+// CHECK-KCFI-GENERALIZE-POINTERS: -fsanitize-cfi-icall-generalize-pointers
+// CHECK-NO-KCFI-GENERALIZE-POINTERS-NOT: -fsanitize-cfi-icall-generalize-pointers
+
// RUN: %clang --target=x86_64-linux-gnu -fsanitize=cfi-icall -fsanitize-cfi-canonical-jump-tables -fvisibility=hidden -flto -c -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CFI-CANONICAL-JUMP-TABLES
// RUN: %clang --target=x86_64-linux-gnu -fsanitize=cfi-icall -fno-sanitize-cfi-canonical-jump-tables -fvisibility=hidden -flto -c %s -resource-dir=%S/Inputs/resource_dir -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-CFI-CANONICAL-JUMP-TABLES
// RUN: %clang --target=x86_64-linux-gnu -fsanitize=cfi-icall -fvisibility=hidden -flto -c -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CFI-CANONICAL-JUMP-TABLES
diff --git a/clang/test/Driver/gcc-toolchain-libstdcxx.cpp b/clang/test/Driver/gcc-toolchain-libstdcxx.cpp
new file mode 100644
index 0000000..bd5b0af
--- /dev/null
+++ b/clang/test/Driver/gcc-toolchain-libstdcxx.cpp
@@ -0,0 +1,33 @@
+// UNSUPPORTED: system-windows
+
+// This file tests that GCC installation directory detection takes the
+// libstdc++ include directories into account. In each directory
+// Inputs/gcc_toolchain_libstdcxx/gccX, the installation directory for
+// gcc X should be picked in the future, since it is the directory with
+// the largest version number that also contains the libstdc++ include
+// directory.
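+//
+// A rough sketch of the intended future selection rule (illustrative
+// pseudocode only, not the actual driver implementation):
+//   with_cxx = { v in candidates : v contains a libstdc++ include dir }
+//   selected = max(with_cxx) if with_cxx is non-empty, else max(candidates)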
+
+// RUN: %clang --target=x86_64-linux-gnu --gcc-toolchain=%S/Inputs/gcc_toolchain_libstdcxx/gcc10/usr -v 2>&1 | FileCheck %s --check-prefix=GCC10
+// GCC10: clang: warning: future releases of the clang compiler will prefer GCC installations containing libstdc++ include directories; '[[PREFIX:.*gcc_toolchain_libstdcxx/gcc10/usr/lib/gcc/x86_64-linux-gnu]]/10' would be chosen over '[[PREFIX]]/12' [-Wgcc-install-dir-libstdcxx]
+// GCC10: Found candidate GCC installation: [[PREFIX]]/10
+// GCC10: Found candidate GCC installation: [[PREFIX]]/11
+// GCC10: Found candidate GCC installation: [[PREFIX]]/12
+// GCC10: Selected GCC installation: [[PREFIX]]/12
+
+// RUN: %clang --target=x86_64-linux-gnu --gcc-toolchain=%S/Inputs/gcc_toolchain_libstdcxx/gcc11/usr -v 2>&1 | FileCheck %s --check-prefix=ONLY_GCC11
+// ONLY_GCC11: clang: warning: future releases of the clang compiler will prefer GCC installations containing libstdc++ include directories; '[[PREFIX:.*gcc_toolchain_libstdcxx/gcc11/usr/lib/gcc/x86_64-linux-gnu]]/11' would be chosen over '[[PREFIX]]/12' [-Wgcc-install-dir-libstdcxx]
+// ONLY_GCC11: Found candidate GCC installation: [[PREFIX]]/10
+// ONLY_GCC11: Found candidate GCC installation: [[PREFIX]]/11
+// ONLY_GCC11: Found candidate GCC installation: [[PREFIX]]/12
+// ONLY_GCC11: Selected GCC installation: [[PREFIX]]/12
+
+// RUN: %clang --target=x86_64-linux-gnu --gcc-toolchain=%S/Inputs/gcc_toolchain_libstdcxx/gcc12/usr -v 2>&1 | FileCheck %s --check-prefix=GCC12
+// GCC12: Found candidate GCC installation: [[PREFIX:.*gcc_toolchain_libstdcxx/gcc12/usr/lib/gcc/x86_64-linux-gnu]]/10
+// GCC12: Found candidate GCC installation: [[PREFIX]]/11
+// GCC12: Found candidate GCC installation: [[PREFIX]]/12
+// GCC12: Selected GCC installation: [[PREFIX]]/12
diff --git a/clang/test/Driver/hip-macros.hip b/clang/test/Driver/hip-macros.hip
index bd93f99..516e01a 100644
--- a/clang/test/Driver/hip-macros.hip
+++ b/clang/test/Driver/hip-macros.hip
@@ -27,21 +27,27 @@
// RUN: %clang -E -dM --offload-arch=gfx906 --cuda-device-only -nogpuinc -nogpulib -mcumode \
// RUN: %s 2>&1 | FileCheck --check-prefix=CUMODE-ON %s
// RUN: %clang -E -dM --offload-arch=gfx906 --cuda-device-only -nogpuinc -nogpulib -mno-cumode \
-// RUN: %s 2>&1 | FileCheck --check-prefixes=CUMODE-ON,WARN-CUMODE %s
+// RUN: %s 2>&1 | FileCheck -DOFFLOAD_ARCH=gfx906 --check-prefixes=CUMODE-ON,WARN-CUMODE %s
// RUN: %clang -E -dM --offload-arch=gfx1030 --cuda-device-only -nogpuinc -nogpulib \
// RUN: %s 2>&1 | FileCheck --check-prefix=CUMODE-OFF %s
// RUN: %clang -E -dM --offload-arch=gfx1030 --cuda-device-only -nogpuinc -nogpulib -mcumode \
// RUN: %s 2>&1 | FileCheck --check-prefix=CUMODE-ON %s
// RUN: %clang -E -dM --offload-arch=gfx1030 --cuda-device-only -nogpuinc -nogpulib -mno-cumode \
// RUN: %s 2>&1 | FileCheck --check-prefix=CUMODE-OFF %s
+// RUN: %clang -E -dM --offload-arch=gfx1250 --cuda-device-only -nogpuinc -nogpulib \
+// RUN: %s 2>&1 | FileCheck --check-prefix=CUMODE-ON %s
+// RUN: %clang -E -dM --offload-arch=gfx1250 --cuda-device-only -nogpuinc -nogpulib -mcumode \
+// RUN: %s 2>&1 | FileCheck --check-prefix=CUMODE-ON %s
+// RUN: %clang -E -dM --offload-arch=gfx1250 --cuda-device-only -nogpuinc -nogpulib -mno-cumode \
+// RUN: %s 2>&1 | FileCheck -DOFFLOAD_ARCH=gfx1250 --check-prefixes=CUMODE-ON,WARN-CUMODE %s
// Check no duplicate warnings.
// RUN: %clang -E -dM --offload-arch=gfx906 --cuda-device-only -nogpuinc -nogpulib -mcumode \
// RUN: -mno-cumode -mno-cumode \
-// RUN: %s 2>&1 | FileCheck --check-prefixes=CUMODE-ON,WARN-CUMODE %s
+// RUN: %s 2>&1 | FileCheck -DOFFLOAD_ARCH=gfx906 --check-prefixes=CUMODE-ON,WARN-CUMODE %s
-// WARN-CUMODE-DAG: warning: ignoring '-mno-cumode' option as it is not currently supported for processor 'gfx906' [-Woption-ignored]
-// WARN-CUMODE-NOT: warning: ignoring '-mno-cumode' option as it is not currently supported for processor 'gfx906' [-Woption-ignored]
+// WARN-CUMODE-DAG: warning: ignoring '-mno-cumode' option as it is not currently supported for processor '[[OFFLOAD_ARCH]]' [-Woption-ignored]
+// WARN-CUMODE-NOT: warning: ignoring '-mno-cumode' option as it is not currently supported for processor '[[OFFLOAD_ARCH]]' [-Woption-ignored]
// CUMODE-ON-DAG: #define __AMDGCN_CUMODE__ 1
// CUMODE-OFF-DAG: #define __AMDGCN_CUMODE__ 0
diff --git a/clang/test/Driver/hip-runtime-libs-msvc.hip b/clang/test/Driver/hip-runtime-libs-msvc.hip
index 943cd05..d282a26 100644
--- a/clang/test/Driver/hip-runtime-libs-msvc.hip
+++ b/clang/test/Driver/hip-runtime-libs-msvc.hip
@@ -10,4 +10,11 @@
// RUN: --rocm-path=%S/Inputs/rocm %s 2>&1 \
// RUN: | FileCheck %s
+// Test that the HIP runtime lib is linked even if -nostdlib is specified when
+// the input is a HIP file. This is important when composing with e.g. the UCRT
+// or other non-glibc-like implementations of the C standard library.
+// RUN: %clang -### --target=x86_64-pc-windows-msvc -nogpuinc -nogpulib \
+// RUN: -nostdlib --rocm-path=%S/Inputs/rocm %s 2>&1 \
+// RUN: | FileCheck %s
+
// CHECK: "-libpath:{{.*Inputs.*rocm.*lib}}" "amdhip64.lib"
diff --git a/clang/test/Driver/hipspv-toolchain.hip b/clang/test/Driver/hipspv-toolchain.hip
index b2187ac..3c175eb 100644
--- a/clang/test/Driver/hipspv-toolchain.hip
+++ b/clang/test/Driver/hipspv-toolchain.hip
@@ -1,4 +1,4 @@
-// UNSUPPORTED: system-windows
+// UNSUPPORTED: system-windows, system-cygwin
// RUN: %clang -### -target x86_64-linux-gnu --offload=spirv64 \
// RUN: --no-offload-new-driver --hip-path=%S/Inputs/hipspv -nohipwrapperinc %s \
diff --git a/clang/test/Driver/lanai-mcmodel.c b/clang/test/Driver/lanai-mcmodel.c
new file mode 100644
index 0000000..d62d7e1
--- /dev/null
+++ b/clang/test/Driver/lanai-mcmodel.c
@@ -0,0 +1,10 @@
+// RUN: %clang --target=lanai -### -c -mcmodel=small %s 2>&1 | FileCheck --check-prefix=SMALL %s
+// RUN: %clang --target=lanai -### -c -mcmodel=medium %s 2>&1 | FileCheck --check-prefix=MEDIUM %s
+// RUN: %clang --target=lanai -### -c -mcmodel=large %s 2>&1 | FileCheck --check-prefix=LARGE %s
+// RUN: not %clang --target=lanai -### -c -mcmodel=something %s 2>&1 | FileCheck --check-prefix=ERR-MCMODEL %s
+
+// SMALL: "-mcmodel=small"
+// MEDIUM: "-mcmodel=medium"
+// LARGE: "-mcmodel=large"
+
+// ERR-MCMODEL: error: unsupported argument 'something' to option '-mcmodel=' for target 'lanai'
diff --git a/clang/test/Driver/ld-path.c b/clang/test/Driver/ld-path.c
index bc10b9e..e00b63d 100644
--- a/clang/test/Driver/ld-path.c
+++ b/clang/test/Driver/ld-path.c
@@ -1,5 +1,5 @@
/// This test uses the PATH environment variable.
-// UNSUPPORTED: system-windows
+// UNSUPPORTED: system-windows, system-cygwin
// RUN: cd %S
diff --git a/clang/test/Driver/linker-wrapper-libs.c b/clang/test/Driver/linker-wrapper-libs.c
deleted file mode 100644
index 1404fe3..0000000
--- a/clang/test/Driver/linker-wrapper-libs.c
+++ /dev/null
@@ -1,191 +0,0 @@
-// REQUIRES: x86-registered-target
-// REQUIRES: nvptx-registered-target
-// REQUIRES: amdgpu-registered-target
-
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.elf.o
-
-#if defined(RESOLVES)
-int __attribute__((visibility("hidden"))) sym;
-#elif defined(GLOBAL)
-int __attribute__((visibility("protected"))) global;
-#elif defined(WEAK)
-int __attribute__((visibility("hidden"))) weak;
-#elif defined(HIDDEN)
-int __attribute__((visibility("hidden"))) hidden;
-#elif defined(UNDEFINED)
-extern int sym;
-int baz() { return sym; }
-#else
-extern int sym;
-
-extern int __attribute__((weak)) weak;
-
-int foo() { return sym; }
-int bar() { return weak; }
-#endif
-
-//
-// Check that we extract a static library defining an undefined symbol.
-//
-// RUN: %clang -cc1 %s -triple nvptx64-nvidia-cuda -emit-llvm-bc -DRESOLVES -o %t.nvptx.resolves.bc
-// RUN: %clang -cc1 %s -triple amdgcn-amd-amdhsa -emit-llvm-bc -DRESOLVES -o %t.amdgpu.resolves.bc
-// RUN: %clang -cc1 %s -triple nvptx64-nvidia-cuda -emit-llvm-bc -DUNDEFINED -o %t.nvptx.undefined.bc
-// RUN: %clang -cc1 %s -triple amdgcn-amd-amdhsa -emit-llvm-bc -DUNDEFINED -o %t.amdgpu.undefined.bc
-// RUN: clang-offload-packager -o %t-lib.out \
-// RUN: --image=file=%t.nvptx.undefined.bc,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.amdgpu.undefined.bc,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030 \
-// RUN: --image=file=%t.nvptx.resolves.bc,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.amdgpu.resolves.bc,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t-lib.out
-// RUN: llvm-ar rcs %t.a %t.o
-// RUN: clang-offload-packager -o %t.out \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t.out
-// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \
-// RUN: --linker-path=/usr/bin/ld %t.o %t.a -o a.out 2>&1 \
-// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \
-// RUN: --linker-path=/usr/bin/ld %t.a %t.o -o a.out 2>&1 \
-// RUN: | FileCheck %s --check-prefix=LIBRARY-RESOLVES
-
-// LIBRARY-RESOLVES: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=nvptx64-nvidia-cuda -march=sm_70 {{.*}}.o {{.*}}.o
-// LIBRARY-RESOLVES: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=amdgcn-amd-amdhsa -mcpu=gfx1030 {{.*}}.o {{.*}}.o
-
-//
-// Check that we extract a static library that defines a global visible to the
-// host.
-//
-// RUN: %clang -cc1 %s -triple nvptx64-nvidia-cuda -emit-llvm-bc -DGLOBAL -o %t.nvptx.global.bc
-// RUN: %clang -cc1 %s -triple amdgcn-amd-amdhsa -emit-llvm-bc -DGLOBAL -o %t.amdgpu.global.bc
-// RUN: clang-offload-packager -o %t-lib.out \
-// RUN: --image=file=%t.nvptx.global.bc,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.amdgpu.global.bc,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t-lib.out
-// RUN: llvm-ar rcs %t.a %t.o
-// RUN: clang-offload-packager -o %t.out \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t.out
-// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \
-// RUN: --linker-path=/usr/bin/ld %t.o %t.a -o a.out 2>&1 \
-// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \
-// RUN: --linker-path=/usr/bin/ld %t.a %t.o -o a.out 2>&1 \
-// RUN: | FileCheck %s --check-prefix=LIBRARY-GLOBAL
-
-// LIBRARY-GLOBAL: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=nvptx64-nvidia-cuda -march=sm_70 {{.*}}.o {{.*}}.o
-// LIBRARY-GLOBAL: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=amdgcn-amd-amdhsa -mcpu=gfx1030 {{.*}}.o {{.*}}.o
-
-//
-// Check that we do not extract a global symbol if the source file was not
-// created by an offloading language that expects there to be a host version of
-// the symbol.
-//
-// RUN: %clang -cc1 %s -triple nvptx64-nvidia-cuda -emit-llvm-bc -DGLOBAL -o %t.nvptx.global.bc
-// RUN: %clang -cc1 %s -triple amdgcn-amd-amdhsa -emit-llvm-bc -DGLOBAL -o %t.amdgpu.global.bc
-// RUN: clang-offload-packager -o %t-lib.out \
-// RUN: --image=file=%t.nvptx.global.bc,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.amdgpu.global.bc,triple=amdgcn-amd-amdhsa,arch=gfx1030
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t-lib.out
-// RUN: llvm-ar rcs %t.a %t.o
-// RUN: clang-offload-packager -o %t.out \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t.out
-// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \
-// RUN: --linker-path=/usr/bin/ld %t.o %t.a -o a.out 2>&1 \
-// RUN: | FileCheck %s --check-prefix=LIBRARY-GLOBAL-NONE
-
-// LIBRARY-GLOBAL-NONE-NOT: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=amdgcn-amd-amdhsa -mcpu=gfx1030 {{.*}}.o {{.*}}.o
-// LIBRARY-GLOBAL-NONE-NOT: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=nvptx64-nvidia-cuda -march=sm_70 {{.*}}.o {{.*}}.o
-
-//
-// Check that we do not extract an external weak symbol.
-//
-// RUN: %clang -cc1 %s -triple nvptx64-nvidia-cuda -emit-llvm-bc -DWEAK -o %t.nvptx.weak.bc
-// RUN: %clang -cc1 %s -triple amdgcn-amd-amdhsa -emit-llvm-bc -DWEAK -o %t.amdgpu.weak.bc
-// RUN: clang-offload-packager -o %t-lib.out \
-// RUN: --image=file=%t.nvptx.weak.bc,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.amdgpu.weak.bc,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t-lib.out
-// RUN: llvm-ar rcs %t.a %t.o
-// RUN: clang-offload-packager -o %t.out \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t.out
-// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \
-// RUN: --linker-path=/usr/bin/ld %t.o %t.a -o a.out 2>&1 \
-// RUN: | FileCheck %s --check-prefix=LIBRARY-WEAK
-
-// LIBRARY-WEAK: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=nvptx64-nvidia-cuda -march=sm_70
-// LIBRARY-WEAK-NOT: {{.*}}.o {{.*}}.o
-// LIBRARY-WEAK: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=amdgcn-amd-amdhsa -mcpu=gfx1030
-
-//
-// Check that we do not extract an unneeded hidden symbol.
-//
-// RUN: %clang -cc1 %s -triple nvptx64-nvidia-cuda -emit-llvm-bc -DHIDDEN -o %t.nvptx.hidden.bc
-// RUN: %clang -cc1 %s -triple amdgcn-amd-amdhsa -emit-llvm-bc -DHIDDEN -o %t.amdgpu.hidden.bc
-// RUN: clang-offload-packager -o %t-lib.out \
-// RUN: --image=file=%t.nvptx.hidden.bc,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.amdgpu.hidden.bc,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t-lib.out
-// RUN: llvm-ar rcs %t.a %t.o
-// RUN: clang-offload-packager -o %t.out \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t.out
-// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \
-// RUN: --linker-path=/usr/bin/ld %t.o %t.a -o a.out 2>&1 \
-// RUN: | FileCheck %s --check-prefix=LIBRARY-HIDDEN
-
-// LIBRARY-HIDDEN: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=nvptx64-nvidia-cuda -march=sm_70
-// LIBRARY-HIDDEN-NOT: {{.*}}.o {{.*}}.o
-// LIBRARY-HIDDEN: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=amdgcn-amd-amdhsa -mcpu=gfx1030
-
-//
-// Check that we do not extract a static library that defines a global visible
-// to the host that is already defined.
-//
-// RUN: %clang -cc1 %s -triple nvptx64-nvidia-cuda -emit-llvm-bc -DGLOBAL -o %t.nvptx.global.bc
-// RUN: %clang -cc1 %s -triple amdgcn-amd-amdhsa -emit-llvm-bc -DGLOBAL -o %t.amdgpu.global.bc
-// RUN: clang-offload-packager -o %t-lib.out \
-// RUN: --image=file=%t.nvptx.global.bc,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.amdgpu.global.bc,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t-lib.out
-// RUN: llvm-ar rcs %t.a %t.o
-// RUN: clang-offload-packager -o %t.out \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t.out
-// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \
-// RUN: --linker-path=/usr/bin/ld %t.o %t.a %t.a -o a.out 2>&1 \
-// RUN: | FileCheck %s --check-prefix=LIBRARY-GLOBAL-DEFINED
-
-// LIBRARY-GLOBAL-DEFINED: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=nvptx64-nvidia-cuda -march=sm_70 {{.*}}.o {{.*}}.o
-// LIBRARY-GLOBAL-DEFINED-NOT: {{.*}}gfx1030{{.*}}.o
-// LIBRARY-GLOBAL-DEFINED: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=amdgcn-amd-amdhsa -mcpu=gfx1030 {{.*}}.o {{.*}}.o
-
-//
-// Check that we can use --[no-]whole-archive to control extraction.
-//
-// RUN: %clang -cc1 %s -triple nvptx64-nvidia-cuda -emit-llvm-bc -DGLOBAL -o %t.nvptx.global.bc
-// RUN: %clang -cc1 %s -triple amdgcn-amd-amdhsa -emit-llvm-bc -DGLOBAL -o %t.amdgpu.global.bc
-// RUN: clang-offload-packager -o %t-lib.out \
-// RUN: --image=file=%t.nvptx.global.bc,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.nvptx.global.bc,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_52 \
-// RUN: --image=file=%t.amdgpu.global.bc,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030 \
-// RUN: --image=file=%t.amdgpu.global.bc,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx90a
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t-lib.out
-// RUN: llvm-ar rcs %t.a %t.o
-// RUN: clang-offload-packager -o %t.out \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
-// RUN: --image=file=%t.elf.o,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030
-// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t.out
-// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \
-// RUN: --linker-path=/usr/bin/ld %t.o --whole-archive %t.a -o a.out 2>&1 \
-// RUN: | FileCheck %s --check-prefix=LIBRARY-WHOLE-ARCHIVE
-
-// LIBRARY-WHOLE-ARCHIVE: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=nvptx64-nvidia-cuda -march=sm_70 {{.*}}.o {{.*}}.o
-// LIBRARY-WHOLE-ARCHIVE: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=amdgcn-amd-amdhsa -mcpu=gfx1030 {{.*}}.o {{.*}}.o
-// LIBRARY-WHOLE-ARCHIVE: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=nvptx64-nvidia-cuda -march=sm_52 {{.*}}.o
-// LIBRARY-WHOLE-ARCHIVE: clang{{.*}} -o {{.*}}.img -dumpdir {{.*}}.img. --target=amdgcn-amd-amdhsa -mcpu=gfx90a {{.*}}.o
diff --git a/clang/test/Driver/modules-driver-cxx20-module-usage-scanner.cpp b/clang/test/Driver/modules-driver-cxx20-module-usage-scanner.cpp
new file mode 100644
index 0000000..a434587
--- /dev/null
+++ b/clang/test/Driver/modules-driver-cxx20-module-usage-scanner.cpp
@@ -0,0 +1,192 @@
+// The driver only checks whether to implicitly enable explicit module build
+// support when at least two input files are provided.
+// To trigger the C++20 module usage check, we therefore always pass a second
+// dummy file as input.
+// TODO: Remove -fmodules everywhere once implicitly enabled explicit module
+// builds are supported.
+
+// RUN: split-file %s %t
+//--- empty.cpp
+// Nothing here
+
+//--- only-global.cpp
+// RUN: %clang -std=c++20 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/only-global.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK1
+// CHECK1: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+module;
+
+//--- only-import.cpp
+// RUN: %clang -std=c++20 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/only-import.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK2
+// CHECK2: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+import A;
+
+//--- only-export.cpp
+// RUN: %clang -std=c++20 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/only-export.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK3
+// CHECK3: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+export module A;
+
+//--- leading-line-comment.cpp
+// RUN: %clang -std=c++20 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/leading-line-comment.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK4
+// CHECK4: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+// My line comment
+import A;
+
+//--- leading-block-comment1.cpp
+// RUN: %clang -std=c++20 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/leading-block-comment1.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK5
+// CHECK5: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+/*My block comment */
+import A;
+
+//--- leading-block-comment2.cpp
+// RUN: %clang -std=c++20 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/leading-block-comment2.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK6
+// CHECK6: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+/*My line comment */ import A;
+
+//--- inline-block-comment1.cpp
+// RUN: %clang -std=c++20 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/inline-block-comment1.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK7
+// CHECK7: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+export/*a comment*/module/*another comment*/A;
+
+//--- inline-block-comment2.cpp
+// RUN: %clang -std=c++20 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/inline-block-comment2.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK8
+// CHECK8: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+module/*a comment*/;
+
+//--- leading-directives.cpp
+// RUN: %clang -std=c++23 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/leading-directives.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK9
+// CHECK9: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
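+// The scanner must skip all of the preprocessor directives below before it
+// finds 'import m;'.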
+#define A
+#undef A
+#if A
+#ifdef A
+#elifdef A
+#elifndef A
+#endif
+#ifndef A
+#elif A
+#else
+#endif
+#endif
+#pragma once;
+#include <iostream>
+import m;
+
+//--- multiline-directive.cpp
+// RUN: %clang -std=c++23 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/multiline-directive.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK10
+// CHECK10: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+#define MACRO(a, \
+ b) \
+ call((a), \
+ (b)
+import a;
+
+//--- leading-line-splice.cpp
+// RUN: %clang -std=c++23 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/leading-line-splice.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK11
+// CHECK11: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+\
+module;
+
+//--- leading-line-splice-trailing-whitespace.cpp
+// RUN: %clang -std=c++23 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/leading-line-splice-trailing-whitespace.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK12
+// CHECK12: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+// v This backslash has trailing whitespace.
+ \
+export module A;
+
+//--- comment-line-splice.cpp
+// RUN: %clang -std=c++23 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/comment-line-splice.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --allow-empty --check-prefix=CHECK13
+// CHECK13-NOT: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
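+// The line splice at the end of the comment extends it across 'import A;',
+// so no module usage is found.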
+// My comment continues next-line!\
+import A;
+
+//--- comment-line-splice-trailing-whitespace.cpp
+// RUN: %clang -std=c++23 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/comment-line-splice-trailing-whitespace.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --allow-empty --check-prefix=CHECK14
+// CHECK14-NOT: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+// My comment continues next-line! This backslash has trailing whitespace. -> \
+module;
+
+//--- line-splice-in-directive1.cpp
+// RUN: %clang -std=c++23 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/line-splice-in-directive1.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK15
+// CHECK15: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+
+module\
+;
+
+//--- line-splice-in-directive2.cpp
+// RUN: %clang -std=c++23 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/line-splice-in-directive2.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK16
+// CHECK16: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+
+export\
+ module\
+ A;
+
+//--- no-module-usage1.cpp
+// RUN: %clang -std=c++23 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/no-module-usage1.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --allow-empty --check-prefix=CHECK17
+// CHECK17-NOT: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
+auto main() -> int {}
+
+//--- no-module-usage2.cpp
+// RUN: %clang -std=c++23 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/no-module-usage2.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --allow-empty --check-prefix=CHECK18
+// CHECK18-NOT: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
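+// 'moduleStruct' is a single identifier token, not the 'module' keyword.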
+moduleStruct{};
+
+//--- no-module-usage3.cpp
+// RUN: %clang -std=c++23 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/no-module-usage3.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --allow-empty --check-prefix=CHECK19
+// CHECK19-NOT: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
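+// 'export_struct' is a single identifier token, not the 'export' keyword.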
+export_struct{};
+
+//--- no-module-usage-namespace-import.cpp
+// RUN: %clang -std=c++23 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/no-module-usage-namespace-import.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --allow-empty --check-prefix=CHECK20
+// CHECK20-NOT: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
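+// 'import' followed by '::' names a namespace, so this is not an import
+// directive.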
+import::inner xi = {};
+
+//--- no-module-usage-namespace-module.cpp
+// RUN: %clang -std=c++23 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: %t/no-module-usage-namespace-module.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --allow-empty --check-prefix=CHECK21
+// CHECK21-NOT: remark: found C++20 module usage in file '{{.*}}' [-Rmodules-driver]
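+// 'module' followed by '::' names a namespace, so this is not a module
+// declaration.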
+module::inner yi = {};
+
+// RUN: not %clang -std=c++20 -ccc-print-phases -fmodules-driver -Rmodules-driver \
+// RUN: imaginary-file.cpp %t/empty.cpp 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK-NON-EXISTING-FILE-ERR
+// CHECK-NON-EXISTING-FILE-ERR: clang: error: no such file or directory: 'imaginary-file.cpp'
diff --git a/clang/test/Driver/print-supported-extensions-riscv.c b/clang/test/Driver/print-supported-extensions-riscv.c
index 2503f24..413275d 100644
--- a/clang/test/Driver/print-supported-extensions-riscv.c
+++ b/clang/test/Driver/print-supported-extensions-riscv.c
@@ -128,6 +128,7 @@
// CHECK-NEXT: smcdeleg 1.0 'Smcdeleg' (Counter Delegation Machine Level)
// CHECK-NEXT: smcntrpmf 1.0 'Smcntrpmf' (Cycle and Instret Privilege Mode Filtering)
// CHECK-NEXT: smcsrind 1.0 'Smcsrind' (Indirect CSR Access Machine Level)
+// CHECK-NEXT: smctr 1.0 'Smctr' (Control Transfer Records Machine Level)
// CHECK-NEXT: smdbltrp 1.0 'Smdbltrp' (Double Trap Machine Level)
// CHECK-NEXT: smepmp 1.0 'Smepmp' (Enhanced Physical Memory Protection)
// CHECK-NEXT: smmpm 1.0 'Smmpm' (Machine-level Pointer Masking for M-mode)
@@ -140,6 +141,7 @@
// CHECK-NEXT: sscofpmf 1.0 'Sscofpmf' (Count Overflow and Mode-Based Filtering)
// CHECK-NEXT: sscounterenw 1.0 'Sscounterenw' (Support writeable scounteren enable bit for any hpmcounter that is not read-only zero)
// CHECK-NEXT: sscsrind 1.0 'Sscsrind' (Indirect CSR Access Supervisor Level)
+// CHECK-NEXT: ssctr 1.0 'Ssctr' (Control Transfer Records Supervisor Level)
// CHECK-NEXT: ssdbltrp 1.0 'Ssdbltrp' (Double Trap Supervisor Level)
// CHECK-NEXT: ssnpm 1.0 'Ssnpm' (Supervisor-level Pointer Masking for next lower privilege mode)
// CHECK-NEXT: sspm 1.0 'Sspm' (Indicates Supervisor-mode Pointer Masking)
@@ -173,6 +175,7 @@
// CHECK-NEXT: xcvsimd 1.0 'XCVsimd' (CORE-V SIMD ALU)
// CHECK-NEXT: xmipscbop 1.0 'XMIPSCBOP' (MIPS Software Prefetch)
// CHECK-NEXT: xmipscmov 1.0 'XMIPSCMov' (MIPS conditional move instruction (mips.ccmov))
+// CHECK-NEXT: xmipsexectl 1.0 'XMIPSEXECTL' (MIPS execution control)
// CHECK-NEXT: xmipslsp 1.0 'XMIPSLSP' (MIPS optimization for hardware load-store bonding)
// CHECK-NEXT: xsfcease 1.0 'XSfcease' (SiFive sf.cease Instruction)
// CHECK-NEXT: xsfmm128t 0.6 'XSfmm128t' (TE=128 configuration)
@@ -192,6 +195,7 @@
// CHECK-NEXT: xsfvqmaccqoq 1.0 'XSfvqmaccqoq' (SiFive Int8 Matrix Multiplication Instructions (4-by-8 and 8-by-4))
// CHECK-NEXT: xsifivecdiscarddlone 1.0 'XSiFivecdiscarddlone' (SiFive sf.cdiscard.d.l1 Instruction)
// CHECK-NEXT: xsifivecflushdlone 1.0 'XSiFivecflushdlone' (SiFive sf.cflush.d.l1 Instruction)
+// CHECK-NEXT: xsmtvdot 1.0 'XSMTVDot' (SpacemiT Vector Dot Product Extension)
// CHECK-NEXT: xtheadba 1.0 'XTHeadBa' (T-Head address calculation instructions)
// CHECK-NEXT: xtheadbb 1.0 'XTHeadBb' (T-Head basic bit-manipulation instructions)
// CHECK-NEXT: xtheadbs 1.0 'XTHeadBs' (T-Head single-bit instructions)
@@ -207,15 +211,14 @@
// CHECK-NEXT: xwchc 2.2 'Xwchc' (WCH/QingKe additional compressed opcodes)
// CHECK-EMPTY:
// CHECK-NEXT: Experimental extensions
-// CHECK-NEXT: p 0.14 'P' ('Base P' (Packed SIMD))
+// CHECK-NEXT: p 0.15 'P' ('Base P' (Packed SIMD))
// CHECK-NEXT: zicfilp 1.0 'Zicfilp' (Landing pad)
// CHECK-NEXT: zicfiss 1.0 'Zicfiss' (Shadow stack)
// CHECK-NEXT: zalasr 0.1 'Zalasr' (Load-Acquire and Store-Release Instructions)
// CHECK-NEXT: zvbc32e 0.7 'Zvbc32e' (Vector Carryless Multiplication with 32-bits elements)
+// CHECK-NEXT: zvfbfa 0.1 'Zvfbfa' (Additional BF16 vector compute support)
// CHECK-NEXT: zvkgs 0.7 'Zvkgs' (Vector-Scalar GCM instructions for Cryptography)
// CHECK-NEXT: zvqdotq 0.0 'Zvqdotq' (Vector quad widening 4D Dot Product)
-// CHECK-NEXT: smctr 1.0 'Smctr' (Control Transfer Records Machine Level)
-// CHECK-NEXT: ssctr 1.0 'Ssctr' (Control Transfer Records Supervisor Level)
// CHECK-NEXT: svukte 0.3 'Svukte' (Address-Independent Latency of User-Mode Faults to Supervisor Addresses)
// CHECK-NEXT: xqccmp 0.3 'Xqccmp' (Qualcomm 16-bit Push/Pop and Double Moves)
// CHECK-NEXT: xqcia 0.7 'Xqcia' (Qualcomm uC Arithmetic Extension)
diff --git a/clang/test/Driver/program-path-priority.c b/clang/test/Driver/program-path-priority.c
index c940c4c..b88c0f2 100644
--- a/clang/test/Driver/program-path-priority.c
+++ b/clang/test/Driver/program-path-priority.c
@@ -1,5 +1,5 @@
/// Don't create symlinks on Windows
-// UNSUPPORTED: system-windows
+// UNSUPPORTED: system-windows, system-cygwin
/// Check the priority used when searching for tools
/// Names and locations are usually in this order:
diff --git a/clang/test/Driver/riscv-cpus.c b/clang/test/Driver/riscv-cpus.c
index 2698612f..ea0821c 100644
--- a/clang/test/Driver/riscv-cpus.c
+++ b/clang/test/Driver/riscv-cpus.c
@@ -157,6 +157,7 @@
// MCPU-SPACEMIT-X60-SAME: "-target-feature" "+svinval"
// MCPU-SPACEMIT-X60-SAME: "-target-feature" "+svnapot"
// MCPU-SPACEMIT-X60-SAME: "-target-feature" "+svpbmt"
+// MCPU-SPACEMIT-X60-SAME: "-target-feature" "+xsmtvdot"
// MCPU-SPACEMIT-X60-SAME: "-target-abi" "lp64d"
// We cannot check much for -mcpu=native, but it should be replaced by a valid CPU string.
@@ -403,6 +404,11 @@
// MCPU-MARCH: "-nostdsysteminc" "-target-cpu" "sifive-e31" "-target-feature" "+m" "-target-feature" "+c"
// MCPU-MARCH: "-target-abi" "ilp32"
+// -march=unset erases any previous -march value
+// RUN: %clang --target=riscv32 -### -c %s 2>&1 -march=rv32imc -march=unset -mcpu=sifive-e31 | FileCheck -check-prefix=MARCH-UNSET %s
+// MARCH-UNSET: "-nostdsysteminc" "-target-cpu" "sifive-e31" "-target-feature" "+m" "-target-feature" "+a" "-target-feature" "+c"
+// MARCH-UNSET-SAME: "-target-abi" "ilp32"
+
// Check interaction between -mcpu and mtune, -mtune won't affect arch related
// target feature, but -mcpu will.
//
diff --git a/clang/test/Driver/riscv32-toolchain.c b/clang/test/Driver/riscv32-toolchain.c
index 8cf20aa..04acf1e 100644
--- a/clang/test/Driver/riscv32-toolchain.c
+++ b/clang/test/Driver/riscv32-toolchain.c
@@ -215,7 +215,7 @@
// RUN: %clang --target=riscv32 %s -emit-llvm -S -o - | FileCheck %s
-// Check that "--no-relax" is forwarded to the linker for RISC-V.
+/// Check that "--no-relax" is forwarded to the linker for RISC-V.
// RUN: env "PATH=" %clang %s -### 2>&1 -mno-relax \
// RUN: --target=riscv32-unknown-elf --rtlib=platform --unwindlib=platform --sysroot= \
// RUN: -march=rv32imac -mabi=lp32\
@@ -223,7 +223,7 @@
// RUN: | FileCheck --check-prefix=CHECK-RV32-NORELAX %s
// CHECK-RV32-NORELAX: "--no-relax"
-// Check that "--no-relax" is not forwarded to the linker for RISC-V.
+/// Check that "--no-relax" is not forwarded to the linker for RISC-V.
// RUN:env "PATH=" %clang %s -### 2>&1 \
// RUN: --target=riscv32-unknown-elf --rtlib=platform --unwindlib=platform --sysroot= \
// RUN: -march=rv32imac -mabi=lp32\
@@ -231,7 +231,7 @@
// RUN: | FileCheck --check-prefix=CHECK-RV32-RELAX %s
// CHECK-RV32-RELAX-NOT: "--no-relax"
-// Check that "--no-relax" is forwarded to the linker for RISC-V (Gnu.cpp).
+/// Check that "--no-relax" is forwarded to the linker for RISC-V (Gnu.cpp).
// RUN: env "PATH=" %clang -### %s -fuse-ld=ld -no-pie -mno-relax \
// RUN: --target=riscv32-unknown-linux-gnu --rtlib=platform --unwindlib=platform -mabi=ilp32 \
// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_linux_sdk \
@@ -239,7 +239,7 @@
// RUN: | FileCheck -check-prefix=CHECK-RV32-GNU-NORELAX %s
// CHECK-RV32-GNU-NORELAX: "--no-relax"
-// Check that "--no-relax" is not forwarded to the linker for RISC-V (Gnu.cpp).
+/// Check that "--no-relax" is not forwarded to the linker for RISC-V (Gnu.cpp).
// RUN: env "PATH=" %clang -### %s -fuse-ld=ld -no-pie \
// RUN: --target=riscv32-unknown-linux-gnu --rtlib=platform --unwindlib=platform -mabi=ilp32 \
// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_linux_sdk \
@@ -247,6 +247,19 @@
// RUN: | FileCheck -check-prefix=CHECK-RV32-GNU-RELAX %s
// CHECK-RV32-GNU-RELAX-NOT: "--no-relax"
+/// Check that "-static -pie" is forwarded to linker when "-static-pie" is used
+// RUN: %clang -static-pie -### %s -fuse-ld= \
+// RUN: --target=riscv32-unknown-elf -rtlib=platform --unwindlib=platform \
+// RUN: --gcc-toolchain=%S/Inputs/basic_riscv32_tree \
+// RUN: --sysroot=%S/Inputs/basic_riscv32_tree/riscv32-unknown-elf 2>&1 \
+// RUN: | FileCheck -check-prefix=C-RV32-STATIC-PIE %s
+
+// C-RV32-STATIC-PIE: "-Bstatic" "-pie" "--no-dynamic-linker" "-z" "text" "-m" "elf32lriscv" "-X"
+// C-RV32-STATIC-PIE: "{{.*}}rcrt1.o"
+// C-RV32-STATIC-PIE: "{{.*}}crtbeginS.o"
+// C-RV32-STATIC-PIE: "--start-group" "-lgcc" "-lc" "-lgloss" "--end-group"
+// C-RV32-STATIC-PIE: "{{.*}}crtendS.o"
+
typedef __builtin_va_list va_list;
typedef __SIZE_TYPE__ size_t;
typedef __PTRDIFF_TYPE__ ptrdiff_t;
diff --git a/clang/test/Driver/riscv64-toolchain.c b/clang/test/Driver/riscv64-toolchain.c
index 1550f46..378f3d9 100644
--- a/clang/test/Driver/riscv64-toolchain.c
+++ b/clang/test/Driver/riscv64-toolchain.c
@@ -171,7 +171,7 @@
// RUN: %clang --target=riscv64 %s -emit-llvm -S -o - | FileCheck %s
-// Check that "--no-relax" is forwarded to the linker for RISC-V.
+/// Check that "--no-relax" is forwarded to the linker for RISC-V.
// RUN: env "PATH=" %clang %s -### 2>&1 -mno-relax \
// RUN: --target=riscv64-unknown-elf --rtlib=platform --unwindlib=platform --sysroot= \
// RUN: -march=rv64imac -mabi=lp64\
@@ -179,7 +179,7 @@
// RUN: | FileCheck --check-prefix=CHECK-RV64-NORELAX %s
// CHECK-RV64-NORELAX: "--no-relax"
-// Check that "--no-relax" is not forwarded to the linker for RISC-V.
+/// Check that "--no-relax" is not forwarded to the linker for RISC-V.
// RUN:env "PATH=" %clang %s -### 2>&1 \
// RUN: --target=riscv64-unknown-elf --rtlib=platform --unwindlib=platform --sysroot= \
// RUN: -march=rv64imac -mabi=lp64\
@@ -187,7 +187,7 @@
// RUN: | FileCheck --check-prefix=CHECK-RV64-RELAX %s
// CHECK-RV64-RELAX-NOT: "--no-relax"
-// Check that "--no-relax" is forwarded to the linker for RISC-V (Gnu.cpp).
+/// Check that "--no-relax" is forwarded to the linker for RISC-V (Gnu.cpp).
// RUN: env "PATH=" %clang -### %s -fuse-ld=ld -no-pie -mno-relax \
// RUN: --target=riscv64-unknown-linux-gnu --rtlib=platform --unwindlib=platform -mabi=lp64 \
// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_linux_sdk \
@@ -195,7 +195,7 @@
// RUN: | FileCheck -check-prefix=CHECK-RV64-GNU-NORELAX %s
// CHECK-RV64-GNU-NORELAX: "--no-relax"
-// Check that "--no-relax" is not forwarded to the linker for RISC-V (Gnu.cpp).
+/// Check that "--no-relax" is not forwarded to the linker for RISC-V (Gnu.cpp).
// RUN: env "PATH=" %clang -### %s -fuse-ld=ld -no-pie \
// RUN: --target=riscv64-unknown-linux-gnu --rtlib=platform --unwindlib=platform -mabi=lp64 \
// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_linux_sdk \
@@ -203,6 +203,19 @@
// RUN: | FileCheck -check-prefix=CHECK-RV64-GNU-RELAX %s
// CHECK-RV64-GNU-RELAX-NOT: "--no-relax"
+/// Check that "-static -pie" is forwarded to linker when "-static-pie" is used
+// RUN: %clang -static-pie -### %s -fuse-ld= \
+// RUN: --target=riscv64-unknown-elf -rtlib=platform --unwindlib=platform \
+// RUN: --gcc-toolchain=%S/Inputs/basic_riscv64_tree \
+// RUN: --sysroot=%S/Inputs/basic_riscv64_tree/riscv64-unknown-elf 2>&1 \
+// RUN: | FileCheck -check-prefix=C-RV64-STATIC-PIE %s
+
+// C-RV64-STATIC-PIE: "-Bstatic" "-pie" "--no-dynamic-linker" "-z" "text" "-m" "elf64lriscv" "-X"
+// C-RV64-STATIC-PIE: "{{.*}}rcrt1.o"
+// C-RV64-STATIC-PIE: "{{.*}}crtbeginS.o"
+// C-RV64-STATIC-PIE: "--start-group" "-lgcc" "-lc" "-lgloss" "--end-group"
+// C-RV64-STATIC-PIE: "{{.*}}crtendS.o"
+
typedef __builtin_va_list va_list;
typedef __SIZE_TYPE__ size_t;
typedef __PTRDIFF_TYPE__ ptrdiff_t;
diff --git a/clang/test/Driver/spirv-openmp-toolchain.c b/clang/test/Driver/spirv-openmp-toolchain.c
index a61f3bc..1542f50 100644
--- a/clang/test/Driver/spirv-openmp-toolchain.c
+++ b/clang/test/Driver/spirv-openmp-toolchain.c
@@ -60,3 +60,12 @@
// RUN: --libomptarget-spirv-bc-path=%t/ -nogpulib %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-OFFLOAD-ARCH-ERROR
// CHECK-OFFLOAD-ARCH-ERROR: error: failed to deduce triple for target architecture 'spirv64-intel'; specify the triple using '-fopenmp-targets' and '-Xopenmp-target' instead
+
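+// Check that '-mllvm' options are not forwarded to clang-linker-wrapper as
+// device linker arguments.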
+// RUN: %clang -mllvm --spirv-ext=+SPV_INTEL_function_pointers -### --target=x86_64-unknown-linux-gnu -fopenmp=libomp -fopenmp-targets=spirv64-intel \
+// RUN: -nogpulib %s 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK-LINKER-ARG
+// CHECK-LINKER-ARG: clang-linker-wrapper
+// CHECK-LINKER-ARG-NOT: --device-linker=spirv64
+// CHECK-LINKER-ARG-NOT: -mllvm
+// CHECK-LINKER-ARG-NOT: --spirv-ext=+SPV_INTEL_function_pointers
+// CHECK-LINKER-ARG: --linker-path
diff --git a/clang/test/Driver/spirv-toolchain.cl b/clang/test/Driver/spirv-toolchain.cl
index 53e8455..54c794c 100644
--- a/clang/test/Driver/spirv-toolchain.cl
+++ b/clang/test/Driver/spirv-toolchain.cl
@@ -92,7 +92,7 @@
// RUN: mkdir -p %t/versioned
// RUN: touch %t/versioned/spirv-as-%llvm-version-major \
// RUN: && chmod +x %t/versioned/spirv-as-%llvm-version-major
-// RUN: %if !system-windows %{ env "PATH=%t/versioned" %clang -### --target=spirv64 -x cl -c --save-temps %s 2>&1 \
+// RUN: %if !system-windows && !system-cygwin %{ env "PATH=%t/versioned" %clang -### --target=spirv64 -x cl -c --save-temps %s 2>&1 \
// RUN: | FileCheck -DVERSION=%llvm-version-major --check-prefix=VERSIONED %s %}
// VERSIONED: {{.*}}spirv-as-[[VERSION]]
diff --git a/clang/test/Driver/x86-target-features.c b/clang/test/Driver/x86-target-features.c
index e83b4f3..7ec4531 100644
--- a/clang/test/Driver/x86-target-features.c
+++ b/clang/test/Driver/x86-target-features.c
@@ -476,8 +476,8 @@
// RUN: %clang --target=x86_64-unknown-linux-gnu -mno-apxf -mapxf %s -### -o %t.o 2>&1 | FileCheck -check-prefix=APXF %s
// RUN: %clang --target=x86_64-unknown-linux-gnu -mapxf -mno-apxf %s -### -o %t.o 2>&1 | FileCheck -check-prefix=NO-APXF %s
//
-// APXF: "-target-feature" "+egpr" "-target-feature" "+push2pop2" "-target-feature" "+ppx" "-target-feature" "+ndd" "-target-feature" "+ccmp" "-target-feature" "+nf" "-target-feature" "+cf" "-target-feature" "+zu"
-// NO-APXF: "-target-feature" "-egpr" "-target-feature" "-push2pop2" "-target-feature" "-ppx" "-target-feature" "-ndd" "-target-feature" "-ccmp" "-target-feature" "-nf" "-target-feature" "-cf" "-target-feature" "-zu"
+// APXF: "-target-feature" "+egpr" "-target-feature" "+push2pop2" "-target-feature" "+ppx" "-target-feature" "+ndd" "-target-feature" "+ccmp" "-target-feature" "+nf" "-target-feature" "+zu"
+// NO-APXF: "-target-feature" "-egpr" "-target-feature" "-push2pop2" "-target-feature" "-ppx" "-target-feature" "-ndd" "-target-feature" "-ccmp" "-target-feature" "-nf" "-target-feature" "-zu"
// RUN: %clang --target=x86_64-unknown-linux-gnu -mapx-features=egpr %s -### -o %t.o 2>&1 | FileCheck -check-prefix=EGPR %s
// RUN: %clang --target=x86_64-unknown-linux-gnu -mapx-features=push2pop2 %s -### -o %t.o 2>&1 | FileCheck -check-prefix=PUSH2POP2 %s
diff --git a/clang/test/ExtractAPI/class_template_param_inheritance.cpp b/clang/test/ExtractAPI/class_template_param_inheritance.cpp
index 53b331e..5f00564 100644
--- a/clang/test/ExtractAPI/class_template_param_inheritance.cpp
+++ b/clang/test/ExtractAPI/class_template_param_inheritance.cpp
@@ -44,7 +44,7 @@ template<typename T> class Foo : public T {};
{
"kind": "inheritsFrom",
"source": "c:@ST>1#T@Foo",
- "target": "",
+ "target": "c:input.h@9",
"targetFallback": "T"
}
],
diff --git a/clang/test/FixIt/fixit-enum-scoped.cpp b/clang/test/FixIt/fixit-enum-scoped.cpp
new file mode 100644
index 0000000..b7e4138
--- /dev/null
+++ b/clang/test/FixIt/fixit-enum-scoped.cpp
@@ -0,0 +1,95 @@
+// RUN: not %clang_cc1 -fsyntax-only -fdiagnostics-parseable-fixits -std=c++20 -triple x86_64-apple-darwin %s 2>&1 | FileCheck %s
+
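+// Each scoped enumerator used where an integral operand is required should
+// get fix-its wrapping it in a static_cast to its underlying type.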
+namespace GH24265 {
+ enum class E_int { e };
+ enum class E_long : long { e };
+
+ void f() {
+ E_int::e + E_long::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:5-[[@LINE]]:5}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:12-[[@LINE-1]]:12}:")"
+ // CHECK: fix-it:{{.*}}:{[[@LINE-2]]:16-[[@LINE-2]]:16}:"static_cast<long>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-3]]:24-[[@LINE-3]]:24}:")"
+ E_int::e + 0; // CHECK: fix-it:{{.*}}:{[[@LINE]]:5-[[@LINE]]:5}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:12-[[@LINE-1]]:12}:")"
+
+ 0 * E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:9-[[@LINE]]:9}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:16-[[@LINE-1]]:16}:")"
+ 0 / E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:9-[[@LINE]]:9}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:16-[[@LINE-1]]:16}:")"
+ 0 % E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:9-[[@LINE]]:9}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:16-[[@LINE-1]]:16}:")"
+ 0 + E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:9-[[@LINE]]:9}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:16-[[@LINE-1]]:16}:")"
+ 0 - E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:9-[[@LINE]]:9}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:16-[[@LINE-1]]:16}:")"
+ 0 << E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ 0 >> E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ 0 <=> E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:11-[[@LINE]]:11}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:18-[[@LINE-1]]:18}:")"
+ 0 < E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:9-[[@LINE]]:9}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:16-[[@LINE-1]]:16}:")"
+ 0 > E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:9-[[@LINE]]:9}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:16-[[@LINE-1]]:16}:")"
+ 0 <= E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ 0 >= E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ 0 == E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ 0 != E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ 0 & E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:9-[[@LINE]]:9}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:16-[[@LINE-1]]:16}:")"
+ 0 ^ E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:9-[[@LINE]]:9}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:16-[[@LINE-1]]:16}:")"
+ 0 | E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:9-[[@LINE]]:9}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:16-[[@LINE-1]]:16}:")"
+ 0 && E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ 0 || E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+
+ int a;
+ a *= E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ a /= E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ a %= E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ a += E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ a -= E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ a <<= E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:11-[[@LINE]]:11}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:18-[[@LINE-1]]:18}:")"
+ a >>= E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:11-[[@LINE]]:11}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:18-[[@LINE-1]]:18}:")"
+ a &= E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ a ^= E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+ a |= E_int::e; // CHECK: fix-it:{{.*}}:{[[@LINE]]:10-[[@LINE]]:10}:"static_cast<int>("
+ // CHECK: fix-it:{{.*}}:{[[@LINE-1]]:17-[[@LINE-1]]:17}:")"
+
+  // TODO: These do not trigger the diagnostic yet
+ E_int b;
+ b *= 0;
+ b /= 0;
+ b %= 0;
+ b += 0;
+ b -= 0;
+ b <<= 0;
+ b >>= 0;
+ b &= 0;
+ b ^= 0;
+ b |= 0;
+
+ a = E_int::e;
+ b = 0;
+
+ E_int c = 0;
+ int d = E_int::e;
+ }
+}
diff --git a/clang/test/Frontend/fsanitize-debug-trap-reasons.c b/clang/test/Frontend/fsanitize-debug-trap-reasons.c
new file mode 100644
index 0000000..82b33ea
--- /dev/null
+++ b/clang/test/Frontend/fsanitize-debug-trap-reasons.c
@@ -0,0 +1,6 @@
+// RUN: not %clang_cc1 -triple arm64-apple-macosx14.0.0 \
+// RUN: -fsanitize=signed-integer-overflow -fsanitize=signed-integer-overflow \
+// RUN: -fsanitize-debug-trap-reasons=bad_value 2>&1 | FileCheck %s
+
+// CHECK: error: invalid value 'bad_value' in '-fsanitize-debug-trap-reasons=bad_value'
+int test(void) { return 0;}
diff --git a/clang/test/Frontend/skip-function-bodies.cpp b/clang/test/Frontend/skip-function-bodies.cpp
index d0593b4..4cfc4c5 100644
--- a/clang/test/Frontend/skip-function-bodies.cpp
+++ b/clang/test/Frontend/skip-function-bodies.cpp
@@ -1,13 +1,15 @@
// Trivial check to ensure skip-function-bodies flag is propagated.
//
-// RUN: %clang_cc1 -verify -skip-function-bodies -pedantic-errors %s
-// expected-no-diagnostics
+// RUN: %clang_cc1 -verify -skip-function-bodies %s
int f() {
// normally this should emit some diags, but we're skipping it!
this is garbage;
}
+void g() __attribute__((__diagnose_if__(baz))) {}
+// expected-error@-1 {{use of undeclared identifier 'baz'}}
+
// Make sure we only accept it as a cc1 arg.
// RUN: not %clang -skip-function-bodies %s 2>&1 | FileCheck %s
// CHECK: clang: error: unknown argument '-skip-function-bodies'; did you mean '-Xclang -skip-function-bodies'?
diff --git a/clang/test/Headers/__clang_hip_cmath.hip b/clang/test/Headers/__clang_hip_cmath.hip
index fcd7499..2e0b776 100644
--- a/clang/test/Headers/__clang_hip_cmath.hip
+++ b/clang/test/Headers/__clang_hip_cmath.hip
@@ -124,12 +124,12 @@ namespace user_namespace {
// DEFAULT-NEXT: [[B:%.*]] = alloca [[STRUCT_USER_BFLOAT16]], align 1, addrspace(5)
// DEFAULT-NEXT: [[A_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A]] to ptr
// DEFAULT-NEXT: [[B_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B]] to ptr
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 1, ptr addrspace(5) [[A]]) #[[ATTR11:[0-9]+]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[A]]) #[[ATTR11:[0-9]+]]
// DEFAULT-NEXT: call void @_ZN13user_bfloat16C1Ef(ptr noundef nonnull align 1 dereferenceable(1) [[A_ASCAST]], float noundef 1.000000e+00) #[[ATTR10]]
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 1, ptr addrspace(5) [[B]]) #[[ATTR11]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[B]]) #[[ATTR11]]
// DEFAULT-NEXT: call void @_ZN13user_bfloat16C1Ef(ptr noundef nonnull align 1 dereferenceable(1) [[B_ASCAST]], float noundef 2.000000e+00) #[[ATTR10]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 1, ptr addrspace(5) [[B]]) #[[ATTR11]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 1, ptr addrspace(5) [[A]]) #[[ATTR11]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[B]]) #[[ATTR11]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[A]]) #[[ATTR11]]
// DEFAULT-NEXT: ret void
//
// FINITEONLY-LABEL: @_ZN14user_namespace8test_fmaEv(
@@ -138,12 +138,12 @@ namespace user_namespace {
// FINITEONLY-NEXT: [[B:%.*]] = alloca [[STRUCT_USER_BFLOAT16]], align 1, addrspace(5)
// FINITEONLY-NEXT: [[A_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A]] to ptr
// FINITEONLY-NEXT: [[B_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B]] to ptr
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 1, ptr addrspace(5) [[A]]) #[[ATTR11:[0-9]+]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[A]]) #[[ATTR11:[0-9]+]]
// FINITEONLY-NEXT: call void @_ZN13user_bfloat16C1Ef(ptr noundef nonnull align 1 dereferenceable(1) [[A_ASCAST]], float noundef nofpclass(nan inf) 1.000000e+00) #[[ATTR10]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 1, ptr addrspace(5) [[B]]) #[[ATTR11]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[B]]) #[[ATTR11]]
// FINITEONLY-NEXT: call void @_ZN13user_bfloat16C1Ef(ptr noundef nonnull align 1 dereferenceable(1) [[B_ASCAST]], float noundef nofpclass(nan inf) 2.000000e+00) #[[ATTR10]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 1, ptr addrspace(5) [[B]]) #[[ATTR11]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 1, ptr addrspace(5) [[A]]) #[[ATTR11]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[B]]) #[[ATTR11]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[A]]) #[[ATTR11]]
// FINITEONLY-NEXT: ret void
//
__global__ void test_fma() {
diff --git a/clang/test/Headers/__clang_hip_math.hip b/clang/test/Headers/__clang_hip_math.hip
index 81c5f43..15bdb75 100644
--- a/clang/test/Headers/__clang_hip_math.hip
+++ b/clang/test/Headers/__clang_hip_math.hip
@@ -3636,52 +3636,52 @@ extern "C" __device__ long int test_lround(double x) {
// DEFAULT-LABEL: @test_modff(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract noundef float @__ocml_modf_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16:![0-9]+]]
// DEFAULT-NEXT: store float [[TMP0]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret float [[CALL_I]]
//
// FINITEONLY-LABEL: @test_modff(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract noundef nofpclass(nan inf) float @__ocml_modf_f32(float noundef nofpclass(nan inf) [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16:![0-9]+]]
// FINITEONLY-NEXT: store float [[TMP0]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret float [[CALL_I]]
//
// APPROX-LABEL: @test_modff(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract noundef float @__ocml_modf_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16:![0-9]+]]
// APPROX-NEXT: store float [[TMP0]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret float [[CALL_I]]
//
// NCRDIV-LABEL: @test_modff(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract noundef float @__ocml_modf_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA17:![0-9]+]]
// NCRDIV-NEXT: store float [[TMP0]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA17]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret float [[CALL_I]]
//
// AMDGCNSPIRV-LABEL: @test_modff(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15:[0-9]+]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15:[0-9]+]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func noundef addrspace(4) float @__ocml_modf_f32(float noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA17:![0-9]+]]
// AMDGCNSPIRV-NEXT: store float [[TMP0]], ptr addrspace(4) [[Y:%.*]], align 4, !tbaa [[TBAA17]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret float [[CALL_I]]
//
extern "C" __device__ float test_modff(float x, float* y) {
@@ -3691,52 +3691,52 @@ extern "C" __device__ float test_modff(float x, float* y) {
// DEFAULT-LABEL: @test_modf(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract noundef double @__ocml_modf_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18:![0-9]+]]
// DEFAULT-NEXT: store double [[TMP0]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret double [[CALL_I]]
//
// FINITEONLY-LABEL: @test_modf(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract noundef nofpclass(nan inf) double @__ocml_modf_f64(double noundef nofpclass(nan inf) [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18:![0-9]+]]
// FINITEONLY-NEXT: store double [[TMP0]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret double [[CALL_I]]
//
// APPROX-LABEL: @test_modf(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract noundef double @__ocml_modf_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18:![0-9]+]]
// APPROX-NEXT: store double [[TMP0]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret double [[CALL_I]]
//
// NCRDIV-LABEL: @test_modf(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract noundef double @__ocml_modf_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA19:![0-9]+]]
// NCRDIV-NEXT: store double [[TMP0]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA19]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret double [[CALL_I]]
//
// AMDGCNSPIRV-LABEL: @test_modf(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func noundef addrspace(4) double @__ocml_modf_f64(double noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(4) [[__TMP_ASCAST_I]], align 8, !tbaa [[TBAA19:![0-9]+]]
// AMDGCNSPIRV-NEXT: store double [[TMP0]], ptr addrspace(4) [[Y:%.*]], align 8, !tbaa [[TBAA19]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret double [[CALL_I]]
//
extern "C" __device__ double test_modf(double x, double* y) {
@@ -5362,52 +5362,52 @@ extern "C" __device__ double test_remainder(double x, double y) {
// DEFAULT-LABEL: @test_remquof(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract noundef float @__ocml_remquo_f32(float noundef [[X:%.*]], float noundef [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA12]]
// DEFAULT-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA12]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret float [[CALL_I]]
//
// FINITEONLY-LABEL: @test_remquof(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract noundef nofpclass(nan inf) float @__ocml_remquo_f32(float noundef nofpclass(nan inf) [[X:%.*]], float noundef nofpclass(nan inf) [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA12]]
// FINITEONLY-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA12]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret float [[CALL_I]]
//
// APPROX-LABEL: @test_remquof(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract noundef float @__ocml_remquo_f32(float noundef [[X:%.*]], float noundef [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA12]]
// APPROX-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA12]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret float [[CALL_I]]
//
// NCRDIV-LABEL: @test_remquof(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract noundef float @__ocml_remquo_f32(float noundef [[X:%.*]], float noundef [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA13]]
// NCRDIV-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA13]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret float [[CALL_I]]
//
// AMDGCNSPIRV-LABEL: @test_remquof(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func noundef addrspace(4) float @__ocml_remquo_f32(float noundef [[X:%.*]], float noundef [[Y:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA13]]
// AMDGCNSPIRV-NEXT: store i32 [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 4, !tbaa [[TBAA13]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret float [[CALL_I]]
//
extern "C" __device__ float test_remquof(float x, float y, int* z) {
@@ -5417,52 +5417,52 @@ extern "C" __device__ float test_remquof(float x, float y, int* z) {
// DEFAULT-LABEL: @test_remquo(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract noundef double @__ocml_remquo_f64(double noundef [[X:%.*]], double noundef [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA12]]
// DEFAULT-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA12]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret double [[CALL_I]]
//
// FINITEONLY-LABEL: @test_remquo(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract noundef nofpclass(nan inf) double @__ocml_remquo_f64(double noundef nofpclass(nan inf) [[X:%.*]], double noundef nofpclass(nan inf) [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA12]]
// FINITEONLY-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA12]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret double [[CALL_I]]
//
// APPROX-LABEL: @test_remquo(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract noundef double @__ocml_remquo_f64(double noundef [[X:%.*]], double noundef [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA12]]
// APPROX-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA12]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret double [[CALL_I]]
//
// NCRDIV-LABEL: @test_remquo(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract noundef double @__ocml_remquo_f64(double noundef [[X:%.*]], double noundef [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA13]]
// NCRDIV-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA13]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret double [[CALL_I]]
//
// AMDGCNSPIRV-LABEL: @test_remquo(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func noundef addrspace(4) double @__ocml_remquo_f64(double noundef [[X:%.*]], double noundef [[Y:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA13]]
// AMDGCNSPIRV-NEXT: store i32 [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 4, !tbaa [[TBAA13]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret double [[CALL_I]]
//
extern "C" __device__ double test_remquo(double x, double y, int* z) {
@@ -6198,57 +6198,57 @@ extern "C" __device__ BOOL_TYPE test___signbit(double x) {
// DEFAULT-LABEL: @test_sincosf(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract float @__ocml_sincos_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16]]
// DEFAULT-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA16]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret void
//
// FINITEONLY-LABEL: @test_sincosf(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract nofpclass(nan inf) float @__ocml_sincos_f32(float noundef nofpclass(nan inf) [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16]]
// FINITEONLY-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA16]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret void
//
// APPROX-LABEL: @test_sincosf(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract float @__ocml_sincos_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
// APPROX-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16]]
// APPROX-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA16]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret void
//
// NCRDIV-LABEL: @test_sincosf(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract float @__ocml_sincos_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA17]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA17]]
// NCRDIV-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA17]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret void
//
// AMDGCNSPIRV-LABEL: @test_sincosf(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func addrspace(4) float @__ocml_sincos_f32(float noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: store float [[CALL_I]], ptr addrspace(4) [[Y:%.*]], align 4, !tbaa [[TBAA17]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA17]]
// AMDGCNSPIRV-NEXT: store float [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 4, !tbaa [[TBAA17]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret void
//
extern "C" __device__ void test_sincosf(float x, float *y, float *z) {
@@ -6258,57 +6258,57 @@ extern "C" __device__ void test_sincosf(float x, float *y, float *z) {
// DEFAULT-LABEL: @test_sincos(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract double @__ocml_sincos_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18]]
// DEFAULT-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA18]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret void
//
// FINITEONLY-LABEL: @test_sincos(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract nofpclass(nan inf) double @__ocml_sincos_f64(double noundef nofpclass(nan inf) [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18]]
// FINITEONLY-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA18]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret void
//
// APPROX-LABEL: @test_sincos(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract double @__ocml_sincos_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
// APPROX-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18]]
// APPROX-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA18]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret void
//
// NCRDIV-LABEL: @test_sincos(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract double @__ocml_sincos_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA19]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA19]]
// NCRDIV-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA19]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret void
//
// AMDGCNSPIRV-LABEL: @test_sincos(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func addrspace(4) double @__ocml_sincos_f64(double noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: store double [[CALL_I]], ptr addrspace(4) [[Y:%.*]], align 8, !tbaa [[TBAA19]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(4) [[__TMP_ASCAST_I]], align 8, !tbaa [[TBAA19]]
// AMDGCNSPIRV-NEXT: store double [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 8, !tbaa [[TBAA19]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret void
//
extern "C" __device__ void test_sincos(double x, double *y, double *z) {
@@ -6318,57 +6318,57 @@ extern "C" __device__ void test_sincos(double x, double *y, double *z) {
// DEFAULT-LABEL: @test_sincospif(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract float @__ocml_sincospi_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16]]
// DEFAULT-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA16]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret void
//
// FINITEONLY-LABEL: @test_sincospif(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract nofpclass(nan inf) float @__ocml_sincospi_f32(float noundef nofpclass(nan inf) [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16]]
// FINITEONLY-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA16]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret void
//
// APPROX-LABEL: @test_sincospif(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract float @__ocml_sincospi_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
// APPROX-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16]]
// APPROX-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA16]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret void
//
// NCRDIV-LABEL: @test_sincospif(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract float @__ocml_sincospi_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA17]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA17]]
// NCRDIV-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA17]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret void
//
// AMDGCNSPIRV-LABEL: @test_sincospif(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func addrspace(4) float @__ocml_sincospi_f32(float noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: store float [[CALL_I]], ptr addrspace(4) [[Y:%.*]], align 4, !tbaa [[TBAA17]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA17]]
// AMDGCNSPIRV-NEXT: store float [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 4, !tbaa [[TBAA17]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret void
//
extern "C" __device__ void test_sincospif(float x, float *y, float *z) {
@@ -6378,57 +6378,57 @@ extern "C" __device__ void test_sincospif(float x, float *y, float *z) {
// DEFAULT-LABEL: @test_sincospi(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract double @__ocml_sincospi_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18]]
// DEFAULT-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA18]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret void
//
// FINITEONLY-LABEL: @test_sincospi(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract nofpclass(nan inf) double @__ocml_sincospi_f64(double noundef nofpclass(nan inf) [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18]]
// FINITEONLY-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA18]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret void
//
// APPROX-LABEL: @test_sincospi(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract double @__ocml_sincospi_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
// APPROX-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18]]
// APPROX-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA18]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret void
//
// NCRDIV-LABEL: @test_sincospi(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract double @__ocml_sincospi_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA19]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA19]]
// NCRDIV-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA19]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret void
//
// AMDGCNSPIRV-LABEL: @test_sincospi(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func addrspace(4) double @__ocml_sincospi_f64(double noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: store double [[CALL_I]], ptr addrspace(4) [[Y:%.*]], align 8, !tbaa [[TBAA19]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(4) [[__TMP_ASCAST_I]], align 8, !tbaa [[TBAA19]]
// AMDGCNSPIRV-NEXT: store double [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 8, !tbaa [[TBAA19]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret void
//
extern "C" __device__ void test_sincospi(double x, double *y, double *z) {
diff --git a/clang/test/Headers/__cpuidex_conflict.c b/clang/test/Headers/__cpuidex_conflict.c
index 74f4532..67f2a0c 100644
--- a/clang/test/Headers/__cpuidex_conflict.c
+++ b/clang/test/Headers/__cpuidex_conflict.c
@@ -5,6 +5,7 @@
// Ensure that we do not run into conflicts when offloading.
// RUN: %clang_cc1 %s -DIS_STATIC=static -ffreestanding -fopenmp -fopenmp-is-target-device -aux-triple x86_64-unknown-linux-gnu
+// RUN: %clang_cc1 -DIS_STATIC="" -triple nvptx64-nvidia-cuda -aux-triple x86_64-unknown-linux-gnu -aux-target-cpu x86-64 -fcuda-is-device -x cuda %s -o -
typedef __SIZE_TYPE__ size_t;
diff --git a/clang/test/Headers/mm3dnow.c b/clang/test/Headers/mm3dnow.c
index a9b6dd8..e45acb12 100644
--- a/clang/test/Headers/mm3dnow.c
+++ b/clang/test/Headers/mm3dnow.c
@@ -2,6 +2,9 @@
// RUN: %clang_cc1 -fsyntax-only -D_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS -ffreestanding %s -verify
// RUN: %clang_cc1 -fsyntax-only -ffreestanding -x c++ %s -verify
+// XFAIL: target=arm64ec-pc-windows-msvc
+// These intrinsics are not yet implemented for Arm64EC.
+
#if defined(i386) || defined(__x86_64__)
#ifndef _CLANG_DISABLE_CRT_DEPRECATION_WARNINGS
// expected-warning@mm3dnow.h:*{{The <mm3dnow.h> header is deprecated}}
diff --git a/clang/test/Headers/pmmintrin.c b/clang/test/Headers/pmmintrin.c
index 5b7a3a4..776ef75 100644
--- a/clang/test/Headers/pmmintrin.c
+++ b/clang/test/Headers/pmmintrin.c
@@ -2,6 +2,9 @@
// RUN: %clang_cc1 -fsyntax-only -ffreestanding -x c++ %s -verify
// expected-no-diagnostics
+// XFAIL: target=arm64ec-pc-windows-msvc
+// These intrinsics are not yet implemented for Arm64EC.
+
#if defined(i386) || defined(__x86_64__)
#include <pmmintrin.h>
diff --git a/clang/test/Headers/spirv_functions.cpp b/clang/test/Headers/spirv_functions.cpp
index ff036b7..fa9e2ce 100644
--- a/clang/test/Headers/spirv_functions.cpp
+++ b/clang/test/Headers/spirv_functions.cpp
@@ -15,7 +15,7 @@
// NV: call noundef ptr @_Z42__spirv_GenericCastToPtrExplicit_ToPrivatePvi
// NV: addrspacecast ptr %{{.*}} to ptr addrspace(1)
// NV: addrspacecast ptr %{{.*}} to ptr addrspace(3)
-void test_cast(int* p) {
+[[clang::sycl_external]] void test_cast(int* p) {
__spirv_GenericCastToPtrExplicit_ToGlobal(p, 5);
__spirv_GenericCastToPtrExplicit_ToLocal(p, 4);
__spirv_GenericCastToPtrExplicit_ToPrivate(p, 7);
diff --git a/clang/test/Headers/spirv_ids.cpp b/clang/test/Headers/spirv_ids.cpp
index 466be5d..a1c953e 100644
--- a/clang/test/Headers/spirv_ids.cpp
+++ b/clang/test/Headers/spirv_ids.cpp
@@ -80,7 +80,7 @@
// NV: call noundef i32 @_Z25__spirv_BuiltInSubgroupIdv() #2
// NV: call noundef i32 @_Z40__spirv_BuiltInSubgroupLocalInvocationIdv() #2
-void test_id_and_range() {
+[[clang::sycl_external]] void test_id_and_range() {
__spirv_BuiltInNumWorkgroups(0);
__spirv_BuiltInNumWorkgroups(1);
__spirv_BuiltInNumWorkgroups(2);
diff --git a/clang/test/Headers/x86-intrinsics-headers.c b/clang/test/Headers/x86-intrinsics-headers.c
index 59ca354..dc06cbd 100644
--- a/clang/test/Headers/x86-intrinsics-headers.c
+++ b/clang/test/Headers/x86-intrinsics-headers.c
@@ -2,6 +2,9 @@
// RUN: %clang_cc1 -fsyntax-only -ffreestanding -flax-vector-conversions=none %s
// RUN: %clang_cc1 -fsyntax-only -ffreestanding -x c++ %s
+// XFAIL: target=arm64ec-pc-windows-msvc
+// These intrinsics are not yet implemented for Arm64EC.
+
#if defined(i386) || defined(__x86_64__)
#ifdef __SSE4_2__
diff --git a/clang/test/Headers/x86intrin.c b/clang/test/Headers/x86intrin.c
index 53e3695..c01af1a 100644
--- a/clang/test/Headers/x86intrin.c
+++ b/clang/test/Headers/x86intrin.c
@@ -3,6 +3,9 @@
// RUN: %clang_cc1 -fsyntax-only -ffreestanding -x c++ %s -verify
// expected-no-diagnostics
+// XFAIL: target=arm64ec-pc-windows-msvc
+// These intrinsics are not yet implemented for Arm64EC.
+
#if defined(i386) || defined(__x86_64__)
// Include the metaheader that includes all x86 intrinsic headers.
diff --git a/clang/test/Headers/x86intrin.cpp b/clang/test/Headers/x86intrin.cpp
index 11d442db..6c9baa6 100644
--- a/clang/test/Headers/x86intrin.cpp
+++ b/clang/test/Headers/x86intrin.cpp
@@ -1,6 +1,9 @@
// RUN: %clang_cc1 -fsyntax-only -ffreestanding %s -verify
// expected-no-diagnostics
+// XFAIL: target=arm64ec-pc-windows-msvc
+// These intrinsics are not yet implemented for Arm64EC.
+
#if defined(i386) || defined(__x86_64__)
// Include the metaheader that includes all x86 intrinsic headers.
diff --git a/clang/test/Import/builtin-template/Inputs/S.cpp b/clang/test/Import/builtin-template/Inputs/S.cpp
index d5c9a9a..85c71f61 100644
--- a/clang/test/Import/builtin-template/Inputs/S.cpp
+++ b/clang/test/Import/builtin-template/Inputs/S.cpp
@@ -14,3 +14,13 @@ using TypePackElement = __type_pack_element<i, T...>;
template <int i>
struct X;
+
+using X0 = X<0>;
+template <int I>
+using SameAsX = X<I>;
+
+template <template <class...> class Templ, class...Types>
+using TypePackDedup = Templ<__builtin_dedup_pack<Types...>...>;
+
+template <class ...Ts>
+struct TypeList {};
diff --git a/clang/test/Import/builtin-template/test.cpp b/clang/test/Import/builtin-template/test.cpp
index 590efad..a9afbd1 100644
--- a/clang/test/Import/builtin-template/test.cpp
+++ b/clang/test/Import/builtin-template/test.cpp
@@ -1,9 +1,11 @@
// RUN: clang-import-test -dump-ast -import %S/Inputs/S.cpp -expression %s -Xcc -DSEQ | FileCheck --check-prefix=CHECK-SEQ %s
// RUN: clang-import-test -dump-ast -import %S/Inputs/S.cpp -expression %s -Xcc -DPACK | FileCheck --check-prefix=CHECK-PACK %s
-// RUN: clang-import-test -dump-ast -import %S/Inputs/S.cpp -expression %s -Xcc -DPACK -Xcc -DSEQ | FileCheck --check-prefixes=CHECK-SEQ,CHECK-PACK %s
+// RUN: clang-import-test -dump-ast -import %S/Inputs/S.cpp -expression %s -Xcc -DDEDUP | FileCheck --check-prefix=CHECK-DEDUP %s
+// RUN: clang-import-test -dump-ast -import %S/Inputs/S.cpp -expression %s -Xcc -DPACK -Xcc -DSEQ -Xcc -DDEDUP | FileCheck --check-prefixes=CHECK-SEQ,CHECK-PACK,CHECK-DEDUP %s
// CHECK-SEQ: BuiltinTemplateDecl {{.+}} <<invalid sloc>> <invalid sloc> implicit __make_integer_seq{{$}}
// CHECK-PACK: BuiltinTemplateDecl {{.+}} <<invalid sloc>> <invalid sloc> implicit __type_pack_element{{$}}
+// CHECK-DEDUP: BuiltinTemplateDecl {{.+}} <<invalid sloc>> <invalid sloc> implicit __builtin_dedup_pack{{$}}
void expr() {
#ifdef SEQ
@@ -20,4 +22,12 @@ void expr() {
static_assert(__is_same(TypePackElement<0, X<0>, X<1>>, X<0>), "");
static_assert(__is_same(TypePackElement<1, X<0>, X<1>>, X<1>), "");
#endif
+
+#ifdef DEDUP
+ static_assert(__is_same(TypePackDedup<TypeList>, TypeList<>), "");
+ static_assert(__is_same(TypePackDedup<TypeList, int, double, int>, TypeList<int, double>), "");
+ static_assert(!__is_same(TypePackDedup<TypeList, int, double, int>, TypeList<double, int>), "");
+ static_assert(__is_same(TypePackDedup<TypeList, X<0>, X<1>, X<1>, X<2>, X<0>>, TypeList<X<0>, X<1>, X<2>>), "");
+ static_assert(__is_same(TypePackDedup<TypeList, X0, SameAsX<1>, X<1>, X<0>>, TypeList<X<0>,X<1>>), "");
+#endif
}
diff --git a/clang/test/Index/Core/index-instantiated-source.cpp b/clang/test/Index/Core/index-instantiated-source.cpp
index 2a67a3a..91be4a4 100644
--- a/clang/test/Index/Core/index-instantiated-source.cpp
+++ b/clang/test/Index/Core/index-instantiated-source.cpp
@@ -73,8 +73,8 @@ void canonicalizeInstaniationReferences(TemplateClass<int, float> &object) {
typedef TemplateClass<int, float> TT;
TT::NestedType::SubNestedType subNestedType(0);
-// CHECK: [[@LINE-1]]:7 | struct/C++ | NestedType | c:@ST>2#T#T@TemplateClass@S@NestedType |
-// CHECK: [[@LINE-2]]:19 | class/C++ | SubNestedType | c:@ST>2#T#T@TemplateClass@S@NestedType@S@SubNestedType |
+// CHECK: [[@LINE-1]]:19 | class/C++ | SubNestedType | c:@ST>2#T#T@TemplateClass@S@NestedType@S@SubNestedType |
+// CHECK: [[@LINE-2]]:7 | struct/C++ | NestedType | c:@ST>2#T#T@TemplateClass@S@NestedType |
TT::NestedType::TypeAlias nestedTypeAlias;
// CHECK: [[@LINE-1]]:19 | type-alias/C++ | TypeAlias | c:@ST>2#T#T@TemplateClass@S@NestedType@TypeAlias |
diff --git a/clang/test/Index/Core/index-source.cpp b/clang/test/Index/Core/index-source.cpp
index 043e616..36bc663 100644
--- a/clang/test/Index/Core/index-source.cpp
+++ b/clang/test/Index/Core/index-source.cpp
@@ -525,9 +525,9 @@ struct Outer {
template<>
struct rd33122110::Outer::Nested<int>;
-// CHECK: [[@LINE-1]]:8 | namespace/C++ | rd33122110 | c:@N@rd33122110 | <no-cgname> | Ref,RelCont | rel: 1
+// CHECK: [[@LINE-1]]:20 | struct/C++ | Outer | c:@N@rd33122110@S@Outer | <no-cgname> | Ref,RelCont | rel: 1
// CHECK-NEXT: RelCont | Nested | c:@N@rd33122110@S@Outer@S@Nested>#I
-// CHECK: [[@LINE-3]]:20 | struct/C++ | Outer | c:@N@rd33122110@S@Outer | <no-cgname> | Ref,RelCont | rel: 1
+// CHECK: [[@LINE-3]]:8 | namespace/C++ | rd33122110 | c:@N@rd33122110 | <no-cgname> | Ref,RelCont | rel: 1
// CHECK-NEXT: RelCont | Nested | c:@N@rd33122110@S@Outer@S@Nested>#I
namespace index_offsetof {
diff --git a/clang/test/Index/c-index-api-loadTU-test.m b/clang/test/Index/c-index-api-loadTU-test.m
index eb3fde0..8ddb193 100644
--- a/clang/test/Index/c-index-api-loadTU-test.m
+++ b/clang/test/Index/c-index-api-loadTU-test.m
@@ -164,7 +164,7 @@ struct X0 {};
// CHECK: c-index-api-loadTU-test.m:66:28: TypeRef=id:0:0 Extent=[66:28 - 66:30]
// CHECK: c-index-api-loadTU-test.m:69:16: StructDecl=X0:69:16 Extent=[69:9 - 69:18]
// CHECK: c-index-api-loadTU-test.m:69:19: TypedefDecl=X1:69:19 (Definition) Extent=[69:1 - 69:21]
-// CHECK: c-index-api-loadTU-test.m:69:16: TypeRef=struct X0:71:8 Extent=[69:16 - 69:18]
+// CHECK: c-index-api-loadTU-test.m:69:16: TypeRef=struct X0:69:16 Extent=[69:16 - 69:18]
// CHECK: c-index-api-loadTU-test.m:70:8: StructDecl=X0:70:8 Extent=[70:1 - 70:10]
// CHECK: c-index-api-loadTU-test.m:71:8: StructDecl=X0:71:8 (Definition) Extent=[71:1 - 71:14]
// CHECK: c-index-api-loadTU-test.m:73:12: ObjCCategoryDecl=:73:12 Extent=[73:1 - 76:5]
diff --git a/clang/test/Index/copy-assignment-operator.cpp b/clang/test/Index/copy-assignment-operator.cpp
index 7f74193..46902c0 100644
--- a/clang/test/Index/copy-assignment-operator.cpp
+++ b/clang/test/Index/copy-assignment-operator.cpp
@@ -32,7 +32,7 @@ class Bar {
// CHECK: CXXMethod=operator=:4:10 (copy-assignment operator) [type=bool (Foo &)] [typekind=FunctionProto] [canonicaltype=bool (Foo &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Foo &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:5:10 (copy-assignment operator) [type=bool (volatile Foo &)] [typekind=FunctionProto] [canonicaltype=bool (volatile Foo &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [volatile Foo &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:6:10 (copy-assignment operator) [type=bool (const volatile Foo &)] [typekind=FunctionProto] [canonicaltype=bool (const volatile Foo &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [const volatile Foo &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
-// CHECK: CXXMethod=operator=:7:10 (copy-assignment operator) [type=bool (Foo)] [typekind=FunctionProto] [canonicaltype=bool (Foo)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Foo] [Elaborated]] [isPOD=0] [isAnonRecDecl=0]
+// CHECK: CXXMethod=operator=:7:10 (copy-assignment operator) [type=bool (Foo)] [typekind=FunctionProto] [canonicaltype=bool (Foo)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Foo] [Record]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: FunctionTemplate=operator=:11:10 [type=bool (const T &)] [typekind=FunctionProto] [canonicaltype=bool (const type-parameter-0-0 &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:12:10 [type=bool (const bool &)] [typekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [const bool &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:13:10 [type=bool (char &)] [typekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [char &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
@@ -44,4 +44,4 @@ class Bar {
// CHECK: CXXMethod=operator=:23:10 (copy-assignment operator) [type=bool (Bar<T> &)] [typekind=FunctionProto] [canonicaltype=bool (Bar<T> &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Bar<T> &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:24:10 (copy-assignment operator) [type=bool (volatile Bar<T> &)] [typekind=FunctionProto] [canonicaltype=bool (volatile Bar<T> &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [volatile Bar<T> &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:25:10 (copy-assignment operator) [type=bool (const volatile Bar<T> &)] [typekind=FunctionProto] [canonicaltype=bool (const volatile Bar<T> &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [const volatile Bar<T> &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
-// CHECK: CXXMethod=operator=:26:10 (copy-assignment operator) [type=bool (Bar<T>)] [typekind=FunctionProto] [canonicaltype=bool (Bar<T>)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Bar<T>] [Elaborated]] [isPOD=0] [isAnonRecDecl=0]
+// CHECK: CXXMethod=operator=:26:10 (copy-assignment operator) [type=bool (Bar<T>)] [typekind=FunctionProto] [canonicaltype=bool (Bar<T>)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Bar<T>] [Unexposed]] [isPOD=0] [isAnonRecDecl=0]
diff --git a/clang/test/Index/index-refs.cpp b/clang/test/Index/index-refs.cpp
index 1494684..22eb753 100644
--- a/clang/test/Index/index-refs.cpp
+++ b/clang/test/Index/index-refs.cpp
@@ -81,13 +81,13 @@ int ginitlist[] = {EnumVal};
// CHECK-NEXT: [indexDeclaration]: kind: enum
// CHECK-NEXT: [indexDeclaration]: kind: enumerator | name: EnumVal
// CHECK-NEXT: [indexDeclaration]: kind: variable | name: gx
-// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS
// CHECK-NEXT: [indexEntityReference]: kind: typedef | name: MyInt
// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS
+// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS
// CHECK-NEXT: [indexEntityReference]: kind: enumerator | name: EnumVal
// CHECK-NEXT: [indexDeclaration]: kind: function | name: foo
-// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS
// CHECK-NEXT: [indexEntityReference]: kind: typedef | name: MyInt
+// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS
// CHECK-NEXT: [indexDeclaration]: kind: enum
// CHECK-NEXT: [indexDeclaration]: kind: enumerator | name: SecondVal
// CHECK-NEXT: [indexEntityReference]: kind: enumerator | name: EnumVal
diff --git a/clang/test/Index/keep-going.cpp b/clang/test/Index/keep-going.cpp
index 6354151..0b2df72 100644
--- a/clang/test/Index/keep-going.cpp
+++ b/clang/test/Index/keep-going.cpp
@@ -26,10 +26,10 @@ class C : public A<float> { };
// CHECK: FieldDecl=a:4:13 (Definition) [type=T] [typekind=Unexposed] [canonicaltype=type-parameter-0-0] [canonicaltypekind=Unexposed] [isPOD=0]
// CHECK: TypeRef=T:3:16 [type=T] [typekind=Unexposed] [canonicaltype=type-parameter-0-0] [canonicaltypekind=Unexposed] [isPOD=0]
// CHECK: ClassDecl=B:6:7 (Definition) [type=B] [typekind=Record] [isPOD=0]
-// CHECK: C++ base class specifier=A<int>:4:7 [access=public isVirtual=false] [type=A<int>] [typekind=Elaborated] [templateargs/1= [type=int] [typekind=Int]] [canonicaltype=A<int>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=int] [typekind=Int]] [isPOD=0] [nbFields=1]
+// CHECK: C++ base class specifier=A<int>:4:7 [access=public isVirtual=false] [type=A<int>] [typekind=Unexposed] [templateargs/1= [type=int] [typekind=Int]] [canonicaltype=A<int>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=int] [typekind=Int]] [isPOD=0] [nbFields=1]
// CHECK: TemplateRef=A:4:7 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: ClassDecl=C:10:7 (Definition) [type=C] [typekind=Record] [isPOD=0]
-// CHECK: C++ base class specifier=A<float>:4:7 [access=public isVirtual=false] [type=A<float>] [typekind=Elaborated] [templateargs/1= [type=float] [typekind=Float]] [canonicaltype=A<float>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=float] [typekind=Float]] [isPOD=0] [nbFields=1]
+// CHECK: C++ base class specifier=A<float>:4:7 [access=public isVirtual=false] [type=A<float>] [typekind=Unexposed] [templateargs/1= [type=float] [typekind=Float]] [canonicaltype=A<float>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=float] [typekind=Float]] [isPOD=0] [nbFields=1]
// CHECK: TemplateRef=A:4:7 [type=] [typekind=Invalid] [isPOD=0]
// CHECK-KEEP-GOING-ONLY: VarDecl=global_var:1:12 [type=int] [typekind=Int] [isPOD=1]
diff --git a/clang/test/Index/move-assignment-operator.cpp b/clang/test/Index/move-assignment-operator.cpp
index a2c71e6..d6c4e9a 100644
--- a/clang/test/Index/move-assignment-operator.cpp
+++ b/clang/test/Index/move-assignment-operator.cpp
@@ -37,7 +37,7 @@ class Bar {
// CHECK: CXXMethod=operator=:13:10 [type=bool (volatile unsigned int &&)] [typekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [volatile unsigned int &&] [RValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:14:10 [type=bool (const volatile unsigned char &&)] [typekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [const volatile unsigned char &&] [RValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:15:10 [type=bool (int)] [typekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [int] [Int]] [isPOD=0] [isAnonRecDecl=0]
-// CHECK: CXXMethod=operator=:16:10 (copy-assignment operator) [type=bool (Foo)] [typekind=FunctionProto] [canonicaltype=bool (Foo)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Foo] [Elaborated]] [isPOD=0] [isAnonRecDecl=0]
+// CHECK: CXXMethod=operator=:16:10 (copy-assignment operator) [type=bool (Foo)] [typekind=FunctionProto] [canonicaltype=bool (Foo)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Foo] [Record]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: ClassTemplate=Bar:21:7 (Definition) [type=] [typekind=Invalid] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:22:10 (move-assignment operator) [type=bool (const Bar<T> &&)] [typekind=FunctionProto] [canonicaltype=bool (const Bar<T> &&)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [const Bar<T> &&] [RValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:23:10 (move-assignment operator) [type=bool (Bar<T> &&)] [typekind=FunctionProto] [canonicaltype=bool (Bar<T> &&)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Bar<T> &&] [RValueReference]] [isPOD=0] [isAnonRecDecl=0]
diff --git a/clang/test/Index/opencl-types.cl b/clang/test/Index/opencl-types.cl
index 13d7937..4850601 100644
--- a/clang/test/Index/opencl-types.cl
+++ b/clang/test/Index/opencl-types.cl
@@ -17,11 +17,11 @@ void kernel testFloatTypes() {
}
// CHECK: VarDecl=scalarHalf:11:8 (Definition){{( \(invalid\))?}} [type=__private half] [typekind=Half] [isPOD=1]
-// CHECK: VarDecl=vectorHalf:12:9 (Definition) [type=__private half4] [typekind=Elaborated] [canonicaltype=half __private __attribute__((ext_vector_type(4)))] [canonicaltypekind=ExtVector] [isPOD=1]
+// CHECK: VarDecl=vectorHalf:12:9 (Definition) [type=__private half4] [typekind=Typedef] [canonicaltype=half __private __attribute__((ext_vector_type(4)))] [canonicaltypekind=ExtVector] [isPOD=1]
// CHECK: VarDecl=scalarFloat:13:9 (Definition) [type=__private float] [typekind=Float] [isPOD=1]
-// CHECK: VarDecl=vectorFloat:14:10 (Definition) [type=__private float4] [typekind=Elaborated] [canonicaltype=float __private __attribute__((ext_vector_type(4)))] [canonicaltypekind=ExtVector] [isPOD=1]
+// CHECK: VarDecl=vectorFloat:14:10 (Definition) [type=__private float4] [typekind=Typedef] [canonicaltype=float __private __attribute__((ext_vector_type(4)))] [canonicaltypekind=ExtVector] [isPOD=1]
// CHECK: VarDecl=scalarDouble:15:10 (Definition){{( \(invalid\))?}} [type=__private double] [typekind=Double] [isPOD=1]
-// CHECK: VarDecl=vectorDouble:16:11 (Definition){{( \(invalid\))?}} [type=__private double4] [typekind=Elaborated] [canonicaltype=double __private __attribute__((ext_vector_type(4)))] [canonicaltypekind=ExtVector] [isPOD=1]
+// CHECK: VarDecl=vectorDouble:16:11 (Definition){{( \(invalid\))?}} [type=__private double4] [typekind=Typedef] [canonicaltype=double __private __attribute__((ext_vector_type(4)))] [canonicaltypekind=ExtVector] [isPOD=1]
#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
@@ -120,10 +120,10 @@ void kernel testMiscOpenCLTypes() {
reserve_id_t scalarOCLReserveID;
}
-// CHECK: VarDecl=scalarOCLSampler:117:19 (Definition) [type=const sampler_t] [typekind=Elaborated] const [canonicaltype=const sampler_t] [canonicaltypekind=OCLSampler] [isPOD=1]
-// CHECK: VarDecl=scalarOCLEvent:118:15 (Definition) [type=__private clk_event_t] [typekind=Elaborated] [canonicaltype=__private clk_event_t] [canonicaltypekind=Unexposed] [isPOD=1]
-// CHECK: VarDecl=scalarOCLQueue:119:11 (Definition) [type=__private queue_t] [typekind=Elaborated] [canonicaltype=__private queue_t] [canonicaltypekind=OCLQueue] [isPOD=1]
-// CHECK: VarDecl=scalarOCLReserveID:120:16 (Definition) [type=__private reserve_id_t] [typekind=Elaborated] [canonicaltype=__private reserve_id_t] [canonicaltypekind=OCLReserveID] [isPOD=1]
+// CHECK: VarDecl=scalarOCLSampler:117:19 (Definition) [type=const sampler_t] [typekind=Typedef] const [canonicaltype=const sampler_t] [canonicaltypekind=OCLSampler] [isPOD=1]
+// CHECK: VarDecl=scalarOCLEvent:118:15 (Definition) [type=__private clk_event_t] [typekind=Typedef] [canonicaltype=__private clk_event_t] [canonicaltypekind=Unexposed] [isPOD=1]
+// CHECK: VarDecl=scalarOCLQueue:119:11 (Definition) [type=__private queue_t] [typekind=Typedef] [canonicaltype=__private queue_t] [canonicaltypekind=OCLQueue] [isPOD=1]
+// CHECK: VarDecl=scalarOCLReserveID:120:16 (Definition) [type=__private reserve_id_t] [typekind=Typedef] [canonicaltype=__private reserve_id_t] [canonicaltypekind=OCLReserveID] [isPOD=1]
#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : enable
@@ -131,4 +131,4 @@ void kernel testExtOpenCLTypes() {
intel_sub_group_avc_mce_payload_t mce_payload;
}
-// CHECK: VarDecl=mce_payload:131:37 (Definition){{( \(invalid\))?}} [type=__private intel_sub_group_avc_mce_payload_t] [typekind=Elaborated] [canonicaltype=__private intel_sub_group_avc_mce_payload_t] [canonicaltypekind=OCLIntelSubgroupAVCMcePayload] [isPOD=1]
+// CHECK: VarDecl=mce_payload:131:37 (Definition){{( \(invalid\))?}} [type=__private intel_sub_group_avc_mce_payload_t] [typekind=Typedef] [canonicaltype=__private intel_sub_group_avc_mce_payload_t] [canonicaltypekind=OCLIntelSubgroupAVCMcePayload] [isPOD=1]
diff --git a/clang/test/Index/paren-type.c b/clang/test/Index/paren-type.c
index 0975191..14a7785 100644
--- a/clang/test/Index/paren-type.c
+++ b/clang/test/Index/paren-type.c
@@ -9,7 +9,7 @@ extern int (VariableWithParentheses);
typedef int MyTypedef;
// CHECK-TYPE: VarDecl=VariableWithParentheses2:
-// CHECK-TYPE-SAME: [type=MyTypedef] [typekind=Elaborated]
+// CHECK-TYPE-SAME: [type=MyTypedef] [typekind=Typedef]
// CHECK-TYPE-SAME: [canonicaltype=int] [canonicaltypekind=Int]
// CHECK-TYPEDECL: VarDecl=VariableWithParentheses2
// CHECK-TYPEDECL-SAME: [typedeclaration=MyTypedef] [typekind=Typedef]
diff --git a/clang/test/Index/print-type-size.cpp b/clang/test/Index/print-type-size.cpp
index a365528f..6a5a02a 100644
--- a/clang/test/Index/print-type-size.cpp
+++ b/clang/test/Index/print-type-size.cpp
@@ -45,8 +45,8 @@ union u {
struct simple s1;
};
-// CHECK64: VarDecl=s1:[[@LINE+2]]:8 (Definition) [type=simple] [typekind=Elaborated] [sizeof=48] [alignof=8]
-// CHECK32: VarDecl=s1:[[@LINE+1]]:8 (Definition) [type=simple] [typekind=Elaborated] [sizeof=36] [alignof=4]
+// CHECK64: VarDecl=s1:[[@LINE+2]]:8 (Definition) [type=simple] [typekind=Record] [sizeof=48] [alignof=8]
+// CHECK32: VarDecl=s1:[[@LINE+1]]:8 (Definition) [type=simple] [typekind=Record] [sizeof=36] [alignof=4]
simple s1;
struct Test {
@@ -354,8 +354,8 @@ struct BaseStruct
BaseStruct(){}
double v0;
float v1;
-// CHECK64: FieldDecl=fg:[[@LINE+2]]:7 (Definition) [type=C] [typekind=Elaborated] [sizeof=88] [alignof=8] [offsetof=128]
-// CHECK32: FieldDecl=fg:[[@LINE+1]]:7 (Definition) [type=C] [typekind=Elaborated] [sizeof=60] [alignof=4] [offsetof=96]
+// CHECK64: FieldDecl=fg:[[@LINE+2]]:7 (Definition) [type=C] [typekind=Record] [sizeof=88] [alignof=8] [offsetof=128]
+// CHECK32: FieldDecl=fg:[[@LINE+1]]:7 (Definition) [type=C] [typekind=Record] [sizeof=60] [alignof=4] [offsetof=96]
C fg;
// CHECK64: FieldDecl=rg:[[@LINE+2]]:8 (Definition) [type=C &] [typekind=LValueReference] [sizeof=88] [alignof=8] [offsetof=832]
// CHECK32: FieldDecl=rg:[[@LINE+1]]:8 (Definition) [type=C &] [typekind=LValueReference] [sizeof=60] [alignof=4] [offsetof=576]
diff --git a/clang/test/Index/print-type.c b/clang/test/Index/print-type.c
index 7375644..d30f4be 100644
--- a/clang/test/Index/print-type.c
+++ b/clang/test/Index/print-type.c
@@ -32,10 +32,10 @@ void fun(struct { int x; int y; } *param);
_Atomic(unsigned long) aul;
// RUN: c-index-test -test-print-type %s | FileCheck %s
-// CHECK: FunctionDecl=f:3:6 (Definition) [type=int *(int *, char *, FooType, int *, void (*)(int))] [typekind=FunctionProto] [canonicaltype=int *(int *, char *, int, int *, void (*)(int))] [canonicaltypekind=FunctionProto] [resulttype=int *] [resulttypekind=Pointer] [args= [int *] [Pointer] [char *] [Pointer] [FooType] [Elaborated] [int[5]] [ConstantArray] [void (*)(int)] [Pointer]] [isPOD=0]
+// CHECK: FunctionDecl=f:3:6 (Definition) [type=int *(int *, char *, FooType, int *, void (*)(int))] [typekind=FunctionProto] [canonicaltype=int *(int *, char *, int, int *, void (*)(int))] [canonicaltypekind=FunctionProto] [resulttype=int *] [resulttypekind=Pointer] [args= [int *] [Pointer] [char *] [Pointer] [FooType] [Typedef] [int[5]] [ConstantArray] [void (*)(int)] [Pointer]] [isPOD=0]
// CHECK: ParmDecl=p:3:13 (Definition) [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: ParmDecl=x:3:22 (Definition) [type=char *] [typekind=Pointer] [isPOD=1] [pointeetype=char] [pointeekind=Char_{{[US]}}]
-// CHECK: ParmDecl=z:3:33 (Definition) [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: ParmDecl=z:3:33 (Definition) [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: TypeRef=FooType:1:13 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: ParmDecl=arr:3:40 (Definition) [type=int[5]] [typekind=ConstantArray] [isPOD=1]
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
@@ -47,14 +47,14 @@ _Atomic(unsigned long) aul;
// CHECK: UnaryOperator= [type=int] [typekind=Int] [isPOD=1]
// CHECK: DeclRefExpr=p:3:13 [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: DeclStmt= [type=] [typekind=Invalid] [isPOD=0]
-// CHECK: VarDecl=w:5:17 (Definition) [type=const FooType] [typekind=Elaborated] const [canonicaltype=const int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: VarDecl=w:5:17 (Definition) [type=const FooType] [typekind=Typedef] const [canonicaltype=const int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: TypeRef=FooType:1:13 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: DeclRefExpr=z:3:33 [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: DeclRefExpr=z:3:33 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: ReturnStmt= [type=] [typekind=Invalid] [isPOD=0]
// CHECK: BinaryOperator=+ [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: BinaryOperator=+ [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: DeclRefExpr=p:3:13 [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
-// CHECK: DeclRefExpr=z:3:33 [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: DeclRefExpr=z:3:33 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: ArraySubscriptExpr= [type=int] [typekind=Int] [isPOD=1]
// CHECK: UnexposedExpr=arr:3:40 [type=int[5]] [typekind=ConstantArray] [isPOD=1]
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
@@ -64,10 +64,10 @@ _Atomic(unsigned long) aul;
// CHECK: VarDecl=x:10:38 [type=__attribute__((__vector_size__(4 * sizeof(int)))) int] [typekind=Vector] [isPOD=1]
// CHECK: TypedefDecl=int4_t:11:46 (Definition) [type=int4_t] [typekind=Typedef] [canonicaltype=__attribute__((__vector_size__(4 * sizeof(int)))) int] [canonicaltypekind=Vector] [isPOD=1]
// CHECK: ParmDecl=incompletearray:13:12 (Definition) [type=int[]] [typekind=IncompleteArray] [isPOD=1]
-// CHECK: FunctionDecl=elaboratedEnumType:15:25 [type=enum Enum ()] [typekind=FunctionNoProto] [canonicaltype=enum Enum ()] [canonicaltypekind=FunctionNoProto] [resulttype=enum Enum] [resulttypekind=Elaborated] [isPOD=0]
+// CHECK: FunctionDecl=elaboratedEnumType:15:25 [type=enum Enum ()] [typekind=FunctionNoProto] [canonicaltype=enum Enum ()] [canonicaltypekind=FunctionNoProto] [resulttype=enum Enum] [resulttypekind=Enum] [isPOD=0]
// CHECK: TypeRef=enum Enum:15:6 [type=enum Enum] [typekind=Enum] [isPOD=1]
// CHECK: StructDecl=Struct:16:8 (Definition) [type=struct Struct] [typekind=Record] [isPOD=1]
-// CHECK: FunctionDecl=elaboratedStructType:16:32 [type=struct Struct ()] [typekind=FunctionNoProto] [canonicaltype=struct Struct ()] [canonicaltypekind=FunctionNoProto] [resulttype=struct Struct] [resulttypekind=Elaborated] [isPOD=0]
+// CHECK: FunctionDecl=elaboratedStructType:16:32 [type=struct Struct ()] [typekind=FunctionNoProto] [canonicaltype=struct Struct ()] [canonicaltypekind=FunctionNoProto] [resulttype=struct Struct] [resulttypekind=Record] [isPOD=0]
// CHECK: TypeRef=struct Struct:16:8 [type=struct Struct] [typekind=Record] [isPOD=1]
// CHECK: StructDecl=struct (unnamed at {{.*}}):18:1 (Definition) [type=struct (unnamed at {{.*}}print-type.c:18:1)] [typekind=Record] [isPOD=1] [nbFields=2] [isAnon=1] [isAnonRecDecl=0]
// CHECK: StructDecl=struct (unnamed at {{.*}}):23:1 (Definition) [type=struct (unnamed at {{.*}}print-type.c:23:1)] [typekind=Record] [isPOD=1] [nbFields=1] [isAnon=1] [isAnonRecDecl=0]
diff --git a/clang/test/Index/print-type.cpp b/clang/test/Index/print-type.cpp
index 141895d..ef9805f 100644
--- a/clang/test/Index/print-type.cpp
+++ b/clang/test/Index/print-type.cpp
@@ -105,38 +105,38 @@ inline namespace InlineNS {}
// CHECK: Namespace=inner:14:11 (Definition) [type=] [typekind=Invalid] [isPOD=0]
// CHECK: StructDecl=Bar:16:8 (Definition) [type=outer::inner::Bar] [typekind=Record] [isPOD=0] [nbFields=3]
// CHECK: CXXConstructor=Bar:17:3 (Definition) (converting constructor) [type=void (outer::Foo<bool> *){{.*}}] [typekind=FunctionProto] [canonicaltype=void (outer::Foo<bool> *){{.*}}] [canonicaltypekind=FunctionProto] [resulttype=void] [resulttypekind=Void] [args= [outer::Foo<bool> *] [Pointer]] [isPOD=0]
-// CHECK: ParmDecl=foo:17:25 (Definition) [type=outer::Foo<bool> *] [typekind=Pointer] [canonicaltype=outer::Foo<bool> *] [canonicaltypekind=Pointer] [isPOD=1] [pointeetype=outer::Foo<bool>] [pointeekind=Elaborated]
+// CHECK: ParmDecl=foo:17:25 (Definition) [type=outer::Foo<bool> *] [typekind=Pointer] [canonicaltype=outer::Foo<bool> *] [canonicaltypekind=Pointer] [isPOD=1] [pointeetype=outer::Foo<bool>] [pointeekind=Unexposed]
// CHECK: NamespaceRef=outer:1:11 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: TemplateRef=Foo:4:8 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: CompoundStmt= [type=] [typekind=Invalid] [isPOD=0]
-// CHECK: TypedefDecl=FooType:19:15 (Definition) [type=outer::inner::Bar::FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: TypeAliasDecl=AliasType:20:9 (Definition) [type=outer::inner::Bar::AliasType] [typekind=Typedef] [canonicaltype=double] [canonicaltypekind=Double] [isPOD=1]
+// CHECK: TypedefDecl=FooType:19:15 (Definition) [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: TypeAliasDecl=AliasType:20:9 (Definition) [type=AliasType] [typekind=Typedef] [canonicaltype=double] [canonicaltypekind=Double] [isPOD=1]
// CHECK: FieldDecl=p:21:8 (Definition) [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
-// CHECK: CXXMethod=f:22:8 (Definition) [type=int *(int *, char *, FooType){{.*}}] [typekind=FunctionProto] [canonicaltype=int *(int *, char *, int){{.*}}] [canonicaltypekind=FunctionProto] [resulttype=int *] [resulttypekind=Pointer] [args= [int *] [Pointer] [char *] [Pointer] [FooType] [Elaborated]] [isPOD=0]
+// CHECK: CXXMethod=f:22:8 (Definition) [type=int *(int *, char *, FooType){{.*}}] [typekind=FunctionProto] [canonicaltype=int *(int *, char *, int){{.*}}] [canonicaltypekind=FunctionProto] [resulttype=int *] [resulttypekind=Pointer] [args= [int *] [Pointer] [char *] [Pointer] [FooType] [Typedef]] [isPOD=0]
// CHECK: ParmDecl=p:22:15 (Definition) [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: ParmDecl=x:22:24 (Definition) [type=char *] [typekind=Pointer] [isPOD=1] [pointeetype=char] [pointeekind=Char_{{[US]}}]
-// CHECK: ParmDecl=z:22:35 (Definition) [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: TypeRef=outer::inner::Bar::FooType:19:15 [type=outer::inner::Bar::FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: ParmDecl=z:22:35 (Definition) [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: TypeRef=outer::inner::Bar::FooType:19:15 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: CompoundStmt= [type=] [typekind=Invalid] [isPOD=0]
// CHECK: DeclStmt= [type=] [typekind=Invalid] [isPOD=0]
-// CHECK: VarDecl=w:23:19 (Definition) [type=const FooType] [typekind=Elaborated] const [canonicaltype=const int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: TypeRef=outer::inner::Bar::FooType:19:15 [type=outer::inner::Bar::FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: UnexposedExpr=z:22:35 [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: DeclRefExpr=z:22:35 [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: VarDecl=w:23:19 (Definition) [type=const FooType] [typekind=Typedef] const [canonicaltype=const int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: TypeRef=outer::inner::Bar::FooType:19:15 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: UnexposedExpr=z:22:35 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: DeclRefExpr=z:22:35 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: ReturnStmt= [type=] [typekind=Invalid] [isPOD=0]
// CHECK: BinaryOperator=+ [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: UnexposedExpr=p:22:15 [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: DeclRefExpr=p:22:15 [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
-// CHECK: UnexposedExpr=z:22:35 [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: DeclRefExpr=z:22:35 [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: TypedefDecl=OtherType:26:18 (Definition) [type=outer::inner::Bar::OtherType] [typekind=Typedef] [canonicaltype=double] [canonicaltypekind=Double] [isPOD=1]
-// CHECK: TypedefDecl=ArrayType:27:15 (Definition) [type=outer::inner::Bar::ArrayType] [typekind=Typedef] [canonicaltype=int[5]] [canonicaltypekind=ConstantArray] [isPOD=1]
+// CHECK: UnexposedExpr=z:22:35 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: DeclRefExpr=z:22:35 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: TypedefDecl=OtherType:26:18 (Definition) [type=OtherType] [typekind=Typedef] [canonicaltype=double] [canonicaltypekind=Double] [isPOD=1]
+// CHECK: TypedefDecl=ArrayType:27:15 (Definition) [type=ArrayType] [typekind=Typedef] [canonicaltype=int[5]] [canonicaltypekind=ConstantArray] [isPOD=1]
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
-// CHECK: FieldDecl=baz:28:20 (Definition) [type=Baz<int, 1, Foo>] [typekind=Elaborated] [templateargs/3= [type=int] [typekind=Int]] [canonicaltype=outer::Baz<int, 1, outer::Foo>] [canonicaltypekind=Record] [canonicaltemplateargs/3= [type=int] [typekind=Int]] [isPOD=1]
+// CHECK: FieldDecl=baz:28:20 (Definition) [type=Baz<int, 1, Foo>] [typekind=Unexposed] [templateargs/3= [type=int] [typekind=Int]] [canonicaltype=outer::Baz<int, 1, outer::Foo>] [canonicaltypekind=Record] [canonicaltemplateargs/3= [type=int] [typekind=Int]] [isPOD=1]
// CHECK: TemplateRef=Baz:9:8 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
// CHECK: TemplateRef=Foo:4:8 [type=] [typekind=Invalid] [isPOD=0]
-// CHECK: FieldDecl=qux:29:38 (Definition) [type=Qux<int, char *, Foo<int>, FooType>] [typekind=Elaborated] [templateargs/4= [type=int] [typekind=Int] [type=char *] [typekind=Pointer] [type=Foo<int>] [typekind=Elaborated] [type=FooType] [typekind=Elaborated]] [canonicaltype=outer::Qux<int, char *, outer::Foo<int>, int>] [canonicaltypekind=Record] [canonicaltemplateargs/4= [type=int] [typekind=Int] [type=char *] [typekind=Pointer] [type=outer::Foo<int>] [typekind=Record] [type=int] [typekind=Int]] [isPOD=1]
+// CHECK: FieldDecl=qux:29:38 (Definition) [type=Qux<int, char *, Foo<int>, FooType>] [typekind=Unexposed] [templateargs/4= [type=int] [typekind=Int] [type=char *] [typekind=Pointer] [type=Foo<int>] [typekind=Unexposed] [type=FooType] [typekind=Typedef]] [canonicaltype=outer::Qux<int, char *, outer::Foo<int>, int>] [canonicaltypekind=Record] [canonicaltemplateargs/4= [type=int] [typekind=Int] [type=char *] [typekind=Pointer] [type=outer::Foo<int>] [typekind=Record] [type=int] [typekind=Int]] [isPOD=1]
// CHECK: TemplateRef=Qux:12:8 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: TemplateRef=Foo:4:8 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: FunctionTemplate=tbar:36:3 [type=T (int)] [typekind=FunctionProto] [canonicaltype=type-parameter-0-0 (int)] [canonicaltypekind=FunctionProto] [resulttype=T] [resulttypekind=Unexposed] [isPOD=0]
@@ -163,11 +163,11 @@ inline namespace InlineNS {}
// CHECK: DeclRefExpr=i:44:14 [type=int] [typekind=Int] [isPOD=1]
// CHECK: StructDecl=Blob:46:8 (Definition) [type=Blob] [typekind=Record] [isPOD=1] [nbFields=2]
// CHECK: FieldDecl=i:47:7 (Definition) [type=int] [typekind=Int] [isPOD=1]
-// CHECK: VarDecl=member_pointer:50:12 (Definition) [type=int Blob::*] [typekind=MemberPointer] [isPOD=1] [pointeetype=int] [pointeekind=Int] [isAnonRecDecl=0]
-// CHECK: FunctionDecl=elaboratedNamespaceType:52:42 [type=NS::Type (const NS::Type)] [typekind=FunctionProto] [canonicaltype=NS::Type (NS::Type)] [canonicaltypekind=FunctionProto] [resulttype=NS::Type] [resulttypekind=Elaborated] [args= [const NS::Type] [Elaborated]] [isPOD=0]
+// CHECK: VarDecl=member_pointer:50:12 (Definition) [type=int Blob::*] [typekind=MemberPointer] [canonicaltype=int Blob::*] [canonicaltypekind=MemberPointer] [isPOD=1] [pointeetype=int] [pointeekind=Int] [isAnonRecDecl=0]
+// CHECK: FunctionDecl=elaboratedNamespaceType:52:42 [type=NS::Type (const NS::Type)] [typekind=FunctionProto] [canonicaltype=NS::Type (NS::Type)] [canonicaltypekind=FunctionProto] [resulttype=NS::Type] [resulttypekind=Record] [args= [const NS::Type] [Record]] [isPOD=0]
// CHECK: NamespaceRef=NS:52:11 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: TypeRef=struct NS::Type:52:23 [type=NS::Type] [typekind=Record] [isPOD=1]
-// CHECK: ParmDecl=t:52:81 (Definition) [type=const NS::Type] [typekind=Elaborated] const [canonicaltype=const NS::Type] [canonicaltypekind=Record] [isPOD=1]
+// CHECK: ParmDecl=t:52:81 (Definition) [type=const NS::Type] [typekind=Record] const [canonicaltype=const NS::Type] [canonicaltypekind=Record] [isPOD=1]
// CHECK: VarDecl=autoI:54:6 (Definition) [type=int] [typekind=Auto] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
// CHECK: VarDecl=autoTbar:55:6 (Definition) [type=int] [typekind=Auto] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
@@ -176,9 +176,9 @@ inline namespace InlineNS {}
// CHECK: DeclRefExpr=tbar:36:3 RefName=[55:17 - 55:21] RefName=[55:21 - 55:26] [type=int (int)] [typekind=FunctionProto] [canonicaltype=int (int)] [canonicaltypekind=FunctionProto] [isPOD=0]
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
// CHECK: VarDecl=autoBlob:56:6 (Definition) [type=Blob *] [typekind=Auto] [canonicaltype=Blob *] [canonicaltypekind=Pointer] [isPOD=1]
-// CHECK: CXXNewExpr= [type=Blob *] [typekind=Pointer] [canonicaltype=Blob *] [canonicaltypekind=Pointer] [isPOD=1] [pointeetype=Blob] [pointeekind=Elaborated]
+// CHECK: CXXNewExpr= [type=Blob *] [typekind=Pointer] [canonicaltype=Blob *] [canonicaltypekind=Pointer] [isPOD=1] [pointeetype=Blob] [pointeekind=Record]
// CHECK: TypeRef=struct Blob:46:8 [type=Blob] [typekind=Record] [isPOD=1] [nbFields=2]
-// CHECK: CallExpr=Blob:46:8 [type=Blob] [typekind=Elaborated] [canonicaltype=Blob] [canonicaltypekind=Record] [isPOD=1] [nbFields=2]
+// CHECK: CallExpr=Blob:46:8 [type=Blob] [typekind=Record] [canonicaltype=Blob] [canonicaltypekind=Record] [isPOD=1] [nbFields=2]
// CHECK: FunctionDecl=autoFunction:57:6 (Definition) [type=int ()] [typekind=FunctionProto] [canonicaltype=int ()] [canonicaltypekind=FunctionProto] [resulttype=int] [resulttypekind=Auto] [isPOD=0]
// CHECK: CompoundStmt= [type=] [typekind=Invalid] [isPOD=0]
// CHECK: ReturnStmt= [type=] [typekind=Invalid] [isPOD=0]
@@ -187,20 +187,20 @@ inline namespace InlineNS {}
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
// CHECK: TypeAliasTemplateDecl=TypeAlias:61:1 (Definition) [type=] [typekind=Invalid] [isPOD=0]
// CHECK: TemplateTypeParameter=T:60:20 (Definition) [type=T] [typekind=Unexposed] [canonicaltype=type-parameter-0-0] [canonicaltypekind=Unexposed] [isPOD=0]
-// CHECK: FieldDecl=foo:63:39 (Definition) [type=TypeAlias<int>] [typekind=Elaborated] [templateargs/1= [type=int] [typekind=Int]] [canonicaltype=outer::Qux<int>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=int] [typekind=Int]] [isPOD=1]
+// CHECK: FieldDecl=foo:63:39 (Definition) [type=TypeAlias<int>] [typekind=Unexposed] [templateargs/1= [type=int] [typekind=Int]] [canonicaltype=outer::Qux<int>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=int] [typekind=Int]] [isPOD=1]
// CHECK: TemplateRef=TypeAlias:61:1 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: ClassTemplate=Specialization:66:8 (Definition) [type=] [typekind=Invalid] [isPOD=0]
// CHECK: TemplateTypeParameter=T:65:19 (Definition) [type=T] [typekind=Unexposed] [canonicaltype=type-parameter-0-0] [canonicaltypekind=Unexposed] [isPOD=0]
// CHECK: StructDecl=Specialization:69:8 [Specialization of Specialization:66:8] [Template arg 0: kind: 1, type: int] [type=Specialization<int>] [typekind=Record] [templateargs/1= [type=int] [typekind=Int]] [isPOD=0]
-// CHECK: VarDecl=templRefParam:71:40 (Definition) [type=Specialization<Specialization<bool> &>] [typekind=Elaborated] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
+// CHECK: VarDecl=templRefParam:71:40 (Definition) [type=Specialization<Specialization<bool> &>] [typekind=Unexposed] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
// CHECK: TemplateRef=Specialization:66:8 [type=] [typekind=Invalid] [isPOD=0]
-// CHECK: CallExpr=Specialization:66:8 [type=Specialization<Specialization<bool> &>] [typekind=Elaborated] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
+// CHECK: CallExpr=Specialization:66:8 [type=Specialization<Specialization<bool> &>] [typekind=Unexposed] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
// CHECK: VarDecl=autoTemplRefParam:72:6 (Definition) [type=Specialization<Specialization<bool> &>] [typekind=Auto] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
-// CHECK: UnexposedExpr=templRefParam:71:40 [type=const Specialization<Specialization<bool> &>] [typekind=Elaborated] const [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=const Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1] [isAnonRecDecl=0]
-// CHECK: DeclRefExpr=templRefParam:71:40 [type=Specialization<Specialization<bool> &>] [typekind=Elaborated] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
-// CHECK: TypeAliasDecl=baz:76:7 (Definition) [type=baz] [typekind=Typedef] [templateargs/1= [type=A<void>] [typekind=Elaborated]] [canonicaltype=A<void>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=void] [typekind=Void]] [isPOD=0]
+// CHECK: UnexposedExpr=templRefParam:71:40 [type=const Specialization<Specialization<bool> &>] [typekind=Record] const [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=const Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1] [isAnonRecDecl=0]
+// CHECK: DeclRefExpr=templRefParam:71:40 [type=Specialization<Specialization<bool> &>] [typekind=Unexposed] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
+// CHECK: TypeAliasDecl=baz:76:7 (Definition) [type=baz] [typekind=Typedef] [templateargs/1= [type=A<void>] [typekind=Unexposed]] [canonicaltype=A<void>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=void] [typekind=Void]] [isPOD=0]
// CHECK: VarDecl=autoTemplPointer:78:6 (Definition) [type=Specialization<Specialization<bool> &> *] [typekind=Auto] [canonicaltype=Specialization<Specialization<bool> &> *] [canonicaltypekind=Pointer] [isPOD=1] [pointeetype=Specialization<Specialization<bool> &>] [pointeekind=Auto]
-// CHECK: CallExpr=Bar:17:3 [type=outer::inner::Bar] [typekind=Elaborated] [canonicaltype=outer::inner::Bar] [canonicaltypekind=Record] [args= [outer::Foo<bool> *] [Pointer]] [isPOD=0] [nbFields=3]
+// CHECK: CallExpr=Bar:17:3 [type=outer::inner::Bar] [typekind=Record] [canonicaltype=outer::inner::Bar] [canonicaltypekind=Record] [args= [outer::Foo<bool> *] [Pointer]] [isPOD=0] [nbFields=3]
// CHECK: StructDecl=(anonymous struct at {{.*}}):84:3 (Definition) [type=X::(anonymous struct at {{.*}}print-type.cpp:84:3)] [typekind=Record] [isPOD=1] [nbFields=1] [isAnon=1]
// CHECK: ClassDecl=(anonymous class at {{.*}}:85:3 (Definition) [type=X::(anonymous class at {{.*}}print-type.cpp:85:3)] [typekind=Record] [isPOD=1] [nbFields=1] [isAnon=1]
// CHECK: UnionDecl=(anonymous union at {{.*}}:86:3 (Definition) [type=X::(anonymous union at {{.*}}print-type.cpp:86:3)] [typekind=Record] [isPOD=1] [nbFields=2] [isAnon=1]
diff --git a/clang/test/Index/recursive-cxx-member-calls.cpp b/clang/test/Index/recursive-cxx-member-calls.cpp
index 11c011a..c7f0053 100644
--- a/clang/test/Index/recursive-cxx-member-calls.cpp
+++ b/clang/test/Index/recursive-cxx-member-calls.cpp
@@ -402,7 +402,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo * Name) {
// CHECK-tokens: Identifier: "getKind" [33:17 - 33:24] CXXMethod=getKind:33:17 (static)
// CHECK-tokens: Punctuation: "(" [33:24 - 33:25] CXXMethod=getKind:33:17 (static)
// CHECK-tokens: Keyword: "const" [33:25 - 33:30] ParmDecl=Name:33:48 (Definition)
-// CHECK-tokens: Identifier: "IdentifierInfo" [33:31 - 33:45] TypeRef=class clang::IdentifierInfo:66:7
+// CHECK-tokens: Identifier: "IdentifierInfo" [33:31 - 33:45] TypeRef=class clang::IdentifierInfo:11:9
// CHECK-tokens: Punctuation: "*" [33:46 - 33:47] ParmDecl=Name:33:48 (Definition)
// CHECK-tokens: Identifier: "Name" [33:48 - 33:52] ParmDecl=Name:33:48 (Definition)
// CHECK-tokens: Punctuation: ")" [33:52 - 33:53] CXXMethod=getKind:33:17 (static)
@@ -846,7 +846,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo * Name) {
// CHECK-tokens: Keyword: "unsigned" [88:14 - 88:22] NonTypeTemplateParameter=N:88:23 (Definition)
// CHECK-tokens: Identifier: "N" [88:23 - 88:24] NonTypeTemplateParameter=N:88:23 (Definition)
// CHECK-tokens: Punctuation: ">" [88:25 - 88:26] FunctionTemplate=Case:88:42 (Definition)
-// CHECK-tokens: Identifier: "StringSwitch" [88:27 - 88:39] TypeRef=StringSwitch<T, R>:83:47
+// CHECK-tokens: Identifier: "StringSwitch" [88:27 - 88:39] TypeRef=llvm::StringSwitch<T, R>:83:47
// CHECK-tokens: Punctuation: "&" [88:40 - 88:41] FunctionTemplate=Case:88:42 (Definition)
// CHECK-tokens: Identifier: "Case" [88:42 - 88:46] FunctionTemplate=Case:88:42 (Definition)
// CHECK-tokens: Punctuation: "(" [88:46 - 88:47] FunctionTemplate=Case:88:42 (Definition)
@@ -1619,7 +1619,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo * Name) {
// CHECK: 33:17: CXXMethod=getKind:33:17 (static) Extent=[33:5 - 33:53]
// CHECK: 33:12: TypeRef=enum clang::AttributeList::Kind:13:10 Extent=[33:12 - 33:16]
// CHECK: 33:48: ParmDecl=Name:33:48 (Definition) Extent=[33:25 - 33:52]
-// CHECK: 33:31: TypeRef=class clang::IdentifierInfo:66:7 Extent=[33:31 - 33:45]
+// CHECK: 33:31: TypeRef=class clang::IdentifierInfo:11:9 Extent=[33:31 - 33:45]
// CHECK: 36:8: FunctionDecl=magic_length:36:8 Extent=[36:1 - 36:35]
// CHECK: 36:1: TypeRef=size_t:2:25 Extent=[36:1 - 36:7]
// CHECK: 36:33: ParmDecl=s:36:33 (Definition) Extent=[36:21 - 36:34]
diff --git a/clang/test/Index/redeclarations.cpp b/clang/test/Index/redeclarations.cpp
index 11dc932..ea5e970 100644
--- a/clang/test/Index/redeclarations.cpp
+++ b/clang/test/Index/redeclarations.cpp
@@ -17,5 +17,5 @@ class A
// CHECK: redeclarations.h:19:19: FieldDecl=x:19:19 (Definition) Extent=[19:5 - 19:20]
// CHECK: redeclarations.h:19:5: TemplateRef=B:8:7 Extent=[19:5 - 19:6]
// CHECK: redeclarations.h:19:7: TypeRef=class D:17:7 Extent=[19:7 - 19:8]
-// CHECK: redeclarations.h:19:16: TypeRef=class A:3:7 Extent=[19:16 - 19:17]
+// CHECK: redeclarations.h:19:16: TypeRef=class A:19:16 Extent=[19:16 - 19:17]
// CHECK: redeclarations.cpp:3:7: ClassDecl=A:3:7 (Definition) Extent=[3:1 - 5:2]
diff --git a/clang/test/Index/skip-parsed-bodies/compile_commands.json b/clang/test/Index/skip-parsed-bodies/compile_commands.json
index 991227a8..e087a28 100644
--- a/clang/test/Index/skip-parsed-bodies/compile_commands.json
+++ b/clang/test/Index/skip-parsed-bodies/compile_commands.json
@@ -25,8 +25,8 @@
// CHECK-NEXT: [indexEntityReference]: kind: variable | name: some_val | {{.*}} | loc: .{{/|\\\\?}}t.h:9:27
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def2 | {{.*}} | isRedecl: 0 | isDef: 0 | isContainer: 0
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def2 | {{.*}} | isRedecl: 1 | isDef: 1 | isContainer: 1
-// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS |
// CHECK-NEXT: [indexEntityReference]: kind: c++-class | name: C |
+// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS |
// CHECK-NEXT: [indexEntityReference]: kind: variable | name: some_val | {{.*}} | loc: .{{/|\\\\?}}t.h:15:5
// CHECK-NEXT: [indexDeclaration]: kind: function | name: foo1 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: 1
// CHECK-NEXT: [indexEntityReference]: kind: variable | name: some_val | {{.*}} | loc: .{{/|\\\\?}}t.h:19:5
@@ -40,8 +40,8 @@
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def1 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: skipped
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def2 | {{.*}} | isRedecl: 0 | isDef: 0 | isContainer: 0
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def2 | {{.*}} | isContainer: skipped
-// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS |
// CHECK-NEXT: [indexEntityReference]: kind: c++-class | name: C |
+// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS |
// CHECK-NEXT: [indexDeclaration]: kind: function | name: foo1 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: skipped
// CHECK-NEXT: [ppIncludedFile]: .{{/|\\\\?}}pragma_once.h
// CHECK-NEXT: [indexDeclaration]: kind: function | name: foo2 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: 1
@@ -60,8 +60,8 @@
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def1 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: skipped
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def2 | {{.*}} | isRedecl: 0 | isDef: 0 | isContainer: 0
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def2 | {{.*}} | isRedecl: 1 | isDef: 1 | isContainer: skipped
-// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS |
// CHECK-NEXT: [indexEntityReference]: kind: c++-class | name: C |
+// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS |
// CHECK-NEXT: [indexDeclaration]: kind: function | name: foo1 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: skipped
// CHECK-NEXT: [ppIncludedFile]: .{{/|\\\\?}}pragma_once.h
// CHECK-NEXT: [indexDeclaration]: kind: function | name: foo2 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: skipped
diff --git a/clang/test/Interpreter/assignment-with-implicit-ctor.cpp b/clang/test/Interpreter/assignment-with-implicit-ctor.cpp
index 24cea8e..cef568c 100644
--- a/clang/test/Interpreter/assignment-with-implicit-ctor.cpp
+++ b/clang/test/Interpreter/assignment-with-implicit-ctor.cpp
@@ -1,5 +1,4 @@
// REQUIRES: host-supports-jit
-// UNSUPPORTED: system-aix
//
// RUN: cat %s | clang-repl | FileCheck %s
// RUN: cat %s | clang-repl -Xcc -O2 | FileCheck %s
diff --git a/clang/test/Interpreter/code-undo.cpp b/clang/test/Interpreter/code-undo.cpp
index 83ade0e..4516910 100644
--- a/clang/test/Interpreter/code-undo.cpp
+++ b/clang/test/Interpreter/code-undo.cpp
@@ -1,4 +1,3 @@
-// UNSUPPORTED: system-aix
// RUN: cat %s | clang-repl | FileCheck %s
extern "C" int printf(const char *, ...);
int x1 = 0;
diff --git a/clang/test/Interpreter/const.cpp b/clang/test/Interpreter/const.cpp
index 52be75e0..cadd446 100644
--- a/clang/test/Interpreter/const.cpp
+++ b/clang/test/Interpreter/const.cpp
@@ -1,4 +1,3 @@
-// UNSUPPORTED: system-aix, system-zos
// see https://github.com/llvm/llvm-project/issues/68092
// XFAIL: host={{.*}}-windows-msvc
diff --git a/clang/test/Interpreter/cxx20-modules.cppm b/clang/test/Interpreter/cxx20-modules.cppm
index 4e56e2f..97744e3 100644
--- a/clang/test/Interpreter/cxx20-modules.cppm
+++ b/clang/test/Interpreter/cxx20-modules.cppm
@@ -1,5 +1,4 @@
// REQUIRES: host-supports-jit, x86_64-linux
-// UNSUPPORTED: system-aix
//
// RUN: rm -rf %t
// RUN: mkdir -p %t
diff --git a/clang/test/Interpreter/execute-stmts.cpp b/clang/test/Interpreter/execute-stmts.cpp
index 433c6811..cc27fa6 100644
--- a/clang/test/Interpreter/execute-stmts.cpp
+++ b/clang/test/Interpreter/execute-stmts.cpp
@@ -1,5 +1,4 @@
// REQUIRES: host-supports-jit
-// UNSUPPORTED: system-aix
// RUN: cat %s | clang-repl -Xcc -Xclang -Xcc -verify | FileCheck %s
// RUN: %clang_cc1 -verify -fincremental-extensions -emit-llvm -o - %s \
// RUN: | FileCheck --check-prefix=CODEGEN-CHECK %s
diff --git a/clang/test/Interpreter/execute-weak.cpp b/clang/test/Interpreter/execute-weak.cpp
index 85fa5d2..f469451 100644
--- a/clang/test/Interpreter/execute-weak.cpp
+++ b/clang/test/Interpreter/execute-weak.cpp
@@ -1,4 +1,4 @@
-// UNSUPPORTED: system-aix, system-windows
+// UNSUPPORTED: system-windows
// RUN: cat %s | clang-repl | FileCheck %s
extern "C" int printf(const char *, ...);
diff --git a/clang/test/Interpreter/execute.c b/clang/test/Interpreter/execute.c
index 44a3a32..ca8f83c 100644
--- a/clang/test/Interpreter/execute.c
+++ b/clang/test/Interpreter/execute.c
@@ -1,5 +1,4 @@
// REQUIRES: host-supports-jit
-// UNSUPPORTED: system-aix
// RUN: cat %s | clang-repl -Xcc -xc -Xcc -Xclang -Xcc -verify | FileCheck %s
// RUN: cat %s | clang-repl -Xcc -xc -Xcc -O2 -Xcc -Xclang -Xcc -verify| FileCheck %s
diff --git a/clang/test/Interpreter/execute.cpp b/clang/test/Interpreter/execute.cpp
index 534a54e..82cd70a 100644
--- a/clang/test/Interpreter/execute.cpp
+++ b/clang/test/Interpreter/execute.cpp
@@ -1,5 +1,3 @@
-// UNSUPPORTED: system-aix
-
// clang-format off
// RUN: clang-repl "int i = 10;" 'extern "C" int printf(const char*,...);' \
// RUN: 'auto r1 = printf("i = %d\n", i);' | FileCheck --check-prefix=CHECK-DRIVER %s
diff --git a/clang/test/Interpreter/fail.cpp b/clang/test/Interpreter/fail.cpp
index 4963df8..d92debc 100644
--- a/clang/test/Interpreter/fail.cpp
+++ b/clang/test/Interpreter/fail.cpp
@@ -1,5 +1,4 @@
// REQUIRES: host-supports-jit
-// UNSUPPORTED: system-aix
// clang-repl can be called from the prompt in non-interactive mode as a
// calculator in shell scripts, for example. In that case, if there is an error,
// we should set the exit code to indicate failure.
diff --git a/clang/test/Interpreter/global-dtor.cpp b/clang/test/Interpreter/global-dtor.cpp
index 1f241d9..9cb454b 100644
--- a/clang/test/Interpreter/global-dtor.cpp
+++ b/clang/test/Interpreter/global-dtor.cpp
@@ -1,5 +1,4 @@
// clang-format off
-// UNSUPPORTED: system-aix
//
// Tests that a global destructor is run on platforms with GNU exception support.
//
@@ -10,4 +9,4 @@ extern "C" int printf(const char *, ...);
struct D { float f = 1.0; D *m = nullptr; D(){} ~D() { printf("D[f=%f, m=0x%llx]\n", f, reinterpret_cast<unsigned long long>(m)); }} d;
// CHECK: D[f=1.000000, m=0x0]
-%quit
\ No newline at end of file
+%quit
diff --git a/clang/test/Interpreter/incremental-mode.cpp b/clang/test/Interpreter/incremental-mode.cpp
index 71ff794..d63cee0 100644
--- a/clang/test/Interpreter/incremental-mode.cpp
+++ b/clang/test/Interpreter/incremental-mode.cpp
@@ -1,5 +1,3 @@
-// UNSUPPORTED: system-aix
-//
// RUN: clang-repl -Xcc -E
// RUN: clang-repl -Xcc -emit-llvm
// RUN: clang-repl -Xcc -xc
diff --git a/clang/test/Interpreter/inline-asm.cpp b/clang/test/Interpreter/inline-asm.cpp
index f94f14d..6d071b1 100644
--- a/clang/test/Interpreter/inline-asm.cpp
+++ b/clang/test/Interpreter/inline-asm.cpp
@@ -1,5 +1,4 @@
// REQUIRES: host-supports-jit, x86_64-linux
-// UNSUPPORTED: system-aix
//
// RUN: rm -rf %t
// RUN: mkdir -p %t
diff --git a/clang/test/Interpreter/inline-virtual.cpp b/clang/test/Interpreter/inline-virtual.cpp
index c9e8568..3790c11 100644
--- a/clang/test/Interpreter/inline-virtual.cpp
+++ b/clang/test/Interpreter/inline-virtual.cpp
@@ -1,5 +1,4 @@
// REQUIRES: host-supports-jit
-// UNSUPPORTED: system-aix
//
// This test is flaky with ASan: https://github.com/llvm/llvm-project/issues/135401
// UNSUPPORTED: asan
diff --git a/clang/test/Interpreter/lambda.cpp b/clang/test/Interpreter/lambda.cpp
index db8c9db..e694108 100644
--- a/clang/test/Interpreter/lambda.cpp
+++ b/clang/test/Interpreter/lambda.cpp
@@ -1,5 +1,4 @@
// REQUIRES: host-supports-jit
-// UNSUPPORTED: system-aix
// RUN: cat %s | clang-repl | FileCheck %s
// At -O2, somehow "x = 42" appears first when piped into FileCheck,
// see https://github.com/llvm/llvm-project/issues/143547.
@@ -27,4 +26,4 @@ auto capture = [&]() { return x * 2; };
printf("x = %d\n", x);
// CHECK: x = 42
-%quit
\ No newline at end of file
+%quit
diff --git a/clang/test/Interpreter/lit.local.cfg b/clang/test/Interpreter/lit.local.cfg
index ac6d220..37af512 100644
--- a/clang/test/Interpreter/lit.local.cfg
+++ b/clang/test/Interpreter/lit.local.cfg
@@ -1,2 +1,6 @@
-if "host-supports-jit" not in config.available_features:
+# clang-repl is not supported on AIX and zOS
+unsupported_platforms = ["system-aix", "system-zos"]
+
+if "host-supports-jit" not in config.available_features or \
+ any(up in config.available_features for up in unsupported_platforms):
config.unsupported = True
diff --git a/clang/test/Interpreter/multiline.cpp b/clang/test/Interpreter/multiline.cpp
index 0f5ef48..a9f1455 100644
--- a/clang/test/Interpreter/multiline.cpp
+++ b/clang/test/Interpreter/multiline.cpp
@@ -1,5 +1,4 @@
// REQUIRES: host-supports-jit
-// UNSUPPORTED: system-aix
// RUN: cat %s | clang-repl -Xcc -Xclang -Xcc -verify | FileCheck %s
// expected-no-diagnostics
diff --git a/clang/test/Interpreter/pretty-print.c b/clang/test/Interpreter/pretty-print.c
index e1408c0..588df70 100644
--- a/clang/test/Interpreter/pretty-print.c
+++ b/clang/test/Interpreter/pretty-print.c
@@ -1,5 +1,4 @@
// REQUIRES: host-supports-jit
-// UNSUPPORTED: system-aix
// RUN: cat %s | clang-repl -Xcc -xc | FileCheck %s
// RUN: cat %s | clang-repl -Xcc -std=c++11 | FileCheck %s
diff --git a/clang/test/Interpreter/pretty-print.cpp b/clang/test/Interpreter/pretty-print.cpp
index e1036ab..bad71cd 100644
--- a/clang/test/Interpreter/pretty-print.cpp
+++ b/clang/test/Interpreter/pretty-print.cpp
@@ -1,7 +1,7 @@
// RUN: clang-repl "int i = 10;" 'extern "C" int printf(const char*,...);' \
// RUN: 'auto r1 = printf("i = %d\n", i);' | FileCheck --check-prefix=CHECK-DRIVER %s
// The test is flaky with asan https://github.com/llvm/llvm-project/pull/148701.
-// UNSUPPORTED: system-aix, asan
+// UNSUPPORTED: asan
// CHECK-DRIVER: i = 10
// RUN: cat %s | clang-repl -Xcc -std=c++11 -Xcc -fno-delayed-template-parsing | FileCheck %s
extern "C" int printf(const char*,...);
diff --git a/clang/test/Interpreter/simple-exception.cpp b/clang/test/Interpreter/simple-exception.cpp
index 651e8d9..2d43f80 100644
--- a/clang/test/Interpreter/simple-exception.cpp
+++ b/clang/test/Interpreter/simple-exception.cpp
@@ -1,7 +1,6 @@
// clang-format off
-// UNSUPPORTED: system-aix
// XFAIL for arm, or running on Windows.
-// XFAIL: target=arm-{{.*}}, target=armv{{.*}}, system-windows
+// XFAIL: target=arm-{{.*}}, target=armv{{.*}}, system-windows, system-cygwin
// RUN: cat %s | clang-repl | FileCheck %s
// Incompatible with msan. It passes with -O3 but fails with -Oz. Interpreter
diff --git a/clang/test/Lexer/cross-windows-on-linux.cpp b/clang/test/Lexer/cross-windows-on-linux.cpp
index 3932ffc..ece16b1 100644
--- a/clang/test/Lexer/cross-windows-on-linux.cpp
+++ b/clang/test/Lexer/cross-windows-on-linux.cpp
@@ -10,4 +10,4 @@
// on non-Windows unless -fms-extensions is passed. It won't fail in this way on
// Windows because the filesystem will interpret the backslash as a directory
// separator.
-// UNSUPPORTED: system-windows
+// UNSUPPORTED: system-windows, system-cygwin
diff --git a/clang/test/Lexer/has_feature_cfi.c b/clang/test/Lexer/has_feature_cfi.c
new file mode 100644
index 0000000..a4e5803
--- /dev/null
+++ b/clang/test/Lexer/has_feature_cfi.c
@@ -0,0 +1,87 @@
+// REQUIRES: target={{x86_64.*-linux.*}}
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi -c %s -o - | FileCheck %s --check-prefix=CHECK-CFI
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi -fsanitize-cfi-cross-dso -c %s -o - | FileCheck %s --check-prefix=CHECK-CFI
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi -fno-sanitize=cfi-nvcall,cfi-vcall,cfi-mfcall,cfi-icall -c %s -o - | FileCheck %s --check-prefix=CHECK-CFI
+// CHECK-CFI: CFISanitizerEnabled
+
+// RUN: %clang -E -c %s -o - | FileCheck %s --check-prefix=CHECK-NO-CFI
+// CHECK-NO-CFI: CFISanitizerDisabled
+
+// RUN: %clang -E -fsanitize=kcfi -c %s -o - | FileCheck %s --check-prefixes=CHECK-KCFI,CHECK-NO-CFI
+// CHECK-KCFI: KCFISanitizerEnabled
+
+// RUN: %clang -E -fsanitize=cfi-cast-strict -c %s -o - | FileCheck %s --check-prefix=CHECK-CFI-CAST-STRICT
+// CHECK-CFI-CAST-STRICT: CFICastStrictSanitizerEnabled
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi-derived-cast -c %s -o - | FileCheck %s --check-prefixes=CHECK-CFI,CHECK-CFI-DERIVED-CAST
+// CHECK-CFI-DERIVED-CAST: CFIDerivedCastSanitizerEnabled
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi-icall -c %s -o - | FileCheck %s --check-prefixes=CHECK-CFI,CHECK-CFI-ICALL
+// CHECK-CFI-ICALL: CFIICallSanitizerEnabled
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi-mfcall -c %s -o - | FileCheck %s --check-prefixes=CHECK-CFI,CHECK-CFI-MFCALL
+// CHECK-CFI-MFCALL: CFIMFCallSanitizerEnabled
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi-unrelated-cast -c %s -o - | FileCheck %s --check-prefixes=CHECK-CFI,CHECK-CFI-UNRELATED-CAST
+// CHECK-CFI-UNRELATED-CAST: CFIUnrelatedCastSanitizerEnabled
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi-nvcall -c %s -o - | FileCheck %s --check-prefixes=CHECK-CFI,CHECK-CFI-NVCALL
+// CHECK-CFI-NVCALL: CFINVCallSanitizerEnabled
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi-vcall -c %s -o - | FileCheck %s --check-prefixes=CHECK-CFI,CHECK-CFI-VCALL
+// CHECK-CFI-VCALL: CFIVCallSanitizerEnabled
+
+#if __has_feature(cfi_sanitizer)
+int CFISanitizerEnabled();
+#else
+int CFISanitizerDisabled();
+#endif
+
+#if __has_feature(kcfi)
+int KCFISanitizerEnabled();
+#else
+int KCFISanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_cast_strict_sanitizer)
+int CFICastStrictSanitizerEnabled();
+#else
+int CFICastStrictSanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_derived_cast_sanitizer)
+int CFIDerivedCastSanitizerEnabled();
+#else
+int CFIDerivedCastSanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_icall_sanitizer)
+int CFIICallSanitizerEnabled();
+#else
+int CFIICallSanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_mfcall_sanitizer)
+int CFIMFCallSanitizerEnabled();
+#else
+int CFIMFCallSanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_unrelated_cast_sanitizer)
+int CFIUnrelatedCastSanitizerEnabled();
+#else
+int CFIUnrelatedCastSanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_nvcall_sanitizer)
+int CFINVCallSanitizerEnabled();
+#else
+int CFINVCallSanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_vcall_sanitizer)
+int CFIVCallSanitizerEnabled();
+#else
+int CFIVCallSanitizerDisabled();
+#endif
diff --git a/clang/test/Misc/diag-template-diffing-cxx11.cpp b/clang/test/Misc/diag-template-diffing-cxx11.cpp
index c62bffe..0b145475 100644
--- a/clang/test/Misc/diag-template-diffing-cxx11.cpp
+++ b/clang/test/Misc/diag-template-diffing-cxx11.cpp
@@ -24,17 +24,17 @@ namespace std {
}
} // end namespace std
// CHECK-ELIDE-NOTREE: no matching function for call to 'f'
-// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'vector<std::string>' to 'vector<string>' for 1st argument
+// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'vector<std::basic_string>' to 'vector<versa_string>' for 1st argument
// CHECK-NOELIDE-NOTREE: no matching function for call to 'f'
-// CHECK-NOELIDE-NOTREE: candidate function not viable: no known conversion from 'vector<std::string>' to 'vector<string>' for 1st argument
+// CHECK-NOELIDE-NOTREE: candidate function not viable: no known conversion from 'vector<std::basic_string>' to 'vector<versa_string>' for 1st argument
// CHECK-ELIDE-TREE: no matching function for call to 'f'
// CHECK-ELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument
// CHECK-ELIDE-TREE: vector<
-// CHECK-ELIDE-TREE: [std::string != string]>
+// CHECK-ELIDE-TREE: [std::basic_string != versa_string]>
// CHECK-NOELIDE-TREE: no matching function for call to 'f'
// CHECK-NOELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument
// CHECK-NOELIDE-TREE: vector<
-// CHECK-NOELIDE-TREE: [std::string != string]>
+// CHECK-NOELIDE-TREE: [std::basic_string != versa_string]>
template <int... A>
class I1{};
diff --git a/clang/test/Misc/pragma-attribute-supported-attributes-list.test b/clang/test/Misc/pragma-attribute-supported-attributes-list.test
index 0569353..37ff33e 100644
--- a/clang/test/Misc/pragma-attribute-supported-attributes-list.test
+++ b/clang/test/Misc/pragma-attribute-supported-attributes-list.test
@@ -31,6 +31,7 @@
// CHECK-NEXT: CFConsumed (SubjectMatchRule_variable_is_parameter)
// CHECK-NEXT: CFGuard (SubjectMatchRule_function)
// CHECK-NEXT: CFICanonicalJumpTable (SubjectMatchRule_function)
+// CHECK-NEXT: CFISalt (SubjectMatchRule_hasType_functionType)
// CHECK-NEXT: CFUnknownTransfer (SubjectMatchRule_function)
// CHECK-NEXT: CPUDispatch (SubjectMatchRule_function)
// CHECK-NEXT: CPUSpecific (SubjectMatchRule_function)
@@ -181,6 +182,7 @@
// CHECK-NEXT: ReturnTypestate (SubjectMatchRule_function, SubjectMatchRule_variable_is_parameter)
// CHECK-NEXT: ReturnsNonNull (SubjectMatchRule_objc_method, SubjectMatchRule_function)
// CHECK-NEXT: ReturnsTwice (SubjectMatchRule_function)
+// CHECK-NEXT: SYCLExternal (SubjectMatchRule_function)
// CHECK-NEXT: SYCLKernelEntryPoint (SubjectMatchRule_function)
// CHECK-NEXT: SYCLSpecialClass (SubjectMatchRule_record)
// CHECK-NEXT: ScopedLockable (SubjectMatchRule_record)
diff --git a/clang/test/Modules/GH153933.cpp b/clang/test/Modules/GH153933.cpp
new file mode 100644
index 0000000..41184c6
--- /dev/null
+++ b/clang/test/Modules/GH153933.cpp
@@ -0,0 +1,23 @@
+// RUN: rm -rf %t
+// RUN: mkdir -p %t
+// RUN: split-file %s %t
+//
+// RUN: %clang_cc1 -std=c++20 %t/B.cppm -emit-module-interface -o %t/B.pcm
+// RUN: %clang_cc1 -std=c++20 -fsyntax-only -fprebuilt-module-path=%t %t/C.cpp
+
+//--- A.hpp
+template<class> struct A {};
+template<class T> struct B {
+ virtual A<T> v() { return {}; }
+};
+B<void> x;
+
+//--- B.cppm
+module;
+#include "A.hpp"
+export module B;
+using ::x;
+
+//--- C.cpp
+#include "A.hpp"
+import B;
diff --git a/clang/test/Modules/GH155028-1.cpp b/clang/test/Modules/GH155028-1.cpp
new file mode 100644
index 0000000..d60112b4
--- /dev/null
+++ b/clang/test/Modules/GH155028-1.cpp
@@ -0,0 +1,17 @@
+// RUN: %clang_cc1 -std=c++20 -verify %s
+// expected-no-diagnostics
+
+#pragma clang module build M
+module "M" {
+ module "A" {}
+ module "B" {}
+}
+#pragma clang module contents
+#pragma clang module begin M.A
+enum E1 {};
+#pragma clang module end
+#pragma clang module begin M.B
+enum E1 {};
+using T = __underlying_type(E1);
+#pragma clang module end
+#pragma clang module endbuild
diff --git a/clang/test/Modules/befriend-2.cppm b/clang/test/Modules/befriend-2.cppm
new file mode 100644
index 0000000..9d0baf8
--- /dev/null
+++ b/clang/test/Modules/befriend-2.cppm
@@ -0,0 +1,65 @@
+// RUN: rm -rf %t
+// RUN: mkdir -p %t
+// RUN: split-file %s %t
+//
+// RUN: %clang_cc1 -std=c++20 %t/a.cppm -emit-reduced-module-interface -o %t/test-A.pcm
+// RUN: %clang_cc1 -std=c++20 %t/N.cppm -emit-reduced-module-interface -o %t/test-N.pcm
+// RUN: %clang_cc1 -std=c++20 %t/B.cppm -verify -fsyntax-only -fprebuilt-module-path=%t
+
+//--- a.h
+namespace N {
+
+ template <typename>
+ class C {
+ template <typename> friend void foo();
+ };
+
+ template <typename> void foo() {}
+} // namespace N
+
+//--- a.cppm
+// This is some unrelated file. It also #includes system headers and
+// re-exports some of the names.
+module;
+#include "a.h"
+export module test:A;
+export {
+ using N::C;
+ using N::foo;
+}
+
+//--- std.h
+// Declarations typically #included from C++ header files:
+namespace N { // In practice, this would be namespace std
+ inline namespace impl { // In practice, this would be namespace __1
+ template <typename>
+ class C {
+ template <typename> friend void foo();
+ };
+
+ template <typename> void foo() {}
+ } // namespace impl
+ } // namespace N
+
+//--- N.cppm
+module;
+#include "std.h"
+export module test:N;
+
+// Now wrap these names into a module and export them:
+export {
+ namespace N {
+ using N::C;
+ using N::foo;
+ }
+}
+
+//--- B.cppm
+// expected-no-diagnostics
+// A file that consumes the partitions from the other two files,
+// including the exported N::C name.
+module test:B;
+import :N;
+import :A;
+
+N::C<int> x;
diff --git a/clang/test/Modules/befriend-3.cppm b/clang/test/Modules/befriend-3.cppm
new file mode 100644
index 0000000..f8dbc423
--- /dev/null
+++ b/clang/test/Modules/befriend-3.cppm
@@ -0,0 +1,19 @@
+// RUN: %clang_cc1 -std=c++20 %s -fsyntax-only -verify
+export module m;
+
+namespace test {
+namespace ns1 {
+ namespace ns2 {
+ template<class T> void f(T t); // expected-note {{target of using declaration}}
+ }
+ using ns2::f; // expected-note {{using declaration}}
+}
+struct A { void f(); }; // expected-note 2{{target of using declaration}}
+struct B : public A { using A::f; }; // expected-note {{using declaration}}
+template<typename T> struct C : A { using A::f; }; // expected-note {{using declaration}}
+struct X {
+ template<class T> friend void ns1::f(T t); // expected-error {{cannot befriend target of using declaration}}
+ friend void B::f(); // expected-error {{cannot befriend target of using declaration}}
+ friend void C<int>::f(); // expected-error {{cannot befriend target of using declaration}}
+};
+}
diff --git a/clang/test/Modules/merge-records.cppm b/clang/test/Modules/merge-records.cppm
index dee41bd..bf06d7c 100644
--- a/clang/test/Modules/merge-records.cppm
+++ b/clang/test/Modules/merge-records.cppm
@@ -31,27 +31,6 @@ union U {
int c;
};
-//--- another_records.h
-struct A {
- int a;
- double b;
- float c;
-};
-
-struct NoNameEntity {
- struct {
- int a;
- unsigned b;
- long c;
- };
-};
-
-union U {
- int a;
- double b;
- short c;
-};
-
//--- A.cppm
module;
#include "records.h"
diff --git a/clang/test/Modules/modules-merge-enum.m b/clang/test/Modules/modules-merge-enum.m
index f1010c1..fc07c46 100644
--- a/clang/test/Modules/modules-merge-enum.m
+++ b/clang/test/Modules/modules-merge-enum.m
@@ -18,9 +18,8 @@ typedef enum MyEnum1 { MyVal_A } MyEnum1;
// CHECK-NEXT: | |-also in ModB
// CHECK-NEXT: | `-EnumConstantDecl 0x{{.*}} imported in ModA.ModAFile1 referenced MyVal_A 'int'
// CHECK-NEXT: |-TypedefDecl 0x{{.*}} imported in ModA.ModAFile1 hidden MyEnum1 'enum MyEnum1'
-// CHECK-NEXT: | `-ElaboratedType 0x{{.*}} 'enum MyEnum1' sugar imported
-// CHECK-NEXT: | `-EnumType 0x{{.*}} 'enum MyEnum1' imported
-// CHECK-NEXT: | `-Enum 0x{{.*}} 'MyEnum1'
+// CHECK-NEXT: | `-EnumType 0x{{.*}} 'enum MyEnum1' imported
+// CHECK-NEXT: | `-Enum 0x{{.*}} 'MyEnum1'
enum MyEnum2 { MyVal_B };
@@ -33,10 +32,9 @@ typedef enum { MyVal_C } MyEnum3;
// CHECK: |-EnumDecl 0x{{.*}} imported in ModA.ModAFile1 <undeserialized declarations>
// CHECK-NEXT: | |-also in ModB
// CHECK-NEXT: | `-EnumConstantDecl 0x{{.*}} imported in ModA.ModAFile1 referenced MyVal_C 'int'
-// CHECK-NEXT: |-TypedefDecl 0x{{.*}} imported in ModA.ModAFile1 hidden MyEnum3 'enum MyEnum3':'MyEnum3'
-// CHECK-NEXT: | `-ElaboratedType 0x{{.*}} 'enum MyEnum3' sugar imported
-// CHECK-NEXT: | `-EnumType 0x{{.*}} 'MyEnum3' imported
-// CHECK-NEXT: | `-Enum 0x{{.*}}
+// CHECK-NEXT: |-TypedefDecl 0x{{.*}} imported in ModA.ModAFile1 hidden MyEnum3 'enum MyEnum3'
+// CHECK-NEXT: | `-EnumType 0x{{.*}} 'enum MyEnum3' imported
+// CHECK-NEXT: | `-Enum 0x{{.*}}
struct MyStruct {
enum MyEnum5 { MyVal_D } Field;
@@ -57,17 +55,15 @@ enum { MyVal_E };
// Redeclarations coming from ModB.
// CHECK: |-TypedefDecl 0x{{.*}} prev 0x{{.*}} imported in ModB MyEnum1 'enum MyEnum1'
-// CHECK-NEXT: | `-ElaboratedType 0x{{.*}} 'enum MyEnum1' sugar imported
-// CHECK-NEXT: | `-EnumType 0x{{.*}} 'enum MyEnum1' imported
-// CHECK-NEXT: | `-Enum 0x{{.*}} 'MyEnum1'
+// CHECK-NEXT: | `-EnumType 0x{{.*}} 'enum MyEnum1' imported
+// CHECK-NEXT: | `-Enum 0x{{.*}} 'MyEnum1'
// CHECK: |-EnumDecl 0x{{.*}} prev 0x{{.*}} imported in ModB <undeserialized declarations>
// CHECK-NEXT: | |-also in ModB
// CHECK-NEXT: | `-EnumConstantDecl 0x{{.*}} imported in ModB MyVal_C 'int'
-// CHECK-NEXT: |-TypedefDecl 0x{{.*}} prev 0x{{.*}} imported in ModB MyEnum3 'enum MyEnum3':'MyEnum3'
-// CHECK-NEXT: | `-ElaboratedType 0x{{.*}} 'enum MyEnum3' sugar imported
-// CHECK-NEXT: | `-EnumType 0x{{.*}} 'MyEnum3' imported
-// CHECK-NEXT: | `-Enum 0x{{.*}}
+// CHECK-NEXT: |-TypedefDecl 0x{{.*}} prev 0x{{.*}} imported in ModB MyEnum3 'enum MyEnum3'
+// CHECK-NEXT: | `-EnumType 0x{{.*}} 'enum MyEnum3' imported
+// CHECK-NEXT: | `-Enum 0x{{.*}}
// CHECK: |-EnumDecl 0x{{.*}} imported in ModB <undeserialized declarations>
// CHECK-NEXT: | `-EnumConstantDecl 0x{{.*}} first 0x{{.*}} imported in ModB referenced MyVal_E 'int'
diff --git a/clang/test/Modules/odr_hash.cpp b/clang/test/Modules/odr_hash.cpp
index 8ef53e3..f22f3c7 100644
--- a/clang/test/Modules/odr_hash.cpp
+++ b/clang/test/Modules/odr_hash.cpp
@@ -1314,7 +1314,7 @@ class S1 {
#else
template<class T>
using U1 = S1<T>;
-// expected-error@first.h:* {{'DependentType::S1::x' from module 'FirstModule' is not present in definition of 'S1<T>' in module 'SecondModule'}}
+// expected-error@first.h:* {{'DependentType::S1::x' from module 'FirstModule' is not present in definition of 'DependentType::S1<T>' in module 'SecondModule'}}
// expected-note@second.h:* {{declaration of 'x' does not match}}
#endif
@@ -2343,7 +2343,7 @@ struct S1 {
};
#else
using TemplateTypeParmType::S1;
-// expected-error@first.h:* {{'TemplateTypeParmType::S1::x' from module 'FirstModule' is not present in definition of 'S1<T1, T2>' in module 'SecondModule'}}
+// expected-error@first.h:* {{'TemplateTypeParmType::S1::x' from module 'FirstModule' is not present in definition of 'TemplateTypeParmType::S1<T1, T2>' in module 'SecondModule'}}
// expected-note@second.h:* {{declaration of 'x' does not match}}
#endif
@@ -2365,9 +2365,9 @@ class S2 {
};
#else
using TemplateTypeParmType::S2;
-// expected-error@first.h:* {{'TemplateTypeParmType::S2::x' from module 'FirstModule' is not present in definition of 'S2<T, U>' in module 'SecondModule'}}
+// expected-error@first.h:* {{'TemplateTypeParmType::S2::x' from module 'FirstModule' is not present in definition of 'TemplateTypeParmType::S2<T, U>' in module 'SecondModule'}}
// expected-note@second.h:* {{declaration of 'x' does not match}}
-// expected-error@first.h:* {{'TemplateTypeParmType::S2::type' from module 'FirstModule' is not present in definition of 'S2<T, U>' in module 'SecondModule'}}
+// expected-error@first.h:* {{'TemplateTypeParmType::S2::type' from module 'FirstModule' is not present in definition of 'TemplateTypeParmType::S2<T, U>' in module 'SecondModule'}}
// expected-note@second.h:* {{declaration of 'type' does not match}}
#endif
@@ -4020,7 +4020,7 @@ struct Valid {
};
#else
Invalid::L2<1>::L3<1> invalid;
-// expected-error@second.h:* {{'Types::InjectedClassName::Invalid::L2::L3::x' from module 'SecondModule' is not present in definition of 'L3<value-parameter-1-0>' in module 'FirstModule'}}
+// expected-error@second.h:* {{'Types::InjectedClassName::Invalid::L2::L3::x' from module 'SecondModule' is not present in definition of 'Types::InjectedClassName::Invalid::L2::L3<value-parameter-1-0>' in module 'FirstModule'}}
// expected-note@first.h:* {{declaration of 'x' does not match}}
Valid::L2<1>::L3<1> valid;
#endif
@@ -4291,7 +4291,7 @@ struct Valid {
};
#else
template <class T> using I = Invalid<T>;
-// expected-error@first.h:* {{'Types::UnresolvedUsing::Invalid::x' from module 'FirstModule' is not present in definition of 'Invalid<T>' in module 'SecondModule'}}
+// expected-error@first.h:* {{'Types::UnresolvedUsing::Invalid::x' from module 'FirstModule' is not present in definition of 'Types::UnresolvedUsing::Invalid<T>' in module 'SecondModule'}}
// expected-note@second.h:* {{declaration of 'x' does not match}}
template <class T> using V = Valid<T>;
diff --git a/clang/test/Modules/pr138558.cppm b/clang/test/Modules/pr138558.cppm
new file mode 100644
index 0000000..c637ce2
--- /dev/null
+++ b/clang/test/Modules/pr138558.cppm
@@ -0,0 +1,54 @@
+// RUN: rm -rf %t
+// RUN: mkdir -p %t
+// RUN: split-file %s %t
+//
+// RUN: %clang_cc1 -std=c++20 %t/a.cppm -emit-reduced-module-interface -o %t/test-A.pcm
+// RUN: %clang_cc1 -std=c++20 %t/N.cppm -emit-reduced-module-interface -o %t/test-N.pcm
+// RUN: %clang_cc1 -std=c++20 %t/B.cppm -verify -fsyntax-only -fprebuilt-module-path=%t
+
+//--- a.h
+namespace N {
+inline namespace impl {
+ template <typename>
+ class C {
+ template <typename> friend void foo();
+ };
+
+ template <typename> void foo() {}
+} // namespace impl
+} // namespace N
+
+//--- a.cppm
+// This is some unrelated file. It also #includes system headers, but
+// it does not even export anything.
+module;
+#include "a.h"
+export module test:A;
+// Use the names to make sure they won't be elided.
+using N::C;
+using N::foo;
+
+//--- N.cppm
+module;
+#include "a.h"
+export module test:N;
+
+// Now wrap these names into a module and export them:
+export {
+ namespace N {
+ inline namespace impl {
+ using N::impl::C;
+ using N::impl::foo;
+ }
+ }
+}
+
+//--- B.cppm
+// expected-no-diagnostics
+// A file that consumes the partitions from the other two files,
+// including the exported N::C name.
+module test:B;
+import :N;
+import :A;
+
+N::C<int> x;
diff --git a/clang/test/Modules/pr97313.cppm b/clang/test/Modules/pr97313.cppm
index 32c7112..99795d6 100644
--- a/clang/test/Modules/pr97313.cppm
+++ b/clang/test/Modules/pr97313.cppm
@@ -1,4 +1,4 @@
-// REQUIRES: !system-windows
+// REQUIRES: !system-windows, !system-cygwin
//
// RUN: rm -rf %t
// RUN: mkdir -p %t
diff --git a/clang/test/Modules/redundant-template-default-arg2.cpp b/clang/test/Modules/redundant-template-default-arg2.cpp
index ae1f0c7..6e22d82 100644
--- a/clang/test/Modules/redundant-template-default-arg2.cpp
+++ b/clang/test/Modules/redundant-template-default-arg2.cpp
@@ -33,8 +33,8 @@ int v2; // expected-error {{declaration of 'v2' in the global module follows dec
// expected-note@foo.cppm:6 {{previous declaration is here}}
template <typename T>
-class my_array {}; // expected-error {{redefinition of 'my_array'}}
- // expected-note@foo.cppm:9 {{previous definition is here}}
+class my_array {}; // expected-error {{declaration of 'my_array' in the global module follows declaration in module foo}}
+ // expected-note@foo.cppm:9 {{previous declaration is here}}
template <template <typename> typename C = my_array>
int v3; // expected-error {{declaration of 'v3' in the global module follows declaration in module foo}}
diff --git a/clang/test/Modules/safe_buffers_optout.cpp b/clang/test/Modules/safe_buffers_optout.cpp
index 8c3d6a2..39020a4 100644
--- a/clang/test/Modules/safe_buffers_optout.cpp
+++ b/clang/test/Modules/safe_buffers_optout.cpp
@@ -96,7 +96,7 @@ int textual(int *p) {
// `safe_buffers_test_base`. (So the module dependencies form a DAG.)
// No expected warnings from base.h, test_sub1, or test_sub2 because they are
-// in seperate modules, and the explicit commands that builds them have no
+// in separate modules, and the explicit commands that build them have no
// `-Wunsafe-buffer-usage`.
int foo(int * p) {
@@ -122,7 +122,7 @@ int foo(int * p) {
// `safe_buffers_test_base`. (So the module dependencies form a DAG.)
// No expected warnings from base.h, test_sub1, or test_sub2 because they are
-// in seperate modules, and the explicit commands that builds them have no
+// in separate modules, and the explicit commands that build them have no
// `-Wunsafe-buffer-usage`.
int foo(int * p) {
diff --git a/clang/test/Modules/skip-body-2.cppm b/clang/test/Modules/skip-body-2.cppm
new file mode 100644
index 0000000..781f4fe
--- /dev/null
+++ b/clang/test/Modules/skip-body-2.cppm
@@ -0,0 +1,58 @@
+// RUN: rm -rf %t
+// RUN: mkdir -p %t
+// RUN: split-file %s %t
+//
+// RUN: %clang_cc1 -std=c++20 %t/a.cppm -emit-reduced-module-interface -o %t/a.pcm
+// RUN: %clang_cc1 -std=c++20 %t/a.cpp -fmodule-file=a=%t/a.pcm -ast-dump | FileCheck %s
+
+// RUN: %clang_cc1 -std=c++20 %t/a.cppm -emit-module-interface -o %t/a.pcm
+// RUN: %clang_cc1 -std=c++20 %t/a.cpp -fmodule-file=a=%t/a.pcm -ast-dump | FileCheck %s
+
+//--- a.h
+namespace a {
+class A {
+public:
+ int aaaa;
+
+ int get() {
+ return aaaa;
+ }
+};
+
+
+template <class T>
+class B {
+public:
+ B(T t): t(t) {}
+ T t;
+};
+
+using BI = B<int>;
+
+inline int get(A a, BI b) {
+ return a.get() + b.t;
+}
+
+}
+
+//--- a.cppm
+export module a;
+
+export extern "C++" {
+#include "a.h"
+}
+
+//--- a.cpp
+import a;
+#include "a.h"
+
+int test() {
+ a::A aa;
+ a::BI bb(43);
+ return get(aa, bb);
+}
+
+// CHECK-NOT: DefinitionData
+// CHECK: FunctionDecl {{.*}} get 'int (A, BI)' {{.*}}
+// CHECK-NOT: CompoundStmt
+// CHECK: FunctionDecl {{.*}} test {{.*}}
diff --git a/clang/test/Modules/skip-body.cppm b/clang/test/Modules/skip-body.cppm
new file mode 100644
index 0000000..cbb466f
--- /dev/null
+++ b/clang/test/Modules/skip-body.cppm
@@ -0,0 +1,63 @@
+// RUN: rm -rf %t
+// RUN: mkdir -p %t
+// RUN: split-file %s %t
+//
+// RUN: %clang_cc1 -std=c++20 %t/a.cppm -emit-reduced-module-interface -o %t/a.pcm
+// RUN: %clang_cc1 -std=c++20 %t/a.cpp -fmodule-file=a=%t/a.pcm -ast-dump | FileCheck %s
+
+// RUN: %clang_cc1 -std=c++20 %t/a.cppm -emit-module-interface -o %t/a.pcm
+// RUN: %clang_cc1 -std=c++20 %t/a.cpp -fmodule-file=a=%t/a.pcm -ast-dump | FileCheck %s
+
+//--- a.h
+namespace a {
+class A {
+public:
+ int aaaa;
+
+ int get() {
+ return aaaa;
+ }
+};
+
+
+template <class T>
+class B {
+public:
+ B(T t): t(t) {}
+ T t;
+};
+
+using BI = B<int>;
+
+inline int get(A a, BI b) {
+ return a.get() + b.t;
+}
+
+}
+
+//--- a.cppm
+module;
+#include "a.h"
+
+export module a;
+
+namespace a {
+ export using ::a::A;
+ export using ::a::get;
+ export using ::a::BI;
+}
+
+//--- a.cpp
+import a;
+#include "a.h"
+
+int test() {
+ a::A aa;
+ a::BI bb(43);
+ return get(aa, bb);
+}
+
+// CHECK-NOT: DefinitionData
+// CHECK: FunctionDecl {{.*}} get 'int (A, BI)' {{.*}}
+// CHECK-NOT: CompoundStmt
+// CHECK: FunctionDecl {{.*}} test {{.*}}
diff --git a/clang/test/OpenMP/allocate_modifiers_messages.cpp b/clang/test/OpenMP/allocate_modifiers_messages.cpp
index 6867e78..83314a5 100644
--- a/clang/test/OpenMP/allocate_modifiers_messages.cpp
+++ b/clang/test/OpenMP/allocate_modifiers_messages.cpp
@@ -88,7 +88,7 @@ int main() {
// expected-warning@+2 {{extra tokens at the end of '#pragma omp scope' are ignored}}
// expected-note@+1 {{to match this '('}}
#pragma omp scope private(a,b,c) allocate(allocator(omp_const_mem_alloc):c:b;a)
- // expected-error@+1 {{initializing 'const omp_allocator_handle_t' with an expression of incompatible type 'int'}}
+ // expected-error@+1 {{initializing 'const omp_allocator_handle_t' (aka 'const enum omp_allocator_handle_t') with an expression of incompatible type 'int'}}
#pragma omp scope private(c,a,b) allocate(allocator(myAlloc()):a,b,c)
// expected-error@+2 {{missing ':' after allocate clause modifier}}
// expected-error@+1 {{expected expression}}
diff --git a/clang/test/OpenMP/amdgcn_debug_nowait.c b/clang/test/OpenMP/amdgcn_debug_nowait.c
new file mode 100644
index 0000000..d691327
--- /dev/null
+++ b/clang/test/OpenMP/amdgcn_debug_nowait.c
@@ -0,0 +1,16 @@
+// REQUIRES: amdgpu-registered-target
+
+// RUN: %clang_cc1 -debug-info-kind=line-tables-only -fopenmp -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-host.bc
+
+int test() {
+ int c;
+
+#pragma omp target data map(tofrom: c)
+{
+ #pragma omp target nowait
+ {
+ c = 2;
+ }
+}
+ return c;
+}
diff --git a/clang/test/OpenMP/amdgcn_target_parallel_num_threads_codegen.cpp b/clang/test/OpenMP/amdgcn_target_parallel_num_threads_codegen.cpp
new file mode 100644
index 0000000..806a79e
--- /dev/null
+++ b/clang/test/OpenMP/amdgcn_target_parallel_num_threads_codegen.cpp
@@ -0,0 +1,1095 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
+// Test target codegen - host bc file has to be created first.
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host.bc
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=OMP45_1
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-x86-host.bc
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=OMP45_2
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fexceptions -fcxx-exceptions -x c++ -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=OMP45_2
+
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host.bc
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -x c++ -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefixes=OMP60_1
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -x c++ -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-x86-host.bc
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -x c++ -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefixes=OMP60_2
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -fexceptions -fcxx-exceptions -x c++ -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefixes=OMP60_2
+
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host.bc
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK1
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-x86-host.bc
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK2
+// RUN: %clang_cc1 -verify -fopenmp -fexceptions -fcxx-exceptions -x c++ -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK2
+
+// expected-no-diagnostics
+#ifndef HEADER
+#define HEADER
+
+template<typename tx>
+tx ftemplate(int n) {
+ tx a = 0;
+ short aa = 0;
+ tx b[10];
+
+ #pragma omp target parallel map(tofrom: aa) num_threads(1024)
+ {
+ aa += 1;
+ }
+ #ifdef OMP60
+ char str[] = "msg";
+ #pragma omp target parallel map(tofrom: aa) num_threads(strict: 1024) severity(warning) message(str)
+ {
+ aa += 1;
+ }
+ #endif
+
+ #pragma omp target parallel map(tofrom:a, aa, b) if(target: n>40) num_threads(n)
+ {
+ a += 1;
+ aa += 1;
+ b[2] += 1;
+ }
+ #ifdef OMP60
+ const char *str1 = "msg1";
+ #pragma omp target parallel map(tofrom:a, aa, b) if(target: n>40) num_threads(strict: n) severity(warning) message(str1)
+ {
+ a += 1;
+ aa += 1;
+ b[2] += 1;
+ }
+ #endif
+
+ return a;
+}
+
+int bar(int n){
+ int a = 0;
+
+ a += ftemplate<int>(n);
+
+ return a;
+}
+
+#endif
+// OMP45_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31
+// OMP45_1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
+// OMP45_1-NEXT: entry:
+// OMP45_1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8, addrspace(5)
+// OMP45_1-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// OMP45_1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP45_1-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// OMP45_1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7:![0-9]+]], !align [[META8:![0-9]+]]
+// OMP45_1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_kernel_environment to ptr), ptr [[DYN_PTR]])
+// OMP45_1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// OMP45_1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP45_1: user_code.entry:
+// OMP45_1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1:[0-9]+]] to ptr))
+// OMP45_1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// OMP45_1-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8
+// OMP45_1-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 1)
+// OMP45_1-NEXT: call void @__kmpc_target_deinit()
+// OMP45_1-NEXT: ret void
+// OMP45_1: worker.exit:
+// OMP45_1-NEXT: ret void
+//
+//
+// OMP45_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined
+// OMP45_1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
+// OMP45_1-NEXT: entry:
+// OMP45_1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// OMP45_1-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// OMP45_1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP45_1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META8]]
+// OMP45_1-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// OMP45_1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// OMP45_1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// OMP45_1-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// OMP45_1-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// OMP45_1-NEXT: ret void
+//
+//
+// OMP45_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
+// OMP45_1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
+// OMP45_1-NEXT: entry:
+// OMP45_1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// OMP45_1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8, addrspace(5)
+// OMP45_1-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// OMP45_1-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OMP45_1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP45_1-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// OMP45_1-NEXT: [[DOTCAPTURE_EXPR__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR__ADDR]] to ptr
+// OMP45_1-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// OMP45_1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9:![0-9]+]]
+// OMP45_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META8]]
+// OMP45_1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9]]
+// OMP45_1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_kernel_environment to ptr), ptr [[DYN_PTR]])
+// OMP45_1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// OMP45_1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP45_1: user_code.entry:
+// OMP45_1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
+// OMP45_1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 4
+// OMP45_1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// OMP45_1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8
+// OMP45_1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
+// OMP45_1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
+// OMP45_1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 2
+// OMP45_1-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 8
+// OMP45_1-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 3)
+// OMP45_1-NEXT: call void @__kmpc_target_deinit()
+// OMP45_1-NEXT: ret void
+// OMP45_1: worker.exit:
+// OMP45_1-NEXT: ret void
+//
+//
+// OMP45_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined
+// OMP45_1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// OMP45_1-NEXT: entry:
+// OMP45_1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_1-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// OMP45_1-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// OMP45_1-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OMP45_1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP45_1-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// OMP45_1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// OMP45_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9]]
+// OMP45_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META8]]
+// OMP45_1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9]]
+// OMP45_1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// OMP45_1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// OMP45_1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// OMP45_1-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// OMP45_1-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// OMP45_1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// OMP45_1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// OMP45_1-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// OMP45_1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i64 0, i64 2
+// OMP45_1-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// OMP45_1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// OMP45_1-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// OMP45_1-NEXT: ret void
+//
+//
+// OMP45_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31
+// OMP45_2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
+// OMP45_2-NEXT: entry:
+// OMP45_2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8, addrspace(5)
+// OMP45_2-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// OMP45_2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP45_2-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// OMP45_2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7:![0-9]+]], !align [[META8:![0-9]+]]
+// OMP45_2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_kernel_environment to ptr), ptr [[DYN_PTR]])
+// OMP45_2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// OMP45_2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP45_2: user_code.entry:
+// OMP45_2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1:[0-9]+]] to ptr))
+// OMP45_2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// OMP45_2-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8
+// OMP45_2-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 1)
+// OMP45_2-NEXT: call void @__kmpc_target_deinit()
+// OMP45_2-NEXT: ret void
+// OMP45_2: worker.exit:
+// OMP45_2-NEXT: ret void
+//
+//
+// OMP45_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined
+// OMP45_2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
+// OMP45_2-NEXT: entry:
+// OMP45_2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// OMP45_2-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// OMP45_2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP45_2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META8]]
+// OMP45_2-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// OMP45_2-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// OMP45_2-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// OMP45_2-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// OMP45_2-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// OMP45_2-NEXT: ret void
+//
+//
+// OMP45_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
+// OMP45_2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
+// OMP45_2-NEXT: entry:
+// OMP45_2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// OMP45_2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8, addrspace(5)
+// OMP45_2-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// OMP45_2-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OMP45_2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP45_2-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// OMP45_2-NEXT: [[DOTCAPTURE_EXPR__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR__ADDR]] to ptr
+// OMP45_2-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// OMP45_2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9:![0-9]+]]
+// OMP45_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META8]]
+// OMP45_2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9]]
+// OMP45_2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_kernel_environment to ptr), ptr [[DYN_PTR]])
+// OMP45_2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// OMP45_2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP45_2: user_code.entry:
+// OMP45_2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
+// OMP45_2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 4
+// OMP45_2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// OMP45_2-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8
+// OMP45_2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
+// OMP45_2-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
+// OMP45_2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 2
+// OMP45_2-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 8
+// OMP45_2-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 3)
+// OMP45_2-NEXT: call void @__kmpc_target_deinit()
+// OMP45_2-NEXT: ret void
+// OMP45_2: worker.exit:
+// OMP45_2-NEXT: ret void
+//
+//
+// OMP45_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined
+// OMP45_2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// OMP45_2-NEXT: entry:
+// OMP45_2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP45_2-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// OMP45_2-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// OMP45_2-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OMP45_2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP45_2-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// OMP45_2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// OMP45_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9]]
+// OMP45_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META8]]
+// OMP45_2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9]]
+// OMP45_2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// OMP45_2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// OMP45_2-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// OMP45_2-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// OMP45_2-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// OMP45_2-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// OMP45_2-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// OMP45_2-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// OMP45_2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i64 0, i64 2
+// OMP45_2-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// OMP45_2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// OMP45_2-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// OMP45_2-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31
+// OMP60_1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8, addrspace(5)
+// OMP60_1-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// OMP60_1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_1-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// OMP60_1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9:![0-9]+]], !align [[META10:![0-9]+]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_kernel_environment to ptr), ptr [[DYN_PTR]])
+// OMP60_1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// OMP60_1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_1: user_code.entry:
+// OMP60_1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1:[0-9]+]] to ptr))
+// OMP60_1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// OMP60_1-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8
+// OMP60_1-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 1)
+// OMP60_1-NEXT: call void @__kmpc_target_deinit()
+// OMP60_1-NEXT: ret void
+// OMP60_1: worker.exit:
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined
+// OMP60_1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// OMP60_1-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// OMP60_1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// OMP60_1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// OMP60_1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_1-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// OMP60_1-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37
+// OMP60_1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[TMP:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8, addrspace(5)
+// OMP60_1-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// OMP60_1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_1-NEXT: [[DOTCAPTURE_EXPR__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR__ADDR]] to ptr
+// OMP60_1-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
+// OMP60_1-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// OMP60_1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8, !nonnull [[META9]]
+// OMP60_1-NEXT: store ptr [[TMP1]], ptr [[TMP_ASCAST]], align 8
+// OMP60_1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37_kernel_environment to ptr), ptr [[DYN_PTR]])
+// OMP60_1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
+// OMP60_1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_1: user_code.entry:
+// OMP60_1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
+// OMP60_1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// OMP60_1-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
+// OMP60_1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP_ASCAST]], align 8, !nonnull [[META9]]
+// OMP60_1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP5]], i64 0, i64 0
+// OMP60_1-NEXT: call void @__kmpc_parallel_60(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP3]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 1, i32 1, i32 1, ptr [[ARRAYDECAY]])
+// OMP60_1-NEXT: call void @__kmpc_target_deinit()
+// OMP60_1-NEXT: ret void
+// OMP60_1: worker.exit:
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37_omp_outlined
+// OMP60_1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// OMP60_1-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// OMP60_1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// OMP60_1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// OMP60_1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_1-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// OMP60_1-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
+// OMP60_1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// OMP60_1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8, addrspace(5)
+// OMP60_1-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// OMP60_1-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OMP60_1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_1-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// OMP60_1-NEXT: [[DOTCAPTURE_EXPR__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR__ADDR]] to ptr
+// OMP60_1-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// OMP60_1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11:![0-9]+]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_kernel_environment to ptr), ptr [[DYN_PTR]])
+// OMP60_1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// OMP60_1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_1: user_code.entry:
+// OMP60_1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
+// OMP60_1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 4
+// OMP60_1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// OMP60_1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8
+// OMP60_1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
+// OMP60_1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
+// OMP60_1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 2
+// OMP60_1-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 8
+// OMP60_1-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 3)
+// OMP60_1-NEXT: call void @__kmpc_target_deinit()
+// OMP60_1-NEXT: ret void
+// OMP60_1: worker.exit:
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined
+// OMP60_1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// OMP60_1-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// OMP60_1-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OMP60_1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_1-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// OMP60_1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// OMP60_1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// OMP60_1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// OMP60_1-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// OMP60_1-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// OMP60_1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// OMP60_1-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// OMP60_1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i64 0, i64 2
+// OMP60_1-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// OMP60_1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// OMP60_1-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51
+// OMP60_1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR4]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// OMP60_1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8, addrspace(5)
+// OMP60_1-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// OMP60_1-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OMP60_1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_1-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// OMP60_1-NEXT: [[DOTCAPTURE_EXPR__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR__ADDR]] to ptr
+// OMP60_1-NEXT: [[DOTCAPTURE_EXPR__ADDR2_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR__ADDR2]] to ptr
+// OMP60_1-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// OMP60_1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2_ASCAST]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51_kernel_environment to ptr), ptr [[DYN_PTR]])
+// OMP60_1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// OMP60_1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_1: user_code.entry:
+// OMP60_1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
+// OMP60_1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 4
+// OMP60_1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// OMP60_1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8
+// OMP60_1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
+// OMP60_1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
+// OMP60_1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 2
+// OMP60_1-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 8
+// OMP60_1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2_ASCAST]], align 8
+// OMP60_1-NEXT: call void @__kmpc_parallel_60(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 3, i32 1, i32 1, ptr [[TMP9]])
+// OMP60_1-NEXT: call void @__kmpc_target_deinit()
+// OMP60_1-NEXT: ret void
+// OMP60_1: worker.exit:
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51_omp_outlined
+// OMP60_1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_1-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// OMP60_1-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// OMP60_1-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OMP60_1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_1-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// OMP60_1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// OMP60_1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// OMP60_1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// OMP60_1-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// OMP60_1-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// OMP60_1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// OMP60_1-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// OMP60_1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i64 0, i64 2
+// OMP60_1-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// OMP60_1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// OMP60_1-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31
+// OMP60_2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8, addrspace(5)
+// OMP60_2-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// OMP60_2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_2-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// OMP60_2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9:![0-9]+]], !align [[META10:![0-9]+]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_kernel_environment to ptr), ptr [[DYN_PTR]])
+// OMP60_2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// OMP60_2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_2: user_code.entry:
+// OMP60_2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1:[0-9]+]] to ptr))
+// OMP60_2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// OMP60_2-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8
+// OMP60_2-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 1)
+// OMP60_2-NEXT: call void @__kmpc_target_deinit()
+// OMP60_2-NEXT: ret void
+// OMP60_2: worker.exit:
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined
+// OMP60_2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// OMP60_2-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// OMP60_2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// OMP60_2-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// OMP60_2-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_2-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// OMP60_2-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37
+// OMP60_2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[TMP:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8, addrspace(5)
+// OMP60_2-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// OMP60_2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_2-NEXT: [[DOTCAPTURE_EXPR__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR__ADDR]] to ptr
+// OMP60_2-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
+// OMP60_2-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// OMP60_2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8, !nonnull [[META9]]
+// OMP60_2-NEXT: store ptr [[TMP1]], ptr [[TMP_ASCAST]], align 8
+// OMP60_2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37_kernel_environment to ptr), ptr [[DYN_PTR]])
+// OMP60_2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
+// OMP60_2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_2: user_code.entry:
+// OMP60_2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
+// OMP60_2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// OMP60_2-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
+// OMP60_2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP_ASCAST]], align 8, !nonnull [[META9]]
+// OMP60_2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP5]], i64 0, i64 0
+// OMP60_2-NEXT: call void @__kmpc_parallel_60(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP3]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 1, i32 1, i32 1, ptr [[ARRAYDECAY]])
+// OMP60_2-NEXT: call void @__kmpc_target_deinit()
+// OMP60_2-NEXT: ret void
+// OMP60_2: worker.exit:
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37_omp_outlined
+// OMP60_2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// OMP60_2-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// OMP60_2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// OMP60_2-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// OMP60_2-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_2-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// OMP60_2-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
+// OMP60_2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// OMP60_2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8, addrspace(5)
+// OMP60_2-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// OMP60_2-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OMP60_2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_2-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// OMP60_2-NEXT: [[DOTCAPTURE_EXPR__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR__ADDR]] to ptr
+// OMP60_2-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// OMP60_2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11:![0-9]+]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_kernel_environment to ptr), ptr [[DYN_PTR]])
+// OMP60_2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// OMP60_2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_2: user_code.entry:
+// OMP60_2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
+// OMP60_2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 4
+// OMP60_2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// OMP60_2-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8
+// OMP60_2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
+// OMP60_2-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
+// OMP60_2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 2
+// OMP60_2-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 8
+// OMP60_2-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 3)
+// OMP60_2-NEXT: call void @__kmpc_target_deinit()
+// OMP60_2-NEXT: ret void
+// OMP60_2: worker.exit:
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined
+// OMP60_2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// OMP60_2-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// OMP60_2-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OMP60_2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_2-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// OMP60_2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// OMP60_2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// OMP60_2-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// OMP60_2-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// OMP60_2-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// OMP60_2-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_2-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// OMP60_2-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// OMP60_2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i64 0, i64 2
+// OMP60_2-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// OMP60_2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// OMP60_2-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51
+// OMP60_2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR4]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// OMP60_2-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8, addrspace(5)
+// OMP60_2-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// OMP60_2-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OMP60_2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_2-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// OMP60_2-NEXT: [[DOTCAPTURE_EXPR__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR__ADDR]] to ptr
+// OMP60_2-NEXT: [[DOTCAPTURE_EXPR__ADDR2_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR__ADDR2]] to ptr
+// OMP60_2-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// OMP60_2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2_ASCAST]], align 8
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51_kernel_environment to ptr), ptr [[DYN_PTR]])
+// OMP60_2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// OMP60_2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_2: user_code.entry:
+// OMP60_2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
+// OMP60_2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 4
+// OMP60_2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// OMP60_2-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8
+// OMP60_2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
+// OMP60_2-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
+// OMP60_2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 2
+// OMP60_2-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 8
+// OMP60_2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2_ASCAST]], align 8
+// OMP60_2-NEXT: call void @__kmpc_parallel_60(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 3, i32 1, i32 1, ptr [[TMP9]])
+// OMP60_2-NEXT: call void @__kmpc_target_deinit()
+// OMP60_2-NEXT: ret void
+// OMP60_2: worker.exit:
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51_omp_outlined
+// OMP60_2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OMP60_2-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// OMP60_2-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// OMP60_2-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OMP60_2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// OMP60_2-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// OMP60_2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META11]]
+// OMP60_2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// OMP60_2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// OMP60_2-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// OMP60_2-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// OMP60_2-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// OMP60_2-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_2-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// OMP60_2-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// OMP60_2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i64 0, i64 2
+// OMP60_2-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// OMP60_2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// OMP60_2-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// OMP60_2-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31
+// CHECK1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8, addrspace(5)
+// CHECK1-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// CHECK1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// CHECK1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// CHECK1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7:![0-9]+]], !align [[META8:![0-9]+]]
+// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_kernel_environment to ptr), ptr [[DYN_PTR]])
+// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK1: user_code.entry:
+// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1:[0-9]+]] to ptr))
+// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8
+// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 1)
+// CHECK1-NEXT: call void @__kmpc_target_deinit()
+// CHECK1-NEXT: ret void
+// CHECK1: worker.exit:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// CHECK1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// CHECK1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META8]]
+// CHECK1-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// CHECK1-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
+// CHECK1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8, addrspace(5)
+// CHECK1-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// CHECK1-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// CHECK1-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR__ADDR]] to ptr
+// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// CHECK1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// CHECK1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9:![0-9]+]]
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META8]]
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9]]
+// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_kernel_environment to ptr), ptr [[DYN_PTR]])
+// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK1: user_code.entry:
+// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
+// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
+// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 2
+// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 8
+// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 3)
+// CHECK1-NEXT: call void @__kmpc_target_deinit()
+// CHECK1-NEXT: ret void
+// CHECK1: worker.exit:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// CHECK1-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK1-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// CHECK1-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// CHECK1-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9]]
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META8]]
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9]]
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// CHECK1-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// CHECK1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// CHECK1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// CHECK1-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i64 0, i64 2
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31
+// CHECK2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8, addrspace(5)
+// CHECK2-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// CHECK2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// CHECK2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// CHECK2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7:![0-9]+]], !align [[META8:![0-9]+]]
+// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_kernel_environment to ptr), ptr [[DYN_PTR]])
+// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK2: user_code.entry:
+// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1:[0-9]+]] to ptr))
+// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// CHECK2-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8
+// CHECK2-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 1)
+// CHECK2-NEXT: call void @__kmpc_target_deinit()
+// CHECK2-NEXT: ret void
+// CHECK2: worker.exit:
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined
+// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// CHECK2-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// CHECK2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// CHECK2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META8]]
+// CHECK2-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// CHECK2-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// CHECK2-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
+// CHECK2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8, addrspace(5)
+// CHECK2-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// CHECK2-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// CHECK2-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR__ADDR]] to ptr
+// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// CHECK2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// CHECK2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// CHECK2-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9:![0-9]+]]
+// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META8]]
+// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9]]
+// CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_kernel_environment to ptr), ptr [[DYN_PTR]])
+// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK2: user_code.entry:
+// CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
+// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 4
+// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// CHECK2-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8
+// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
+// CHECK2-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
+// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 2
+// CHECK2-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 8
+// CHECK2-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 3)
+// CHECK2-NEXT: call void @__kmpc_target_deinit()
+// CHECK2-NEXT: ret void
+// CHECK2: worker.exit:
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined
+// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// CHECK2-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// CHECK2-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK2-NEXT: [[AA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[AA_ADDR]] to ptr
+// CHECK2-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// CHECK2-NEXT: store ptr [[AA]], ptr [[AA_ADDR_ASCAST]], align 8
+// CHECK2-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9]]
+// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META8]]
+// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8, !nonnull [[META7]], !align [[META9]]
+// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// CHECK2-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// CHECK2-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// CHECK2-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// CHECK2-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// CHECK2-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i64 0, i64 2
+// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// CHECK2-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// CHECK2-NEXT: ret void
+//
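The OMP60_2 expectations above diverge from the pre-6.0 baselines mainly in the runtime entry point: where the older path calls __kmpc_parallel_51, the 6.0 path calls __kmpc_parallel_60 with three extra trailing arguments that, judging from the call sites, appear to carry a strict flag, a severity code, and a pointer to the user-supplied message string. A minimal C++ sketch of the kind of construct that would exercise this path follows; it is a reconstruction from the check lines with hypothetical names, not the test source itself.

// Hypothetical reconstruction (assumption, not copied from the test file):
// a strict num_threads request with severity/message clauses, which the
// OpenMP 6.0 codegen lowers to __kmpc_parallel_60; dropping the strict
// modifier and the two clauses falls back to the __kmpc_parallel_51 lowering.
template <typename T>
T ftemplate(int n) {
  short aa = 0;
#pragma omp target parallel num_threads(strict: 1024) severity(warning) message("msg")
  {
    aa += 1; // matches the sext/add/trunc sequence in the check lines above
  }
  return static_cast<T>(aa) + n;
}

int caller() { return ftemplate<int>(3); } // instantiates _Z9ftemplateIiET_i
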
diff --git a/clang/test/OpenMP/bug54082.c b/clang/test/OpenMP/bug54082.c
index da32be2..bda4bd2 100644
--- a/clang/test/OpenMP/bug54082.c
+++ b/clang/test/OpenMP/bug54082.c
@@ -68,14 +68,14 @@ void foo() {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[X_TRAITS:%.*]] = alloca [1 x %struct.omp_alloctrait_t], align 16
// CHECK-NEXT: [[X_ALLOC:%.*]] = alloca i64, align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X_TRAITS]]) #[[ATTR5:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X_TRAITS]]) #[[ATTR5:[0-9]+]]
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(16) [[X_TRAITS]], ptr noundef nonnull align 16 dereferenceable(16) @__const.foo.x_traits, i64 16, i1 false)
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[X_ALLOC]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X_ALLOC]]) #[[ATTR5]]
// CHECK-NEXT: [[CALL:%.*]] = call i64 @omp_init_allocator(i64 noundef 0, i32 noundef 1, ptr noundef nonnull [[X_TRAITS]]) #[[ATTR5]]
// CHECK-NEXT: store i64 [[CALL]], ptr [[X_ALLOC]], align 8, !tbaa [[TBAA3:![0-9]+]]
// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @[[GLOB2:[0-9]+]], i32 1, ptr nonnull @foo.omp_outlined, ptr nonnull [[X_ALLOC]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[X_ALLOC]]) #[[ATTR5]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X_TRAITS]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X_ALLOC]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X_TRAITS]]) #[[ATTR5]]
// CHECK-NEXT: ret void
//
//
@@ -86,13 +86,13 @@ void foo() {
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[DOTOMP_LB]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[DOTOMP_LB]]) #[[ATTR5]]
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA7:![0-9]+]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[DOTOMP_UB]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[DOTOMP_UB]]) #[[ATTR5]]
// CHECK-NEXT: store i32 1023, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA7]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[DOTOMP_STRIDE]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[DOTOMP_STRIDE]]) #[[ATTR5]]
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA7]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[DOTOMP_IS_LAST]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[DOTOMP_IS_LAST]]) #[[ATTR5]]
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA7]]
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA7]]
// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[X_ALLOC]], align 8, !tbaa [[TBAA3]]
@@ -106,9 +106,9 @@ void foo() {
// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[X_ALLOC]], align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[CONV5:%.*]] = inttoptr i64 [[TMP3]] to ptr
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTX__VOID_ADDR]], ptr [[CONV5]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[DOTOMP_IS_LAST]]) #[[ATTR5]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[DOTOMP_STRIDE]]) #[[ATTR5]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[DOTOMP_UB]]) #[[ATTR5]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[DOTOMP_LB]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[DOTOMP_IS_LAST]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[DOTOMP_STRIDE]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[DOTOMP_UB]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[DOTOMP_LB]]) #[[ATTR5]]
// CHECK-NEXT: ret void
//
diff --git a/clang/test/OpenMP/bug56913.c b/clang/test/OpenMP/bug56913.c
index cc72316..fad9e17 100644
--- a/clang/test/OpenMP/bug56913.c
+++ b/clang/test/OpenMP/bug56913.c
@@ -20,12 +20,12 @@ void loop(int n) {
// CHECK-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK: simd.if.then:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @j, align 4, !tbaa [[TBAA2:![0-9]+]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[J]]) #[[ATTR2:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[J]]) #[[ATTR2:[0-9]+]]
// CHECK-NEXT: store ptr [[J]], ptr @u, align 8, !tbaa [[TBAA6:![0-9]+]], !llvm.access.group [[ACC_GRP8:![0-9]+]]
// CHECK-NEXT: [[INC_LE:%.*]] = add i32 [[TMP0]], [[N]]
// CHECK-NEXT: store i32 [[INC_LE]], ptr [[J]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: store i32 [[INC_LE]], ptr @j, align 4, !tbaa [[TBAA2]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[J]]) #[[ATTR2]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[J]]) #[[ATTR2]]
// CHECK-NEXT: br label [[SIMD_IF_END]]
// CHECK: simd.if.end:
// CHECK-NEXT: ret void
diff --git a/clang/test/OpenMP/bug57757.cpp b/clang/test/OpenMP/bug57757.cpp
index eabf233..caf53a5b 100644
--- a/clang/test/OpenMP/bug57757.cpp
+++ b/clang/test/OpenMP/bug57757.cpp
@@ -46,7 +46,7 @@ void foo() {
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 52
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 48
// CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP5]], align 8, !tbaa [[TBAA19:![0-9]+]], !noalias [[META13]]
-// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP7]], align 4, !tbaa [[TBAA16]], !noalias [[META13]]
+// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP7]], align 8, !tbaa [[TBAA16]], !noalias [[META13]]
// CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[TMP6]], align 4, !tbaa [[TBAA20:![0-9]+]], !noalias [[META13]]
// CHECK-NEXT: tail call void [[TMP8]](i32 noundef [[TMP9]], float noundef [[TMP10]]) #[[ATTR2:[0-9]+]], !noalias [[META13]]
// CHECK-NEXT: br label [[DOTOMP_OUTLINED__EXIT]]
diff --git a/clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp b/clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp
index 3a09f37..04dd9c0 100644
--- a/clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp
+++ b/clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp
@@ -7,6 +7,14 @@
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3
+// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
+
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK5
@@ -23,6 +31,14 @@
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK11
+// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
+
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK13
// RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK13
@@ -51,12 +67,20 @@ template <typename T, int C>
int tmain() {
#pragma omp target
#pragma omp teams
+#ifdef OMP60
+#pragma omp distribute parallel for num_threads(strict: C) severity(warning) message("msg")
+#else
#pragma omp distribute parallel for num_threads(C)
+#endif
for (int i = 0; i < 100; i++)
foo();
#pragma omp target
#pragma omp teams
+#ifdef OMP60
+#pragma omp distribute parallel for num_threads(T(23)) severity(fatal) message("msg1")
+#else
#pragma omp distribute parallel for num_threads(T(23))
+#endif
for (int i = 0; i < 100; i++)
foo();
return 0;
@@ -67,14 +91,22 @@ int main() {
char a = s;
#pragma omp target
#pragma omp teams
+#ifdef OMP60
+#pragma omp distribute parallel for num_threads(2) severity(warning) message("msg2")
+#else
#pragma omp distribute parallel for num_threads(2)
+#endif
for (int i = 0; i < 100; i++) {
foo();
}
#pragma omp target
#pragma omp teams
+#ifdef OMP60
+#pragma omp distribute parallel for num_threads(a) severity(fatal) message("msg3")
+#else
#pragma omp distribute parallel for num_threads(a)
+#endif
for (int i = 0; i < 100; i++) {
foo();
}
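The two hunks above guard each directive so that, under -fopenmp-version=60, the test also exercises the strict num_threads modifier together with the severity and message clauses: with severity(warning) a run that cannot honor the strict thread request is expected to warn with the given message and continue, while severity(fatal) is expected to abort. A self-contained sketch of the same clause combination, with a hypothetical function name and message text, reads:

// Hypothetical standalone example of the OpenMP 6.0 clauses exercised by this
// test; it is not part of the patch. The strict modifier requests exactly two
// threads, and severity/message control what happens if that cannot be granted.
void foo();

void demo() {
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for num_threads(strict: 2) severity(warning) message("strict num_threads(2) not honored")
  for (int i = 0; i < 100; i++)
    foo();
}
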
@@ -141,11 +173,11 @@ int main() {
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
-// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
-// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]]
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: lpad:
// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
@@ -194,11 +226,11 @@ int main() {
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP37]], align 4
-// CHECK1-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK1-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]])
// CHECK1-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
// CHECK1-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
// CHECK1: omp_offload.failed3:
-// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]]
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
// CHECK1: omp_offload.cont4:
// CHECK1-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1
@@ -249,14 +281,14 @@ int main() {
// CHECK1-NEXT: ret i8 [[CONV]]
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92
// CHECK1-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
-// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined)
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined)
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -302,7 +334,7 @@ int main() {
// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -317,7 +349,7 @@ int main() {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -403,16 +435,16 @@ int main() {
// CHECK1-NEXT: unreachable
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102
// CHECK1-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
-// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]])
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -428,7 +460,7 @@ int main() {
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
@@ -463,7 +495,7 @@ int main() {
// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
-// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -478,7 +510,7 @@ int main() {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -590,11 +622,11 @@ int main() {
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
-// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
-// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]]
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
@@ -623,11 +655,11 @@ int main() {
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4
-// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]])
// CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
// CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
// CHECK1: omp_offload.failed3:
-// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]]
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
// CHECK1: omp_offload.cont4:
// CHECK1-NEXT: ret i32 0
@@ -666,11 +698,11 @@ int main() {
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
-// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
-// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]]
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
@@ -699,11 +731,11 @@ int main() {
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4
-// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]])
// CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
// CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
// CHECK1: omp_offload.failed3:
-// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]]
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
// CHECK1: omp_offload.cont4:
// CHECK1-NEXT: ret i32 0
@@ -742,14 +774,14 @@ int main() {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68
// CHECK1-SAME: () #[[ATTR2]] {
// CHECK1-NEXT: entry:
-// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined)
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined)
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -795,7 +827,7 @@ int main() {
// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -810,7 +842,7 @@ int main() {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -889,14 +921,14 @@ int main() {
// CHECK1-NEXT: unreachable
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77
// CHECK1-SAME: () #[[ATTR2]] {
// CHECK1-NEXT: entry:
-// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined)
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined)
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -942,7 +974,7 @@ int main() {
// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -957,7 +989,7 @@ int main() {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -1036,14 +1068,14 @@ int main() {
// CHECK1-NEXT: unreachable
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68
// CHECK1-SAME: () #[[ATTR2]] {
// CHECK1-NEXT: entry:
-// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined)
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined)
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -1089,7 +1121,7 @@ int main() {
// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -1104,7 +1136,7 @@ int main() {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -1183,14 +1215,14 @@ int main() {
// CHECK1-NEXT: unreachable
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77
// CHECK1-SAME: () #[[ATTR2]] {
// CHECK1-NEXT: entry:
-// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined)
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined)
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -1245,7 +1277,7 @@ int main() {
// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
-// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -1266,7 +1298,7 @@ int main() {
// CHECK1-NEXT: unreachable
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -1345,6 +1377,1258 @@ int main() {
// CHECK1-NEXT: unreachable
//
//
+// CHECK3-LABEL: define {{[^@]+}}@main
+// CHECK3-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK3-NEXT: [[A:%.*]] = alloca i8, align 1
+// CHECK3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
+// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4
+// CHECK3-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0)
+// CHECK3-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]])
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: store i8 [[CALL]], ptr [[A]], align 1
+// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
+// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]])
+// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK3: omp_offload.failed:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK3: lpad:
+// CHECK3-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: cleanup
+// CHECK3-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
+// CHECK3-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8
+// CHECK3-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1
+// CHECK3-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
+// CHECK3-NEXT: br label [[EH_RESUME:%.*]]
+// CHECK3: omp_offload.cont:
+// CHECK3-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1
+// CHECK3-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1
+// CHECK3-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8
+// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK3-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8
+// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK3-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8
+// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8
+// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP25]], align 4
+// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK3-NEXT: store i32 1, ptr [[TMP26]], align 4
+// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK3-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
+// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
+// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8
+// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8
+// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP31]], align 8
+// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP32]], align 8
+// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK3-NEXT: store i64 100, ptr [[TMP33]], align 8
+// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP34]], align 8
+// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
+// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
+// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP37]], align 4
+// CHECK3-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK3-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
+// CHECK3-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK3: omp_offload.failed3:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK3: omp_offload.cont4:
+// CHECK3-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1
+// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32
+// CHECK3-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv()
+// CHECK3-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
+// CHECK3: invoke.cont5:
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]]
+// CHECK3-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv()
+// CHECK3-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]]
+// CHECK3: invoke.cont7:
+// CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]]
+// CHECK3-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4
+// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
+// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK3-NEXT: ret i32 [[TMP41]]
+// CHECK3: eh.resume:
+// CHECK3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
+// CHECK3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
+// CHECK3-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
+// CHECK3-NEXT: resume { ptr, i32 } [[LPAD_VAL10]]
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1El
+// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]])
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1ScvcEv
+// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: call void @_Z8mayThrowv()
+// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
+// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
+// CHECK3-NEXT: ret i8 [[CONV]]
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92
+// CHECK3-SAME: () #[[ATTR2:[0-9]+]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined)
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK3: cond.true:
+// CHECK3-NEXT: br label [[COND_END:%.*]]
+// CHECK3: cond.false:
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: br label [[COND_END]]
+// CHECK3: cond.end:
+// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2)
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK3: omp.loop.exit:
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK3: cond.true:
+// CHECK3-NEXT: br label [[COND_END:%.*]]
+// CHECK3: cond.false:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: br label [[COND_END]]
+// CHECK3: cond.end:
+// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK3: omp.body.continue:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK3: omp.loop.exit:
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK3-NEXT: ret void
+// CHECK3: terminate.lpad:
+// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: catch ptr null
+// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate
+// CHECK3-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
+// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]]
+// CHECK3-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102
+// CHECK3-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]])
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]]
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK3: cond.true:
+// CHECK3-NEXT: br label [[COND_END:%.*]]
+// CHECK3: cond.false:
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: br label [[COND_END]]
+// CHECK3: cond.end:
+// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1
+// CHECK3-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32
+// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]])
+// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]])
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK3: omp.loop.exit:
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK3: cond.true:
+// CHECK3-NEXT: br label [[COND_END:%.*]]
+// CHECK3: cond.false:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: br label [[COND_END]]
+// CHECK3: cond.end:
+// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK3: omp.body.continue:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK3: omp.loop.exit:
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK3-NEXT: ret void
+// CHECK3: terminate.lpad:
+// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: catch ptr null
+// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
+// CHECK3-SAME: () #[[ATTR6:[0-9]+]] comdat {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
+// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]])
+// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK3: omp_offload.failed:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK3: omp_offload.cont:
+// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP15]], align 4
+// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK3-NEXT: store i32 0, ptr [[TMP16]], align 4
+// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 8
+// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 8
+// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 8
+// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK3-NEXT: store ptr null, ptr [[TMP20]], align 8
+// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP21]], align 8
+// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8
+// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK3-NEXT: store i64 100, ptr [[TMP23]], align 8
+// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP24]], align 8
+// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4
+// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
+// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP27]], align 4
+// CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
+// CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK3: omp_offload.failed3:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK3: omp_offload.cont4:
+// CHECK3-NEXT: ret i32 0
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
+// CHECK3-SAME: () #[[ATTR6]] comdat {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
+// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]])
+// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK3: omp_offload.failed:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK3: omp_offload.cont:
+// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP15]], align 4
+// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK3-NEXT: store i32 0, ptr [[TMP16]], align 4
+// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 8
+// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 8
+// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 8
+// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK3-NEXT: store ptr null, ptr [[TMP20]], align 8
+// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP21]], align 8
+// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8
+// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK3-NEXT: store i64 100, ptr [[TMP23]], align 8
+// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP24]], align 8
+// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4
+// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
+// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP27]], align 4
+// CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
+// CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK3: omp_offload.failed3:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK3: omp_offload.cont4:
+// CHECK3-NEXT: ret i32 0
+//
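+// The stores above populate the __tgt_kernel_arguments descriptor handed to
+// __tgt_target_kernel. As a sketch, assuming the current libomptarget
+// KernelArgsTy layout: index 0 is the version (3), index 1 the mapped-argument
+// count (0 here, so the base-pointer, pointer, size, map-type, name, and
+// mapper arrays at indices 2-7 all stay null), index 8 the loop trip count
+// (100), index 9 the flags, indices 10 and 11 the num_teams and thread_limit
+// triples, and index 12 the dynamic cgroup memory size.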
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev
+// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]]
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2El
+// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68
+// CHECK3-SAME: () #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined)
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK3: cond.true:
+// CHECK3-NEXT: br label [[COND_END:%.*]]
+// CHECK3: cond.false:
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: br label [[COND_END]]
+// CHECK3: cond.end:
+// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 1, ptr @.str)
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK3: omp.loop.exit:
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK3-NEXT: ret void
+//
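+// The call above uses __kmpc_push_num_threads_strict rather than the plain
+// __kmpc_push_num_threads seen in the l77 region below: with the strict
+// modifier on num_threads, the runtime entry point takes two extra operands,
+// here i32 1 and ptr @.str, which appear to carry a severity level and a user
+// message for when the requested thread count (5) cannot be provided.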
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK3: cond.true:
+// CHECK3-NEXT: br label [[COND_END:%.*]]
+// CHECK3: cond.false:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: br label [[COND_END]]
+// CHECK3: cond.end:
+// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK3: omp.body.continue:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK3: omp.loop.exit:
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK3-NEXT: ret void
+// CHECK3: terminate.lpad:
+// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: catch ptr null
+// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]]
+// CHECK3-NEXT: unreachable
+//
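+// The terminate.lpad block above is the standard pattern for an outlined
+// OpenMP body that calls a potentially throwing function: _Z3foov is invoked,
+// any exception is caught by the catch-all landing pad, and
+// __clang_call_terminate aborts, since exceptions must not escape the
+// outlined parallel region.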
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77
+// CHECK3-SAME: () #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined)
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK3: cond.true:
+// CHECK3-NEXT: br label [[COND_END:%.*]]
+// CHECK3: cond.false:
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: br label [[COND_END]]
+// CHECK3: cond.end:
+// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23)
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK3: omp.loop.exit:
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK3: cond.true:
+// CHECK3-NEXT: br label [[COND_END:%.*]]
+// CHECK3: cond.false:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: br label [[COND_END]]
+// CHECK3: cond.end:
+// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK3: omp.body.continue:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK3: omp.loop.exit:
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK3-NEXT: ret void
+// CHECK3: terminate.lpad:
+// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: catch ptr null
+// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68
+// CHECK3-SAME: () #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined)
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK3: cond.true:
+// CHECK3-NEXT: br label [[COND_END:%.*]]
+// CHECK3: cond.false:
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: br label [[COND_END]]
+// CHECK3: cond.end:
+// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 1, ptr @.str)
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK3: omp.loop.exit:
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK3: cond.true:
+// CHECK3-NEXT: br label [[COND_END:%.*]]
+// CHECK3: cond.false:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: br label [[COND_END]]
+// CHECK3: cond.end:
+// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK3: omp.body.continue:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK3: omp.loop.exit:
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK3-NEXT: ret void
+// CHECK3: terminate.lpad:
+// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: catch ptr null
+// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77
+// CHECK3-SAME: () #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined)
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK3: cond.true:
+// CHECK3-NEXT: br label [[COND_END:%.*]]
+// CHECK3: cond.false:
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: br label [[COND_END]]
+// CHECK3: cond.end:
+// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23)
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]])
+// CHECK3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]]
+// CHECK3: invoke.cont2:
+// CHECK3-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32
+// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]])
+// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]]
+// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK3-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
+// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK3-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]])
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK3: omp.loop.exit:
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK3-NEXT: ret void
+// CHECK3: terminate.lpad:
+// CHECK3-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: catch ptr null
+// CHECK3-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0
+// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]]
+// CHECK3-NEXT: unreachable
+//
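+// In the region above the num_threads argument is a class-type expression:
+// a temporary S is constructed from 23 (_ZN1SC1El), converted to char via
+// _ZN1ScvcEv, sign-extended to i32 for __kmpc_push_num_threads, and destroyed
+// (_ZN1SD1Ev) before the inner parallel region is forked.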
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK3: cond.true:
+// CHECK3-NEXT: br label [[COND_END:%.*]]
+// CHECK3: cond.false:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: br label [[COND_END]]
+// CHECK3: cond.end:
+// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK3: omp.body.continue:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK3: omp.loop.exit:
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK3-NEXT: ret void
+// CHECK3: terminate.lpad:
+// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: catch ptr null
+// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev
+// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: ret void
+//
+//
// CHECK5-LABEL: define {{[^@]+}}@main
// CHECK5-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
// CHECK5-NEXT: entry:
@@ -1393,11 +2677,11 @@ int main() {
// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4
-// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]])
+// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]])
// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
-// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]]
+// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: lpad:
// CHECK5-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
@@ -1446,11 +2730,11 @@ int main() {
// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
// CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
// CHECK5-NEXT: store i32 0, ptr [[TMP37]], align 4
-// CHECK5-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK5-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]])
// CHECK5-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
// CHECK5-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
// CHECK5: omp_offload.failed3:
-// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]]
+// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]]
// CHECK5: omp_offload.cont4:
// CHECK5-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1
@@ -1501,14 +2785,14 @@ int main() {
// CHECK5-NEXT: ret i8 [[CONV]]
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92
// CHECK5-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK5-NEXT: entry:
-// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined)
+// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined)
// CHECK5-NEXT: ret void
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -1554,7 +2838,7 @@ int main() {
// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -1569,7 +2853,7 @@ int main() {
// CHECK5-NEXT: ret void
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -1655,16 +2939,16 @@ int main() {
// CHECK5-NEXT: unreachable
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102
// CHECK5-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
-// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]])
+// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]])
// CHECK5-NEXT: ret void
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -1680,7 +2964,7 @@ int main() {
// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
-// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]]
// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
@@ -1715,7 +2999,7 @@ int main() {
// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
-// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]])
+// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]])
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -1730,7 +3014,7 @@ int main() {
// CHECK5-NEXT: ret void
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -1842,11 +3126,11 @@ int main() {
// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4
-// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
+// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]])
// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
-// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]]
+// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: omp_offload.cont:
// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
@@ -1875,11 +3159,11 @@ int main() {
// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
// CHECK5-NEXT: store i32 0, ptr [[TMP27]], align 4
-// CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]])
// CHECK5-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
// CHECK5-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
// CHECK5: omp_offload.failed3:
-// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]]
+// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]]
// CHECK5: omp_offload.cont4:
// CHECK5-NEXT: ret i32 0
@@ -1918,11 +3202,11 @@ int main() {
// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4
-// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
+// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]])
// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
-// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]]
+// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: omp_offload.cont:
// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
@@ -1951,11 +3235,11 @@ int main() {
// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
// CHECK5-NEXT: store i32 0, ptr [[TMP27]], align 4
-// CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]])
// CHECK5-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
// CHECK5-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
// CHECK5: omp_offload.failed3:
-// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]]
+// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]]
// CHECK5: omp_offload.cont4:
// CHECK5-NEXT: ret i32 0
@@ -1985,14 +3269,14 @@ int main() {
// CHECK5-NEXT: ret void
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68
// CHECK5-SAME: () #[[ATTR2]] {
// CHECK5-NEXT: entry:
-// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined)
+// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined)
// CHECK5-NEXT: ret void
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -2038,7 +3322,7 @@ int main() {
// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -2053,7 +3337,7 @@ int main() {
// CHECK5-NEXT: ret void
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -2132,14 +3416,14 @@ int main() {
// CHECK5-NEXT: unreachable
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77
// CHECK5-SAME: () #[[ATTR2]] {
// CHECK5-NEXT: entry:
-// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined)
+// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined)
// CHECK5-NEXT: ret void
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -2185,7 +3469,7 @@ int main() {
// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -2200,7 +3484,7 @@ int main() {
// CHECK5-NEXT: ret void
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -2279,14 +3563,14 @@ int main() {
// CHECK5-NEXT: unreachable
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68
// CHECK5-SAME: () #[[ATTR2]] {
// CHECK5-NEXT: entry:
-// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined)
+// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined)
// CHECK5-NEXT: ret void
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -2332,7 +3616,7 @@ int main() {
// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -2347,7 +3631,7 @@ int main() {
// CHECK5-NEXT: ret void
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -2426,14 +3710,14 @@ int main() {
// CHECK5-NEXT: unreachable
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77
// CHECK5-SAME: () #[[ATTR2]] {
// CHECK5-NEXT: entry:
-// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined)
+// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined)
// CHECK5-NEXT: ret void
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -2488,7 +3772,7 @@ int main() {
// CHECK5-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
-// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]])
+// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]])
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -2509,7 +3793,7 @@ int main() {
// CHECK5-NEXT: unreachable
//
//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined
+// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined
// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -2645,11 +3929,11 @@ int main() {
// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4
-// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]])
+// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]])
// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK9: omp_offload.failed:
-// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]]
+// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]]
// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK9: lpad:
// CHECK9-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
@@ -2698,11 +3982,11 @@ int main() {
// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
// CHECK9-NEXT: store i32 0, ptr [[TMP37]], align 4
-// CHECK9-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK9-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]])
// CHECK9-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
// CHECK9-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
// CHECK9: omp_offload.failed3:
-// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]]
+// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]]
// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]]
// CHECK9: omp_offload.cont4:
// CHECK9-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1
@@ -2753,14 +4037,14 @@ int main() {
// CHECK9-NEXT: ret i8 [[CONV]]
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92
// CHECK9-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK9-NEXT: entry:
-// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined)
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined)
// CHECK9-NEXT: ret void
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -2806,7 +4090,7 @@ int main() {
// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -2821,7 +4105,7 @@ int main() {
// CHECK9-NEXT: ret void
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -2907,16 +4191,16 @@ int main() {
// CHECK9-NEXT: unreachable
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102
// CHECK9-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
-// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]])
// CHECK9-NEXT: ret void
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -2932,7 +4216,7 @@ int main() {
// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
-// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]]
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
@@ -2967,7 +4251,7 @@ int main() {
// CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
-// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]])
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -2982,7 +4266,7 @@ int main() {
// CHECK9-NEXT: ret void
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -3094,11 +4378,11 @@ int main() {
// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4
-// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
+// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]])
// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK9: omp_offload.failed:
-// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]]
+// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]]
// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK9: omp_offload.cont:
// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
@@ -3127,11 +4411,11 @@ int main() {
// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
// CHECK9-NEXT: store i32 0, ptr [[TMP27]], align 4
-// CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]])
// CHECK9-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
// CHECK9-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
// CHECK9: omp_offload.failed3:
-// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]]
+// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]]
// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]]
// CHECK9: omp_offload.cont4:
// CHECK9-NEXT: ret i32 0
@@ -3170,11 +4454,11 @@ int main() {
// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4
-// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
+// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]])
// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK9: omp_offload.failed:
-// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]]
+// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]]
// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK9: omp_offload.cont:
// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
@@ -3203,11 +4487,11 @@ int main() {
// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
// CHECK9-NEXT: store i32 0, ptr [[TMP27]], align 4
-// CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]])
// CHECK9-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
// CHECK9-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
// CHECK9: omp_offload.failed3:
-// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]]
+// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]]
// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]]
// CHECK9: omp_offload.cont4:
// CHECK9-NEXT: ret i32 0
@@ -3246,14 +4530,14 @@ int main() {
// CHECK9-NEXT: ret void
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68
// CHECK9-SAME: () #[[ATTR2]] {
// CHECK9-NEXT: entry:
-// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined)
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined)
// CHECK9-NEXT: ret void
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -3299,7 +4583,7 @@ int main() {
// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -3314,7 +4598,7 @@ int main() {
// CHECK9-NEXT: ret void
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -3393,14 +4677,14 @@ int main() {
// CHECK9-NEXT: unreachable
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77
// CHECK9-SAME: () #[[ATTR2]] {
// CHECK9-NEXT: entry:
-// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined)
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined)
// CHECK9-NEXT: ret void
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -3446,7 +4730,7 @@ int main() {
// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -3461,7 +4745,7 @@ int main() {
// CHECK9-NEXT: ret void
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -3540,14 +4824,14 @@ int main() {
// CHECK9-NEXT: unreachable
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68
// CHECK9-SAME: () #[[ATTR2]] {
// CHECK9-NEXT: entry:
-// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined)
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined)
// CHECK9-NEXT: ret void
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -3593,7 +4877,7 @@ int main() {
// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -3608,7 +4892,7 @@ int main() {
// CHECK9-NEXT: ret void
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -3687,14 +4971,14 @@ int main() {
// CHECK9-NEXT: unreachable
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77
// CHECK9-SAME: () #[[ATTR2]] {
// CHECK9-NEXT: entry:
-// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined)
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined)
// CHECK9-NEXT: ret void
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -3749,7 +5033,7 @@ int main() {
// CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
-// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]])
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -3770,7 +5054,7 @@ int main() {
// CHECK9-NEXT: unreachable
//
//
-// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -3849,6 +5133,1258 @@ int main() {
// CHECK9-NEXT: unreachable
//
//
+// CHECK11-LABEL: define {{[^@]+}}@main
+// CHECK11-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK11-NEXT: [[A:%.*]] = alloca i8, align 1
+// CHECK11-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
+// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK11-NEXT: store i32 0, ptr [[RETVAL]], align 4
+// CHECK11-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0)
+// CHECK11-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]])
+// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK11: invoke.cont:
+// CHECK11-NEXT: store i8 [[CALL]], ptr [[A]], align 1
+// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
+// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]])
+// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK11: omp_offload.failed:
+// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]]
+// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK11: lpad:
+// CHECK11-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
+// CHECK11-NEXT: cleanup
+// CHECK11-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
+// CHECK11-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8
+// CHECK11-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1
+// CHECK11-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
+// CHECK11-NEXT: br label [[EH_RESUME:%.*]]
+// CHECK11: omp_offload.cont:
+// CHECK11-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1
+// CHECK11-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1
+// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8
+// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK11-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8
+// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK11-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8
+// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8
+// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK11-NEXT: store i32 3, ptr [[TMP25]], align 4
+// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK11-NEXT: store i32 1, ptr [[TMP26]], align 4
+// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK11-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
+// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK11-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
+// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK11-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8
+// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8
+// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK11-NEXT: store ptr null, ptr [[TMP31]], align 8
+// CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK11-NEXT: store ptr null, ptr [[TMP32]], align 8
+// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK11-NEXT: store i64 100, ptr [[TMP33]], align 8
+// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK11-NEXT: store i64 0, ptr [[TMP34]], align 8
+// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
+// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
+// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK11-NEXT: store i32 0, ptr [[TMP37]], align 4
+// CHECK11-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK11-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
+// CHECK11-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK11: omp_offload.failed3:
+// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]]
+// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK11: omp_offload.cont4:
+// CHECK11-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1
+// CHECK11-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32
+// CHECK11-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv()
+// CHECK11-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
+// CHECK11: invoke.cont5:
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]]
+// CHECK11-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv()
+// CHECK11-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]]
+// CHECK11: invoke.cont7:
+// CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]]
+// CHECK11-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4
+// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
+// CHECK11-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK11-NEXT: ret i32 [[TMP41]]
+// CHECK11: eh.resume:
+// CHECK11-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
+// CHECK11-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK11-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
+// CHECK11-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
+// CHECK11-NEXT: resume { ptr, i32 } [[LPAD_VAL10]]
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC1El
+// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK11-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]])
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_ZN1ScvcEv
+// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: call void @_Z8mayThrowv()
+// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
+// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
+// CHECK11-NEXT: ret i8 [[CONV]]
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92
+// CHECK11-SAME: () #[[ATTR2:[0-9]+]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined)
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK11: cond.true:
+// CHECK11-NEXT: br label [[COND_END:%.*]]
+// CHECK11: cond.false:
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: br label [[COND_END]]
+// CHECK11: cond.end:
+// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2)
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK11: omp.loop.exit:
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK11: cond.true:
+// CHECK11-NEXT: br label [[COND_END:%.*]]
+// CHECK11: cond.false:
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: br label [[COND_END]]
+// CHECK11: cond.end:
+// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK11-NEXT: invoke void @_Z3foov()
+// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK11: invoke.cont:
+// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK11: omp.body.continue:
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK11: omp.loop.exit:
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK11-NEXT: ret void
+// CHECK11: terminate.lpad:
+// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK11-NEXT: catch ptr null
+// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]]
+// CHECK11-NEXT: unreachable
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@__clang_call_terminate
+// CHECK11-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
+// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]]
+// CHECK11-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
+// CHECK11-NEXT: unreachable
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102
+// CHECK11-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]])
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]]
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK11: cond.true:
+// CHECK11-NEXT: br label [[COND_END:%.*]]
+// CHECK11: cond.false:
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: br label [[COND_END]]
+// CHECK11: cond.end:
+// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1
+// CHECK11-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32
+// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]])
+// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]])
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK11: omp.loop.exit:
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK11: cond.true:
+// CHECK11-NEXT: br label [[COND_END:%.*]]
+// CHECK11: cond.false:
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: br label [[COND_END]]
+// CHECK11: cond.end:
+// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK11-NEXT: invoke void @_Z3foov()
+// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK11: invoke.cont:
+// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK11: omp.body.continue:
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK11: omp.loop.exit:
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK11-NEXT: ret void
+// CHECK11: terminate.lpad:
+// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK11-NEXT: catch ptr null
+// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]]
+// CHECK11-NEXT: unreachable
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
+// CHECK11-SAME: () #[[ATTR6:[0-9]+]] comdat {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
+// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]])
+// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK11: omp_offload.failed:
+// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]]
+// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK11: omp_offload.cont:
+// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK11-NEXT: store i32 3, ptr [[TMP15]], align 4
+// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK11-NEXT: store i32 0, ptr [[TMP16]], align 4
+// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK11-NEXT: store ptr null, ptr [[TMP17]], align 8
+// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK11-NEXT: store ptr null, ptr [[TMP18]], align 8
+// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 8
+// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK11-NEXT: store ptr null, ptr [[TMP20]], align 8
+// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 8
+// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8
+// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK11-NEXT: store i64 100, ptr [[TMP23]], align 8
+// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK11-NEXT: store i64 0, ptr [[TMP24]], align 8
+// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4
+// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
+// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK11-NEXT: store i32 0, ptr [[TMP27]], align 4
+// CHECK11-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK11-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
+// CHECK11-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK11: omp_offload.failed3:
+// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]]
+// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK11: omp_offload.cont4:
+// CHECK11-NEXT: ret i32 0
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
+// CHECK11-SAME: () #[[ATTR6]] comdat {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
+// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]])
+// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK11: omp_offload.failed:
+// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]]
+// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK11: omp_offload.cont:
+// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK11-NEXT: store i32 3, ptr [[TMP15]], align 4
+// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK11-NEXT: store i32 0, ptr [[TMP16]], align 4
+// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK11-NEXT: store ptr null, ptr [[TMP17]], align 8
+// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK11-NEXT: store ptr null, ptr [[TMP18]], align 8
+// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 8
+// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK11-NEXT: store ptr null, ptr [[TMP20]], align 8
+// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 8
+// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8
+// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK11-NEXT: store i64 100, ptr [[TMP23]], align 8
+// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK11-NEXT: store i64 0, ptr [[TMP24]], align 8
+// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4
+// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
+// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK11-NEXT: store i32 0, ptr [[TMP27]], align 4
+// CHECK11-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK11-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
+// CHECK11-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK11: omp_offload.failed3:
+// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]]
+// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK11: omp_offload.cont4:
+// CHECK11-NEXT: ret i32 0
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD1Ev
+// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]]
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC2El
+// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK11-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68
+// CHECK11-SAME: () #[[ATTR2]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined)
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK11: cond.true:
+// CHECK11-NEXT: br label [[COND_END:%.*]]
+// CHECK11: cond.false:
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: br label [[COND_END]]
+// CHECK11: cond.end:
+// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 1, ptr @.str)
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK11: omp.loop.exit:
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK11: cond.true:
+// CHECK11-NEXT: br label [[COND_END:%.*]]
+// CHECK11: cond.false:
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: br label [[COND_END]]
+// CHECK11: cond.end:
+// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK11-NEXT: invoke void @_Z3foov()
+// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK11: invoke.cont:
+// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK11: omp.body.continue:
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK11: omp.loop.exit:
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK11-NEXT: ret void
+// CHECK11: terminate.lpad:
+// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK11-NEXT: catch ptr null
+// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]]
+// CHECK11-NEXT: unreachable
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77
+// CHECK11-SAME: () #[[ATTR2]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined)
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK11: cond.true:
+// CHECK11-NEXT: br label [[COND_END:%.*]]
+// CHECK11: cond.false:
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: br label [[COND_END]]
+// CHECK11: cond.end:
+// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23)
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK11: omp.loop.exit:
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK11: cond.true:
+// CHECK11-NEXT: br label [[COND_END:%.*]]
+// CHECK11: cond.false:
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: br label [[COND_END]]
+// CHECK11: cond.end:
+// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK11-NEXT: invoke void @_Z3foov()
+// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK11: invoke.cont:
+// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK11: omp.body.continue:
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK11: omp.loop.exit:
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK11-NEXT: ret void
+// CHECK11: terminate.lpad:
+// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK11-NEXT: catch ptr null
+// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]]
+// CHECK11-NEXT: unreachable
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68
+// CHECK11-SAME: () #[[ATTR2]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined)
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK11: cond.true:
+// CHECK11-NEXT: br label [[COND_END:%.*]]
+// CHECK11: cond.false:
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: br label [[COND_END]]
+// CHECK11: cond.end:
+// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 1, ptr @.str)
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK11: omp.loop.exit:
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK11: cond.true:
+// CHECK11-NEXT: br label [[COND_END:%.*]]
+// CHECK11: cond.false:
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: br label [[COND_END]]
+// CHECK11: cond.end:
+// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK11-NEXT: invoke void @_Z3foov()
+// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK11: invoke.cont:
+// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK11: omp.body.continue:
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK11: omp.loop.exit:
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK11-NEXT: ret void
+// CHECK11: terminate.lpad:
+// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK11-NEXT: catch ptr null
+// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]]
+// CHECK11-NEXT: unreachable
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77
+// CHECK11-SAME: () #[[ATTR2]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined)
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK11: cond.true:
+// CHECK11-NEXT: br label [[COND_END:%.*]]
+// CHECK11: cond.false:
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: br label [[COND_END]]
+// CHECK11: cond.end:
+// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23)
+// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK11: invoke.cont:
+// CHECK11-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]])
+// CHECK11-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]]
+// CHECK11: invoke.cont2:
+// CHECK11-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32
+// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]])
+// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]]
+// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK11-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
+// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK11-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]])
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK11: omp.loop.exit:
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK11-NEXT: ret void
+// CHECK11: terminate.lpad:
+// CHECK11-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 }
+// CHECK11-NEXT: catch ptr null
+// CHECK11-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0
+// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]]
+// CHECK11-NEXT: unreachable
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK11: cond.true:
+// CHECK11-NEXT: br label [[COND_END:%.*]]
+// CHECK11: cond.false:
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: br label [[COND_END]]
+// CHECK11: cond.end:
+// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK11-NEXT: invoke void @_Z3foov()
+// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK11: invoke.cont:
+// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK11: omp.body.continue:
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK11: omp.loop.exit:
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK11-NEXT: ret void
+// CHECK11: terminate.lpad:
+// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK11-NEXT: catch ptr null
+// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]]
+// CHECK11-NEXT: unreachable
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD2Ev
+// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: ret void
+//
+//
// CHECK13-LABEL: define {{[^@]+}}@main
// CHECK13-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
// CHECK13-NEXT: entry:
@@ -3897,11 +6433,11 @@ int main() {
// CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK13-NEXT: store i32 0, ptr [[TMP12]], align 4
-// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]])
+// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]])
// CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK13-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK13: omp_offload.failed:
-// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]]
+// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK13: lpad:
// CHECK13-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
@@ -3950,11 +6486,11 @@ int main() {
// CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
// CHECK13-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
// CHECK13-NEXT: store i32 0, ptr [[TMP37]], align 4
-// CHECK13-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK13-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]])
// CHECK13-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
// CHECK13-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
// CHECK13: omp_offload.failed3:
-// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]]
+// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT4]]
// CHECK13: omp_offload.cont4:
// CHECK13-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1
@@ -4005,14 +6541,14 @@ int main() {
// CHECK13-NEXT: ret i8 [[CONV]]
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92
// CHECK13-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK13-NEXT: entry:
-// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined)
+// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined)
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined
// CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -4058,7 +6594,7 @@ int main() {
// CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -4073,7 +6609,7 @@ int main() {
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined
// CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -4159,16 +6695,16 @@ int main() {
// CHECK13-NEXT: unreachable
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102
// CHECK13-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
-// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]])
+// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]])
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined
// CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -4184,7 +6720,7 @@ int main() {
// CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
-// CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]]
// CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
@@ -4219,7 +6755,7 @@ int main() {
// CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
-// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]])
+// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -4234,7 +6770,7 @@ int main() {
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined
// CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -4346,11 +6882,11 @@ int main() {
// CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK13-NEXT: store i32 0, ptr [[TMP12]], align 4
-// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
+// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]])
// CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK13-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK13: omp_offload.failed:
-// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]]
+// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK13: omp_offload.cont:
// CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
@@ -4379,11 +6915,11 @@ int main() {
// CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
// CHECK13-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
// CHECK13-NEXT: store i32 0, ptr [[TMP27]], align 4
-// CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]])
// CHECK13-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
// CHECK13-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
// CHECK13: omp_offload.failed3:
-// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]]
+// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT4]]
// CHECK13: omp_offload.cont4:
// CHECK13-NEXT: ret i32 0
@@ -4422,11 +6958,11 @@ int main() {
// CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK13-NEXT: store i32 0, ptr [[TMP12]], align 4
-// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
+// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]])
// CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK13-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK13: omp_offload.failed:
-// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]]
+// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK13: omp_offload.cont:
// CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
@@ -4455,11 +6991,11 @@ int main() {
// CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
// CHECK13-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
// CHECK13-NEXT: store i32 0, ptr [[TMP27]], align 4
-// CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]])
// CHECK13-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
// CHECK13-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
// CHECK13: omp_offload.failed3:
-// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]]
+// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT4]]
// CHECK13: omp_offload.cont4:
// CHECK13-NEXT: ret i32 0
@@ -4489,14 +7025,14 @@ int main() {
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68
// CHECK13-SAME: () #[[ATTR2]] {
// CHECK13-NEXT: entry:
-// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined)
+// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined)
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined
// CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -4542,7 +7078,7 @@ int main() {
// CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -4557,7 +7093,7 @@ int main() {
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined
// CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -4636,14 +7172,14 @@ int main() {
// CHECK13-NEXT: unreachable
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77
// CHECK13-SAME: () #[[ATTR2]] {
// CHECK13-NEXT: entry:
-// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined)
+// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined)
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined
// CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -4689,7 +7225,7 @@ int main() {
// CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -4704,7 +7240,7 @@ int main() {
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined
// CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -4783,14 +7319,14 @@ int main() {
// CHECK13-NEXT: unreachable
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68
// CHECK13-SAME: () #[[ATTR2]] {
// CHECK13-NEXT: entry:
-// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined)
+// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined)
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined
// CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -4836,7 +7372,7 @@ int main() {
// CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
-// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
+// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -4851,7 +7387,7 @@ int main() {
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined
// CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -4930,14 +7466,14 @@ int main() {
// CHECK13-NEXT: unreachable
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77
// CHECK13-SAME: () #[[ATTR2]] {
// CHECK13-NEXT: entry:
-// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined)
+// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined)
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined
// CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -4992,7 +7528,7 @@ int main() {
// CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
-// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]])
+// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
@@ -5013,7 +7549,7 @@ int main() {
// CHECK13-NEXT: unreachable
//
//
-// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined
+// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined
// CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
diff --git a/clang/test/OpenMP/distribute_parallel_for_simd_num_threads_strict_codegen.cpp b/clang/test/OpenMP/distribute_parallel_for_simd_num_threads_strict_codegen.cpp
new file mode 100644
index 0000000..7c4e995
--- /dev/null
+++ b/clang/test/OpenMP/distribute_parallel_for_simd_num_threads_strict_codegen.cpp
@@ -0,0 +1,3541 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
+// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1
+// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
+
+// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3
+// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
+
+// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK9
+// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9
+
+// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK11
+// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
+
+// expected-no-diagnostics
+#ifndef HEADER
+#define HEADER
+
+typedef __INTPTR_TYPE__ intptr_t;
+
+
+void foo();
+
+struct S {
+ intptr_t a, b, c;
+ S(intptr_t a) : a(a) {}
+ operator char() { extern void mayThrow(); mayThrow(); return a; }
+ ~S() {}
+};
+
+template <typename T, int C>
+int tmain() {
+ char str[] = "msg1";
+#pragma omp target
+#pragma omp teams
+#pragma omp distribute parallel for simd num_threads(strict: C) severity(fatal) message("msg")
+ for (int i = 0; i < 100; i++)
+ foo();
+#pragma omp target
+#pragma omp teams
+#pragma omp distribute parallel for simd num_threads(strict: T(23)) severity(warning) message(str)
+ for (int i = 0; i < 100; i++)
+ foo();
+ return 0;
+}
+
+int main() {
+ S s(0);
+ char a = s;
+ const char *str = "msg1";
+#pragma omp target
+#pragma omp teams
+#pragma omp distribute parallel for simd num_threads(strict: 2) severity(warning) message("msg")
+ for (int i = 0; i < 100; i++) {
+ foo();
+ }
+#pragma omp target
+#pragma omp teams
+
+#pragma omp distribute parallel for simd num_threads(strict: a) severity(fatal) message(str)
+ for (int i = 0; i < 100; i++) {
+ foo();
+ }
+ return a + tmain<char, 5>() + tmain<S, 1>();
+}
+
+#endif
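+// The autogenerated checks below record the expected lowering: under -fopenmp
+// (CHECK1 on x86_64, CHECK9 on aarch64), each num_threads(strict: ...) clause
+// should emit a runtime call of the shape
+//   call void @__kmpc_push_num_threads_strict(ident, gtid, num_threads,
+//                                             severity, message)
+// immediately before the __kmpc_fork_call of the enclosed parallel region,
+// with severity(warning) encoded as 1 and severity(fatal) as 2, and message
+// pointing at the message() string (or the captured str variable). CHECK3 and
+// CHECK11 cover the same sources compiled with -fopenmp-simd instead.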
+// CHECK1-LABEL: define {{[^@]+}}@main
+// CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1
+// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[STR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4
+// CHECK1-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0)
+// CHECK1-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]])
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1
+// CHECK1-NEXT: store ptr @.str, ptr [[STR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK1: omp_offload.failed:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54() #[[ATTR3:[0-9]+]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK1: lpad:
+// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: cleanup
+// CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
+// CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1
+// CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
+// CHECK1-NEXT: br label [[EH_RESUME:%.*]]
+// CHECK1: omp_offload.cont:
+// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1
+// CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1
+// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8
+// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[STR]], align 8
+// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8
+// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP22]], align 8
+// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8
+// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP24]], align 8
+// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP25]], align 8
+// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
+// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8
+// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP29]], align 4
+// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK1-NEXT: store i32 2, ptr [[TMP30]], align 4
+// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP27]], ptr [[TMP31]], align 8
+// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP28]], ptr [[TMP32]], align 8
+// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP33]], align 8
+// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP34]], align 8
+// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP35]], align 8
+// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP36]], align 8
+// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP37]], align 8
+// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP38]], align 8
+// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP39]], align 4
+// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP40]], align 4
+// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP41]], align 4
+// CHECK1-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK1-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
+// CHECK1-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK1: omp_offload.failed3:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60(i64 [[TMP19]], ptr [[TMP20]]) #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK1: omp_offload.cont4:
+// CHECK1-NEXT: [[TMP44:%.*]] = load i8, ptr [[A]], align 1
+// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP44]] to i32
+// CHECK1-NEXT: [[CALL6:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv()
+// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
+// CHECK1: invoke.cont5:
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]]
+// CHECK1-NEXT: [[CALL8:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv()
+// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]]
+// CHECK1: invoke.cont7:
+// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]]
+// CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4
+// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
+// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK1-NEXT: ret i32 [[TMP45]]
+// CHECK1: eh.resume:
+// CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
+// CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
+// CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
+// CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]]
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El
+// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv
+// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: call void @_Z8mayThrowv()
+// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
+// CHECK1-NEXT: ret i8 [[CONV]]
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54
+// CHECK1-SAME: () #[[ATTR2:[0-9]+]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined)
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined
+// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]]
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 2, i32 1, ptr @.str.1), !llvm.access.group [[ACC_GRP15]]
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP15]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP19]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8:[0-9]+]], !llvm.access.group [[ACC_GRP19]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate
+// CHECK1-SAME: (ptr [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
+// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]]
+// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR8]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60
+// CHECK1-SAME: (i64 [[A:%.*]], ptr [[STR:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined, ptr [[A_ADDR]], ptr [[STR_ADDR]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined
+// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(1) [[A:%.*]], ptr nonnull align 8 dereferenceable(8) [[STR:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META24:![0-9]+]]
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]], !align [[META25:![0-9]+]]
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
+// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP0]], align 1, !llvm.access.group [[ACC_GRP26]]
+// CHECK1-NEXT: [[TMP10:%.*]] = sext i8 [[TMP9]] to i32
+// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP26]]
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP3]], i32 [[TMP10]], i32 2, ptr [[TMP11]]), !llvm.access.group [[ACC_GRP26]]
+// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP26]]
+// CHECK1-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
+// CHECK1-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined.omp_outlined, i64 [[TMP13]], i64 [[TMP15]]), !llvm.access.group [[ACC_GRP26]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
+// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP26]]
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
+// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
+// CHECK1-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP29]]
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP29]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP29]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
+// CHECK1-SAME: () #[[ATTR6:[0-9]+]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false)
+// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK1: omp_offload.failed:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37() #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK1: omp_offload.cont:
+// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8
+// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8
+// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4
+// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK1-NEXT: store i32 1, ptr [[TMP21]], align 4
+// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8
+// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8
+// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.2, ptr [[TMP24]], align 8
+// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP25]], align 8
+// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8
+// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8
+// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP28]], align 8
+// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP29]], align 8
+// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4
+// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP31]], align 4
+// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP32]], align 4
+// CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
+// CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK1: omp_offload.failed3:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42(ptr [[STR]]) #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK1: omp_offload.cont4:
+// CHECK1-NEXT: ret i32 0
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
+// CHECK1-SAME: () #[[ATTR6]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false)
+// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK1: omp_offload.failed:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37() #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK1: omp_offload.cont:
+// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8
+// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8
+// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4
+// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK1-NEXT: store i32 1, ptr [[TMP21]], align 4
+// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8
+// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8
+// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.4, ptr [[TMP24]], align 8
+// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP25]], align 8
+// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8
+// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8
+// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP28]], align 8
+// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP29]], align 8
+// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4
+// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP31]], align 4
+// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP32]], align 4
+// CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
+// CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK1: omp_offload.failed3:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42(ptr [[STR]]) #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK1: omp_offload.cont4:
+// CHECK1-NEXT: ret i32 0
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev
+// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]]
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El
+// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37
+// CHECK1-SAME: () #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined)
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined
+// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]]
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP32]]
+// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 2, ptr @.str.1), !llvm.access.group [[ACC_GRP32]]
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP32]]
+// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP32]]
+// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP32]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
+// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP32]]
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP35]]
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP35]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP35]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42
+// CHECK1-SAME: (ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]]
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined, ptr [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined
+// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]]
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]]
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP38]]
+// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP0]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 23, i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP38]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP38]]
+// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP38]]
+// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group [[ACC_GRP38]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP38]]
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
+// CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41:![0-9]+]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP41]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP41]]
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP41]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP41]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37
+// CHECK1-SAME: () #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined)
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined
+// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44:![0-9]+]]
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP44]]
+// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 2, ptr @.str.1), !llvm.access.group [[ACC_GRP44]]
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP44]]
+// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP44]]
+// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP44]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
+// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP44]]
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47:![0-9]+]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP47]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP47]]
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP47]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP47]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42
+// CHECK1-SAME: (ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]]
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined, ptr [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined
+// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]]
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50:![0-9]+]]
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP50]]
+// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 23)
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP50]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]])
+// CHECK1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP50]]
+// CHECK1: invoke.cont2:
+// CHECK1-NEXT: [[TMP8:%.*]] = sext i8 [[CALL]] to i32
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP0]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP8]], i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP50]]
+// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]], !llvm.access.group [[ACC_GRP50]]
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP50]]
+// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP50]]
+// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP50]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP50]]
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
+// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP17:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP18:%.*]] = extractvalue { ptr, i32 } [[TMP17]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP18]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP50]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53:![0-9]+]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP53]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP53]]
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP53]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP53]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev
+// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@main
+// CHECK3-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK3-NEXT: [[A:%.*]] = alloca i8, align 1
+// CHECK3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[STR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I7:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4
+// CHECK3-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0)
+// CHECK3-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]])
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: store i8 [[CALL]], ptr [[A]], align 1
+// CHECK3-NEXT: store ptr @.str, ptr [[STR]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]]
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
+// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP2]]
+// CHECK3: invoke.cont1:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK3: omp.body.continue:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
+// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP4]], 1
+// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
+// CHECK3: lpad:
+// CHECK3-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: cleanup
+// CHECK3-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
+// CHECK3-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
+// CHECK3-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
+// CHECK3-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6:[0-9]+]]
+// CHECK3-NEXT: br label [[EH_RESUME:%.*]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB5]], align 4
+// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4
+// CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV6]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]]
+// CHECK3: omp.inner.for.cond8:
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]]
+// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]]
+// CHECK3-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
+// CHECK3-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END17:%.*]]
+// CHECK3: omp.inner.for.body10:
+// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
+// CHECK3-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP11]], 1
+// CHECK3-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
+// CHECK3-NEXT: store i32 [[ADD12]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]]
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP6]]
+// CHECK3: invoke.cont13:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]]
+// CHECK3: omp.body.continue14:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]]
+// CHECK3: omp.inner.for.inc15:
+// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
+// CHECK3-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP12]], 1
+// CHECK3-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]]
+// CHECK3: omp.inner.for.end17:
+// CHECK3-NEXT: store i32 100, ptr [[I7]], align 4
+// CHECK3-NEXT: [[TMP13:%.*]] = load i8, ptr [[A]], align 1
+// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32
+// CHECK3-NEXT: [[CALL19:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv()
+// CHECK3-NEXT: to label [[INVOKE_CONT18:%.*]] unwind label [[LPAD]]
+// CHECK3: invoke.cont18:
+// CHECK3-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV]], [[CALL19]]
+// CHECK3-NEXT: [[CALL22:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv()
+// CHECK3-NEXT: to label [[INVOKE_CONT21:%.*]] unwind label [[LPAD]]
+// CHECK3: invoke.cont21:
+// CHECK3-NEXT: [[ADD23:%.*]] = add nsw i32 [[ADD20]], [[CALL22]]
+// CHECK3-NEXT: store i32 [[ADD23]], ptr [[RETVAL]], align 4
+// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6]]
+// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK3-NEXT: ret i32 [[TMP14]]
+// CHECK3: eh.resume:
+// CHECK3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
+// CHECK3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
+// CHECK3-NEXT: [[LPAD_VAL24:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
+// CHECK3-NEXT: resume { ptr, i32 } [[LPAD_VAL24]]
+// CHECK3: terminate.lpad:
+// CHECK3-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: catch ptr null
+// CHECK3-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
+// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP16]]) #[[ATTR7:[0-9]+]], !llvm.access.group [[ACC_GRP2]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1El
+// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]])
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1ScvcEv
+// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: call void @_Z8mayThrowv()
+// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
+// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
+// CHECK3-NEXT: ret i8 [[CONV]]
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate
+// CHECK3-SAME: (ptr [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat {
+// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR6]]
+// CHECK3-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
+// CHECK3-SAME: () #[[ATTR4:[0-9]+]] comdat personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I6:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false)
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
+// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP9]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK3: omp.body.continue:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
+// CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
+// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4
+// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]]
+// CHECK3: omp.inner.for.cond7:
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP12]]
+// CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]]
+// CHECK3: omp.inner.for.body9:
+// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]]
+// CHECK3-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1
+// CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
+// CHECK3-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP12]]
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP12]]
+// CHECK3: invoke.cont12:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]]
+// CHECK3: omp.body.continue13:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]]
+// CHECK3: omp.inner.for.inc14:
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]]
+// CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1
+// CHECK3-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]]
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]]
+// CHECK3: omp.inner.for.end16:
+// CHECK3-NEXT: store i32 100, ptr [[I6]], align 4
+// CHECK3-NEXT: ret i32 0
+// CHECK3: terminate.lpad:
+// CHECK3-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: catch ptr null
+// CHECK3-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0
+// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP9]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
+// CHECK3-SAME: () #[[ATTR4]] comdat personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1
+// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I6:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false)
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]]
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
+// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP15]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK3: omp.body.continue:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
+// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4
+// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]]
+// CHECK3: omp.inner.for.cond7:
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP18]]
+// CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]]
+// CHECK3: omp.inner.for.body9:
+// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]]
+// CHECK3-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1
+// CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
+// CHECK3-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP18]]
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP18]]
+// CHECK3: invoke.cont12:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]]
+// CHECK3: omp.body.continue13:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]]
+// CHECK3: omp.inner.for.inc14:
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]]
+// CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1
+// CHECK3-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]]
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP19:![0-9]+]]
+// CHECK3: omp.inner.for.end16:
+// CHECK3-NEXT: store i32 100, ptr [[I6]], align 4
+// CHECK3-NEXT: ret i32 0
+// CHECK3: terminate.lpad:
+// CHECK3-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: catch ptr null
+// CHECK3-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0
+// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP15]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev
+// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR6]]
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2El
+// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev
+// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@main
+// CHECK9-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK9-NEXT: [[A:%.*]] = alloca i8, align 1
+// CHECK9-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[STR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8
+// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8
+// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8
+// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4
+// CHECK9-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0)
+// CHECK9-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]])
+// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK9: invoke.cont:
+// CHECK9-NEXT: store i8 [[CALL]], ptr [[A]], align 1
+// CHECK9-NEXT: store ptr @.str, ptr [[STR]], align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
+// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.region_id, ptr [[KERNEL_ARGS]])
+// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK9: omp_offload.failed:
+// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54() #[[ATTR3:[0-9]+]]
+// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK9: lpad:
+// CHECK9-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
+// CHECK9-NEXT: cleanup
+// CHECK9-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
+// CHECK9-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8
+// CHECK9-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1
+// CHECK9-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
+// CHECK9-NEXT: br label [[EH_RESUME:%.*]]
+// CHECK9: omp_offload.cont:
+// CHECK9-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1
+// CHECK9-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1
+// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8
+// CHECK9-NEXT: [[TMP20:%.*]] = load ptr, ptr [[STR]], align 8
+// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8
+// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP22]], align 8
+// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK9-NEXT: store ptr null, ptr [[TMP23]], align 8
+// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK9-NEXT: store ptr [[TMP20]], ptr [[TMP24]], align 8
+// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK9-NEXT: store ptr [[TMP20]], ptr [[TMP25]], align 8
+// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
+// CHECK9-NEXT: store ptr null, ptr [[TMP26]], align 8
+// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK9-NEXT: store i32 3, ptr [[TMP29]], align 4
+// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK9-NEXT: store i32 2, ptr [[TMP30]], align 4
+// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK9-NEXT: store ptr [[TMP27]], ptr [[TMP31]], align 8
+// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK9-NEXT: store ptr [[TMP28]], ptr [[TMP32]], align 8
+// CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK9-NEXT: store ptr @.offload_sizes, ptr [[TMP33]], align 8
+// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP34]], align 8
+// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK9-NEXT: store ptr null, ptr [[TMP35]], align 8
+// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK9-NEXT: store ptr null, ptr [[TMP36]], align 8
+// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK9-NEXT: store i64 100, ptr [[TMP37]], align 8
+// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK9-NEXT: store i64 0, ptr [[TMP38]], align 8
+// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP39]], align 4
+// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP40]], align 4
+// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK9-NEXT: store i32 0, ptr [[TMP41]], align 4
+// CHECK9-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK9-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
+// CHECK9-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK9: omp_offload.failed3:
+// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60(i64 [[TMP19]], ptr [[TMP20]]) #[[ATTR3]]
+// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK9: omp_offload.cont4:
+// CHECK9-NEXT: [[TMP44:%.*]] = load i8, ptr [[A]], align 1
+// CHECK9-NEXT: [[CONV:%.*]] = sext i8 [[TMP44]] to i32
+// CHECK9-NEXT: [[CALL6:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv()
+// CHECK9-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
+// CHECK9: invoke.cont5:
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]]
+// CHECK9-NEXT: [[CALL8:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv()
+// CHECK9-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]]
+// CHECK9: invoke.cont7:
+// CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]]
+// CHECK9-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4
+// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
+// CHECK9-NEXT: [[TMP45:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK9-NEXT: ret i32 [[TMP45]]
+// CHECK9: eh.resume:
+// CHECK9-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
+// CHECK9-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK9-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
+// CHECK9-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
+// CHECK9-NEXT: resume { ptr, i32 } [[LPAD_VAL10]]
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC1El
+// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK9-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]])
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@_ZN1ScvcEv
+// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: call void @_Z8mayThrowv()
+// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
+// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
+// CHECK9-NEXT: ret i8 [[CONV]]
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54
+// CHECK9-SAME: () #[[ATTR2:[0-9]+]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined)
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined
+// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK9: cond.true:
+// CHECK9-NEXT: br label [[COND_END:%.*]]
+// CHECK9: cond.false:
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: br label [[COND_END]]
+// CHECK9: cond.end:
+// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK9: omp.inner.for.cond:
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]]
+// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK9: omp.inner.for.body:
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 2, i32 1, ptr @.str.1), !llvm.access.group [[ACC_GRP15]]
+// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP15]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK9: omp.inner.for.inc:
+// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
+// CHECK9: omp.inner.for.end:
+// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK9: omp.loop.exit:
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK9: .omp.final.then:
+// CHECK9-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK9: .omp.final.done:
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined.omp_outlined
+// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK9: cond.true:
+// CHECK9-NEXT: br label [[COND_END:%.*]]
+// CHECK9: cond.false:
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: br label [[COND_END]]
+// CHECK9: cond.end:
+// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK9: omp.inner.for.cond:
+// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]]
+// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK9: omp.inner.for.body:
+// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK9-NEXT: invoke void @_Z3foov()
+// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP19]]
+// CHECK9: invoke.cont:
+// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK9: omp.body.continue:
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK9: omp.inner.for.inc:
+// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
+// CHECK9: omp.inner.for.end:
+// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK9: omp.loop.exit:
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK9: .omp.final.then:
+// CHECK9-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK9: .omp.final.done:
+// CHECK9-NEXT: ret void
+// CHECK9: terminate.lpad:
+// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK9-NEXT: catch ptr null
+// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8:[0-9]+]], !llvm.access.group [[ACC_GRP19]]
+// CHECK9-NEXT: unreachable
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@__clang_call_terminate
+// CHECK9-SAME: (ptr [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
+// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]]
+// CHECK9-NEXT: call void @_ZSt9terminatev() #[[ATTR8]]
+// CHECK9-NEXT: unreachable
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60
+// CHECK9-SAME: (i64 [[A:%.*]], ptr [[STR:%.*]]) #[[ATTR2]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined, ptr [[A_ADDR]], ptr [[STR_ADDR]])
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined
+// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(1) [[A:%.*]], ptr nonnull align 8 dereferenceable(8) [[STR:%.*]]) #[[ATTR2]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META24:![0-9]+]]
+// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]], !align [[META25:![0-9]+]]
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK9: cond.true:
+// CHECK9-NEXT: br label [[COND_END:%.*]]
+// CHECK9: cond.false:
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: br label [[COND_END]]
+// CHECK9: cond.end:
+// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK9: omp.inner.for.cond:
+// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]]
+// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
+// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK9: omp.inner.for.body:
+// CHECK9-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP0]], align 1, !llvm.access.group [[ACC_GRP26]]
+// CHECK9-NEXT: [[TMP10:%.*]] = sext i8 [[TMP9]] to i32
+// CHECK9-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP26]]
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP3]], i32 [[TMP10]], i32 2, ptr [[TMP11]]), !llvm.access.group [[ACC_GRP26]]
+// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP26]]
+// CHECK9-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
+// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
+// CHECK9-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined.omp_outlined, i64 [[TMP13]], i64 [[TMP15]]), !llvm.access.group [[ACC_GRP26]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK9: omp.inner.for.inc:
+// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
+// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP26]]
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
+// CHECK9: omp.inner.for.end:
+// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK9: omp.loop.exit:
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
+// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
+// CHECK9-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK9: .omp.final.then:
+// CHECK9-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK9: .omp.final.done:
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined.omp_outlined
+// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK9: cond.true:
+// CHECK9-NEXT: br label [[COND_END:%.*]]
+// CHECK9: cond.false:
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: br label [[COND_END]]
+// CHECK9: cond.end:
+// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK9: omp.inner.for.cond:
+// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]]
+// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
+// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK9: omp.inner.for.body:
+// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
+// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP29]]
+// CHECK9-NEXT: invoke void @_Z3foov()
+// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP29]]
+// CHECK9: invoke.cont:
+// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK9: omp.body.continue:
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK9: omp.inner.for.inc:
+// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
+// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
+// CHECK9: omp.inner.for.end:
+// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK9: omp.loop.exit:
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK9: .omp.final.then:
+// CHECK9-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK9: .omp.final.done:
+// CHECK9-NEXT: ret void
+// CHECK9: terminate.lpad:
+// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK9-NEXT: catch ptr null
+// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP29]]
+// CHECK9-NEXT: unreachable
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
+// CHECK9-SAME: () #[[ATTR6:[0-9]+]] comdat {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
+// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false)
+// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
+// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.region_id, ptr [[KERNEL_ARGS]])
+// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK9: omp_offload.failed:
+// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37() #[[ATTR3]]
+// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK9: omp_offload.cont:
+// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK9-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8
+// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK9-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8
+// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8
+// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK9-NEXT: store i32 3, ptr [[TMP20]], align 4
+// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK9-NEXT: store i32 1, ptr [[TMP21]], align 4
+// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK9-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8
+// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK9-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8
+// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK9-NEXT: store ptr @.offload_sizes.2, ptr [[TMP24]], align 8
+// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK9-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP25]], align 8
+// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK9-NEXT: store ptr null, ptr [[TMP26]], align 8
+// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK9-NEXT: store ptr null, ptr [[TMP27]], align 8
+// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK9-NEXT: store i64 100, ptr [[TMP28]], align 8
+// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK9-NEXT: store i64 0, ptr [[TMP29]], align 8
+// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4
+// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP31]], align 4
+// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK9-NEXT: store i32 0, ptr [[TMP32]], align 4
+// CHECK9-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK9-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
+// CHECK9-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK9: omp_offload.failed3:
+// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42(ptr [[STR]]) #[[ATTR3]]
+// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK9: omp_offload.cont4:
+// CHECK9-NEXT: ret i32 0
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
+// CHECK9-SAME: () #[[ATTR6]] comdat {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
+// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false)
+// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
+// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.region_id, ptr [[KERNEL_ARGS]])
+// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK9: omp_offload.failed:
+// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37() #[[ATTR3]]
+// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK9: omp_offload.cont:
+// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK9-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8
+// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK9-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8
+// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8
+// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK9-NEXT: store i32 3, ptr [[TMP20]], align 4
+// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK9-NEXT: store i32 1, ptr [[TMP21]], align 4
+// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK9-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8
+// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK9-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8
+// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK9-NEXT: store ptr @.offload_sizes.4, ptr [[TMP24]], align 8
+// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK9-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP25]], align 8
+// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK9-NEXT: store ptr null, ptr [[TMP26]], align 8
+// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK9-NEXT: store ptr null, ptr [[TMP27]], align 8
+// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK9-NEXT: store i64 100, ptr [[TMP28]], align 8
+// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK9-NEXT: store i64 0, ptr [[TMP29]], align 8
+// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4
+// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP31]], align 4
+// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK9-NEXT: store i32 0, ptr [[TMP32]], align 4
+// CHECK9-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK9-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
+// CHECK9-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK9: omp_offload.failed3:
+// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42(ptr [[STR]]) #[[ATTR3]]
+// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK9: omp_offload.cont4:
+// CHECK9-NEXT: ret i32 0
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD1Ev
+// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]]
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC2El
+// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37
+// CHECK9-SAME: () #[[ATTR2]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined)
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined
+// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK9: cond.true:
+// CHECK9-NEXT: br label [[COND_END:%.*]]
+// CHECK9: cond.false:
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: br label [[COND_END]]
+// CHECK9: cond.end:
+// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK9: omp.inner.for.cond:
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]]
+// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP32]]
+// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK9: omp.inner.for.body:
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 2, ptr @.str.1), !llvm.access.group [[ACC_GRP32]]
+// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP32]]
+// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP32]]
+// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP32]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK9: omp.inner.for.inc:
+// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
+// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP32]]
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
+// CHECK9: omp.inner.for.end:
+// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK9: omp.loop.exit:
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK9: .omp.final.then:
+// CHECK9-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK9: .omp.final.done:
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined.omp_outlined
+// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK9: cond.true:
+// CHECK9-NEXT: br label [[COND_END:%.*]]
+// CHECK9: cond.false:
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: br label [[COND_END]]
+// CHECK9: cond.end:
+// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK9: omp.inner.for.cond:
+// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]]
+// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
+// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK9: omp.inner.for.body:
+// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
+// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP35]]
+// CHECK9-NEXT: invoke void @_Z3foov()
+// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP35]]
+// CHECK9: invoke.cont:
+// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK9: omp.body.continue:
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK9: omp.inner.for.inc:
+// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
+// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
+// CHECK9: omp.inner.for.end:
+// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK9: omp.loop.exit:
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK9: .omp.final.then:
+// CHECK9-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK9: .omp.final.done:
+// CHECK9-NEXT: ret void
+// CHECK9: terminate.lpad:
+// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK9-NEXT: catch ptr null
+// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP35]]
+// CHECK9-NEXT: unreachable
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42
+// CHECK9-SAME: (ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined, ptr [[TMP0]])
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined
+// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK9: cond.true:
+// CHECK9-NEXT: br label [[COND_END:%.*]]
+// CHECK9: cond.false:
+// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: br label [[COND_END]]
+// CHECK9: cond.end:
+// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK9: omp.inner.for.cond:
+// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]]
+// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP38]]
+// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK9: omp.inner.for.body:
+// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP0]], i64 0, i64 0
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 23, i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP38]]
+// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP38]]
+// CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
+// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP38]]
+// CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group [[ACC_GRP38]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK9: omp.inner.for.inc:
+// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
+// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP38]]
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
+// CHECK9: omp.inner.for.end:
+// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK9: omp.loop.exit:
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
+// CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK9: .omp.final.then:
+// CHECK9-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK9: .omp.final.done:
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined.omp_outlined
+// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK9: cond.true:
+// CHECK9-NEXT: br label [[COND_END:%.*]]
+// CHECK9: cond.false:
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: br label [[COND_END]]
+// CHECK9: cond.end:
+// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK9: omp.inner.for.cond:
+// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41:![0-9]+]]
+// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP41]]
+// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK9: omp.inner.for.body:
+// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
+// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP41]]
+// CHECK9-NEXT: invoke void @_Z3foov()
+// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP41]]
+// CHECK9: invoke.cont:
+// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK9: omp.body.continue:
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK9: omp.inner.for.inc:
+// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
+// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
+// CHECK9: omp.inner.for.end:
+// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK9: omp.loop.exit:
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK9: .omp.final.then:
+// CHECK9-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK9: .omp.final.done:
+// CHECK9-NEXT: ret void
+// CHECK9: terminate.lpad:
+// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK9-NEXT: catch ptr null
+// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP41]]
+// CHECK9-NEXT: unreachable
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37
+// CHECK9-SAME: () #[[ATTR2]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined)
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined
+// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK9: cond.true:
+// CHECK9-NEXT: br label [[COND_END:%.*]]
+// CHECK9: cond.false:
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: br label [[COND_END]]
+// CHECK9: cond.end:
+// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK9: omp.inner.for.cond:
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44:![0-9]+]]
+// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP44]]
+// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK9: omp.inner.for.body:
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 2, ptr @.str.1), !llvm.access.group [[ACC_GRP44]]
+// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP44]]
+// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP44]]
+// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP44]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK9: omp.inner.for.inc:
+// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
+// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP44]]
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
+// CHECK9: omp.inner.for.end:
+// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK9: omp.loop.exit:
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK9: .omp.final.then:
+// CHECK9-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK9: .omp.final.done:
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined.omp_outlined
+// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK9: cond.true:
+// CHECK9-NEXT: br label [[COND_END:%.*]]
+// CHECK9: cond.false:
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: br label [[COND_END]]
+// CHECK9: cond.end:
+// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK9: omp.inner.for.cond:
+// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47:![0-9]+]]
+// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP47]]
+// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK9: omp.inner.for.body:
+// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
+// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP47]]
+// CHECK9-NEXT: invoke void @_Z3foov()
+// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP47]]
+// CHECK9: invoke.cont:
+// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK9: omp.body.continue:
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK9: omp.inner.for.inc:
+// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
+// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
+// CHECK9: omp.inner.for.end:
+// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK9: omp.loop.exit:
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK9: .omp.final.then:
+// CHECK9-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK9: .omp.final.done:
+// CHECK9-NEXT: ret void
+// CHECK9: terminate.lpad:
+// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK9-NEXT: catch ptr null
+// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP47]]
+// CHECK9-NEXT: unreachable
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42
+// CHECK9-SAME: (ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined, ptr [[TMP0]])
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined
+// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK9: cond.true:
+// CHECK9-NEXT: br label [[COND_END:%.*]]
+// CHECK9: cond.false:
+// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: br label [[COND_END]]
+// CHECK9: cond.end:
+// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK9: omp.inner.for.cond:
+// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50:![0-9]+]]
+// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP50]]
+// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK9: omp.inner.for.body:
+// CHECK9-NEXT: invoke void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 23)
+// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP50]]
+// CHECK9: invoke.cont:
+// CHECK9-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]])
+// CHECK9-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP50]]
+// CHECK9: invoke.cont2:
+// CHECK9-NEXT: [[TMP8:%.*]] = sext i8 [[CALL]] to i32
+// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP0]], i64 0, i64 0
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP8]], i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP50]]
+// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]], !llvm.access.group [[ACC_GRP50]]
+// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP50]]
+// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP50]]
+// CHECK9-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP50]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK9: omp.inner.for.inc:
+// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
+// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP50]]
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
+// CHECK9: omp.inner.for.end:
+// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK9: omp.loop.exit:
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
+// CHECK9-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK9: .omp.final.then:
+// CHECK9-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK9: .omp.final.done:
+// CHECK9-NEXT: ret void
+// CHECK9: terminate.lpad:
+// CHECK9-NEXT: [[TMP17:%.*]] = landingpad { ptr, i32 }
+// CHECK9-NEXT: catch ptr null
+// CHECK9-NEXT: [[TMP18:%.*]] = extractvalue { ptr, i32 } [[TMP17]], 0
+// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP18]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP50]]
+// CHECK9-NEXT: unreachable
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined.omp_outlined
+// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK9: cond.true:
+// CHECK9-NEXT: br label [[COND_END:%.*]]
+// CHECK9: cond.false:
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: br label [[COND_END]]
+// CHECK9: cond.end:
+// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK9: omp.inner.for.cond:
+// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53:![0-9]+]]
+// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP53]]
+// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK9: omp.inner.for.body:
+// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
+// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP53]]
+// CHECK9-NEXT: invoke void @_Z3foov()
+// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP53]]
+// CHECK9: invoke.cont:
+// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK9: omp.body.continue:
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK9: omp.inner.for.inc:
+// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
+// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
+// CHECK9: omp.inner.for.end:
+// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK9: omp.loop.exit:
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK9: .omp.final.then:
+// CHECK9-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK9: .omp.final.done:
+// CHECK9-NEXT: ret void
+// CHECK9: terminate.lpad:
+// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK9-NEXT: catch ptr null
+// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP53]]
+// CHECK9-NEXT: unreachable
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD2Ev
+// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@main
+// CHECK11-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK11-NEXT: [[A:%.*]] = alloca i8, align 1
+// CHECK11-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[STR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I7:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store i32 0, ptr [[RETVAL]], align 4
+// CHECK11-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0)
+// CHECK11-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]])
+// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK11: invoke.cont:
+// CHECK11-NEXT: store i8 [[CALL]], ptr [[A]], align 1
+// CHECK11-NEXT: store ptr @.str, ptr [[STR]], align 8
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
+// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]]
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
+// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
+// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
+// CHECK11-NEXT: invoke void @_Z3foov()
+// CHECK11-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP2]]
+// CHECK11: invoke.cont1:
+// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK11: omp.body.continue:
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
+// CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP4]], 1
+// CHECK11-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
+// CHECK11: lpad:
+// CHECK11-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
+// CHECK11-NEXT: cleanup
+// CHECK11-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
+// CHECK11-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
+// CHECK11-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
+// CHECK11-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6:[0-9]+]]
+// CHECK11-NEXT: br label [[EH_RESUME:%.*]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB5]], align 4
+// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4
+// CHECK11-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV6]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]]
+// CHECK11: omp.inner.for.cond8:
+// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]]
+// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]]
+// CHECK11-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
+// CHECK11-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END17:%.*]]
+// CHECK11: omp.inner.for.body10:
+// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
+// CHECK11-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP11]], 1
+// CHECK11-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
+// CHECK11-NEXT: store i32 [[ADD12]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]]
+// CHECK11-NEXT: invoke void @_Z3foov()
+// CHECK11-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP6]]
+// CHECK11: invoke.cont13:
+// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]]
+// CHECK11: omp.body.continue14:
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]]
+// CHECK11: omp.inner.for.inc15:
+// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
+// CHECK11-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP12]], 1
+// CHECK11-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]]
+// CHECK11: omp.inner.for.end17:
+// CHECK11-NEXT: store i32 100, ptr [[I7]], align 4
+// CHECK11-NEXT: [[TMP13:%.*]] = load i8, ptr [[A]], align 1
+// CHECK11-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32
+// CHECK11-NEXT: [[CALL19:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv()
+// CHECK11-NEXT: to label [[INVOKE_CONT18:%.*]] unwind label [[LPAD]]
+// CHECK11: invoke.cont18:
+// CHECK11-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV]], [[CALL19]]
+// CHECK11-NEXT: [[CALL22:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv()
+// CHECK11-NEXT: to label [[INVOKE_CONT21:%.*]] unwind label [[LPAD]]
+// CHECK11: invoke.cont21:
+// CHECK11-NEXT: [[ADD23:%.*]] = add nsw i32 [[ADD20]], [[CALL22]]
+// CHECK11-NEXT: store i32 [[ADD23]], ptr [[RETVAL]], align 4
+// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6]]
+// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK11-NEXT: ret i32 [[TMP14]]
+// CHECK11: eh.resume:
+// CHECK11-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
+// CHECK11-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK11-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
+// CHECK11-NEXT: [[LPAD_VAL24:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
+// CHECK11-NEXT: resume { ptr, i32 } [[LPAD_VAL24]]
+// CHECK11: terminate.lpad:
+// CHECK11-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
+// CHECK11-NEXT: catch ptr null
+// CHECK11-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
+// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP16]]) #[[ATTR7:[0-9]+]], !llvm.access.group [[ACC_GRP2]]
+// CHECK11-NEXT: unreachable
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC1El
+// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK11-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]])
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_ZN1ScvcEv
+// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: call void @_Z8mayThrowv()
+// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
+// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
+// CHECK11-NEXT: ret i8 [[CONV]]
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@__clang_call_terminate
+// CHECK11-SAME: (ptr [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat {
+// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR6]]
+// CHECK11-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
+// CHECK11-NEXT: unreachable
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
+// CHECK11-SAME: () #[[ATTR4:[0-9]+]] comdat personality ptr @__gxx_personality_v0 {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I6:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false)
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
+// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
+// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
+// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
+// CHECK11-NEXT: invoke void @_Z3foov()
+// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP9]]
+// CHECK11: invoke.cont:
+// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK11: omp.body.continue:
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
+// CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
+// CHECK11-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4
+// CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]]
+// CHECK11: omp.inner.for.cond7:
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP12]]
+// CHECK11-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK11-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]]
+// CHECK11: omp.inner.for.body9:
+// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]]
+// CHECK11-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1
+// CHECK11-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
+// CHECK11-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP12]]
+// CHECK11-NEXT: invoke void @_Z3foov()
+// CHECK11-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP12]]
+// CHECK11: invoke.cont12:
+// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]]
+// CHECK11: omp.body.continue13:
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]]
+// CHECK11: omp.inner.for.inc14:
+// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]]
+// CHECK11-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1
+// CHECK11-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]]
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]]
+// CHECK11: omp.inner.for.end16:
+// CHECK11-NEXT: store i32 100, ptr [[I6]], align 4
+// CHECK11-NEXT: ret i32 0
+// CHECK11: terminate.lpad:
+// CHECK11-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 }
+// CHECK11-NEXT: catch ptr null
+// CHECK11-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0
+// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP9]]
+// CHECK11-NEXT: unreachable
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
+// CHECK11-SAME: () #[[ATTR4]] comdat personality ptr @__gxx_personality_v0 {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1
+// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[I6:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false)
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK11: omp.inner.for.cond:
+// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]]
+// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
+// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK11: omp.inner.for.body:
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK11-NEXT: invoke void @_Z3foov()
+// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP15]]
+// CHECK11: invoke.cont:
+// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK11: omp.body.continue:
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK11: omp.inner.for.inc:
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
+// CHECK11-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
+// CHECK11: omp.inner.for.end:
+// CHECK11-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4
+// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4
+// CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]]
+// CHECK11: omp.inner.for.cond7:
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP18]]
+// CHECK11-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK11-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]]
+// CHECK11: omp.inner.for.body9:
+// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]]
+// CHECK11-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1
+// CHECK11-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
+// CHECK11-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP18]]
+// CHECK11-NEXT: invoke void @_Z3foov()
+// CHECK11-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP18]]
+// CHECK11: invoke.cont12:
+// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]]
+// CHECK11: omp.body.continue13:
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]]
+// CHECK11: omp.inner.for.inc14:
+// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]]
+// CHECK11-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1
+// CHECK11-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]]
+// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP19:![0-9]+]]
+// CHECK11: omp.inner.for.end16:
+// CHECK11-NEXT: store i32 100, ptr [[I6]], align 4
+// CHECK11-NEXT: ret i32 0
+// CHECK11: terminate.lpad:
+// CHECK11-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 }
+// CHECK11-NEXT: catch ptr null
+// CHECK11-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0
+// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP15]]
+// CHECK11-NEXT: unreachable
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD1Ev
+// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR6]]
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC2El
+// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK11-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD2Ev
+// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK11-NEXT: ret void
+//
diff --git a/clang/test/OpenMP/error_codegen.cpp b/clang/test/OpenMP/error_codegen.cpp
index 70d493e..0efb0ab 100644
--- a/clang/test/OpenMP/error_codegen.cpp
+++ b/clang/test/OpenMP/error_codegen.cpp
@@ -1,8 +1,9 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ --version 5
// RUN: %clang_cc1 -std=c++11 -fopenmp -fopenmp-version=51 -triple x86_64 \
-// RUN: -emit-llvm -o - %s | FileCheck %s
+// RUN: -emit-llvm -o - %s | FileCheck --check-prefix OMP51 %s
// RUN: %clang_cc1 -std=c++11 -fopenmp -fopenmp-version=60 -triple x86_64 \
-// RUN: -emit-llvm -o - %s | FileCheck %s
+// RUN: -emit-llvm -o - %s | FileCheck --check-prefix OMP60 %s
// RUN: %clang_cc1 -std=c++11 -fopenmp-simd -fopenmp-version=51 \
// RUN: -debug-info-kind=limited -triple x86_64 -emit-llvm -o - %s | \
@@ -12,20 +13,6 @@
// RUN: -debug-info-kind=limited -triple x86_64 -emit-llvm -o - %s | \
// RUN: FileCheck --check-prefix SIMD %s
-//CHECK: @.str = private unnamed_addr constant [23 x i8] c"GPU compiler required.\00", align 1
-//CHECK: @0 = private unnamed_addr constant {{.*}}error_codegen.cpp;main;59;1;;\00", align 1
-//CHECK: @1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{.*}}, ptr @0 }, align 8
-//CHECK: @.str.1 = private unnamed_addr constant [27 x i8] c"Note this is functioncall.\00", align 1
-//CHECK: @2 = private unnamed_addr constant {{.*}}error_codegen.cpp;main;61;1;;\00", align 1
-//CHECK: @3 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{.*}}, ptr @2 }, align 8
-//CHECK: @.str.2 = private unnamed_addr constant [23 x i8] c"GNU compiler required.\00", align 1
-//CHECK: @4 = private unnamed_addr constant {{.*}}error_codegen.cpp;tmain;36;1;;\00", align 1
-//CHECK: @5 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{.*}}, ptr @4 }, align 8
-//CHECK: @.str.3 = private unnamed_addr constant [22 x i8] c"Notice: add for loop.\00", align 1
-//CHECK: @6 = private unnamed_addr constant {{.*}}error_codegen.cpp;tmain;39;1;;\00", align 1
-//CHECK: @7 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{.*}}, ptr @6 }, align 8
-//CHECK: @8 = private unnamed_addr constant {{.*}}error_codegen.cpp;tmain;45;1;;\00", align 1
-//CHECK: @9 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{.*}}, ptr @8 }, align 8
void foo() {}
@@ -33,42 +20,723 @@ template <typename T, int N>
int tmain(T argc, char **argv) {
T b = argc, c, d, e, f, g;
static int a;
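+  // Note: the declarations below exercise the message() clause with each
+  // char-typed argument form: const array, pointer-to-const, mutable array,
+  // mutable pointer, and const-qualified pointer.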
+ const char str1[] = "msg";
+ const char *str2 = "msg";
+ char str3[] = "msg";
+ char *str4 = str3;
+ char * const str5 = str3;
#pragma omp error at(execution) severity(fatal) message("GNU compiler required.")
+#pragma omp error at(execution) severity(fatal) message(str1)
+#pragma omp error at(execution) severity(fatal) message(str2)
+#pragma omp error at(execution) severity(fatal) message(str3)
+#pragma omp error at(execution) severity(fatal) message(str4)
+#pragma omp error at(execution) severity(fatal) message(str5)
a = argv[0][0];
++a;
#pragma omp error at(execution) severity(warning) message("Notice: add for loop.")
+#pragma omp error at(execution) severity(warning) message(str1)
+#pragma omp error at(execution) severity(warning) message(str2)
+#pragma omp error at(execution) severity(warning) message(str3)
+#pragma omp error at(execution) severity(warning) message(str4)
+#pragma omp error at(execution) severity(warning) message(str5)
{
int b = 10;
T c = 100;
a = b + c;
}
-#pragma omp error at(execution) severity(fatal) message("GPU compiler required.")
+#pragma omp error at(execution) severity(fatal) message("GPU compiler required.")
+#pragma omp error at(execution) severity(fatal) message(str1)
+#pragma omp error at(execution) severity(fatal) message(str2)
+#pragma omp error at(execution) severity(fatal) message(str3)
+#pragma omp error at(execution) severity(fatal) message(str4)
+#pragma omp error at(execution) severity(fatal) message(str5)
foo();
return N;
}
-// CHECK-LABEL: @main(
-// SIMD-LABEL: @main(
-// CHECK: call void @__kmpc_error(ptr @1, i32 2, ptr @.str)
-// SIMD-NOT: call void @__kmpc_error(ptr @1, i32 2, ptr @.str)
-// CHECK: call void @__kmpc_error(ptr @3, i32 1, ptr @.str.1)
-// SIMD-NOT: call void @__kmpc_error(ptr @3, i32 1, ptr @.str.1)
-//
+
+
+
int main (int argc, char **argv) {
int b = argc, c, d, e, f, g;
static int a;
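+  // Note: same message() argument forms as in tmain, exercised from main.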
+ const char str1[] = "msg";
+ const char *str2 = "msg";
+ char str3[] = "msg";
+ char *str4 = str3;
+ char * const str5 = str3;
#pragma omp error at(execution) severity(fatal) message("GPU compiler required.")
+#pragma omp error at(execution) severity(fatal) message(str1)
+#pragma omp error at(execution) severity(fatal) message(str2)
+#pragma omp error at(execution) severity(fatal) message(str3)
+#pragma omp error at(execution) severity(fatal) message(str4)
+#pragma omp error at(execution) severity(fatal) message(str5)
a=2;
#pragma omp error at(execution) severity(warning) message("Note this is functioncall.")
+#pragma omp error at(execution) severity(warning) message(str1)
+#pragma omp error at(execution) severity(warning) message(str2)
+#pragma omp error at(execution) severity(warning) message(str3)
+#pragma omp error at(execution) severity(warning) message(str4)
+#pragma omp error at(execution) severity(warning) message(str5)
foo();
tmain<int, 10>(argc, argv);
}
-//CHECK-LABEL: @_Z5tmainIiLi10EEiT_PPc(
-//SIMD-LABEL: @_Z5tmainIiLi10EEiT_PPc(
-//CHECK: call void @__kmpc_error(ptr @5, i32 2, ptr @.str.2)
-//CHECK: call void @__kmpc_error(ptr @7, i32 1, ptr @.str.3)
-//CHECK: call void @__kmpc_error(ptr @9, i32 2, ptr @.str)
-//SIMD-NOT: call void @__kmpc_error(ptr @5, i32 2, ptr @.str.2)
-//SIMD-NOT: call void @__kmpc_error(ptr @7, i32 1, ptr @.str.3)
-//SIMD-NOT: call void @__kmpc_error(ptr @9, i32 2, ptr @.str)
-//CHECK: ret i32 10
+// CHECK-LABEL: define dso_local void @_Z3foov(
+// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: ret void
+// CHECK-LABEL: define dso_local noundef i32 @main(
+// CHECK-SAME: i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR1:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[D:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[E:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[F:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[G:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[STR1:%.*]] = alloca [4 x i8], align 1
+// CHECK-NEXT: [[STR2:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[STR3:%.*]] = alloca [4 x i8], align 1
+// CHECK-NEXT: [[STR4:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[STR5:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
+// CHECK-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
+// CHECK-NEXT: store i32 [[TMP0]], ptr [[B]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR1]], ptr align 1 @__const.main.str1, i64 4, i1 false)
+// CHECK-NEXT: store ptr @.str, ptr [[STR2]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR3]], ptr align 1 @__const.main.str3, i64 4, i1 false)
+// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// CHECK-NEXT: store ptr [[ARRAYDECAY]], ptr [[STR4]], align 8
+// CHECK-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// CHECK-NEXT: store ptr [[ARRAYDECAY1]], ptr [[STR5]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB1:[0-9]+]], i32 2, ptr @.str.1)
+// CHECK-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB3:[0-9]+]], i32 2, ptr [[ARRAYDECAY2]])
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR2]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB5:[0-9]+]], i32 2, ptr [[TMP1]])
+// CHECK-NEXT: [[ARRAYDECAY3:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB7:[0-9]+]], i32 2, ptr [[ARRAYDECAY3]])
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[STR4]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB9:[0-9]+]], i32 2, ptr [[TMP2]])
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[STR5]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB11:[0-9]+]], i32 2, ptr [[TMP3]])
+// CHECK-NEXT: store i32 2, ptr @_ZZ4mainE1a, align 4
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB13:[0-9]+]], i32 1, ptr @.str.2)
+// CHECK-NEXT: [[ARRAYDECAY4:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB15:[0-9]+]], i32 1, ptr [[ARRAYDECAY4]])
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[STR2]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB17:[0-9]+]], i32 1, ptr [[TMP4]])
+// CHECK-NEXT: [[ARRAYDECAY5:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB19:[0-9]+]], i32 1, ptr [[ARRAYDECAY5]])
+// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[STR4]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB21:[0-9]+]], i32 1, ptr [[TMP5]])
+// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[STR5]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB23:[0-9]+]], i32 1, ptr [[TMP6]])
+// CHECK-NEXT: call void @_Z3foov()
+// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
+// CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8
+// CHECK-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_PPc(i32 noundef [[TMP7]], ptr noundef [[TMP8]])
+// CHECK-NEXT: ret i32 0
+// CHECK-LABEL: define linkonce_odr noundef i32 @_Z5tmainIiLi10EEiT_PPc(
+// CHECK-SAME: i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0]] comdat {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[D:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[E:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[F:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[G:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[STR1:%.*]] = alloca [4 x i8], align 1
+// CHECK-NEXT: [[STR2:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[STR3:%.*]] = alloca [4 x i8], align 1
+// CHECK-NEXT: [[STR4:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[STR5:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[B7:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[C8:%.*]] = alloca i32, align 4
+// CHECK-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
+// CHECK-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
+// CHECK-NEXT: store i32 [[TMP0]], ptr [[B]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR1]], ptr align 1 @__const._Z5tmainIiLi10EEiT_PPc.str1, i64 4, i1 false)
+// CHECK-NEXT: store ptr @.str, ptr [[STR2]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR3]], ptr align 1 @__const._Z5tmainIiLi10EEiT_PPc.str3, i64 4, i1 false)
+// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// CHECK-NEXT: store ptr [[ARRAYDECAY]], ptr [[STR4]], align 8
+// CHECK-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// CHECK-NEXT: store ptr [[ARRAYDECAY1]], ptr [[STR5]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB25:[0-9]+]], i32 2, ptr @.str.3)
+// CHECK-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB27:[0-9]+]], i32 2, ptr [[ARRAYDECAY2]])
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR2]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB29:[0-9]+]], i32 2, ptr [[TMP1]])
+// CHECK-NEXT: [[ARRAYDECAY3:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB31:[0-9]+]], i32 2, ptr [[ARRAYDECAY3]])
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[STR4]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB33:[0-9]+]], i32 2, ptr [[TMP2]])
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[STR5]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB35:[0-9]+]], i32 2, ptr [[TMP3]])
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8
+// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP4]], i64 0
+// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 0
+// CHECK-NEXT: [[TMP6:%.*]] = load i8, ptr [[ARRAYIDX4]], align 1
+// CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP6]] to i32
+// CHECK-NEXT: store i32 [[CONV]], ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4
+// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4
+// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1
+// CHECK-NEXT: store i32 [[INC]], ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB37:[0-9]+]], i32 1, ptr @.str.4)
+// CHECK-NEXT: [[ARRAYDECAY5:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB39:[0-9]+]], i32 1, ptr [[ARRAYDECAY5]])
+// CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[STR2]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB41:[0-9]+]], i32 1, ptr [[TMP8]])
+// CHECK-NEXT: [[ARRAYDECAY6:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB43:[0-9]+]], i32 1, ptr [[ARRAYDECAY6]])
+// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[STR4]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB45:[0-9]+]], i32 1, ptr [[TMP9]])
+// CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[STR5]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB47:[0-9]+]], i32 1, ptr [[TMP10]])
+// CHECK-NEXT: store i32 10, ptr [[B7]], align 4
+// CHECK-NEXT: store i32 100, ptr [[C8]], align 4
+// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[B7]], align 4
+// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[C8]], align 4
+// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// CHECK-NEXT: store i32 [[ADD]], ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB49:[0-9]+]], i32 2, ptr @.str.1)
+// CHECK-NEXT: [[ARRAYDECAY9:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB51:[0-9]+]], i32 2, ptr [[ARRAYDECAY9]])
+// CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[STR2]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB53:[0-9]+]], i32 2, ptr [[TMP13]])
+// CHECK-NEXT: [[ARRAYDECAY10:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB55:[0-9]+]], i32 2, ptr [[ARRAYDECAY10]])
+// CHECK-NEXT: [[TMP14:%.*]] = load ptr, ptr [[STR4]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB57:[0-9]+]], i32 2, ptr [[TMP14]])
+// CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[STR5]], align 8
+// CHECK-NEXT: call void @__kmpc_error(ptr @[[GLOB59:[0-9]+]], i32 2, ptr [[TMP15]])
+// CHECK-NEXT: call void @_Z3foov()
+// CHECK-NEXT: ret i32 10
+// OMP51-LABEL: define dso_local void @_Z3foov(
+// OMP51-SAME: ) #[[ATTR0:[0-9]+]] {
+// OMP51-NEXT: [[ENTRY:.*:]]
+// OMP51-NEXT: ret void
+//
+//
+// OMP51-LABEL: define dso_local noundef i32 @main(
+// OMP51-SAME: i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR1:[0-9]+]] {
+// OMP51-NEXT: [[ENTRY:.*:]]
+// OMP51-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
+// OMP51-NEXT: [[B:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[C:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[D:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[E:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[F:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[G:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[STR1:%.*]] = alloca [4 x i8], align 1
+// OMP51-NEXT: [[STR2:%.*]] = alloca ptr, align 8
+// OMP51-NEXT: [[STR3:%.*]] = alloca [4 x i8], align 1
+// OMP51-NEXT: [[STR4:%.*]] = alloca ptr, align 8
+// OMP51-NEXT: [[STR5:%.*]] = alloca ptr, align 8
+// OMP51-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
+// OMP51-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
+// OMP51-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
+// OMP51-NEXT: store i32 [[TMP0]], ptr [[B]], align 4
+// OMP51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR1]], ptr align 1 @__const.main.str1, i64 4, i1 false)
+// OMP51-NEXT: store ptr @.str, ptr [[STR2]], align 8
+// OMP51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR3]], ptr align 1 @__const.main.str3, i64 4, i1 false)
+// OMP51-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP51-NEXT: store ptr [[ARRAYDECAY]], ptr [[STR4]], align 8
+// OMP51-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP51-NEXT: store ptr [[ARRAYDECAY1]], ptr [[STR5]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB1:[0-9]+]], i32 2, ptr @.str.1)
+// OMP51-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB3:[0-9]+]], i32 2, ptr [[ARRAYDECAY2]])
+// OMP51-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR2]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB5:[0-9]+]], i32 2, ptr [[TMP1]])
+// OMP51-NEXT: [[ARRAYDECAY3:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB7:[0-9]+]], i32 2, ptr [[ARRAYDECAY3]])
+// OMP51-NEXT: [[TMP2:%.*]] = load ptr, ptr [[STR4]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB9:[0-9]+]], i32 2, ptr [[TMP2]])
+// OMP51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[STR5]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB11:[0-9]+]], i32 2, ptr [[TMP3]])
+// OMP51-NEXT: store i32 2, ptr @_ZZ4mainE1a, align 4
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB13:[0-9]+]], i32 1, ptr @.str.2)
+// OMP51-NEXT: [[ARRAYDECAY4:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB15:[0-9]+]], i32 1, ptr [[ARRAYDECAY4]])
+// OMP51-NEXT: [[TMP4:%.*]] = load ptr, ptr [[STR2]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB17:[0-9]+]], i32 1, ptr [[TMP4]])
+// OMP51-NEXT: [[ARRAYDECAY5:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB19:[0-9]+]], i32 1, ptr [[ARRAYDECAY5]])
+// OMP51-NEXT: [[TMP5:%.*]] = load ptr, ptr [[STR4]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB21:[0-9]+]], i32 1, ptr [[TMP5]])
+// OMP51-NEXT: [[TMP6:%.*]] = load ptr, ptr [[STR5]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB23:[0-9]+]], i32 1, ptr [[TMP6]])
+// OMP51-NEXT: call void @_Z3foov()
+// OMP51-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
+// OMP51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8
+// OMP51-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_PPc(i32 noundef [[TMP7]], ptr noundef [[TMP8]])
+// OMP51-NEXT: ret i32 0
+//
+//
+// OMP51-LABEL: define linkonce_odr noundef i32 @_Z5tmainIiLi10EEiT_PPc(
+// OMP51-SAME: i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0]] comdat {
+// OMP51-NEXT: [[ENTRY:.*:]]
+// OMP51-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
+// OMP51-NEXT: [[B:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[C:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[D:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[E:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[F:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[G:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[STR1:%.*]] = alloca [4 x i8], align 1
+// OMP51-NEXT: [[STR2:%.*]] = alloca ptr, align 8
+// OMP51-NEXT: [[STR3:%.*]] = alloca [4 x i8], align 1
+// OMP51-NEXT: [[STR4:%.*]] = alloca ptr, align 8
+// OMP51-NEXT: [[STR5:%.*]] = alloca ptr, align 8
+// OMP51-NEXT: [[B7:%.*]] = alloca i32, align 4
+// OMP51-NEXT: [[C8:%.*]] = alloca i32, align 4
+// OMP51-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
+// OMP51-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
+// OMP51-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
+// OMP51-NEXT: store i32 [[TMP0]], ptr [[B]], align 4
+// OMP51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR1]], ptr align 1 @__const._Z5tmainIiLi10EEiT_PPc.str1, i64 4, i1 false)
+// OMP51-NEXT: store ptr @.str, ptr [[STR2]], align 8
+// OMP51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR3]], ptr align 1 @__const._Z5tmainIiLi10EEiT_PPc.str3, i64 4, i1 false)
+// OMP51-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP51-NEXT: store ptr [[ARRAYDECAY]], ptr [[STR4]], align 8
+// OMP51-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP51-NEXT: store ptr [[ARRAYDECAY1]], ptr [[STR5]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB25:[0-9]+]], i32 2, ptr @.str.3)
+// OMP51-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB27:[0-9]+]], i32 2, ptr [[ARRAYDECAY2]])
+// OMP51-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR2]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB29:[0-9]+]], i32 2, ptr [[TMP1]])
+// OMP51-NEXT: [[ARRAYDECAY3:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB31:[0-9]+]], i32 2, ptr [[ARRAYDECAY3]])
+// OMP51-NEXT: [[TMP2:%.*]] = load ptr, ptr [[STR4]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB33:[0-9]+]], i32 2, ptr [[TMP2]])
+// OMP51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[STR5]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB35:[0-9]+]], i32 2, ptr [[TMP3]])
+// OMP51-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8
+// OMP51-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP4]], i64 0
+// OMP51-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8
+// OMP51-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 0
+// OMP51-NEXT: [[TMP6:%.*]] = load i8, ptr [[ARRAYIDX4]], align 1
+// OMP51-NEXT: [[CONV:%.*]] = sext i8 [[TMP6]] to i32
+// OMP51-NEXT: store i32 [[CONV]], ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4
+// OMP51-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4
+// OMP51-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1
+// OMP51-NEXT: store i32 [[INC]], ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB37:[0-9]+]], i32 1, ptr @.str.4)
+// OMP51-NEXT: [[ARRAYDECAY5:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB39:[0-9]+]], i32 1, ptr [[ARRAYDECAY5]])
+// OMP51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[STR2]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB41:[0-9]+]], i32 1, ptr [[TMP8]])
+// OMP51-NEXT: [[ARRAYDECAY6:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB43:[0-9]+]], i32 1, ptr [[ARRAYDECAY6]])
+// OMP51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[STR4]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB45:[0-9]+]], i32 1, ptr [[TMP9]])
+// OMP51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[STR5]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB47:[0-9]+]], i32 1, ptr [[TMP10]])
+// OMP51-NEXT: store i32 10, ptr [[B7]], align 4
+// OMP51-NEXT: store i32 100, ptr [[C8]], align 4
+// OMP51-NEXT: [[TMP11:%.*]] = load i32, ptr [[B7]], align 4
+// OMP51-NEXT: [[TMP12:%.*]] = load i32, ptr [[C8]], align 4
+// OMP51-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// OMP51-NEXT: store i32 [[ADD]], ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB49:[0-9]+]], i32 2, ptr @.str.1)
+// OMP51-NEXT: [[ARRAYDECAY9:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB51:[0-9]+]], i32 2, ptr [[ARRAYDECAY9]])
+// OMP51-NEXT: [[TMP13:%.*]] = load ptr, ptr [[STR2]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB53:[0-9]+]], i32 2, ptr [[TMP13]])
+// OMP51-NEXT: [[ARRAYDECAY10:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB55:[0-9]+]], i32 2, ptr [[ARRAYDECAY10]])
+// OMP51-NEXT: [[TMP14:%.*]] = load ptr, ptr [[STR4]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB57:[0-9]+]], i32 2, ptr [[TMP14]])
+// OMP51-NEXT: [[TMP15:%.*]] = load ptr, ptr [[STR5]], align 8
+// OMP51-NEXT: call void @__kmpc_error(ptr @[[GLOB59:[0-9]+]], i32 2, ptr [[TMP15]])
+// OMP51-NEXT: call void @_Z3foov()
+// OMP51-NEXT: ret i32 10
+//
+//
+// OMP60-LABEL: define dso_local void @_Z3foov(
+// OMP60-SAME: ) #[[ATTR0:[0-9]+]] {
+// OMP60-NEXT: [[ENTRY:.*:]]
+// OMP60-NEXT: ret void
+//
+//
+// OMP60-LABEL: define dso_local noundef i32 @main(
+// OMP60-SAME: i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR1:[0-9]+]] {
+// OMP60-NEXT: [[ENTRY:.*:]]
+// OMP60-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
+// OMP60-NEXT: [[B:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[C:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[D:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[E:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[F:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[G:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[STR1:%.*]] = alloca [4 x i8], align 1
+// OMP60-NEXT: [[STR2:%.*]] = alloca ptr, align 8
+// OMP60-NEXT: [[STR3:%.*]] = alloca [4 x i8], align 1
+// OMP60-NEXT: [[STR4:%.*]] = alloca ptr, align 8
+// OMP60-NEXT: [[STR5:%.*]] = alloca ptr, align 8
+// OMP60-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
+// OMP60-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
+// OMP60-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
+// OMP60-NEXT: store i32 [[TMP0]], ptr [[B]], align 4
+// OMP60-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR1]], ptr align 1 @__const.main.str1, i64 4, i1 false)
+// OMP60-NEXT: store ptr @.str, ptr [[STR2]], align 8
+// OMP60-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR3]], ptr align 1 @__const.main.str3, i64 4, i1 false)
+// OMP60-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP60-NEXT: store ptr [[ARRAYDECAY]], ptr [[STR4]], align 8
+// OMP60-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP60-NEXT: store ptr [[ARRAYDECAY1]], ptr [[STR5]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB1:[0-9]+]], i32 2, ptr @.str.1)
+// OMP60-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB3:[0-9]+]], i32 2, ptr [[ARRAYDECAY2]])
+// OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR2]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB5:[0-9]+]], i32 2, ptr [[TMP1]])
+// OMP60-NEXT: [[ARRAYDECAY3:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB7:[0-9]+]], i32 2, ptr [[ARRAYDECAY3]])
+// OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[STR4]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB9:[0-9]+]], i32 2, ptr [[TMP2]])
+// OMP60-NEXT: [[TMP3:%.*]] = load ptr, ptr [[STR5]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB11:[0-9]+]], i32 2, ptr [[TMP3]])
+// OMP60-NEXT: store i32 2, ptr @_ZZ4mainE1a, align 4
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB13:[0-9]+]], i32 1, ptr @.str.2)
+// OMP60-NEXT: [[ARRAYDECAY4:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB15:[0-9]+]], i32 1, ptr [[ARRAYDECAY4]])
+// OMP60-NEXT: [[TMP4:%.*]] = load ptr, ptr [[STR2]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB17:[0-9]+]], i32 1, ptr [[TMP4]])
+// OMP60-NEXT: [[ARRAYDECAY5:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB19:[0-9]+]], i32 1, ptr [[ARRAYDECAY5]])
+// OMP60-NEXT: [[TMP5:%.*]] = load ptr, ptr [[STR4]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB21:[0-9]+]], i32 1, ptr [[TMP5]])
+// OMP60-NEXT: [[TMP6:%.*]] = load ptr, ptr [[STR5]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB23:[0-9]+]], i32 1, ptr [[TMP6]])
+// OMP60-NEXT: call void @_Z3foov()
+// OMP60-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
+// OMP60-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8
+// OMP60-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_PPc(i32 noundef [[TMP7]], ptr noundef [[TMP8]])
+// OMP60-NEXT: ret i32 0
+//
+//
+// OMP60-LABEL: define linkonce_odr noundef i32 @_Z5tmainIiLi10EEiT_PPc(
+// OMP60-SAME: i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0]] comdat {
+// OMP60-NEXT: [[ENTRY:.*:]]
+// OMP60-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
+// OMP60-NEXT: [[B:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[C:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[D:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[E:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[F:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[G:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[STR1:%.*]] = alloca [4 x i8], align 1
+// OMP60-NEXT: [[STR2:%.*]] = alloca ptr, align 8
+// OMP60-NEXT: [[STR3:%.*]] = alloca [4 x i8], align 1
+// OMP60-NEXT: [[STR4:%.*]] = alloca ptr, align 8
+// OMP60-NEXT: [[STR5:%.*]] = alloca ptr, align 8
+// OMP60-NEXT: [[B7:%.*]] = alloca i32, align 4
+// OMP60-NEXT: [[C8:%.*]] = alloca i32, align 4
+// OMP60-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
+// OMP60-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
+// OMP60-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
+// OMP60-NEXT: store i32 [[TMP0]], ptr [[B]], align 4
+// OMP60-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR1]], ptr align 1 @__const._Z5tmainIiLi10EEiT_PPc.str1, i64 4, i1 false)
+// OMP60-NEXT: store ptr @.str, ptr [[STR2]], align 8
+// OMP60-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR3]], ptr align 1 @__const._Z5tmainIiLi10EEiT_PPc.str3, i64 4, i1 false)
+// OMP60-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP60-NEXT: store ptr [[ARRAYDECAY]], ptr [[STR4]], align 8
+// OMP60-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP60-NEXT: store ptr [[ARRAYDECAY1]], ptr [[STR5]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB25:[0-9]+]], i32 2, ptr @.str.3)
+// OMP60-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB27:[0-9]+]], i32 2, ptr [[ARRAYDECAY2]])
+// OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR2]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB29:[0-9]+]], i32 2, ptr [[TMP1]])
+// OMP60-NEXT: [[ARRAYDECAY3:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB31:[0-9]+]], i32 2, ptr [[ARRAYDECAY3]])
+// OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[STR4]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB33:[0-9]+]], i32 2, ptr [[TMP2]])
+// OMP60-NEXT: [[TMP3:%.*]] = load ptr, ptr [[STR5]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB35:[0-9]+]], i32 2, ptr [[TMP3]])
+// OMP60-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8
+// OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP4]], i64 0
+// OMP60-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8
+// OMP60-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 0
+// OMP60-NEXT: [[TMP6:%.*]] = load i8, ptr [[ARRAYIDX4]], align 1
+// OMP60-NEXT: [[CONV:%.*]] = sext i8 [[TMP6]] to i32
+// OMP60-NEXT: store i32 [[CONV]], ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4
+// OMP60-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4
+// OMP60-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1
+// OMP60-NEXT: store i32 [[INC]], ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB37:[0-9]+]], i32 1, ptr @.str.4)
+// OMP60-NEXT: [[ARRAYDECAY5:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB39:[0-9]+]], i32 1, ptr [[ARRAYDECAY5]])
+// OMP60-NEXT: [[TMP8:%.*]] = load ptr, ptr [[STR2]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB41:[0-9]+]], i32 1, ptr [[TMP8]])
+// OMP60-NEXT: [[ARRAYDECAY6:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB43:[0-9]+]], i32 1, ptr [[ARRAYDECAY6]])
+// OMP60-NEXT: [[TMP9:%.*]] = load ptr, ptr [[STR4]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB45:[0-9]+]], i32 1, ptr [[TMP9]])
+// OMP60-NEXT: [[TMP10:%.*]] = load ptr, ptr [[STR5]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB47:[0-9]+]], i32 1, ptr [[TMP10]])
+// OMP60-NEXT: store i32 10, ptr [[B7]], align 4
+// OMP60-NEXT: store i32 100, ptr [[C8]], align 4
+// OMP60-NEXT: [[TMP11:%.*]] = load i32, ptr [[B7]], align 4
+// OMP60-NEXT: [[TMP12:%.*]] = load i32, ptr [[C8]], align 4
+// OMP60-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// OMP60-NEXT: store i32 [[ADD]], ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB49:[0-9]+]], i32 2, ptr @.str.1)
+// OMP60-NEXT: [[ARRAYDECAY9:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR1]], i64 0, i64 0
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB51:[0-9]+]], i32 2, ptr [[ARRAYDECAY9]])
+// OMP60-NEXT: [[TMP13:%.*]] = load ptr, ptr [[STR2]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB53:[0-9]+]], i32 2, ptr [[TMP13]])
+// OMP60-NEXT: [[ARRAYDECAY10:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB55:[0-9]+]], i32 2, ptr [[ARRAYDECAY10]])
+// OMP60-NEXT: [[TMP14:%.*]] = load ptr, ptr [[STR4]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB57:[0-9]+]], i32 2, ptr [[TMP14]])
+// OMP60-NEXT: [[TMP15:%.*]] = load ptr, ptr [[STR5]], align 8
+// OMP60-NEXT: call void @__kmpc_error(ptr @[[GLOB59:[0-9]+]], i32 2, ptr [[TMP15]])
+// OMP60-NEXT: call void @_Z3foov()
+// OMP60-NEXT: ret i32 10
+//
+//
+// SIMD-LABEL: define dso_local void @_Z3foov(
+// SIMD-SAME: ) #[[ATTR0:[0-9]+]] !dbg [[DBG29:![0-9]+]] {
+// SIMD-NEXT: [[ENTRY:.*:]]
+// SIMD-NEXT: ret void, !dbg [[DBG32:![0-9]+]]
+//
+//
+// SIMD-LABEL: define dso_local noundef i32 @main(
+// SIMD-SAME: i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR1:[0-9]+]] !dbg [[DBG2:![0-9]+]] {
+// SIMD-NEXT: [[ENTRY:.*:]]
+// SIMD-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
+// SIMD-NEXT: [[B:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[C:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[D:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[E:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[F:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[G:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[STR1:%.*]] = alloca [4 x i8], align 1
+// SIMD-NEXT: [[STR2:%.*]] = alloca ptr, align 8
+// SIMD-NEXT: [[STR3:%.*]] = alloca [4 x i8], align 1
+// SIMD-NEXT: [[STR4:%.*]] = alloca ptr, align 8
+// SIMD-NEXT: [[STR5:%.*]] = alloca ptr, align 8
+// SIMD-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
+// SIMD-NEXT: #dbg_declare(ptr [[ARGC_ADDR]], [[META33:![0-9]+]], !DIExpression(), [[META34:![0-9]+]])
+// SIMD-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
+// SIMD-NEXT: #dbg_declare(ptr [[ARGV_ADDR]], [[META35:![0-9]+]], !DIExpression(), [[META36:![0-9]+]])
+// SIMD-NEXT: #dbg_declare(ptr [[B]], [[META37:![0-9]+]], !DIExpression(), [[META38:![0-9]+]])
+// SIMD-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !dbg [[DBG39:![0-9]+]]
+// SIMD-NEXT: store i32 [[TMP0]], ptr [[B]], align 4, !dbg [[META38]]
+// SIMD-NEXT: #dbg_declare(ptr [[C]], [[META40:![0-9]+]], !DIExpression(), [[META41:![0-9]+]])
+// SIMD-NEXT: #dbg_declare(ptr [[D]], [[META42:![0-9]+]], !DIExpression(), [[META43:![0-9]+]])
+// SIMD-NEXT: #dbg_declare(ptr [[E]], [[META44:![0-9]+]], !DIExpression(), [[META45:![0-9]+]])
+// SIMD-NEXT: #dbg_declare(ptr [[F]], [[META46:![0-9]+]], !DIExpression(), [[META47:![0-9]+]])
+// SIMD-NEXT: #dbg_declare(ptr [[G]], [[META48:![0-9]+]], !DIExpression(), [[META49:![0-9]+]])
+// SIMD-NEXT: #dbg_declare(ptr [[STR1]], [[META50:![0-9]+]], !DIExpression(), [[META51:![0-9]+]])
+// SIMD-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR1]], ptr align 1 @__const.main.str1, i64 4, i1 false), !dbg [[META51]]
+// SIMD-NEXT: #dbg_declare(ptr [[STR2]], [[META52:![0-9]+]], !DIExpression(), [[META54:![0-9]+]])
+// SIMD-NEXT: store ptr @.str, ptr [[STR2]], align 8, !dbg [[META54]]
+// SIMD-NEXT: #dbg_declare(ptr [[STR3]], [[META55:![0-9]+]], !DIExpression(), [[META57:![0-9]+]])
+// SIMD-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR3]], ptr align 1 @__const.main.str3, i64 4, i1 false), !dbg [[META57]]
+// SIMD-NEXT: #dbg_declare(ptr [[STR4]], [[META58:![0-9]+]], !DIExpression(), [[META59:![0-9]+]])
+// SIMD-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0, !dbg [[DBG60:![0-9]+]]
+// SIMD-NEXT: store ptr [[ARRAYDECAY]], ptr [[STR4]], align 8, !dbg [[META59]]
+// SIMD-NEXT: #dbg_declare(ptr [[STR5]], [[META61:![0-9]+]], !DIExpression(), [[META63:![0-9]+]])
+// SIMD-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0, !dbg [[DBG64:![0-9]+]]
+// SIMD-NEXT: store ptr [[ARRAYDECAY1]], ptr [[STR5]], align 8, !dbg [[META63]]
+// SIMD-NEXT: store i32 2, ptr @_ZZ4mainE1a, align 4, !dbg [[DBG65:![0-9]+]]
+// SIMD-NEXT: call void @_Z3foov(), !dbg [[DBG66:![0-9]+]]
+// SIMD-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !dbg [[DBG67:![0-9]+]]
+// SIMD-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !dbg [[DBG68:![0-9]+]]
+// SIMD-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_PPc(i32 noundef [[TMP1]], ptr noundef [[TMP2]]), !dbg [[DBG69:![0-9]+]]
+// SIMD-NEXT: ret i32 0, !dbg [[DBG70:![0-9]+]]
+//
+//
+// SIMD-LABEL: define linkonce_odr noundef i32 @_Z5tmainIiLi10EEiT_PPc(
+// SIMD-SAME: i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0]] comdat !dbg [[DBG21:![0-9]+]] {
+// SIMD-NEXT: [[ENTRY:.*:]]
+// SIMD-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
+// SIMD-NEXT: [[B:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[C:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[D:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[E:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[F:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[G:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[STR1:%.*]] = alloca [4 x i8], align 1
+// SIMD-NEXT: [[STR2:%.*]] = alloca ptr, align 8
+// SIMD-NEXT: [[STR3:%.*]] = alloca [4 x i8], align 1
+// SIMD-NEXT: [[STR4:%.*]] = alloca ptr, align 8
+// SIMD-NEXT: [[STR5:%.*]] = alloca ptr, align 8
+// SIMD-NEXT: [[B3:%.*]] = alloca i32, align 4
+// SIMD-NEXT: [[C4:%.*]] = alloca i32, align 4
+// SIMD-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
+// SIMD-NEXT: #dbg_declare(ptr [[ARGC_ADDR]], [[META71:![0-9]+]], !DIExpression(), [[META72:![0-9]+]])
+// SIMD-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
+// SIMD-NEXT: #dbg_declare(ptr [[ARGV_ADDR]], [[META73:![0-9]+]], !DIExpression(), [[META74:![0-9]+]])
+// SIMD-NEXT: #dbg_declare(ptr [[B]], [[META75:![0-9]+]], !DIExpression(), [[META76:![0-9]+]])
+// SIMD-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !dbg [[DBG77:![0-9]+]]
+// SIMD-NEXT: store i32 [[TMP0]], ptr [[B]], align 4, !dbg [[META76]]
+// SIMD-NEXT: #dbg_declare(ptr [[C]], [[META78:![0-9]+]], !DIExpression(), [[META79:![0-9]+]])
+// SIMD-NEXT: #dbg_declare(ptr [[D]], [[META80:![0-9]+]], !DIExpression(), [[META81:![0-9]+]])
+// SIMD-NEXT: #dbg_declare(ptr [[E]], [[META82:![0-9]+]], !DIExpression(), [[META83:![0-9]+]])
+// SIMD-NEXT: #dbg_declare(ptr [[F]], [[META84:![0-9]+]], !DIExpression(), [[META85:![0-9]+]])
+// SIMD-NEXT: #dbg_declare(ptr [[G]], [[META86:![0-9]+]], !DIExpression(), [[META87:![0-9]+]])
+// SIMD-NEXT: #dbg_declare(ptr [[STR1]], [[META88:![0-9]+]], !DIExpression(), [[META89:![0-9]+]])
+// SIMD-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR1]], ptr align 1 @__const._Z5tmainIiLi10EEiT_PPc.str1, i64 4, i1 false), !dbg [[META89]]
+// SIMD-NEXT: #dbg_declare(ptr [[STR2]], [[META90:![0-9]+]], !DIExpression(), [[META91:![0-9]+]])
+// SIMD-NEXT: store ptr @.str, ptr [[STR2]], align 8, !dbg [[META91]]
+// SIMD-NEXT: #dbg_declare(ptr [[STR3]], [[META92:![0-9]+]], !DIExpression(), [[META93:![0-9]+]])
+// SIMD-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR3]], ptr align 1 @__const._Z5tmainIiLi10EEiT_PPc.str3, i64 4, i1 false), !dbg [[META93]]
+// SIMD-NEXT: #dbg_declare(ptr [[STR4]], [[META94:![0-9]+]], !DIExpression(), [[META95:![0-9]+]])
+// SIMD-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0, !dbg [[DBG96:![0-9]+]]
+// SIMD-NEXT: store ptr [[ARRAYDECAY]], ptr [[STR4]], align 8, !dbg [[META95]]
+// SIMD-NEXT: #dbg_declare(ptr [[STR5]], [[META97:![0-9]+]], !DIExpression(), [[META98:![0-9]+]])
+// SIMD-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [4 x i8], ptr [[STR3]], i64 0, i64 0, !dbg [[DBG99:![0-9]+]]
+// SIMD-NEXT: store ptr [[ARRAYDECAY1]], ptr [[STR5]], align 8, !dbg [[META98]]
+// SIMD-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !dbg [[DBG100:![0-9]+]]
+// SIMD-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP1]], i64 0, !dbg [[DBG100]]
+// SIMD-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !dbg [[DBG100]]
+// SIMD-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 0, !dbg [[DBG100]]
+// SIMD-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1, !dbg [[DBG100]]
+// SIMD-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32, !dbg [[DBG100]]
+// SIMD-NEXT: store i32 [[CONV]], ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4, !dbg [[DBG101:![0-9]+]]
+// SIMD-NEXT: [[TMP4:%.*]] = load i32, ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4, !dbg [[DBG102:![0-9]+]]
+// SIMD-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1, !dbg [[DBG102]]
+// SIMD-NEXT: store i32 [[INC]], ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4, !dbg [[DBG102]]
+// SIMD-NEXT: #dbg_declare(ptr [[B3]], [[META103:![0-9]+]], !DIExpression(), [[META105:![0-9]+]])
+// SIMD-NEXT: store i32 10, ptr [[B3]], align 4, !dbg [[META105]]
+// SIMD-NEXT: #dbg_declare(ptr [[C4]], [[META106:![0-9]+]], !DIExpression(), [[META107:![0-9]+]])
+// SIMD-NEXT: store i32 100, ptr [[C4]], align 4, !dbg [[META107]]
+// SIMD-NEXT: [[TMP5:%.*]] = load i32, ptr [[B3]], align 4, !dbg [[DBG108:![0-9]+]]
+// SIMD-NEXT: [[TMP6:%.*]] = load i32, ptr [[C4]], align 4, !dbg [[DBG109:![0-9]+]]
+// SIMD-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], [[TMP6]], !dbg [[DBG110:![0-9]+]]
+// SIMD-NEXT: store i32 [[ADD]], ptr @_ZZ5tmainIiLi10EEiT_PPcE1a, align 4, !dbg [[DBG111:![0-9]+]]
+// SIMD-NEXT: call void @_Z3foov(), !dbg [[DBG112:![0-9]+]]
+// SIMD-NEXT: ret i32 10, !dbg [[DBG113:![0-9]+]]
+//
+//.
+// SIMD: [[META0:![0-9]+]] = !DIGlobalVariableExpression(var: [[META1:![0-9]+]], expr: !DIExpression())
+// SIMD: [[META1]] = distinct !DIGlobalVariable(name: "a", scope: [[DBG2]], file: [[META3:![0-9]+]], line: 61, type: [[META6:![0-9]+]], isLocal: true, isDefinition: true)
+// SIMD: [[DBG2]] = distinct !DISubprogram(name: "main", scope: [[META3]], file: [[META3]], line: 59, type: [[META4:![0-9]+]], scopeLine: 59, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: [[META10:![0-9]+]], retainedNodes: [[META22:![0-9]+]])
+// SIMD: [[META3]] = !DIFile(filename: "{{.*}}error_codegen.cpp", directory: {{.*}})
+// SIMD: [[META4]] = !DISubroutineType(types: [[META5:![0-9]+]])
+// SIMD: [[META5]] = !{[[META6]], [[META6]], [[META7:![0-9]+]]}
+// SIMD: [[META6]] = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+// SIMD: [[META7]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META8:![0-9]+]], size: 64)
+// SIMD: [[META8]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META9:![0-9]+]], size: 64)
+// SIMD: [[META9]] = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+// SIMD: [[META10]] = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_11, file: [[META11:![0-9]+]], producer: "{{.*}}clang version {{.*}}", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, globals: [[META12:![0-9]+]], splitDebugInlining: false, nameTableKind: None)
+// SIMD: [[META11]] = !DIFile(filename: "{{.*}}<stdin>", directory: {{.*}})
+// SIMD: [[META12]] = !{[[META0]], [[META13:![0-9]+]], [[META19:![0-9]+]]}
+// SIMD: [[META13]] = !DIGlobalVariableExpression(var: [[META14:![0-9]+]], expr: !DIExpression())
+// SIMD: [[META14]] = distinct !DIGlobalVariable(scope: null, file: [[META3]], line: 63, type: [[META15:![0-9]+]], isLocal: true, isDefinition: true)
+// SIMD: [[META15]] = !DICompositeType(tag: DW_TAG_array_type, baseType: [[META16:![0-9]+]], size: 32, elements: [[META17:![0-9]+]])
+// SIMD: [[META16]] = !DIDerivedType(tag: DW_TAG_const_type, baseType: [[META9]])
+// SIMD: [[META17]] = !{[[META18:![0-9]+]]}
+// SIMD: [[META18]] = !DISubrange(count: 4)
+// SIMD: [[META19]] = !DIGlobalVariableExpression(var: [[META20:![0-9]+]], expr: !DIExpression())
+// SIMD: [[META20]] = distinct !DIGlobalVariable(name: "a", scope: [[DBG21]], file: [[META3]], line: 22, type: [[META6]], isLocal: false, isDefinition: true)
+// SIMD: [[DBG21]] = distinct !DISubprogram(name: "tmain<int, 10>", linkageName: "_Z5tmainIiLi10EEiT_PPc", scope: [[META3]], file: [[META3]], line: 20, type: [[META4]], scopeLine: 20, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: [[META10]], templateParams: [[META23:![0-9]+]], retainedNodes: [[META22]])
+// SIMD: [[META22]] = !{}
+// SIMD: [[META23]] = !{[[META24:![0-9]+]], [[META25:![0-9]+]]}
+// SIMD: [[META24]] = !DITemplateTypeParameter(name: "T", type: [[META6]])
+// SIMD: [[META25]] = !DITemplateValueParameter(name: "N", type: [[META6]], value: i32 10)
+// SIMD: [[DBG29]] = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov", scope: [[META3]], file: [[META3]], line: 17, type: [[META30:![0-9]+]], scopeLine: 17, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: [[META10]])
+// SIMD: [[META30]] = !DISubroutineType(types: [[META31:![0-9]+]])
+// SIMD: [[META31]] = !{null}
+// SIMD: [[DBG32]] = !DILocation(line: 17, column: 13, scope: [[DBG29]])
+// SIMD: [[META33]] = !DILocalVariable(name: "argc", arg: 1, scope: [[DBG2]], file: [[META3]], line: 59, type: [[META6]])
+// SIMD: [[META34]] = !DILocation(line: 59, column: 15, scope: [[DBG2]])
+// SIMD: [[META35]] = !DILocalVariable(name: "argv", arg: 2, scope: [[DBG2]], file: [[META3]], line: 59, type: [[META7]])
+// SIMD: [[META36]] = !DILocation(line: 59, column: 28, scope: [[DBG2]])
+// SIMD: [[META37]] = !DILocalVariable(name: "b", scope: [[DBG2]], file: [[META3]], line: 60, type: [[META6]])
+// SIMD: [[META38]] = !DILocation(line: 60, column: 7, scope: [[DBG2]])
+// SIMD: [[DBG39]] = !DILocation(line: 60, column: 11, scope: [[DBG2]])
+// SIMD: [[META40]] = !DILocalVariable(name: "c", scope: [[DBG2]], file: [[META3]], line: 60, type: [[META6]])
+// SIMD: [[META41]] = !DILocation(line: 60, column: 17, scope: [[DBG2]])
+// SIMD: [[META42]] = !DILocalVariable(name: "d", scope: [[DBG2]], file: [[META3]], line: 60, type: [[META6]])
+// SIMD: [[META43]] = !DILocation(line: 60, column: 20, scope: [[DBG2]])
+// SIMD: [[META44]] = !DILocalVariable(name: "e", scope: [[DBG2]], file: [[META3]], line: 60, type: [[META6]])
+// SIMD: [[META45]] = !DILocation(line: 60, column: 23, scope: [[DBG2]])
+// SIMD: [[META46]] = !DILocalVariable(name: "f", scope: [[DBG2]], file: [[META3]], line: 60, type: [[META6]])
+// SIMD: [[META47]] = !DILocation(line: 60, column: 26, scope: [[DBG2]])
+// SIMD: [[META48]] = !DILocalVariable(name: "g", scope: [[DBG2]], file: [[META3]], line: 60, type: [[META6]])
+// SIMD: [[META49]] = !DILocation(line: 60, column: 29, scope: [[DBG2]])
+// SIMD: [[META50]] = !DILocalVariable(name: "str1", scope: [[DBG2]], file: [[META3]], line: 62, type: [[META15]])
+// SIMD: [[META51]] = !DILocation(line: 62, column: 14, scope: [[DBG2]])
+// SIMD: [[META52]] = !DILocalVariable(name: "str2", scope: [[DBG2]], file: [[META3]], line: 63, type: [[META53:![0-9]+]])
+// SIMD: [[META53]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META16]], size: 64)
+// SIMD: [[META54]] = !DILocation(line: 63, column: 15, scope: [[DBG2]])
+// SIMD: [[META55]] = !DILocalVariable(name: "str3", scope: [[DBG2]], file: [[META3]], line: 64, type: [[META56:![0-9]+]])
+// SIMD: [[META56]] = !DICompositeType(tag: DW_TAG_array_type, baseType: [[META9]], size: 32, elements: [[META17]])
+// SIMD: [[META57]] = !DILocation(line: 64, column: 8, scope: [[DBG2]])
+// SIMD: [[META58]] = !DILocalVariable(name: "str4", scope: [[DBG2]], file: [[META3]], line: 65, type: [[META8]])
+// SIMD: [[META59]] = !DILocation(line: 65, column: 9, scope: [[DBG2]])
+// SIMD: [[DBG60]] = !DILocation(line: 65, column: 16, scope: [[DBG2]])
+// SIMD: [[META61]] = !DILocalVariable(name: "str5", scope: [[DBG2]], file: [[META3]], line: 66, type: [[META62:![0-9]+]])
+// SIMD: [[META62]] = !DIDerivedType(tag: DW_TAG_const_type, baseType: [[META8]])
+// SIMD: [[META63]] = !DILocation(line: 66, column: 16, scope: [[DBG2]])
+// SIMD: [[DBG64]] = !DILocation(line: 66, column: 23, scope: [[DBG2]])
+// SIMD: [[DBG65]] = !DILocation(line: 73, column: 5, scope: [[DBG2]])
+// SIMD: [[DBG66]] = !DILocation(line: 80, column: 3, scope: [[DBG2]])
+// SIMD: [[DBG67]] = !DILocation(line: 81, column: 18, scope: [[DBG2]])
+// SIMD: [[DBG68]] = !DILocation(line: 81, column: 24, scope: [[DBG2]])
+// SIMD: [[DBG69]] = !DILocation(line: 81, column: 3, scope: [[DBG2]])
+// SIMD: [[DBG70]] = !DILocation(line: 82, column: 1, scope: [[DBG2]])
+// SIMD: [[META71]] = !DILocalVariable(name: "argc", arg: 1, scope: [[DBG21]], file: [[META3]], line: 20, type: [[META6]])
+// SIMD: [[META72]] = !DILocation(line: 20, column: 13, scope: [[DBG21]])
+// SIMD: [[META73]] = !DILocalVariable(name: "argv", arg: 2, scope: [[DBG21]], file: [[META3]], line: 20, type: [[META7]])
+// SIMD: [[META74]] = !DILocation(line: 20, column: 26, scope: [[DBG21]])
+// SIMD: [[META75]] = !DILocalVariable(name: "b", scope: [[DBG21]], file: [[META3]], line: 21, type: [[META6]])
+// SIMD: [[META76]] = !DILocation(line: 21, column: 5, scope: [[DBG21]])
+// SIMD: [[DBG77]] = !DILocation(line: 21, column: 9, scope: [[DBG21]])
+// SIMD: [[META78]] = !DILocalVariable(name: "c", scope: [[DBG21]], file: [[META3]], line: 21, type: [[META6]])
+// SIMD: [[META79]] = !DILocation(line: 21, column: 15, scope: [[DBG21]])
+// SIMD: [[META80]] = !DILocalVariable(name: "d", scope: [[DBG21]], file: [[META3]], line: 21, type: [[META6]])
+// SIMD: [[META81]] = !DILocation(line: 21, column: 18, scope: [[DBG21]])
+// SIMD: [[META82]] = !DILocalVariable(name: "e", scope: [[DBG21]], file: [[META3]], line: 21, type: [[META6]])
+// SIMD: [[META83]] = !DILocation(line: 21, column: 21, scope: [[DBG21]])
+// SIMD: [[META84]] = !DILocalVariable(name: "f", scope: [[DBG21]], file: [[META3]], line: 21, type: [[META6]])
+// SIMD: [[META85]] = !DILocation(line: 21, column: 24, scope: [[DBG21]])
+// SIMD: [[META86]] = !DILocalVariable(name: "g", scope: [[DBG21]], file: [[META3]], line: 21, type: [[META6]])
+// SIMD: [[META87]] = !DILocation(line: 21, column: 27, scope: [[DBG21]])
+// SIMD: [[META88]] = !DILocalVariable(name: "str1", scope: [[DBG21]], file: [[META3]], line: 23, type: [[META15]])
+// SIMD: [[META89]] = !DILocation(line: 23, column: 14, scope: [[DBG21]])
+// SIMD: [[META90]] = !DILocalVariable(name: "str2", scope: [[DBG21]], file: [[META3]], line: 24, type: [[META53]])
+// SIMD: [[META91]] = !DILocation(line: 24, column: 15, scope: [[DBG21]])
+// SIMD: [[META92]] = !DILocalVariable(name: "str3", scope: [[DBG21]], file: [[META3]], line: 25, type: [[META56]])
+// SIMD: [[META93]] = !DILocation(line: 25, column: 8, scope: [[DBG21]])
+// SIMD: [[META94]] = !DILocalVariable(name: "str4", scope: [[DBG21]], file: [[META3]], line: 26, type: [[META8]])
+// SIMD: [[META95]] = !DILocation(line: 26, column: 9, scope: [[DBG21]])
+// SIMD: [[DBG96]] = !DILocation(line: 26, column: 16, scope: [[DBG21]])
+// SIMD: [[META97]] = !DILocalVariable(name: "str5", scope: [[DBG21]], file: [[META3]], line: 27, type: [[META62]])
+// SIMD: [[META98]] = !DILocation(line: 27, column: 16, scope: [[DBG21]])
+// SIMD: [[DBG99]] = !DILocation(line: 27, column: 23, scope: [[DBG21]])
+// SIMD: [[DBG100]] = !DILocation(line: 34, column: 7, scope: [[DBG21]])
+// SIMD: [[DBG101]] = !DILocation(line: 34, column: 5, scope: [[DBG21]])
+// SIMD: [[DBG102]] = !DILocation(line: 35, column: 3, scope: [[DBG21]])
+// SIMD: [[META103]] = !DILocalVariable(name: "b", scope: [[META104:![0-9]+]], file: [[META3]], line: 43, type: [[META6]])
+// SIMD: [[META104]] = distinct !DILexicalBlock(scope: [[DBG21]], file: [[META3]], line: 42, column: 3)
+// SIMD: [[META105]] = !DILocation(line: 43, column: 9, scope: [[META104]])
+// SIMD: [[META106]] = !DILocalVariable(name: "c", scope: [[META104]], file: [[META3]], line: 44, type: [[META6]])
+// SIMD: [[META107]] = !DILocation(line: 44, column: 7, scope: [[META104]])
+// SIMD: [[DBG108]] = !DILocation(line: 45, column: 9, scope: [[META104]])
+// SIMD: [[DBG109]] = !DILocation(line: 45, column: 13, scope: [[META104]])
+// SIMD: [[DBG110]] = !DILocation(line: 45, column: 11, scope: [[META104]])
+// SIMD: [[DBG111]] = !DILocation(line: 45, column: 7, scope: [[META104]])
+// SIMD: [[DBG112]] = !DILocation(line: 53, column: 3, scope: [[DBG21]])
+// SIMD: [[DBG113]] = !DILocation(line: 54, column: 1, scope: [[DBG21]])
+//.
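A note on the generated checks above: the OMP51 and OMP60 prefixes verify that each runtime '#pragma omp error' in the test lowers to a call __kmpc_error(ident, severity, message), with i32 2 for fatal and i32 1 for warning severity, and that the message operand may now be a loaded pointer or decayed array rather than only a literal; the SIMD prefix (-fopenmp-simd) shows the same source compiling to plain debug-info-carrying IR with no __kmpc_error calls at all. A minimal sketch of the construct being exercised, assuming the usual at(execution)/severity semantics (names below are illustrative, not the test's own):

// Hedged sketch: a runtime 'omp error' whose message need not be a literal.
void report(const char *who) {
  char msg[] = "bad";
  // assumed lowering: __kmpc_error(loc, /*severity=fatal*/ 2, msg)
  #pragma omp error at(execution) message(msg)
  // assumed lowering: __kmpc_error(loc, /*severity=warning*/ 1, who)
  #pragma omp error at(execution) severity(warning) message(who)
}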
diff --git a/clang/test/OpenMP/error_message.cpp b/clang/test/OpenMP/error_message.cpp
index aed2df9..6e64ee2 100644
--- a/clang/test/OpenMP/error_message.cpp
+++ b/clang/test/OpenMP/error_message.cpp
@@ -112,8 +112,12 @@ if (1)
// expected-error@+1 {{GPU compiler is needed.}}
#pragma omp error message("GPU compiler is needed.") message("GPU compiler is needed.") // expected-error {{directive '#pragma omp error' cannot contain more than one 'message' clause}}
int a;
-// expected-warning@+1 {{expected string literal in 'clause message' - ignoring}}
+// expected-warning@+1 {{expected string in 'clause message' - ignoring}}
#pragma omp error message(a) // expected-error {{ERROR}}
+ char str[] = "msg";
+// expected-warning@+1 {{expected string literal in 'clause message' - ignoring}}
+#pragma omp error message(str) // expected-error {{ERROR}}
+#pragma omp error at(execution) message(str) // no error
// expected-error@+1 {{ERROR}}
#pragma omp error message() // expected-error {{expected expression}}
return T();
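The new expectations above pin down the Sema split this patch tests: a compile-time 'omp error' still insists on a string literal in 'message' (a variable such as str draws the "expected string literal ... ignoring" warning and the directive falls back to the default text), whereas with at(execution) the message is only read at run time, so an arbitrary char array or pointer is accepted without diagnostics. A hedged restatement of the two cases:

char buf[] = "oops";
#pragma omp error message(buf)               // compile time: non-literal message ignored with a warning
#pragma omp error at(execution) message(buf) // run time: any string-valued expression is accepted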
diff --git a/clang/test/OpenMP/irbuilder_unroll_partial_factor_for.c b/clang/test/OpenMP/irbuilder_unroll_partial_factor_for.c
index 8780d51..a9514e1 100644
--- a/clang/test/OpenMP/irbuilder_unroll_partial_factor_for.c
+++ b/clang/test/OpenMP/irbuilder_unroll_partial_factor_for.c
@@ -79,7 +79,7 @@ void unroll_partial_heuristic_for(int n, float *a, float *b, float *c, float *d)
// CHECK-NEXT: br i1 [[OMP_FLOOR0_CMP]], label [[OMP_FLOOR0_BODY:%.*]], label [[OMP_FLOOR0_EXIT:%.*]]
// CHECK: omp_floor0.body:
// CHECK-NEXT: [[TMP13:%.*]] = add i32 [[OMP_FLOOR0_IV]], [[TMP9]]
-// CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[TMP13]], [[OMP_FLOOR0_TRIPCOUNT]]
+// CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[TMP13]], [[TMP4]]
// CHECK-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP5]], i32 13
// CHECK-NEXT: br label [[OMP_TILE0_PREHEADER:%.*]]
// CHECK: omp_tile0.preheader:
diff --git a/clang/test/OpenMP/irbuilder_unroll_partial_heuristic_constant_for.c b/clang/test/OpenMP/irbuilder_unroll_partial_heuristic_constant_for.c
index 728f67c..8ca000a 100644
--- a/clang/test/OpenMP/irbuilder_unroll_partial_heuristic_constant_for.c
+++ b/clang/test/OpenMP/irbuilder_unroll_partial_heuristic_constant_for.c
@@ -85,7 +85,7 @@ void unroll_partial_heuristic_constant_for(float *a, float *b, float *c, float *
// CHECK-NEXT: br i1 [[OMP_FLOOR0_CMP]], label [[OMP_FLOOR0_BODY:%.*]], label [[OMP_FLOOR0_EXIT:%.*]]
// CHECK: omp_floor0.body:
// CHECK-NEXT: [[TMP12:%.*]] = add i32 [[OMP_FLOOR0_IV]], [[TMP8]]
-// CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP12]], [[OMP_FLOOR0_TRIPCOUNT]]
+// CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP12]], [[TMP3]]
// CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP4]], i32 4
// CHECK-NEXT: br label [[OMP_TILE0_PREHEADER:%.*]]
// CHECK: omp_tile0.preheader:
diff --git a/clang/test/OpenMP/irbuilder_unroll_partial_heuristic_runtime_for.c b/clang/test/OpenMP/irbuilder_unroll_partial_heuristic_runtime_for.c
index f41f1fe..5fbcf8f 100644
--- a/clang/test/OpenMP/irbuilder_unroll_partial_heuristic_runtime_for.c
+++ b/clang/test/OpenMP/irbuilder_unroll_partial_heuristic_runtime_for.c
@@ -87,7 +87,7 @@ void unroll_partial_heuristic_runtime_for(int n, float *a, float *b, float *c, f
// CHECK-NEXT: br i1 [[OMP_FLOOR0_CMP]], label [[OMP_FLOOR0_BODY:%.*]], label [[OMP_FLOOR0_EXIT:%.*]]
// CHECK: omp_floor0.body:
// CHECK-NEXT: [[TMP13:%.*]] = add i32 [[OMP_FLOOR0_IV]], [[TMP9]]
-// CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[TMP13]], [[OMP_FLOOR0_TRIPCOUNT]]
+// CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[TMP13]], [[TMP4]]
// CHECK-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP5]], i32 4
// CHECK-NEXT: br label [[OMP_TILE0_PREHEADER:%.*]]
// CHECK: omp_tile0.preheader:
diff --git a/clang/test/OpenMP/irbuilder_unroll_unroll_partial_factor.c b/clang/test/OpenMP/irbuilder_unroll_unroll_partial_factor.c
index 3c24078..9a28c0c 100644
--- a/clang/test/OpenMP/irbuilder_unroll_unroll_partial_factor.c
+++ b/clang/test/OpenMP/irbuilder_unroll_unroll_partial_factor.c
@@ -75,7 +75,7 @@ void unroll_partial_factor_for(float *a, float *b, float *c, float *d) {
// CHECK-NEXT: br i1 [[OMP_FLOOR0_CMP]], label [[OMP_FLOOR0_BODY:%.*]], label [[OMP_FLOOR0_EXIT:%.*]]
// CHECK: omp_floor0.body:
// CHECK-NEXT: [[TMP12:%.*]] = add i32 [[OMP_FLOOR0_IV]], [[TMP8]]
-// CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP12]], [[OMP_FLOOR0_TRIPCOUNT]]
+// CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP12]], [[TMP3]]
// CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP4]], i32 2
// CHECK-NEXT: br label [[OMP_TILE0_PREHEADER:%.*]]
// CHECK: omp_tile0.preheader:
diff --git a/clang/test/OpenMP/irbuilder_unroll_unroll_partial_heuristic.c b/clang/test/OpenMP/irbuilder_unroll_unroll_partial_heuristic.c
index a03bd47..24d42d2 100644
--- a/clang/test/OpenMP/irbuilder_unroll_unroll_partial_heuristic.c
+++ b/clang/test/OpenMP/irbuilder_unroll_unroll_partial_heuristic.c
@@ -59,7 +59,7 @@ void unroll_unroll_partial_heuristic(float *a, float *b, float *c, float *d) {
// CHECK-NEXT: [[OMP_FLOOR0_CMP:%.*]] = icmp ult i32 [[OMP_FLOOR0_IV]], [[OMP_FLOOR0_TRIPCOUNT]]
// CHECK-NEXT: br i1 [[OMP_FLOOR0_CMP]], label [[OMP_FLOOR0_BODY:%.*]], label [[OMP_FLOOR0_EXIT:%.*]]
// CHECK: omp_floor0.body:
-// CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[OMP_FLOOR0_IV]], [[OMP_FLOOR0_TRIPCOUNT]]
+// CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[OMP_FLOOR0_IV]], [[TMP3]]
// CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 [[TMP4]], i32 8
// CHECK-NEXT: br label [[OMP_TILE0_PREHEADER:%.*]]
// CHECK: omp_tile0.preheader:
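All five irbuilder_unroll_* updates above are the same mechanical change: in omp_floor0.body, the compare that detects the last, possibly partial, tile now tests the floor IV against a temporary captured earlier ([[TMP3]]/[[TMP4]]) instead of re-binding the value FileCheck had named [[OMP_FLOOR0_TRIPCOUNT]]; the surrounding control flow is untouched. For orientation, these tests compile loops of roughly the following shape, where the constant select operand in each hunk (13, 4, 2, 8) appears to be the partial unroll factor — a sketch only, not the tests' exact sources:

void axpy(int n, float *a, float *b) {
  #pragma omp for
  #pragma omp unroll partial(13)  // factor surfaces as the 'i32 13' select operand
  for (int i = 0; i < n; ++i)
    a[i] += b[i];
}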
diff --git a/clang/test/OpenMP/nvptx_target_codegen.cpp b/clang/test/OpenMP/nvptx_target_codegen.cpp
index 0045bd4..3f6c1dc 100644
--- a/clang/test/OpenMP/nvptx_target_codegen.cpp
+++ b/clang/test/OpenMP/nvptx_target_codegen.cpp
@@ -5,6 +5,11 @@
// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -Wno-vla -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -Wno-vla -fopenmp -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -Wno-vla -fopenmp -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK2
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -no-enable-noundef-analysis -verify -Wno-vla -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -no-enable-noundef-analysis -verify -Wno-vla -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK1-OMP60
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -no-enable-noundef-analysis -verify -Wno-vla -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -no-enable-noundef-analysis -verify -Wno-vla -fopenmp -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK2-OMP60
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -no-enable-noundef-analysis -verify -Wno-vla -fopenmp -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK2-OMP60
// expected-no-diagnostics
#ifndef HEADER
@@ -23,7 +28,11 @@ struct TT {
void targetBar(int *Ptr1, int *Ptr2) {
#pragma omp target map(Ptr1[:0], Ptr2)
+#ifdef OMP60
+#pragma omp parallel num_threads(strict: 2) severity(warning) message("msg")
+#else
#pragma omp parallel num_threads(2)
+#endif
*Ptr1 = *Ptr2;
}
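Two things follow from this small source change. First, the OpenMP 6.0 path exercises the strict num_threads modifier: under the assumed 6.0 semantics, strict: 2 demands exactly two threads, and severity(warning) message("msg") downgrades a failure to meet that demand to a runtime warning carrying the given message (a sketch of the construct, not the test itself):

#pragma omp parallel num_threads(strict: 2) severity(warning) message("msg")
{ /* work that must run on exactly 2 threads, else warn with "msg" */ }

Second, the five added RUN lines plus the four #ifdef/#else/#endif lines shift every subsequent source line, which is why the remainder of this diff only renames the offloading entry points in the CHECK labels: _l25 becomes _l30 (+5 from the RUN lines), and everything after the guarded pragma moves by 9, e.g. _l39 to _l48 and _l108 to _l117.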
@@ -144,7 +153,7 @@ void unreachable_call() {
}
#endif
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l25
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30
// CHECK1-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr [[PTR1:%.*]], ptr nonnull align 8 dereferenceable(8) [[PTR2:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
@@ -154,8 +163,8 @@ void unreachable_call() {
// CHECK1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK1-NEXT: store ptr [[PTR1]], ptr [[PTR1_ADDR]], align 8
// CHECK1-NEXT: store ptr [[PTR2]], ptr [[PTR2_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 8
-// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l25_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 8, !nonnull [[META12:![0-9]+]], !align [[META13:![0-9]+]]
+// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30_kernel_environment, ptr [[DYN_PTR]])
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
@@ -164,14 +173,14 @@ void unreachable_call() {
// CHECK1-NEXT: store ptr [[PTR1_ADDR]], ptr [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
-// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 2, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l25_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 2)
+// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 2, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 2)
// CHECK1-NEXT: call void @__kmpc_target_deinit()
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l25_omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30_omp_outlined
// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 8 dereferenceable(8) [[PTR1:%.*]], ptr nonnull align 8 dereferenceable(8) [[PTR2:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -182,8 +191,8 @@ void unreachable_call() {
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[PTR1]], ptr [[PTR1_ADDR]], align 8
// CHECK1-NEXT: store ptr [[PTR2]], ptr [[PTR2_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR1_ADDR]], align 8
-// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR1_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
@@ -191,12 +200,12 @@ void unreachable_call() {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l39
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l48
// CHECK1-SAME: (ptr noalias [[DYN_PTR:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l39_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l48_kernel_environment, ptr [[DYN_PTR]])
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
@@ -206,14 +215,14 @@ void unreachable_call() {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l47
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l56
// CHECK1-SAME: (ptr noalias [[DYN_PTR:%.*]], i64 [[AA:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l47_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l56_kernel_environment, ptr [[DYN_PTR]])
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
@@ -233,7 +242,7 @@ void unreachable_call() {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l53
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l62
// CHECK1-SAME: (ptr noalias [[DYN_PTR:%.*]], i64 [[A:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], ptr nonnull align 4 dereferenceable(4) [[BN:%.*]], ptr nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], ptr nonnull align 8 dereferenceable(8) [[CN:%.*]], ptr nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
@@ -256,15 +265,15 @@ void unreachable_call() {
// CHECK1-NEXT: store i64 [[VLA3]], ptr [[VLA_ADDR4]], align 8
// CHECK1-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8
// CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META14:![0-9]+]]
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
-// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8
-// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8, !nonnull [[META12]], !align [[META14]]
+// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8
-// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8
-// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8
-// CHECK1-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l53_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
+// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
+// CHECK1-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l62_kernel_environment, ptr [[DYN_PTR]])
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP8]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
@@ -326,7 +335,7 @@ void unreachable_call() {
// CHECK1-NEXT: ret ptr [[X]]
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l90
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l99
// CHECK1-SAME: (ptr noalias [[DYN_PTR:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
@@ -339,8 +348,8 @@ void unreachable_call() {
// CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
-// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l90_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META14]]
+// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l99_kernel_environment, ptr [[DYN_PTR]])
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
@@ -367,7 +376,7 @@ void unreachable_call() {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l108
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l117
// CHECK1-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], ptr nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
@@ -385,8 +394,8 @@ void unreachable_call() {
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8
-// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8
-// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l108_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !nonnull [[META12]], !align [[META15:![0-9]+]]
+// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l117_kernel_environment, ptr [[DYN_PTR]])
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP4]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
@@ -424,7 +433,7 @@ void unreachable_call() {
// CHECK1-NEXT: [[F:%.*]] = call align 8 ptr @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: store i32 [[F1]], ptr [[F]], align 4
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
-// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[F]], ptr [[TMP2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
@@ -435,12 +444,12 @@ void unreachable_call() {
// CHECK1-NEXT: ret i32 [[TMP4]]
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l142
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l151
// CHECK1-SAME: (ptr noalias [[DYN_PTR:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l142_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l151_kernel_environment, ptr [[DYN_PTR]])
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
@@ -453,7 +462,7 @@ void unreachable_call() {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l83
// CHECK1-SAME: (ptr noalias [[DYN_PTR:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
@@ -464,8 +473,8 @@ void unreachable_call() {
// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
-// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META14]]
+// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l83_kernel_environment, ptr [[DYN_PTR]])
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
@@ -499,10 +508,10 @@ void unreachable_call() {
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[F]], ptr [[F_ADDR]], align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[F_ADDR]], align 8
-// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[F_ADDR]], align 8, !nonnull [[META12]], !align [[META14]]
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
-// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META12]], !align [[META13]]
// CHECK1-NEXT: [[TMP3:%.*]] = load double, ptr [[TMP2]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP3]]
// CHECK1-NEXT: [[CONV:%.*]] = fptosi double [[ADD]] to i32
@@ -530,7 +539,7 @@ void unreachable_call() {
// CHECK1-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l25
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30
// CHECK2-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr [[PTR1:%.*]], ptr nonnull align 4 dereferenceable(4) [[PTR2:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
@@ -540,8 +549,8 @@ void unreachable_call() {
// CHECK2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK2-NEXT: store ptr [[PTR1]], ptr [[PTR1_ADDR]], align 4
// CHECK2-NEXT: store ptr [[PTR2]], ptr [[PTR2_ADDR]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 4
-// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l25_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 4, !nonnull [[META12:![0-9]+]], !align [[META13:![0-9]+]]
+// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30_kernel_environment, ptr [[DYN_PTR]])
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
@@ -550,14 +559,14 @@ void unreachable_call() {
// CHECK2-NEXT: store ptr [[PTR1_ADDR]], ptr [[TMP3]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK2-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
-// CHECK2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 2, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l25_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
+// CHECK2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 2, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
// CHECK2-NEXT: call void @__kmpc_target_deinit()
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l25_omp_outlined
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30_omp_outlined
// CHECK2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(4) [[PTR1:%.*]], ptr nonnull align 4 dereferenceable(4) [[PTR2:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
@@ -568,8 +577,8 @@ void unreachable_call() {
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store ptr [[PTR1]], ptr [[PTR1_ADDR]], align 4
// CHECK2-NEXT: store ptr [[PTR2]], ptr [[PTR2_ADDR]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR1_ADDR]], align 4
-// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 4
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR1_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
+// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4
@@ -577,12 +586,12 @@ void unreachable_call() {
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l39
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l48
// CHECK2-SAME: (ptr noalias [[DYN_PTR:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l39_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l48_kernel_environment, ptr [[DYN_PTR]])
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
@@ -592,14 +601,14 @@ void unreachable_call() {
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l47
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l56
// CHECK2-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[AA:%.*]]) #[[ATTR4]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK2-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l47_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l56_kernel_environment, ptr [[DYN_PTR]])
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
@@ -619,7 +628,7 @@ void unreachable_call() {
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l53
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l62
// CHECK2-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[A:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], ptr nonnull align 4 dereferenceable(4) [[BN:%.*]], ptr nonnull align 8 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], ptr nonnull align 8 dereferenceable(8) [[CN:%.*]], ptr nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR4]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
@@ -642,15 +651,15 @@ void unreachable_call() {
// CHECK2-NEXT: store i32 [[VLA3]], ptr [[VLA_ADDR4]], align 4
// CHECK2-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4
// CHECK2-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
-// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4
-// CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4
+// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
+// CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !nonnull [[META12]], !align [[META14:![0-9]+]]
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4
-// CHECK2-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4
-// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4
-// CHECK2-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l53_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4, !nonnull [[META12]], !align [[META14]]
+// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !nonnull [[META12]], !align [[META14]]
+// CHECK2-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l62_kernel_environment, ptr [[DYN_PTR]])
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP8]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
@@ -712,7 +721,7 @@ void unreachable_call() {
// CHECK2-NEXT: ret ptr [[X]]
//
//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l90
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l99
// CHECK2-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR4]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
@@ -725,8 +734,8 @@ void unreachable_call() {
// CHECK2-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4
// CHECK2-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4
// CHECK2-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
-// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l90_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
+// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l99_kernel_environment, ptr [[DYN_PTR]])
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
@@ -753,7 +762,7 @@ void unreachable_call() {
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l108
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l117
// CHECK2-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], ptr nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR4]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
@@ -771,8 +780,8 @@ void unreachable_call() {
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4
-// CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4
-// CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l108_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !nonnull [[META12]], !align [[META15:![0-9]+]]
+// CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l117_kernel_environment, ptr [[DYN_PTR]])
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP4]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
@@ -810,7 +819,7 @@ void unreachable_call() {
// CHECK2-NEXT: [[F:%.*]] = call align 8 ptr @__kmpc_alloc_shared(i32 4)
// CHECK2-NEXT: store i32 [[F1]], ptr [[F]], align 4
// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
-// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
+// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META12]], !align [[META14]]
// CHECK2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: store ptr [[F]], ptr [[TMP2]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
@@ -821,12 +830,12 @@ void unreachable_call() {
// CHECK2-NEXT: ret i32 [[TMP4]]
//
//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l142
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l151
// CHECK2-SAME: (ptr noalias [[DYN_PTR:%.*]]) #[[ATTR4]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l142_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l151_kernel_environment, ptr [[DYN_PTR]])
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
@@ -839,7 +848,7 @@ void unreachable_call() {
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l83
// CHECK2-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR4]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
@@ -850,8 +859,8 @@ void unreachable_call() {
// CHECK2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK2-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4
// CHECK2-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
-// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
+// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l83_kernel_environment, ptr [[DYN_PTR]])
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
@@ -885,10 +894,10 @@ void unreachable_call() {
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store ptr [[F]], ptr [[F_ADDR]], align 4
// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[F_ADDR]], align 4
-// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[F_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
+// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META12]], !align [[META14]]
// CHECK2-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4
-// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 4
+// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META12]], !align [[META14]]
// CHECK2-NEXT: [[TMP3:%.*]] = load double, ptr [[TMP2]], align 8
// CHECK2-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP3]]
// CHECK2-NEXT: [[CONV:%.*]] = fptosi double [[ADD]] to i32
@@ -915,3 +924,775 @@ void unreachable_call() {
// CHECK2-NEXT: call void @_Z3baziRd_omp_outlined(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]], ptr [[TMP4]], ptr [[TMP6]]) #[[ATTR2:[0-9]+]]
// CHECK2-NEXT: ret void
//
+//
+// CHECK1-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30
+// CHECK1-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr [[PTR1:%.*]], ptr nonnull align 8 dereferenceable(8) [[PTR2:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK1-OMP60-NEXT: entry:
+// CHECK1-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[PTR1_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[PTR2_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[PTR1]], ptr [[PTR1_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[PTR2]], ptr [[PTR2_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 8, !nonnull [[META12:![0-9]+]], !align [[META13:![0-9]+]]
+// CHECK1-OMP60-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// CHECK1-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK1-OMP60: user_code.entry:
+// CHECK1-OMP60-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
+// CHECK1-OMP60-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
+// CHECK1-OMP60-NEXT: store ptr [[PTR1_ADDR]], ptr [[TMP3]], align 8
+// CHECK1-OMP60-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
+// CHECK1-OMP60-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
+// CHECK1-OMP60-NEXT: call void @__kmpc_parallel_60(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 2, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 2, i32 1, i32 1, ptr @.str)
+// CHECK1-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK1-OMP60-NEXT: ret void
+// CHECK1-OMP60: worker.exit:
+// CHECK1-OMP60-NEXT: ret void
+//
+//
+// CHECK1-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30_omp_outlined
+// CHECK1-OMP60-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 8 dereferenceable(8) [[PTR1:%.*]], ptr nonnull align 8 dereferenceable(8) [[PTR2:%.*]]) #[[ATTR1:[0-9]+]] {
+// CHECK1-OMP60-NEXT: entry:
+// CHECK1-OMP60-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[PTR1_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[PTR2_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[PTR1]], ptr [[PTR1_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[PTR2]], ptr [[PTR2_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR1_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
+// CHECK1-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
+// CHECK1-OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8
+// CHECK1-OMP60-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-OMP60-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
+// CHECK1-OMP60-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
+// CHECK1-OMP60-NEXT: ret void
+//
+//
+// CHECK1-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l48
+// CHECK1-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]]) #[[ATTR4:[0-9]+]] {
+// CHECK1-OMP60-NEXT: entry:
+// CHECK1-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l48_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
+// CHECK1-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK1-OMP60: user_code.entry:
+// CHECK1-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK1-OMP60-NEXT: ret void
+// CHECK1-OMP60: worker.exit:
+// CHECK1-OMP60-NEXT: ret void
+//
+//
+// CHECK1-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l56
+// CHECK1-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]], i64 [[AA:%.*]]) #[[ATTR4]] {
+// CHECK1-OMP60-NEXT: entry:
+// CHECK1-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l56_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
+// CHECK1-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK1-OMP60: user_code.entry:
+// CHECK1-OMP60-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2
+// CHECK1-OMP60-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// CHECK1-OMP60-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// CHECK1-OMP60-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// CHECK1-OMP60-NEXT: store i16 [[CONV1]], ptr [[AA_ADDR]], align 2
+// CHECK1-OMP60-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2
+// CHECK1-OMP60-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
+// CHECK1-OMP60-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 2
+// CHECK1-OMP60-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
+// CHECK1-OMP60-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2
+// CHECK1-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK1-OMP60-NEXT: ret void
+// CHECK1-OMP60: worker.exit:
+// CHECK1-OMP60-NEXT: ret void
+//
+//
+// CHECK1-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l62
+// CHECK1-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]], i64 [[A:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], ptr nonnull align 4 dereferenceable(4) [[BN:%.*]], ptr nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], ptr nonnull align 8 dereferenceable(8) [[CN:%.*]], ptr nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR4]] {
+// CHECK1-OMP60-NEXT: entry:
+// CHECK1-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-OMP60-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-OMP60-NEXT: [[BN_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
+// CHECK1-OMP60-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
+// CHECK1-OMP60-NEXT: [[CN_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[BN]], ptr [[BN_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8
+// CHECK1-OMP60-NEXT: store i64 [[VLA3]], ptr [[VLA_ADDR4]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META14:![0-9]+]]
+// CHECK1-OMP60-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8, !nonnull [[META12]], !align [[META14]]
+// CHECK1-OMP60-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
+// CHECK1-OMP60-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8
+// CHECK1-OMP60-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8
+// CHECK1-OMP60-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
+// CHECK1-OMP60-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
+// CHECK1-OMP60-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l62_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP8]], -1
+// CHECK1-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK1-OMP60: user_code.entry:
+// CHECK1-OMP60-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK1-OMP60-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP9]], 1
+// CHECK1-OMP60-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4
+// CHECK1-OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2
+// CHECK1-OMP60-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+// CHECK1-OMP60-NEXT: [[CONV:%.*]] = fpext float [[TMP10]] to double
+// CHECK1-OMP60-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00
+// CHECK1-OMP60-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float
+// CHECK1-OMP60-NEXT: store float [[CONV6]], ptr [[ARRAYIDX]], align 4
+// CHECK1-OMP60-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3
+// CHECK1-OMP60-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX7]], align 4
+// CHECK1-OMP60-NEXT: [[CONV8:%.*]] = fpext float [[TMP11]] to double
+// CHECK1-OMP60-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00
+// CHECK1-OMP60-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float
+// CHECK1-OMP60-NEXT: store float [[CONV10]], ptr [[ARRAYIDX7]], align 4
+// CHECK1-OMP60-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1
+// CHECK1-OMP60-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX11]], i64 0, i64 2
+// CHECK1-OMP60-NEXT: [[TMP12:%.*]] = load double, ptr [[ARRAYIDX12]], align 8
+// CHECK1-OMP60-NEXT: [[ADD13:%.*]] = fadd double [[TMP12]], 1.000000e+00
+// CHECK1-OMP60-NEXT: store double [[ADD13]], ptr [[ARRAYIDX12]], align 8
+// CHECK1-OMP60-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP5]]
+// CHECK1-OMP60-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP13]]
+// CHECK1-OMP60-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX14]], i64 3
+// CHECK1-OMP60-NEXT: [[TMP14:%.*]] = load double, ptr [[ARRAYIDX15]], align 8
+// CHECK1-OMP60-NEXT: [[ADD16:%.*]] = fadd double [[TMP14]], 1.000000e+00
+// CHECK1-OMP60-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8
+// CHECK1-OMP60-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0
+// CHECK1-OMP60-NEXT: [[TMP15:%.*]] = load i64, ptr [[X]], align 8
+// CHECK1-OMP60-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP15]], 1
+// CHECK1-OMP60-NEXT: store i64 [[ADD17]], ptr [[X]], align 8
+// CHECK1-OMP60-NEXT: [[Y:%.*]] = getelementptr inbounds nuw [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1
+// CHECK1-OMP60-NEXT: [[TMP16:%.*]] = load i8, ptr [[Y]], align 8
+// CHECK1-OMP60-NEXT: [[CONV18:%.*]] = sext i8 [[TMP16]] to i32
+// CHECK1-OMP60-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1
+// CHECK1-OMP60-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8
+// CHECK1-OMP60-NEXT: store i8 [[CONV20]], ptr [[Y]], align 8
+// CHECK1-OMP60-NEXT: [[CALL:%.*]] = call nonnull align 8 dereferenceable(8) ptr @_ZN2TTIxcEixEi(ptr nonnull align 8 dereferenceable(16) [[TMP7]], i32 0) #[[ATTR10:[0-9]+]]
+// CHECK1-OMP60-NEXT: [[TMP17:%.*]] = load i64, ptr [[CALL]], align 8
+// CHECK1-OMP60-NEXT: [[ADD21:%.*]] = add nsw i64 [[TMP17]], 1
+// CHECK1-OMP60-NEXT: store i64 [[ADD21]], ptr [[CALL]], align 8
+// CHECK1-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK1-OMP60-NEXT: ret void
+// CHECK1-OMP60: worker.exit:
+// CHECK1-OMP60-NEXT: ret void
+//
+//
+// CHECK1-OMP60-LABEL: define {{[^@]+}}@_ZN2TTIxcEixEi
+// CHECK1-OMP60-SAME: (ptr nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 [[I:%.*]]) #[[ATTR5:[0-9]+]] comdat align 2 {
+// CHECK1-OMP60-NEXT: entry:
+// CHECK1-OMP60-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-OMP60-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store i32 [[I]], ptr [[I_ADDR]], align 4
+// CHECK1-OMP60-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_TT:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK1-OMP60-NEXT: ret ptr [[X]]
+//
+//
+// CHECK1-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l99
+// CHECK1-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR4]] {
+// CHECK1-OMP60-NEXT: entry:
+// CHECK1-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-OMP60-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-OMP60-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-OMP60-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META14]]
+// CHECK1-OMP60-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l99_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// CHECK1-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK1-OMP60: user_code.entry:
+// CHECK1-OMP60-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK1-OMP60-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 1
+// CHECK1-OMP60-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4
+// CHECK1-OMP60-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2
+// CHECK1-OMP60-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
+// CHECK1-OMP60-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// CHECK1-OMP60-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// CHECK1-OMP60-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2
+// CHECK1-OMP60-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA_ADDR]], align 1
+// CHECK1-OMP60-NEXT: [[CONV3:%.*]] = sext i8 [[TMP4]] to i32
+// CHECK1-OMP60-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
+// CHECK1-OMP60-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
+// CHECK1-OMP60-NEXT: store i8 [[CONV5]], ptr [[AAA_ADDR]], align 1
+// CHECK1-OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2
+// CHECK1-OMP60-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// CHECK1-OMP60-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP5]], 1
+// CHECK1-OMP60-NEXT: store i32 [[ADD6]], ptr [[ARRAYIDX]], align 4
+// CHECK1-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK1-OMP60-NEXT: ret void
+// CHECK1-OMP60: worker.exit:
+// CHECK1-OMP60-NEXT: ret void
+//
+//
+// CHECK1-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l117
+// CHECK1-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], ptr nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR4]] {
+// CHECK1-OMP60-NEXT: entry:
+// CHECK1-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-OMP60-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-OMP60-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
+// CHECK1-OMP60-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store i64 [[B]], ptr [[B_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8
+// CHECK1-OMP60-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !nonnull [[META12]], !align [[META15:![0-9]+]]
+// CHECK1-OMP60-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l117_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP4]], -1
+// CHECK1-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK1-OMP60: user_code.entry:
+// CHECK1-OMP60-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// CHECK1-OMP60-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP5]] to double
+// CHECK1-OMP60-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
+// CHECK1-OMP60-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK1-OMP60-NEXT: store double [[ADD]], ptr [[A]], align 8
+// CHECK1-OMP60-NEXT: [[A3:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0
+// CHECK1-OMP60-NEXT: [[TMP6:%.*]] = load double, ptr [[A3]], align 8
+// CHECK1-OMP60-NEXT: [[INC:%.*]] = fadd double [[TMP6]], 1.000000e+00
+// CHECK1-OMP60-NEXT: store double [[INC]], ptr [[A3]], align 8
+// CHECK1-OMP60-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
+// CHECK1-OMP60-NEXT: [[TMP7:%.*]] = mul nsw i64 1, [[TMP2]]
+// CHECK1-OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i64 [[TMP7]]
+// CHECK1-OMP60-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1
+// CHECK1-OMP60-NEXT: store i16 [[CONV4]], ptr [[ARRAYIDX5]], align 2
+// CHECK1-OMP60-NEXT: [[A6:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0
+// CHECK1-OMP60-NEXT: [[TMP8:%.*]] = load double, ptr [[A6]], align 8
+// CHECK1-OMP60-NEXT: [[CONV7:%.*]] = fptosi double [[TMP8]] to i32
+// CHECK1-OMP60-NEXT: [[A8:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0
+// CHECK1-OMP60-NEXT: [[CALL:%.*]] = call i32 @_Z3baziRd(i32 [[CONV7]], ptr nonnull align 8 dereferenceable(8) [[A8]]) #[[ATTR10]]
+// CHECK1-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK1-OMP60-NEXT: ret void
+// CHECK1-OMP60: worker.exit:
+// CHECK1-OMP60-NEXT: ret void
+//
+//
+// CHECK1-OMP60-LABEL: define {{[^@]+}}@_Z3baziRd
+// CHECK1-OMP60-SAME: (i32 [[F1:%.*]], ptr nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR5]] {
+// CHECK1-OMP60-NEXT: entry:
+// CHECK1-OMP60-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-OMP60-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK1-OMP60-NEXT: [[F:%.*]] = call align 8 ptr @__kmpc_alloc_shared(i64 4)
+// CHECK1-OMP60-NEXT: store i32 [[F1]], ptr [[F]], align 4
+// CHECK1-OMP60-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
+// CHECK1-OMP60-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
+// CHECK1-OMP60-NEXT: store ptr [[F]], ptr [[TMP2]], align 8
+// CHECK1-OMP60-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
+// CHECK1-OMP60-NEXT: store ptr [[TMP1]], ptr [[TMP3]], align 8
+// CHECK1-OMP60-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @_Z3baziRd_omp_outlined, ptr @_Z3baziRd_omp_outlined_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 2)
+// CHECK1-OMP60-NEXT: [[TMP4:%.*]] = load i32, ptr [[F]], align 4
+// CHECK1-OMP60-NEXT: call void @__kmpc_free_shared(ptr [[F]], i64 4)
+// CHECK1-OMP60-NEXT: ret i32 [[TMP4]]
+//
+//
+// CHECK1-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l151
+// CHECK1-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]]) #[[ATTR4]] {
+// CHECK1-OMP60-NEXT: entry:
+// CHECK1-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l151_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
+// CHECK1-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK1-OMP60: user_code.entry:
+// CHECK1-OMP60-NEXT: call void @_Z6asserti(i32 0) #[[ATTR11:[0-9]+]]
+// CHECK1-OMP60-NEXT: unreachable
+// CHECK1-OMP60: worker.exit:
+// CHECK1-OMP60-NEXT: ret void
+// CHECK1-OMP60: 1:
+// CHECK1-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK1-OMP60-NEXT: ret void
+//
+//
+// CHECK1-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l83
+// CHECK1-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR4]] {
+// CHECK1-OMP60-NEXT: entry:
+// CHECK1-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-OMP60-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-OMP60-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META14]]
+// CHECK1-OMP60-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l83_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// CHECK1-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK1-OMP60: user_code.entry:
+// CHECK1-OMP60-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK1-OMP60-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 1
+// CHECK1-OMP60-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4
+// CHECK1-OMP60-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2
+// CHECK1-OMP60-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
+// CHECK1-OMP60-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// CHECK1-OMP60-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// CHECK1-OMP60-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2
+// CHECK1-OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2
+// CHECK1-OMP60-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// CHECK1-OMP60-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP4]], 1
+// CHECK1-OMP60-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// CHECK1-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK1-OMP60-NEXT: ret void
+// CHECK1-OMP60: worker.exit:
+// CHECK1-OMP60-NEXT: ret void
+//
+//
+// CHECK1-OMP60-LABEL: define {{[^@]+}}@_Z3baziRd_omp_outlined
+// CHECK1-OMP60-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(4) [[F:%.*]], ptr nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR1]] {
+// CHECK1-OMP60-NEXT: entry:
+// CHECK1-OMP60-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[F_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[F]], ptr [[F_ADDR]], align 8
+// CHECK1-OMP60-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[F_ADDR]], align 8, !nonnull [[META12]], !align [[META14]]
+// CHECK1-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META12]], !align [[META13]]
+// CHECK1-OMP60-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// CHECK1-OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META12]], !align [[META13]]
+// CHECK1-OMP60-NEXT: [[TMP3:%.*]] = load double, ptr [[TMP2]], align 8
+// CHECK1-OMP60-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP3]]
+// CHECK1-OMP60-NEXT: [[CONV:%.*]] = fptosi double [[ADD]] to i32
+// CHECK1-OMP60-NEXT: store i32 [[CONV]], ptr [[TMP0]], align 4
+// CHECK1-OMP60-NEXT: ret void
+//
+//
+// CHECK1-OMP60-LABEL: define {{[^@]+}}@_Z3baziRd_omp_outlined_wrapper
+// CHECK1-OMP60-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR8:[0-9]+]] {
+// CHECK1-OMP60-NEXT: entry:
+// CHECK1-OMP60-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK1-OMP60-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK1-OMP60-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-OMP60-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
+// CHECK1-OMP60-NEXT: store i16 [[TMP0]], ptr [[DOTADDR]], align 2
+// CHECK1-OMP60-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
+// CHECK1-OMP60-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
+// CHECK1-OMP60-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
+// CHECK1-OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[GLOBAL_ARGS]], align 8
+// CHECK1-OMP60-NEXT: [[TMP3:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 0
+// CHECK1-OMP60-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8
+// CHECK1-OMP60-NEXT: [[TMP5:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 1
+// CHECK1-OMP60-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TMP5]], align 8
+// CHECK1-OMP60-NEXT: call void @_Z3baziRd_omp_outlined(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]], ptr [[TMP4]], ptr [[TMP6]]) #[[ATTR2:[0-9]+]]
+// CHECK1-OMP60-NEXT: ret void
+//
+//
+// CHECK2-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30
+// CHECK2-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr [[PTR1:%.*]], ptr nonnull align 4 dereferenceable(4) [[PTR2:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK2-OMP60-NEXT: entry:
+// CHECK2-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[PTR1_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[PTR2_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
+// CHECK2-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[PTR1]], ptr [[PTR1_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[PTR2]], ptr [[PTR2_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 4, !nonnull [[META12:![0-9]+]], !align [[META13:![0-9]+]]
+// CHECK2-OMP60-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// CHECK2-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK2-OMP60: user_code.entry:
+// CHECK2-OMP60-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
+// CHECK2-OMP60-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
+// CHECK2-OMP60-NEXT: store ptr [[PTR1_ADDR]], ptr [[TMP3]], align 4
+// CHECK2-OMP60-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
+// CHECK2-OMP60-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
+// CHECK2-OMP60-NEXT: call void @__kmpc_parallel_60(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 2, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2, i32 1, i32 1, ptr @.str)
+// CHECK2-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK2-OMP60-NEXT: ret void
+// CHECK2-OMP60: worker.exit:
+// CHECK2-OMP60-NEXT: ret void
+//
+//
+// CHECK2-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9targetBarPiS__l30_omp_outlined
+// CHECK2-OMP60-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(4) [[PTR1:%.*]], ptr nonnull align 4 dereferenceable(4) [[PTR2:%.*]]) #[[ATTR1:[0-9]+]] {
+// CHECK2-OMP60-NEXT: entry:
+// CHECK2-OMP60-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[PTR1_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[PTR2_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[PTR1]], ptr [[PTR1_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[PTR2]], ptr [[PTR2_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR1_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
+// CHECK2-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
+// CHECK2-OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 4
+// CHECK2-OMP60-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK2-OMP60-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
+// CHECK2-OMP60-NEXT: ret void
+//
+//
+// CHECK2-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l48
+// CHECK2-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]]) #[[ATTR4:[0-9]+]] {
+// CHECK2-OMP60-NEXT: entry:
+// CHECK2-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l48_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
+// CHECK2-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK2-OMP60: user_code.entry:
+// CHECK2-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK2-OMP60-NEXT: ret void
+// CHECK2-OMP60: worker.exit:
+// CHECK2-OMP60-NEXT: ret void
+//
+//
+// CHECK2-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l56
+// CHECK2-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[AA:%.*]]) #[[ATTR4]] {
+// CHECK2-OMP60-NEXT: entry:
+// CHECK2-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l56_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
+// CHECK2-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK2-OMP60: user_code.entry:
+// CHECK2-OMP60-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2
+// CHECK2-OMP60-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// CHECK2-OMP60-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// CHECK2-OMP60-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// CHECK2-OMP60-NEXT: store i16 [[CONV1]], ptr [[AA_ADDR]], align 2
+// CHECK2-OMP60-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2
+// CHECK2-OMP60-NEXT: [[CONV2:%.*]] = sext i16 [[TMP2]] to i32
+// CHECK2-OMP60-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV2]], 2
+// CHECK2-OMP60-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
+// CHECK2-OMP60-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2
+// CHECK2-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK2-OMP60-NEXT: ret void
+// CHECK2-OMP60: worker.exit:
+// CHECK2-OMP60-NEXT: ret void
+//
+//
+// CHECK2-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l62
+// CHECK2-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[A:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], ptr nonnull align 4 dereferenceable(4) [[BN:%.*]], ptr nonnull align 8 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], ptr nonnull align 8 dereferenceable(8) [[CN:%.*]], ptr nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR4]] {
+// CHECK2-OMP60-NEXT: entry:
+// CHECK2-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[BN_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[CN_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[BN]], ptr [[BN_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[VLA3]], ptr [[VLA_ADDR4]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
+// CHECK2-OMP60-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
+// CHECK2-OMP60-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !nonnull [[META12]], !align [[META14:![0-9]+]]
+// CHECK2-OMP60-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4
+// CHECK2-OMP60-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4
+// CHECK2-OMP60-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4, !nonnull [[META12]], !align [[META14]]
+// CHECK2-OMP60-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !nonnull [[META12]], !align [[META14]]
+// CHECK2-OMP60-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l62_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP8]], -1
+// CHECK2-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK2-OMP60: user_code.entry:
+// CHECK2-OMP60-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP9]], 1
+// CHECK2-OMP60-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2
+// CHECK2-OMP60-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+// CHECK2-OMP60-NEXT: [[CONV:%.*]] = fpext float [[TMP10]] to double
+// CHECK2-OMP60-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00
+// CHECK2-OMP60-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float
+// CHECK2-OMP60-NEXT: store float [[CONV6]], ptr [[ARRAYIDX]], align 4
+// CHECK2-OMP60-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3
+// CHECK2-OMP60-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX7]], align 4
+// CHECK2-OMP60-NEXT: [[CONV8:%.*]] = fpext float [[TMP11]] to double
+// CHECK2-OMP60-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00
+// CHECK2-OMP60-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float
+// CHECK2-OMP60-NEXT: store float [[CONV10]], ptr [[ARRAYIDX7]], align 4
+// CHECK2-OMP60-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1
+// CHECK2-OMP60-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX11]], i32 0, i32 2
+// CHECK2-OMP60-NEXT: [[TMP12:%.*]] = load double, ptr [[ARRAYIDX12]], align 8
+// CHECK2-OMP60-NEXT: [[ADD13:%.*]] = fadd double [[TMP12]], 1.000000e+00
+// CHECK2-OMP60-NEXT: store double [[ADD13]], ptr [[ARRAYIDX12]], align 8
+// CHECK2-OMP60-NEXT: [[TMP13:%.*]] = mul nsw i32 1, [[TMP5]]
+// CHECK2-OMP60-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP13]]
+// CHECK2-OMP60-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX14]], i32 3
+// CHECK2-OMP60-NEXT: [[TMP14:%.*]] = load double, ptr [[ARRAYIDX15]], align 8
+// CHECK2-OMP60-NEXT: [[ADD16:%.*]] = fadd double [[TMP14]], 1.000000e+00
+// CHECK2-OMP60-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8
+// CHECK2-OMP60-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0
+// CHECK2-OMP60-NEXT: [[TMP15:%.*]] = load i64, ptr [[X]], align 8
+// CHECK2-OMP60-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP15]], 1
+// CHECK2-OMP60-NEXT: store i64 [[ADD17]], ptr [[X]], align 8
+// CHECK2-OMP60-NEXT: [[Y:%.*]] = getelementptr inbounds nuw [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1
+// CHECK2-OMP60-NEXT: [[TMP16:%.*]] = load i8, ptr [[Y]], align 8
+// CHECK2-OMP60-NEXT: [[CONV18:%.*]] = sext i8 [[TMP16]] to i32
+// CHECK2-OMP60-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1
+// CHECK2-OMP60-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8
+// CHECK2-OMP60-NEXT: store i8 [[CONV20]], ptr [[Y]], align 8
+// CHECK2-OMP60-NEXT: [[CALL:%.*]] = call nonnull align 8 dereferenceable(8) ptr @_ZN2TTIxcEixEi(ptr nonnull align 8 dereferenceable(16) [[TMP7]], i32 0) #[[ATTR10:[0-9]+]]
+// CHECK2-OMP60-NEXT: [[TMP17:%.*]] = load i64, ptr [[CALL]], align 8
+// CHECK2-OMP60-NEXT: [[ADD21:%.*]] = add nsw i64 [[TMP17]], 1
+// CHECK2-OMP60-NEXT: store i64 [[ADD21]], ptr [[CALL]], align 8
+// CHECK2-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK2-OMP60-NEXT: ret void
+// CHECK2-OMP60: worker.exit:
+// CHECK2-OMP60-NEXT: ret void
+//
+//
+// CHECK2-OMP60-LABEL: define {{[^@]+}}@_ZN2TTIxcEixEi
+// CHECK2-OMP60-SAME: (ptr nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 [[I:%.*]]) #[[ATTR5:[0-9]+]] comdat align 2 {
+// CHECK2-OMP60-NEXT: entry:
+// CHECK2-OMP60-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[I]], ptr [[I_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_TT:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK2-OMP60-NEXT: ret ptr [[X]]
+//
+//
+// CHECK2-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l99
+// CHECK2-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR4]] {
+// CHECK2-OMP60-NEXT: entry:
+// CHECK2-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
+// CHECK2-OMP60-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l99_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// CHECK2-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK2-OMP60: user_code.entry:
+// CHECK2-OMP60-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 1
+// CHECK2-OMP60-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2
+// CHECK2-OMP60-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
+// CHECK2-OMP60-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// CHECK2-OMP60-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// CHECK2-OMP60-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2
+// CHECK2-OMP60-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA_ADDR]], align 1
+// CHECK2-OMP60-NEXT: [[CONV3:%.*]] = sext i8 [[TMP4]] to i32
+// CHECK2-OMP60-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
+// CHECK2-OMP60-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8
+// CHECK2-OMP60-NEXT: store i8 [[CONV5]], ptr [[AAA_ADDR]], align 1
+// CHECK2-OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2
+// CHECK2-OMP60-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// CHECK2-OMP60-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP5]], 1
+// CHECK2-OMP60-NEXT: store i32 [[ADD6]], ptr [[ARRAYIDX]], align 4
+// CHECK2-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK2-OMP60-NEXT: ret void
+// CHECK2-OMP60: worker.exit:
+// CHECK2-OMP60-NEXT: ret void
+//
+//
+// CHECK2-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l117
+// CHECK2-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], ptr nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR4]] {
+// CHECK2-OMP60-NEXT: entry:
+// CHECK2-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[B]], ptr [[B_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4
+// CHECK2-OMP60-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !nonnull [[META12]], !align [[META15:![0-9]+]]
+// CHECK2-OMP60-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l117_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP4]], -1
+// CHECK2-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK2-OMP60: user_code.entry:
+// CHECK2-OMP60-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP5]] to double
+// CHECK2-OMP60-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
+// CHECK2-OMP60-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK2-OMP60-NEXT: store double [[ADD]], ptr [[A]], align 8
+// CHECK2-OMP60-NEXT: [[A3:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0
+// CHECK2-OMP60-NEXT: [[TMP6:%.*]] = load double, ptr [[A3]], align 8
+// CHECK2-OMP60-NEXT: [[INC:%.*]] = fadd double [[TMP6]], 1.000000e+00
+// CHECK2-OMP60-NEXT: store double [[INC]], ptr [[A3]], align 8
+// CHECK2-OMP60-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
+// CHECK2-OMP60-NEXT: [[TMP7:%.*]] = mul nsw i32 1, [[TMP2]]
+// CHECK2-OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i32 [[TMP7]]
+// CHECK2-OMP60-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1
+// CHECK2-OMP60-NEXT: store i16 [[CONV4]], ptr [[ARRAYIDX5]], align 2
+// CHECK2-OMP60-NEXT: [[A6:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0
+// CHECK2-OMP60-NEXT: [[TMP8:%.*]] = load double, ptr [[A6]], align 8
+// CHECK2-OMP60-NEXT: [[CONV7:%.*]] = fptosi double [[TMP8]] to i32
+// CHECK2-OMP60-NEXT: [[A8:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0
+// CHECK2-OMP60-NEXT: [[CALL:%.*]] = call i32 @_Z3baziRd(i32 [[CONV7]], ptr nonnull align 8 dereferenceable(8) [[A8]]) #[[ATTR10]]
+// CHECK2-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK2-OMP60-NEXT: ret void
+// CHECK2-OMP60: worker.exit:
+// CHECK2-OMP60-NEXT: ret void
+//
+//
+// CHECK2-OMP60-LABEL: define {{[^@]+}}@_Z3baziRd
+// CHECK2-OMP60-SAME: (i32 [[F1:%.*]], ptr nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR5]] {
+// CHECK2-OMP60-NEXT: entry:
+// CHECK2-OMP60-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
+// CHECK2-OMP60-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK2-OMP60-NEXT: [[F:%.*]] = call align 8 ptr @__kmpc_alloc_shared(i32 4)
+// CHECK2-OMP60-NEXT: store i32 [[F1]], ptr [[F]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META12]], !align [[META14]]
+// CHECK2-OMP60-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
+// CHECK2-OMP60-NEXT: store ptr [[F]], ptr [[TMP2]], align 4
+// CHECK2-OMP60-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
+// CHECK2-OMP60-NEXT: store ptr [[TMP1]], ptr [[TMP3]], align 4
+// CHECK2-OMP60-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @_Z3baziRd_omp_outlined, ptr @_Z3baziRd_omp_outlined_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
+// CHECK2-OMP60-NEXT: [[TMP4:%.*]] = load i32, ptr [[F]], align 4
+// CHECK2-OMP60-NEXT: call void @__kmpc_free_shared(ptr [[F]], i32 4)
+// CHECK2-OMP60-NEXT: ret i32 [[TMP4]]
+//
+//
+// CHECK2-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l151
+// CHECK2-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]]) #[[ATTR4]] {
+// CHECK2-OMP60-NEXT: entry:
+// CHECK2-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l151_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
+// CHECK2-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK2-OMP60: user_code.entry:
+// CHECK2-OMP60-NEXT: call void @_Z6asserti(i32 0) #[[ATTR11:[0-9]+]]
+// CHECK2-OMP60-NEXT: unreachable
+// CHECK2-OMP60: worker.exit:
+// CHECK2-OMP60-NEXT: ret void
+// CHECK2-OMP60: 1:
+// CHECK2-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK2-OMP60-NEXT: ret void
+//
+//
+// CHECK2-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l83
+// CHECK2-OMP60-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR4]] {
+// CHECK2-OMP60-NEXT: entry:
+// CHECK2-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
+// CHECK2-OMP60-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l83_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// CHECK2-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK2-OMP60: user_code.entry:
+// CHECK2-OMP60-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 1
+// CHECK2-OMP60-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2
+// CHECK2-OMP60-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
+// CHECK2-OMP60-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// CHECK2-OMP60-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// CHECK2-OMP60-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2
+// CHECK2-OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2
+// CHECK2-OMP60-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// CHECK2-OMP60-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP4]], 1
+// CHECK2-OMP60-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// CHECK2-OMP60-NEXT: call void @__kmpc_target_deinit()
+// CHECK2-OMP60-NEXT: ret void
+// CHECK2-OMP60: worker.exit:
+// CHECK2-OMP60-NEXT: ret void
+//
+//
+// CHECK2-OMP60-LABEL: define {{[^@]+}}@_Z3baziRd_omp_outlined
+// CHECK2-OMP60-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(4) [[F:%.*]], ptr nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR1]] {
+// CHECK2-OMP60-NEXT: entry:
+// CHECK2-OMP60-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[F_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[F]], ptr [[F_ADDR]], align 4
+// CHECK2-OMP60-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
+// CHECK2-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[F_ADDR]], align 4, !nonnull [[META12]], !align [[META13]]
+// CHECK2-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META12]], !align [[META14]]
+// CHECK2-OMP60-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4
+// CHECK2-OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META12]], !align [[META14]]
+// CHECK2-OMP60-NEXT: [[TMP3:%.*]] = load double, ptr [[TMP2]], align 8
+// CHECK2-OMP60-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP3]]
+// CHECK2-OMP60-NEXT: [[CONV:%.*]] = fptosi double [[ADD]] to i32
+// CHECK2-OMP60-NEXT: store i32 [[CONV]], ptr [[TMP0]], align 4
+// CHECK2-OMP60-NEXT: ret void
+//
+//
+// CHECK2-OMP60-LABEL: define {{[^@]+}}@_Z3baziRd_omp_outlined_wrapper
+// CHECK2-OMP60-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR8:[0-9]+]] {
+// CHECK2-OMP60-NEXT: entry:
+// CHECK2-OMP60-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK2-OMP60-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-OMP60-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 4
+// CHECK2-OMP60-NEXT: store i16 [[TMP0]], ptr [[DOTADDR]], align 2
+// CHECK2-OMP60-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
+// CHECK2-OMP60-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
+// CHECK2-OMP60-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
+// CHECK2-OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[GLOBAL_ARGS]], align 4
+// CHECK2-OMP60-NEXT: [[TMP3:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i32 0
+// CHECK2-OMP60-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 4
+// CHECK2-OMP60-NEXT: [[TMP5:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i32 1
+// CHECK2-OMP60-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TMP5]], align 4
+// CHECK2-OMP60-NEXT: call void @_Z3baziRd_omp_outlined(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]], ptr [[TMP4]], ptr [[TMP6]]) #[[ATTR2:[0-9]+]]
+// CHECK2-OMP60-NEXT: ret void
+//
diff --git a/clang/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp b/clang/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp
index f92ce4e..dc9a2b7 100644
--- a/clang/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp
+++ b/clang/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp
@@ -1,10 +1,16 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
-// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK1
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=OMP45_1
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
-// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK2
-// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK2
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=OMP45_2
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=OMP45_2
+
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefixes=OMP60_1
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefixes=OMP60_2
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefixes=OMP60_2
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK1
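The check bodies in this test are machine-generated, so after a RUN-line and prefix reshuffle like the one above they are normally regenerated rather than hand-edited. A sketch of the usual refresh step (assuming an LLVM build tree at build/; the script and its stored UTC_ARGS are the ones named in the NOTE line at the top of this file):

    python3 llvm/utils/update_cc_test_checks.py --llvm-bin build/bin \
        clang/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp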
@@ -26,6 +32,13 @@ tx ftemplate(int n) {
{
aa += 1;
}
+ #ifdef OMP60
+ char str[] = "msg";
+ #pragma omp target parallel map(tofrom: aa) num_threads(strict: 1024) severity(warning) message(str)
+ {
+ aa += 1;
+ }
+ #endif
#pragma omp target parallel map(tofrom:a, aa, b) if(target: n>40) num_threads(n)
{
@@ -33,6 +46,15 @@ tx ftemplate(int n) {
aa += 1;
b[2] += 1;
}
+ #ifdef OMP60
+ const char *str1 = "msg1";
+ #pragma omp target parallel map(tofrom:a, aa, b) if(target: n>40) num_threads(strict: n) severity(warning) message(str1)
+ {
+ a += 1;
+ aa += 1;
+ b[2] += 1;
+ }
+ #endif
return a;
}
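The two guarded blocks added above exercise the OpenMP 6.0 extensions to num_threads: the strict: modifier requests exactly the given thread count, and severity()/message() select the diagnostic the runtime should raise if that count cannot be provided (warning lets execution continue; fatal would abort). In the device checks below this surfaces as calls to __kmpc_parallel_60 instead of __kmpc_parallel_51, with extra trailing operands that, judging from the OMP60_* check lines, carry the strict flag, the severity, and the message pointer. A minimal standalone sketch of the same construct, illustrative only: it assumes a clang with -fopenmp -fopenmp-version=60 and an NVPTX offloading toolchain, the names are invented, and the body is kept race-free, unlike the codegen-only test above where data races are irrelevant:

    #include <cstdio>

    int main() {
      short aa = 0;
      const char *msg = "strict num_threads request not satisfied";
      // OpenMP 6.0: demand exactly 64 threads; if the runtime cannot honor
      // the request, it emits a warning carrying msg instead of silently
      // clamping the team size.
    #pragma omp target parallel map(tofrom: aa) num_threads(strict: 64) \
        severity(warning) message(msg)
      {
    #pragma omp masked // only the primary thread updates aa, avoiding a race
        aa += 1;
      }
      printf("aa = %d\n", aa); // expected: aa = 1
      return 0;
    }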
@@ -46,7 +68,675 @@ int bar(int n){
}
#endif
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l25
+// OMP45_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31
+// OMP45_1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
+// OMP45_1-NEXT: entry:
+// OMP45_1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8
+// OMP45_1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// OMP45_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
+// OMP45_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META6:![0-9]+]], !align [[META7:![0-9]+]]
+// OMP45_1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_kernel_environment, ptr [[DYN_PTR]])
+// OMP45_1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// OMP45_1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP45_1: user_code.entry:
+// OMP45_1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
+// OMP45_1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
+// OMP45_1-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8
+// OMP45_1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 1)
+// OMP45_1-NEXT: call void @__kmpc_target_deinit()
+// OMP45_1-NEXT: ret void
+// OMP45_1: worker.exit:
+// OMP45_1-NEXT: ret void
+//
+//
+// OMP45_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined
+// OMP45_1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
+// OMP45_1-NEXT: entry:
+// OMP45_1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// OMP45_1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// OMP45_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
+// OMP45_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META6]], !align [[META7]]
+// OMP45_1-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// OMP45_1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// OMP45_1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// OMP45_1-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// OMP45_1-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// OMP45_1-NEXT: ret void
+//
+//
+// OMP45_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
+// OMP45_1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
+// OMP45_1-NEXT: entry:
+// OMP45_1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// OMP45_1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8
+// OMP45_1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// OMP45_1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// OMP45_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
+// OMP45_1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
+// OMP45_1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// OMP45_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META6]], !align [[META8:![0-9]+]]
+// OMP45_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META6]], !align [[META7]]
+// OMP45_1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META6]], !align [[META8]]
+// OMP45_1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_kernel_environment, ptr [[DYN_PTR]])
+// OMP45_1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// OMP45_1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP45_1: user_code.entry:
+// OMP45_1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// OMP45_1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// OMP45_1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
+// OMP45_1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8
+// OMP45_1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
+// OMP45_1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
+// OMP45_1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
+// OMP45_1-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 8
+// OMP45_1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 3)
+// OMP45_1-NEXT: call void @__kmpc_target_deinit()
+// OMP45_1-NEXT: ret void
+// OMP45_1: worker.exit:
+// OMP45_1-NEXT: ret void
+//
+//
+// OMP45_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined
+// OMP45_1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// OMP45_1-NEXT: entry:
+// OMP45_1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
+// OMP45_1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// OMP45_1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// OMP45_1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// OMP45_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
+// OMP45_1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
+// OMP45_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META6]], !align [[META8]]
+// OMP45_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META6]], !align [[META7]]
+// OMP45_1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META6]], !align [[META8]]
+// OMP45_1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// OMP45_1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// OMP45_1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// OMP45_1-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// OMP45_1-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// OMP45_1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// OMP45_1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// OMP45_1-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// OMP45_1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i64 0, i64 2
+// OMP45_1-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// OMP45_1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// OMP45_1-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// OMP45_1-NEXT: ret void
+//
+//
+// OMP45_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31
+// OMP45_2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
+// OMP45_2-NEXT: entry:
+// OMP45_2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 4
+// OMP45_2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// OMP45_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
+// OMP45_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META6:![0-9]+]], !align [[META7:![0-9]+]]
+// OMP45_2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_kernel_environment, ptr [[DYN_PTR]])
+// OMP45_2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// OMP45_2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP45_2: user_code.entry:
+// OMP45_2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
+// OMP45_2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
+// OMP45_2-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 4
+// OMP45_2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 1)
+// OMP45_2-NEXT: call void @__kmpc_target_deinit()
+// OMP45_2-NEXT: ret void
+// OMP45_2: worker.exit:
+// OMP45_2-NEXT: ret void
+//
+//
+// OMP45_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined
+// OMP45_2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
+// OMP45_2-NEXT: entry:
+// OMP45_2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// OMP45_2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// OMP45_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
+// OMP45_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META6]], !align [[META7]]
+// OMP45_2-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// OMP45_2-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// OMP45_2-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// OMP45_2-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// OMP45_2-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// OMP45_2-NEXT: ret void
+//
+//
+// OMP45_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
+// OMP45_2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
+// OMP45_2-NEXT: entry:
+// OMP45_2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// OMP45_2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 4
+// OMP45_2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// OMP45_2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
+// OMP45_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
+// OMP45_2-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
+// OMP45_2-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// OMP45_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META6]], !align [[META8:![0-9]+]]
+// OMP45_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META6]], !align [[META7]]
+// OMP45_2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META6]], !align [[META8]]
+// OMP45_2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_kernel_environment, ptr [[DYN_PTR]])
+// OMP45_2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// OMP45_2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP45_2: user_code.entry:
+// OMP45_2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// OMP45_2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// OMP45_2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
+// OMP45_2-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 4
+// OMP45_2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
+// OMP45_2-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4
+// OMP45_2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
+// OMP45_2-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 4
+// OMP45_2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 3)
+// OMP45_2-NEXT: call void @__kmpc_target_deinit()
+// OMP45_2-NEXT: ret void
+// OMP45_2: worker.exit:
+// OMP45_2-NEXT: ret void
+//
+//
+// OMP45_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined
+// OMP45_2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// OMP45_2-NEXT: entry:
+// OMP45_2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
+// OMP45_2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// OMP45_2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// OMP45_2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
+// OMP45_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
+// OMP45_2-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
+// OMP45_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META6]], !align [[META8]]
+// OMP45_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META6]], !align [[META7]]
+// OMP45_2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META6]], !align [[META8]]
+// OMP45_2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// OMP45_2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// OMP45_2-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// OMP45_2-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// OMP45_2-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// OMP45_2-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// OMP45_2-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// OMP45_2-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// OMP45_2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i32 0, i32 2
+// OMP45_2-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// OMP45_2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// OMP45_2-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// OMP45_2-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31
+// OMP60_1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8
+// OMP60_1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META8:![0-9]+]], !align [[META9:![0-9]+]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_kernel_environment, ptr [[DYN_PTR]])
+// OMP60_1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// OMP60_1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_1: user_code.entry:
+// OMP60_1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
+// OMP60_1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
+// OMP60_1-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8
+// OMP60_1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 1)
+// OMP60_1-NEXT: call void @__kmpc_target_deinit()
+// OMP60_1-NEXT: ret void
+// OMP60_1: worker.exit:
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined
+// OMP60_1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META8]], !align [[META9]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// OMP60_1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// OMP60_1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_1-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// OMP60_1-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37
+// OMP60_1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8
+// OMP60_1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META8]], !align [[META9]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META8]]
+// OMP60_1-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// OMP60_1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37_kernel_environment, ptr [[DYN_PTR]])
+// OMP60_1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
+// OMP60_1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_1: user_code.entry:
+// OMP60_1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// OMP60_1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
+// OMP60_1-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
+// OMP60_1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META8]]
+// OMP60_1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP5]], i64 0, i64 0
+// OMP60_1-NEXT: call void @__kmpc_parallel_60(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 1, i32 1, i32 1, ptr [[ARRAYDECAY]])
+// OMP60_1-NEXT: call void @__kmpc_target_deinit()
+// OMP60_1-NEXT: ret void
+// OMP60_1: worker.exit:
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37_omp_outlined
+// OMP60_1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META8]], !align [[META9]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// OMP60_1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// OMP60_1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_1-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// OMP60_1-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
+// OMP60_1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// OMP60_1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8
+// OMP60_1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
+// OMP60_1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META8]], !align [[META10:![0-9]+]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META8]], !align [[META9]]
+// OMP60_1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META8]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_kernel_environment, ptr [[DYN_PTR]])
+// OMP60_1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// OMP60_1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_1: user_code.entry:
+// OMP60_1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// OMP60_1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// OMP60_1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
+// OMP60_1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8
+// OMP60_1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
+// OMP60_1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
+// OMP60_1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
+// OMP60_1-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 8
+// OMP60_1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 3)
+// OMP60_1-NEXT: call void @__kmpc_target_deinit()
+// OMP60_1-NEXT: ret void
+// OMP60_1: worker.exit:
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined
+// OMP60_1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META8]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META8]], !align [[META9]]
+// OMP60_1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META8]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// OMP60_1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// OMP60_1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// OMP60_1-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// OMP60_1-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// OMP60_1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// OMP60_1-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// OMP60_1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i64 0, i64 2
+// OMP60_1-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// OMP60_1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// OMP60_1-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51
+// OMP60_1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR4]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// OMP60_1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8
+// OMP60_1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
+// OMP60_1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META8]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META8]], !align [[META9]]
+// OMP60_1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META8]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51_kernel_environment, ptr [[DYN_PTR]])
+// OMP60_1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// OMP60_1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_1: user_code.entry:
+// OMP60_1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// OMP60_1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// OMP60_1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
+// OMP60_1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8
+// OMP60_1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
+// OMP60_1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
+// OMP60_1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
+// OMP60_1-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 8
+// OMP60_1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// OMP60_1-NEXT: call void @__kmpc_parallel_60(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 3, i32 1, i32 1, ptr [[TMP9]])
+// OMP60_1-NEXT: call void @__kmpc_target_deinit()
+// OMP60_1-NEXT: ret void
+// OMP60_1: worker.exit:
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51_omp_outlined
+// OMP60_1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// OMP60_1-NEXT: entry:
+// OMP60_1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
+// OMP60_1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
+// OMP60_1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
+// OMP60_1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META8]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META8]], !align [[META9]]
+// OMP60_1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META8]], !align [[META10]]
+// OMP60_1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// OMP60_1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// OMP60_1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// OMP60_1-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// OMP60_1-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// OMP60_1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// OMP60_1-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// OMP60_1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i64 0, i64 2
+// OMP60_1-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// OMP60_1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// OMP60_1-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// OMP60_1-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31
+// OMP60_2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 4
+// OMP60_2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META8:![0-9]+]], !align [[META9:![0-9]+]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_kernel_environment, ptr [[DYN_PTR]])
+// OMP60_2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// OMP60_2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_2: user_code.entry:
+// OMP60_2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
+// OMP60_2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
+// OMP60_2-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 4
+// OMP60_2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 1)
+// OMP60_2-NEXT: call void @__kmpc_target_deinit()
+// OMP60_2-NEXT: ret void
+// OMP60_2: worker.exit:
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined
+// OMP60_2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META8]], !align [[META9]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// OMP60_2-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// OMP60_2-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_2-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// OMP60_2-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37
+// OMP60_2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 4
+// OMP60_2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META8]], !align [[META9]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !nonnull [[META8]]
+// OMP60_2-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4
+// OMP60_2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37_kernel_environment, ptr [[DYN_PTR]])
+// OMP60_2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
+// OMP60_2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_2: user_code.entry:
+// OMP60_2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// OMP60_2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
+// OMP60_2-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
+// OMP60_2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META8]]
+// OMP60_2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP5]], i32 0, i32 0
+// OMP60_2-NEXT: call void @__kmpc_parallel_60(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 1, i32 1, i32 1, ptr [[ARRAYDECAY]])
+// OMP60_2-NEXT: call void @__kmpc_target_deinit()
+// OMP60_2-NEXT: ret void
+// OMP60_2: worker.exit:
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l37_omp_outlined
+// OMP60_2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META8]], !align [[META9]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
+// OMP60_2-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// OMP60_2-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_2-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
+// OMP60_2-NEXT: store i16 [[CONV1]], ptr [[TMP0]], align 2
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
+// OMP60_2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// OMP60_2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 4
+// OMP60_2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
+// OMP60_2-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META8]], !align [[META10:![0-9]+]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META8]], !align [[META9]]
+// OMP60_2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META8]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_kernel_environment, ptr [[DYN_PTR]])
+// OMP60_2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// OMP60_2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_2: user_code.entry:
+// OMP60_2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// OMP60_2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// OMP60_2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
+// OMP60_2-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 4
+// OMP60_2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
+// OMP60_2-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4
+// OMP60_2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
+// OMP60_2-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 4
+// OMP60_2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 3)
+// OMP60_2-NEXT: call void @__kmpc_target_deinit()
+// OMP60_2-NEXT: ret void
+// OMP60_2: worker.exit:
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined
+// OMP60_2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META8]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META8]], !align [[META9]]
+// OMP60_2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META8]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// OMP60_2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// OMP60_2-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// OMP60_2-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// OMP60_2-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// OMP60_2-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_2-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// OMP60_2-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// OMP60_2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i32 0, i32 2
+// OMP60_2-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// OMP60_2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// OMP60_2-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51
+// OMP60_2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR4]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// OMP60_2-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 4
+// OMP60_2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
+// OMP60_2-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META8]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META8]], !align [[META9]]
+// OMP60_2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META8]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51_kernel_environment, ptr [[DYN_PTR]])
+// OMP60_2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
+// OMP60_2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// OMP60_2: user_code.entry:
+// OMP60_2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// OMP60_2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// OMP60_2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
+// OMP60_2-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 4
+// OMP60_2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
+// OMP60_2-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4
+// OMP60_2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
+// OMP60_2-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 4
+// OMP60_2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// OMP60_2-NEXT: call void @__kmpc_parallel_60(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 3, i32 1, i32 1, ptr [[TMP9]])
+// OMP60_2-NEXT: call void @__kmpc_target_deinit()
+// OMP60_2-NEXT: ret void
+// OMP60_2: worker.exit:
+// OMP60_2-NEXT: ret void
+//
+//
+// OMP60_2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l51_omp_outlined
+// OMP60_2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
+// OMP60_2-NEXT: entry:
+// OMP60_2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
+// OMP60_2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
+// OMP60_2-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
+// OMP60_2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META8]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META8]], !align [[META9]]
+// OMP60_2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META8]], !align [[META10]]
+// OMP60_2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
+// OMP60_2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
+// OMP60_2-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
+// OMP60_2-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP1]], align 2
+// OMP60_2-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
+// OMP60_2-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
+// OMP60_2-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
+// OMP60_2-NEXT: store i16 [[CONV2]], ptr [[TMP1]], align 2
+// OMP60_2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP2]], i32 0, i32 2
+// OMP60_2-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+// OMP60_2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// OMP60_2-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// OMP60_2-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31
// CHECK1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
@@ -54,22 +744,22 @@ int bar(int n){
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8
// CHECK1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
-// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l25_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META6:![0-9]+]], !align [[META7:![0-9]+]]
+// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_kernel_environment, ptr [[DYN_PTR]])
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8
-// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l25_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 1)
+// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 1)
// CHECK1-NEXT: call void @__kmpc_target_deinit()
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l25_omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -78,7 +768,7 @@ int bar(int n){
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META6]], !align [[META7]]
// CHECK1-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
@@ -87,7 +777,7 @@ int bar(int n){
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l30
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
// CHECK1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
@@ -101,10 +791,10 @@ int bar(int n){
// CHECK1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
-// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
-// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 8
-// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l30_kernel_environment, ptr [[DYN_PTR]])
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META6]], !align [[META8:![0-9]+]]
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META6]], !align [[META7]]
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META6]], !align [[META8]]
+// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_kernel_environment, ptr [[DYN_PTR]])
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
@@ -116,14 +806,14 @@ int bar(int n){
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 8
-// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l30_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 3)
+// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 3)
// CHECK1-NEXT: call void @__kmpc_target_deinit()
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l30_omp_outlined
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -136,9 +826,9 @@ int bar(int n){
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
-// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
-// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META6]], !align [[META8]]
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 8, !nonnull [[META6]], !align [[META7]]
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META6]], !align [[META8]]
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
@@ -154,7 +844,7 @@ int bar(int n){
// CHECK1-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l25
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31
// CHECK2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
@@ -162,22 +852,22 @@ int bar(int n){
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 4
// CHECK2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
-// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l25_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META6:![0-9]+]], !align [[META7:![0-9]+]]
+// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_kernel_environment, ptr [[DYN_PTR]])
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 4
-// CHECK2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l25_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 1)
+// CHECK2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 1024, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 1)
// CHECK2-NEXT: call void @__kmpc_target_deinit()
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l25_omp_outlined
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l31_omp_outlined
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
@@ -186,7 +876,7 @@ int bar(int n){
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META6]], !align [[META7]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2
// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
@@ -195,7 +885,7 @@ int bar(int n){
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l30
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
// CHECK2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
@@ -209,10 +899,10 @@ int bar(int n){
// CHECK2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
// CHECK2-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK2-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
-// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
-// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 4
-// CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l30_kernel_environment, ptr [[DYN_PTR]])
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META6]], !align [[META8:![0-9]+]]
+// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META6]], !align [[META7]]
+// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META6]], !align [[META8]]
+// CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_kernel_environment, ptr [[DYN_PTR]])
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP3]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
@@ -224,14 +914,14 @@ int bar(int n){
// CHECK2-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
// CHECK2-NEXT: store ptr [[TMP2]], ptr [[TMP8]], align 4
-// CHECK2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l30_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 3)
+// CHECK2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 3)
// CHECK2-NEXT: call void @__kmpc_target_deinit()
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l30_omp_outlined
+// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43_omp_outlined
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
@@ -244,9 +934,9 @@ int bar(int n){
// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
// CHECK2-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
-// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
-// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 4
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META6]], !align [[META8]]
+// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[AA_ADDR]], align 4, !nonnull [[META6]], !align [[META7]]
+// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META6]], !align [[META8]]
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK2-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
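
The check-line churn in the file above combines two independent updates: the kernel names shift (l25/l30 to l31/l43) because the added OMP60 variants moved the target regions to new source lines, and loads of the captured reference parameters now carry !nonnull and !align metadata. A minimal sketch of why that metadata is sound, assuming the change simply starts annotating loads of reference-typed captures (the function below is illustrative, not part of the test):

// Hypothetical reproducer: a C++ reference is nonnull and aligned to its
// referee, so the lowered load of the stored address may be annotated, e.g.
//   %0 = load ptr, ptr %aa.addr, align 8, !nonnull !N, !align !M  ; !M: i64 2
void bump(short &aa) {
  aa += 1; // reads through the reference; alignof(short) == 2 justifies !align
}
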
diff --git a/clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp b/clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp
index 232cfda..20e344f 100644
--- a/clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp
+++ b/clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp
@@ -71,16 +71,16 @@ void test() {
// CHECK1-NEXT: [[ISTART:%.*]] = call align 16 ptr @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: [[IEND:%.*]] = call align 16 ptr @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: [[PARTIAL_SUM:%.*]] = call align 16 ptr @__kmpc_alloc_shared(i64 8)
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[IB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[IB]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
@@ -110,13 +110,13 @@ void test() {
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[IB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP]], align 4, !tbaa [[TBAA19:![0-9]+]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP2]]) #[[ATTR4]]
// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP2]], align 4, !tbaa [[TBAA19]]
// CHECK1-NEXT: call void @_ZNSt7complexIfEC1ERKfS2_(ptr nonnull align 4 dereferenceable(8) [[PARTIAL_SUM]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP2]]) #[[ATTR11:[0-9]+]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP2]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[IB]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP8]], 4
// CHECK1-NEXT: store i32 [[MUL3]], ptr [[ISTART]], align 4, !tbaa [[TBAA15]]
@@ -143,12 +143,12 @@ void test() {
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP1]])
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[IB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[IB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
// CHECK1-NEXT: call void @__kmpc_free_shared(ptr [[PARTIAL_SUM]], i64 8)
// CHECK1-NEXT: call void @__kmpc_free_shared(ptr [[IEND]], i64 4)
// CHECK1-NEXT: call void @__kmpc_free_shared(ptr [[ISTART]], i64 4)
@@ -205,14 +205,14 @@ void test() {
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ISTART_ADDR]], align 8, !tbaa [[TBAA17]]
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[IEND_ADDR]], align 8, !tbaa [[TBAA17]]
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[PARTIAL_SUM_ADDR]], align 8, !tbaa [[TBAA23]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP1]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP5]], [[TMP6]]
@@ -221,33 +221,33 @@ void test() {
// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP7]], ptr [[I]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: omp.precond.then:
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP]], align 4, !tbaa [[TBAA19]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP6]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP6]]) #[[ATTR4]]
// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP6]], align 4, !tbaa [[TBAA19]]
// CHECK1-NEXT: call void @_ZNSt7complexIfEC1ERKfS2_(ptr nonnull align 4 dereferenceable(8) [[PARTIAL_SUM5]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP6]]) #[[ATTR11]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP6]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I7]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP6]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[I7]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB3:[0-9]+]], i32 [[TMP12]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
@@ -291,20 +291,20 @@ void test() {
// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP23]], 1
// CHECK1-NEXT: [[ADD13:%.*]] = add i32 [[TMP22]], [[MUL]]
// CHECK1-NEXT: store i32 [[ADD13]], ptr [[I7]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP14]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP15]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP14]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP15]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP24]] to float
// CHECK1-NEXT: store float [[CONV]], ptr [[REF_TMP15]], align 4, !tbaa [[TBAA19]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP16]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP16]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[CONV17:%.*]] = sitofp i32 [[TMP25]] to float
// CHECK1-NEXT: store float [[CONV17]], ptr [[REF_TMP16]], align 4, !tbaa [[TBAA19]]
// CHECK1-NEXT: call void @_ZNSt7complexIfEC1ERKfS2_(ptr nonnull align 4 dereferenceable(8) [[REF_TMP14]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP15]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP16]]) #[[ATTR11]]
// CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) ptr @_ZNSt7complexIfEpLIfEERS0_RKS_IT_E(ptr nonnull align 4 dereferenceable(8) [[PARTIAL_SUM5]], ptr nonnull align 4 dereferenceable(8) [[REF_TMP14]]) #[[ATTR11]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP16]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP15]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP14]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP16]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP15]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP14]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
@@ -338,18 +338,18 @@ void test() {
// CHECK1-NEXT: [[CALL21:%.*]] = call nonnull align 4 dereferenceable(8) ptr @_ZNSt7complexIfEpLIfEERS0_RKS_IT_E(ptr nonnull align 4 dereferenceable(8) [[TMP2]], ptr nonnull align 4 dereferenceable(8) [[PARTIAL_SUM5]]) #[[ATTR11]]
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK1: .omp.reduction.done:
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I7]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[I7]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
@@ -563,16 +563,16 @@ void test() {
// CHECK1-NEXT: [[ISTART:%.*]] = call align 16 ptr @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: [[IEND:%.*]] = call align 16 ptr @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: [[PARTIAL_SUM:%.*]] = call align 16 ptr @__kmpc_alloc_shared(i64 16)
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[IB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[IB]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
@@ -602,13 +602,13 @@ void test() {
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[IB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP]], align 8, !tbaa [[TBAA36:![0-9]+]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP2]]) #[[ATTR4]]
// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP2]], align 8, !tbaa [[TBAA36]]
// CHECK1-NEXT: call void @_ZNSt7complexIdEC1ERKdS2_(ptr nonnull align 8 dereferenceable(16) [[PARTIAL_SUM]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP2]]) #[[ATTR11]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP2]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[IB]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP8]], 4
// CHECK1-NEXT: store i32 [[MUL3]], ptr [[ISTART]], align 4, !tbaa [[TBAA15]]
@@ -635,12 +635,12 @@ void test() {
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP1]])
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[IB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[IB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
// CHECK1-NEXT: call void @__kmpc_free_shared(ptr [[PARTIAL_SUM]], i64 16)
// CHECK1-NEXT: call void @__kmpc_free_shared(ptr [[IEND]], i64 4)
// CHECK1-NEXT: call void @__kmpc_free_shared(ptr [[ISTART]], i64 4)
@@ -697,14 +697,14 @@ void test() {
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ISTART_ADDR]], align 8, !tbaa [[TBAA17]]
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[IEND_ADDR]], align 8, !tbaa [[TBAA17]]
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[PARTIAL_SUM_ADDR]], align 8, !tbaa [[TBAA38]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP1]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP5]], [[TMP6]]
@@ -713,33 +713,33 @@ void test() {
// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP7]], ptr [[I]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: omp.precond.then:
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP]], align 8, !tbaa [[TBAA36]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP6]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP6]]) #[[ATTR4]]
// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP6]], align 8, !tbaa [[TBAA36]]
// CHECK1-NEXT: call void @_ZNSt7complexIdEC1ERKdS2_(ptr nonnull align 8 dereferenceable(16) [[PARTIAL_SUM5]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP6]]) #[[ATTR11]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP6]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I7]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP6]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[I7]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB3]], i32 [[TMP12]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
@@ -783,20 +783,20 @@ void test() {
// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP23]], 1
// CHECK1-NEXT: [[ADD13:%.*]] = add i32 [[TMP22]], [[MUL]]
// CHECK1-NEXT: store i32 [[ADD13]], ptr [[I7]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[REF_TMP14]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP15]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP14]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP15]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP24]] to double
// CHECK1-NEXT: store double [[CONV]], ptr [[REF_TMP15]], align 8, !tbaa [[TBAA36]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP16]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP16]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[CONV17:%.*]] = sitofp i32 [[TMP25]] to double
// CHECK1-NEXT: store double [[CONV17]], ptr [[REF_TMP16]], align 8, !tbaa [[TBAA36]]
// CHECK1-NEXT: call void @_ZNSt7complexIdEC1ERKdS2_(ptr nonnull align 8 dereferenceable(16) [[REF_TMP14]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP15]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP16]]) #[[ATTR11]]
// CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 8 dereferenceable(16) ptr @_ZNSt7complexIdEpLIdEERS0_RKS_IT_E(ptr nonnull align 8 dereferenceable(16) [[PARTIAL_SUM5]], ptr nonnull align 8 dereferenceable(16) [[REF_TMP14]]) #[[ATTR11]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP16]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP15]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[REF_TMP14]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP16]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP15]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP14]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
@@ -830,18 +830,18 @@ void test() {
// CHECK1-NEXT: [[CALL21:%.*]] = call nonnull align 8 dereferenceable(16) ptr @_ZNSt7complexIdEpLIdEERS0_RKS_IT_E(ptr nonnull align 8 dereferenceable(16) [[TMP2]], ptr nonnull align 8 dereferenceable(16) [[PARTIAL_SUM5]]) #[[ATTR11]]
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK1: .omp.reduction.done:
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I7]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[I7]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
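
Every edit in this file is the same mechanical rewrite: llvm.lifetime.start/end calls drop their leading i64 size operand, consistent with the intrinsics being assumed to now take only the alloca pointer, the object size being implied by the alloca itself. A simplified sketch of the emitted shape before and after (IR shown in comments, values illustrative):

// Hypothetical scoped local; the markers bracket the variable's live range.
void scoped_use() {
  int ib = 0;
  // old: call void @llvm.lifetime.start.p0(i64 4, ptr %ib)
  // new: call void @llvm.lifetime.start.p0(ptr %ib)
  ib += 1;
  // a matching llvm.lifetime.end.p0(ptr %ib) follows when 'ib' dies
}
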
diff --git a/clang/test/OpenMP/parallel_generic_loop_codegen.cpp b/clang/test/OpenMP/parallel_generic_loop_codegen.cpp
index f2eedc7..ce9aa5d 100644
--- a/clang/test/OpenMP/parallel_generic_loop_codegen.cpp
+++ b/clang/test/OpenMP/parallel_generic_loop_codegen.cpp
@@ -2,10 +2,16 @@
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp %s
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=IR
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -verify -triple x86_64-pc-linux-gnu -fopenmp %s
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=IR-OMP60
+
// Check same results after serialization round-trip
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-pch -o %t %s
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=IR-PCH
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-pch -o %t %s
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -verify -triple x86_64-pc-linux-gnu -fopenmp -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=IR-PCH-OMP60
+
// expected-no-diagnostics
#ifndef HEADER
@@ -16,7 +22,11 @@ int foo() {
int x = 0;
int result[N] = {0};
+ #ifdef OMP60
+ #pragma omp parallel loop num_threads(strict: N) severity(fatal) message("msg") allocate(x) private(x) collapse(2)
+ #else
#pragma omp parallel loop num_threads(N) allocate(x) private(x) collapse(2)
+ #endif
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
result[i] = i + j + x;
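
The guarded pragma above is the substantive source change: under -fopenmp-version=60 the test exercises the strict: modifier on num_threads together with severity and message, which the IR-OMP60 checks below pin down as a call to __kmpc_push_num_threads_strict ahead of the fork. A standalone sketch of the same clause combination, assuming OpenMP 6.0 support (function name and message are illustrative, not from the patch):

// Hypothetical use outside the test: require exactly 8 threads and make a
// shortfall fatal, with a custom runtime diagnostic message.
void fill(int *out, int n) {
  #pragma omp parallel loop num_threads(strict: 8) severity(fatal) message("needed exactly 8 threads")
  for (int i = 0; i < n; i++)
    out[i] = i;
}
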
@@ -54,7 +64,7 @@ int foo() {
// IR-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// IR-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// IR-NEXT: store ptr [[RESULT]], ptr [[RESULT_ADDR]], align 8
-// IR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[RESULT_ADDR]], align 8
+// IR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[RESULT_ADDR]], align 8, !nonnull [[META3:![0-9]+]], !align [[META4:![0-9]+]]
// IR-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// IR-NEXT: store i32 4095, ptr [[DOTOMP_UB]], align 4
// IR-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
@@ -123,6 +133,106 @@ int foo() {
// IR-NEXT: ret void
//
//
+// IR-OMP60-LABEL: define {{[^@]+}}@_Z3foov
+// IR-OMP60-SAME: () #[[ATTR0:[0-9]+]] {
+// IR-OMP60-NEXT: entry:
+// IR-OMP60-NEXT: [[X:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[RESULT:%.*]] = alloca [64 x i32], align 16
+// IR-OMP60-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
+// IR-OMP60-NEXT: store i32 0, ptr [[X]], align 4
+// IR-OMP60-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[RESULT]], i8 0, i64 256, i1 false)
+// IR-OMP60-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB2]], i32 [[TMP0]], i32 64, i32 2, ptr @.str)
+// IR-OMP60-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @_Z3foov.omp_outlined, ptr [[RESULT]])
+// IR-OMP60-NEXT: ret i32 0
+//
+//
+// IR-OMP60-LABEL: define {{[^@]+}}@_Z3foov.omp_outlined
+// IR-OMP60-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(256) [[RESULT:%.*]]) #[[ATTR2:[0-9]+]] {
+// IR-OMP60-NEXT: entry:
+// IR-OMP60-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// IR-OMP60-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// IR-OMP60-NEXT: [[RESULT_ADDR:%.*]] = alloca ptr, align 8
+// IR-OMP60-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[I:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[J:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// IR-OMP60-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// IR-OMP60-NEXT: store ptr [[RESULT]], ptr [[RESULT_ADDR]], align 8
+// IR-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[RESULT_ADDR]], align 8, !nonnull [[META3:![0-9]+]], !align [[META4:![0-9]+]]
+// IR-OMP60-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// IR-OMP60-NEXT: store i32 4095, ptr [[DOTOMP_UB]], align 4
+// IR-OMP60-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// IR-OMP60-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// IR-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// IR-OMP60-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// IR-OMP60-NEXT: [[DOTX__VOID_ADDR:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP2]], i64 4, ptr null)
+// IR-OMP60-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// IR-OMP60-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// IR-OMP60-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 4095
+// IR-OMP60-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// IR-OMP60: cond.true:
+// IR-OMP60-NEXT: br label [[COND_END:%.*]]
+// IR-OMP60: cond.false:
+// IR-OMP60-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// IR-OMP60-NEXT: br label [[COND_END]]
+// IR-OMP60: cond.end:
+// IR-OMP60-NEXT: [[COND:%.*]] = phi i32 [ 4095, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// IR-OMP60-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// IR-OMP60-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// IR-OMP60-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// IR-OMP60-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// IR-OMP60: omp.inner.for.cond:
+// IR-OMP60-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-OMP60-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// IR-OMP60-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// IR-OMP60-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
+// IR-OMP60: omp.inner.for.cond.cleanup:
+// IR-OMP60-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
+// IR-OMP60: omp.inner.for.body:
+// IR-OMP60-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-OMP60-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP8]], 64
+// IR-OMP60-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
+// IR-OMP60-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// IR-OMP60-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// IR-OMP60-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-OMP60-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-OMP60-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP10]], 64
+// IR-OMP60-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 64
+// IR-OMP60-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]]
+// IR-OMP60-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
+// IR-OMP60-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]]
+// IR-OMP60-NEXT: store i32 [[ADD6]], ptr [[J]], align 4
+// IR-OMP60-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
+// IR-OMP60-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4
+// IR-OMP60-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// IR-OMP60-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTX__VOID_ADDR]], align 4
+// IR-OMP60-NEXT: [[ADD8:%.*]] = add nsw i32 [[ADD7]], [[TMP13]]
+// IR-OMP60-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4
+// IR-OMP60-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
+// IR-OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
+// IR-OMP60-NEXT: store i32 [[ADD8]], ptr [[ARRAYIDX]], align 4
+// IR-OMP60-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// IR-OMP60: omp.body.continue:
+// IR-OMP60-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// IR-OMP60: omp.inner.for.inc:
+// IR-OMP60-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-OMP60-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP15]], 1
+// IR-OMP60-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4
+// IR-OMP60-NEXT: br label [[OMP_INNER_FOR_COND]]
+// IR-OMP60: omp.inner.for.end:
+// IR-OMP60-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// IR-OMP60: omp.loop.exit:
+// IR-OMP60-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// IR-OMP60-NEXT: call void @__kmpc_free(i32 [[TMP2]], ptr [[DOTX__VOID_ADDR]], ptr null)
+// IR-OMP60-NEXT: ret void
+//
+//
// IR-PCH-LABEL: define {{[^@]+}}@_Z3foov
// IR-PCH-SAME: () #[[ATTR0:[0-9]+]] {
// IR-PCH-NEXT: entry:
@@ -154,7 +264,7 @@ int foo() {
// IR-PCH-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// IR-PCH-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// IR-PCH-NEXT: store ptr [[RESULT]], ptr [[RESULT_ADDR]], align 8
-// IR-PCH-NEXT: [[TMP0:%.*]] = load ptr, ptr [[RESULT_ADDR]], align 8
+// IR-PCH-NEXT: [[TMP0:%.*]] = load ptr, ptr [[RESULT_ADDR]], align 8, !nonnull [[META3:![0-9]+]], !align [[META4:![0-9]+]]
// IR-PCH-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// IR-PCH-NEXT: store i32 4095, ptr [[DOTOMP_UB]], align 4
// IR-PCH-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
@@ -222,3 +332,103 @@ int foo() {
// IR-PCH-NEXT: call void @__kmpc_free(i32 [[TMP2]], ptr [[DOTX__VOID_ADDR]], ptr null)
// IR-PCH-NEXT: ret void
//
+//
+// IR-PCH-OMP60-LABEL: define {{[^@]+}}@_Z3foov
+// IR-PCH-OMP60-SAME: () #[[ATTR0:[0-9]+]] {
+// IR-PCH-OMP60-NEXT: entry:
+// IR-PCH-OMP60-NEXT: [[X:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[RESULT:%.*]] = alloca [64 x i32], align 16
+// IR-PCH-OMP60-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
+// IR-PCH-OMP60-NEXT: store i32 0, ptr [[X]], align 4
+// IR-PCH-OMP60-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[RESULT]], i8 0, i64 256, i1 false)
+// IR-PCH-OMP60-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB2]], i32 [[TMP0]], i32 64, i32 2, ptr @.str)
+// IR-PCH-OMP60-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @_Z3foov.omp_outlined, ptr [[RESULT]])
+// IR-PCH-OMP60-NEXT: ret i32 0
+//
+//
+// IR-PCH-OMP60-LABEL: define {{[^@]+}}@_Z3foov.omp_outlined
+// IR-PCH-OMP60-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(256) [[RESULT:%.*]]) #[[ATTR2:[0-9]+]] {
+// IR-PCH-OMP60-NEXT: entry:
+// IR-PCH-OMP60-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// IR-PCH-OMP60-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// IR-PCH-OMP60-NEXT: [[RESULT_ADDR:%.*]] = alloca ptr, align 8
+// IR-PCH-OMP60-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[I:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[J:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// IR-PCH-OMP60-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// IR-PCH-OMP60-NEXT: store ptr [[RESULT]], ptr [[RESULT_ADDR]], align 8
+// IR-PCH-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[RESULT_ADDR]], align 8, !nonnull [[META3:![0-9]+]], !align [[META4:![0-9]+]]
+// IR-PCH-OMP60-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// IR-PCH-OMP60-NEXT: store i32 4095, ptr [[DOTOMP_UB]], align 4
+// IR-PCH-OMP60-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// IR-PCH-OMP60-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// IR-PCH-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// IR-PCH-OMP60-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// IR-PCH-OMP60-NEXT: [[DOTX__VOID_ADDR:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP2]], i64 4, ptr null)
+// IR-PCH-OMP60-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// IR-PCH-OMP60-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// IR-PCH-OMP60-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 4095
+// IR-PCH-OMP60-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// IR-PCH-OMP60: cond.true:
+// IR-PCH-OMP60-NEXT: br label [[COND_END:%.*]]
+// IR-PCH-OMP60: cond.false:
+// IR-PCH-OMP60-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// IR-PCH-OMP60-NEXT: br label [[COND_END]]
+// IR-PCH-OMP60: cond.end:
+// IR-PCH-OMP60-NEXT: [[COND:%.*]] = phi i32 [ 4095, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// IR-PCH-OMP60-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// IR-PCH-OMP60-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// IR-PCH-OMP60-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// IR-PCH-OMP60-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// IR-PCH-OMP60: omp.inner.for.cond:
+// IR-PCH-OMP60-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-PCH-OMP60-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// IR-PCH-OMP60-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// IR-PCH-OMP60-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
+// IR-PCH-OMP60: omp.inner.for.cond.cleanup:
+// IR-PCH-OMP60-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
+// IR-PCH-OMP60: omp.inner.for.body:
+// IR-PCH-OMP60-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-PCH-OMP60-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP8]], 64
+// IR-PCH-OMP60-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
+// IR-PCH-OMP60-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// IR-PCH-OMP60-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// IR-PCH-OMP60-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-PCH-OMP60-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-PCH-OMP60-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP10]], 64
+// IR-PCH-OMP60-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 64
+// IR-PCH-OMP60-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]]
+// IR-PCH-OMP60-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
+// IR-PCH-OMP60-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]]
+// IR-PCH-OMP60-NEXT: store i32 [[ADD6]], ptr [[J]], align 4
+// IR-PCH-OMP60-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
+// IR-PCH-OMP60-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4
+// IR-PCH-OMP60-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// IR-PCH-OMP60-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTX__VOID_ADDR]], align 4
+// IR-PCH-OMP60-NEXT: [[ADD8:%.*]] = add nsw i32 [[ADD7]], [[TMP13]]
+// IR-PCH-OMP60-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4
+// IR-PCH-OMP60-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
+// IR-PCH-OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
+// IR-PCH-OMP60-NEXT: store i32 [[ADD8]], ptr [[ARRAYIDX]], align 4
+// IR-PCH-OMP60-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// IR-PCH-OMP60: omp.body.continue:
+// IR-PCH-OMP60-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// IR-PCH-OMP60: omp.inner.for.inc:
+// IR-PCH-OMP60-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-PCH-OMP60-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP15]], 1
+// IR-PCH-OMP60-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4
+// IR-PCH-OMP60-NEXT: br label [[OMP_INNER_FOR_COND]]
+// IR-PCH-OMP60: omp.inner.for.end:
+// IR-PCH-OMP60-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// IR-PCH-OMP60: omp.loop.exit:
+// IR-PCH-OMP60-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// IR-PCH-OMP60-NEXT: call void @__kmpc_free(i32 [[TMP2]], ptr [[DOTX__VOID_ADDR]], ptr null)
+// IR-PCH-OMP60-NEXT: ret void
+//
diff --git a/clang/test/OpenMP/parallel_message_messages.cpp b/clang/test/OpenMP/parallel_message_messages.cpp
index 470fadc..ea8fa23 100644
--- a/clang/test/OpenMP/parallel_message_messages.cpp
+++ b/clang/test/OpenMP/parallel_message_messages.cpp
@@ -8,6 +8,9 @@ T tmain(T argc, S **argv) {
// Correct usage
#pragma omp parallel message("correct message")
+ // argv[0] is dependent here: the enclosing template is not yet instantiated.
+ #pragma omp parallel message(argv[0]) // expected-warning {{expected string in 'clause message' - ignoring}}
+
// Missing parentheses
#pragma omp parallel message // expected-error {{expected '(' after 'message'}}
@@ -15,9 +18,8 @@ T tmain(T argc, S **argv) {
#pragma omp parallel message() // expected-error {{expected expression}}
// Non-string literal
- #pragma omp parallel message(123) // expected-warning {{expected string literal in 'clause message' - ignoring}}
- #pragma omp parallel message(argc) // expected-warning {{expected string literal in 'clause message' - ignoring}}
- #pragma omp parallel message(argv[0]) // expected-warning {{expected string literal in 'clause message' - ignoring}}
+ #pragma omp parallel message(123) // expected-warning {{expected string in 'clause message' - ignoring}}
+ #pragma omp parallel message(argc) // expected-warning {{expected string in 'clause message' - ignoring}}
// Multiple arguments
#pragma omp parallel message("msg1", "msg2") // expected-error {{expected ')'}} expected-note {{to match this '('}}
@@ -47,10 +49,10 @@ T tmain(T argc, S **argv) {
// Message clause with macro that is not a string
#define NOT_A_STRING 123
- #pragma omp parallel message(NOT_A_STRING) // expected-warning {{expected string literal in 'clause message' - ignoring}}
+ #pragma omp parallel message(NOT_A_STRING) // expected-warning {{expected string in 'clause message' - ignoring}}
// Message clause with template parameter that is not a string
- #pragma omp parallel message(N) // expected-warning {{expected string literal in 'clause message' - ignoring}}
+ #pragma omp parallel message(N) // expected-warning {{expected string in 'clause message' - ignoring}}
// Message clause with macro that is a string
#define A_STRING "macro string"
@@ -73,15 +75,29 @@ T tmain(T argc, S **argv) {
return argc;
}
+template<int N> int tmain(int argc, char **argv);
+
int main(int argc, char **argv) {
+ const char str1[] = "msg";
+ const char *str2 = "msg";
+ char str3[] = "msg";
+ char *str4 = str3;
+ char * const str5 = str3;
+
// Correct usage
#pragma omp parallel message("main correct")
+ #pragma omp parallel message(argv[0])
+ #pragma omp parallel message(str1)
+ #pragma omp parallel message(str2)
+ #pragma omp parallel message(str3)
+ #pragma omp parallel message(str4)
+ #pragma omp parallel message(str5)
// Invalid: missing string
#pragma omp parallel message() // expected-error {{expression}}
// Invalid: non-string
- #pragma omp parallel message(argc) // expected-warning {{expected string literal in 'clause message' - ignoring}}
+ #pragma omp parallel message(argc) // expected-warning {{expected string in 'clause message' - ignoring}}
foo();
diff --git a/clang/test/OpenMP/parallel_num_threads_codegen.cpp b/clang/test/OpenMP/parallel_num_threads_codegen.cpp
index de10bea..a1a6218 100644
--- a/clang/test/OpenMP/parallel_num_threads_codegen.cpp
+++ b/clang/test/OpenMP/parallel_num_threads_codegen.cpp
@@ -2,9 +2,18 @@
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -x c++ -triple %itanium_abi_triple -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefixes=CHECK,OMP60 %s
+// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -x c++ -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefixes=CHECK,OMP60 %s
+
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple %itanium_abi_triple -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
+
+// RUN: %clang_cc1 -DOMP60 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple %itanium_abi_triple -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefix SIMD-ONLY0 %s
+// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -x c++ -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
+
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
@@ -32,6 +41,14 @@ int tmain() {
foo();
#pragma omp parallel num_threads(T(23))
foo();
+#ifdef OMP60
+ char str[] = "msg";
+ const char *str1 = "msg1";
+#pragma omp parallel num_threads(strict: C) severity(fatal) message(str)
+ foo();
+#pragma omp parallel num_threads(strict: T(23)) severity(warning) message(str1)
+ foo();
+#endif
return 0;
}
@@ -42,6 +59,14 @@ int main() {
foo();
#pragma omp parallel num_threads(a)
foo();
+#ifdef OMP60
+ char str[] = "msg";
+ const char *str1 = "msg1";
+#pragma omp parallel num_threads(strict: 2) severity(fatal) message(str)
+ foo();
+#pragma omp parallel num_threads(strict: a) severity(warning) message(str1)
+ foo();
+#endif
return a + tmain<char, 5>() + tmain<S, 1>();
}
@@ -58,6 +83,13 @@ int main() {
// CHECK: [[RES:%.+]] = sext i8 [[A_VAL]] to i32
// CHECK: call {{.*}}void @__kmpc_push_num_threads(ptr [[DEF_LOC_2]], i32 [[GTID]], i32 [[RES]])
// CHECK: call {{.*}}void {{.*}} @__kmpc_fork_call(
+// OMP60: [[ARRDECAY:%.+]] = getelementptr inbounds [4 x i8], ptr [[STR:%.+]], [[INTPTR_T_TY]] 0, [[INTPTR_T_TY]] 0
+// OMP60: call void @__kmpc_push_num_threads_strict(ptr [[DEF_LOC_2]], i32 [[GTID]], i32 2, i32 2, ptr [[ARRDECAY]])
+// OMP60: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(
+// OMP60: [[A_VAL1:%.+]] = load i8, ptr [[A_ADDR]]
+// OMP60: [[RES1:%.+]] = sext i8 [[A_VAL1]] to i32
+// OMP60: call void @__kmpc_push_num_threads_strict(ptr [[DEF_LOC_2]], i32 [[GTID]], i32 [[RES1]], i32 1, ptr [[STR2:%.+]])
+// OMP60: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(
// CHECK: invoke{{.*}} [[INT_TY:i[0-9]+]] [[TMAIN_CHAR_5:@.+]]()
// CHECK: invoke{{.*}} [[INT_TY]] [[TMAIN_S_1:@.+]]()
// CHECK: call {{.*}} [[S_TY_DESTR:@.+]](ptr {{[^,]*}} [[S_ADDR]])
@@ -70,6 +102,11 @@ int main() {
// CHECK: call {{.*}}void {{.*}} @__kmpc_fork_call(
// CHECK: call {{.*}}void @__kmpc_push_num_threads(ptr [[DEF_LOC_2]], i32 [[GTID]], i32 23)
// CHECK: call {{.*}}void {{.*}} @__kmpc_fork_call(
+// OMP60: [[ARRDECAY:%.+]] = getelementptr inbounds [4 x i8], ptr [[STR:%.+]], [[INTPTR_T_TY]] 0, [[INTPTR_T_TY]] 0
+// OMP60: call {{.*}}void @__kmpc_push_num_threads_strict(ptr [[DEF_LOC_2]], i32 [[GTID]], i32 5, i32 2, ptr [[ARRDECAY]])
+// OMP60: call {{.*}}void {{.*}} @__kmpc_fork_call(
+// OMP60: call {{.*}}void @__kmpc_push_num_threads_strict(ptr [[DEF_LOC_2]], i32 [[GTID]], i32 23, i32 1, ptr [[STR1:%.+]])
+// OMP60: call {{.*}}void {{.*}} @__kmpc_fork_call(
// CHECK: ret [[INT_TY]] 0
// CHECK-NEXT: }
@@ -83,6 +120,15 @@ int main() {
// CHECK: call {{.*}}void @__kmpc_push_num_threads(ptr [[DEF_LOC_2]], i32 [[GTID]], i32 [[RES]])
// CHECK: {{(invoke|call)}} {{.*}} [[S_TY_DESTR]](ptr {{[^,]*}} [[S_TEMP]])
// CHECK: call {{.*}}void {{.*}} @__kmpc_fork_call(
+// OMP60: [[ARRDECAY:%.+]] = getelementptr inbounds [4 x i8], ptr [[STR:%.+]], [[INTPTR_T_TY]] 0, [[INTPTR_T_TY]] 0
+// OMP60: call {{.*}}void @__kmpc_push_num_threads_strict(ptr [[DEF_LOC_2]], i32 [[GTID]], i32 1, i32 2, ptr [[ARRDECAY]])
+// OMP60: call {{.*}}void {{.*}} @__kmpc_fork_call(
+// OMP60: {{(invoke|call)}} {{.*}} [[S_TY_CONSTR]](ptr {{[^,]*}} [[S_TEMP:%.+]], [[INTPTR_T_TY]] noundef [[INTPTR_T_TY_ATTR]]23)
+// OMP60: [[S_CHAR_OP1:%.+]] = invoke{{.*}} i8 [[S_TY_CHAR_OP]](ptr {{[^,]*}} [[S_TEMP]])
+// OMP60: [[RES1:%.+]] = sext {{.*}}i8 [[S_CHAR_OP1]] to i32
+// OMP60: call {{.*}}void @__kmpc_push_num_threads_strict(ptr [[DEF_LOC_2]], i32 [[GTID]], i32 [[RES1]], i32 1, ptr [[STR1:%.+]])
+// OMP60: {{(invoke|call)}} {{.*}} [[S_TY_DESTR]](ptr {{[^,]*}} [[S_TEMP]])
+// OMP60: call {{.*}}void {{.*}} @__kmpc_fork_call(
// CHECK: ret [[INT_TY]] 0
// CHECK: }
diff --git a/clang/test/OpenMP/target_default_ast.cpp b/clang/test/OpenMP/target_default_ast.cpp
new file mode 100644
index 0000000..73bc8e5
--- /dev/null
+++ b/clang/test/OpenMP/target_default_ast.cpp
@@ -0,0 +1,81 @@
+// expected-no-diagnostics
+
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=60 \
+// RUN:   -x c++ -std=c++14 -fexceptions -fcxx-exceptions \
+// RUN:   -Wno-source-uses-openmp -Wno-openmp-clauses \
+// RUN:   -ast-print %s | FileCheck %s --check-prefix=PRINT
+
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=60 \
+// RUN:   -x c++ -std=c++14 -fexceptions -fcxx-exceptions \
+// RUN:   -Wno-source-uses-openmp -Wno-openmp-clauses \
+// RUN:   -ast-dump %s | FileCheck %s --check-prefix=DUMP
+
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=60 \
+// RUN:   -x c++ -std=c++14 -fexceptions -fcxx-exceptions \
+// RUN:   -Wno-source-uses-openmp -Wno-openmp-clauses \
+// RUN:   -emit-pch -o %t %s
+
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=60 \
+// RUN:   -x c++ -std=c++14 -fexceptions -fcxx-exceptions \
+// RUN:   -Wno-source-uses-openmp -Wno-openmp-clauses \
+// RUN:   -include-pch %t -ast-print %s | FileCheck %s --check-prefix=PRINT
+
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=60 \
+// RUN:   -x c++ -std=c++14 -fexceptions -fcxx-exceptions \
+// RUN:   -Wno-source-uses-openmp -Wno-openmp-clauses \
+// RUN:   -include-pch %t -ast-dump-all %s | FileCheck %s --check-prefix=DUMP
+
+#ifndef HEADER
+#define HEADER
+
+void foo() {
+  int a;
+#pragma omp target default(firstprivate)
+  a++;
+  // PRINT: #pragma omp target default(firstprivate)
+  // PRINT-NEXT: a++;
+  // DUMP: -OMPTargetDirective
+  // DUMP-NEXT: -OMPDefaultClause
+  // DUMP-NEXT: -OMPFirstprivateClause {{.*}} <implicit>
+  // DUMP-NEXT: -DeclRefExpr {{.*}} 'a'
+
+}
+void fun() {
+  int a = 0;
+  int x = 10;
+  #pragma omp target data default(firstprivate) map(a)
+  {
+    // DUMP: -OMPTargetDataDirective
+    // DUMP-NEXT: -OMPDefaultClause
+    // DUMP-NEXT: -OMPMapClause
+    // DUMP-NEXT: -DeclRefExpr {{.*}} 'a'
+    // DUMP-NEXT: -OMPFirstprivateClause {{.*}} <implicit>
+    // DUMP-NEXT: -DeclRefExpr {{.*}} 'x'
+
+
+    x += 10;
+    a += 1;
+  }
+}
+void bar() {
+  int i = 0;
+  int j = 0;
+  int nn = 10;
+#pragma omp target default(firstprivate)
+#pragma omp teams
+#pragma teams distribute parallel for simd
+  for (j = 0; j < nn; j++) {
+    for (i = 0; i < nn; i++) {
+      ;
+    }
+  }
+
+  // PRINT: #pragma omp target default(firstprivate)
+  // DUMP: -OMPTargetDirective
+  // DUMP-NEXT: -OMPDefaultClause
+  // DUMP-NEXT: -OMPFirstprivateClause {{.*}} <implicit>
+  // DUMP-NEXT: -DeclRefExpr {{.*}} 'j'
+  // DUMP-NEXT: -DeclRefExpr {{.*}} 'nn'
+  // DUMP-NEXT: -DeclRefExpr {{.*}} 'i'
+}
+#endif
diff --git a/clang/test/OpenMP/target_default_messages.cpp b/clang/test/OpenMP/target_default_messages.cpp
new file mode 100644
index 0000000..be677df
--- /dev/null
+++ b/clang/test/OpenMP/target_default_messages.cpp
@@ -0,0 +1,50 @@
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -DOMP60 %s -Wuninitialized
+
+// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -DOMP60 %s -Wuninitialized
+
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=52 -DOMP52 %s -Wuninitialized
+
+void foo();
+
+namespace {
+static int y = 0;
+}
+static int x = 0;
+
+int main(int argc, char **argv) {
+#ifdef OMP60
+#pragma omp target default // expected-error {{expected '(' after 'default'}}
+  for (int i = 0; i < 200; i++) foo();
+#pragma omp target default( // expected-error {{expected 'none', 'shared', 'private' or 'firstprivate' in OpenMP clause 'default'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
+  for (int i = 0; i < 200; i++) foo();
+#pragma omp target default() // expected-error {{expected 'none', 'shared', 'private' or 'firstprivate' in OpenMP clause 'default'}}
+  for (int i = 0; i < 200; i++) foo();
+#pragma omp target default (none // expected-error {{expected ')'}} expected-note {{to match this '('}}
+  for (int i = 0; i < 200; i++) foo();
+#pragma omp target default(x) // expected-error {{expected 'none', 'shared', 'private' or 'firstprivate' in OpenMP clause 'default'}}
+  for (int i = 0; i < 200; i++) foo();
+#endif
+
+#ifdef OMP52
+#pragma omp target default(firstprivate) // expected-error {{unexpected OpenMP clause 'default' in directive '#pragma omp target'}}
+ for (int i = 0; i < 200; i++) {
+ ++x;
+ ++y;
+ }
+#pragma omp target default(private) // expected-error {{unexpected OpenMP clause 'default' in directive '#pragma omp target'}}
+ for (int i = 0; i < 200; i++) {
+ ++x;
+ ++y;
+ }
+
+  int j = 0, i = 0, nn = 10;
+#pragma omp target teams distribute simd default(shared) // expected-error {{unexpected OpenMP clause 'default' in directive '#pragma omp target teams distribute simd'}}
+  for (j = 0; j < nn; j++) {
+    for (i = 0; i < nn; i++) {
+      ;
+    }
+  }
+#endif
+
+ return 0;
+}
diff --git a/clang/test/OpenMP/target_map_array_of_structs_with_nested_mapper_ast_dump.cpp b/clang/test/OpenMP/target_map_array_of_structs_with_nested_mapper_ast_dump.cpp
index a584770..15b976f 100644
--- a/clang/test/OpenMP/target_map_array_of_structs_with_nested_mapper_ast_dump.cpp
+++ b/clang/test/OpenMP/target_map_array_of_structs_with_nested_mapper_ast_dump.cpp
@@ -27,7 +27,7 @@ void foo() {
// DUM-NEXT: |-OMPMapClause {{.*}}<<invalid sloc>> <implicit>
// DUM-NEXT: | |-MemberExpr {{.*}}<line:9:3> 'int' lvalue .e
// DUM-NEXT: | | `-DeclRefExpr {{.*}}<<invalid sloc>> 'D' lvalue Var {{.*}} '_s' 'D'
-// DUM-NEXT: | |-MemberExpr {{.*}}<line:10:3> 'C' lvalue .f {{.*}}
+// DUM-NEXT: | |-MemberExpr {{.*}}<line:10:3> 'C':'struct C' lvalue .f {{.*}}
// DUM-NEXT: | | `-DeclRefExpr {{.*}}<<invalid sloc>> 'D' lvalue Var {{.*}} '_s' 'D'
// DUM-NEXT: | `-MemberExpr {{.*}}<line:11:3> 'int' lvalue .h {{.*}}
// DUM-NEXT: | `-DeclRefExpr {{.*}}<<invalid sloc>> 'D' lvalue Var {{.*}} '_s' 'D'
diff --git a/clang/test/OpenMP/target_map_array_section_of_structs_with_nested_mapper_ast_dump.cpp b/clang/test/OpenMP/target_map_array_section_of_structs_with_nested_mapper_ast_dump.cpp
index b2fb8fb..bdf3dd0 100644
--- a/clang/test/OpenMP/target_map_array_section_of_structs_with_nested_mapper_ast_dump.cpp
+++ b/clang/test/OpenMP/target_map_array_section_of_structs_with_nested_mapper_ast_dump.cpp
@@ -27,7 +27,7 @@ void foo() {
// DUM-NEXT: |-OMPMapClause {{.*}}<<invalid sloc>> <implicit>
// DUM-NEXT: | |-MemberExpr {{.*}}<line:9:3> 'int' lvalue .e
// DUM-NEXT: | | `-DeclRefExpr {{.*}}<<invalid sloc>> 'D' lvalue Var {{.*}} '_s' 'D'
-// DUM-NEXT: | |-MemberExpr {{.*}}<line:10:3> 'C' lvalue .f {{.*}}
+// DUM-NEXT: | |-MemberExpr {{.*}}<line:10:3> 'C':'struct C' lvalue .f {{.*}}
// DUM-NEXT: | | `-DeclRefExpr {{.*}}<<invalid sloc>> 'D' lvalue Var {{.*}} '_s' 'D'
// DUM-NEXT: | `-MemberExpr {{.*}}<line:11:3> 'int' lvalue .h {{.*}}
// DUM-NEXT: | `-DeclRefExpr {{.*}}<<invalid sloc>> 'D' lvalue Var {{.*}} '_s' 'D'
diff --git a/clang/test/OpenMP/target_parallel_generic_loop_codegen.cpp b/clang/test/OpenMP/target_parallel_generic_loop_codegen.cpp
index b9ed9bc..6a04571 100644
--- a/clang/test/OpenMP/target_parallel_generic_loop_codegen.cpp
+++ b/clang/test/OpenMP/target_parallel_generic_loop_codegen.cpp
@@ -10,6 +10,15 @@
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-pch -o %t %s
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=IR-PCH
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host.bc
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -fopenmp -x c++ -std=c++11 -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefixes=IR-GPU-OMP60
+
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefixes=IR-OMP60
+
+// Check same results after serialization round-trip
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-pch -o %t %s
+// RUN: %clang_cc1 -DOMP60 -fopenmp-version=60 -verify -triple x86_64-pc-linux-gnu -fopenmp -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefixes=IR-PCH-OMP60
+
// expected-no-diagnostics
#ifndef HEADER
@@ -34,14 +43,22 @@ int main() {
int x = 0;
int device_result[N] = {0};
+ #ifdef OMP60
+ #pragma omp target parallel loop num_threads(strict: N) severity(warning) message("msg") uses_allocators(omp_pteam_mem_alloc) allocate(omp_pteam_mem_alloc: x) private(x) map(from: device_result)
+ for (int i = 0; i < N; i++) {
+ x = omp_get_thread_num();
+ device_result[i] = i + x;
+ }
+ #else
#pragma omp target parallel loop num_threads(N) uses_allocators(omp_pteam_mem_alloc) allocate(omp_pteam_mem_alloc: x) private(x) map(from: device_result)
for (int i = 0; i < N; i++) {
x = omp_get_thread_num();
device_result[i] = i + x;
}
+ #endif
}
#endif
-// IR-GPU-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37
+// IR-GPU-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l53
// IR-GPU-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR0:[0-9]+]] {
// IR-GPU-NEXT: entry:
// IR-GPU-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
@@ -55,8 +72,8 @@ int main() {
// IR-GPU-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
// IR-GPU-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8
// IR-GPU-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR_ASCAST]], align 8
-// IR-GPU-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8
-// IR-GPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37_kernel_environment to ptr), ptr [[DYN_PTR]])
+// IR-GPU-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8, !nonnull [[META6:![0-9]+]], !align [[META7:![0-9]+]]
+// IR-GPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l53_kernel_environment to ptr), ptr [[DYN_PTR]])
// IR-GPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// IR-GPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// IR-GPU: user_code.entry:
@@ -66,14 +83,14 @@ int main() {
// IR-GPU-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
// IR-GPU-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
// IR-GPU-NEXT: store ptr [[TMP3]], ptr [[TMP5]], align 8
-// IR-GPU-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP2]], i32 1, i32 64, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 2)
+// IR-GPU-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP2]], i32 1, i32 64, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l53_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 2)
// IR-GPU-NEXT: call void @__kmpc_target_deinit()
// IR-GPU-NEXT: ret void
// IR-GPU: worker.exit:
// IR-GPU-NEXT: ret void
//
//
-// IR-GPU-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37_omp_outlined
+// IR-GPU-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l53_omp_outlined
// IR-GPU-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR1:[0-9]+]] {
// IR-GPU-NEXT: entry:
// IR-GPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
@@ -102,7 +119,7 @@ int main() {
// IR-GPU-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
// IR-GPU-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8
// IR-GPU-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR_ASCAST]], align 8
-// IR-GPU-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8
+// IR-GPU-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8, !nonnull [[META6]], !align [[META7]]
// IR-GPU-NEXT: store i32 0, ptr [[DOTOMP_LB_ASCAST]], align 4
// IR-GPU-NEXT: store i32 63, ptr [[DOTOMP_UB_ASCAST]], align 4
// IR-GPU-NEXT: store i32 1, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
@@ -183,11 +200,11 @@ int main() {
// IR-NEXT: store i32 0, ptr [[X]], align 4
// IR-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[DEVICE_RESULT]], i8 0, i64 256, i1 false)
// IR-NEXT: [[TMP0:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
-// IR-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37(ptr [[DEVICE_RESULT]], ptr [[TMP0]]) #[[ATTR3:[0-9]+]]
+// IR-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l53(ptr [[DEVICE_RESULT]], ptr [[TMP0]]) #[[ATTR3:[0-9]+]]
// IR-NEXT: ret i32 0
//
//
-// IR-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37
+// IR-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l53
// IR-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR2:[0-9]+]] {
// IR-NEXT: entry:
// IR-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8
@@ -195,14 +212,14 @@ int main() {
// IR-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
// IR-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR]], align 8
// IR-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
-// IR-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8
+// IR-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8, !nonnull [[META3:![0-9]+]], !align [[META4:![0-9]+]]
// IR-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB2]], i32 [[TMP0]], i32 64)
// IR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
-// IR-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37.omp_outlined, ptr [[TMP1]], ptr [[TMP2]])
+// IR-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l53.omp_outlined, ptr [[TMP1]], ptr [[TMP2]])
// IR-NEXT: ret void
//
//
-// IR-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37.omp_outlined
+// IR-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l53.omp_outlined
// IR-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR2]] {
// IR-NEXT: entry:
// IR-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -220,7 +237,7 @@ int main() {
// IR-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// IR-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR]], align 8
// IR-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
-// IR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8
+// IR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8, !nonnull [[META3]], !align [[META4]]
// IR-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// IR-NEXT: store i32 63, ptr [[DOTOMP_UB]], align 4
// IR-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
@@ -290,11 +307,11 @@ int main() {
// IR-PCH-NEXT: store i32 0, ptr [[X]], align 4
// IR-PCH-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[DEVICE_RESULT]], i8 0, i64 256, i1 false)
// IR-PCH-NEXT: [[TMP0:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
-// IR-PCH-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37(ptr [[DEVICE_RESULT]], ptr [[TMP0]]) #[[ATTR3:[0-9]+]]
+// IR-PCH-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l53(ptr [[DEVICE_RESULT]], ptr [[TMP0]]) #[[ATTR3:[0-9]+]]
// IR-PCH-NEXT: ret i32 0
//
//
-// IR-PCH-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37
+// IR-PCH-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l53
// IR-PCH-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR2:[0-9]+]] {
// IR-PCH-NEXT: entry:
// IR-PCH-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8
@@ -302,14 +319,14 @@ int main() {
// IR-PCH-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
// IR-PCH-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR]], align 8
// IR-PCH-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
-// IR-PCH-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8
+// IR-PCH-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8, !nonnull [[META3:![0-9]+]], !align [[META4:![0-9]+]]
// IR-PCH-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB2]], i32 [[TMP0]], i32 64)
// IR-PCH-NEXT: [[TMP2:%.*]] = load ptr, ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
-// IR-PCH-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37.omp_outlined, ptr [[TMP1]], ptr [[TMP2]])
+// IR-PCH-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l53.omp_outlined, ptr [[TMP1]], ptr [[TMP2]])
// IR-PCH-NEXT: ret void
//
//
-// IR-PCH-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37.omp_outlined
+// IR-PCH-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l53.omp_outlined
// IR-PCH-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR2]] {
// IR-PCH-NEXT: entry:
// IR-PCH-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -327,7 +344,7 @@ int main() {
// IR-PCH-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// IR-PCH-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR]], align 8
// IR-PCH-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
-// IR-PCH-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8
+// IR-PCH-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8, !nonnull [[META3]], !align [[META4]]
// IR-PCH-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// IR-PCH-NEXT: store i32 63, ptr [[DOTOMP_UB]], align 4
// IR-PCH-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
@@ -388,3 +405,386 @@ int main() {
// IR-PCH-NEXT: call void @__kmpc_free(i32 [[TMP2]], ptr [[DOTX__VOID_ADDR]], ptr [[TMP14]])
// IR-PCH-NEXT: ret void
//
+//
+// IR-GPU-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l47
+// IR-GPU-OMP60-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0:[0-9]+]] {
+// IR-GPU-OMP60-NEXT: entry:
+// IR-GPU-OMP60-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[TMP:%.*]] = alloca ptr, align 8, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 8, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
+// IR-GPU-OMP60-NEXT: [[DEVICE_RESULT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DEVICE_RESULT_ADDR]] to ptr
+// IR-GPU-OMP60-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OMP_PTEAM_MEM_ALLOC_ADDR]] to ptr
+// IR-GPU-OMP60-NEXT: [[DOTCAPTURE_EXPR__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR__ADDR]] to ptr
+// IR-GPU-OMP60-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
+// IR-GPU-OMP60-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
+// IR-GPU-OMP60-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
+// IR-GPU-OMP60-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8
+// IR-GPU-OMP60-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR_ASCAST]], align 8
+// IR-GPU-OMP60-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8
+// IR-GPU-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8, !nonnull [[META6:![0-9]+]], !align [[META7:![0-9]+]]
+// IR-GPU-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR_ASCAST]], align 8, !nonnull [[META6]]
+// IR-GPU-OMP60-NEXT: store ptr [[TMP1]], ptr [[TMP_ASCAST]], align 8
+// IR-GPU-OMP60-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l47_kernel_environment to ptr), ptr [[DYN_PTR]])
+// IR-GPU-OMP60-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
+// IR-GPU-OMP60-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// IR-GPU-OMP60: user_code.entry:
+// IR-GPU-OMP60-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1:[0-9]+]] to ptr))
+// IR-GPU-OMP60-NEXT: [[TMP4:%.*]] = load ptr, ptr [[OMP_PTEAM_MEM_ALLOC_ADDR_ASCAST]], align 8
+// IR-GPU-OMP60-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
+// IR-GPU-OMP60-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8
+// IR-GPU-OMP60-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
+// IR-GPU-OMP60-NEXT: store ptr [[TMP4]], ptr [[TMP6]], align 8
+// IR-GPU-OMP60-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP_ASCAST]], align 8, !nonnull [[META6]]
+// IR-GPU-OMP60-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP7]], i64 0, i64 0
+// IR-GPU-OMP60-NEXT: call void @__kmpc_parallel_60(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP3]], i32 1, i32 64, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l47_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 2, i32 1, i32 1, ptr [[ARRAYDECAY]])
+// IR-GPU-OMP60-NEXT: call void @__kmpc_target_deinit()
+// IR-GPU-OMP60-NEXT: ret void
+// IR-GPU-OMP60: worker.exit:
+// IR-GPU-OMP60-NEXT: ret void
+//
+//
+// IR-GPU-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l47_omp_outlined
+// IR-GPU-OMP60-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR1:[0-9]+]] {
+// IR-GPU-OMP60-NEXT: entry:
+// IR-GPU-OMP60-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[TMP:%.*]] = alloca i32, align 4, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[I:%.*]] = alloca i32, align 4, addrspace(5)
+// IR-GPU-OMP60-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
+// IR-GPU-OMP60-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
+// IR-GPU-OMP60-NEXT: [[DEVICE_RESULT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DEVICE_RESULT_ADDR]] to ptr
+// IR-GPU-OMP60-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OMP_PTEAM_MEM_ALLOC_ADDR]] to ptr
+// IR-GPU-OMP60-NEXT: [[DOTOMP_IV_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IV]] to ptr
+// IR-GPU-OMP60-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
+// IR-GPU-OMP60-NEXT: [[DOTOMP_LB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_LB]] to ptr
+// IR-GPU-OMP60-NEXT: [[DOTOMP_UB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_UB]] to ptr
+// IR-GPU-OMP60-NEXT: [[DOTOMP_STRIDE_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_STRIDE]] to ptr
+// IR-GPU-OMP60-NEXT: [[DOTOMP_IS_LAST_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IS_LAST]] to ptr
+// IR-GPU-OMP60-NEXT: [[I_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I]] to ptr
+// IR-GPU-OMP60-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// IR-GPU-OMP60-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
+// IR-GPU-OMP60-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8
+// IR-GPU-OMP60-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR_ASCAST]], align 8
+// IR-GPU-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8, !nonnull [[META6]], !align [[META7]]
+// IR-GPU-OMP60-NEXT: store i32 0, ptr [[DOTOMP_LB_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: store i32 63, ptr [[DOTOMP_UB_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: store i32 1, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
+// IR-GPU-OMP60-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// IR-GPU-OMP60-NEXT: call void @__kmpc_for_static_init_4(ptr addrspacecast (ptr addrspace(1) @[[GLOB2:[0-9]+]] to ptr), i32 [[TMP2]], i32 33, ptr [[DOTOMP_IS_LAST_ASCAST]], ptr [[DOTOMP_LB_ASCAST]], ptr [[DOTOMP_UB_ASCAST]], ptr [[DOTOMP_STRIDE_ASCAST]], i32 1, i32 1)
+// IR-GPU-OMP60-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
+// IR-GPU-OMP60: omp.dispatch.cond:
+// IR-GPU-OMP60-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 63
+// IR-GPU-OMP60-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// IR-GPU-OMP60: cond.true:
+// IR-GPU-OMP60-NEXT: br label [[COND_END:%.*]]
+// IR-GPU-OMP60: cond.false:
+// IR-GPU-OMP60-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: br label [[COND_END]]
+// IR-GPU-OMP60: cond.end:
+// IR-GPU-OMP60-NEXT: [[COND:%.*]] = phi i32 [ 63, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// IR-GPU-OMP60-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// IR-GPU-OMP60-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
+// IR-GPU-OMP60: omp.dispatch.body:
+// IR-GPU-OMP60-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// IR-GPU-OMP60: omp.inner.for.cond:
+// IR-GPU-OMP60-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
+// IR-GPU-OMP60-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// IR-GPU-OMP60: omp.inner.for.body:
+// IR-GPU-OMP60-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
+// IR-GPU-OMP60-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// IR-GPU-OMP60-NEXT: store i32 [[ADD]], ptr [[I_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[CALL:%.*]] = call noundef i32 @_Z18omp_get_thread_numv() #[[ATTR5:[0-9]+]]
+// IR-GPU-OMP60-NEXT: store i32 [[CALL]], ptr addrspacecast (ptr addrspace(3) @x to ptr), align 4
+// IR-GPU-OMP60-NEXT: [[TMP11:%.*]] = load i32, ptr [[I_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[TMP12:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @x to ptr), align 4
+// IR-GPU-OMP60-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
+// IR-GPU-OMP60-NEXT: [[TMP13:%.*]] = load i32, ptr [[I_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
+// IR-GPU-OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
+// IR-GPU-OMP60-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
+// IR-GPU-OMP60-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// IR-GPU-OMP60: omp.body.continue:
+// IR-GPU-OMP60-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// IR-GPU-OMP60: omp.inner.for.inc:
+// IR-GPU-OMP60-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1
+// IR-GPU-OMP60-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: br label [[OMP_INNER_FOR_COND]]
+// IR-GPU-OMP60: omp.inner.for.end:
+// IR-GPU-OMP60-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
+// IR-GPU-OMP60: omp.dispatch.inc:
+// IR-GPU-OMP60-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
+// IR-GPU-OMP60-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_LB_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
+// IR-GPU-OMP60-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_UB_ASCAST]], align 4
+// IR-GPU-OMP60-NEXT: br label [[OMP_DISPATCH_COND]]
+// IR-GPU-OMP60: omp.dispatch.end:
+// IR-GPU-OMP60-NEXT: call void @__kmpc_for_static_fini(ptr addrspacecast (ptr addrspace(1) @[[GLOB2]] to ptr), i32 [[TMP2]])
+// IR-GPU-OMP60-NEXT: ret void
+//
+//
+// IR-OMP60-LABEL: define {{[^@]+}}@main
+// IR-OMP60-SAME: () #[[ATTR0:[0-9]+]] {
+// IR-OMP60-NEXT: entry:
+// IR-OMP60-NEXT: [[X:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[DEVICE_RESULT:%.*]] = alloca [64 x i32], align 16
+// IR-OMP60-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// IR-OMP60-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// IR-OMP60-NEXT: store i32 0, ptr [[X]], align 4
+// IR-OMP60-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[DEVICE_RESULT]], i8 0, i64 256, i1 false)
+// IR-OMP60-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8
+// IR-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META3:![0-9]+]]
+// IR-OMP60-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// IR-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
+// IR-OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META3]]
+// IR-OMP60-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l47(ptr [[DEVICE_RESULT]], ptr [[TMP1]], ptr [[TMP2]]) #[[ATTR3:[0-9]+]]
+// IR-OMP60-NEXT: ret i32 0
+//
+//
+// IR-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l47
+// IR-OMP60-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2:[0-9]+]] {
+// IR-OMP60-NEXT: entry:
+// IR-OMP60-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8
+// IR-OMP60-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR:%.*]] = alloca ptr, align 8
+// IR-OMP60-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// IR-OMP60-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// IR-OMP60-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
+// IR-OMP60-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR]], align 8
+// IR-OMP60-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
+// IR-OMP60-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// IR-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8, !nonnull [[META3]], !align [[META4:![0-9]+]]
+// IR-OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META3]]
+// IR-OMP60-NEXT: store ptr [[TMP2]], ptr [[TMP]], align 8
+// IR-OMP60-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META3]]
+// IR-OMP60-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i64 0, i64 0
+// IR-OMP60-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB2]], i32 [[TMP0]], i32 64, i32 1, ptr [[ARRAYDECAY]])
+// IR-OMP60-NEXT: [[TMP4:%.*]] = load ptr, ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
+// IR-OMP60-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l47.omp_outlined, ptr [[TMP1]], ptr [[TMP4]])
+// IR-OMP60-NEXT: ret void
+//
+//
+// IR-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l47.omp_outlined
+// IR-OMP60-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR2]] {
+// IR-OMP60-NEXT: entry:
+// IR-OMP60-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// IR-OMP60-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// IR-OMP60-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8
+// IR-OMP60-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR:%.*]] = alloca ptr, align 8
+// IR-OMP60-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: [[I:%.*]] = alloca i32, align 4
+// IR-OMP60-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// IR-OMP60-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// IR-OMP60-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR]], align 8
+// IR-OMP60-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
+// IR-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8, !nonnull [[META3]], !align [[META4]]
+// IR-OMP60-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// IR-OMP60-NEXT: store i32 63, ptr [[DOTOMP_UB]], align 4
+// IR-OMP60-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// IR-OMP60-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// IR-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// IR-OMP60-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// IR-OMP60-NEXT: [[TMP3:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
+// IR-OMP60-NEXT: [[DOTX__VOID_ADDR:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP2]], i64 4, ptr [[TMP3]])
+// IR-OMP60-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// IR-OMP60-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// IR-OMP60-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 63
+// IR-OMP60-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// IR-OMP60: cond.true:
+// IR-OMP60-NEXT: br label [[COND_END:%.*]]
+// IR-OMP60: cond.false:
+// IR-OMP60-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// IR-OMP60-NEXT: br label [[COND_END]]
+// IR-OMP60: cond.end:
+// IR-OMP60-NEXT: [[COND:%.*]] = phi i32 [ 63, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// IR-OMP60-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// IR-OMP60-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// IR-OMP60-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// IR-OMP60-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// IR-OMP60: omp.inner.for.cond:
+// IR-OMP60-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-OMP60-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// IR-OMP60-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// IR-OMP60-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
+// IR-OMP60: omp.inner.for.cond.cleanup:
+// IR-OMP60-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
+// IR-OMP60: omp.inner.for.body:
+// IR-OMP60-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-OMP60-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// IR-OMP60-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// IR-OMP60-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// IR-OMP60-NEXT: [[CALL:%.*]] = call noundef i32 @_Z18omp_get_thread_numv()
+// IR-OMP60-NEXT: store i32 [[CALL]], ptr [[DOTX__VOID_ADDR]], align 4
+// IR-OMP60-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
+// IR-OMP60-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTX__VOID_ADDR]], align 4
+// IR-OMP60-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
+// IR-OMP60-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
+// IR-OMP60-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
+// IR-OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
+// IR-OMP60-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4
+// IR-OMP60-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// IR-OMP60: omp.body.continue:
+// IR-OMP60-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// IR-OMP60: omp.inner.for.inc:
+// IR-OMP60-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-OMP60-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
+// IR-OMP60-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// IR-OMP60-NEXT: br label [[OMP_INNER_FOR_COND]]
+// IR-OMP60: omp.inner.for.end:
+// IR-OMP60-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// IR-OMP60: omp.loop.exit:
+// IR-OMP60-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// IR-OMP60-NEXT: [[TMP14:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
+// IR-OMP60-NEXT: call void @__kmpc_free(i32 [[TMP2]], ptr [[DOTX__VOID_ADDR]], ptr [[TMP14]])
+// IR-OMP60-NEXT: ret void
+//
+//
+// IR-PCH-OMP60-LABEL: define {{[^@]+}}@main
+// IR-PCH-OMP60-SAME: () #[[ATTR0:[0-9]+]] {
+// IR-PCH-OMP60-NEXT: entry:
+// IR-PCH-OMP60-NEXT: [[X:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[DEVICE_RESULT:%.*]] = alloca [64 x i32], align 16
+// IR-PCH-OMP60-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// IR-PCH-OMP60-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// IR-PCH-OMP60-NEXT: store i32 0, ptr [[X]], align 4
+// IR-PCH-OMP60-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[DEVICE_RESULT]], i8 0, i64 256, i1 false)
+// IR-PCH-OMP60-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8
+// IR-PCH-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META3:![0-9]+]]
+// IR-PCH-OMP60-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// IR-PCH-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
+// IR-PCH-OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META3]]
+// IR-PCH-OMP60-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l47(ptr [[DEVICE_RESULT]], ptr [[TMP1]], ptr [[TMP2]]) #[[ATTR3:[0-9]+]]
+// IR-PCH-OMP60-NEXT: ret i32 0
+//
+//
+// IR-PCH-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l47
+// IR-PCH-OMP60-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2:[0-9]+]] {
+// IR-PCH-OMP60-NEXT: entry:
+// IR-PCH-OMP60-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8
+// IR-PCH-OMP60-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR:%.*]] = alloca ptr, align 8
+// IR-PCH-OMP60-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// IR-PCH-OMP60-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// IR-PCH-OMP60-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
+// IR-PCH-OMP60-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR]], align 8
+// IR-PCH-OMP60-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
+// IR-PCH-OMP60-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// IR-PCH-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8, !nonnull [[META3]], !align [[META4:![0-9]+]]
+// IR-PCH-OMP60-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META3]]
+// IR-PCH-OMP60-NEXT: store ptr [[TMP2]], ptr [[TMP]], align 8
+// IR-PCH-OMP60-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META3]]
+// IR-PCH-OMP60-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i64 0, i64 0
+// IR-PCH-OMP60-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB2]], i32 [[TMP0]], i32 64, i32 1, ptr [[ARRAYDECAY]])
+// IR-PCH-OMP60-NEXT: [[TMP4:%.*]] = load ptr, ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
+// IR-PCH-OMP60-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l47.omp_outlined, ptr [[TMP1]], ptr [[TMP4]])
+// IR-PCH-OMP60-NEXT: ret void
+//
+//
+// IR-PCH-OMP60-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l47.omp_outlined
+// IR-PCH-OMP60-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR2]] {
+// IR-PCH-OMP60-NEXT: entry:
+// IR-PCH-OMP60-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// IR-PCH-OMP60-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// IR-PCH-OMP60-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8
+// IR-PCH-OMP60-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR:%.*]] = alloca ptr, align 8
+// IR-PCH-OMP60-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: [[I:%.*]] = alloca i32, align 4
+// IR-PCH-OMP60-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// IR-PCH-OMP60-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// IR-PCH-OMP60-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR]], align 8
+// IR-PCH-OMP60-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
+// IR-PCH-OMP60-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8, !nonnull [[META3]], !align [[META4]]
+// IR-PCH-OMP60-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// IR-PCH-OMP60-NEXT: store i32 63, ptr [[DOTOMP_UB]], align 4
+// IR-PCH-OMP60-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// IR-PCH-OMP60-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// IR-PCH-OMP60-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// IR-PCH-OMP60-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// IR-PCH-OMP60-NEXT: [[TMP3:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
+// IR-PCH-OMP60-NEXT: [[DOTX__VOID_ADDR:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP2]], i64 4, ptr [[TMP3]])
+// IR-PCH-OMP60-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// IR-PCH-OMP60-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// IR-PCH-OMP60-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 63
+// IR-PCH-OMP60-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// IR-PCH-OMP60: cond.true:
+// IR-PCH-OMP60-NEXT: br label [[COND_END:%.*]]
+// IR-PCH-OMP60: cond.false:
+// IR-PCH-OMP60-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// IR-PCH-OMP60-NEXT: br label [[COND_END]]
+// IR-PCH-OMP60: cond.end:
+// IR-PCH-OMP60-NEXT: [[COND:%.*]] = phi i32 [ 63, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// IR-PCH-OMP60-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// IR-PCH-OMP60-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// IR-PCH-OMP60-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// IR-PCH-OMP60-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// IR-PCH-OMP60: omp.inner.for.cond:
+// IR-PCH-OMP60-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-PCH-OMP60-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// IR-PCH-OMP60-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// IR-PCH-OMP60-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
+// IR-PCH-OMP60: omp.inner.for.cond.cleanup:
+// IR-PCH-OMP60-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
+// IR-PCH-OMP60: omp.inner.for.body:
+// IR-PCH-OMP60-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-PCH-OMP60-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// IR-PCH-OMP60-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// IR-PCH-OMP60-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// IR-PCH-OMP60-NEXT: [[CALL:%.*]] = call noundef i32 @_Z18omp_get_thread_numv()
+// IR-PCH-OMP60-NEXT: store i32 [[CALL]], ptr [[DOTX__VOID_ADDR]], align 4
+// IR-PCH-OMP60-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
+// IR-PCH-OMP60-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTX__VOID_ADDR]], align 4
+// IR-PCH-OMP60-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
+// IR-PCH-OMP60-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
+// IR-PCH-OMP60-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
+// IR-PCH-OMP60-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
+// IR-PCH-OMP60-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4
+// IR-PCH-OMP60-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// IR-PCH-OMP60: omp.body.continue:
+// IR-PCH-OMP60-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// IR-PCH-OMP60: omp.inner.for.inc:
+// IR-PCH-OMP60-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// IR-PCH-OMP60-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
+// IR-PCH-OMP60-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// IR-PCH-OMP60-NEXT: br label [[OMP_INNER_FOR_COND]]
+// IR-PCH-OMP60: omp.inner.for.end:
+// IR-PCH-OMP60-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// IR-PCH-OMP60: omp.loop.exit:
+// IR-PCH-OMP60-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// IR-PCH-OMP60-NEXT: [[TMP14:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
+// IR-PCH-OMP60-NEXT: call void @__kmpc_free(i32 [[TMP2]], ptr [[DOTX__VOID_ADDR]], ptr [[TMP14]])
+// IR-PCH-OMP60-NEXT: ret void
+//
diff --git a/clang/test/OpenMP/target_parallel_num_threads_messages.cpp b/clang/test/OpenMP/target_parallel_num_threads_messages.cpp
index 79f77b7..64e9929d 100644
--- a/clang/test/OpenMP/target_parallel_num_threads_messages.cpp
+++ b/clang/test/OpenMP/target_parallel_num_threads_messages.cpp
@@ -1,7 +1,9 @@
// RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s -Wuninitialized
-
// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s -Wuninitialized
+// RUN: %clang_cc1 -DOMP60 -verify=expected,omp60 -fopenmp -fopenmp-version=60 -ferror-limit 100 %s -Wuninitialized
+// RUN: %clang_cc1 -DOMP60 -verify=expected,omp60 -fopenmp-simd -fopenmp-version=60 -ferror-limit 100 %s -Wuninitialized
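+// The omp60- prefixed diagnostics below are checked only by the two
+// -verify=expected,omp60 run lines above; the plain -verify run lines keep
+// exercising the pre-6.0 behavior.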
+
void foo() {
}
@@ -9,11 +11,11 @@ bool foobool(int argc) {
return argc;
}
-struct S1; // expected-note {{declared here}}
+struct S1; // expected-note {{declared here}} omp60-note {{declared here}}
#define redef_num_threads(a, b) num_threads(a)
-template <class T, typename S, int N> // expected-note {{declared here}}
+template <class T, typename S, int N> // expected-note {{declared here}} omp60-note {{declared here}}
T tmain(T argc, S **argv) {
T z;
#pragma omp target parallel num_threads // expected-error {{expected '(' after 'num_threads'}}
@@ -41,6 +43,56 @@ T tmain(T argc, S **argv) {
#pragma omp target parallel redef_num_threads (argc, argc)
foo();
+#ifdef OMP60
+ // Valid uses of strict modifier
+ #pragma omp target parallel num_threads(strict: 4)
+ foo();
+ #pragma omp target parallel num_threads(strict: argc+z)
+ foo();
+
+ // Invalid: missing expression after strict:
+ #pragma omp target parallel num_threads(strict: ) // omp60-error {{expected expression}}
+ foo();
+ #pragma omp target parallel num_threads(strict:) // omp60-error {{expected expression}}
+ foo();
+ #pragma omp target parallel num_threads(strict: // omp60-error {{expected expression}} omp60-error {{expected ')'}} omp60-note {{to match this '('}}
+ foo();
+
+ // Invalid: unknown/missing modifier
+ #pragma omp target parallel num_threads(foo: 4) // omp60-error {{expected 'strict' in OpenMP clause 'num_threads'}}
+ foo();
+ #pragma omp target parallel num_threads(: 4) // omp60-error {{expected expression}} omp60-error {{expected ')'}} omp60-note {{to match this '('}}
+ foo();
+ #pragma omp target parallel num_threads(:) // omp60-error {{expected expression}} omp60-error {{expected ')'}} omp60-note {{to match this '('}}
+ foo();
+
+ // Invalid: missing colon after modifier
+ #pragma omp target parallel num_threads(strict 4) // omp60-error {{missing ':' after strict modifier}}
+ foo();
+
+ // Invalid: negative, zero, or non-integral
+ #pragma omp target parallel num_threads(strict: -1) // omp60-error {{argument to 'num_threads' clause must be a strictly positive integer value}}
+ foo();
+ #pragma omp target parallel num_threads(strict: 0) // omp60-error {{argument to 'num_threads' clause must be a strictly positive integer value}}
+ foo();
+ #pragma omp target parallel num_threads(strict: (argc > 0) ? argv[1] : argv[2]) // omp60-error 2 {{expression must have integral or unscoped enumeration type, not 'char *'}}
+ foo();
+ #pragma omp target parallel num_threads(strict: S) // omp60-error {{'S' does not refer to a value}}
+ foo();
+ #pragma omp target parallel num_threads(strict: argv[1]=2) // omp60-error {{expected ')'}} omp60-note {{to match this '('}} omp60-error 2 {{expression must have integral or unscoped enumeration type, not 'char *'}}
+ foo();
+ #pragma omp target parallel num_threads(strict: N) // omp60-error {{argument to 'num_threads' clause must be a strictly positive integer value}}
+ foo();
+
+ // Invalid: multiple strict modifiers or mixed with non-strict
+ #pragma omp target parallel num_threads(strict: 4, strict: 5) // omp60-error {{expected ')'}} expected-note {{to match this '('}}
+ foo();
+ #pragma omp target parallel num_threads(strict: 4), num_threads(5) // omp60-error {{directive '#pragma omp target parallel' cannot contain more than one 'num_threads' clause}}
+ foo();
+ #pragma omp target parallel num_threads(4), num_threads(strict: 5) // omp60-error {{directive '#pragma omp target parallel' cannot contain more than one 'num_threads' clause}}
+ foo();
+#endif // OMP60
+
return argc;
}
@@ -69,5 +121,53 @@ int main(int argc, char **argv) {
#pragma omp target parallel redef_num_threads (argc, argc)
foo();
+#ifdef OMP60
+ // Valid uses of strict modifier
+ #pragma omp target parallel num_threads(strict: 4)
+ foo();
+ #pragma omp target parallel num_threads(strict: argc+z)
+ foo();
+
+ // Invalid: missing expression after strict:
+ #pragma omp target parallel num_threads(strict: ) // omp60-error {{expected expression}}
+ foo();
+ #pragma omp target parallel num_threads(strict:) // omp60-error {{expected expression}}
+ foo();
+ #pragma omp target parallel num_threads(strict: // omp60-error {{expected expression}} omp60-error {{expected ')'}} omp60-note {{to match this '('}}
+ foo();
+
+ // Invalid: unknown/missing modifier
+ #pragma omp target parallel num_threads(foo: 4) // omp60-error {{expected 'strict' in OpenMP clause 'num_threads'}}
+ foo();
+ #pragma omp target parallel num_threads(: 4) // omp60-error {{expected expression}} omp60-error {{expected ')'}} omp60-note {{to match this '('}}
+ foo();
+ #pragma omp target parallel num_threads(:) // omp60-error {{expected expression}} omp60-error {{expected ')'}} omp60-note {{to match this '('}}
+ foo();
+
+ // Invalid: missing colon after modifier
+ #pragma omp target parallel num_threads(strict 4) // omp60-error {{missing ':' after strict modifier}}
+ foo();
+
+ // Invalid: negative, zero, or non-integral
+ #pragma omp target parallel num_threads(strict: -1) // omp60-error {{argument to 'num_threads' clause must be a strictly positive integer value}}
+ foo();
+ #pragma omp target parallel num_threads(strict: 0) // omp60-error {{argument to 'num_threads' clause must be a strictly positive integer value}}
+ foo();
+ #pragma omp target parallel num_threads(strict: (argc > 0) ? argv[1] : argv[2]) // omp60-error {{expression must have integral or unscoped enumeration type, not 'char *'}}
+ foo();
+ #pragma omp target parallel num_threads(strict: S1) // omp60-error {{'S1' does not refer to a value}}
+ foo();
+ #pragma omp target parallel num_threads(strict: argv[1]=2) // omp60-error {{expected ')'}} omp60-note {{to match this '('}} omp60-error {{expression must have integral or unscoped enumeration type, not 'char *'}}
+ foo();
+
+ // Invalid: multiple strict modifiers or mixed with non-strict
+ #pragma omp target parallel num_threads(strict: 4, strict: 5) // omp60-error {{expected ')'}} expected-note {{to match this '('}}
+ foo();
+ #pragma omp target parallel num_threads(strict: 4), num_threads(5) // omp60-error {{directive '#pragma omp target parallel' cannot contain more than one 'num_threads' clause}}
+ foo();
+ #pragma omp target parallel num_threads(4), num_threads(strict: 5) // omp60-error {{directive '#pragma omp target parallel' cannot contain more than one 'num_threads' clause}}
+ foo();
+#endif // OMP60
+
return tmain<int, char, 3>(argc, argv); // expected-note {{in instantiation of function template specialization 'tmain<int, char, 3>' requested here}}
}
diff --git a/clang/test/OpenMP/target_parallel_num_threads_strict_codegen.cpp b/clang/test/OpenMP/target_parallel_num_threads_strict_codegen.cpp
new file mode 100644
index 0000000..9e319e4
--- /dev/null
+++ b/clang/test/OpenMP/target_parallel_num_threads_strict_codegen.cpp
@@ -0,0 +1,2956 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
+// Test host codegen.
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
+
+// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+
+// Test target codegen - host bc file has to be created first.
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK9
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK11
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
+
+// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
+// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
+// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
+// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
+// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+
+// expected-no-diagnostics
+#ifndef HEADER
+#define HEADER
+
+
+// We have 10 target regions (3 in S1::r1, 4 in fstatic, 3 in ftemplate)
+
+// Check that target registration is emitted as a Ctor.
+
+// Check that the offloading functions are emitted and that the parallel function
+// is appropriately guarded.
+
+
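+// In OpenMP 6.0, num_threads(strict: N) requires that exactly N threads be
+// provided for the parallel region; the severity() and message() clauses
+// control how the runtime reports a shortfall. On the host this lowers to a
+// call of the form
+//   __kmpc_push_num_threads_strict(loc, gtid, N, severity, message)
+// immediately before the outlined parallel region is forked.
+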
+template<typename tx>
+tx ftemplate(int n) {
+ tx a = 0;
+ char str[] = "msg";
+ const char *str1 = "msg1";
+
+ #pragma omp target parallel num_threads(strict: tx(20)) severity(warning) message(str)
+ {
+ }
+
+ #pragma omp target parallel num_threads(strict: 42) severity(warning) message("msg_literal")
+ {
+ }
+
+ short b = 1;
+ #pragma omp target parallel num_threads(strict: b) severity(fatal) message(str1)
+ {
+ a += b;
+ }
+
+ return a;
+}
+
+static
+int fstatic(int n, const char *str2) {
+ char str[] = "msg";
+ const char *str1 = "msg1";
+
+ #pragma omp target parallel num_threads(strict: n) severity(warning) message(str)
+ {
+ }
+
+ #pragma omp target parallel num_threads(strict: n) severity(fatal) message("msg_literal")
+ {
+ }
+
+ #pragma omp target parallel num_threads(strict: 32+n) severity(fatal) message(str1)
+ {
+ }
+
+ #pragma omp target parallel num_threads(strict: 32+n) severity(warning) message(str2)
+ {
+ }
+
+ return n+1;
+}
+
+struct S1 {
+ double a;
+
+ int r1(int n){
+ int b = 1;
+ char str[] = "msg";
+ const char *str1 = "msg1";
+
+ #pragma omp target parallel num_threads(strict: n-b) severity(warning) message(str)
+ {
+ this->a = (double)b + 1.5;
+ }
+
+ #pragma omp target parallel num_threads(strict: 1024) severity(fatal) message(str1)
+ {
+ this->a = 2.5;
+ }
+
+ #pragma omp target parallel num_threads(strict: n) severity(fatal) message("msg_literal")
+ {
+ this->a = 2.5;
+ }
+
+ return (int)a;
+ }
+};
+
+int bar(int n){
+ int a = 0;
+ const char *str = "msg_arg";
+
+ S1 S;
+ a += S.r1(n);
+
+ a += fstatic(n, str);
+
+ a += ftemplate<int>(n);
+
+ return a;
+}
+
+#endif
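+// In the host entry points below, the strict num_threads value is encoded in
+// the [3 x i32] thread-count triple of the __tgt_kernel_arguments structure
+// (e.g. insertvalue [3 x i32] zeroinitializer, i32 %n, 0) and is also passed
+// directly to __tgt_target_kernel.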
+// CHECK1-LABEL: define {{[^@]+}}@_Z3bari
+// CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[STR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
+// CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[A]], align 4
+// CHECK1-NEXT: store ptr @.str, ptr [[STR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP0]])
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[A]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[STR]], align 8
+// CHECK1-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZL7fstaticiPKc(i32 noundef signext [[TMP2]], ptr noundef [[TMP3]])
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
+// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP4]], [[CALL1]]
+// CHECK1-NEXT: store i32 [[ADD2]], ptr [[A]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP5]])
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[A]], align 4
+// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], [[CALL3]]
+// CHECK1-NEXT: store i32 [[ADD4]], ptr [[A]], align 4
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4
+// CHECK1-NEXT: ret i32 [[TMP7]]
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
+// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[B:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1
+// CHECK1-NEXT: [[STR1:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 8
+// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[KERNEL_ARGS8:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[_TMP13:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED14:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [3 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS17:%.*]] = alloca [3 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [3 x ptr], align 8
+// CHECK1-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: store i32 1, ptr [[B]], align 4
+// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._ZN2S12r1Ei.str, i64 4, i1 false)
+// CHECK1-NEXT: store ptr @.str.1, ptr [[STR1]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[B]], align 4
+// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]]
+// CHECK1-NEXT: store i32 [[SUB]], ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: store ptr [[STR]], ptr [[DOTCAPTURE_EXPR_2]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_2]], align 8, !nonnull [[META23:![0-9]+]]
+// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[B]], align 4
+// CHECK1-NEXT: store i32 [[TMP3]], ptr [[B_CASTED]], align 4
+// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[B_CASTED]], align 8
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP8]], align 8
+// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[A]], ptr [[TMP9]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP10]], align 8
+// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK1-NEXT: store i64 [[TMP4]], ptr [[TMP11]], align 8
+// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK1-NEXT: store i64 [[TMP4]], ptr [[TMP12]], align 8
+// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
+// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8
+// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK1-NEXT: store i64 [[TMP6]], ptr [[TMP14]], align 8
+// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
+// CHECK1-NEXT: store i64 [[TMP6]], ptr [[TMP15]], align 8
+// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
+// CHECK1-NEXT: store ptr null, ptr [[TMP16]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP7]], ptr [[TMP17]], align 8
+// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP7]], ptr [[TMP18]], align 8
+// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
+// CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8
+// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[TMP23:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP22]], 0
+// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP24]], align 4
+// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK1-NEXT: store i32 4, ptr [[TMP25]], align 4
+// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP26]], align 8
+// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP21]], ptr [[TMP27]], align 8
+// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP28]], align 8
+// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP29]], align 8
+// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP30]], align 8
+// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP31]], align 8
+// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK1-NEXT: store i64 0, ptr [[TMP32]], align 8
+// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP33]], align 8
+// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP34]], align 4
+// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [[TMP23]], ptr [[TMP35]], align 4
+// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP36]], align 4
+// CHECK1-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1:[0-9]+]], i64 -1, i32 1, i32 [[TMP22]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0
+// CHECK1-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK1: omp_offload.failed:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104(ptr [[THIS1]], i64 [[TMP4]], i64 [[TMP6]], ptr [[TMP7]]) #[[ATTR3:[0-9]+]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK1: omp_offload.cont:
+// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[STR1]], align 8
+// CHECK1-NEXT: store ptr [[TMP39]], ptr [[DOTCAPTURE_EXPR_3]], align 8
+// CHECK1-NEXT: [[TMP40:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_3]], align 8
+// CHECK1-NEXT: [[A4:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP41]], align 8
+// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[A4]], ptr [[TMP42]], align 8
+// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP43]], align 8
+// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP40]], ptr [[TMP44]], align 8
+// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP40]], ptr [[TMP45]], align 8
+// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
+// CHECK1-NEXT: store ptr null, ptr [[TMP46]], align 8
+// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP49]], align 4
+// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 1
+// CHECK1-NEXT: store i32 2, ptr [[TMP50]], align 4
+// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP47]], ptr [[TMP51]], align 8
+// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP48]], ptr [[TMP52]], align 8
+// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.2, ptr [[TMP53]], align 8
+// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP54]], align 8
+// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP55]], align 8
+// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP56]], align 8
+// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 8
+// CHECK1-NEXT: store i64 0, ptr [[TMP57]], align 8
+// CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP58]], align 8
+// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP59]], align 4
+// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [i32 1024, i32 0, i32 0], ptr [[TMP60]], align 4
+// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP61]], align 4
+// CHECK1-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 1024, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109.region_id, ptr [[KERNEL_ARGS8]])
+// CHECK1-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0
+// CHECK1-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]]
+// CHECK1: omp_offload.failed9:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109(ptr [[THIS1]], ptr [[TMP40]]) #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT10]]
+// CHECK1: omp_offload.cont10:
+// CHECK1-NEXT: [[TMP64:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: store i32 [[TMP64]], ptr [[DOTCAPTURE_EXPR_11]], align 4
+// CHECK1-NEXT: store ptr @.str.4, ptr [[DOTCAPTURE_EXPR_12]], align 8
+// CHECK1-NEXT: [[TMP65:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_12]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: store ptr [[TMP65]], ptr [[_TMP13]], align 8
+// CHECK1-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
+// CHECK1-NEXT: store i32 [[TMP66]], ptr [[DOTCAPTURE_EXPR__CASTED14]], align 4
+// CHECK1-NEXT: [[TMP67:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED14]], align 8
+// CHECK1-NEXT: [[TMP68:%.*]] = load ptr, ptr [[_TMP13]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: [[A15:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP69]], align 8
+// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[A15]], ptr [[TMP70]], align 8
+// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP71]], align 8
+// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1
+// CHECK1-NEXT: store i64 [[TMP67]], ptr [[TMP72]], align 8
+// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS17]], i32 0, i32 1
+// CHECK1-NEXT: store i64 [[TMP67]], ptr [[TMP73]], align 8
+// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1
+// CHECK1-NEXT: store ptr null, ptr [[TMP74]], align 8
+// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP68]], ptr [[TMP75]], align 8
+// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS17]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP68]], ptr [[TMP76]], align 8
+// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 2
+// CHECK1-NEXT: store ptr null, ptr [[TMP77]], align 8
+// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP80:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
+// CHECK1-NEXT: [[TMP81:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP80]], 0
+// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP82]], align 4
+// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
+// CHECK1-NEXT: store i32 3, ptr [[TMP83]], align 4
+// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP78]], ptr [[TMP84]], align 8
+// CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP79]], ptr [[TMP85]], align 8
+// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.5, ptr [[TMP86]], align 8
+// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP87]], align 8
+// CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP88]], align 8
+// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP89]], align 8
+// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
+// CHECK1-NEXT: store i64 0, ptr [[TMP90]], align 8
+// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP91]], align 8
+// CHECK1-NEXT: [[TMP92:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP92]], align 4
+// CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [[TMP81]], ptr [[TMP93]], align 4
+// CHECK1-NEXT: [[TMP94:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP94]], align 4
+// CHECK1-NEXT: [[TMP95:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP80]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114.region_id, ptr [[KERNEL_ARGS19]])
+// CHECK1-NEXT: [[TMP96:%.*]] = icmp ne i32 [[TMP95]], 0
+// CHECK1-NEXT: br i1 [[TMP96]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
+// CHECK1: omp_offload.failed20:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114(ptr [[THIS1]], i64 [[TMP67]], ptr [[TMP68]]) #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT21]]
+// CHECK1: omp_offload.cont21:
+// CHECK1-NEXT: [[A22:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP97:%.*]] = load double, ptr [[A22]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = fptosi double [[TMP97]] to i32
+// CHECK1-NEXT: ret i32 [[CONV]]
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZL7fstaticiPKc
+// CHECK1-SAME: (i32 noundef signext [[N:%.*]], ptr noundef [[STR2:%.*]]) #[[ATTR0]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[STR2_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1
+// CHECK1-NEXT: [[STR1:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[_TMP4:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED5:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[KERNEL_ARGS9:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_13:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED14:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[KERNEL_ARGS18:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED24:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS25:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS26:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS27:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[KERNEL_ARGS28:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: store ptr [[STR2]], ptr [[STR2_ADDR]], align 8
+// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._ZL7fstaticiPKc.str, i64 4, i1 false)
+// CHECK1-NEXT: store ptr @.str.1, ptr [[STR1]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: store ptr [[STR]], ptr [[DOTCAPTURE_EXPR_1]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
+// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP5]], align 8
+// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP6]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8
+// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP9]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
+// CHECK1-NEXT: store ptr null, ptr [[TMP10]], align 8
+// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[TMP14:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP13]], 0
+// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP15]], align 4
+// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK1-NEXT: store i32 2, ptr [[TMP16]], align 4
+// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP11]], ptr [[TMP17]], align 8
+// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP12]], ptr [[TMP18]], align 8
+// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.7, ptr [[TMP19]], align 8
+// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP20]], align 8
+// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8
+// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8
+// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK1-NEXT: store i64 0, ptr [[TMP23]], align 8
+// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP24]], align 8
+// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP25]], align 4
+// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [[TMP14]], ptr [[TMP26]], align 4
+// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4
+// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP13]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
+// CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK1: omp_offload.failed:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77(i64 [[TMP3]], ptr [[TMP4]]) #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK1: omp_offload.cont:
+// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: store i32 [[TMP30]], ptr [[DOTCAPTURE_EXPR_2]], align 4
+// CHECK1-NEXT: store ptr @.str.4, ptr [[DOTCAPTURE_EXPR_3]], align 8
+// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_3]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: store ptr [[TMP31]], ptr [[_TMP4]], align 8
+// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
+// CHECK1-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR__CASTED5]], align 4
+// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED5]], align 8
+// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[_TMP4]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP33]], ptr [[TMP35]], align 8
+// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP33]], ptr [[TMP36]], align 8
+// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS8]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP37]], align 8
+// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP34]], ptr [[TMP38]], align 8
+// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS7]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP34]], ptr [[TMP39]], align 8
+// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS8]], i64 0, i64 1
+// CHECK1-NEXT: store ptr null, ptr [[TMP40]], align 8
+// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
+// CHECK1-NEXT: [[TMP44:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP43]], 0
+// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP45]], align 4
+// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 1
+// CHECK1-NEXT: store i32 2, ptr [[TMP46]], align 4
+// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP41]], ptr [[TMP47]], align 8
+// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP42]], ptr [[TMP48]], align 8
+// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.9, ptr [[TMP49]], align 8
+// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.10, ptr [[TMP50]], align 8
+// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP51]], align 8
+// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP52]], align 8
+// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 8
+// CHECK1-NEXT: store i64 0, ptr [[TMP53]], align 8
+// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP54]], align 8
+// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP55]], align 4
+// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [[TMP44]], ptr [[TMP56]], align 4
+// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP57]], align 4
+// CHECK1-NEXT: [[TMP58:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP43]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81.region_id, ptr [[KERNEL_ARGS9]])
+// CHECK1-NEXT: [[TMP59:%.*]] = icmp ne i32 [[TMP58]], 0
+// CHECK1-NEXT: br i1 [[TMP59]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]]
+// CHECK1: omp_offload.failed10:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81(i64 [[TMP33]], ptr [[TMP34]]) #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT11]]
+// CHECK1: omp_offload.cont11:
+// CHECK1-NEXT: [[TMP60:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP60]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTCAPTURE_EXPR_12]], align 4
+// CHECK1-NEXT: [[TMP61:%.*]] = load ptr, ptr [[STR1]], align 8
+// CHECK1-NEXT: store ptr [[TMP61]], ptr [[DOTCAPTURE_EXPR_13]], align 8
+// CHECK1-NEXT: [[TMP62:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_12]], align 4
+// CHECK1-NEXT: store i32 [[TMP62]], ptr [[DOTCAPTURE_EXPR__CASTED14]], align 4
+// CHECK1-NEXT: [[TMP63:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED14]], align 8
+// CHECK1-NEXT: [[TMP64:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_13]], align 8
+// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP63]], ptr [[TMP65]], align 8
+// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP63]], ptr [[TMP66]], align 8
+// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP67]], align 8
+// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP64]], ptr [[TMP68]], align 8
+// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP64]], ptr [[TMP69]], align 8
+// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i64 0, i64 1
+// CHECK1-NEXT: store ptr null, ptr [[TMP70]], align 8
+// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP73:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_12]], align 4
+// CHECK1-NEXT: [[TMP74:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP73]], 0
+// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP75]], align 4
+// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 1
+// CHECK1-NEXT: store i32 2, ptr [[TMP76]], align 4
+// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP71]], ptr [[TMP77]], align 8
+// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP72]], ptr [[TMP78]], align 8
+// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.11, ptr [[TMP79]], align 8
+// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP80]], align 8
+// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP81]], align 8
+// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP82]], align 8
+// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 8
+// CHECK1-NEXT: store i64 0, ptr [[TMP83]], align 8
+// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP84]], align 8
+// CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP85]], align 4
+// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [[TMP74]], ptr [[TMP86]], align 4
+// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP87]], align 4
+// CHECK1-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP73]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85.region_id, ptr [[KERNEL_ARGS18]])
+// CHECK1-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0
+// CHECK1-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]]
+// CHECK1: omp_offload.failed19:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85(i64 [[TMP63]], ptr [[TMP64]]) #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT20]]
+// CHECK1: omp_offload.cont20:
+// CHECK1-NEXT: [[TMP90:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: [[ADD22:%.*]] = add nsw i32 32, [[TMP90]]
+// CHECK1-NEXT: store i32 [[ADD22]], ptr [[DOTCAPTURE_EXPR_21]], align 4
+// CHECK1-NEXT: [[TMP91:%.*]] = load ptr, ptr [[STR2_ADDR]], align 8
+// CHECK1-NEXT: store ptr [[TMP91]], ptr [[DOTCAPTURE_EXPR_23]], align 8
+// CHECK1-NEXT: [[TMP92:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4
+// CHECK1-NEXT: store i32 [[TMP92]], ptr [[DOTCAPTURE_EXPR__CASTED24]], align 4
+// CHECK1-NEXT: [[TMP93:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED24]], align 8
+// CHECK1-NEXT: [[TMP94:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_23]], align 8
+// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP93]], ptr [[TMP95]], align 8
+// CHECK1-NEXT: [[TMP96:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS26]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP93]], ptr [[TMP96]], align 8
+// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP97]], align 8
+// CHECK1-NEXT: [[TMP98:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP94]], ptr [[TMP98]], align 8
+// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS26]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP94]], ptr [[TMP99]], align 8
+// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 1
+// CHECK1-NEXT: store ptr null, ptr [[TMP100]], align 8
+// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS26]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP103:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4
+// CHECK1-NEXT: [[TMP104:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP103]], 0
+// CHECK1-NEXT: [[TMP105:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP105]], align 4
+// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 1
+// CHECK1-NEXT: store i32 2, ptr [[TMP106]], align 4
+// CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP101]], ptr [[TMP107]], align 8
+// CHECK1-NEXT: [[TMP108:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP102]], ptr [[TMP108]], align 8
+// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.13, ptr [[TMP109]], align 8
+// CHECK1-NEXT: [[TMP110:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.14, ptr [[TMP110]], align 8
+// CHECK1-NEXT: [[TMP111:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP111]], align 8
+// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP112]], align 8
+// CHECK1-NEXT: [[TMP113:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 8
+// CHECK1-NEXT: store i64 0, ptr [[TMP113]], align 8
+// CHECK1-NEXT: [[TMP114:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP114]], align 8
+// CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP115]], align 4
+// CHECK1-NEXT: [[TMP116:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [[TMP104]], ptr [[TMP116]], align 4
+// CHECK1-NEXT: [[TMP117:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP117]], align 4
+// CHECK1-NEXT: [[TMP118:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP103]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89.region_id, ptr [[KERNEL_ARGS28]])
+// CHECK1-NEXT: [[TMP119:%.*]] = icmp ne i32 [[TMP118]], 0
+// CHECK1-NEXT: br i1 [[TMP119]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]]
+// CHECK1: omp_offload.failed29:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89(i64 [[TMP93]], ptr [[TMP94]]) #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT30]]
+// CHECK1: omp_offload.cont30:
+// CHECK1-NEXT: [[TMP120:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP120]], 1
+// CHECK1-NEXT: ret i32 [[ADD31]]
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
+// CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1
+// CHECK1-NEXT: [[STR1:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[_TMP2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: [[B:%.*]] = alloca i16, align 2
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i16, align 2
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS11:%.*]] = alloca [4 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS12:%.*]] = alloca [4 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS13:%.*]] = alloca [4 x ptr], align 8
+// CHECK1-NEXT: [[KERNEL_ARGS14:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[A]], align 4
+// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z9ftemplateIiET_i.str, i64 4, i1 false)
+// CHECK1-NEXT: store ptr @.str.1, ptr [[STR1]], align 8
+// CHECK1-NEXT: store ptr [[STR]], ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP2]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP3]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP7]], align 4
+// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4
+// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8
+// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.15, ptr [[TMP11]], align 8
+// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.16, ptr [[TMP12]], align 8
+// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8
+// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8
+// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK1-NEXT: store i64 0, ptr [[TMP15]], align 8
+// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP16]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP17]], align 4
+// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [i32 20, i32 0, i32 0], ptr [[TMP18]], align 4
+// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP19]], align 4
+// CHECK1-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 20, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
+// CHECK1-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK1: omp_offload.failed:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55(ptr [[TMP1]]) #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK1: omp_offload.cont:
+// CHECK1-NEXT: store ptr @.str.4, ptr [[DOTCAPTURE_EXPR_1]], align 8
+// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: store ptr [[TMP22]], ptr [[_TMP2]], align 8
+// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[_TMP2]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[TMP23]], ptr [[TMP24]], align 8
+// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[TMP23]], ptr [[TMP25]], align 8
+// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8
+// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP29]], align 4
+// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 1
+// CHECK1-NEXT: store i32 1, ptr [[TMP30]], align 4
+// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP27]], ptr [[TMP31]], align 8
+// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP28]], ptr [[TMP32]], align 8
+// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.17, ptr [[TMP33]], align 8
+// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.18, ptr [[TMP34]], align 8
+// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP35]], align 8
+// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP36]], align 8
+// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 8
+// CHECK1-NEXT: store i64 0, ptr [[TMP37]], align 8
+// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP38]], align 8
+// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP39]], align 4
+// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [i32 42, i32 0, i32 0], ptr [[TMP40]], align 4
+// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP41]], align 4
+// CHECK1-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 42, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59.region_id, ptr [[KERNEL_ARGS6]])
+// CHECK1-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
+// CHECK1-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
+// CHECK1: omp_offload.failed7:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59(ptr [[TMP23]]) #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT8]]
+// CHECK1: omp_offload.cont8:
+// CHECK1-NEXT: store i16 1, ptr [[B]], align 2
+// CHECK1-NEXT: [[TMP44:%.*]] = load i16, ptr [[B]], align 2
+// CHECK1-NEXT: store i16 [[TMP44]], ptr [[DOTCAPTURE_EXPR_9]], align 2
+// CHECK1-NEXT: [[TMP45:%.*]] = load ptr, ptr [[STR1]], align 8
+// CHECK1-NEXT: store ptr [[TMP45]], ptr [[DOTCAPTURE_EXPR_10]], align 8
+// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4
+// CHECK1-NEXT: store i32 [[TMP46]], ptr [[A_CASTED]], align 4
+// CHECK1-NEXT: [[TMP47:%.*]] = load i64, ptr [[A_CASTED]], align 8
+// CHECK1-NEXT: [[TMP48:%.*]] = load i16, ptr [[B]], align 2
+// CHECK1-NEXT: store i16 [[TMP48]], ptr [[B_CASTED]], align 2
+// CHECK1-NEXT: [[TMP49:%.*]] = load i64, ptr [[B_CASTED]], align 8
+// CHECK1-NEXT: [[TMP50:%.*]] = load i16, ptr [[DOTCAPTURE_EXPR_9]], align 2
+// CHECK1-NEXT: store i16 [[TMP50]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 2
+// CHECK1-NEXT: [[TMP51:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
+// CHECK1-NEXT: [[TMP52:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_10]], align 8
+// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP47]], ptr [[TMP53]], align 8
+// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP47]], ptr [[TMP54]], align 8
+// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP55]], align 8
+// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 1
+// CHECK1-NEXT: store i64 [[TMP49]], ptr [[TMP56]], align 8
+// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 1
+// CHECK1-NEXT: store i64 [[TMP49]], ptr [[TMP57]], align 8
+// CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i64 0, i64 1
+// CHECK1-NEXT: store ptr null, ptr [[TMP58]], align 8
+// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 2
+// CHECK1-NEXT: store i64 [[TMP51]], ptr [[TMP59]], align 8
+// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 2
+// CHECK1-NEXT: store i64 [[TMP51]], ptr [[TMP60]], align 8
+// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i64 0, i64 2
+// CHECK1-NEXT: store ptr null, ptr [[TMP61]], align 8
+// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP52]], ptr [[TMP62]], align 8
+// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP52]], ptr [[TMP63]], align 8
+// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i64 0, i64 3
+// CHECK1-NEXT: store ptr null, ptr [[TMP64]], align 8
+// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP67:%.*]] = load i16, ptr [[DOTCAPTURE_EXPR_9]], align 2
+// CHECK1-NEXT: [[TMP68:%.*]] = zext i16 [[TMP67]] to i32
+// CHECK1-NEXT: [[TMP69:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP68]], 0
+// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP70]], align 4
+// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 1
+// CHECK1-NEXT: store i32 4, ptr [[TMP71]], align 4
+// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP65]], ptr [[TMP72]], align 8
+// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP66]], ptr [[TMP73]], align 8
+// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.19, ptr [[TMP74]], align 8
+// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.20, ptr [[TMP75]], align 8
+// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP76]], align 8
+// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP77]], align 8
+// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 8
+// CHECK1-NEXT: store i64 0, ptr [[TMP78]], align 8
+// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP79]], align 8
+// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP80]], align 4
+// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [[TMP69]], ptr [[TMP81]], align 4
+// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP82]], align 4
+// CHECK1-NEXT: [[TMP83:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP68]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64.region_id, ptr [[KERNEL_ARGS14]])
+// CHECK1-NEXT: [[TMP84:%.*]] = icmp ne i32 [[TMP83]], 0
+// CHECK1-NEXT: br i1 [[TMP84]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
+// CHECK1: omp_offload.failed15:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64(i64 [[TMP47]], i64 [[TMP49]], i64 [[TMP51]], ptr [[TMP52]]) #[[ATTR3]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT16]]
+// CHECK1: omp_offload.cont16:
+// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[A]], align 4
+// CHECK1-NEXT: ret i32 [[TMP85]]
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104
+// CHECK1-SAME: (ptr noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2:[0-9]+]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[B]], ptr [[B_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP4]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP3]], i32 1, ptr [[ARRAYDECAY]])
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104.omp_outlined, ptr [[TMP1]], i64 [[TMP6]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], i64 noundef [[B:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[B]], ptr [[B_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to double
+// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
+// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK1-NEXT: store double [[ADD]], ptr [[A]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109
+// CHECK1-SAME: (ptr noundef [[THIS:%.*]], ptr noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr [[TMP2]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109.omp_outlined, ptr [[TMP1]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK1-NEXT: store double 2.500000e+00, ptr [[A]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114
+// CHECK1-SAME: (ptr noundef [[THIS:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(12) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x i8], ptr [[TMP4]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP3]], i32 2, ptr [[ARRAYDECAY]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114.omp_outlined, ptr [[TMP1]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK1-NEXT: store double 2.500000e+00, ptr [[A]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77
+// CHECK1-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr [[ARRAYDECAY]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77.omp_outlined)
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81
+// CHECK1-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(12) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x i8], ptr [[TMP3]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81.omp_outlined)
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85
+// CHECK1-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 2, ptr [[TMP2]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85.omp_outlined)
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89
+// CHECK1-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr [[TMP2]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89.omp_outlined)
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55
+// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP2]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 1, ptr [[ARRAYDECAY]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55.omp_outlined)
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
+// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(12) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META23]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x i8], ptr [[TMP2]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 42, i32 1, ptr [[ARRAYDECAY]])
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59.omp_outlined)
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64
+// CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[B]], ptr [[B_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load i16, ptr [[DOTCAPTURE_EXPR__ADDR]], align 2
+// CHECK1-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32
+// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[TMP3]])
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK1-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8
+// CHECK1-NEXT: [[TMP6:%.*]] = load i16, ptr [[B_ADDR]], align 2
+// CHECK1-NEXT: store i16 [[TMP6]], ptr [[B_CASTED]], align 2
+// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[B_CASTED]], align 8
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64.omp_outlined, i64 [[TMP5]], i64 [[TMP7]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[B:%.*]]) #[[ATTR2]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[B]], ptr [[B_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[B_ADDR]], align 2
+// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_Z3bari
+// CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[STR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
+// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[A]], align 4
+// CHECK3-NEXT: store ptr @.str, ptr [[STR]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP0]])
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[A]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[STR]], align 4
+// CHECK3-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZL7fstaticiPKc(i32 noundef [[TMP2]], ptr noundef [[TMP3]])
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4
+// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP4]], [[CALL1]]
+// CHECK3-NEXT: store i32 [[ADD2]], ptr [[A]], align 4
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: [[CALL3:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP5]])
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[A]], align 4
+// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], [[CALL3]]
+// CHECK3-NEXT: store i32 [[ADD4]], ptr [[A]], align 4
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4
+// CHECK3-NEXT: ret i32 [[TMP7]]
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
+// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[B:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1
+// CHECK3-NEXT: [[STR1:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 4
+// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[KERNEL_ARGS8:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[_TMP13:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED14:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [3 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_PTRS17:%.*]] = alloca [3 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [3 x ptr], align 4
+// CHECK3-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: store i32 1, ptr [[B]], align 4
+// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[STR]], ptr align 1 @__const._ZN2S12r1Ei.str, i32 4, i1 false)
+// CHECK3-NEXT: store ptr @.str.1, ptr [[STR1]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[B]], align 4
+// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]]
+// CHECK3-NEXT: store i32 [[SUB]], ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK3-NEXT: store ptr [[STR]], ptr [[DOTCAPTURE_EXPR_2]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_2]], align 4, !nonnull [[META24:![0-9]+]]
+// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP]], align 4
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[B]], align 4
+// CHECK3-NEXT: store i32 [[TMP3]], ptr [[B_CASTED]], align 4
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_CASTED]], align 4
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
+// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK3-NEXT: store ptr [[THIS1]], ptr [[TMP8]], align 4
+// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK3-NEXT: store ptr [[A]], ptr [[TMP9]], align 4
+// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
+// CHECK3-NEXT: store ptr null, ptr [[TMP10]], align 4
+// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK3-NEXT: store i32 [[TMP4]], ptr [[TMP11]], align 4
+// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK3-NEXT: store i32 [[TMP4]], ptr [[TMP12]], align 4
+// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
+// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4
+// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK3-NEXT: store i32 [[TMP6]], ptr [[TMP14]], align 4
+// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
+// CHECK3-NEXT: store i32 [[TMP6]], ptr [[TMP15]], align 4
+// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
+// CHECK3-NEXT: store ptr null, ptr [[TMP16]], align 4
+// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP7]], ptr [[TMP17]], align 4
+// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP7]], ptr [[TMP18]], align 4
+// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
+// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 4
+// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK3-NEXT: [[TMP23:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP22]], 0
+// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP24]], align 4
+// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK3-NEXT: store i32 4, ptr [[TMP25]], align 4
+// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK3-NEXT: store ptr [[TMP20]], ptr [[TMP26]], align 4
+// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP21]], ptr [[TMP27]], align 4
+// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP28]], align 4
+// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP29]], align 4
+// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP30]], align 4
+// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP31]], align 4
+// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK3-NEXT: store i64 0, ptr [[TMP32]], align 8
+// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP33]], align 8
+// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP34]], align 4
+// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] [[TMP23]], ptr [[TMP35]], align 4
+// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP36]], align 4
+// CHECK3-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1:[0-9]+]], i64 -1, i32 1, i32 [[TMP22]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104.region_id, ptr [[KERNEL_ARGS]])
+// CHECK3-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0
+// CHECK3-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK3: omp_offload.failed:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104(ptr [[THIS1]], i32 [[TMP4]], i32 [[TMP6]], ptr [[TMP7]]) #[[ATTR3:[0-9]+]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK3: omp_offload.cont:
+// CHECK3-NEXT: [[TMP39:%.*]] = load ptr, ptr [[STR1]], align 4
+// CHECK3-NEXT: store ptr [[TMP39]], ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK3-NEXT: [[TMP40:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK3-NEXT: [[A4:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
+// CHECK3-NEXT: store ptr [[THIS1]], ptr [[TMP41]], align 4
+// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
+// CHECK3-NEXT: store ptr [[A4]], ptr [[TMP42]], align 4
+// CHECK3-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 0
+// CHECK3-NEXT: store ptr null, ptr [[TMP43]], align 4
+// CHECK3-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
+// CHECK3-NEXT: store ptr [[TMP40]], ptr [[TMP44]], align 4
+// CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
+// CHECK3-NEXT: store ptr [[TMP40]], ptr [[TMP45]], align 4
+// CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 1
+// CHECK3-NEXT: store ptr null, ptr [[TMP46]], align 4
+// CHECK3-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP49]], align 4
+// CHECK3-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 1
+// CHECK3-NEXT: store i32 2, ptr [[TMP50]], align 4
+// CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 2
+// CHECK3-NEXT: store ptr [[TMP47]], ptr [[TMP51]], align 4
+// CHECK3-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP48]], ptr [[TMP52]], align 4
+// CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 4
+// CHECK3-NEXT: store ptr @.offload_sizes.2, ptr [[TMP53]], align 4
+// CHECK3-NEXT: [[TMP54:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 5
+// CHECK3-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP54]], align 4
+// CHECK3-NEXT: [[TMP55:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP55]], align 4
+// CHECK3-NEXT: [[TMP56:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP56]], align 4
+// CHECK3-NEXT: [[TMP57:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 8
+// CHECK3-NEXT: store i64 0, ptr [[TMP57]], align 8
+// CHECK3-NEXT: [[TMP58:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP58]], align 8
+// CHECK3-NEXT: [[TMP59:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP59]], align 4
+// CHECK3-NEXT: [[TMP60:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] [i32 1024, i32 0, i32 0], ptr [[TMP60]], align 4
+// CHECK3-NEXT: [[TMP61:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS8]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP61]], align 4
+// CHECK3-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 1024, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109.region_id, ptr [[KERNEL_ARGS8]])
+// CHECK3-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0
+// CHECK3-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]]
+// CHECK3: omp_offload.failed9:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109(ptr [[THIS1]], ptr [[TMP40]]) #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT10]]
+// CHECK3: omp_offload.cont10:
+// CHECK3-NEXT: [[TMP64:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: store i32 [[TMP64]], ptr [[DOTCAPTURE_EXPR_11]], align 4
+// CHECK3-NEXT: store ptr @.str.4, ptr [[DOTCAPTURE_EXPR_12]], align 4
+// CHECK3-NEXT: [[TMP65:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_12]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: store ptr [[TMP65]], ptr [[_TMP13]], align 4
+// CHECK3-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
+// CHECK3-NEXT: store i32 [[TMP66]], ptr [[DOTCAPTURE_EXPR__CASTED14]], align 4
+// CHECK3-NEXT: [[TMP67:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED14]], align 4
+// CHECK3-NEXT: [[TMP68:%.*]] = load ptr, ptr [[_TMP13]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: [[A15:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP69:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
+// CHECK3-NEXT: store ptr [[THIS1]], ptr [[TMP69]], align 4
+// CHECK3-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
+// CHECK3-NEXT: store ptr [[A15]], ptr [[TMP70]], align 4
+// CHECK3-NEXT: [[TMP71:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS18]], i32 0, i32 0
+// CHECK3-NEXT: store ptr null, ptr [[TMP71]], align 4
+// CHECK3-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1
+// CHECK3-NEXT: store i32 [[TMP67]], ptr [[TMP72]], align 4
+// CHECK3-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS17]], i32 0, i32 1
+// CHECK3-NEXT: store i32 [[TMP67]], ptr [[TMP73]], align 4
+// CHECK3-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS18]], i32 0, i32 1
+// CHECK3-NEXT: store ptr null, ptr [[TMP74]], align 4
+// CHECK3-NEXT: [[TMP75:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2
+// CHECK3-NEXT: store ptr [[TMP68]], ptr [[TMP75]], align 4
+// CHECK3-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS17]], i32 0, i32 2
+// CHECK3-NEXT: store ptr [[TMP68]], ptr [[TMP76]], align 4
+// CHECK3-NEXT: [[TMP77:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS18]], i32 0, i32 2
+// CHECK3-NEXT: store ptr null, ptr [[TMP77]], align 4
+// CHECK3-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP79:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP80:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
+// CHECK3-NEXT: [[TMP81:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP80]], 0
+// CHECK3-NEXT: [[TMP82:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP82]], align 4
+// CHECK3-NEXT: [[TMP83:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
+// CHECK3-NEXT: store i32 3, ptr [[TMP83]], align 4
+// CHECK3-NEXT: [[TMP84:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
+// CHECK3-NEXT: store ptr [[TMP78]], ptr [[TMP84]], align 4
+// CHECK3-NEXT: [[TMP85:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP79]], ptr [[TMP85]], align 4
+// CHECK3-NEXT: [[TMP86:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
+// CHECK3-NEXT: store ptr @.offload_sizes.5, ptr [[TMP86]], align 4
+// CHECK3-NEXT: [[TMP87:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
+// CHECK3-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP87]], align 4
+// CHECK3-NEXT: [[TMP88:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP88]], align 4
+// CHECK3-NEXT: [[TMP89:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP89]], align 4
+// CHECK3-NEXT: [[TMP90:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
+// CHECK3-NEXT: store i64 0, ptr [[TMP90]], align 8
+// CHECK3-NEXT: [[TMP91:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP91]], align 8
+// CHECK3-NEXT: [[TMP92:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP92]], align 4
+// CHECK3-NEXT: [[TMP93:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] [[TMP81]], ptr [[TMP93]], align 4
+// CHECK3-NEXT: [[TMP94:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP94]], align 4
+// CHECK3-NEXT: [[TMP95:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP80]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114.region_id, ptr [[KERNEL_ARGS19]])
+// CHECK3-NEXT: [[TMP96:%.*]] = icmp ne i32 [[TMP95]], 0
+// CHECK3-NEXT: br i1 [[TMP96]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
+// CHECK3: omp_offload.failed20:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114(ptr [[THIS1]], i32 [[TMP67]], ptr [[TMP68]]) #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT21]]
+// CHECK3: omp_offload.cont21:
+// CHECK3-NEXT: [[A22:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP97:%.*]] = load double, ptr [[A22]], align 4
+// CHECK3-NEXT: [[CONV:%.*]] = fptosi double [[TMP97]] to i32
+// CHECK3-NEXT: ret i32 [[CONV]]
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZL7fstaticiPKc
+// CHECK3-SAME: (i32 noundef [[N:%.*]], ptr noundef [[STR2:%.*]]) #[[ATTR0]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[STR2_ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1
+// CHECK3-NEXT: [[STR1:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[_TMP4:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED5:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[KERNEL_ARGS9:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_13:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED14:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[KERNEL_ARGS18:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED24:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS25:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_PTRS26:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS27:%.*]] = alloca [2 x ptr], align 4
+// CHECK3-NEXT: [[KERNEL_ARGS28:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: store ptr [[STR2]], ptr [[STR2_ADDR]], align 4
+// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[STR]], ptr align 1 @__const._ZL7fstaticiPKc.str, i32 4, i1 false)
+// CHECK3-NEXT: store ptr @.str.1, ptr [[STR1]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK3-NEXT: store ptr [[STR]], ptr [[DOTCAPTURE_EXPR_1]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
+// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP6]], align 4
+// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
+// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 4
+// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4
+// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP9]], align 4
+// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
+// CHECK3-NEXT: store ptr null, ptr [[TMP10]], align 4
+// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK3-NEXT: [[TMP14:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP13]], 0
+// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP15]], align 4
+// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK3-NEXT: store i32 2, ptr [[TMP16]], align 4
+// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK3-NEXT: store ptr [[TMP11]], ptr [[TMP17]], align 4
+// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP12]], ptr [[TMP18]], align 4
+// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK3-NEXT: store ptr @.offload_sizes.7, ptr [[TMP19]], align 4
+// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK3-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP20]], align 4
+// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP21]], align 4
+// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 4
+// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK3-NEXT: store i64 0, ptr [[TMP23]], align 8
+// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP24]], align 8
+// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP25]], align 4
+// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] [[TMP14]], ptr [[TMP26]], align 4
+// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP27]], align 4
+// CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP13]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77.region_id, ptr [[KERNEL_ARGS]])
+// CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
+// CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK3: omp_offload.failed:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77(i32 [[TMP3]], ptr [[TMP4]]) #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK3: omp_offload.cont:
+// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: store i32 [[TMP30]], ptr [[DOTCAPTURE_EXPR_2]], align 4
+// CHECK3-NEXT: store ptr @.str.4, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK3-NEXT: [[TMP31:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_3]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: store ptr [[TMP31]], ptr [[_TMP4]], align 4
+// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
+// CHECK3-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR__CASTED5]], align 4
+// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED5]], align 4
+// CHECK3-NEXT: [[TMP34:%.*]] = load ptr, ptr [[_TMP4]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
+// CHECK3-NEXT: store i32 [[TMP33]], ptr [[TMP35]], align 4
+// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
+// CHECK3-NEXT: store i32 [[TMP33]], ptr [[TMP36]], align 4
+// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
+// CHECK3-NEXT: store ptr null, ptr [[TMP37]], align 4
+// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1
+// CHECK3-NEXT: store ptr [[TMP34]], ptr [[TMP38]], align 4
+// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS7]], i32 0, i32 1
+// CHECK3-NEXT: store ptr [[TMP34]], ptr [[TMP39]], align 4
+// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1
+// CHECK3-NEXT: store ptr null, ptr [[TMP40]], align 4
+// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
+// CHECK3-NEXT: [[TMP44:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP43]], 0
+// CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP45]], align 4
+// CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 1
+// CHECK3-NEXT: store i32 2, ptr [[TMP46]], align 4
+// CHECK3-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 2
+// CHECK3-NEXT: store ptr [[TMP41]], ptr [[TMP47]], align 4
+// CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP42]], ptr [[TMP48]], align 4
+// CHECK3-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 4
+// CHECK3-NEXT: store ptr @.offload_sizes.9, ptr [[TMP49]], align 4
+// CHECK3-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 5
+// CHECK3-NEXT: store ptr @.offload_maptypes.10, ptr [[TMP50]], align 4
+// CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP51]], align 4
+// CHECK3-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP52]], align 4
+// CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 8
+// CHECK3-NEXT: store i64 0, ptr [[TMP53]], align 8
+// CHECK3-NEXT: [[TMP54:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP54]], align 8
+// CHECK3-NEXT: [[TMP55:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP55]], align 4
+// CHECK3-NEXT: [[TMP56:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] [[TMP44]], ptr [[TMP56]], align 4
+// CHECK3-NEXT: [[TMP57:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS9]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP57]], align 4
+// CHECK3-NEXT: [[TMP58:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP43]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81.region_id, ptr [[KERNEL_ARGS9]])
+// CHECK3-NEXT: [[TMP59:%.*]] = icmp ne i32 [[TMP58]], 0
+// CHECK3-NEXT: br i1 [[TMP59]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]]
+// CHECK3: omp_offload.failed10:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81(i32 [[TMP33]], ptr [[TMP34]]) #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT11]]
+// CHECK3: omp_offload.cont11:
+// CHECK3-NEXT: [[TMP60:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP60]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTCAPTURE_EXPR_12]], align 4
+// CHECK3-NEXT: [[TMP61:%.*]] = load ptr, ptr [[STR1]], align 4
+// CHECK3-NEXT: store ptr [[TMP61]], ptr [[DOTCAPTURE_EXPR_13]], align 4
+// CHECK3-NEXT: [[TMP62:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_12]], align 4
+// CHECK3-NEXT: store i32 [[TMP62]], ptr [[DOTCAPTURE_EXPR__CASTED14]], align 4
+// CHECK3-NEXT: [[TMP63:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED14]], align 4
+// CHECK3-NEXT: [[TMP64:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_13]], align 4
+// CHECK3-NEXT: [[TMP65:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
+// CHECK3-NEXT: store i32 [[TMP63]], ptr [[TMP65]], align 4
+// CHECK3-NEXT: [[TMP66:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
+// CHECK3-NEXT: store i32 [[TMP63]], ptr [[TMP66]], align 4
+// CHECK3-NEXT: [[TMP67:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 0
+// CHECK3-NEXT: store ptr null, ptr [[TMP67]], align 4
+// CHECK3-NEXT: [[TMP68:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 1
+// CHECK3-NEXT: store ptr [[TMP64]], ptr [[TMP68]], align 4
+// CHECK3-NEXT: [[TMP69:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 1
+// CHECK3-NEXT: store ptr [[TMP64]], ptr [[TMP69]], align 4
+// CHECK3-NEXT: [[TMP70:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 1
+// CHECK3-NEXT: store ptr null, ptr [[TMP70]], align 4
+// CHECK3-NEXT: [[TMP71:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP72:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP73:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_12]], align 4
+// CHECK3-NEXT: [[TMP74:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP73]], 0
+// CHECK3-NEXT: [[TMP75:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP75]], align 4
+// CHECK3-NEXT: [[TMP76:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 1
+// CHECK3-NEXT: store i32 2, ptr [[TMP76]], align 4
+// CHECK3-NEXT: [[TMP77:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 2
+// CHECK3-NEXT: store ptr [[TMP71]], ptr [[TMP77]], align 4
+// CHECK3-NEXT: [[TMP78:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP72]], ptr [[TMP78]], align 4
+// CHECK3-NEXT: [[TMP79:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 4
+// CHECK3-NEXT: store ptr @.offload_sizes.11, ptr [[TMP79]], align 4
+// CHECK3-NEXT: [[TMP80:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 5
+// CHECK3-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP80]], align 4
+// CHECK3-NEXT: [[TMP81:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP81]], align 4
+// CHECK3-NEXT: [[TMP82:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP82]], align 4
+// CHECK3-NEXT: [[TMP83:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 8
+// CHECK3-NEXT: store i64 0, ptr [[TMP83]], align 8
+// CHECK3-NEXT: [[TMP84:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP84]], align 8
+// CHECK3-NEXT: [[TMP85:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP85]], align 4
+// CHECK3-NEXT: [[TMP86:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] [[TMP74]], ptr [[TMP86]], align 4
+// CHECK3-NEXT: [[TMP87:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP87]], align 4
+// CHECK3-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP73]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85.region_id, ptr [[KERNEL_ARGS18]])
+// CHECK3-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0
+// CHECK3-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]]
+// CHECK3: omp_offload.failed19:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85(i32 [[TMP63]], ptr [[TMP64]]) #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT20]]
+// CHECK3: omp_offload.cont20:
+// CHECK3-NEXT: [[TMP90:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: [[ADD22:%.*]] = add nsw i32 32, [[TMP90]]
+// CHECK3-NEXT: store i32 [[ADD22]], ptr [[DOTCAPTURE_EXPR_21]], align 4
+// CHECK3-NEXT: [[TMP91:%.*]] = load ptr, ptr [[STR2_ADDR]], align 4
+// CHECK3-NEXT: store ptr [[TMP91]], ptr [[DOTCAPTURE_EXPR_23]], align 4
+// CHECK3-NEXT: [[TMP92:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4
+// CHECK3-NEXT: store i32 [[TMP92]], ptr [[DOTCAPTURE_EXPR__CASTED24]], align 4
+// CHECK3-NEXT: [[TMP93:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED24]], align 4
+// CHECK3-NEXT: [[TMP94:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_23]], align 4
+// CHECK3-NEXT: [[TMP95:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 0
+// CHECK3-NEXT: store i32 [[TMP93]], ptr [[TMP95]], align 4
+// CHECK3-NEXT: [[TMP96:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS26]], i32 0, i32 0
+// CHECK3-NEXT: store i32 [[TMP93]], ptr [[TMP96]], align 4
+// CHECK3-NEXT: [[TMP97:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS27]], i32 0, i32 0
+// CHECK3-NEXT: store ptr null, ptr [[TMP97]], align 4
+// CHECK3-NEXT: [[TMP98:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 1
+// CHECK3-NEXT: store ptr [[TMP94]], ptr [[TMP98]], align 4
+// CHECK3-NEXT: [[TMP99:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS26]], i32 0, i32 1
+// CHECK3-NEXT: store ptr [[TMP94]], ptr [[TMP99]], align 4
+// CHECK3-NEXT: [[TMP100:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS27]], i32 0, i32 1
+// CHECK3-NEXT: store ptr null, ptr [[TMP100]], align 4
+// CHECK3-NEXT: [[TMP101:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP102:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS26]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP103:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4
+// CHECK3-NEXT: [[TMP104:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP103]], 0
+// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP105]], align 4
+// CHECK3-NEXT: [[TMP106:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 1
+// CHECK3-NEXT: store i32 2, ptr [[TMP106]], align 4
+// CHECK3-NEXT: [[TMP107:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 2
+// CHECK3-NEXT: store ptr [[TMP101]], ptr [[TMP107]], align 4
+// CHECK3-NEXT: [[TMP108:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP102]], ptr [[TMP108]], align 4
+// CHECK3-NEXT: [[TMP109:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 4
+// CHECK3-NEXT: store ptr @.offload_sizes.13, ptr [[TMP109]], align 4
+// CHECK3-NEXT: [[TMP110:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 5
+// CHECK3-NEXT: store ptr @.offload_maptypes.14, ptr [[TMP110]], align 4
+// CHECK3-NEXT: [[TMP111:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP111]], align 4
+// CHECK3-NEXT: [[TMP112:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP112]], align 4
+// CHECK3-NEXT: [[TMP113:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 8
+// CHECK3-NEXT: store i64 0, ptr [[TMP113]], align 8
+// CHECK3-NEXT: [[TMP114:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP114]], align 8
+// CHECK3-NEXT: [[TMP115:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP115]], align 4
+// CHECK3-NEXT: [[TMP116:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] [[TMP104]], ptr [[TMP116]], align 4
+// CHECK3-NEXT: [[TMP117:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS28]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP117]], align 4
+// CHECK3-NEXT: [[TMP118:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP103]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89.region_id, ptr [[KERNEL_ARGS28]])
+// CHECK3-NEXT: [[TMP119:%.*]] = icmp ne i32 [[TMP118]], 0
+// CHECK3-NEXT: br i1 [[TMP119]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]]
+// CHECK3: omp_offload.failed29:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89(i32 [[TMP93]], ptr [[TMP94]]) #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT30]]
+// CHECK3: omp_offload.cont30:
+// CHECK3-NEXT: [[TMP120:%.*]] = load i32, ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP120]], 1
+// CHECK3-NEXT: ret i32 [[ADD31]]
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
+// CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1
+// CHECK3-NEXT: [[STR1:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
+// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[_TMP2:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 4
+// CHECK3-NEXT: [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK3-NEXT: [[B:%.*]] = alloca i16, align 2
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i16, align 2
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS11:%.*]] = alloca [4 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_PTRS12:%.*]] = alloca [4 x ptr], align 4
+// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS13:%.*]] = alloca [4 x ptr], align 4
+// CHECK3-NEXT: [[KERNEL_ARGS14:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
+// CHECK3-NEXT: store i32 0, ptr [[A]], align 4
+// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[STR]], ptr align 1 @__const._Z9ftemplateIiET_i.str, i32 4, i1 false)
+// CHECK3-NEXT: store ptr @.str.1, ptr [[STR1]], align 4
+// CHECK3-NEXT: store ptr [[STR]], ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP2]], align 4
+// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP3]], align 4
+// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
+// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4
+// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP7]], align 4
+// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4
+// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4
+// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4
+// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK3-NEXT: store ptr @.offload_sizes.15, ptr [[TMP11]], align 4
+// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK3-NEXT: store ptr @.offload_maptypes.16, ptr [[TMP12]], align 4
+// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4
+// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4
+// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK3-NEXT: store i64 0, ptr [[TMP15]], align 8
+// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP16]], align 8
+// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP17]], align 4
+// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] [i32 20, i32 0, i32 0], ptr [[TMP18]], align 4
+// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP19]], align 4
+// CHECK3-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 20, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55.region_id, ptr [[KERNEL_ARGS]])
+// CHECK3-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
+// CHECK3-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK3: omp_offload.failed:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55(ptr [[TMP1]]) #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK3: omp_offload.cont:
+// CHECK3-NEXT: store ptr @.str.4, ptr [[DOTCAPTURE_EXPR_1]], align 4
+// CHECK3-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: store ptr [[TMP22]], ptr [[_TMP2]], align 4
+// CHECK3-NEXT: [[TMP23:%.*]] = load ptr, ptr [[_TMP2]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
+// CHECK3-NEXT: store ptr [[TMP23]], ptr [[TMP24]], align 4
+// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
+// CHECK3-NEXT: store ptr [[TMP23]], ptr [[TMP25]], align 4
+// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
+// CHECK3-NEXT: store ptr null, ptr [[TMP26]], align 4
+// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP29]], align 4
+// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 1
+// CHECK3-NEXT: store i32 1, ptr [[TMP30]], align 4
+// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 2
+// CHECK3-NEXT: store ptr [[TMP27]], ptr [[TMP31]], align 4
+// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP28]], ptr [[TMP32]], align 4
+// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 4
+// CHECK3-NEXT: store ptr @.offload_sizes.17, ptr [[TMP33]], align 4
+// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 5
+// CHECK3-NEXT: store ptr @.offload_maptypes.18, ptr [[TMP34]], align 4
+// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP35]], align 4
+// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP36]], align 4
+// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 8
+// CHECK3-NEXT: store i64 0, ptr [[TMP37]], align 8
+// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP38]], align 8
+// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP39]], align 4
+// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] [i32 42, i32 0, i32 0], ptr [[TMP40]], align 4
+// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP41]], align 4
+// CHECK3-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 42, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59.region_id, ptr [[KERNEL_ARGS6]])
+// CHECK3-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
+// CHECK3-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
+// CHECK3: omp_offload.failed7:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59(ptr [[TMP23]]) #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT8]]
+// CHECK3: omp_offload.cont8:
+// CHECK3-NEXT: store i16 1, ptr [[B]], align 2
+// CHECK3-NEXT: [[TMP44:%.*]] = load i16, ptr [[B]], align 2
+// CHECK3-NEXT: store i16 [[TMP44]], ptr [[DOTCAPTURE_EXPR_9]], align 2
+// CHECK3-NEXT: [[TMP45:%.*]] = load ptr, ptr [[STR1]], align 4
+// CHECK3-NEXT: store ptr [[TMP45]], ptr [[DOTCAPTURE_EXPR_10]], align 4
+// CHECK3-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4
+// CHECK3-NEXT: store i32 [[TMP46]], ptr [[A_CASTED]], align 4
+// CHECK3-NEXT: [[TMP47:%.*]] = load i32, ptr [[A_CASTED]], align 4
+// CHECK3-NEXT: [[TMP48:%.*]] = load i16, ptr [[B]], align 2
+// CHECK3-NEXT: store i16 [[TMP48]], ptr [[B_CASTED]], align 2
+// CHECK3-NEXT: [[TMP49:%.*]] = load i32, ptr [[B_CASTED]], align 4
+// CHECK3-NEXT: [[TMP50:%.*]] = load i16, ptr [[DOTCAPTURE_EXPR_9]], align 2
+// CHECK3-NEXT: store i16 [[TMP50]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 2
+// CHECK3-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
+// CHECK3-NEXT: [[TMP52:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_10]], align 4
+// CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
+// CHECK3-NEXT: store i32 [[TMP47]], ptr [[TMP53]], align 4
+// CHECK3-NEXT: [[TMP54:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
+// CHECK3-NEXT: store i32 [[TMP47]], ptr [[TMP54]], align 4
+// CHECK3-NEXT: [[TMP55:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i32 0, i32 0
+// CHECK3-NEXT: store ptr null, ptr [[TMP55]], align 4
+// CHECK3-NEXT: [[TMP56:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 1
+// CHECK3-NEXT: store i32 [[TMP49]], ptr [[TMP56]], align 4
+// CHECK3-NEXT: [[TMP57:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 1
+// CHECK3-NEXT: store i32 [[TMP49]], ptr [[TMP57]], align 4
+// CHECK3-NEXT: [[TMP58:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i32 0, i32 1
+// CHECK3-NEXT: store ptr null, ptr [[TMP58]], align 4
+// CHECK3-NEXT: [[TMP59:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 2
+// CHECK3-NEXT: store i32 [[TMP51]], ptr [[TMP59]], align 4
+// CHECK3-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 2
+// CHECK3-NEXT: store i32 [[TMP51]], ptr [[TMP60]], align 4
+// CHECK3-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i32 0, i32 2
+// CHECK3-NEXT: store ptr null, ptr [[TMP61]], align 4
+// CHECK3-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP52]], ptr [[TMP62]], align 4
+// CHECK3-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP52]], ptr [[TMP63]], align 4
+// CHECK3-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i32 0, i32 3
+// CHECK3-NEXT: store ptr null, ptr [[TMP64]], align 4
+// CHECK3-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP67:%.*]] = load i16, ptr [[DOTCAPTURE_EXPR_9]], align 2
+// CHECK3-NEXT: [[TMP68:%.*]] = zext i16 [[TMP67]] to i32
+// CHECK3-NEXT: [[TMP69:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP68]], 0
+// CHECK3-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 0
+// CHECK3-NEXT: store i32 3, ptr [[TMP70]], align 4
+// CHECK3-NEXT: [[TMP71:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 1
+// CHECK3-NEXT: store i32 4, ptr [[TMP71]], align 4
+// CHECK3-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 2
+// CHECK3-NEXT: store ptr [[TMP65]], ptr [[TMP72]], align 4
+// CHECK3-NEXT: [[TMP73:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 3
+// CHECK3-NEXT: store ptr [[TMP66]], ptr [[TMP73]], align 4
+// CHECK3-NEXT: [[TMP74:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 4
+// CHECK3-NEXT: store ptr @.offload_sizes.19, ptr [[TMP74]], align 4
+// CHECK3-NEXT: [[TMP75:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 5
+// CHECK3-NEXT: store ptr @.offload_maptypes.20, ptr [[TMP75]], align 4
+// CHECK3-NEXT: [[TMP76:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 6
+// CHECK3-NEXT: store ptr null, ptr [[TMP76]], align 4
+// CHECK3-NEXT: [[TMP77:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 7
+// CHECK3-NEXT: store ptr null, ptr [[TMP77]], align 4
+// CHECK3-NEXT: [[TMP78:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 8
+// CHECK3-NEXT: store i64 0, ptr [[TMP78]], align 8
+// CHECK3-NEXT: [[TMP79:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 9
+// CHECK3-NEXT: store i64 0, ptr [[TMP79]], align 8
+// CHECK3-NEXT: [[TMP80:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 10
+// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP80]], align 4
+// CHECK3-NEXT: [[TMP81:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 11
+// CHECK3-NEXT: store [3 x i32] [[TMP69]], ptr [[TMP81]], align 4
+// CHECK3-NEXT: [[TMP82:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS14]], i32 0, i32 12
+// CHECK3-NEXT: store i32 0, ptr [[TMP82]], align 4
+// CHECK3-NEXT: [[TMP83:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP68]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64.region_id, ptr [[KERNEL_ARGS14]])
+// CHECK3-NEXT: [[TMP84:%.*]] = icmp ne i32 [[TMP83]], 0
+// CHECK3-NEXT: br i1 [[TMP84]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
+// CHECK3: omp_offload.failed15:
+// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64(i32 [[TMP47]], i32 [[TMP49]], i32 [[TMP51]], ptr [[TMP52]]) #[[ATTR3]]
+// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT16]]
+// CHECK3: omp_offload.cont16:
+// CHECK3-NEXT: [[TMP85:%.*]] = load i32, ptr [[A]], align 4
+// CHECK3-NEXT: ret i32 [[TMP85]]
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104
+// CHECK3-SAME: (ptr noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2:[0-9]+]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: store i32 [[B]], ptr [[B_ADDR]], align 4
+// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP]], align 4
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP4]], i32 0, i32 0
+// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP3]], i32 1, ptr [[ARRAYDECAY]])
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// CHECK3-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[B_CASTED]], align 4
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104.omp_outlined, ptr [[TMP1]], i32 [[TMP6]])
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], i32 noundef [[B:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: store i32 [[B]], ptr [[B_ADDR]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to double
+// CHECK3-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
+// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK3-NEXT: store double [[ADD]], ptr [[A]], align 4
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109
+// CHECK3-SAME: (ptr noundef [[THIS:%.*]], ptr noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr [[TMP2]])
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109.omp_outlined, ptr [[TMP1]])
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK3-NEXT: store double 2.500000e+00, ptr [[A]], align 4
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114
+// CHECK3-SAME: (ptr noundef [[THIS:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(12) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP]], align 4
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x i8], ptr [[TMP4]], i32 0, i32 0
+// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP3]], i32 2, ptr [[ARRAYDECAY]])
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114.omp_outlined, ptr [[TMP1]])
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK3-NEXT: store double 2.500000e+00, ptr [[A]], align 4
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77
+// CHECK3-SAME: (i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i32 0, i32 0
+// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr [[ARRAYDECAY]])
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77.omp_outlined)
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81
+// CHECK3-SAME: (i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(12) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x i8], ptr [[TMP3]], i32 0, i32 0
+// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]])
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81.omp_outlined)
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85
+// CHECK3-SAME: (i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 2, ptr [[TMP2]])
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85.omp_outlined)
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89
+// CHECK3-SAME: (i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr [[TMP2]])
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89.omp_outlined)
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55
+// CHECK3-SAME: (ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK3-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP2]], i32 0, i32 0
+// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 1, ptr [[ARRAYDECAY]])
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55.omp_outlined)
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
+// CHECK3-SAME: (ptr noundef nonnull align 1 dereferenceable(12) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK3-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META24]]
+// CHECK3-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x i8], ptr [[TMP2]], i32 0, i32 0
+// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 42, i32 1, ptr [[ARRAYDECAY]])
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59.omp_outlined)
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64
+// CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
+// CHECK3-NEXT: store i32 [[B]], ptr [[B_ADDR]], align 4
+// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load i16, ptr [[DOTCAPTURE_EXPR__ADDR]], align 2
+// CHECK3-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32
+// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[TMP3]])
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK3-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4
+// CHECK3-NEXT: [[TMP6:%.*]] = load i16, ptr [[B_ADDR]], align 2
+// CHECK3-NEXT: store i16 [[TMP6]], ptr [[B_CASTED]], align 2
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[B_CASTED]], align 4
+// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64.omp_outlined, i32 [[TMP5]], i32 [[TMP7]])
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64.omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR2]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
+// CHECK3-NEXT: store i32 [[B]], ptr [[B_ADDR]], align 4
+// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[B_ADDR]], align 2
+// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77
+// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
+// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META24:![0-9]+]]
+// CHECK9-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i64 0, i64 0
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr [[ARRAYDECAY]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77.omp_outlined)
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77.omp_outlined
+// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81
+// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(12) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x i8], ptr [[TMP3]], i64 0, i64 0
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81.omp_outlined)
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81.omp_outlined
+// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85
+// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 2, ptr [[TMP2]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85.omp_outlined)
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85.omp_outlined
+// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89
+// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr [[TMP2]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89.omp_outlined)
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89.omp_outlined
+// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104
+// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[B]], ptr [[B_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: store ptr [[TMP2]], ptr [[TMP]], align 8
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP4]], i64 0, i64 0
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP3]], i32 1, ptr [[ARRAYDECAY]])
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// CHECK9-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4
+// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104.omp_outlined, ptr [[TMP1]], i64 [[TMP6]])
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104.omp_outlined
+// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], i64 noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[B]], ptr [[B_ADDR]], align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// CHECK9-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to double
+// CHECK9-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
+// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK9-NEXT: store double [[ADD]], ptr [[A]], align 8
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109
+// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[THIS:%.*]], ptr noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr [[TMP2]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109.omp_outlined, ptr [[TMP1]])
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109.omp_outlined
+// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK9-NEXT: store double 2.500000e+00, ptr [[A]], align 8
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114
+// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[THIS:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(12) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: store ptr [[TMP2]], ptr [[TMP]], align 8
+// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x i8], ptr [[TMP4]], i64 0, i64 0
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP3]], i32 2, ptr [[ARRAYDECAY]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114.omp_outlined, ptr [[TMP1]])
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114.omp_outlined
+// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK9-NEXT: store double 2.500000e+00, ptr [[A]], align 8
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55
+// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP2]], i64 0, i64 0
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 1, ptr [[ARRAYDECAY]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55.omp_outlined)
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55.omp_outlined
+// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
+// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(12) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META24]]
+// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x i8], ptr [[TMP2]], i64 0, i64 0
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 42, i32 1, ptr [[ARRAYDECAY]])
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59.omp_outlined)
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59.omp_outlined
+// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64
+// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[A:%.*]], i64 noundef [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[B]], ptr [[B_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK9-NEXT: [[TMP1:%.*]] = load i16, ptr [[DOTCAPTURE_EXPR__ADDR]], align 2
+// CHECK9-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32
+// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[TMP3]])
+// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK9-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4
+// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8
+// CHECK9-NEXT: [[TMP6:%.*]] = load i16, ptr [[B_ADDR]], align 2
+// CHECK9-NEXT: store i16 [[TMP6]], ptr [[B_CASTED]], align 2
+// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[B_CASTED]], align 8
+// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64.omp_outlined, i64 [[TMP5]], i64 [[TMP7]])
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64.omp_outlined
+// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK9-NEXT: entry:
+// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
+// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK9-NEXT: store i64 [[B]], ptr [[B_ADDR]], align 8
+// CHECK9-NEXT: [[TMP0:%.*]] = load i16, ptr [[B_ADDR]], align 2
+// CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
+// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4
+// CHECK9-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77
+// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
+// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META25:![0-9]+]]
+// CHECK11-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META25]]
+// CHECK11-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i32 0, i32 0
+// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr [[ARRAYDECAY]])
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77.omp_outlined)
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l77.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81
+// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(12) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META25]]
+// CHECK11-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META25]]
+// CHECK11-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x i8], ptr [[TMP3]], i32 0, i32 0
+// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]])
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81.omp_outlined)
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l81.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85
+// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 2, ptr [[TMP2]])
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85.omp_outlined)
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l85.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89
+// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr [[TMP2]])
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89.omp_outlined)
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstaticiPKc_l89.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104
+// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[B]], ptr [[B_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META25]]
+// CHECK11-NEXT: store ptr [[TMP2]], ptr [[TMP]], align 4
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META25]]
+// CHECK11-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP4]], i32 0, i32 0
+// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP3]], i32 1, ptr [[ARRAYDECAY]])
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4
+// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[B_CASTED]], align 4
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104.omp_outlined, ptr [[TMP1]], i32 [[TMP6]])
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l104.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[B]], ptr [[B_ADDR]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to double
+// CHECK11-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
+// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK11-NEXT: store double [[ADD]], ptr [[A]], align 4
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109
+// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[THIS:%.*]], ptr noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr [[TMP2]])
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109.omp_outlined, ptr [[TMP1]])
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l109.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK11-NEXT: store double 2.500000e+00, ptr [[A]], align 4
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114
+// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[THIS:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(12) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META25]]
+// CHECK11-NEXT: store ptr [[TMP2]], ptr [[TMP]], align 4
+// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META25]]
+// CHECK11-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x i8], ptr [[TMP4]], i32 0, i32 0
+// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP3]], i32 2, ptr [[ARRAYDECAY]])
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114.omp_outlined, ptr [[TMP1]])
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l114.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK11-NEXT: store double 2.500000e+00, ptr [[A]], align 4
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55
+// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !nonnull [[META25]]
+// CHECK11-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META25]]
+// CHECK11-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP2]], i32 0, i32 0
+// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 1, ptr [[ARRAYDECAY]])
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55.omp_outlined)
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l55.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
+// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(12) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !nonnull [[META25]]
+// CHECK11-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4
+// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META25]]
+// CHECK11-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x i8], ptr [[TMP2]], i32 0, i32 0
+// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 42, i32 1, ptr [[ARRAYDECAY]])
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59.omp_outlined)
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64
+// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[A:%.*]], i32 noundef [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
+// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[B]], ptr [[B_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK11-NEXT: [[TMP1:%.*]] = load i16, ptr [[DOTCAPTURE_EXPR__ADDR]], align 2
+// CHECK11-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32
+// CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
+// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[TMP3]])
+// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4
+// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4
+// CHECK11-NEXT: [[TMP6:%.*]] = load i16, ptr [[B_ADDR]], align 2
+// CHECK11-NEXT: store i16 [[TMP6]], ptr [[B_CASTED]], align 2
+// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[B_CASTED]], align 4
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64.omp_outlined, i32 [[TMP5]], i32 [[TMP7]])
+// CHECK11-NEXT: ret void
+//
+//
+// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l64.omp_outlined
+// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK11-NEXT: entry:
+// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
+// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
+// CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
+// CHECK11-NEXT: store i32 [[B]], ptr [[B_ADDR]], align 4
+// CHECK11-NEXT: [[TMP0:%.*]] = load i16, ptr [[B_ADDR]], align 2
+// CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
+// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV]]
+// CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4
+// CHECK11-NEXT: ret void
+//
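A minimal sketch of the construct the checks above assert, assuming the OpenMP 6.0 'strict' num_threads semantics; the runtime signature is inferred from the call sites shown (severity(warning) lowers to 1, severity(fatal) to 2), and the names below are illustrative:

// Request exactly n threads; under the strict modifier, if the runtime cannot
// honor the request it reports "msg" with the given severity instead of
// silently shrinking the team.
void example(int n) {
#pragma omp parallel num_threads(strict: n) severity(warning) message("msg")
  { }
}
// Per the CHECK lines, this lowers to
//   call void @__kmpc_push_num_threads_strict(ptr @loc, i32 tid, i32 n, i32 1, ptr msg)
// before the usual __kmpc_fork_call of the outlined parallel region.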
diff --git a/clang/test/OpenMP/target_update_strided_messages.c b/clang/test/OpenMP/target_update_strided_messages.c
new file mode 100644
index 0000000..1f50af4
--- /dev/null
+++ b/clang/test/OpenMP/target_update_strided_messages.c
@@ -0,0 +1,38 @@
+// RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s -Wuninitialized
+// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s -Wuninitialized
+
+void foo(void) {}
+
+int main(int argc, char **argv) {
+ int len = 8;
+ double data[len];
+
+ // Valid strided array sections
+ #pragma omp target update from(data[0:4:2]) // OK
+ {}
+
+ #pragma omp target update to(data[0:len/2:2]) // OK
+ {}
+
+ #pragma omp target update from(data[1:3:2]) // OK
+ {}
+
+ // Missing stride (default = 1)
+ #pragma omp target update from(data[0:4]) // OK
+ {}
+
+ // Invalid stride expressions
+ #pragma omp target update from(data[0:4:0]) // expected-error {{section stride is evaluated to a non-positive value 0}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
+
+ #pragma omp target update from(data[0:4:-1]) // expected-error {{section stride is evaluated to a non-positive value -1}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
+
+ // Missing colon
+ #pragma omp target update from(data[0:4 2]) // expected-error {{expected ']'}} expected-note {{to match this '['}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
+ {}
+
+ // Too many colons
+ #pragma omp target update from(data[0:4:2:1]) // expected-error {{expected ']'}} expected-note {{to match this '['}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
+ {}
+
+ return 0;
+}
\ No newline at end of file
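For context on the diagnostics above, a minimal sketch of the strided array-section syntax being tested, assuming the OpenMP 6.0 [lower : length : stride] form; the function and buffer names are hypothetical:

void sketch(void) {
  double data[8];
#pragma omp target enter data map(to: data[0:8])
  // data[lower:length:stride] selects 'length' elements starting at 'lower',
  // stepping by 'stride' -- here elements 0, 2, 4 and 6 of 'data'.
#pragma omp target update from(data[0:4:2])
#pragma omp target exit data map(delete: data[0:8])
}

A stride that folds to a non-positive constant is rejected at compile time, as the expected-error lines assert; a runtime-dependent stride expression is not.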
diff --git a/clang/test/OpenMP/target_update_strided_multiple_messages.c b/clang/test/OpenMP/target_update_strided_multiple_messages.c
new file mode 100644
index 0000000..361d4c6
--- /dev/null
+++ b/clang/test/OpenMP/target_update_strided_multiple_messages.c
@@ -0,0 +1,46 @@
+// RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s -Wuninitialized
+// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s -Wuninitialized
+
+void foo(void) {}
+
+typedef struct {
+ int len;
+ double data[12];
+} S;
+
+int main(int argc, char **argv) {
+ int len = 12;
+ double data1[len], data2[len];
+ S s;
+
+ // Valid multiple strided array sections
+ #pragma omp target update from(data1[0:4:2], data2[0:2:5]) // OK
+ {}
+
+ #pragma omp target update to(data1[1:2:3], data2[2:3:2]) // OK
+ {}
+
+ // Mixed strided and regular array sections
+ #pragma omp target update from(data1[0:len], data2[0:4:2]) // OK
+ {}
+
+ // Struct member arrays with strides
+ #pragma omp target update from(s.data[0:4:2]) // OK
+ {}
+
+ #pragma omp target update from(s.data[0:s.len/2:2]) // OK
+ {}
+
+ // Invalid stride in one of multiple sections
+ #pragma omp target update from(data1[0:3:4], data2[0:2:0]) // expected-error {{section stride is evaluated to a non-positive value 0}}
+
+ // Complex expressions in multiple arrays
+ int stride1 = 2, stride2 = 3;
+ #pragma omp target update from(data1[0:len/2:stride1], data2[1:len/3:stride2]) // OK
+ {}
+
+ // Missing colon
+ #pragma omp target update from(data1[0:4:2], data2[0:3 4]) // expected-error {{expected ']'}} expected-note {{to match this '['}}
+
+ return 0;
+}
\ No newline at end of file
diff --git a/clang/test/OpenMP/target_update_strided_partial_messages.c b/clang/test/OpenMP/target_update_strided_partial_messages.c
new file mode 100644
index 0000000..6dc286c
--- /dev/null
+++ b/clang/test/OpenMP/target_update_strided_partial_messages.c
@@ -0,0 +1,32 @@
+// RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s -Wuninitialized
+// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s -Wuninitialized
+
+void foo(void) {}
+
+int main(int argc, char **argv) {
+ int len = 11;
+ double data[len];
+
+ // Valid partial strided updates
+ #pragma omp target update from(data[0:4:3]) // OK
+ {}
+
+ // Stride larger than length
+ #pragma omp target update from(data[0:2:10]) // OK
+ {}
+
+ // Valid: complex expressions
+ int offset = 1;
+ int count = 3;
+ int stride = 2;
+ #pragma omp target update from(data[offset:count:stride]) // OK
+ {}
+
+ // Stride expressions: a runtime-dependent stride is not diagnosed at compile time; a constant non-positive stride is an error
+ #pragma omp target update from(data[0:4:offset-1]) // OK if offset > 1
+ {}
+
+ #pragma omp target update from(data[0:count:0]) // expected-error {{section stride is evaluated to a non-positive value 0}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
+
+ return 0;
+}
\ No newline at end of file
diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_num_threads_strict_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_num_threads_strict_codegen.cpp
new file mode 100644
index 0000000..02b7126
--- /dev/null
+++ b/clang/test/OpenMP/teams_distribute_parallel_for_num_threads_strict_codegen.cpp
@@ -0,0 +1,1447 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
+
+// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
+
+// expected-no-diagnostics
+#ifndef HEADER
+#define HEADER
+
+typedef __INTPTR_TYPE__ intptr_t;
+
+
+void foo();
+
+struct S {
+ intptr_t a, b, c;
+ S(intptr_t a) : a(a) {}
+ operator char() { return a; }
+ ~S() {}
+};
+
+template <typename T, int C>
+int tmain() {
+ char str[] = "msg1";
+#pragma omp target
+#pragma omp teams distribute parallel for num_threads(strict: C) severity(fatal) message("msg")
+ for (int i = 0; i < 100; i++)
+ foo();
+#pragma omp target
+#pragma omp teams distribute parallel for num_threads(strict: T(23)) severity(warning) message(str)
+ for (int i = 0; i < 100; i++)
+ foo();
+ return 0;
+}
+
+int main() {
+ S s(0);
+ char a = s;
+ char str[] = "msg2";
+#pragma omp target
+#pragma omp teams distribute parallel for num_threads(strict: 2) severity(warning) message("msg")
+ for (int i = 0; i < 100; i++) {
+ foo();
+ }
+#pragma omp target
+
+#pragma omp teams distribute parallel for num_threads(strict: a) severity(fatal) message(str)
+ for (int i = 0; i < 100; i++) {
+ foo();
+ }
+ return a + tmain<char, 5>() + tmain<S, 1>();
+}
+
+#endif
+// CHECK1-LABEL: define {{[^@]+}}@main
+// CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1
+// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4
+// CHECK1-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0)
+// CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]])
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1
+// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const.main.str, i64 5, i1 false)
+// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [i32 2, i32 0, i32 0], ptr [[TMP11]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 2, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK1: omp_offload.failed:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44() #[[ATTR5:[0-9]+]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK1: lpad:
+// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: cleanup
+// CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
+// CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1
+// CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR5]]
+// CHECK1-NEXT: br label [[EH_RESUME:%.*]]
+// CHECK1: omp_offload.cont:
+// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1
+// CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1
+// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8
+// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8
+// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8
+// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8
+// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP23]], align 8
+// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP24]], align 8
+// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
+// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8
+// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP28:%.*]] = load i8, ptr [[A]], align 1
+// CHECK1-NEXT: store i8 [[TMP28]], ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: [[TMP30:%.*]] = zext i8 [[TMP29]] to i32
+// CHECK1-NEXT: [[TMP31:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP30]], 0
+// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP32]], align 4
+// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK1-NEXT: store i32 2, ptr [[TMP33]], align 4
+// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP26]], ptr [[TMP34]], align 8
+// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP27]], ptr [[TMP35]], align 8
+// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP36]], align 8
+// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP37]], align 8
+// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP38]], align 8
+// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP39]], align 8
+// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP40]], align 8
+// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP41]], align 8
+// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP42]], align 4
+// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [[TMP31]], ptr [[TMP43]], align 4
+// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP44]], align 4
+// CHECK1-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 [[TMP30]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK1-NEXT: [[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0
+// CHECK1-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK1: omp_offload.failed3:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49(i64 [[TMP19]], ptr [[STR]]) #[[ATTR5]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK1: omp_offload.cont4:
+// CHECK1-NEXT: [[TMP47:%.*]] = load i8, ptr [[A]], align 1
+// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP47]] to i32
+// CHECK1-NEXT: [[CALL6:%.*]] = invoke noundef signext i32 @_Z5tmainIcLi5EEiv()
+// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
+// CHECK1: invoke.cont5:
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]]
+// CHECK1-NEXT: [[CALL8:%.*]] = invoke noundef signext i32 @_Z5tmainI1SLi1EEiv()
+// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]]
+// CHECK1: invoke.cont7:
+// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]]
+// CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4
+// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR5]]
+// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK1-NEXT: ret i32 [[TMP48]]
+// CHECK1: eh.resume:
+// CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
+// CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
+// CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
+// CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]]
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El
+// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv
+// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
+// CHECK1-NEXT: ret i8 [[CONV]]
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44
+// CHECK1-SAME: () #[[ATTR4:[0-9]+]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15:![0-9]+]]
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined, ptr [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 2, i32 1, ptr [[ARRAYDECAY]])
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]])
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8:[0-9]+]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate
+// CHECK1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR7:[0-9]+]] comdat {
+// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR5]]
+// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR8]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49
+// CHECK1-SAME: (i64 noundef [[A:%.*]], ptr noundef nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR4]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[A_ADDR]], align 1
+// CHECK1-NEXT: store i8 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_1]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1
+// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined, i64 [[TMP3]], ptr [[TMP4]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(5) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR4]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1
+// CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32
+// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP10]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]], i32 2, ptr [[ARRAYDECAY]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined.omp_outlined, i64 [[TMP12]], i64 [[TMP14]])
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
+// CHECK1-SAME: () #[[ATTR2]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false)
+// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [i32 5, i32 0, i32 0], ptr [[TMP11]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 5, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK1: omp_offload.failed:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29() #[[ATTR5]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK1: omp_offload.cont:
+// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8
+// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8
+// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4
+// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK1-NEXT: store i32 1, ptr [[TMP21]], align 4
+// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8
+// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8
+// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.1, ptr [[TMP24]], align 8
+// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP25]], align 8
+// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8
+// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8
+// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP28]], align 8
+// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP29]], align 8
+// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4
+// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [i32 23, i32 0, i32 0], ptr [[TMP31]], align 4
+// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP32]], align 4
+// CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 23, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
+// CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK1: omp_offload.failed3:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33(ptr [[STR]]) #[[ATTR5]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK1: omp_offload.cont4:
+// CHECK1-NEXT: ret i32 0
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
+// CHECK1-SAME: () #[[ATTR2]] comdat personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
+// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false)
+// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP11]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK1: omp_offload.failed:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29() #[[ATTR5]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK1: omp_offload.cont:
+// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8
+// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8
+// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23)
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]])
+// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR5]]
+// CHECK1-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: [[TMP21:%.*]] = zext i8 [[TMP20]] to i32
+// CHECK1-NEXT: [[TMP22:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP21]], 0
+// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP23]], align 4
+// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK1-NEXT: store i32 1, ptr [[TMP24]], align 4
+// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP25]], align 8
+// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP26]], align 8
+// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP27]], align 8
+// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP28]], align 8
+// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP29]], align 8
+// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP30]], align 8
+// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP31]], align 8
+// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP32]], align 8
+// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4
+// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [[TMP22]], ptr [[TMP34]], align 4
+// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP35]], align 4
+// CHECK1-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 [[TMP21]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK1-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
+// CHECK1-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK1: omp_offload.failed3:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33(ptr [[STR]]) #[[ATTR5]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK1: omp_offload.cont4:
+// CHECK1-NEXT: ret i32 0
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP38:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP39:%.*]] = extractvalue { ptr, i32 } [[TMP38]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP39]]) #[[ATTR8]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev
+// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR5]]
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El
+// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29
+// CHECK1-SAME: () #[[ATTR4]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined, ptr [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 5, i32 2, ptr [[ARRAYDECAY]])
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]])
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33
+// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR4]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined, ptr [[TMP1]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(5) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP8]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 23, i32 1, ptr [[ARRAYDECAY]])
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]])
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29
+// CHECK1-SAME: () #[[ATTR4]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined, ptr [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i32 2, ptr [[ARRAYDECAY]])
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]])
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33
+// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
+// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23)
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]])
+// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR5]]
+// CHECK1-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_1]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: store i8 [[TMP1]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1
+// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined, i64 [[TMP2]], ptr [[TMP3]])
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP4:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP4]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP5]]) #[[ATTR8]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(5) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR4]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1
+// CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32
+// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP10]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]], i32 1, ptr [[ARRAYDECAY]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined.omp_outlined, i64 [[TMP12]], i64 [[TMP14]])
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev
+// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: ret void
+//
diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_simd_num_threads_strict_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_simd_num_threads_strict_codegen.cpp
new file mode 100644
index 0000000..559cfee
--- /dev/null
+++ b/clang/test/OpenMP/teams_distribute_parallel_for_simd_num_threads_strict_codegen.cpp
@@ -0,0 +1,1910 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
+
+// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3
+// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
+
+// expected-no-diagnostics
+#ifndef HEADER
+#define HEADER
+
+typedef __INTPTR_TYPE__ intptr_t;
+
+
+void foo();
+
+struct S {
+ intptr_t a, b, c;
+ S(intptr_t a) : a(a) {}
+ operator char() { return a; }
+ ~S() {}
+};
+
+template <typename T, int C>
+int tmain() {
+ char str[] = "msg";
+#pragma omp target
+#pragma omp teams distribute parallel for simd num_threads(strict: C) severity(warning) message("msg")
+ for (int i = 0; i < 100; i++)
+ foo();
+#pragma omp target
+#pragma omp teams distribute parallel for simd num_threads(strict: T(23)) severity(fatal) message(str)
+ for (int i = 0; i < 100; i++)
+ foo();
+ return 0;
+}
+
+int main() {
+ S s(0);
+ char a = s;
+ const char *str = "msg";
+#pragma omp target
+#pragma omp teams distribute parallel for simd num_threads(strict: 2) severity(fatal) message("msg")
+ for (int i = 0; i < 100; i++) {
+ foo();
+ }
+#pragma omp target
+#pragma omp teams distribute parallel for simd num_threads(strict: a) severity(warning) message(str)
+ for (int i = 0; i < 100; i++) {
+ foo();
+ }
+ return a + tmain<char, 5>() + tmain<S, 1>();
+}
+
+#endif
+// CHECK1-LABEL: define {{[^@]+}}@main
+// CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1
+// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[STR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4
+// CHECK1-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0)
+// CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]])
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1
+// CHECK1-NEXT: store ptr @.str, ptr [[STR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [i32 2, i32 0, i32 0], ptr [[TMP11]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 2, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK1: omp_offload.failed:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44() #[[ATTR4:[0-9]+]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK1: lpad:
+// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: cleanup
+// CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
+// CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1
+// CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR4]]
+// CHECK1-NEXT: br label [[EH_RESUME:%.*]]
+// CHECK1: omp_offload.cont:
+// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1
+// CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1
+// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8
+// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[STR]], align 8
+// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8
+// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP22]], align 8
+// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8
+// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP24]], align 8
+// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP25]], align 8
+// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
+// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8
+// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP29:%.*]] = load i8, ptr [[A]], align 1
+// CHECK1-NEXT: store i8 [[TMP29]], ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: [[TMP31:%.*]] = zext i8 [[TMP30]] to i32
+// CHECK1-NEXT: [[TMP32:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP31]], 0
+// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP33]], align 4
+// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK1-NEXT: store i32 2, ptr [[TMP34]], align 4
+// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP27]], ptr [[TMP35]], align 8
+// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP28]], ptr [[TMP36]], align 8
+// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP37]], align 8
+// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP38]], align 8
+// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP39]], align 8
+// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP40]], align 8
+// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP41]], align 8
+// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP42]], align 8
+// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP43]], align 4
+// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [[TMP32]], ptr [[TMP44]], align 4
+// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP45]], align 4
+// CHECK1-NEXT: [[TMP46:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 [[TMP31]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK1-NEXT: [[TMP47:%.*]] = icmp ne i32 [[TMP46]], 0
+// CHECK1-NEXT: br i1 [[TMP47]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK1: omp_offload.failed3:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49(i64 [[TMP19]], ptr [[TMP20]]) #[[ATTR4]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK1: omp_offload.cont4:
+// CHECK1-NEXT: [[TMP48:%.*]] = load i8, ptr [[A]], align 1
+// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP48]] to i32
+// CHECK1-NEXT: [[CALL6:%.*]] = invoke noundef signext i32 @_Z5tmainIcLi5EEiv()
+// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
+// CHECK1: invoke.cont5:
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]]
+// CHECK1-NEXT: [[CALL8:%.*]] = invoke noundef signext i32 @_Z5tmainI1SLi1EEiv()
+// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]]
+// CHECK1: invoke.cont7:
+// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]]
+// CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4
+// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR4]]
+// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK1-NEXT: ret i32 [[TMP49]]
+// CHECK1: eh.resume:
+// CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
+// CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
+// CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
+// CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]]
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El
+// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv
+// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
+// CHECK1-NEXT: ret i8 [[CONV]]
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44
+// CHECK1-SAME: () #[[ATTR3:[0-9]+]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15:![0-9]+]]
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined, ptr [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]]
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP16]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 2, i32 2, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP16]]
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP16]]
+// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]]
+// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP16]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]]
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP16]]
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
+// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP20]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8:[0-9]+]], !llvm.access.group [[ACC_GRP20]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate
+// CHECK1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] comdat {
+// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR8]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49
+// CHECK1-SAME: (i64 noundef [[A:%.*]], ptr noundef [[STR:%.*]]) #[[ATTR3]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1
+// CHECK1-NEXT: store i8 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR_ADDR]], align 8
+// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1
+// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined, i64 [[TMP3]], ptr [[TMP4]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR3]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]]
+// CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP25]]
+// CHECK1-NEXT: [[TMP8:%.*]] = sext i8 [[TMP7]] to i32
+// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !llvm.access.group [[ACC_GRP25]]
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP8]], i32 1, ptr [[TMP9]]), !llvm.access.group [[ACC_GRP25]]
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP25]]
+// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]]
+// CHECK1-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP25]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP25]]
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
+// CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP28]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP28]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
+// CHECK1-SAME: () #[[ATTR2]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 4, i1 false)
+// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [i32 5, i32 0, i32 0], ptr [[TMP11]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 5, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK1: omp_offload.failed:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29() #[[ATTR4]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK1: omp_offload.cont:
+// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8
+// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8
+// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4
+// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK1-NEXT: store i32 1, ptr [[TMP21]], align 4
+// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8
+// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8
+// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.1, ptr [[TMP24]], align 8
+// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP25]], align 8
+// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8
+// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8
+// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP28]], align 8
+// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP29]], align 8
+// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4
+// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [i32 23, i32 0, i32 0], ptr [[TMP31]], align 4
+// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP32]], align 4
+// CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 23, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
+// CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK1: omp_offload.failed3:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33(ptr [[STR]]) #[[ATTR4]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK1: omp_offload.cont4:
+// CHECK1-NEXT: ret i32 0
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
+// CHECK1-SAME: () #[[ATTR2]] comdat personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
+// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 4, i1 false)
+// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
+// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
+// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
+// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
+// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
+// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP11]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
+// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.region_id, ptr [[KERNEL_ARGS]])
+// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
+// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK1: omp_offload.failed:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29() #[[ATTR4]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
+// CHECK1: omp_offload.cont:
+// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8
+// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8
+// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23)
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]])
+// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: [[TMP21:%.*]] = zext i8 [[TMP20]] to i32
+// CHECK1-NEXT: [[TMP22:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP21]], 0
+// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
+// CHECK1-NEXT: store i32 3, ptr [[TMP23]], align 4
+// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
+// CHECK1-NEXT: store i32 1, ptr [[TMP24]], align 4
+// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
+// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP25]], align 8
+// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
+// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP26]], align 8
+// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
+// CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP27]], align 8
+// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
+// CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP28]], align 8
+// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
+// CHECK1-NEXT: store ptr null, ptr [[TMP29]], align 8
+// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
+// CHECK1-NEXT: store ptr null, ptr [[TMP30]], align 8
+// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
+// CHECK1-NEXT: store i64 100, ptr [[TMP31]], align 8
+// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
+// CHECK1-NEXT: store i64 0, ptr [[TMP32]], align 8
+// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
+// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4
+// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
+// CHECK1-NEXT: store [3 x i32] [[TMP22]], ptr [[TMP34]], align 4
+// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
+// CHECK1-NEXT: store i32 0, ptr [[TMP35]], align 4
+// CHECK1-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 [[TMP21]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.region_id, ptr [[KERNEL_ARGS2]])
+// CHECK1-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
+// CHECK1-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
+// CHECK1: omp_offload.failed3:
+// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33(ptr [[STR]]) #[[ATTR4]]
+// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
+// CHECK1: omp_offload.cont4:
+// CHECK1-NEXT: ret i32 0
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP38:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP39:%.*]] = extractvalue { ptr, i32 } [[TMP38]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP39]]) #[[ATTR8]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev
+// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR4]]
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El
+// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29
+// CHECK1-SAME: () #[[ATTR3]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined, ptr [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP31]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP31]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 5, i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP31]]
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP31]]
+// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP31]]
+// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP31]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP31]]
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
+// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP34]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP34]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP34]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33
+// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(4) [[STR:%.*]]) #[[ATTR3]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined, ptr [[TMP1]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]]
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP37]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP37]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 23, i32 2, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP37]]
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP37]]
+// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP37]]
+// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP37]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP37]]
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
+// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40:![0-9]+]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP40]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]]
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP40]]
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP40]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]]
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP40]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29
+// CHECK1-SAME: () #[[ATTR3]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined, ptr [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43:![0-9]+]]
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP43]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP43]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP43]]
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP43]]
+// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP43]]
+// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP43]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP43]]
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
+// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46:![0-9]+]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP46]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]]
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP46]]
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP46]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]]
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP46]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33
+// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(4) [[STR:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
+// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23)
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]])
+// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_1]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
+// CHECK1-NEXT: store i8 [[TMP1]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1
+// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined, i64 [[TMP2]], ptr [[TMP3]])
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP4:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP4]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP5]]) #[[ATTR8]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR3]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META15]]
+// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49:![0-9]+]]
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP49]]
+// CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
+// CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP49]]
+// CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32
+// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP49]]
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP10]], i64 0, i64 0
+// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]], i32 2, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP49]]
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP49]]
+// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP49]]
+// CHECK1-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined.omp_outlined, i64 [[TMP12]], i64 [[TMP14]]), !llvm.access.group [[ACC_GRP49]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
+// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP49]]
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
+// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
+// CHECK1-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined.omp_outlined
+// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
+// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
+// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52:![0-9]+]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP52]]
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]]
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP52]]
+// CHECK1-NEXT: invoke void @_Z3foov()
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP52]]
+// CHECK1: invoke.cont:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]]
+// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
+// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]]
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
+// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
+// CHECK1: .omp.final.then:
+// CHECK1-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
+// CHECK1: .omp.final.done:
+// CHECK1-NEXT: ret void
+// CHECK1: terminate.lpad:
+// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
+// CHECK1-NEXT: catch ptr null
+// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
+// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP52]]
+// CHECK1-NEXT: unreachable
+//
+//
+// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev
+// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@main
+// CHECK3-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK3-NEXT: [[A:%.*]] = alloca i8, align 1
+// CHECK3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[STR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i8, align 1
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB7:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB8:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IV9:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I10:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4
+// CHECK3-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0)
+// CHECK3-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]])
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: store i8 [[CALL]], ptr [[A]], align 1
+// CHECK3-NEXT: store ptr @.str, ptr [[STR]], align 8
+// CHECK3-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META2:![0-9]+]]
+// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]]
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]]
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
+// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP3]]
+// CHECK3: invoke.cont2:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK3: omp.body.continue:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
+// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
+// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
+// CHECK3: lpad:
+// CHECK3-NEXT: [[TMP6:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: cleanup
+// CHECK3-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 0
+// CHECK3-NEXT: store ptr [[TMP7]], ptr [[EXN_SLOT]], align 8
+// CHECK3-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 1
+// CHECK3-NEXT: store i32 [[TMP8]], ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6:[0-9]+]]
+// CHECK3-NEXT: br label [[EH_RESUME:%.*]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[A]], align 1
+// CHECK3-NEXT: store i8 [[TMP9]], ptr [[DOTCAPTURE_EXPR_4]], align 1
+// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[STR]], align 8
+// CHECK3-NEXT: store ptr [[TMP10]], ptr [[DOTCAPTURE_EXPR_5]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB7]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB8]], align 4
+// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB7]], align 4
+// CHECK3-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV9]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND11:%.*]]
+// CHECK3: omp.inner.for.cond11:
+// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV9]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]]
+// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB8]], align 4, !llvm.access.group [[ACC_GRP7]]
+// CHECK3-NEXT: [[CMP12:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
+// CHECK3-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY13:%.*]], label [[OMP_INNER_FOR_END20:%.*]]
+// CHECK3: omp.inner.for.body13:
+// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV9]], align 4, !llvm.access.group [[ACC_GRP7]]
+// CHECK3-NEXT: [[MUL14:%.*]] = mul nsw i32 [[TMP14]], 1
+// CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
+// CHECK3-NEXT: store i32 [[ADD15]], ptr [[I10]], align 4, !llvm.access.group [[ACC_GRP7]]
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT16:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP7]]
+// CHECK3: invoke.cont16:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE17:%.*]]
+// CHECK3: omp.body.continue17:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC18:%.*]]
+// CHECK3: omp.inner.for.inc18:
+// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV9]], align 4, !llvm.access.group [[ACC_GRP7]]
+// CHECK3-NEXT: [[ADD19:%.*]] = add nsw i32 [[TMP15]], 1
+// CHECK3-NEXT: store i32 [[ADD19]], ptr [[DOTOMP_IV9]], align 4, !llvm.access.group [[ACC_GRP7]]
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND11]], !llvm.loop [[LOOP8:![0-9]+]]
+// CHECK3: omp.inner.for.end20:
+// CHECK3-NEXT: store i32 100, ptr [[I10]], align 4
+// CHECK3-NEXT: [[TMP16:%.*]] = load i8, ptr [[A]], align 1
+// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP16]] to i32
+// CHECK3-NEXT: [[CALL22:%.*]] = invoke noundef signext i32 @_Z5tmainIcLi5EEiv()
+// CHECK3-NEXT: to label [[INVOKE_CONT21:%.*]] unwind label [[LPAD]]
+// CHECK3: invoke.cont21:
+// CHECK3-NEXT: [[ADD23:%.*]] = add nsw i32 [[CONV]], [[CALL22]]
+// CHECK3-NEXT: [[CALL25:%.*]] = invoke noundef signext i32 @_Z5tmainI1SLi1EEiv()
+// CHECK3-NEXT: to label [[INVOKE_CONT24:%.*]] unwind label [[LPAD]]
+// CHECK3: invoke.cont24:
+// CHECK3-NEXT: [[ADD26:%.*]] = add nsw i32 [[ADD23]], [[CALL25]]
+// CHECK3-NEXT: store i32 [[ADD26]], ptr [[RETVAL]], align 4
+// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6]]
+// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK3-NEXT: ret i32 [[TMP17]]
+// CHECK3: eh.resume:
+// CHECK3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
+// CHECK3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
+// CHECK3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
+// CHECK3-NEXT: [[LPAD_VAL27:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
+// CHECK3-NEXT: resume { ptr, i32 } [[LPAD_VAL27]]
+// CHECK3: terminate.lpad:
+// CHECK3-NEXT: [[TMP18:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: catch ptr null
+// CHECK3-NEXT: [[TMP19:%.*]] = extractvalue { ptr, i32 } [[TMP18]], 0
+// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP19]]) #[[ATTR7:[0-9]+]], !llvm.access.group [[ACC_GRP3]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1El
+// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]])
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1ScvcEv
+// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
+// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
+// CHECK3-NEXT: ret i8 [[CONV]]
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate
+// CHECK3-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR4:[0-9]+]] comdat {
+// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR6]]
+// CHECK3-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
+// CHECK3-SAME: () #[[ATTR2]] comdat personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[_TMP4:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[_TMP5:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB6:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB7:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IV8:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I9:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 4, i1 false)
+// CHECK3-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META2]]
+// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]]
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]]
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
+// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP10]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK3: omp.body.continue:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
+// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1
+// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK3-NEXT: store ptr [[STR]], ptr [[DOTCAPTURE_EXPR_3]], align 8
+// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_3]], align 8, !nonnull [[META2]]
+// CHECK3-NEXT: store ptr [[TMP6]], ptr [[_TMP4]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB6]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB7]], align 4
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB6]], align 4
+// CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV8]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]]
+// CHECK3: omp.inner.for.cond10:
+// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV8]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB7]], align 4, !llvm.access.group [[ACC_GRP13]]
+// CHECK3-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
+// CHECK3-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
+// CHECK3: omp.inner.for.body12:
+// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV8]], align 4, !llvm.access.group [[ACC_GRP13]]
+// CHECK3-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP10]], 1
+// CHECK3-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
+// CHECK3-NEXT: store i32 [[ADD14]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP13]]
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP13]]
+// CHECK3: invoke.cont15:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]]
+// CHECK3: omp.body.continue16:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]]
+// CHECK3: omp.inner.for.inc17:
+// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV8]], align 4, !llvm.access.group [[ACC_GRP13]]
+// CHECK3-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP11]], 1
+// CHECK3-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV8]], align 4, !llvm.access.group [[ACC_GRP13]]
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP14:![0-9]+]]
+// CHECK3: omp.inner.for.end19:
+// CHECK3-NEXT: store i32 100, ptr [[I9]], align 4
+// CHECK3-NEXT: ret i32 0
+// CHECK3: terminate.lpad:
+// CHECK3-NEXT: [[TMP12:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: catch ptr null
+// CHECK3-NEXT: [[TMP13:%.*]] = extractvalue { ptr, i32 } [[TMP12]], 0
+// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP13]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP10]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
+// CHECK3-SAME: () #[[ATTR2]] comdat personality ptr @__gxx_personality_v0 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i8, align 1
+// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK3-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[_TMP6:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[_TMP7:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_LB8:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_UB9:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[DOTOMP_IV10:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: [[I11:%.*]] = alloca i32, align 4
+// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 4, i1 false)
+// CHECK3-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META2]]
+// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
+// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK3: omp.inner.for.cond:
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]]
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP16]]
+// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
+// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK3: omp.inner.for.body:
+// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]]
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
+// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP16]]
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP16]]
+// CHECK3: invoke.cont:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK3: omp.body.continue:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK3: omp.inner.for.inc:
+// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]]
+// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1
+// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]]
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]]
+// CHECK3: omp.inner.for.end:
+// CHECK3-NEXT: store i32 100, ptr [[I]], align 4
+// CHECK3-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23)
+// CHECK3-NEXT: to label [[INVOKE_CONT4:%.*]] unwind label [[TERMINATE_LPAD]]
+// CHECK3: invoke.cont4:
+// CHECK3-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]])
+// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR6]]
+// CHECK3-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_3]], align 1
+// CHECK3-NEXT: store ptr [[STR]], ptr [[DOTCAPTURE_EXPR_5]], align 8
+// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_5]], align 8, !nonnull [[META2]]
+// CHECK3-NEXT: store ptr [[TMP6]], ptr [[_TMP6]], align 8
+// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB8]], align 4
+// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB9]], align 4
+// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB8]], align 4
+// CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV10]], align 4
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND12:%.*]]
+// CHECK3: omp.inner.for.cond12:
+// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV10]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]]
+// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB9]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK3-NEXT: [[CMP13:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
+// CHECK3-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY14:%.*]], label [[OMP_INNER_FOR_END21:%.*]]
+// CHECK3: omp.inner.for.body14:
+// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV10]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK3-NEXT: [[MUL15:%.*]] = mul nsw i32 [[TMP10]], 1
+// CHECK3-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
+// CHECK3-NEXT: store i32 [[ADD16]], ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK3-NEXT: invoke void @_Z3foov()
+// CHECK3-NEXT: to label [[INVOKE_CONT17:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP19]]
+// CHECK3: invoke.cont17:
+// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE18:%.*]]
+// CHECK3: omp.body.continue18:
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC19:%.*]]
+// CHECK3: omp.inner.for.inc19:
+// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV10]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK3-NEXT: [[ADD20:%.*]] = add nsw i32 [[TMP11]], 1
+// CHECK3-NEXT: store i32 [[ADD20]], ptr [[DOTOMP_IV10]], align 4, !llvm.access.group [[ACC_GRP19]]
+// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND12]], !llvm.loop [[LOOP20:![0-9]+]]
+// CHECK3: omp.inner.for.end21:
+// CHECK3-NEXT: store i32 100, ptr [[I11]], align 4
+// CHECK3-NEXT: ret i32 0
+// CHECK3: terminate.lpad:
+// CHECK3-NEXT: [[TMP12:%.*]] = landingpad { ptr, i32 }
+// CHECK3-NEXT: catch ptr null
+// CHECK3-NEXT: [[TMP13:%.*]] = extractvalue { ptr, i32 } [[TMP12]], 0
+// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP13]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP16]]
+// CHECK3-NEXT: unreachable
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev
+// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR6]]
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2El
+// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
+// CHECK3-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev
+// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: ret void
+//
diff --git a/clang/test/PCH/cxx-explicit-specifier.cpp b/clang/test/PCH/cxx-explicit-specifier.cpp
index 84548fa..c7cd2c5 100644
--- a/clang/test/PCH/cxx-explicit-specifier.cpp
+++ b/clang/test/PCH/cxx-explicit-specifier.cpp
@@ -79,8 +79,8 @@ struct A {
B<true> b_true;
B<false> b_false;
#else
-//expected-note@-8 {{candidate template ignored}} expected-note@-8 {{implicit deduction guide declared as 'template <bool b> A(A<b>) -> A<b>'}}
-//expected-note@-8 {{explicit constructor declared here}} expected-note@-8 {{implicit deduction guide declared as 'template <bool b> explicit(b) A(B<b>) -> A<b>'}}
+//expected-note@-8 {{candidate template ignored}} expected-note@-8 {{implicit deduction guide declared as 'template <bool b> A(templ::A<b>) -> templ::A<b>'}}
+//expected-note@-8 {{explicit constructor declared here}} expected-note@-8 {{implicit deduction guide declared as 'template <bool b> explicit(b) A(B<b>) -> templ::A<b>'}}
//expected-note@-15+ {{candidate constructor}}
//expected-note@-8+ {{explicit conversion function is not a candidate (explicit specifier}}
//expected-note@-11 {{explicit constructor is not a candidate (explicit specifier}}
diff --git a/clang/test/PCH/dedup_types.cpp b/clang/test/PCH/dedup_types.cpp
new file mode 100644
index 0000000..d4b19b4
--- /dev/null
+++ b/clang/test/PCH/dedup_types.cpp
@@ -0,0 +1,20 @@
+// RUN: %clang_cc1 -std=c++14 -x c++-header %s -emit-pch -o %t.pch
+// RUN: %clang_cc1 -std=c++14 -x c++ /dev/null -include-pch %t.pch
+
+// RUN: %clang_cc1 -std=c++14 -x c++-header %s -emit-pch -fpch-instantiate-templates -o %t.pch
+// RUN: %clang_cc1 -std=c++14 -x c++ /dev/null -include-pch %t.pch
+
+template <template <class...> class Templ, class...Types>
+using TypePackDedup = Templ<__builtin_dedup_pack<Types...>...>;
+
+template <class ...Ts>
+struct TypeList {};
+
+template <int i>
+struct X {};
+
+void fn1() {
+ TypeList<int, double> l1 = TypePackDedup<TypeList, int, double, int>{};
+ TypeList<> l2 = TypePackDedup<TypeList>{};
+ TypeList<X<0>, X<1>> x1 = TypePackDedup<TypeList, X<0>, X<1>, X<0>, X<1>>{};
+}
diff --git a/clang/test/Parser/MicrosoftExtensions.cpp b/clang/test/Parser/MicrosoftExtensions.cpp
index 9102bca..e32d7fa 100644
--- a/clang/test/Parser/MicrosoftExtensions.cpp
+++ b/clang/test/Parser/MicrosoftExtensions.cpp
@@ -145,7 +145,7 @@ typedef COM_CLASS_TEMPLATE_REF<struct_with_uuid, __uuidof(struct_with_uuid)> COM
COM_CLASS_TEMPLATE_REF<int, __uuidof(struct_with_uuid)> good_template_arg;
-COM_CLASS_TEMPLATE<int, __uuidof(struct_with_uuid)> bad_template_arg; // expected-error {{non-type template argument for template parameter of pointer type 'const GUID *' (aka 'const _GUID *') must have its address taken}}
+COM_CLASS_TEMPLATE<int, __uuidof(struct_with_uuid)> bad_template_arg; // expected-error {{non-type template argument for template parameter of pointer type 'const GUID *' (aka 'const struct _GUID *') must have its address taken}}
namespace PR16911 {
struct __declspec(uuid("{12345678-1234-1234-1234-1234567890aB}")) uuid;
diff --git a/clang/test/Parser/brackets.cpp b/clang/test/Parser/brackets.cpp
index 927b66a..91d4f9b 100644
--- a/clang/test/Parser/brackets.cpp
+++ b/clang/test/Parser/brackets.cpp
@@ -158,4 +158,53 @@ struct A {
const char[] A::f = "f";
// expected-error@-1{{brackets are not allowed here; to declare an array, place the brackets after the name}}
}
-// CHECK: 15 errors generated.
+
+namespace gh147333 {
+ template<class T, char fmt>
+ constexpr inline auto& to_print_fmt = "";
+ template<> constexpr inline char[] to_print_fmt<unsigned, 'x'> = "0x%x";
+ // expected-error@-1{{brackets are not allowed here; to declare an array, place the brackets after the name}}
+
+#ifndef FIXIT
+ // Further related test cases.
+
+ int[1] operator+();
+ // expected-error@-1{{brackets are not allowed here; to declare an array, place the brackets after the name}}
+ // expected-error@-2{{function cannot return array type}}
+
+ int[1] operator ""_x(unsigned long long);
+ // expected-error@-1{{brackets are not allowed here; to declare an array, place the brackets after the name}}
+ // expected-error@-2{{function cannot return array type}}
+
+ struct A {
+ int[1] operator int();
+ // expected-error@-1{{brackets are not allowed here; to declare an array, place the brackets after the name}}
+ // TODO: The following is too noisy and redundant.
+ // expected-error@-3{{conversion function cannot have a return type}}
+ // expected-error@-4{{cannot specify any part of a return type in the declaration of a conversion function}}
+ // expected-error@-5{{conversion function cannot convert to an array type}}
+
+ int[1] A();
+ // expected-error@-1{{brackets are not allowed here; to declare an array, place the brackets after the name}}
+ // TODO: The following is too noisy and redundant.
+ // expected-error@-3{{function cannot return array type}}
+ // expected-error@-4{{constructor cannot have a return type}}
+
+ int[1] ~A();
+ // expected-error@-1{{brackets are not allowed here; to declare an array, place the brackets after the name}}
+ // TODO: This isn't helpful.
+ // expected-error@-3{{array has incomplete element type 'void'}}
+ };
+
+ template<typename T>
+ struct B {
+ int[1] B<T>();
+ // expected-error@-1{{brackets are not allowed here; to declare an array, place the brackets after the name}}
+ // TODO: The following is too noisy and redundant.
+ // expected-error@-3{{function cannot return array type}}
+ // expected-error@-4{{constructor cannot have a return type}}
+ };
+#endif
+}
+
+// CHECK: 32 errors generated.
diff --git a/clang/test/Parser/cxx-variadic-func.cpp b/clang/test/Parser/cxx-variadic-func.cpp
index 98a34d3..73124b8 100644
--- a/clang/test/Parser/cxx-variadic-func.cpp
+++ b/clang/test/Parser/cxx-variadic-func.cpp
@@ -6,3 +6,24 @@ void f(...) {
}
void h(int n..., int m); // expected-error {{expected ')'}} expected-note {{to match}}
+
+
+namespace GH153445 {
+void f(int = {}...);
+
+struct S {
+ void f(int = {}...);
+ void g(int...);
+};
+
+void S::g(int = {}...) {}
+}
+
+
+template <typename ...T>
+constexpr int a() {return 1;}
+
+struct S2 {
+ template <typename ...Ts>
+ void f(int = a<Ts...>()...);
+};
diff --git a/clang/test/Parser/cxx0x-attributes-preprocessor-tokens.cpp b/clang/test/Parser/cxx0x-attributes-preprocessor-tokens.cpp
new file mode 100644
index 0000000..6605d24
--- /dev/null
+++ b/clang/test/Parser/cxx0x-attributes-preprocessor-tokens.cpp
@@ -0,0 +1,58 @@
+// RUN: %clang_cc1 -fsyntax-only -Wattribute-preprocessor-tokens -verify %s
+// RUN: %clang_cc1 -Wattribute-preprocessor-tokens -E %s | FileCheck %s
+// RUN: %clang_cc1 -x c -fsyntax-only -verify=c %s
+// RUN: %clang_cc1 -x c -E %s | FileCheck %s
+
+#define ATTR_STR(X) [[clang::annotate(#X)]]
+#define ATTR_PASTE(X, Y) [[clang::annotate("test", X ## Y)]]
+
+[[clang::assume(#)]] void f1(); // c-error {{expected expression}} \
+ // expected-warning {{'#' is not allowed in an attribute argument list}}
+
+[[clang::assume(##)]] void f2(); // c-error {{expected expression}} \
+ // expected-warning {{'##' is not allowed in an attribute argument list}}
+
+[[clang::assume(1#2#3)]] void f3(); // c-error {{use of this expression in an 'assume' attribute requires parentheses}} \
+ // c-error {{expected ')'}} \
+ // c-note {{to match this '('}} \
+ // expected-warning {{'#' is not allowed in an attribute argument list}} \
+ // expected-warning {{'#' is not allowed in an attribute argument list}}
+
+[[unknown::unknown(#)]] void f4(); // c-warning {{unknown attribute 'unknown::unknown' ignored}} \
+ // expected-warning {{'#' is not allowed in an attribute argument list}}
+
+[[unknown::unknown(##)]] void f5(); // c-warning {{unknown attribute 'unknown::unknown' ignored}} \
+ // expected-warning {{'##' is not allowed in an attribute argument list}}
+
+[[unknown::unknown(1#2#3)]] void f6(); // c-warning {{unknown attribute 'unknown::unknown' ignored}} \
+ // expected-warning {{'#' is not allowed in an attribute argument list}} \
+ // expected-warning {{'#' is not allowed in an attribute argument list}}
+
+[[clang::assume(%:)]] void f7(); // c-error {{expected expression}} \
+ // expected-warning {{'%:' is not allowed in an attribute argument list}}
+
+
+[[clang::assume(%:%:)]] void f8(); // c-error {{expected expression}} \
+ // expected-warning {{'%:%:' is not allowed in an attribute argument list}}
+
+[[clang::assume(1%:2%:3)]] void f9(); // c-error {{use of this expression in an 'assume' attribute requires parentheses}} \
+ // c-error {{expected ')'}} \
+ // c-note {{to match this '('}} \
+ // expected-warning {{'%:' is not allowed in an attribute argument list}} \
+ // expected-warning {{'%:' is not allowed in an attribute argument list}}
+
+[[unknown::unknown(%:)]] void f10(); // c-warning {{unknown attribute 'unknown::unknown' ignored}} \
+ // expected-warning {{'%:' is not allowed in an attribute argument list}}
+
+[[unknown::unknown(%:%:)]] void f11(); // c-warning {{unknown attribute 'unknown::unknown' ignored}} \
+ // expected-warning {{'%:%:' is not allowed in an attribute argument list}}
+
+[[unknown::unknown(1%:2%:3)]] void f12(); // c-warning {{unknown attribute 'unknown::unknown' ignored}} \
+ // expected-warning {{'%:' is not allowed in an attribute argument list}} \
+ // expected-warning {{'%:' is not allowed in an attribute argument list}}
+
+ATTR_STR(stringify) void f13();
+// CHECK: {{\[\[}}clang{{::}}annotate("stringify"){{\]\]}} void f13();
+
+ATTR_PASTE(1, 2) void f14();
+// CHECK: {{\[\[}}clang{{::}}annotate("test", 12){{\]\]}} void f14();
diff --git a/clang/test/Parser/cxx1z-class-template-argument-deduction.cpp b/clang/test/Parser/cxx1z-class-template-argument-deduction.cpp
index d29eed4..9d27f83 100644
--- a/clang/test/Parser/cxx1z-class-template-argument-deduction.cpp
+++ b/clang/test/Parser/cxx1z-class-template-argument-deduction.cpp
@@ -196,8 +196,8 @@ namespace typename_specifier {
new typename T::A{0};
typename T::A a = 0;
const typename T::A b = 0;
- if (typename T::A a = 0) {} // expected-error {{value of type 'typename X::A<int>' (aka 'typename_specifier::X::A<int>') is not contextually convertible to 'bool'}}
- for (typename T::A a = 0; typename T::A b = 0; /**/) {} // expected-error {{value of type 'typename X::A<int>' (aka 'typename_specifier::X::A<int>') is not contextually convertible to 'bool'}}
+ if (typename T::A a = 0) {} // expected-error {{value of type 'typename typename_specifier::X::A<int>' (aka 'typename_specifier::X::A<int>') is not contextually convertible to 'bool'}}
+ for (typename T::A a = 0; typename T::A b = 0; /**/) {} // expected-error {{value of type 'typename typename_specifier::X::A<int>' (aka 'typename_specifier::X::A<int>') is not contextually convertible to 'bool'}}
{(void)(typename T::A)(0);} // expected-error{{refers to class template member}}
{(void)(typename T::A){0};} // expected-error{{refers to class template member}}
@@ -208,7 +208,7 @@ namespace typename_specifier {
{typename T::A arr[3] = 0;} // expected-error {{refers to class template member}}
{typename T::A F::*pm = 0;} // expected-error {{refers to class template member}}
{typename T::A (*fp)() = 0;} // expected-error {{refers to class template member}}
- {typename T::A [x, y] = 0;} // expected-error {{cannot be declared with type 'typename T::A'}} expected-error {{type 'typename X::A<int>' (aka 'typename_specifier::X::A<int>') decomposes into 0}}
+ {typename T::A [x, y] = 0;} // expected-error {{cannot be declared with type 'typename T::A'}} expected-error {{type 'typename typename_specifier::X::A<int>' (aka 'typename_specifier::X::A<int>') decomposes into 0}}
}
template void f<X>(); // expected-note {{instantiation of}}
diff --git a/clang/test/Parser/cxx2c-oxford-variadic-comma.cpp b/clang/test/Parser/cxx2c-oxford-variadic-comma.cpp
index b8015b4..18ce770 100644
--- a/clang/test/Parser/cxx2c-oxford-variadic-comma.cpp
+++ b/clang/test/Parser/cxx2c-oxford-variadic-comma.cpp
@@ -36,6 +36,7 @@ void o(int x, ...);
struct S {
void p(this S...) {} // expected-warning {{declaration of a variadic function without a comma before '...' is deprecated}}
+ void f(int = {}...); // expected-warning {{declaration of a variadic function without a comma before '...' is deprecated}}
};
template<class ...Ts>
diff --git a/clang/test/Parser/diagnose_if.cpp b/clang/test/Parser/diagnose_if.cpp
new file mode 100644
index 0000000..5205980
--- /dev/null
+++ b/clang/test/Parser/diagnose_if.cpp
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 %s -fsyntax-only -fcxx-exceptions -verify
+
+void t1() __attribute__((__diagnose_if__(baz))) try {} catch(...) {}
+// expected-error@-1 {{use of undeclared identifier 'baz'}}
+
+struct A {
+ A();
+};
+
+A::A() __attribute__((__diagnose_if__(baz))) :;
+// expected-error@-1 {{expected class member or base class name}}
+// expected-error@-2 {{use of undeclared identifier 'baz'}}
diff --git a/clang/test/Parser/explicit-bool-pre-cxx17.cpp b/clang/test/Parser/explicit-bool-pre-cxx17.cpp
new file mode 100644
index 0000000..fee0889
--- /dev/null
+++ b/clang/test/Parser/explicit-bool-pre-cxx17.cpp
@@ -0,0 +1,15 @@
+// Regression test for assertion failure when explicit(bool) is used in pre-C++20
+// Fixes GitHub issue #152729
+// RUN: %clang_cc1 -std=c++98 -verify %s
+// RUN: %clang_cc1 -std=c++03 -verify %s
+// RUN: %clang_cc1 -std=c++11 -verify %s
+// RUN: %clang_cc1 -std=c++14 -verify %s
+// RUN: %clang_cc1 -std=c++17 -verify %s
+
+struct S {
+ explicit(true) S(int);
+ // expected-warning@-1 {{explicit(bool) is a C++20 extension}}
+
+ explicit(false) S(float);
+ // expected-warning@-1 {{explicit(bool) is a C++20 extension}}
+};
diff --git a/clang/test/ParserOpenACC/parse-clauses.c b/clang/test/ParserOpenACC/parse-clauses.c
index a9ad7ab..d3fd903 100644
--- a/clang/test/ParserOpenACC/parse-clauses.c
+++ b/clang/test/ParserOpenACC/parse-clauses.c
@@ -723,7 +723,7 @@ void VarListClauses() {
}
void ReductionClauseParsing() {
- char *Begin, *End;
+ char Begin, End;
// expected-error@+1{{expected '('}}
#pragma acc serial reduction
for(int i = 0; i < 5;++i) {}
diff --git a/clang/test/Preprocessor/embed_constexpr.c b/clang/test/Preprocessor/embed_constexpr.c
index e444dfe..e4c85cc 100644
--- a/clang/test/Preprocessor/embed_constexpr.c
+++ b/clang/test/Preprocessor/embed_constexpr.c
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 %s -fsyntax-only --embed-dir=%S/Inputs -verify -std=c23
+// RUN: %clang_cc1 %s -fsyntax-only --embed-dir=%S/Inputs -verify -std=c23 -fexperimental-new-constant-interpreter
static constexpr unsigned char data[] = {
#embed "big_char.txt"
@@ -19,3 +20,7 @@ static constexpr unsigned data3[] = {
static constexpr int data4[] = {
#embed "big_char.txt" suffix(, -1)
};
+
+static constexpr float data5[] = {
+#embed "big_char.txt" suffix(, -1)
+};
diff --git a/clang/test/Preprocessor/embed_parsing_errors.c b/clang/test/Preprocessor/embed_parsing_errors.c
index 490ec6d..a8bbdea 100644
--- a/clang/test/Preprocessor/embed_parsing_errors.c
+++ b/clang/test/Preprocessor/embed_parsing_errors.c
@@ -94,6 +94,9 @@ char buffer[] = {
#embed "embed_parsing_errors.c" prefix() // OK: tokens within parens are optional
#embed "embed_parsing_errors.c" prefix)
// expected-error@-1 {{expected '('}}
+#embed "embed_parsing_errors.c" prefix()) // expected-error {{expected identifier}}
+#embed "embed_parsing_errors.c" prefix(]) // expected-error {{expected ')'}}
+#embed "embed_parsing_errors.c" prefix(}) // expected-error {{expected ')'}}
#embed "embed_parsing_errors.c" suffix
// expected-error@-1 {{expected '('}}
@@ -115,6 +118,9 @@ char buffer[] = {
#embed "embed_parsing_errors.c" suffix() // OK: tokens within parens are optional
#embed "embed_parsing_errors.c" suffix)
// expected-error@-1 {{expected '('}}
+#embed "embed_parsing_errors.c" suffix()) // expected-error {{expected identifier}}
+#embed "embed_parsing_errors.c" suffix(]) // expected-error {{expected ')'}}
+#embed "embed_parsing_errors.c" suffix(}) // expected-error {{expected ')'}}
#embed "embed_parsing_errors.c" if_empty(1/0) // OK: emitted as tokens, not evaluated yet.
#embed "embed_parsing_errors.c" if_empty(([{}])) // OK: delimiters balanced
@@ -128,3 +134,6 @@ char buffer[] = {
#embed "embed_parsing_errors.c" if_empty)
// expected-error@-1 {{expected '('}}
};
+#embed "embed_parsing_errors.c" if_empty()) // expected-error {{expected identifier}}
+#embed "embed_parsing_errors.c" if_empty(]) // expected-error {{expected ')'}}
+#embed "embed_parsing_errors.c" if_empty(}) // expected-error {{expected ')'}}
diff --git a/clang/test/Preprocessor/file_test.c b/clang/test/Preprocessor/file_test.c
index 945882d..1e7e1df 100644
--- a/clang/test/Preprocessor/file_test.c
+++ b/clang/test/Preprocessor/file_test.c
@@ -1,4 +1,4 @@
-// UNSUPPORTED: system-windows
+// UNSUPPORTED: system-windows, system-cygwin
// RUN: %clang -E -ffile-prefix-map=%p=/UNLIKELY_PATH/empty -c -o - %s | FileCheck %s
// RUN: %clang -E -fmacro-prefix-map=%p=/UNLIKELY_PATH/empty -c -o - %s | FileCheck %s
// RUN: %clang -E -fmacro-prefix-map=%p=/UNLIKELY_PATH=empty -c -o - %s | FileCheck %s -check-prefix CHECK-EVIL
diff --git a/clang/test/Preprocessor/init.c b/clang/test/Preprocessor/init.c
index bed39dc..7e0df96 100644
--- a/clang/test/Preprocessor/init.c
+++ b/clang/test/Preprocessor/init.c
@@ -1622,6 +1622,14 @@
// RUN: %clang_cc1 -x c -std=c99 -E -dM -ffreestanding -triple=amd64-unknown-openbsd < /dev/null | FileCheck -match-full-lines -check-prefix OPENBSD-STDC-N %s
// OPENBSD-STDC-N-NOT:#define __STDC_NO_THREADS__ 1
//
+// RUN: %clang_cc1 -x c -std=c11 -E -dM -ffreestanding -triple=x86_64-unknown-dragonfly < /dev/null | FileCheck -match-full-lines -check-prefix DRAGONFLY-STDC %s
+// RUN: %clang_cc1 -x c -std=gnu11 -E -dM -ffreestanding -triple=x86_64-unknown-dragonfly < /dev/null | FileCheck -match-full-lines -check-prefix DRAGONFLY-STDC %s
+// RUN: %clang_cc1 -x c -std=c17 -E -dM -ffreestanding -triple=x86_64-unknown-dragonfly < /dev/null | FileCheck -match-full-lines -check-prefix DRAGONFLY-STDC %s
+// DRAGONFLY-STDC:#define __STDC_NO_THREADS__ 1
+//
+// RUN: %clang_cc1 -x c -std=c99 -E -dM -ffreestanding -triple=x86_64-unknown-dragonfly < /dev/null | FileCheck -match-full-lines -check-prefix DRAGONFLY-STDC-N %s
+// DRAGONFLY-STDC-N-NOT:#define __STDC_NO_THREADS__ 1
+//
// RUN: %clang_cc1 -triple=aarch64-unknown-managarm-mlibc -E -dM < /dev/null | FileCheck -match-full-lines -check-prefix MANAGARM %s
// RUN: %clang_cc1 -triple=riscv64-unknown-managarm-mlibc -E -dM < /dev/null | FileCheck -match-full-lines -check-prefix MANAGARM %s
// RUN: %clang_cc1 -triple=x86_64-unknown-managarm-mlibc -E -dM < /dev/null | FileCheck -match-full-lines -check-prefix MANAGARM %s
diff --git a/clang/test/Preprocessor/predefined-arch-macros.c b/clang/test/Preprocessor/predefined-arch-macros.c
index e82d825..2ea2d51 100644
--- a/clang/test/Preprocessor/predefined-arch-macros.c
+++ b/clang/test/Preprocessor/predefined-arch-macros.c
@@ -1907,7 +1907,6 @@
// CHECK_GNR_M32: #define __BMI2__ 1
// CHECK_GNR_M32: #define __BMI__ 1
// CHECK_DMR_M32: #define __CCMP__ 1
-// CHECK_DMR_M32: #define __CF__ 1
// CHECK_GNR_M32: #define __CLDEMOTE__ 1
// CHECK_GNR_M32: #define __CLFLUSHOPT__ 1
// CHECK_GNR_M32: #define __CLWB__ 1
@@ -2017,7 +2016,6 @@
// CHECK_GNR_M64: #define __BMI2__ 1
// CHECK_GNR_M64: #define __BMI__ 1
// CHECK_DMR_M64: #define __CCMP__ 1
-// CHECK_DMR_M64: #define __CF__ 1
// CHECK_GNR_M64: #define __CLDEMOTE__ 1
// CHECK_GNR_M64: #define __CLFLUSHOPT__ 1
// CHECK_GNR_M64: #define __CLWB__ 1
diff --git a/clang/test/Preprocessor/ptrauth_extension.c b/clang/test/Preprocessor/ptrauth_extension.c
index d6b7918..3267b07 100644
--- a/clang/test/Preprocessor/ptrauth_extension.c
+++ b/clang/test/Preprocessor/ptrauth_extension.c
@@ -4,10 +4,32 @@
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-calls | \
// RUN: FileCheck %s --check-prefixes=NOINTRIN
-#if __has_extension(ptrauth_qualifier)
-// INTRIN: has_ptrauth_qualifier
-void has_ptrauth_qualifier() {}
-#else
+// RUN: %clang_cc1 -E %s -DIS_DARWIN -triple=arm64e-apple-darwin -fptrauth-intrinsics | \
+// RUN: FileCheck %s --check-prefixes=INTRIN,INTRIN_MAC
+
+// RUN: %clang_cc1 -E %s -DIS_DARWIN -triple=arm64e-apple-darwin -fptrauth-calls | \
+// RUN: FileCheck %s --check-prefixes=NOINTRIN
+
+#if defined(IS_DARWIN) && __has_extension(ptrauth_qualifier)
+// INTRIN_MAC: has_ptrauth_qualifier1
+void has_ptrauth_qualifier1() {}
+#ifndef __PTRAUTH__
+#error ptrauth_qualifier extension present without predefined test macro
+#endif
+#endif
+#if defined(IS_DARWIN) && __has_feature(ptrauth_qualifier)
+// INTRIN_MAC: has_ptrauth_qualifier2
+void has_ptrauth_qualifier2() {}
+#ifndef __PTRAUTH__
+#error ptrauth_qualifier extension present without predefined test macro
+#endif
+#endif
+#if defined(__PTRAUTH__)
+// INTRIN: has_ptrauth_qualifier3
+void has_ptrauth_qualifier3() {}
+#endif
+
+#if !defined(__PTRAUTH__) && !__has_feature(ptrauth_qualifier) && !__has_extension(ptrauth_qualifier)
// NOINTRIN: no_ptrauth_qualifier
void no_ptrauth_qualifier() {}
#endif
diff --git a/clang/test/Preprocessor/ptrauth_feature.c b/clang/test/Preprocessor/ptrauth_feature.c
index a440791..cebea41 100644
--- a/clang/test/Preprocessor/ptrauth_feature.c
+++ b/clang/test/Preprocessor/ptrauth_feature.c
@@ -34,7 +34,7 @@
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-elf-got | \
// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS,ELFGOT
-#if __has_feature(ptrauth_intrinsics)
+#if defined(__PTRAUTH__)
// INTRIN: has_ptrauth_intrinsics
void has_ptrauth_intrinsics() {}
#else
@@ -130,3 +130,11 @@ void has_ptrauth_elf_got() {}
// NOELFGOT: no_ptrauth_elf_got
void no_ptrauth_elf_got() {}
#endif
+
+#if __has_feature(ptrauth_objc_signable_class)
+// INTRIN: has_ptrauth_objc_signable_class
+void has_ptrauth_objc_signable_class(){}
+#else
+// NOINTRIN: no_ptrauth_objc_signable_class
+void no_ptrauth_objc_signable_class(){}
+#endif
diff --git a/clang/test/Preprocessor/riscv-target-features.c b/clang/test/Preprocessor/riscv-target-features.c
index 864d782..204c985 100644
--- a/clang/test/Preprocessor/riscv-target-features.c
+++ b/clang/test/Preprocessor/riscv-target-features.c
@@ -177,6 +177,7 @@
// CHECK-NOT: __riscv_zicfiss {{.*$}}
// CHECK-NOT: __riscv_ztso {{.*$}}
// CHECK-NOT: __riscv_zvbc32e {{.*$}}
+// CHECK-NOT: __riscv_zvfbfa {{.*$}}
// CHECK-NOT: __riscv_zvfbfmin {{.*$}}
// CHECK-NOT: __riscv_zvfbfwma {{.*$}}
// CHECK-NOT: __riscv_zvkgs {{.*$}}
@@ -1552,6 +1553,14 @@
// CHECK-ZTSO-EXT: __riscv_ztso 1000000{{$}}
// RUN: %clang --target=riscv32 -menable-experimental-extensions \
+// RUN: -march=rv32ifzvfbfa0p1 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZVFBFA-EXT %s
+// RUN: %clang --target=riscv64 -menable-experimental-extensions \
+// RUN: -march=rv64ifzvfbfa0p1 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZVFBFA-EXT %s
+// CHECK-ZVFBFA-EXT: __riscv_zvfbfa 1000{{$}}
+
+// RUN: %clang --target=riscv32 -menable-experimental-extensions \
// RUN: -march=rv32i_zve32x_zvbc32e0p7 -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZVBC32E-EXT %s
// RUN: %clang --target=riscv64 -menable-experimental-extensions \
diff --git a/clang/test/Preprocessor/sanitizer-predefines.c b/clang/test/Preprocessor/sanitizer-predefines.c
new file mode 100644
index 0000000..9d2f6bf
--- /dev/null
+++ b/clang/test/Preprocessor/sanitizer-predefines.c
@@ -0,0 +1,8 @@
+// RUN: %clang_cc1 -E -dM -triple aarch64-unknown-linux -fsanitize=address %s | FileCheck %s --check-prefix=ASAN
+// ASAN: #define __SANITIZE_ADDRESS__ 1
+
+// RUN: %clang_cc1 -E -dM -triple aarch64-unknown-linux -fsanitize=hwaddress %s | FileCheck %s --check-prefix=HWASAN
+// HWASAN: #define __SANITIZE_HWADDRESS__ 1
+
+// RUN: %clang_cc1 -E -dM -triple aarch64-unknown-linux -fsanitize=thread %s | FileCheck %s --check-prefix=TSAN
+// TSAN: #define __SANITIZE_THREAD__ 1
diff --git a/clang/test/Preprocessor/x86_target_features.c b/clang/test/Preprocessor/x86_target_features.c
index 3edc92c..43bf177 100644
--- a/clang/test/Preprocessor/x86_target_features.c
+++ b/clang/test/Preprocessor/x86_target_features.c
@@ -795,7 +795,7 @@
// RUN: %clang -target x86_64-unknown-unknown -march=x86-64 -mapx-features=nf -x c -E -dM -o - %s | FileCheck --check-prefix=NF %s
// RUN: %clang -target x86_64-unknown-unknown -march=x86-64 -mapx-features=cf -x c -E -dM -o - %s | FileCheck --check-prefix=CF %s
// RUN: %clang -target x86_64-unknown-unknown -march=x86-64 -mapx-features=zu -x c -E -dM -o - %s | FileCheck --check-prefix=ZU %s
-// RUN: %clang -target x86_64-unknown-unknown -march=x86-64 -mapxf -x c -E -dM -o - %s | FileCheck --check-prefixes=EGPR,PUSH2POP2,PPX,NDD,CCMP,NF,CF,ZU,APXF %s
+// RUN: %clang -target x86_64-unknown-unknown -march=x86-64 -mapxf -x c -E -dM -o - %s | FileCheck --check-prefixes=EGPR,PUSH2POP2,PPX,NDD,CCMP,NF,ZU,APXF %s
// APXF: #define __APX_F__ 1
// CCMP: #define __CCMP__ 1
// CF: #define __CF__ 1
diff --git a/clang/test/Sema/GH155794.c b/clang/test/Sema/GH155794.c
new file mode 100644
index 0000000..1afbefa
--- /dev/null
+++ b/clang/test/Sema/GH155794.c
@@ -0,0 +1,6 @@
+// RUN: %clang_cc1 -fsyntax-only -verify -Wno-everything %s
+
+struct S {
+ enum e1 {} // expected-error {{use of empty enum}} expected-error {{expected ';' after enum}}
+ enum e2 {} // expected-error {{use of empty enum}}
+}; // expected-error {{expected member name or ';' after declaration specifiers}}
diff --git a/clang/test/Sema/aarch64-sve-intrinsics/acle_sve_compact.cpp b/clang/test/Sema/aarch64-sve-intrinsics/acle_sve_compact.cpp
new file mode 100644
index 0000000..4de3f39
--- /dev/null
+++ b/clang/test/Sema/aarch64-sve-intrinsics/acle_sve_compact.cpp
@@ -0,0 +1,18 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
+// RUN: -verify -verify-ignore-unexpected=error,note -emit-llvm -o - %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme \
+// RUN: -verify -verify-ignore-unexpected=error,note -emit-llvm -o - %s
+// REQUIRES: aarch64-registered-target
+// expected-no-diagnostics
+
+#include <arm_sve.h>
+
+__attribute__((target("sme2p2")))
+void test_svcompact(svbool_t pg, svfloat32_t op) __arm_streaming {
+ svcompact(pg, op);
+}
+
+void test_svcompact_nofeature(svbool_t pg, svfloat32_t op) __arm_streaming {
+ // expected-error@+1 {{'svcompact' needs target feature (sve)|(sme, sme2p2)}}
+ svcompact(pg, op);
+}
\ No newline at end of file
diff --git a/clang/test/Sema/aarch64-sve2-intrinsics/acle_sve2_aes_bitperm_sha3_sm4.cpp b/clang/test/Sema/aarch64-sve2-intrinsics/acle_sve2_aes_bitperm_sha3_sm4.cpp
index 6b97fa2..62553821 100644
--- a/clang/test/Sema/aarch64-sve2-intrinsics/acle_sve2_aes_bitperm_sha3_sm4.cpp
+++ b/clang/test/Sema/aarch64-sve2-intrinsics/acle_sve2_aes_bitperm_sha3_sm4.cpp
@@ -14,17 +14,17 @@
void test(uint8_t u8, uint16_t u16, uint32_t u32, uint64_t u64)
{
- // expected-error@+2 {{'svaesd_u8' needs target feature sve,sve2,sve-aes}}
- // overload-error@+1 {{'svaesd' needs target feature sve,sve2,sve-aes}}
+ // expected-error@+2 {{'svaesd_u8' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
+ // overload-error@+1 {{'svaesd' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
SVE_ACLE_FUNC(svaesd,_u8,,)(svundef_u8(), svundef_u8());
- // expected-error@+2 {{'svaese_u8' needs target feature sve,sve2,sve-aes}}
- // overload-error@+1 {{'svaese' needs target feature sve,sve2,sve-aes}}
+ // expected-error@+2 {{'svaese_u8' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
+ // overload-error@+1 {{'svaese' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
SVE_ACLE_FUNC(svaese,_u8,,)(svundef_u8(), svundef_u8());
- // expected-error@+2 {{'svaesimc_u8' needs target feature sve,sve2,sve-aes}}
- // overload-error@+1 {{'svaesimc' needs target feature sve,sve2,sve-aes}}
+ // expected-error@+2 {{'svaesimc_u8' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
+ // overload-error@+1 {{'svaesimc' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
SVE_ACLE_FUNC(svaesimc,_u8,,)(svundef_u8());
- // expected-error@+2 {{'svaesmc_u8' needs target feature sve,sve2,sve-aes}}
- // overload-error@+1 {{'svaesmc' needs target feature sve,sve2,sve-aes}}
+ // expected-error@+2 {{'svaesmc_u8' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
+ // overload-error@+1 {{'svaesmc' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
SVE_ACLE_FUNC(svaesmc,_u8,,)(svundef_u8());
// expected-error@+2 {{'svbdep_u8' needs target feature (sve,sve2,sve-bitperm)|(sme,ssve-bitperm)}}
// overload-error@+1 {{'svbdep' needs target feature (sve,sve2,sve-bitperm)|(sme,ssve-bitperm)}}
@@ -107,17 +107,17 @@ void test(uint8_t u8, uint16_t u16, uint32_t u32, uint64_t u64)
// expected-error@+2 {{'svbgrp_n_u64' needs target feature (sve,sve2,sve-bitperm)|(sme,ssve-bitperm)}}
// overload-error@+1 {{'svbgrp' needs target feature (sve,sve2,sve-bitperm)|(sme,ssve-bitperm)}}
SVE_ACLE_FUNC(svbgrp,_n_u64,,)(svundef_u64(), u64);
- // expected-error@+2 {{'svpmullb_pair_u64' needs target feature sve,sve2,sve-aes}}
- // overload-error@+1 {{'svpmullb_pair' needs target feature sve,sve2,sve-aes}}
+  // expected-error@+2 {{'svpmullb_pair_u64' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
+  // overload-error@+1 {{'svpmullb_pair' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
SVE_ACLE_FUNC(svpmullb_pair,_u64,,)(svundef_u64(), svundef_u64());
- // expected-error@+2 {{'svpmullb_pair_n_u64' needs target feature sve,sve2,sve-aes}}
- // overload-error@+1 {{'svpmullb_pair' needs target feature sve,sve2,sve-aes}}
+  // expected-error@+2 {{'svpmullb_pair_n_u64' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
+  // overload-error@+1 {{'svpmullb_pair' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
SVE_ACLE_FUNC(svpmullb_pair,_n_u64,,)(svundef_u64(), u64);
- // expected-error@+2 {{'svpmullt_pair_u64' needs target feature sve,sve2,sve-aes}}
- // overload-error@+1 {{'svpmullt_pair' needs target feature sve,sve2,sve-aes}}
+  // expected-error@+2 {{'svpmullt_pair_u64' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
+  // overload-error@+1 {{'svpmullt_pair' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
SVE_ACLE_FUNC(svpmullt_pair,_u64,,)(svundef_u64(), svundef_u64());
- // expected-error@+2 {{'svpmullt_pair_n_u64' needs target feature sve,sve2,sve-aes}}
- // overload-error@+1 {{'svpmullt_pair' needs target feature sve,sve2,sve-aes}}
+  // expected-error@+2 {{'svpmullt_pair_n_u64' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
+  // overload-error@+1 {{'svpmullt_pair' needs target feature (sve,sve2,sve-aes)|(sme,ssve-aes)}}
SVE_ACLE_FUNC(svpmullt_pair,_n_u64,,)(svundef_u64(), u64);
// expected-error@+2 {{'svrax1_u64' needs target feature (sve,sve-sha3)|(sme,sve-sha3,sme2p1)}}
// overload-error@+1 {{'svrax1' needs target feature (sve,sve-sha3)|(sme,sve-sha3,sme2p1)}}
diff --git a/clang/test/Sema/address-packed.c b/clang/test/Sema/address-packed.c
index 29f1249..f826b7d 100644
--- a/clang/test/Sema/address-packed.c
+++ b/clang/test/Sema/address-packed.c
@@ -338,3 +338,11 @@ struct Invalid0 {
void *g14(struct Invalid0 *ivl) {
return &(ivl->x);
}
+
+void to_void_with_expr(void *ptr, int expr);
+
+void g15(void) {
+ struct Arguable arguable;
+ to_void_with_expr(&arguable.x, 3); // no-warning
+ to_void_with_expr(&arguable.x, ({3;})); // no-warning
+}
diff --git a/clang/test/Sema/attr-cfi-salt.c b/clang/test/Sema/attr-cfi-salt.c
new file mode 100644
index 0000000..bccdfc4
--- /dev/null
+++ b/clang/test/Sema/attr-cfi-salt.c
@@ -0,0 +1,60 @@
+// RUN: %clang_cc1 -std=c11 -fsyntax-only -fsanitize=kcfi -verify %s
+// RUN: %clang_cc1 -std=c89 -DKNR -fsyntax-only -fsanitize=kcfi -verify %s
+
+#define __cfi_salt(S) __attribute__((cfi_salt(S)))
+
+int bad1(void) __cfi_salt(); // expected-error{{'cfi_salt' attribute takes one argument}}
+int bad2(void) __cfi_salt(42); // expected-error{{expected string literal as argument of 'cfi_salt' attribute}}
+int bad3(void) __attribute__((cfi_salt("a", "b", "c"))); // expected-error{{'cfi_salt' attribute takes one argument}}
+
+
+int foo(int a, int b) __cfi_salt("pepper"); // ok
+int foo(int a, int b) __cfi_salt("pepper"); // ok
+
+#ifndef KNR
+typedef int (*bar_t)(void) __cfi_salt("pepper"); // ok
+typedef int (*bar_t)(void) __cfi_salt("pepper"); // ok
+#endif
+
+// FIXME: Should we allow this?
+// int b(void) __cfi_salt("salt 'n") __cfi_salt("pepper");
+// bar_t bar_fn __cfi_salt("salt 'n");
+
+int baz __cfi_salt("salt"); // expected-warning{{'cfi_salt' only applies to function types}}
+
+int baz_fn(int a, int b) __cfi_salt("salt 'n"); // expected-note{{previous declaration is here}}
+int baz_fn(int a, int b) __cfi_salt("pepper"); // expected-error{{conflicting types for 'baz_fn'}}
+
+int mux_fn(int a, int b) __cfi_salt("salt 'n"); // expected-note{{previous declaration is here}}
+int mux_fn(int a, int b) __cfi_salt("pepper") { // expected-error{{conflicting types for 'mux_fn'}}
+ return a * b;
+}
+
+typedef int qux_t __cfi_salt("salt"); // expected-warning{{'cfi_salt' only applies to function types}}
+
+typedef int (*quux_t)(void) __cfi_salt("salt 'n"); // expected-note{{previous definition is here}}
+typedef int (*quux_t)(void) __cfi_salt("pepper"); // expected-error{{typedef redefinition with different type}}
+
+void func1(int a) __cfi_salt("pepper"); // expected-note{{previous declaration is here}}
+void func1(int a) { } // expected-error{{conflicting types for 'func1'}}
+void (*fp1)(int) = func1; // expected-error{{incompatible function pointer types initializing 'void (*)(int)' with an expression of type 'void (int)'}}
+
+void func2(int) [[clang::cfi_salt("test")]]; // expected-note{{previous declaration is here}}
+void func2(int a) { } // expected-error{{conflicting types for 'func2'}}
+void (*fp2)(int) = func2; // expected-error{{incompatible function pointer types initializing 'void (*)(int)' with an expression of type 'void (int)'}}
+
+void func3(int) __cfi_salt("pepper"); // ok
+void func3(int a) __cfi_salt("pepper") { } // ok
+void (* __cfi_salt("pepper") fp3)(int) = func3; // ok
+void (*fp3_noattr)(int) = func3; // expected-error{{incompatible function pointer types initializing 'void (*)(int)' with an expression of type 'void (int)'}}
+
+void func4(int) [[clang::cfi_salt("test")]]; // ok
+void func4(int a) [[clang::cfi_salt("test")]] { } // ok
+void (* [[clang::cfi_salt("test")]] fp4)(int) = func4; // ok
+void (*fp4_noattr)(int) = func4; // expected-error{{incompatible function pointer types initializing 'void (*)(int)' with an expression of type 'void (int)'}}
+
+#ifdef KNR
+// K&R C function without a prototype
+void func() __attribute__((cfi_salt("pepper"))); // expected-error {{attribute only applies to non-K&R-style functions}}
+void (*fp)() __attribute__((cfi_salt("pepper"))); // expected-error {{attribute only applies to non-K&R-style functions}}
+#endif
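For orientation, a sketch of the usage pattern this test pins down (assuming a KCFI build, e.g. -fsanitize=kcfi): the salt participates in the function's CFI type identity, so every declaration, definition, and function pointer must spell the same salt, exactly as the conflicting-types errors above enforce.

#define __cfi_salt(S) __attribute__((cfi_salt(S)))

int op(int a, int b) __cfi_salt("pepper");            /* declaration */
int op(int a, int b) __cfi_salt("pepper") { return a + b; }

int (* __cfi_salt("pepper") op_ptr)(int, int) = op;   /* same salt: ok */

int call(void) { return op_ptr(1, 2); }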
diff --git a/clang/test/Sema/builtin-masked.c b/clang/test/Sema/builtin-masked.c
new file mode 100644
index 0000000..05c6580
--- /dev/null
+++ b/clang/test/Sema/builtin-masked.c
@@ -0,0 +1,46 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only -verify %s
+
+typedef int v8i __attribute__((ext_vector_type(8)));
+typedef _Bool v8b __attribute__((ext_vector_type(8)));
+typedef _Bool v2b __attribute__((ext_vector_type(2)));
+typedef float v8f __attribute__((ext_vector_type(8)));
+
+void test_masked_load(v8i *pf, v8b mask, v2b mask2, v2b thru) {
+ (void)__builtin_masked_load(mask); // expected-error {{too few arguments to function call, expected 2, have 1}}
+ (void)__builtin_masked_load(mask, pf, pf, pf); // expected-error {{too many arguments to function call, expected at most 3, have 4}}
+ (void)__builtin_masked_load(mask2, pf); // expected-error {{all arguments to '__builtin_masked_load' must have the same number of elements}}
+ (void)__builtin_masked_load(mask, mask); // expected-error {{2nd argument must be a pointer to vector}}
+ (void)__builtin_masked_load(mask, (void *)0); // expected-error {{2nd argument must be a pointer to vector}}
+ (void)__builtin_masked_load(mask2, pf, thru); // expected-error {{3rd argument must be a 'v8i' (vector of 8 'int' values)}}
+ (void)__builtin_masked_load(mask2, pf); // expected-error {{all arguments to '__builtin_masked_load' must have the same number of elements}}
+}
+
+void test_masked_store(v8i *pf, v8f *pf2, v8b mask, v2b mask2) {
+ __builtin_masked_store(mask); // expected-error {{too few arguments to function call, expected 3, have 1}}
+ __builtin_masked_store(mask, 0, 0, 0); // expected-error {{too many arguments to function call, expected 3, have 4}}
+ __builtin_masked_store(0, 0, pf); // expected-error {{1st argument must be a vector of boolean types (was 'int')}}
+ __builtin_masked_store(mask, 0, pf); // expected-error {{2nd argument must be a vector}}
+ __builtin_masked_store(mask, *pf, 0); // expected-error {{3rd argument must be a pointer to vector}}
+ __builtin_masked_store(mask2, *pf, pf); // expected-error {{all arguments to '__builtin_masked_store' must have the same number of elements}}
+ __builtin_masked_store(mask, *pf, pf2); // expected-error {{last two arguments to '__builtin_masked_store' must have the same type}}
+}
+
+void test_masked_expand_load(v8i *pf, v8b mask, v2b mask2, v2b thru) {
+ (void)__builtin_masked_expand_load(mask); // expected-error {{too few arguments to function call, expected 2, have 1}}
+ (void)__builtin_masked_expand_load(mask, pf, pf, pf); // expected-error {{too many arguments to function call, expected at most 3, have 4}}
+ (void)__builtin_masked_expand_load(mask2, pf); // expected-error {{all arguments to '__builtin_masked_expand_load' must have the same number of elements}}
+ (void)__builtin_masked_expand_load(mask, mask); // expected-error {{2nd argument must be a pointer to vector}}
+ (void)__builtin_masked_expand_load(mask, (void *)0); // expected-error {{2nd argument must be a pointer to vector}}
+ (void)__builtin_masked_expand_load(mask2, pf, thru); // expected-error {{3rd argument must be a 'v8i' (vector of 8 'int' values)}}
+ (void)__builtin_masked_expand_load(mask2, pf); // expected-error {{all arguments to '__builtin_masked_expand_load' must have the same number of elements}}
+}
+
+void test_masked_compress_store(v8i *pf, v8f *pf2, v8b mask, v2b mask2) {
+ __builtin_masked_compress_store(mask); // expected-error {{too few arguments to function call, expected 3, have 1}}
+ __builtin_masked_compress_store(mask, 0, 0, 0); // expected-error {{too many arguments to function call, expected 3, have 4}}
+ __builtin_masked_compress_store(0, 0, pf); // expected-error {{1st argument must be a vector of boolean types (was 'int')}}
+ __builtin_masked_compress_store(mask, 0, pf); // expected-error {{2nd argument must be a vector}}
+ __builtin_masked_compress_store(mask, *pf, 0); // expected-error {{3rd argument must be a pointer to vector}}
+ __builtin_masked_compress_store(mask2, *pf, pf); // expected-error {{all arguments to '__builtin_masked_compress_store' must have the same number of elements}}
+ __builtin_masked_compress_store(mask, *pf, pf2); // expected-error {{last two arguments to '__builtin_masked_compress_store' must have the same type}}
+}
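A sketch of the well-formed shapes these diagnostics carve out (argument order as exercised above: mask first, then the pointer for loads, or the value and pointer for stores; every operand must agree on lane count):

typedef int   v8i __attribute__((ext_vector_type(8)));
typedef _Bool v8b __attribute__((ext_vector_type(8)));

v8i masked_roundtrip(v8b mask, v8i *p, v8i passthru) {
  /* Lanes where the mask is false come from 'passthru'. */
  v8i v = __builtin_masked_load(mask, p, passthru);
  /* Only lanes where the mask is true are written back. */
  __builtin_masked_store(mask, v, p);
  return v;
}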
diff --git a/clang/test/Sema/builtin-object-size.c b/clang/test/Sema/builtin-object-size.c
index 20d4e2a..a763c24 100644
--- a/clang/test/Sema/builtin-object-size.c
+++ b/clang/test/Sema/builtin-object-size.c
@@ -2,6 +2,10 @@
// RUN: %clang_cc1 -fsyntax-only -triple x86_64-apple-darwin9 -verify %s
// RUN: %clang_cc1 -DDYNAMIC -fsyntax-only -triple x86_64-apple-darwin9 -verify %s
+// RUN: %clang_cc1 -fsyntax-only -verify %s -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -fsyntax-only -triple x86_64-apple-darwin9 -verify %s -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -DDYNAMIC -fsyntax-only -triple x86_64-apple-darwin9 -verify %s -fexperimental-new-constant-interpreter
+
#ifndef DYNAMIC
#define OBJECT_SIZE_BUILTIN __builtin_object_size
#else
diff --git a/clang/test/Sema/builtins-elementwise-math.c b/clang/test/Sema/builtins-elementwise-math.c
index 8548d3b..d6ce5c0 100644
--- a/clang/test/Sema/builtins-elementwise-math.c
+++ b/clang/test/Sema/builtins-elementwise-math.c
@@ -5,6 +5,7 @@ typedef double double4 __attribute__((ext_vector_type(4)));
typedef float float2 __attribute__((ext_vector_type(2)));
typedef float float3 __attribute__((ext_vector_type(3)));
typedef float float4 __attribute__((ext_vector_type(4)));
+typedef const float cfloat4 __attribute__((ext_vector_type(4)));
typedef int int2 __attribute__((ext_vector_type(2)));
typedef int int3 __attribute__((ext_vector_type(3)));
@@ -1294,12 +1295,108 @@ void test_builtin_elementwise_fma(int i32, int2 v2i32, short i16,
// expected-error@-1 {{3rd argument must be a scalar or vector of floating-point types (was '_Complex float')}}
}
+void test_builtin_elementwise_fsh(int i32, int2 v2i32, short i16, int3 v3i32,
+ double f64, float f32, float2 v2f32) {
+ i32 = __builtin_elementwise_fshl();
+ // expected-error@-1 {{too few arguments to function call, expected 3, have 0}}
+
+ i32 = __builtin_elementwise_fshr();
+ // expected-error@-1 {{too few arguments to function call, expected 3, have 0}}
+
+ i32 = __builtin_elementwise_fshl(i32, i32);
+ // expected-error@-1 {{too few arguments to function call, expected 3, have 2}}
+
+ i32 = __builtin_elementwise_fshr(i32, i32);
+ // expected-error@-1 {{too few arguments to function call, expected 3, have 2}}
+
+ i32 = __builtin_elementwise_fshl(i32, i32, i16);
+ // expected-error@-1 {{arguments are of different types ('int' vs 'short')}}
+
+ i16 = __builtin_elementwise_fshr(i16, i32, i16);
+ // expected-error@-1 {{arguments are of different types ('short' vs 'int')}}
+
+ f32 = __builtin_elementwise_fshl(f32, f32, f32);
+ // expected-error@-1 {{argument must be a scalar or vector of integer types (was 'float')}}
+
+ f64 = __builtin_elementwise_fshr(f64, f64, f64);
+ // expected-error@-1 {{argument must be a scalar or vector of integer types (was 'double')}}
+
+ v2i32 = __builtin_elementwise_fshl(v2i32, v2i32, v2f32);
+ // expected-error@-1 {{argument must be a scalar or vector of integer types (was 'float2' (vector of 2 'float' values))}}
+
+ v2i32 = __builtin_elementwise_fshr(v2i32, v2i32, v3i32);
+ // expected-error@-1 {{arguments are of different types ('int2' (vector of 2 'int' values) vs 'int3' (vector of 3 'int' values))}}
+
+ v3i32 = __builtin_elementwise_fshl(v3i32, v3i32, v2i32);
+ // expected-error@-1 {{arguments are of different types ('int3' (vector of 3 'int' values) vs 'int2' (vector of 2 'int' values))}}
+}
+
+// Tests corresponding to GitHub issues #141397 and #155405:
+// a type-mismatch error was issued when 'builtin-elementwise-math'
+// arguments had different qualifiers; these calls should be well-formed.
typedef struct {
float3 b;
} struct_float3;
-// This example uncovered a bug #141397 :
-// Type mismatch error when 'builtin-elementwise-math' arguments have different qualifiers, this should be well-formed
+
float3 foo(float3 a,const struct_float3* hi) {
float3 b = __builtin_elementwise_max((float3)(0.0f), a);
return __builtin_elementwise_pow(b, hi->b.yyy);
}
+
+float3 baz(float3 a, const struct_float3* hi) {
+ return __builtin_elementwise_fma(a, a, hi->b);
+}
+
+cfloat4 qux(cfloat4 x, float4 y, float4 z) {
+ float a = __builtin_elementwise_fma(x[0],y[0],z[0]);
+ return __builtin_elementwise_fma(x,y,z);
+}
+
+cfloat4 quux(cfloat4 x, float4 y) {
+ float a = __builtin_elementwise_pow(x[0],y[0]);
+ return __builtin_elementwise_pow(x,y);
+}
+
+void test_builtin_elementwise_ctlz(int i32, int2 v2i32, short i16,
+ double f64, double2 v2f64) {
+ f64 = __builtin_elementwise_ctlz(f64);
+ // expected-error@-1 {{1st argument must be a scalar or vector of integer types (was 'double')}}
+
+ _Complex float c1;
+ c1 = __builtin_elementwise_ctlz(c1);
+ // expected-error@-1 {{1st argument must be a scalar or vector of integer types (was '_Complex float')}}
+
+ v2i32 = __builtin_elementwise_ctlz(v2i32, i32);
+ // expected-error@-1 {{arguments are of different types ('int2' (vector of 2 'int' values) vs 'int')}}
+
+ v2i32 = __builtin_elementwise_ctlz(v2i32, f64);
+ // expected-error@-1 {{arguments are of different types ('int2' (vector of 2 'int' values) vs 'double')}}
+
+ v2i32 = __builtin_elementwise_ctlz();
+ // expected-error@-1 {{too few arguments to function call, expected 1, have 0}}
+
+ v2i32 = __builtin_elementwise_ctlz(v2i32, v2i32, f64);
+ // expected-error@-1 {{too many arguments to function call, expected 2, have 3}}
+}
+
+void test_builtin_elementwise_cttz(int i32, int2 v2i32, short i16,
+ double f64, double2 v2f64) {
+ f64 = __builtin_elementwise_cttz(f64);
+ // expected-error@-1 {{1st argument must be a scalar or vector of integer types (was 'double')}}
+
+ _Complex float c1;
+ c1 = __builtin_elementwise_cttz(c1);
+ // expected-error@-1 {{1st argument must be a scalar or vector of integer types (was '_Complex float')}}
+
+ v2i32 = __builtin_elementwise_cttz(v2i32, i32);
+ // expected-error@-1 {{arguments are of different types ('int2' (vector of 2 'int' values) vs 'int')}}
+
+ v2i32 = __builtin_elementwise_cttz(v2i32, f64);
+ // expected-error@-1 {{arguments are of different types ('int2' (vector of 2 'int' values) vs 'double')}}
+
+ v2i32 = __builtin_elementwise_cttz();
+ // expected-error@-1 {{too few arguments to function call, expected 1, have 0}}
+
+ v2i32 = __builtin_elementwise_cttz(v2i32, v2i32, f64);
+ // expected-error@-1 {{too many arguments to function call, expected 2, have 3}}
+}
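A sketch of valid calls for the builtins whose misuse is diagnosed above. __builtin_elementwise_fshl(a, b, c) funnel-shifts the concatenation a:b left by c bits per element and keeps the high half (fshr keeps the low half); all three operands must share one integer type. The ctlz/cttz forms count leading/trailing zero bits, and the optional second operand supplies the result for a zero input.

typedef int int2 __attribute__((ext_vector_type(2)));

unsigned rotl(unsigned x, unsigned n) {
  /* With both value operands equal, fshl degenerates to rotate-left. */
  return __builtin_elementwise_fshl(x, x, n);
}

int2 fsh_and_count(int2 a, int2 b, int2 c) {
  int2 r = __builtin_elementwise_fshr(a, b, c);
  int2 on_zero = {32, 32};            /* result for lanes equal to 0 */
  return __builtin_elementwise_ctlz(r, on_zero);
}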
diff --git a/clang/test/Sema/builtins-wasm.c b/clang/test/Sema/builtins-wasm.c
index a3486b1..9075e9e 100644
--- a/clang/test/Sema/builtins-wasm.c
+++ b/clang/test/Sema/builtins-wasm.c
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -fsyntax-only -verify -triple wasm32 -target-feature +reference-types %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple wasm32 -target-abi experimental-mv -DMULTIVALUE -target-feature +reference-types %s
#define EXPR_HAS_TYPE(expr, type) _Generic((expr), type : 1, default : 0)
@@ -57,8 +58,8 @@ void test_table_copy(int dst_idx, int src_idx, int nelem) {
typedef void (*F1)(void);
typedef int (*F2)(int);
-typedef int (*F3)(__externref_t);
-typedef __externref_t (*F4)(int);
+typedef void (*F3)(struct {int x; double y;});
+typedef struct {int x; double y;} (*F4)(void);
void test_function_pointer_signature() {
// Test argument count validation
@@ -68,8 +69,6 @@ void test_function_pointer_signature() {
// // Test argument type validation - should require function pointer
(void)__builtin_wasm_test_function_pointer_signature((void*)0); // expected-error {{used type 'void *' where function pointer is required}}
(void)__builtin_wasm_test_function_pointer_signature((int)0); // expected-error {{used type 'int' where function pointer is required}}
- (void)__builtin_wasm_test_function_pointer_signature((F3)0); // expected-error {{not supported for function pointers with a reference type parameter}}
- (void)__builtin_wasm_test_function_pointer_signature((F4)0); // expected-error {{not supported for function pointers with a reference type return value}}
// // Test valid usage
int res = __builtin_wasm_test_function_pointer_signature((F1)0);
@@ -77,4 +76,14 @@ void test_function_pointer_signature() {
// Test return type
_Static_assert(EXPR_HAS_TYPE(__builtin_wasm_test_function_pointer_signature((F1)0), int), "");
+
+#ifdef MULTIVALUE
+ // Test that struct arguments and returns are rejected with multivalue abi
+ (void)__builtin_wasm_test_function_pointer_signature((F3)0); // expected-error {{not supported with the multivalue ABI for function pointers with a struct/union as parameter}}
+ (void)__builtin_wasm_test_function_pointer_signature((F4)0); // expected-error {{not supported with the multivalue ABI for function pointers with a struct/union as return value}}
+#else
+ // With the default ABI they are fine.
+ (void)__builtin_wasm_test_function_pointer_signature((F3)0);
+ (void)__builtin_wasm_test_function_pointer_signature((F4)0);
+#endif
}
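A sketch of the behavior split the new RUN line checks (a wasm32 target is assumed): the builtin answers whether a call through the pointer would pass the wasm signature check, and only the experimental multivalue ABI has to reject struct parameters and returns, because it lowers them to multiple wasm values.

typedef struct { int x; double y; } pair_t;
typedef void (*takes_pair_t)(pair_t);

int probe(takes_pair_t fp) {
  /* Default ABI: accepted (structs pass indirectly).
     -target-abi experimental-mv: rejected at compile time. */
  return __builtin_wasm_test_function_pointer_signature(fp);
}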
diff --git a/clang/test/Sema/c2x-nodiscard.c b/clang/test/Sema/c2x-nodiscard.c
index e2537bc..852c747 100644
--- a/clang/test/Sema/c2x-nodiscard.c
+++ b/clang/test/Sema/c2x-nodiscard.c
@@ -41,6 +41,10 @@ void f2(void) {
(void)get_s3();
(void)get_i();
(void)get_e();
+
+ One; // expected-warning {{expression result unused}}
+ (enum E2)(0); // expected-warning {{expression result unused}}
+ (struct S4){1}; // expected-warning {{expression result unused}}
}
struct [[nodiscard]] error_info{
@@ -60,3 +64,16 @@ void GH104391() {
#define M (unsigned int) f3()
M; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}}
}
+
+[[nodiscard]] typedef int NoDInt; // expected-warning {{'[[nodiscard]]' attribute ignored when applied to a typedef}}
+typedef __attribute__((warn_unused)) int WUInt; // expected-warning {{'warn_unused' attribute only applies to structs, unions, and classes}}
+typedef __attribute__((warn_unused_result)) int WURInt;
+NoDInt get_nodint();
+WUInt get_wuint();
+WURInt get_wurint();
+
+void f4(void) {
+ get_nodint(); // no warning because attribute is ignored
+ get_wuint(); // no warning because attribute is ignored
+ get_wurint(); // expected-warning {{ignoring return value of type 'WURInt' declared with 'warn_unused_result' attribute}}
+}
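A sketch of the asymmetry the new block tests: on a typedef, [[nodiscard]] and warn_unused are ignored (with the warnings shown above), while warn_unused_result attaches to the type and follows it to every function returning it.

typedef __attribute__((warn_unused_result)) int must_use_int;

must_use_int compute(void);

void consume(void) {
  compute();        /* warns: ignoring return value of type 'must_use_int' */
  (void)compute();  /* explicit discard: silent */
}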
diff --git a/clang/test/Sema/constant-builtins-vector.cpp b/clang/test/Sema/constant-builtins-vector.cpp
index bde5c47..17fa958 100644
--- a/clang/test/Sema/constant-builtins-vector.cpp
+++ b/clang/test/Sema/constant-builtins-vector.cpp
@@ -860,3 +860,104 @@ static_assert(__builtin_elementwise_sub_sat(0U, 1U) == 0U);
static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_sub_sat((vector4char){5, 4, 3, 2}, (vector4char){1, 1, 1, 1})) == (LITTLE_END ? 0x01020304 : 0x04030201));
static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_sub_sat((vector4uchar){5, 4, 3, 2}, (vector4uchar){1, 1, 1, 1})) == (LITTLE_END ? 0x01020304U : 0x04030201U));
static_assert(__builtin_bit_cast(unsigned long long, __builtin_elementwise_sub_sat((vector4short){(short)0x8000, (short)0x8001, (short)0x8002, (short)0x8003}, (vector4short){7, 8, 9, 10}) == (LITTLE_END ? 0x8000800080008000 : 0x8000800080008000)));
+
+static_assert(__builtin_elementwise_max(1, 2) == 2);
+static_assert(__builtin_elementwise_max(-1, 1) == 1);
+static_assert(__builtin_elementwise_max(1U, 2U) == 2U);
+static_assert(__builtin_elementwise_max(~0U, 0U) == ~0U);
+static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_max((vector4char){1, -2, 3, -4}, (vector4char){4, -3, 2, -1})) == (LITTLE_END ? 0xFF03FE04 : 0x04FE03FF ));
+static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_max((vector4uchar){1, 2, 3, 4}, (vector4uchar){4, 3, 2, 1})) == 0x04030304U);
+static_assert(__builtin_bit_cast(unsigned long long, __builtin_elementwise_max((vector4short){1, -2, 3, -4}, (vector4short){4, -3, 2, -1})) == (LITTLE_END ? 0xFFFF0003FFFE0004 : 0x0004FFFE0003FFFF));
+
+static_assert(__builtin_elementwise_min(1, 2) == 1);
+static_assert(__builtin_elementwise_min(-1, 1) == -1);
+static_assert(__builtin_elementwise_min(1U, 2U) == 1U);
+static_assert(__builtin_elementwise_min(~0U, 0U) == 0U);
+static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_min((vector4char){1, -2, 3, -4}, (vector4char){4, -3, 2, -1})) == (LITTLE_END ? 0xFC02FD01 : 0x01FD02FC));
+static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_min((vector4uchar){1, 2, 3, 4}, (vector4uchar){4, 3, 2, 1})) == 0x01020201U);
+static_assert(__builtin_bit_cast(unsigned long long, __builtin_elementwise_min((vector4short){1, -2, 3, -4}, (vector4short){4, -3, 2, -1})) == (LITTLE_END ? 0xFFFC0002FFFD0001 : 0x0001FFFD0002FFFC));
+
+static_assert(__builtin_elementwise_abs(10) == 10);
+static_assert(__builtin_elementwise_abs(-10) == 10);
+static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_abs((vector4char){-1, -2, -3, 4})) == (LITTLE_END ? 0x04030201 : 0x01020304));
+static_assert(__builtin_elementwise_abs((int)(-2147483648)) == (int)(-2147483648)); // the absolute value of the most negative integer remains the most negative integer
+
+// Check the floating-point cases for elementwise abs.
+#define CHECK_FOUR_FLOAT_VEC(vec1, vec2) \
+ static_assert(__builtin_fabs(vec1[0] - vec2[0]) < 1e-6); \
+ static_assert(__builtin_fabs(vec1[1] - vec2[1]) < 1e-6); \
+ static_assert(__builtin_fabs(vec1[2] - vec2[2]) < 1e-6); \
+ static_assert(__builtin_fabs(vec1[3] - vec2[3]) < 1e-6);
+
+// Check floating-point vectors.
+CHECK_FOUR_FLOAT_VEC(__builtin_elementwise_abs((vector4float){-1.123, 2.123, -3.123, 4.123}), ((vector4float){1.123, 2.123, 3.123, 4.123}))
+CHECK_FOUR_FLOAT_VEC(__builtin_elementwise_abs((vector4double){-1.123, 2.123, -3.123, 4.123}), ((vector4double){1.123, 2.123, 3.123, 4.123}))
+static_assert(__builtin_elementwise_abs((float)-1.123) - (float)1.123 < 1e-6); // scalar floating-point case
+#undef CHECK_FOUR_FLOAT_VEC
+
+static_assert(__builtin_elementwise_ctlz(2) == 30);
+static_assert(__builtin_elementwise_ctlz(2, 8) == 30);
+static_assert(__builtin_elementwise_ctlz(0, 8) == 8);
+static_assert(__builtin_elementwise_ctlz(0, 0) == 0);
+static_assert(__builtin_elementwise_ctlz((char)2) == 6);
+static_assert(__builtin_elementwise_ctlz((short)2) == 14);
+static_assert(__builtin_elementwise_ctlz((char)1) == 0x7);
+static_assert(__builtin_elementwise_ctlz((char)4) == 0x5);
+static_assert(__builtin_elementwise_ctlz((char)127) == 0x1);
+static_assert(__builtin_elementwise_ctlz((char)128) == 0x0);
+static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_ctlz((vector4char){1, 4, 127, (char)128})) == (LITTLE_END ? 0x00010507 : 0x07050100));
+
+constexpr int clz0 = __builtin_elementwise_ctlz(0);
+// expected-error@-1 {{must be initialized by a constant expression}} \
+// expected-note@-1 {{evaluation of __builtin_elementwise_ctlz with a zero value is undefined}}
+constexpr vector4char clz1 = __builtin_elementwise_ctlz((vector4char){1, 0, 3, 4});
+// expected-error@-1 {{must be initialized by a constant expression}} \
+// expected-note@-1 {{evaluation of __builtin_elementwise_ctlz with a zero value is undefined}}
+static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_ctlz((vector4char){1, 0, 127, 0}, (vector4char){9, -1, 9, -2})) == (LITTLE_END ? 0xFE01FF07 : 0x07FF01FE));
+static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_ctlz((vector4char){0, 0, 0, 0}, (vector4char){0, 0, 0, 0})) == 0);
+
+static_assert(__builtin_elementwise_cttz(2) == 1);
+static_assert(__builtin_elementwise_cttz(2, 8) == 1);
+static_assert(__builtin_elementwise_cttz(0, 8) == 8);
+static_assert(__builtin_elementwise_cttz(0, 0) == 0);
+static_assert(__builtin_elementwise_cttz((char)2) == 1);
+static_assert(__builtin_elementwise_cttz((short)2) == 1);
+static_assert(__builtin_elementwise_cttz((char)8) == 0x3);
+static_assert(__builtin_elementwise_cttz((char)32) == 0x5);
+static_assert(__builtin_elementwise_cttz((char)127) == 0x0);
+static_assert(__builtin_elementwise_cttz((char)128) == 0x7);
+static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_cttz((vector4char){8, 32, 127, (char)128})) == (LITTLE_END ? 0x07000503 : 0x03050007));
+
+constexpr int ctz0 = __builtin_elementwise_cttz(0);
+// expected-error@-1 {{must be initialized by a constant expression}} \
+// expected-note@-1 {{evaluation of __builtin_elementwise_cttz with a zero value is undefined}}
+constexpr vector4char ctz1 = __builtin_elementwise_cttz((vector4char){1, 0, 3, 4});
+// expected-error@-1 {{must be initialized by a constant expression}} \
+// expected-note@-1 {{evaluation of __builtin_elementwise_cttz with a zero value is undefined}}
+static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_cttz((vector4char){8, 0, 127, 0}, (vector4char){9, -1, 9, -2})) == (LITTLE_END ? 0xFE00FF03 : 0x03FF00FE));
+static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_cttz((vector4char){0, 0, 0, 0}, (vector4char){0, 0, 0, 0})) == 0);
+
+// Non-vector floating point types.
+static_assert(__builtin_elementwise_fma(2.0, 3.0, 4.0) == 10.0);
+static_assert(__builtin_elementwise_fma(200.0, 300.0, 400.0) == 60400.0);
+// Vector type.
+constexpr vector4float fmaFloat1 =
+ __builtin_elementwise_fma((vector4float){1.0, 2.0, 3.0, 4.0},
+ (vector4float){2.0, 3.0, 4.0, 5.0},
+ (vector4float){3.0, 4.0, 5.0, 6.0});
+static_assert(fmaFloat1[0] == 5.0);
+static_assert(fmaFloat1[1] == 10.0);
+static_assert(fmaFloat1[2] == 17.0);
+static_assert(fmaFloat1[3] == 26.0);
+constexpr vector4double fmaDouble1 =
+ __builtin_elementwise_fma((vector4double){1.0, 2.0, 3.0, 4.0},
+ (vector4double){2.0, 3.0, 4.0, 5.0},
+ (vector4double){3.0, 4.0, 5.0, 6.0});
+static_assert(fmaDouble1[0] == 5.0);
+static_assert(fmaDouble1[1] == 10.0);
+static_assert(fmaDouble1[2] == 17.0);
+static_assert(fmaDouble1[3] == 26.0);
+
+constexpr float fmaArray[] = {2.0f, 2.0f, 2.0f, 2.0f};
+constexpr float fmaResult = __builtin_elementwise_fma(fmaArray[1], fmaArray[2], fmaArray[3]);
+static_assert(fmaResult == 6.0f, "");
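The ctlz/cttz numbers above follow directly from the bit patterns; a small C rendering of the same facts (kept outside constant evaluation, since C has no constexpr functions):

int ctlz_cttz_demo(void) {
  int a = __builtin_elementwise_ctlz(2);      /* 30: only bit 1 set in a 32-bit int */
  int b = __builtin_elementwise_ctlz(0, 8);   /* 8: the second operand covers zero */
  int c = __builtin_elementwise_cttz(8);      /* 3 */
  return a + b + c;
}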
diff --git a/clang/test/Sema/constexpr.c b/clang/test/Sema/constexpr.c
index 3dcb0b3..e9b738a 100644
--- a/clang/test/Sema/constexpr.c
+++ b/clang/test/Sema/constexpr.c
@@ -391,3 +391,13 @@ void ghissue109095() {
_Static_assert(i == c[0]); // expected-error {{static assertion expression is not an integral constant expression}}\
// expected-note {{initializer of 'i' is not a constant expression}}
}
+
+typedef bool __vbool2 __attribute__((ext_vector_type(2)));
+typedef short v2int16_t __attribute__((ext_vector_type(2)));
+
+bool issue155507(v2int16_t a, v2int16_t b) {
+ return __builtin_bit_cast(unsigned char, __builtin_convertvector(a == b, __vbool2)) == 0b11;
+}
+
+constexpr bool b2 = (bool)nullptr;
+_Static_assert(!b2);
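A sketch of what issue155507 relies on: comparing two ext vectors yields a boolean vector, and Clang packs a bool vector one bit per lane, so bit-casting the 2-lane result to unsigned char exposes a 2-bit lane mask (0b11 when both lanes compare equal).

typedef _Bool __vbool2  __attribute__((ext_vector_type(2)));
typedef short v2int16_t __attribute__((ext_vector_type(2)));

_Bool both_equal(v2int16_t a, v2int16_t b) {
  __vbool2 eq = __builtin_convertvector(a == b, __vbool2);
  return __builtin_bit_cast(unsigned char, eq) == 0x3;
}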
diff --git a/clang/test/Sema/designated-initializers.c b/clang/test/Sema/designated-initializers.c
index 31a3380..11dc3a2 100644
--- a/clang/test/Sema/designated-initializers.c
+++ b/clang/test/Sema/designated-initializers.c
@@ -368,3 +368,10 @@ struct {
.b = 0, // expected-warning {{initializer overrides prior initialization of this subobject}}
},
};
+
+void gh154046(void) {
+ (void)(const char[]) {
+ [0] = "", // expected-error {{incompatible pointer to integer conversion initializing 'const char' with an expression of type 'char[1]'}}
+ [1] = "" // expected-error {{incompatible pointer to integer conversion initializing 'const char' with an expression of type 'char[1]'}}
+ }[1];
+}
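For contrast, the well-formed spellings: each designated element of a char array needs a character constant, and an array of string literals needs pointer elements.

void gh154046_fixed(void) {
  char c = (const char[]){ [0] = 'a', [1] = 'b' }[1];          /* ok */
  const char *s = (const char *[]){ [0] = "", [1] = "x" }[1];  /* ok */
  (void)c; (void)s;
}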
diff --git a/clang/test/Sema/format-strings-signedness.c b/clang/test/Sema/format-strings-signedness.c
index d5a8140..773ff41 100644
--- a/clang/test/Sema/format-strings-signedness.c
+++ b/clang/test/Sema/format-strings-signedness.c
@@ -39,13 +39,13 @@ void test_printf_unsigned_char(unsigned char x)
void test_printf_int(int x)
{
printf("%d", x); // no-warning
- printf("%u", x); // expected-warning{{format specifies type 'unsigned int' but the argument has type 'int'}}
- printf("%x", x); // expected-warning{{format specifies type 'unsigned int' but the argument has type 'int'}}
+ printf("%u", x); // expected-warning{{format specifies type 'unsigned int' but the argument has type 'int', which differs in signedness}}
+ printf("%x", x); // expected-warning{{format specifies type 'unsigned int' but the argument has type 'int', which differs in signedness}}
}
void test_printf_unsigned(unsigned x)
{
- printf("%d", x); // expected-warning{{format specifies type 'int' but the argument has type 'unsigned int'}}
+ printf("%d", x); // expected-warning{{format specifies type 'int' but the argument has type 'unsigned int', which differs in signedness}}
printf("%u", x); // no-warning
printf("%x", x); // no-warning
}
@@ -53,13 +53,13 @@ void test_printf_unsigned(unsigned x)
void test_printf_long(long x)
{
printf("%ld", x); // no-warning
- printf("%lu", x); // expected-warning{{format specifies type 'unsigned long' but the argument has type 'long'}}
- printf("%lx", x); // expected-warning{{format specifies type 'unsigned long' but the argument has type 'long'}}
+ printf("%lu", x); // expected-warning{{format specifies type 'unsigned long' but the argument has type 'long', which differs in signedness}}
+ printf("%lx", x); // expected-warning{{format specifies type 'unsigned long' but the argument has type 'long', which differs in signedness}}
}
void test_printf_unsigned_long(unsigned long x)
{
- printf("%ld", x); // expected-warning{{format specifies type 'long' but the argument has type 'unsigned long'}}
+ printf("%ld", x); // expected-warning{{format specifies type 'long' but the argument has type 'unsigned long', which differs in signedness}}
printf("%lu", x); // no-warning
printf("%lx", x); // no-warning
}
@@ -67,13 +67,13 @@ void test_printf_unsigned_long(unsigned long x)
void test_printf_long_long(long long x)
{
printf("%lld", x); // no-warning
- printf("%llu", x); // expected-warning{{format specifies type 'unsigned long long' but the argument has type 'long long'}}
- printf("%llx", x); // expected-warning{{format specifies type 'unsigned long long' but the argument has type 'long long'}}
+ printf("%llu", x); // expected-warning{{format specifies type 'unsigned long long' but the argument has type 'long long', which differs in signedness}}
+ printf("%llx", x); // expected-warning{{format specifies type 'unsigned long long' but the argument has type 'long long', which differs in signedness}}
}
void test_printf_unsigned_long_long(unsigned long long x)
{
- printf("%lld", x); // expected-warning{{format specifies type 'long long' but the argument has type 'unsigned long long'}}
+ printf("%lld", x); // expected-warning{{format specifies type 'long long' but the argument has type 'unsigned long long', which differs in signedness}}
printf("%llu", x); // no-warning
printf("%llx", x); // no-warning
}
@@ -85,8 +85,8 @@ enum enum_int {
void test_printf_enum_int(enum enum_int x)
{
printf("%d", x); // no-warning
- printf("%u", x); // expected-warning{{format specifies type 'unsigned int' but the argument has underlying type 'int'}}
- printf("%x", x); // expected-warning{{format specifies type 'unsigned int' but the argument has underlying type 'int'}}
+ printf("%u", x); // expected-warning{{format specifies type 'unsigned int' but the argument has underlying type 'int', which differs in signedness}}
+ printf("%x", x); // expected-warning{{format specifies type 'unsigned int' but the argument has underlying type 'int', which differs in signedness}}
}
#ifndef _WIN32 // Disabled due to enums have different underlying type on _WIN32
@@ -96,7 +96,7 @@ enum enum_unsigned {
void test_printf_enum_unsigned(enum enum_unsigned x)
{
- printf("%d", x); // expected-warning{{format specifies type 'int' but the argument has underlying type 'unsigned int'}}
+ printf("%d", x); // expected-warning{{format specifies type 'int' but the argument has underlying type 'unsigned int', which differs in signedness}}
printf("%u", x); // no-warning
printf("%x", x); // no-warning
}
@@ -110,8 +110,8 @@ enum enum_long {
void test_printf_enum_long(enum enum_long x)
{
printf("%ld", x); // no-warning
- printf("%lu", x); // expected-warning{{format specifies type 'unsigned long' but the argument has underlying type 'long'}}
- printf("%lx", x); // expected-warning{{format specifies type 'unsigned long' but the argument has underlying type 'long'}}
+ printf("%lu", x); // expected-warning{{format specifies type 'unsigned long' but the argument has underlying type 'long', which differs in signedness}}
+ printf("%lx", x); // expected-warning{{format specifies type 'unsigned long' but the argument has underlying type 'long', which differs in signedness}}
}
enum enum_unsigned_long {
@@ -120,7 +120,7 @@ enum enum_unsigned_long {
void test_printf_enum_unsigned_long(enum enum_unsigned_long x)
{
- printf("%ld", x); // expected-warning{{format specifies type 'long' but the argument has underlying type 'unsigned long'}}
+ printf("%ld", x); // expected-warning{{format specifies type 'long' but the argument has underlying type 'unsigned long', which differs in signedness}}
printf("%lu", x); // no-warning
printf("%lx", x); // no-warning
}
@@ -136,61 +136,61 @@ void test_scanf_unsigned_char(unsigned char *y) {
void test_scanf_int(int *x) {
scanf("%d", x); // no-warning
- scanf("%u", x); // expected-warning{{format specifies type 'unsigned int *' but the argument has type 'int *'}}
- scanf("%x", x); // expected-warning{{format specifies type 'unsigned int *' but the argument has type 'int *'}}
+ scanf("%u", x); // expected-warning{{format specifies type 'unsigned int *' but the argument has type 'int *', which differs in signedness}}
+ scanf("%x", x); // expected-warning{{format specifies type 'unsigned int *' but the argument has type 'int *', which differs in signedness}}
}
void test_scanf_unsigned(unsigned *x) {
- scanf("%d", x); // expected-warning{{format specifies type 'int *' but the argument has type 'unsigned int *'}}
+ scanf("%d", x); // expected-warning{{format specifies type 'int *' but the argument has type 'unsigned int *', which differs in signedness}}
scanf("%u", x); // no-warning
scanf("%x", x); // no-warning
}
void test_scanf_long(long *x) {
scanf("%ld", x); // no-warning
- scanf("%lu", x); // expected-warning{{format specifies type 'unsigned long *' but the argument has type 'long *'}}
- scanf("%lx", x); // expected-warning{{format specifies type 'unsigned long *' but the argument has type 'long *'}}
+ scanf("%lu", x); // expected-warning{{format specifies type 'unsigned long *' but the argument has type 'long *', which differs in signedness}}
+ scanf("%lx", x); // expected-warning{{format specifies type 'unsigned long *' but the argument has type 'long *', which differs in signedness}}
}
void test_scanf_unsigned_long(unsigned long *x) {
- scanf("%ld", x); // expected-warning{{format specifies type 'long *' but the argument has type 'unsigned long *'}}
+ scanf("%ld", x); // expected-warning{{format specifies type 'long *' but the argument has type 'unsigned long *', which differs in signedness}}
scanf("%lu", x); // no-warning
scanf("%lx", x); // no-warning
}
void test_scanf_longlong(long long *x) {
scanf("%lld", x); // no-warning
- scanf("%llu", x); // expected-warning{{format specifies type 'unsigned long long *' but the argument has type 'long long *'}}
- scanf("%llx", x); // expected-warning{{format specifies type 'unsigned long long *' but the argument has type 'long long *'}}
+ scanf("%llu", x); // expected-warning{{format specifies type 'unsigned long long *' but the argument has type 'long long *', which differs in signedness}}
+ scanf("%llx", x); // expected-warning{{format specifies type 'unsigned long long *' but the argument has type 'long long *', which differs in signedness}}
}
void test_scanf_unsigned_longlong(unsigned long long *x) {
- scanf("%lld", x); // expected-warning{{format specifies type 'long long *' but the argument has type 'unsigned long long *'}}
+ scanf("%lld", x); // expected-warning{{format specifies type 'long long *' but the argument has type 'unsigned long long *', which differs in signedness}}
scanf("%llu", x); // no-warning
scanf("%llx", x); // no-warning
}
void test_scanf_enum_int(enum enum_int *x) {
scanf("%d", x); // no-warning
- scanf("%u", x); // expected-warning{{format specifies type 'unsigned int *' but the argument has type 'enum enum_int *'}}
- scanf("%x", x); // expected-warning{{format specifies type 'unsigned int *' but the argument has type 'enum enum_int *'}}
+ scanf("%u", x); // expected-warning{{format specifies type 'unsigned int *' but the argument has type 'enum enum_int *', which differs in signedness}}
+ scanf("%x", x); // expected-warning{{format specifies type 'unsigned int *' but the argument has type 'enum enum_int *', which differs in signedness}}
}
#ifndef _WIN32 // Disabled due to enums have different underlying type on _WIN32
void test_scanf_enum_unsigned(enum enum_unsigned *x) {
- scanf("%d", x); // expected-warning{{format specifies type 'int *' but the argument has type 'enum enum_unsigned *'}}
+ scanf("%d", x); // expected-warning{{format specifies type 'int *' but the argument has type 'enum enum_unsigned *', which differs in signedness}}
scanf("%u", x); // no-warning
scanf("%x", x); // no-warning
}
void test_scanf_enum_long(enum enum_long *x) {
scanf("%ld", x); // no-warning
- scanf("%lu", x); // expected-warning{{format specifies type 'unsigned long *' but the argument has type 'enum enum_long *'}}
- scanf("%lx", x); // expected-warning{{format specifies type 'unsigned long *' but the argument has type 'enum enum_long *'}}
+ scanf("%lu", x); // expected-warning{{format specifies type 'unsigned long *' but the argument has type 'enum enum_long *', which differs in signedness}}
+ scanf("%lx", x); // expected-warning{{format specifies type 'unsigned long *' but the argument has type 'enum enum_long *', which differs in signedness}}
}
void test_scanf_enum_unsigned_long(enum enum_unsigned_long *x) {
- scanf("%ld", x); // expected-warning{{format specifies type 'long *' but the argument has type 'enum enum_unsigned_long *'}}
+ scanf("%ld", x); // expected-warning{{format specifies type 'long *' but the argument has type 'enum enum_unsigned_long *', which differs in signedness}}
scanf("%lu", x); // no-warning
scanf("%lx", x); // no-warning
}
diff --git a/clang/test/Sema/gh152826.c b/clang/test/Sema/gh152826.c
new file mode 100644
index 0000000..1234d80
--- /dev/null
+++ b/clang/test/Sema/gh152826.c
@@ -0,0 +1,7 @@
+// RUN: %clang_cc1 -std=c2y -verify %s
+// RUN: %clang_cc1 -std=c2y -verify -fexperimental-new-constant-interpreter %s
+// expected-no-diagnostics
+
+void gh152826(char (*a)[*][5], int (*x)[_Countof(*a)]);
+void more_likely_in_practice(unsigned long size_one, int (*a)[*][5], int b[_Countof(*a)]);
+void f(int (*x)[*][1][*][2][*][*][3][*], int q[_Countof(*x)]);
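A sketch of the C2y semantics under test (assuming -std=c2y): _Countof yields the element count of the outermost array dimension, and inside a prototype it may refer to a preceding variably modified parameter, which is what the [*] declarators exercise.

unsigned long rows(int n, char (*a)[n][5]) {
  /* *a has type char[n][5]; its outermost dimension is n. */
  return _Countof(*a);
}

unsigned long cols(int n, char (*a)[n][5]) {
  return _Countof((*a)[0]);   /* inner dimension: always 5 */
}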
diff --git a/clang/test/Sema/implicit-void-ptr-cast.c b/clang/test/Sema/implicit-void-ptr-cast.c
index 3c3e153..037feaa 100644
--- a/clang/test/Sema/implicit-void-ptr-cast.c
+++ b/clang/test/Sema/implicit-void-ptr-cast.c
@@ -1,8 +1,8 @@
-// RUN: %clang_cc1 -fsyntax-only -std=c23 -verify=c -Wimplicit-void-ptr-cast %s
-// RUN: %clang_cc1 -fsyntax-only -std=c23 -verify=c -Wc++-compat %s
-// RUN: %clang_cc1 -fsyntax-only -verify=cxx -x c++ %s
-// RUN: %clang_cc1 -fsyntax-only -std=c23 -verify=good %s
-// RUN: %clang_cc1 -fsyntax-only -std=c23 -verify=good -Wc++-compat -Wno-implicit-void-ptr-cast %s
+// RUN: %clang_cc1 -fsyntax-only -Wno-alloc-size -std=c23 -verify=c -Wimplicit-void-ptr-cast %s
+// RUN: %clang_cc1 -fsyntax-only -Wno-alloc-size -std=c23 -verify=c -Wc++-compat %s
+// RUN: %clang_cc1 -fsyntax-only -Wno-alloc-size -verify=cxx -x c++ %s
+// RUN: %clang_cc1 -fsyntax-only -Wno-alloc-size -std=c23 -verify=good %s
+// RUN: %clang_cc1 -fsyntax-only -Wno-alloc-size -std=c23 -verify=good -Wc++-compat -Wno-implicit-void-ptr-cast %s
// good-no-diagnostics
typedef __typeof__(sizeof(int)) size_t;
@@ -82,3 +82,15 @@ void more(void) {
ptr3 = SOMETHING_THAT_IS_NOT_NULL; // c-warning {{implicit conversion when assigning to 'char *' from type 'void *' is not permitted in C++}} \
cxx-error {{assigning to 'char *' from incompatible type 'void *'}}
}
+
+void gh154157(void) {
+ #define ATOMIC_VAR_INIT(value) (value)
+
+ typedef const struct T * T_Ref;
+ static T_Ref _Atomic x = ATOMIC_VAR_INIT((void*)NULL); // c-warning {{implicit conversion when initializing '_Atomic(T_Ref)' with an expression of type 'void *' is not permitted in C++}} \
+ cxx-error {{cannot initialize a variable of type '_Atomic(T_Ref)' with an rvalue of type 'void *'}}
+ static T_Ref const y = ATOMIC_VAR_INIT((void*)NULL); // c-warning {{implicit conversion when initializing 'const T_Ref' (aka 'const struct T *const') with an expression of type 'void *' is not permitted in C++}} \
+ cxx-error {{cannot initialize a variable of type 'const T_Ref' (aka 'const struct T *const') with an rvalue of type 'void *'}}
+ static T_Ref z = ATOMIC_VAR_INIT((void*)NULL); // c-warning {{implicit conversion when initializing 'T_Ref' (aka 'const struct T *') with an expression of type 'void *' is not permitted in C++}} \
+ cxx-error {{cannot initialize a variable of type 'T_Ref' (aka 'const struct T *') with an rvalue of type 'void *'}}
+}
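The C++-compatible rewrite the warning steers toward is an explicit conversion; sketched for the first case above:

typedef const struct T *T_Ref;
/* Explicit cast: no -Wimplicit-void-ptr-cast in C, and the equivalent
   explicit conversion is well-formed in C++. */
static T_Ref _Atomic x2 = (T_Ref)(void *)0;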
diff --git a/clang/test/Sema/ptrauth-qualifier.c b/clang/test/Sema/ptrauth-qualifier.c
index 5d932b7..3e568ce 100644
--- a/clang/test/Sema/ptrauth-qualifier.c
+++ b/clang/test/Sema/ptrauth-qualifier.c
@@ -1,13 +1,25 @@
-// RUN: %clang_cc1 -triple arm64-apple-ios -std=c23 -fsyntax-only -verify -fptrauth-intrinsics %s
+// RUN: %clang_cc1 -triple arm64-apple-ios -DIS_DARWIN -std=c23 -fsyntax-only -verify -fptrauth-intrinsics %s
// RUN: %clang_cc1 -triple aarch64-linux-gnu -std=c23 -fsyntax-only -verify -fptrauth-intrinsics %s
-#if !__has_extension(ptrauth_qualifier)
+#if defined(IS_DARWIN) && !__has_extension(ptrauth_qualifier)
// This error means that the __ptrauth qualifier availability test says that it
// is not available. This error is not expected in the output, if it is seen
// there is a feature detection regression.
#error __ptrauth qualifier not enabled
#endif
+#if defined(IS_DARWIN) && !__has_feature(ptrauth_qualifier)
+// This error means that the __has_feature test for ptrauth_qualifier has
+// failed, despite it being expected on darwin.
+#error __ptrauth qualifier not enabled
+#elif !defined(IS_DARWIN) && (__has_feature(ptrauth_qualifier) || __has_extension(ptrauth_qualifier))
+#error ptrauth_qualifier labeled a feature on a non-darwin platform
+#endif
+
+#if !defined (__PTRAUTH__)
+#error __PTRAUTH__ test macro not defined when ptrauth is enabled
+#endif
+
#if __aarch64__
#define VALID_CODE_KEY 0
#define VALID_DATA_KEY 2
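The detection contract the updated test enforces, sketched: __has_feature(ptrauth_qualifier) is advertised only on Darwin, __has_extension may differ by platform, and __PTRAUTH__ is defined whenever -fptrauth-intrinsics is active, so portable code should key on the macro.

#if defined(__PTRAUTH__)
/* Data key (2), no address discrimination, constant discriminator. */
int * __ptrauth(2, 0, 0x1234) guarded_ptr;
#endif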
diff --git a/clang/test/Sema/warn-alloc-size.c b/clang/test/Sema/warn-alloc-size.c
new file mode 100644
index 0000000..445b0ba
--- /dev/null
+++ b/clang/test/Sema/warn-alloc-size.c
@@ -0,0 +1,49 @@
+// RUN: %clang_cc1 -triple x86_64-linux -fsyntax-only -verify -Walloc-size %s
+struct Foo { int x[10]; };
+
+struct ZeroSize {
+ int flexible_array[];
+};
+
+typedef __typeof__(sizeof(int)) size_t;
+void *my_malloc(size_t) __attribute__((alloc_size(1)));
+void *my_calloc(size_t, size_t) __attribute__((alloc_size(2, 1)));
+
+void foo_consumer(struct Foo* p);
+
+void alloc_foo(void) {
+ struct Foo *ptr1 = my_malloc(sizeof(struct Foo));
+ struct Foo *ptr2 = my_malloc(sizeof(*ptr2));
+ struct Foo *ptr3 = my_calloc(1, sizeof(*ptr3));
+ struct Foo *ptr4 = my_calloc(sizeof(*ptr4), 1);
+ struct Foo (*ptr5)[5] = my_malloc(sizeof(*ptr5));
+ void *ptr6 = my_malloc(4);
+
+ // Test insufficient size with different allocation functions.
+ struct Foo *ptr7 = my_malloc(sizeof(ptr7)); // expected-warning {{allocation of insufficient size '8' for type 'struct Foo' with size '40'}}
+ struct Foo *ptr8 = my_calloc(1, sizeof(ptr8)); // expected-warning {{allocation of insufficient size '8' for type 'struct Foo' with size '40'}}
+ struct Foo *ptr9 = my_calloc(sizeof(ptr9), 1); // expected-warning {{allocation of insufficient size '8' for type 'struct Foo' with size '40'}}
+
+ // Test function arguments.
+ foo_consumer(my_malloc(4)); // expected-warning {{allocation of insufficient size '4' for type 'struct Foo' with size '40'}}
+
+ // Test explicit cast.
+ struct Foo *ptr10 = (struct Foo *)my_malloc(sizeof(*ptr10));
+ struct Foo *ptr11 = (struct Foo *)my_malloc(sizeof(ptr11)); // expected-warning {{allocation of insufficient size '8' for type 'struct Foo' with size '40'}}
+ struct Foo *ptr12 = (struct Foo *)my_calloc(1, sizeof(ptr12)); // expected-warning {{allocation of insufficient size '8' for type 'struct Foo' with size '40'}}
+ struct Foo *ptr13 = (struct Foo *)my_malloc(4); // expected-warning {{allocation of insufficient size '4' for type 'struct Foo' with size '40'}}
+ void *ptr14 = (struct Foo *)my_malloc(4); // expected-warning {{allocation of insufficient size '4' for type 'struct Foo' with size '40'}}
+
+ struct Foo *ptr15 = (void *)(struct Foo *)my_malloc(4); // expected-warning 2 {{allocation of insufficient size '4' for type 'struct Foo' with size '40'}}
+ int *ptr16 = (unsigned *)(void *)(int *)my_malloc(1); // expected-warning {{initializing 'int *' with an expression of type 'unsigned int *' converts between pointers to integer types with different sign}}
+ // expected-warning@-1 {{allocation of insufficient size '1' for type 'int' with size '4'}}
+ // expected-warning@-2 {{allocation of insufficient size '1' for type 'unsigned int' with size '4'}}
+ int *ptr17 = (void *)(int *)my_malloc(1); // expected-warning {{allocation of insufficient size '1' for type 'int' with size '4'}}
+ // expected-warning@-1 {{allocation of insufficient size '1' for type 'int' with size '4'}}
+ (void)(int *)my_malloc(1); // expected-warning {{allocation of insufficient size '1' for type 'int' with size '4'}}
+
+ struct ZeroSize *ptr18 = my_malloc(0); // okay because sizeof(struct ZeroSize) = 0
+
+ void *funcptr_1 = (void (*)(int))my_malloc(0); // expected-warning {{allocation of insufficient size '0' for type 'void (int)' with size '1'}}
+ void *funcptr_2 = (void (*)(int))my_malloc(1);
+}
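A sketch of the attribute contract driving these diagnostics: alloc_size(1) marks parameter 1 as the byte count (alloc_size(2, 1) multiplies two parameters), and -Walloc-size compares the constant-evaluated size against the type the result is converted to.

typedef __typeof__(sizeof(0)) size_t;
void *my_malloc(size_t) __attribute__((alloc_size(1)));

struct Foo { int x[10]; };

struct Foo *make_foo(void) {
  /* sizeof(*p) tracks the pointee type; sizeof(p) would allocate
     only a pointer's worth of bytes and trigger the warning. */
  struct Foo *p = my_malloc(sizeof(*p));
  return p;
}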
diff --git a/clang/test/Sema/warn-lifetime-safety-dataflow.cpp b/clang/test/Sema/warn-lifetime-safety-dataflow.cpp
index 2b934ac..bcde9ad 100644
--- a/clang/test/Sema/warn-lifetime-safety-dataflow.cpp
+++ b/clang/test/Sema/warn-lifetime-safety-dataflow.cpp
@@ -12,11 +12,11 @@ MyObj* return_local_addr() {
MyObj x {10};
MyObj* p = &x;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: Issue (LoanID: [[L_X:[0-9]+]], OriginID: [[O_ADDR_X:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P:[0-9]+]], SrcID: [[O_ADDR_X]])
+// CHECK: Issue (LoanID: [[L_X:[0-9]+]], ToOrigin: [[O_ADDR_X:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_X]] (Expr: UnaryOperator))
return p;
-// CHECK: AssignOrigin (DestID: [[O_RET_VAL:[0-9]+]], SrcID: [[O_P]])
-// CHECK: ReturnOfOrigin (OriginID: [[O_RET_VAL]])
+// CHECK: AssignOrigin (Dest: [[O_RET_VAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P]] (Decl: p))
+// CHECK: ReturnOfOrigin ([[O_RET_VAL]] (Expr: ImplicitCastExpr))
// CHECK: Expire (LoanID: [[L_X]])
}
@@ -27,20 +27,20 @@ MyObj* return_local_addr() {
MyObj* assign_and_return_local_addr() {
MyObj y{20};
MyObj* ptr1 = &y;
-// CHECK: Issue (LoanID: [[L_Y:[0-9]+]], OriginID: [[O_ADDR_Y:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_PTR1:[0-9]+]], SrcID: [[O_ADDR_Y]])
+// CHECK: Issue (LoanID: [[L_Y:[0-9]+]], ToOrigin: [[O_ADDR_Y:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_PTR1:[0-9]+]] (Decl: ptr1), Src: [[O_ADDR_Y]] (Expr: UnaryOperator))
MyObj* ptr2 = ptr1;
-// CHECK: AssignOrigin (DestID: [[O_PTR1_RVAL:[0-9]+]], SrcID: [[O_PTR1]])
-// CHECK: AssignOrigin (DestID: [[O_PTR2:[0-9]+]], SrcID: [[O_PTR1_RVAL]])
+// CHECK: AssignOrigin (Dest: [[O_PTR1_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_PTR1]] (Decl: ptr1))
+// CHECK: AssignOrigin (Dest: [[O_PTR2:[0-9]+]] (Decl: ptr2), Src: [[O_PTR1_RVAL]] (Expr: ImplicitCastExpr))
ptr2 = ptr1;
-// CHECK: AssignOrigin (DestID: [[O_PTR1_RVAL_2:[0-9]+]], SrcID: [[O_PTR1]])
-// CHECK: AssignOrigin (DestID: [[O_PTR2]], SrcID: [[O_PTR1_RVAL_2]])
+// CHECK: AssignOrigin (Dest: [[O_PTR1_RVAL_2:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_PTR1]] (Decl: ptr1))
+// CHECK: AssignOrigin (Dest: [[O_PTR2]] (Decl: ptr2), Src: [[O_PTR1_RVAL_2]] (Expr: ImplicitCastExpr))
ptr2 = ptr2; // Self assignment.
-// CHECK: AssignOrigin (DestID: [[O_PTR2_RVAL:[0-9]+]], SrcID: [[O_PTR2]])
-// CHECK: AssignOrigin (DestID: [[O_PTR2]], SrcID: [[O_PTR2_RVAL]])
+// CHECK: AssignOrigin (Dest: [[O_PTR2_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_PTR2]] (Decl: ptr2))
+// CHECK: AssignOrigin (Dest: [[O_PTR2]] (Decl: ptr2), Src: [[O_PTR2_RVAL]] (Expr: ImplicitCastExpr))
return ptr2;
-// CHECK: AssignOrigin (DestID: [[O_PTR2_RVAL_2:[0-9]+]], SrcID: [[O_PTR2]])
-// CHECK: ReturnOfOrigin (OriginID: [[O_PTR2_RVAL_2]])
+// CHECK: AssignOrigin (Dest: [[O_PTR2_RVAL_2:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_PTR2]] (Decl: ptr2))
+// CHECK: ReturnOfOrigin ([[O_PTR2_RVAL_2]] (Expr: ImplicitCastExpr))
// CHECK: Expire (LoanID: [[L_Y]])
}
@@ -60,8 +60,8 @@ int return_int_val() {
void loan_expires_cpp() {
MyObj obj{1};
MyObj* pObj = &obj;
-// CHECK: Issue (LoanID: [[L_OBJ:[0-9]+]], OriginID: [[O_ADDR_OBJ:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_POBJ:[0-9]+]], SrcID: [[O_ADDR_OBJ]])
+// CHECK: Issue (LoanID: [[L_OBJ:[0-9]+]], ToOrigin: [[O_ADDR_OBJ:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_POBJ:[0-9]+]] (Decl: pObj), Src: [[O_ADDR_OBJ]] (Expr: UnaryOperator))
// CHECK: Expire (LoanID: [[L_OBJ]])
}
@@ -72,8 +72,8 @@ void loan_expires_cpp() {
void loan_expires_trivial() {
int trivial_obj = 1;
int* pTrivialObj = &trivial_obj;
-// CHECK: Issue (LoanID: [[L_TRIVIAL_OBJ:[0-9]+]], OriginID: [[O_ADDR_TRIVIAL_OBJ:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_PTOBJ:[0-9]+]], SrcID: [[O_ADDR_TRIVIAL_OBJ]])
+// CHECK: Issue (LoanID: [[L_TRIVIAL_OBJ:[0-9]+]], ToOrigin: [[O_ADDR_TRIVIAL_OBJ:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_PTOBJ:[0-9]+]] (Decl: pTrivialObj), Src: [[O_ADDR_TRIVIAL_OBJ]] (Expr: UnaryOperator))
// CHECK-NOT: Expire (LoanID: [[L_TRIVIAL_OBJ]])
// CHECK-NEXT: End of Block
// FIXME: Add check for Expire once trivial destructors are handled for expiration.
@@ -87,15 +87,15 @@ void conditional(bool condition) {
if (condition)
p = &a;
- // CHECK: Issue (LoanID: [[L_A:[0-9]+]], OriginID: [[O_ADDR_A:[0-9]+]])
- // CHECK: AssignOrigin (DestID: [[O_P:[0-9]+]], SrcID: [[O_ADDR_A]])
+// CHECK: Issue (LoanID: [[L_A:[0-9]+]], ToOrigin: [[O_ADDR_A:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_A]] (Expr: UnaryOperator))
else
p = &b;
- // CHECK: Issue (LoanID: [[L_B:[0-9]+]], OriginID: [[O_ADDR_B:[0-9]+]])
- // CHECK: AssignOrigin (DestID: [[O_P]], SrcID: [[O_ADDR_B]])
+// CHECK: Issue (LoanID: [[L_B:[0-9]+]], ToOrigin: [[O_ADDR_B:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_B]] (Expr: UnaryOperator))
int *q = p;
- // CHECK: AssignOrigin (DestID: [[O_P_RVAL:[0-9]+]], SrcID: [[O_P]])
- // CHECK: AssignOrigin (DestID: [[O_Q:[0-9]+]], SrcID: [[O_P_RVAL]])
+// CHECK: AssignOrigin (Dest: [[O_P_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P]] (Decl: p))
+// CHECK: AssignOrigin (Dest: [[O_Q:[0-9]+]] (Decl: q), Src: [[O_P_RVAL]] (Expr: ImplicitCastExpr))
}
@@ -109,12 +109,12 @@ void pointers_in_a_cycle(bool condition) {
MyObj* p2 = &v2;
MyObj* p3 = &v3;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: Issue (LoanID: [[L_V1:[0-9]+]], OriginID: [[O_ADDR_V1:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P1:[0-9]+]], SrcID: [[O_ADDR_V1]])
-// CHECK: Issue (LoanID: [[L_V2:[0-9]+]], OriginID: [[O_ADDR_V2:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P2:[0-9]+]], SrcID: [[O_ADDR_V2]])
-// CHECK: Issue (LoanID: [[L_V3:[0-9]+]], OriginID: [[O_ADDR_V3:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P3:[0-9]+]], SrcID: [[O_ADDR_V3]])
+// CHECK: Issue (LoanID: [[L_V1:[0-9]+]], ToOrigin: [[O_ADDR_V1:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P1:[0-9]+]] (Decl: p1), Src: [[O_ADDR_V1]] (Expr: UnaryOperator))
+// CHECK: Issue (LoanID: [[L_V2:[0-9]+]], ToOrigin: [[O_ADDR_V2:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P2:[0-9]+]] (Decl: p2), Src: [[O_ADDR_V2]] (Expr: UnaryOperator))
+// CHECK: Issue (LoanID: [[L_V3:[0-9]+]], ToOrigin: [[O_ADDR_V3:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P3:[0-9]+]] (Decl: p3), Src: [[O_ADDR_V3]] (Expr: UnaryOperator))
while (condition) {
MyObj* temp = p1;
@@ -122,14 +122,14 @@ void pointers_in_a_cycle(bool condition) {
p2 = p3;
p3 = temp;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: AssignOrigin (DestID: [[O_P1_RVAL:[0-9]+]], SrcID: [[O_P1]])
-// CHECK: AssignOrigin (DestID: [[O_TEMP:[0-9]+]], SrcID: [[O_P1_RVAL]])
-// CHECK: AssignOrigin (DestID: [[O_P2_RVAL:[0-9]+]], SrcID: [[O_P2]])
-// CHECK: AssignOrigin (DestID: [[O_P1]], SrcID: [[O_P2_RVAL]])
-// CHECK: AssignOrigin (DestID: [[O_P3_RVAL:[0-9]+]], SrcID: [[O_P3]])
-// CHECK: AssignOrigin (DestID: [[O_P2]], SrcID: [[O_P3_RVAL]])
-// CHECK: AssignOrigin (DestID: [[O_TEMP_RVAL:[0-9]+]], SrcID: [[O_TEMP]])
-// CHECK: AssignOrigin (DestID: [[O_P3]], SrcID: [[O_TEMP_RVAL]])
+// CHECK: AssignOrigin (Dest: [[O_P1_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P1]] (Decl: p1))
+// CHECK: AssignOrigin (Dest: [[O_TEMP:[0-9]+]] (Decl: temp), Src: [[O_P1_RVAL]] (Expr: ImplicitCastExpr))
+// CHECK: AssignOrigin (Dest: [[O_P2_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P2]] (Decl: p2))
+// CHECK: AssignOrigin (Dest: [[O_P1]] (Decl: p1), Src: [[O_P2_RVAL]] (Expr: ImplicitCastExpr))
+// CHECK: AssignOrigin (Dest: [[O_P3_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P3]] (Decl: p3))
+// CHECK: AssignOrigin (Dest: [[O_P2]] (Decl: p2), Src: [[O_P3_RVAL]] (Expr: ImplicitCastExpr))
+// CHECK: AssignOrigin (Dest: [[O_TEMP_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_TEMP]] (Decl: temp))
+// CHECK: AssignOrigin (Dest: [[O_P3]] (Decl: p3), Src: [[O_TEMP_RVAL]] (Expr: ImplicitCastExpr))
}
}
@@ -139,11 +139,11 @@ void overwrite_origin() {
MyObj s2;
MyObj* p = &s1;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: Issue (LoanID: [[L_S1:[0-9]+]], OriginID: [[O_ADDR_S1:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P:[0-9]+]], SrcID: [[O_ADDR_S1]])
+// CHECK: Issue (LoanID: [[L_S1:[0-9]+]], ToOrigin: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator))
p = &s2;
-// CHECK: Issue (LoanID: [[L_S2:[0-9]+]], OriginID: [[O_ADDR_S2:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P]], SrcID: [[O_ADDR_S2]])
+// CHECK: Issue (LoanID: [[L_S2:[0-9]+]], ToOrigin: [[O_ADDR_S2:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S2]] (Expr: UnaryOperator))
// CHECK: Expire (LoanID: [[L_S2]])
// CHECK: Expire (LoanID: [[L_S1]])
}
@@ -153,10 +153,11 @@ void reassign_to_null() {
MyObj s1;
MyObj* p = &s1;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: Issue (LoanID: [[L_S1:[0-9]+]], OriginID: [[O_ADDR_S1:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P:[0-9]+]], SrcID: [[O_ADDR_S1]])
+// CHECK: Issue (LoanID: [[L_S1:[0-9]+]], ToOrigin: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator))
p = nullptr;
-// CHECK: AssignOrigin (DestID: [[O_P]], SrcID: [[O_NULLPTR:[0-9]+]])
+// CHECK: AssignOrigin (Dest: [[O_NULLPTR_CAST:[0-9]+]] (Expr: ImplicitCastExpr), Src: {{[0-9]+}} (Expr: CXXNullPtrLiteralExpr))
+// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_NULLPTR_CAST]] (Expr: ImplicitCastExpr))
// CHECK: Expire (LoanID: [[L_S1]])
}
// FIXME: Have a better representation for nullptr than just an empty origin.
@@ -169,13 +170,13 @@ void reassign_in_if(bool condition) {
MyObj s2;
MyObj* p = &s1;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: Issue (LoanID: [[L_S1:[0-9]+]], OriginID: [[O_ADDR_S1:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P:[0-9]+]], SrcID: [[O_ADDR_S1]])
+// CHECK: Issue (LoanID: [[L_S1:[0-9]+]], ToOrigin: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator))
if (condition) {
p = &s2;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: Issue (LoanID: [[L_S2:[0-9]+]], OriginID: [[O_ADDR_S2:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P]], SrcID: [[O_ADDR_S2]])
+// CHECK: Issue (LoanID: [[L_S2:[0-9]+]], ToOrigin: [[O_ADDR_S2:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S2]] (Expr: UnaryOperator))
}
// CHECK: Block B{{[0-9]+}}:
// CHECK: Expire (LoanID: [[L_S2]])
@@ -190,26 +191,26 @@ void assign_in_switch(int mode) {
MyObj s3;
MyObj* p = nullptr;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: AssignOrigin (DestID: [[O_NULLPTR_CAST:[0-9]+]], SrcID: [[O_NULLPTR:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P:[0-9]+]], SrcID: [[O_NULLPTR_CAST]])
+// CHECK: AssignOrigin (Dest: [[O_NULLPTR_CAST:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_NULLPTR:[0-9]+]] (Expr: CXXNullPtrLiteralExpr))
+// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_NULLPTR_CAST]] (Expr: ImplicitCastExpr))
switch (mode) {
case 1:
p = &s1;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: Issue (LoanID: [[L_S1:[0-9]+]], OriginID: [[O_ADDR_S1:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P]], SrcID: [[O_ADDR_S1]])
+// CHECK: Issue (LoanID: [[L_S1:[0-9]+]], ToOrigin: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator))
break;
case 2:
p = &s2;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: Issue (LoanID: [[L_S2:[0-9]+]], OriginID: [[O_ADDR_S2:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P]], SrcID: [[O_ADDR_S2]])
+// CHECK: Issue (LoanID: [[L_S2:[0-9]+]], ToOrigin: [[O_ADDR_S2:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S2]] (Expr: UnaryOperator))
break;
default:
p = &s3;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: Issue (LoanID: [[L_S3:[0-9]+]], OriginID: [[O_ADDR_S3:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P]], SrcID: [[O_ADDR_S3]])
+// CHECK: Issue (LoanID: [[L_S3:[0-9]+]], ToOrigin: [[O_ADDR_S3:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S3]] (Expr: UnaryOperator))
break;
}
// CHECK: Block B{{[0-9]+}}:
@@ -221,14 +222,14 @@ void assign_in_switch(int mode) {
// CHECK-LABEL: Function: loan_in_loop
void loan_in_loop(bool condition) {
MyObj* p = nullptr;
- // CHECK: AssignOrigin (DestID: [[O_NULLPTR_CAST:[0-9]+]], SrcID: [[O_NULLPTR:[0-9]+]])
- // CHECK: AssignOrigin (DestID: [[O_P:[0-9]+]], SrcID: [[O_NULLPTR_CAST]])
+ // CHECK: AssignOrigin (Dest: [[O_NULLPTR_CAST:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_NULLPTR:[0-9]+]] (Expr: CXXNullPtrLiteralExpr))
+ // CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_NULLPTR_CAST]] (Expr: ImplicitCastExpr))
while (condition) {
MyObj inner;
p = &inner;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: Issue (LoanID: [[L_INNER:[0-9]+]], OriginID: [[O_ADDR_INNER:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P]], SrcID: [[O_ADDR_INNER]])
+// CHECK: Issue (LoanID: [[L_INNER:[0-9]+]], ToOrigin: [[O_ADDR_INNER:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_INNER]] (Expr: UnaryOperator))
// CHECK: Expire (LoanID: [[L_INNER]])
}
}
@@ -239,14 +240,14 @@ void loop_with_break(int count) {
MyObj s2;
MyObj* p = &s1;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: Issue (LoanID: [[L_S1:[0-9]+]], OriginID: [[O_ADDR_S1:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P:[0-9]+]], SrcID: [[O_ADDR_S1]])
+// CHECK: Issue (LoanID: [[L_S1:[0-9]+]], ToOrigin: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator))
for (int i = 0; i < count; ++i) {
if (i == 5) {
p = &s2;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: Issue (LoanID: [[L_S2:[0-9]+]], OriginID: [[O_ADDR_S2:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P]], SrcID: [[O_ADDR_S2]])
+// CHECK: Issue (LoanID: [[L_S2:[0-9]+]], ToOrigin: [[O_ADDR_S2:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S2]] (Expr: UnaryOperator))
break;
}
}
@@ -259,18 +260,18 @@ void loop_with_break(int count) {
void nested_scopes() {
MyObj* p = nullptr;
// CHECK: Block B{{[0-9]+}}:
-// CHECK: AssignOrigin (DestID: [[O_NULLPTR_CAST:[0-9]+]], SrcID: [[O_NULLPTR:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P:[0-9]+]], SrcID: [[O_NULLPTR_CAST]])
+// CHECK: AssignOrigin (Dest: [[O_NULLPTR_CAST:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_NULLPTR:[0-9]+]] (Expr: CXXNullPtrLiteralExpr))
+// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_NULLPTR_CAST]] (Expr: ImplicitCastExpr))
{
MyObj outer;
p = &outer;
-// CHECK: Issue (LoanID: [[L_OUTER:[0-9]+]], OriginID: [[O_ADDR_OUTER:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P]], SrcID: [[O_ADDR_OUTER]])
+// CHECK: Issue (LoanID: [[L_OUTER:[0-9]+]], ToOrigin: [[O_ADDR_OUTER:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_OUTER]] (Expr: UnaryOperator))
{
MyObj inner;
p = &inner;
-// CHECK: Issue (LoanID: [[L_INNER:[0-9]+]], OriginID: [[O_ADDR_INNER:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P]], SrcID: [[O_ADDR_INNER]])
+// CHECK: Issue (LoanID: [[L_INNER:[0-9]+]], ToOrigin: [[O_ADDR_INNER:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_INNER]] (Expr: UnaryOperator))
}
// CHECK: Expire (LoanID: [[L_INNER]])
}
@@ -282,13 +283,13 @@ void pointer_indirection() {
int a;
int *p = &a;
// CHECK: Block B1:
-// CHECK: Issue (LoanID: [[L_A:[0-9]+]], OriginID: [[O_ADDR_A:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_P:[0-9]+]], SrcID: [[O_ADDR_A]])
+// CHECK: Issue (LoanID: [[L_A:[0-9]+]], ToOrigin: [[O_ADDR_A:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_A]] (Expr: UnaryOperator))
int **pp = &p;
-// CHECK: Issue (LoanID: [[L_P:[0-9]+]], OriginID: [[O_ADDR_P:[0-9]+]])
-// CHECK: AssignOrigin (DestID: [[O_PP:[0-9]+]], SrcID: [[O_ADDR_P]])
+// CHECK: Issue (LoanID: [[L_P:[0-9]+]], ToOrigin: [[O_ADDR_P:[0-9]+]] (Expr: UnaryOperator))
+// CHECK: AssignOrigin (Dest: [[O_PP:[0-9]+]] (Decl: pp), Src: [[O_ADDR_P]] (Expr: UnaryOperator))
// FIXME: The Origin for the RHS is broken
int *q = *pp;
-// CHECK: AssignOrigin (DestID: [[O_Q:[0-9]+]], SrcID: {{[0-9]+}})
+// CHECK: AssignOrigin (Dest: {{[0-9]+}} (Decl: q), Src: {{[0-9]+}} (Expr: ImplicitCastExpr))
}
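A minimal sketch of the fact-dump shape the updated CHECK lines above match; the numeric IDs are illustrative (real IDs vary per function):

    void sketch() {
      MyObj s;
      MyObj* p = &s;
      // Issue (LoanID: 0, ToOrigin: 1 (Expr: UnaryOperator))           <- loan created by '&s'
      // AssignOrigin (Dest: 2 (Decl: p), Src: 1 (Expr: UnaryOperator)) <- 'p' inherits the loan
    }
    // Expire (LoanID: 0)                                               <- 's' destroyed at scope exit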
diff --git a/clang/test/Sema/warn-lifetime-safety.cpp b/clang/test/Sema/warn-lifetime-safety.cpp
new file mode 100644
index 0000000..660b9c9
--- /dev/null
+++ b/clang/test/Sema/warn-lifetime-safety.cpp
@@ -0,0 +1,273 @@
+// RUN: %clang_cc1 -fsyntax-only -fexperimental-lifetime-safety -Wexperimental-lifetime-safety -verify %s
+
+struct MyObj {
+ int id;
+ ~MyObj() {} // Non-trivial destructor
+ MyObj operator+(MyObj);
+};
+
+//===----------------------------------------------------------------------===//
+// Basic Definite Use-After-Free (-W...permissive)
+// These are cases where the pointer is guaranteed to be dangling at the use site.
+//===----------------------------------------------------------------------===//
+
+void definite_simple_case() {
+ MyObj* p;
+ {
+ MyObj s;
+ p = &s; // expected-warning {{object whose reference is captured does not live long enough}}
+ } // expected-note {{destroyed here}}
+ (void)*p; // expected-note {{later used here}}
+}
+
+void no_use_no_error() {
+ MyObj* p;
+ {
+ MyObj s;
+ p = &s;
+ }
+}
+
+void definite_pointer_chain() {
+ MyObj* p;
+ MyObj* q;
+ {
+ MyObj s;
+ p = &s; // expected-warning {{does not live long enough}}
+ q = p;
+ } // expected-note {{destroyed here}}
+ (void)*q; // expected-note {{later used here}}
+}
+
+void definite_multiple_uses_one_warning() {
+ MyObj* p;
+ {
+ MyObj s;
+ p = &s; // expected-warning {{does not live long enough}}
+ } // expected-note {{destroyed here}}
+ (void)*p; // expected-note {{later used here}}
+ // No second warning for the same loan.
+ p->id = 1;
+ MyObj* q = p;
+ (void)*q;
+}
+
+void definite_multiple_pointers() {
+ MyObj *p, *q, *r;
+ {
+ MyObj s;
+ p = &s; // expected-warning {{does not live long enough}}
+ q = &s; // expected-warning {{does not live long enough}}
+ r = &s; // expected-warning {{does not live long enough}}
+ } // expected-note 3 {{destroyed here}}
+ (void)*p; // expected-note {{later used here}}
+ (void)*q; // expected-note {{later used here}}
+ (void)*r; // expected-note {{later used here}}
+}
+
+void definite_single_pointer_multiple_loans(bool cond) {
+ MyObj *p;
+ if (cond) {
+ MyObj s;
+ p = &s; // expected-warning {{does not live long enough}}
+ } // expected-note {{destroyed here}}
+ else {
+ MyObj t;
+ p = &t; // expected-warning {{does not live long enough}}
+ } // expected-note {{destroyed here}}
+ (void)*p; // expected-note 2 {{later used here}}
+}
+
+
+//===----------------------------------------------------------------------===//
+// Potential (Maybe) Use-After-Free (-W...strict)
+// These are cases where the pointer *may* become dangling, depending on the path taken.
+//===----------------------------------------------------------------------===//
+
+void potential_if_branch(bool cond) {
+ MyObj safe;
+ MyObj* p = &safe;
+ if (cond) {
+ MyObj temp;
+ p = &temp; // expected-warning {{object whose reference is captured may not live long enough}}
+ } // expected-note {{destroyed here}}
+ (void)*p; // expected-note {{later used here}}
+}
+
+// If all paths lead to a dangle, it becomes a definite error.
+void potential_becomes_definite(bool cond) {
+ MyObj* p;
+ if (cond) {
+ MyObj temp1;
+ p = &temp1; // expected-warning {{does not live long enough}}
+ } // expected-note {{destroyed here}}
+ else {
+ MyObj temp2;
+ p = &temp2; // expected-warning {{does not live long enough}}
+ } // expected-note {{destroyed here}}
+ (void)*p; // expected-note 2 {{later used here}}
+}
+
+void definite_potential_together(bool cond) {
+ MyObj safe;
+ MyObj* p_maybe = &safe;
+ MyObj* p_definite = nullptr;
+
+ {
+ MyObj s;
+ p_definite = &s; // expected-warning {{does not live long enough}}
+ if (cond) {
+ p_maybe = &s; // expected-warning {{may not live long enough}}
+ }
+ } // expected-note 2 {{destroyed here}}
+ (void)*p_definite; // expected-note {{later used here}}
+ (void)*p_maybe; // expected-note {{later used here}}
+}
+
+void definite_overrides_potential(bool cond) {
+ MyObj safe;
+ MyObj* p;
+ MyObj* q;
+ {
+ MyObj s;
+ q = &s; // expected-warning {{does not live long enough}}
+ p = q;
+ } // expected-note {{destroyed here}}
+
+ if (cond) {
+ // 'q' is conditionally "rescued". 'p' is not.
+ q = &safe;
+ }
+
+ // The use of 'p' is a definite error because it was never rescued.
+ (void)*q;
+ (void)*p; // expected-note {{later used here}}
+ (void)*q;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Control Flow Tests
+//===----------------------------------------------------------------------===//
+
+void potential_for_loop_use_after_loop_body(MyObj safe) {
+ MyObj* p = &safe;
+ for (int i = 0; i < 1; ++i) {
+ MyObj s;
+ p = &s; // expected-warning {{may not live long enough}}
+ } // expected-note {{destroyed here}}
+ (void)*p; // expected-note {{later used here}}
+}
+
+void potential_for_loop_use_before_loop_body(MyObj safe) {
+ MyObj* p = &safe;
+ for (int i = 0; i < 1; ++i) {
+ (void)*p; // expected-note {{later used here}}
+ MyObj s;
+ p = &s; // expected-warning {{may not live long enough}}
+ } // expected-note {{destroyed here}}
+ (void)*p;
+}
+
+void potential_loop_with_break(bool cond) {
+ MyObj safe;
+ MyObj* p = &safe;
+ for (int i = 0; i < 10; ++i) {
+ if (cond) {
+ MyObj temp;
+ p = &temp; // expected-warning {{may not live long enough}}
+ break; // expected-note {{destroyed here}}
+ }
+ }
+ (void)*p; // expected-note {{later used here}}
+}
+
+void potential_multiple_expiry_of_same_loan(bool cond) {
+ // The diagnostic should choose the loan's last expiry location.
+ MyObj safe;
+ MyObj* p = &safe;
+ for (int i = 0; i < 10; ++i) {
+ MyObj unsafe;
+ if (cond) {
+ p = &unsafe; // expected-warning {{may not live long enough}}
+ break;
+ }
+ } // expected-note {{destroyed here}}
+ (void)*p; // expected-note {{later used here}}
+
+ p = &safe;
+ for (int i = 0; i < 10; ++i) {
+ MyObj unsafe;
+ if (cond) {
+ p = &unsafe; // expected-warning {{may not live long enough}}
+ if (cond)
+ break;
+ }
+ } // expected-note {{destroyed here}}
+ (void)*p; // expected-note {{later used here}}
+
+ p = &safe;
+ for (int i = 0; i < 10; ++i) {
+ if (cond) {
+ MyObj unsafe2;
+ p = &unsafe2; // expected-warning {{may not live long enough}}
+ break; // expected-note {{destroyed here}}
+ }
+ }
+ (void)*p; // expected-note {{later used here}}
+}
+
+void potential_switch(int mode) {
+ MyObj safe;
+ MyObj* p = &safe;
+ switch (mode) {
+ case 1: {
+ MyObj temp;
+ p = &temp; // expected-warning {{object whose reference is captured may not live long enough}}
+ break; // expected-note {{destroyed here}}
+ }
+ case 2: {
+ p = &safe; // This path is okay.
+ break;
+ }
+ }
+ (void)*p; // expected-note {{later used here}}
+}
+
+void definite_switch(int mode) {
+ MyObj safe;
+ MyObj* p = &safe;
+ // All cases are use-after-free, so this is a definite error.
+ switch (mode) {
+ case 1: {
+ MyObj temp1;
+ p = &temp1; // expected-warning {{does not live long enough}}
+ break; // expected-note {{destroyed here}}
+ }
+ case 2: {
+ MyObj temp2;
+ p = &temp2; // expected-warning {{does not live long enough}}
+ break; // expected-note {{destroyed here}}
+ }
+ default: {
+ MyObj temp2;
+ p = &temp2; // expected-warning {{does not live long enough}}
+ break; // expected-note {{destroyed here}}
+ }
+ }
+ (void)*p; // expected-note 3 {{later used here}}
+}
+
+//===----------------------------------------------------------------------===//
+// No-Error Cases
+//===----------------------------------------------------------------------===//
+void no_error_if_dangle_then_rescue() {
+ MyObj safe;
+ MyObj* p;
+ {
+ MyObj temp;
+ p = &temp; // p is temporarily dangling.
+ }
+ p = &safe; // p is "rescued" before use.
+ (void)*p; // This is safe.
+}
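To reproduce these warnings outside of lit, a sketch of a driver invocation, assuming (as with other cc1-only options) that -fexperimental-lifetime-safety must be forwarded via -Xclang while the -W flag passes through the driver directly:

    clang -fsyntax-only -Xclang -fexperimental-lifetime-safety \
          -Wexperimental-lifetime-safety warn-lifetime-safety.cpp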
diff --git a/clang/test/SemaCXX/MicrosoftExtensions.cpp b/clang/test/SemaCXX/MicrosoftExtensions.cpp
index 4dff2b1..ca072d8 100644
--- a/clang/test/SemaCXX/MicrosoftExtensions.cpp
+++ b/clang/test/SemaCXX/MicrosoftExtensions.cpp
@@ -126,17 +126,17 @@ __inline void FreeIDListArray(LPITEMIDLIST *ppidls) {
typedef struct in_addr {
public:
in_addr(in_addr &a) {} // precxx17-note {{candidate constructor not viable: expects an lvalue for 1st argument}}
- in_addr(in_addr *a) {} // precxx17-note {{candidate constructor not viable: no known conversion from 'IN_ADDR' (aka 'in_addr') to 'in_addr *' for 1st argument}}
+ in_addr(in_addr *a) {} // precxx17-note {{candidate constructor not viable: no known conversion from 'IN_ADDR' (aka 'struct in_addr') to 'in_addr *' for 1st argument}}
} IN_ADDR;
void f(IN_ADDR __unaligned *a) {
IN_ADDR local_addr = *a;
// FIXME: MSVC accepts the following; not sure why clang tries to
// copy-construct an in_addr.
- IN_ADDR local_addr2 = a; // precxx17-error {{no viable constructor copying variable of type 'IN_ADDR' (aka 'in_addr')}}
- // expected-warning@-1 {{implicit cast from type '__unaligned IN_ADDR *' (aka '__unaligned in_addr *') to type 'in_addr *' drops __unaligned qualifier}}
+ IN_ADDR local_addr2 = a; // precxx17-error {{no viable constructor copying variable of type 'IN_ADDR' (aka 'struct in_addr')}}
+ // expected-warning@-1 {{implicit cast from type '__unaligned IN_ADDR *' (aka '__unaligned struct in_addr *') to type 'in_addr *' drops __unaligned qualifier}}
IN_ADDR local_addr3(a);
- // expected-warning@-1 {{implicit cast from type '__unaligned IN_ADDR *' (aka '__unaligned in_addr *') to type 'in_addr *' drops __unaligned qualifier}}
+ // expected-warning@-1 {{implicit cast from type '__unaligned IN_ADDR *' (aka '__unaligned struct in_addr *') to type 'in_addr *' drops __unaligned qualifier}}
}
template<typename T> void h1(T (__stdcall M::* const )()) { }
diff --git a/clang/test/SemaCXX/builtin-get-vtable-pointer.cpp b/clang/test/SemaCXX/builtin-get-vtable-pointer.cpp
index 273f9c3..b04b38d 100644
--- a/clang/test/SemaCXX/builtin-get-vtable-pointer.cpp
+++ b/clang/test/SemaCXX/builtin-get-vtable-pointer.cpp
@@ -53,7 +53,7 @@ const void *getThing(const Bar<T> *b = nullptr) {
return __builtin_get_vtable_pointer(b->ty()); // expected-error{{__builtin_get_vtable_pointer requires an argument of class pointer type, but 'SubType *' (aka 'int *') was provided}}
// expected-error@-1{{__builtin_get_vtable_pointer requires an argument of polymorphic class pointer type, but 'Thing1' has no virtual methods}}
// expected-error@-2{{__builtin_get_vtable_pointer requires an argument of polymorphic class pointer type, but 'NonPolymorphic' has no virtual methods}}
- // expected-error@-3{{__builtin_get_vtable_pointer requires an argument with a complete type, but 'SubType' (aka 'basic::ForwardDeclaration') is incomplete}}
+ // expected-error@-3{{__builtin_get_vtable_pointer requires an argument with a complete type, but 'SubType' (aka 'ForwardDeclaration') is incomplete}}
}
template <typename>
struct IncompleteTemplate; // expected-note{{template is declared here}}
diff --git a/clang/test/SemaCXX/class-base-member-init.cpp b/clang/test/SemaCXX/class-base-member-init.cpp
index f5489e2..29b4545 100644
--- a/clang/test/SemaCXX/class-base-member-init.cpp
+++ b/clang/test/SemaCXX/class-base-member-init.cpp
@@ -83,7 +83,7 @@ namespace test5 {
A() : decltype(Base(1))(3) {
}
A(int) : Base(3), // expected-note {{previous initialization is here}}
- decltype(Base(1))(2), // expected-error {{multiple initializations given for base 'decltype(Base(1))' (aka 'test5::Base')}}
+ decltype(Base(1))(2), // expected-error {{multiple initializations given for base 'decltype(Base(1))' (aka 'Base')}}
decltype(int())() { // expected-error {{constructor initializer 'decltype(int())' (aka 'int') does not name a class}}
}
A(float) : decltype(A())(3) {
diff --git a/clang/test/SemaCXX/co_await-ast.cpp b/clang/test/SemaCXX/co_await-ast.cpp
index f792a2c..5be2004 100644
--- a/clang/test/SemaCXX/co_await-ast.cpp
+++ b/clang/test/SemaCXX/co_await-ast.cpp
@@ -48,9 +48,9 @@ awaitable foo() {
// CHECK: | `-ExprWithCleanups {{.*}} 'void'
// CHECK: | `-CoawaitExpr {{.*}} 'void'
// CHECK: | |-CXXTemporaryObjectExpr {{.*}} 'executor' 'void (){{.*}} noexcept' zeroing
-// CHECK: | |-MaterializeTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | | `-CXXBindTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' (CXXTemporary {{.*}})
-// CHECK: | | `-CXXMemberCallExpr {{.*}} 'result_t':'awaitable_frame::result_t'
+// CHECK: | |-MaterializeTemporaryExpr {{.*}} 'result_t' lvalue
+// CHECK: | | `-CXXBindTemporaryExpr {{.*}} 'result_t' (CXXTemporary {{.*}})
+// CHECK: | | `-CXXMemberCallExpr {{.*}} 'result_t'
// CHECK: | | |-MemberExpr {{.*}} '<bound member function type>' .await_transform {{.*}}
// CHECK: | | | `-DeclRefExpr {{.*}} 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame' lvalue Var {{.*}} '__promise' 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame'
// CHECK: | | `-CXXTemporaryObjectExpr {{.*}} 'executor' 'void (){{.*}} noexcept' zeroing
@@ -58,27 +58,27 @@ awaitable foo() {
// CHECK: | | `-CXXMemberCallExpr {{.*}} 'bool'
// CHECK: | | `-MemberExpr {{.*}} '<bound member function type>' .await_ready {{.*}}
// CHECK: | | `-ImplicitCastExpr {{.*}} 'const awaitable_frame::result_t' lvalue <NoOp>
-// CHECK: | | `-OpaqueValueExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | | `-MaterializeTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | | `-CXXBindTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' (CXXTemporary {{.*}})
-// CHECK: | | `-CXXMemberCallExpr {{.*}} 'result_t':'awaitable_frame::result_t'
+// CHECK: | | `-OpaqueValueExpr {{.*}} 'result_t' lvalue
+// CHECK: | | `-MaterializeTemporaryExpr {{.*}} 'result_t' lvalue
+// CHECK: | | `-CXXBindTemporaryExpr {{.*}} 'result_t' (CXXTemporary {{.*}})
+// CHECK: | | `-CXXMemberCallExpr {{.*}} 'result_t'
// CHECK: | | |-MemberExpr {{.*}} '<bound member function type>' .await_transform {{.*}}
// CHECK: | | | `-DeclRefExpr {{.*}} 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame' lvalue Var {{.*}} '__promise' 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame'
// CHECK: | | `-CXXTemporaryObjectExpr {{.*}} 'executor' 'void (){{.*}} noexcept' zeroing
// CHECK: | |-ExprWithCleanups {{.*}} 'void'
// CHECK: | | `-CXXMemberCallExpr {{.*}} 'void'
// CHECK: | | |-MemberExpr {{.*}} '<bound member function type>' .await_suspend {{.*}}
-// CHECK: | | | `-OpaqueValueExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | | | `-MaterializeTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | | | `-CXXBindTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' (CXXTemporary {{.*}})
-// CHECK: | | | `-CXXMemberCallExpr {{.*}} 'result_t':'awaitable_frame::result_t'
+// CHECK: | | | `-OpaqueValueExpr {{.*}} 'result_t' lvalue
+// CHECK: | | | `-MaterializeTemporaryExpr {{.*}} 'result_t' lvalue
+// CHECK: | | | `-CXXBindTemporaryExpr {{.*}} 'result_t' (CXXTemporary {{.*}})
+// CHECK: | | | `-CXXMemberCallExpr {{.*}} 'result_t'
// CHECK: | | | |-MemberExpr {{.*}} '<bound member function type>' .await_transform {{.*}}
// CHECK: | | | | `-DeclRefExpr {{.*}} 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame' lvalue Var {{.*}} '__promise' 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame'
// CHECK: | | | `-CXXTemporaryObjectExpr {{.*}} 'executor' 'void (){{.*}} noexcept' zeroing
// CHECK: | | `-ImplicitCastExpr {{.*}} 'std::coroutine_handle<void>' <ConstructorConversion>
// CHECK: | | `-CXXConstructExpr {{.*}} 'std::coroutine_handle<void>' 'void (coroutine_handle<awaitable_frame> &&){{.*}} noexcept'
-// CHECK: | | `-MaterializeTemporaryExpr {{.*}} 'coroutine_handle<awaitable_frame>':'std::coroutine_handle<awaitable_frame>' xvalue
-// CHECK: | | `-CallExpr {{.*}} 'coroutine_handle<awaitable_frame>':'std::coroutine_handle<awaitable_frame>'
+// CHECK: | | `-MaterializeTemporaryExpr {{.*}} 'coroutine_handle<awaitable_frame>' xvalue
+// CHECK: | | `-CallExpr {{.*}} 'coroutine_handle<awaitable_frame>'
// CHECK: | | |-ImplicitCastExpr {{.*}} 'coroutine_handle<awaitable_frame> (*)(void *) noexcept' <FunctionToPointerDecay>
// CHECK: | | | `-DeclRefExpr {{.*}} 'coroutine_handle<awaitable_frame> (void *) noexcept' lvalue CXXMethod {{.*}} 'from_address' 'coroutine_handle<awaitable_frame> (void *) noexcept'
// CHECK: | | `-CallExpr {{.*}} 'void *'
@@ -87,10 +87,10 @@ awaitable foo() {
// CHECK: | `-CXXMemberCallExpr {{.*}} 'void'
// CHECK: | `-MemberExpr {{.*}} '<bound member function type>' .await_resume {{.*}}
// CHECK: | `-ImplicitCastExpr {{.*}} 'const awaitable_frame::result_t' lvalue <NoOp>
-// CHECK: | `-OpaqueValueExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | `-MaterializeTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | `-CXXBindTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' (CXXTemporary {{.*}})
-// CHECK: | `-CXXMemberCallExpr {{.*}} 'result_t':'awaitable_frame::result_t'
+// CHECK: | `-OpaqueValueExpr {{.*}} 'result_t' lvalue
+// CHECK: | `-MaterializeTemporaryExpr {{.*}} 'result_t' lvalue
+// CHECK: | `-CXXBindTemporaryExpr {{.*}} 'result_t' (CXXTemporary {{.*}})
+// CHECK: | `-CXXMemberCallExpr {{.*}} 'result_t'
// CHECK: | |-MemberExpr {{.*}} '<bound member function type>' .await_transform {{.*}}
// CHECK: | | `-DeclRefExpr {{.*}} 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame' lvalue Var {{.*}} '__promise' 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame'
// CHECK: | `-CXXTemporaryObjectExpr {{.*}} <col:12, col:21> 'executor' 'void (){{.*}} noexcept' zeroing
diff --git a/clang/test/SemaCXX/compound-literal.cpp b/clang/test/SemaCXX/compound-literal.cpp
index 9c7c606..4b975a0 100644
--- a/clang/test/SemaCXX/compound-literal.cpp
+++ b/clang/test/SemaCXX/compound-literal.cpp
@@ -37,8 +37,8 @@ namespace brace_initializers {
POD p = (POD){1, 2};
// CHECK-NOT: CXXBindTemporaryExpr {{.*}} 'brace_initializers::POD'
- // CHECK: CompoundLiteralExpr {{.*}} 'POD':'brace_initializers::POD'
- // CHECK-NEXT: InitListExpr {{.*}} 'POD':'brace_initializers::POD'
+ // CHECK: CompoundLiteralExpr {{.*}} 'POD'{{$}}
+ // CHECK-NEXT: InitListExpr {{.*}} 'POD'{{$}}
// CHECK-NEXT: ConstantExpr {{.*}}
// CHECK-NEXT: IntegerLiteral {{.*}} 1{{$}}
// CHECK-NEXT: ConstantExpr {{.*}}
@@ -46,34 +46,34 @@ namespace brace_initializers {
void test() {
(void)(POD){1, 2};
- // CHECK-NOT: CXXBindTemporaryExpr {{.*}} 'POD':'brace_initializers::POD'
- // CHECK-NOT: ConstantExpr {{.*}} 'POD':'brace_initializers::POD'
- // CHECK: CompoundLiteralExpr {{.*}} 'POD':'brace_initializers::POD'
- // CHECK-NEXT: InitListExpr {{.*}} 'POD':'brace_initializers::POD'
+ // CHECK-NOT: CXXBindTemporaryExpr {{.*}} 'POD'
+ // CHECK-NOT: ConstantExpr {{.*}} 'POD'
+ // CHECK: CompoundLiteralExpr {{.*}} 'POD'{{$}}
+ // CHECK-NEXT: InitListExpr {{.*}} 'POD'{{$}}
// CHECK-NEXT: IntegerLiteral {{.*}} 1{{$}}
// CHECK-NEXT: IntegerLiteral {{.*}} 2{{$}}
(void)(HasDtor){1, 2};
- // CHECK: CXXBindTemporaryExpr {{.*}} 'HasDtor':'brace_initializers::HasDtor'
- // CHECK-NEXT: CompoundLiteralExpr {{.*}} 'HasDtor':'brace_initializers::HasDtor'
- // CHECK-NEXT: InitListExpr {{.*}} 'HasDtor':'brace_initializers::HasDtor'
+ // CHECK: CXXBindTemporaryExpr {{.*}} 'HasDtor'
+ // CHECK-NEXT: CompoundLiteralExpr {{.*}} 'HasDtor'{{$}}
+ // CHECK-NEXT: InitListExpr {{.*}} 'HasDtor'{{$}}
// CHECK-NEXT: IntegerLiteral {{.*}} 1{{$}}
// CHECK-NEXT: IntegerLiteral {{.*}} 2{{$}}
#if __cplusplus >= 201103L
(void)(HasCtor){1, 2};
- // CHECK-CXX11-NOT: CXXBindTemporaryExpr {{.*}} 'HasCtor':'brace_initializers::HasCtor'
- // CHECK-CXX11-NOT: ConstantExpr {{.*}} 'HasCtor':'brace_initializers::HasCtor'
- // CHECK-CXX11: CompoundLiteralExpr {{.*}} 'HasCtor':'brace_initializers::HasCtor'
- // CHECK-CXX11-NEXT: CXXTemporaryObjectExpr {{.*}} 'HasCtor':'brace_initializers::HasCtor'
+ // CHECK-CXX11-NOT: CXXBindTemporaryExpr {{.*}} 'HasCtor'
+ // CHECK-CXX11-NOT: ConstantExpr {{.*}} 'HasCtor'
+ // CHECK-CXX11: CompoundLiteralExpr {{.*}} 'HasCtor'{{$}}
+ // CHECK-CXX11-NEXT: CXXTemporaryObjectExpr {{.*}} 'HasCtor'
// CHECK-CXX11-NEXT: IntegerLiteral {{.*}} 1{{$}}
// CHECK-CXX11-NEXT: IntegerLiteral {{.*}} 2{{$}}
(void)(HasCtorDtor){1, 2};
- // CHECK-CXX11: CXXBindTemporaryExpr {{.*}} 'HasCtorDtor':'brace_initializers::HasCtorDtor'
- // CHECK-CXX11-NOT: ConstantExpr {{.*}} 'HasCtorDtor':'brace_initializers::HasCtorDtor'
- // CHECK-CXX11: CompoundLiteralExpr {{.*}} 'HasCtorDtor':'brace_initializers::HasCtorDtor'
- // CHECK-CXX11-NEXT: CXXTemporaryObjectExpr {{.*}} 'HasCtorDtor':'brace_initializers::HasCtorDtor'
+ // CHECK-CXX11: CXXBindTemporaryExpr {{.*}} 'HasCtorDtor'
+ // CHECK-CXX11-NOT: ConstantExpr {{.*}} 'HasCtorDtor'
+ // CHECK-CXX11: CompoundLiteralExpr {{.*}} 'HasCtorDtor'{{$}}
+ // CHECK-CXX11-NEXT: CXXTemporaryObjectExpr {{.*}} 'HasCtorDtor'
// CHECK-CXX11-NEXT: IntegerLiteral {{.*}} 1{{$}}
// CHECK-CXX11-NEXT: IntegerLiteral {{.*}} 2{{$}}
#endif
diff --git a/clang/test/SemaCXX/constant-expression-cxx11.cpp b/clang/test/SemaCXX/constant-expression-cxx11.cpp
index 2423a77..91c4ff1 100644
--- a/clang/test/SemaCXX/constant-expression-cxx11.cpp
+++ b/clang/test/SemaCXX/constant-expression-cxx11.cpp
@@ -2645,3 +2645,19 @@ namespace GH150709 {
static_assert((e2[0].*mp)() == 1, ""); // expected-error {{constant expression}}
static_assert((g.*mp)() == 1, ""); // expected-error {{constant expression}}
}
+
+namespace GH154567 {
+ struct T {
+ int i;
+ };
+
+ struct S {
+ struct { // expected-warning {{GNU extension}}
+ T val;
+ };
+ constexpr S() : val() {}
+ };
+
+ constexpr S s{};
+ static_assert(s.val.i == 0, "");
+}
diff --git a/clang/test/SemaCXX/constant-expression.cpp b/clang/test/SemaCXX/constant-expression.cpp
index cc041a4..ef48ee5 100644
--- a/clang/test/SemaCXX/constant-expression.cpp
+++ b/clang/test/SemaCXX/constant-expression.cpp
@@ -149,7 +149,7 @@ namespace PR31701 {
};
template <int M> class D;
template <int M>
- template<int i> void D<M>::set() { // expected-error {{from class 'D<M>' without definition}}
+ template<int i> void D<M>::set() { // expected-error {{from class 'PR31701::D<M>' without definition}}
const C c = C::n<i>;
}
}
diff --git a/clang/test/SemaCXX/constructor.cpp b/clang/test/SemaCXX/constructor.cpp
index b0b580b..b278fd5 100644
--- a/clang/test/SemaCXX/constructor.cpp
+++ b/clang/test/SemaCXX/constructor.cpp
@@ -92,7 +92,7 @@ namespace PR38286 {
template<typename T> A<T>::A() {} // expected-error {{incomplete type 'A' named in nested name specifier}}
/*FIXME: needed to recover properly from previous error*/;
template<typename> struct B;
- template<typename T> void B<T>::f() {} // expected-error {{out-of-line definition of 'f' from class 'B<type-parameter-0-0>'}}
+ template<typename T> void B<T>::f() {} // expected-error {{out-of-line definition of 'f' from class 'PR38286::B<type-parameter-0-0>'}}
template<typename> struct C; // expected-note {{non-type declaration found}}
template<typename T> C<T>::~C() {} // expected-error {{identifier 'C' after '~' in destructor name does not name a type}}
}
diff --git a/clang/test/SemaCXX/coroutine-allocs.cpp b/clang/test/SemaCXX/coroutine-allocs.cpp
index cce56de..e6b086bd 100644
--- a/clang/test/SemaCXX/coroutine-allocs.cpp
+++ b/clang/test/SemaCXX/coroutine-allocs.cpp
@@ -19,7 +19,7 @@ struct resumable {
};
};
-resumable f1() { // expected-error {{'operator new' provided by 'std::coroutine_traits<resumable>::promise_type' (aka 'resumable::promise_type') is not usable with the function signature of 'f1'}}
+resumable f1() { // expected-error {{'operator new' provided by 'std::coroutine_traits<resumable>::promise_type' (aka 'typename resumable::promise_type') is not usable with the function signature of 'f1'}}
co_return;
}
@@ -52,11 +52,11 @@ resumable f4(Allocator) {
co_return;
}
-resumable f5(const Allocator) { // expected-error {{operator new' provided by 'std::coroutine_traits<resumable, const Allocator>::promise_type' (aka 'resumable::promise_type') is not usable}}
+resumable f5(const Allocator) { // expected-error {{operator new' provided by 'std::coroutine_traits<resumable, const Allocator>::promise_type' (aka 'typename resumable::promise_type') is not usable}}
co_return;
}
-resumable f6(const Allocator &) { // expected-error {{operator new' provided by 'std::coroutine_traits<resumable, const Allocator &>::promise_type' (aka 'resumable::promise_type') is not usable}}
+resumable f6(const Allocator &) { // expected-error {{operator new' provided by 'std::coroutine_traits<resumable, const Allocator &>::promise_type' (aka 'typename resumable::promise_type') is not usable}}
co_return;
}
diff --git a/clang/test/SemaCXX/coroutine-traits-undefined-template.cpp b/clang/test/SemaCXX/coroutine-traits-undefined-template.cpp
index ea25cea..e7de0c8 100644
--- a/clang/test/SemaCXX/coroutine-traits-undefined-template.cpp
+++ b/clang/test/SemaCXX/coroutine-traits-undefined-template.cpp
@@ -14,5 +14,5 @@ template <> struct coroutine_traits<void>; // expected-note {{forward declaratio
} // namespace std
void uses_forward_declaration() {
- co_return; // expected-error {{this function cannot be a coroutine: missing definition of specialization 'coroutine_traits<void>'}}
+ co_return; // expected-error {{this function cannot be a coroutine: missing definition of specialization 'std::coroutine_traits<void>'}}
}
diff --git a/clang/test/SemaCXX/coroutines.cpp b/clang/test/SemaCXX/coroutines.cpp
index c9cefeb..098c1c2 100644
--- a/clang/test/SemaCXX/coroutines.cpp
+++ b/clang/test/SemaCXX/coroutines.cpp
@@ -1396,7 +1396,7 @@ struct bad_promise_deleted_constructor {
coro<bad_promise_deleted_constructor>
bad_coroutine_calls_deleted_promise_constructor() {
- // expected-error@-1 {{call to deleted constructor of 'std::coroutine_traits<coro<CoroHandleMemberFunctionTest::bad_promise_deleted_constructor>>::promise_type' (aka 'CoroHandleMemberFunctionTest::bad_promise_deleted_constructor')}}
+ // expected-error@-1 {{call to deleted constructor of 'std::coroutine_traits<coro<bad_promise_deleted_constructor>>::promise_type' (aka 'CoroHandleMemberFunctionTest::bad_promise_deleted_constructor')}}
co_return;
}
@@ -1463,7 +1463,7 @@ struct bad_promise_no_matching_constructor {
coro<bad_promise_no_matching_constructor>
bad_coroutine_calls_with_no_matching_constructor(int, int) {
- // expected-error@-1 {{call to deleted constructor of 'std::coroutine_traits<coro<CoroHandleMemberFunctionTest::bad_promise_no_matching_constructor>, int, int>::promise_type' (aka 'CoroHandleMemberFunctionTest::bad_promise_no_matching_constructor')}}
+ // expected-error@-1 {{call to deleted constructor of 'std::coroutine_traits<coro<bad_promise_no_matching_constructor>, int, int>::promise_type' (aka 'CoroHandleMemberFunctionTest::bad_promise_no_matching_constructor')}}
co_return;
}
diff --git a/clang/test/SemaCXX/ctad.cpp b/clang/test/SemaCXX/ctad.cpp
index 50b64e3..8380b56 100644
--- a/clang/test/SemaCXX/ctad.cpp
+++ b/clang/test/SemaCXX/ctad.cpp
@@ -186,7 +186,7 @@ namespace GH136624 {
template<class Y> using Alias = A<Y>;
}
- // FIXME: This diagnostic prints incorrect qualification for `A<int>`.
+ // FIXME: This diagnostic should print the alias 'foo::Alias' as written.
foo::Alias t = 0;
- // expected-error@-1 {{no viable conversion from 'int' to 'foo::A<int>' (aka 'A<int>')}}
+ // expected-error@-1 {{no viable conversion from 'int' to 'GH136624::A<int>' (aka 'A<int>')}}
} // namespace GH136624
diff --git a/clang/test/SemaCXX/cxx1y-variable-templates_in_class.cpp b/clang/test/SemaCXX/cxx1y-variable-templates_in_class.cpp
index 57a48fa..e18223d 100644
--- a/clang/test/SemaCXX/cxx1y-variable-templates_in_class.cpp
+++ b/clang/test/SemaCXX/cxx1y-variable-templates_in_class.cpp
@@ -302,17 +302,17 @@ namespace in_class_template {
};
template<typename T> void f() {
- typename T::template A<int> a; // expected-error {{template name refers to non-type template 'S::template A'}}
+ typename T::template A<int> a; // expected-error {{template name refers to non-type template 'in_class_template::bad_reference::S::template A'}}
}
template<typename T> void g() {
- T::template A<int>::B = 0; // expected-error {{template name refers to non-type template 'S::template A'}}
+ T::template A<int>::B = 0; // expected-error {{template name refers to non-type template 'in_class_template::bad_reference::S::template A'}}
}
template<typename T> void h() {
- class T::template A<int> c; // expected-error {{template name refers to non-type template 'S::template A'}}
+ class T::template A<int> c; // expected-error {{template name refers to non-type template 'in_class_template::bad_reference::S::template A'}}
}
template<typename T>
- struct X : T::template A<int> {}; // expected-error {{template name refers to non-type template 'S::template A'}}
+ struct X : T::template A<int> {}; // expected-error {{template name refers to non-type template 'in_class_template::bad_reference::S::template A'}}
template void f<S>(); // expected-note {{in instantiation of}}
template void g<S>(); // expected-note {{in instantiation of}}
@@ -393,7 +393,7 @@ namespace dependent_static_var_template {
int &r = A::template n; // expected-error {{use of variable template 'A::template n' requires template arguments}} expected-error {{a template argument list is expected after a name prefixed by the template keyword}}
template<typename T>
- int &f() { return T::template n; } // expected-error {{use of variable template 'A::template n' requires template arguments}} expected-error {{a template argument list is expected after a name prefixed by the template keyword}}
+ int &f() { return T::template n; } // expected-error {{use of variable template 'dependent_static_var_template::A::template n' requires template arguments}} expected-error {{a template argument list is expected after a name prefixed by the template keyword}}
int &s = f<A>(); // expected-note {{instantiation of}}
namespace B {
diff --git a/clang/test/SemaCXX/cxx1z-class-template-argument-deduction.cpp b/clang/test/SemaCXX/cxx1z-class-template-argument-deduction.cpp
index 8745185..204dd9b 100644
--- a/clang/test/SemaCXX/cxx1z-class-template-argument-deduction.cpp
+++ b/clang/test/SemaCXX/cxx1z-class-template-argument-deduction.cpp
@@ -142,13 +142,13 @@ namespace look_into_current_instantiation {
// templates, and members of the current instantiation
A<float> &r = a;
- template<typename T> struct B { // expected-note {{could not match 'B<T>' against 'int'}} \
- // expected-note {{implicit deduction guide declared as 'template <typename T> B(B<T>) -> B<T>'}}
+ template<typename T> struct B { // expected-note {{could not match 'look_into_current_instantiation::B<T>' against 'int'}} \
+ // expected-note {{implicit deduction guide declared as 'template <typename T> B(look_into_current_instantiation::B<T>) -> look_into_current_instantiation::B<T>'}}
struct X {
typedef T type;
};
B(typename X::type); // expected-note {{couldn't infer template argument 'T'}} \
- // expected-note {{implicit deduction guide declared as 'template <typename T> B(typename X::type) -> B<T>'}}
+ // expected-note {{implicit deduction guide declared as 'template <typename T> B(typename X::type) -> look_into_current_instantiation::B<T>'}}
};
B b = 0; // expected-error {{no viable}}
diff --git a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
index ae70cd9..1f4d442 100644
--- a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
+++ b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
@@ -110,10 +110,10 @@ struct Foo {
template <typename X, int Y>
using Bar = Foo<X, sizeof(X)>; // expected-note {{candidate template ignored: couldn't infer template argument 'X'}} \
- // expected-note {{implicit deduction guide declared as 'template <typename X> requires __is_deducible(test9::Bar, Foo<X, sizeof(X)>) Bar(Foo<X, sizeof(X)>) -> Foo<X, sizeof(X)>'}} \
- // expected-note {{implicit deduction guide declared as 'template <typename X> requires __is_deducible(test9::Bar, Foo<X, sizeof(X)>) Bar(const X (&)[sizeof(X)]) -> Foo<X, sizeof(X)>'}} \
+ // expected-note {{implicit deduction guide declared as 'template <typename X> requires __is_deducible(test9::Bar, test9::Foo<X, sizeof(X)>) Bar(test9::Foo<X, sizeof(X)>) -> test9::Foo<X, sizeof(X)>'}} \
+ // expected-note {{implicit deduction guide declared as 'template <typename X> requires __is_deducible(test9::Bar, test9::Foo<X, sizeof(X)>) Bar(const X (&)[sizeof(X)]) -> test9::Foo<X, sizeof(X)>'}} \
// expected-note {{candidate template ignored: constraints not satisfied [with X = int]}} \
- // expected-note {{cannot deduce template arguments for 'Bar' from 'Foo<int, 4UL>'}}
+ // expected-note {{cannot deduce template arguments for 'test9::Bar' from 'test9::Foo<int, 4UL>'}}
Bar s = {{1}}; // expected-error {{no viable constructor or deduction guide }}
@@ -138,13 +138,13 @@ namespace test11 {
struct A {};
template<class T> struct Foo { T c; };
template<class X, class Y=A>
-using AFoo = Foo<Y>; // expected-note {{candidate template ignored: could not match 'Foo<Y>' against 'int'}} \
- // expected-note {{implicit deduction guide declared as 'template <class Y = A> requires __is_deducible(test11::AFoo, Foo<Y>) AFoo(Foo<Y>) -> Foo<Y>'}} \
+using AFoo = Foo<Y>; // expected-note {{candidate template ignored: could not match 'test11::Foo<Y>' against 'int'}} \
+ // expected-note {{implicit deduction guide declared as 'template <class Y = A> requires __is_deducible(test11::AFoo, test11::Foo<Y>) AFoo(test11::Foo<Y>) -> test11::Foo<Y>'}} \
// expected-note {{candidate template ignored: constraints not satisfied [with Y = int]}} \
- // expected-note {{cannot deduce template arguments for 'AFoo' from 'Foo<int>'}} \
- // expected-note {{implicit deduction guide declared as 'template <class Y = A> requires __is_deducible(test11::AFoo, Foo<Y>) AFoo(Y) -> Foo<Y>'}} \
+ // expected-note {{cannot deduce template arguments for 'test11::AFoo' from 'test11::Foo<int>'}} \
+ // expected-note {{implicit deduction guide declared as 'template <class Y = A> requires __is_deducible(test11::AFoo, test11::Foo<Y>) AFoo(Y) -> test11::Foo<Y>'}} \
// expected-note {{candidate function template not viable: requires 0 arguments, but 1 was provided}} \
- // expected-note {{implicit deduction guide declared as 'template <class Y = A> requires __is_deducible(test11::AFoo, Foo<Y>) AFoo() -> Foo<Y>'}}
+ // expected-note {{implicit deduction guide declared as 'template <class Y = A> requires __is_deducible(test11::AFoo, test11::Foo<Y>) AFoo() -> test11::Foo<Y>'}}
AFoo s = {1}; // expected-error {{no viable constructor or deduction guide for deduction of template arguments of 'AFoo'}}
} // namespace test11
@@ -197,8 +197,8 @@ struct Foo {
template <int K>
using Bar = Foo<double, K>; // expected-note {{constraints not satisfied for class template 'Foo'}}
// expected-note@-1 {{candidate template ignored: could not match}} expected-note@-1 {{candidate template ignored: constraints not satisfied}}
-// expected-note@-2 {{implicit deduction guide declared as 'template <int K> requires __is_deducible(test14::Bar, Foo<double, K>) Bar(Foo<double, K>) -> Foo<double, K>'}}
-// expected-note@-3 {{implicit deduction guide declared as 'template <int K> requires __is_deducible(test14::Bar, Foo<double, K>) Bar(const double (&)[K]) -> Foo<double, K>'}}
+// expected-note@-2 {{implicit deduction guide declared as 'template <int K> requires __is_deducible(test14::Bar, test14::Foo<double, K>) Bar(test14::Foo<double, K>) -> test14::Foo<double, K>'}}
+// expected-note@-3 {{implicit deduction guide declared as 'template <int K> requires __is_deducible(test14::Bar, test14::Foo<double, K>) Bar(const double (&)[K]) -> test14::Foo<double, K>'}}
double abc[3];
Bar s2 = {abc}; // expected-error {{no viable constructor or deduction guide for deduction }}
} // namespace test14
@@ -212,9 +212,9 @@ template<False W>
using BFoo = AFoo<W>; // expected-note {{candidate template ignored: constraints not satisfied [with W = int]}} \
// expected-note@-1 {{because 'int' does not satisfy 'False'}} \
// expected-note@#test15_False {{because 'false' evaluated to false}} \
- // expected-note {{implicit deduction guide declared as 'template <False<> W> requires __is_deducible(AFoo, Foo<W *>) && __is_deducible(test15::BFoo, Foo<W *>) BFoo(W *) -> Foo<W *>}} \
- // expected-note {{candidate template ignored: could not match 'Foo<W *>' against 'int *'}} \
- // expected-note {{template <False<> W> requires __is_deducible(AFoo, Foo<W *>) && __is_deducible(test15::BFoo, Foo<W *>) BFoo(Foo<W *>) -> Foo<W *>}}
+ // expected-note {{implicit deduction guide declared as 'template <False<> W> requires __is_deducible(test15::AFoo, test15::Foo<W *>) && __is_deducible(test15::BFoo, test15::Foo<W *>) BFoo(W *) -> test15::Foo<W *>}} \
+ // expected-note {{candidate template ignored: could not match 'test15::Foo<W *>' against 'int *'}} \
+ // expected-note {{template <False<> W> requires __is_deducible(test15::AFoo, test15::Foo<W *>) && __is_deducible(test15::BFoo, test15::Foo<W *>) BFoo(test15::Foo<W *>) -> test15::Foo<W *>}}
int i = 0;
AFoo a1(&i); // OK, deduce Foo<int *>
@@ -276,12 +276,12 @@ template<typename T> requires False<T> // expected-note {{because 'int' does not
Foo(T) -> Foo<int>;
template <typename U>
-using Bar = Foo<U>; // expected-note {{could not match 'Foo<U>' against 'int'}} \
- // expected-note {{implicit deduction guide declared as 'template <typename U> requires __is_deducible(test18::Bar, Foo<U>) Bar(Foo<U>) -> Foo<U>'}} \
+using Bar = Foo<U>; // expected-note {{could not match 'test18::Foo<U>' against 'int'}} \
+ // expected-note {{implicit deduction guide declared as 'template <typename U> requires __is_deducible(test18::Bar, test18::Foo<U>) Bar(test18::Foo<U>) -> test18::Foo<U>'}} \
// expected-note {{candidate template ignored: constraints not satisfied}} \
// expected-note {{implicit deduction guide declared as 'template <typename T> requires False<T> && __is_deducible(test18::Bar, Foo<int>) Bar(T) -> Foo<int>'}} \
// expected-note {{candidate function template not viable}} \
- // expected-note {{implicit deduction guide declared as 'template <typename U> requires __is_deducible(test18::Bar, Foo<U>) Bar() -> Foo<U>'}}
+ // expected-note {{implicit deduction guide declared as 'template <typename U> requires __is_deducible(test18::Bar, test18::Foo<U>) Bar() -> test18::Foo<U>'}}
Bar s = {1}; // expected-error {{no viable constructor or deduction guide for deduction of template arguments}}
} // namespace test18
@@ -309,8 +309,8 @@ class Foo {};
// Verify that template template type parameter TTP is referenced/used in the
// template arguments of the RHS.
template <template<typename> typename TTP>
-using Bar = Foo<K<TTP>>; // expected-note {{candidate template ignored: could not match 'Foo<K<TTP>>' against 'int'}} \
- // expected-note {{implicit deduction guide declared as 'template <template <typename> typename TTP> requires __is_deducible(test20::Bar, Foo<K<TTP>>) Bar(Foo<K<TTP>>) -> Foo<K<TTP>>'}}
+using Bar = Foo<K<TTP>>; // expected-note {{candidate template ignored: could not match 'test20::Foo<K<TTP>>' against 'int'}} \
+ // expected-note {{implicit deduction guide declared as 'template <template <typename> typename TTP> requires __is_deducible(test20::Bar, test20::Foo<K<TTP>>) Bar(test20::Foo<K<TTP>>) -> test20::Foo<K<TTP>>'}}
template <class T>
class Container {};
@@ -463,7 +463,7 @@ static_assert(__is_same(decltype(a), A<int>));
BB b{0, 1};
// expected-error@-1 {{no viable}}
// expected-note@#test25_BB 2{{not viable}}
-// expected-note@#test25_BB {{template <typename ...US, typename V> requires __is_same(V, int) && __is_deducible(AA, A<int, US...>) && __is_deducible(test25::BB, A<int, US...>) BB(V) -> A<int, US...>}}
+// expected-note@#test25_BB {{template <typename ...US, typename V> requires __is_same(V, int) && __is_deducible(test25::AA, test25::A<int, US...>) && __is_deducible(test25::BB, test25::A<int, US...>) BB(V) -> test25::A<int, US...>}}
// expected-note@#test25_BB {{implicit deduction guide}}
}
diff --git a/clang/test/SemaCXX/cxx23-invalid-constexpr.cpp b/clang/test/SemaCXX/cxx23-invalid-constexpr.cpp
index 3229a91..1c832e5 100644
--- a/clang/test/SemaCXX/cxx23-invalid-constexpr.cpp
+++ b/clang/test/SemaCXX/cxx23-invalid-constexpr.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -fsyntax-only -verify=expected -std=c++23 %s
+// RUN: %clang_cc1 -fsyntax-only -verify=expected -std=c++23 %s -fexperimental-new-constant-interpreter
// This test covers modifications made by P2448R2.
diff --git a/clang/test/SemaCXX/cxx2a-destroying-delete.cpp b/clang/test/SemaCXX/cxx2a-destroying-delete.cpp
index 27ea666..812fd78 100644
--- a/clang/test/SemaCXX/cxx2a-destroying-delete.cpp
+++ b/clang/test/SemaCXX/cxx2a-destroying-delete.cpp
@@ -138,7 +138,7 @@ namespace templated {
void operator delete(id_alias<C> *, std::destroying_delete_t);
};
template<typename T> struct D {
- void operator delete(typename id_struct<D>::type *, std::destroying_delete_t); // expected-error {{use 'D<T> *'}}
+ void operator delete(typename id_struct<D>::type *, std::destroying_delete_t); // expected-error {{use 'templated::D<T> *'}}
};
}
diff --git a/clang/test/SemaCXX/cxx2b-consteval-propagate.cpp b/clang/test/SemaCXX/cxx2b-consteval-propagate.cpp
index c4cfd93..6cf0e02 100644
--- a/clang/test/SemaCXX/cxx2b-consteval-propagate.cpp
+++ b/clang/test/SemaCXX/cxx2b-consteval-propagate.cpp
@@ -610,3 +610,19 @@ namespace GH135281 {
void (*ff)() = f2<B>; // expected-note {{instantiation of function template specialization}}
}
#endif
+
+namespace GH145776 {
+
+void runtime_only() {}
+consteval void comptime_only() {}
+
+void fn() {
+ []() {
+ runtime_only();
+ []() {
+ &comptime_only;
+ }();
+ }();
+}
+
+}
diff --git a/clang/test/SemaCXX/cxx2b-deducing-this.cpp b/clang/test/SemaCXX/cxx2b-deducing-this.cpp
index fcbe0f6..74b3573 100644
--- a/clang/test/SemaCXX/cxx2b-deducing-this.cpp
+++ b/clang/test/SemaCXX/cxx2b-deducing-this.cpp
@@ -96,12 +96,12 @@ struct Test {
void test() {
[i = 0](this Test) { }();
- // expected-error@-1 {{invalid explicit object parameter type 'ThisInLambdaWithCaptures::Test' in lambda with capture; the type must be the same as, or derived from, the lambda}}
+ // expected-error@-1 {{invalid explicit object parameter type 'Test' in lambda with capture; the type must be the same as, or derived from, the lambda}}
struct Derived;
auto ok = [i = 0](this const Derived&) {};
auto ko = [i = 0](this const Test&) {};
- // expected-error@-1 {{invalid explicit object parameter type 'ThisInLambdaWithCaptures::Test' in lambda with capture; the type must be the same as, or derived from, the lambda}}
+ // expected-error@-1 {{invalid explicit object parameter type 'Test' in lambda with capture; the type must be the same as, or derived from, the lambda}}
struct Derived : decltype(ok){};
Derived dok{ok};
diff --git a/clang/test/SemaCXX/cxx2c-variadic-friends.cpp b/clang/test/SemaCXX/cxx2c-variadic-friends.cpp
index a4d7c80..0b01907 100644
--- a/clang/test/SemaCXX/cxx2c-variadic-friends.cpp
+++ b/clang/test/SemaCXX/cxx2c-variadic-friends.cpp
@@ -145,7 +145,7 @@ class S {
template <typename U>
struct T {
static_assert(S<U, T>::a == 42);
- static_assert(S<U, T>::a == 43); // expected-error {{static assertion failed due to requirement 'S<int, template_template::T>::a == 43'}} \
+ static_assert(S<U, T>::a == 43); // expected-error {{static assertion failed due to requirement 'template_template::S<int, template_template::T>::a == 43'}} \
// expected-note {{expression evaluates to '42 == 43'}}
};
diff --git a/clang/test/SemaCXX/destructor.cpp b/clang/test/SemaCXX/destructor.cpp
index b9e0b17..bc47d87 100644
--- a/clang/test/SemaCXX/destructor.cpp
+++ b/clang/test/SemaCXX/destructor.cpp
@@ -431,15 +431,15 @@ namespace PR9238 {
}
namespace PR7900 {
- struct A { // expected-note 2{{type 'PR7900::A' found by destructor name lookup}}
+ struct A { // expected-note 2{{type 'A' found by destructor name lookup}}
};
struct B : public A {
};
void foo() {
B b;
b.~B();
- b.~A(); // expected-error{{destructor type 'PR7900::A' in object destruction expression does not match the type 'B' of the object being destroyed}}
- (&b)->~A(); // expected-error{{destructor type 'PR7900::A' in object destruction expression does not match the type 'B' of the object being destroyed}}
+ b.~A(); // expected-error{{destructor type 'A' in object destruction expression does not match the type 'B' of the object being destroyed}}
+ (&b)->~A(); // expected-error{{destructor type 'A' in object destruction expression does not match the type 'B' of the object being destroyed}}
}
}
diff --git a/clang/test/SemaCXX/elaborated-type-specifier.cpp b/clang/test/SemaCXX/elaborated-type-specifier.cpp
index a96e696..a80ba07 100644
--- a/clang/test/SemaCXX/elaborated-type-specifier.cpp
+++ b/clang/test/SemaCXX/elaborated-type-specifier.cpp
@@ -27,7 +27,7 @@ namespace NS {
void test_X_elab(NS::X x) {
struct S4 *s4 = 0; // expected-note{{'S4' is not defined, but forward declared here; conversion would be valid if it was derived from 'NS::S4'}}
- x.test_elab2(s4); // expected-error{{cannot initialize a parameter of type 'S4 *' (aka 'NS::S4 *') with an lvalue of type 'struct S4 *'}}
+ x.test_elab2(s4); // expected-error{{cannot initialize a parameter of type 'S4 *' with an lvalue of type 'struct S4 *'}}
}
namespace NS {
diff --git a/clang/test/SemaCXX/enum-scoped.cpp b/clang/test/SemaCXX/enum-scoped.cpp
index 2d7b3c9..09f3206 100644
--- a/clang/test/SemaCXX/enum-scoped.cpp
+++ b/clang/test/SemaCXX/enum-scoped.cpp
@@ -1,5 +1,6 @@
-// RUN: %clang_cc1 -fsyntax-only -pedantic -std=c++11 -verify -triple x86_64-apple-darwin %s
-// RUN: %clang_cc1 -fsyntax-only -pedantic -std=c++17 -verify -triple x86_64-apple-darwin %s
+// RUN: %clang_cc1 -fsyntax-only -pedantic -std=c++11 -verify=expected,cxx11-17 -triple x86_64-apple-darwin %s
+// RUN: %clang_cc1 -fsyntax-only -pedantic -std=c++17 -verify=expected,cxx11-17 -triple x86_64-apple-darwin %s
+// RUN: %clang_cc1 -fsyntax-only -pedantic -std=c++20 -verify -triple x86_64-apple-darwin %s
enum class E1 {
Val1 = 1L
@@ -128,7 +129,10 @@ namespace rdar9366066 {
void f(X x) {
x % X::value; // expected-error{{invalid operands to binary expression ('X' and 'rdar9366066::X')}}
+ // expected-note@-1{{no implicit conversion for scoped enum; consider casting to underlying type}}
+ // expected-note@-2{{no implicit conversion for scoped enum; consider casting to underlying type}}
x % 8; // expected-error{{invalid operands to binary expression ('X' and 'int')}}
+ // expected-note@-1{{no implicit conversion for scoped enum; consider casting to underlying type}}
}
}
@@ -325,7 +329,7 @@ namespace PR18044 {
int E::*p; // expected-error {{does not point into a class}}
using E::f; // expected-error {{no member named 'f'}}
- using E::a; // expected-warning {{using declaration naming a scoped enumerator is a C++20 extension}}
+ using E::a; // cxx11-17-warning {{using declaration naming a scoped enumerator is a C++20 extension}}
E b = a;
}
@@ -334,7 +338,7 @@ namespace test11 {
typedef E E2;
E2 f1() { return E::a; }
- bool f() { return !f1(); } // expected-error {{invalid argument type 'E2' (aka 'test11::E') to unary expression}}
+ bool f() { return !f1(); } // expected-error {{invalid argument type 'E2' (aka 'E') to unary expression}}
}
namespace PR35586 {
@@ -364,3 +368,102 @@ S<_Atomic(int)> s; // expected-warning {{'_Atomic' is a C11 extension}}
static_assert(__is_same(__underlying_type(S<_Atomic(long long)>::OhBoy), long long), ""); // expected-warning {{'_Atomic' is a C11 extension}}
// expected-note@-1 {{in instantiation of template class 'GH147736::S<_Atomic(long long)>' requested here}}
}
+
+namespace GH24265 {
+ enum class E_int { e };
+ enum class E_long : long { e };
+
+ void f() {
+ E_int::e + E_long::e; // expected-error {{invalid operands to binary expression ('GH24265::E_int' and 'GH24265::E_long')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ // expected-note@-2 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ E_int::e + 0; // expected-error {{invalid operands to binary expression ('GH24265::E_int' and 'int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+
+ 0 * E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 / E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 % E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 + E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 - E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 << E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 >> E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+
+ #if __cplusplus >= 202002L
+ 0 <=> E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ #endif
+
+ 0 < E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 > E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 <= E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 >= E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 == E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 != E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 & E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 ^ E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 | E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 && E_int::e; // expected-error {{value of type 'GH24265::E_int' is not contextually convertible to 'bool'}}
+ // expected-error@-1 {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-2 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ 0 || E_int::e; // expected-error {{value of type 'GH24265::E_int' is not contextually convertible to 'bool'}}
+ // expected-error@-1 {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-2 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+
+ int a;
+ a *= E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ a /= E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ a %= E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ a += E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ a -= E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ a <<= E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ a >>= E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ a &= E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ a ^= E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+ a |= E_int::e; // expected-error {{invalid operands to binary expression ('int' and 'GH24265::E_int')}}
+ // expected-note@-1 {{no implicit conversion for scoped enum; consider casting to underlying type}}
+
+ // TODO: These do not have the diagnostic yet
+ E_int b;
+ b *= 0; // expected-error {{invalid operands to binary expression ('E_int' and 'int')}}
+ b /= 0; // expected-error {{invalid operands to binary expression ('E_int' and 'int')}}
+ b %= 0; // expected-error {{invalid operands to binary expression ('E_int' and 'int')}}
+ b += 0; // expected-error {{invalid operands to binary expression ('E_int' and 'int')}}
+ b -= 0; // expected-error {{invalid operands to binary expression ('E_int' and 'int')}}
+ b <<= 0; // expected-error {{invalid operands to binary expression ('E_int' and 'int')}}
+ b >>= 0; // expected-error {{invalid operands to binary expression ('E_int' and 'int')}}
+ b &= 0; // expected-error {{invalid operands to binary expression ('E_int' and 'int')}}
+ b ^= 0; // expected-error {{invalid operands to binary expression ('E_int' and 'int')}}
+ b |= 0; // expected-error {{invalid operands to binary expression ('E_int' and 'int')}}
+
+ a = E_int::e; // expected-error {{assigning to 'int' from incompatible type 'GH24265::E_int'}}
+ b = 0; // expected-error {{assigning to 'E_int' from incompatible type 'int'}}
+
+ E_int c = 0; // expected-error {{cannot initialize a variable of type 'E_int' with an rvalue of type 'int'}}
+ int d = E_int::e; // expected-error {{cannot initialize a variable of type 'int' with an rvalue of type 'GH24265::E_int'}}
+ }
+}
diff --git a/clang/test/SemaCXX/err_init_conversion_failed.cpp b/clang/test/SemaCXX/err_init_conversion_failed.cpp
index f1949c0..e2617a1 100644
--- a/clang/test/SemaCXX/err_init_conversion_failed.cpp
+++ b/clang/test/SemaCXX/err_init_conversion_failed.cpp
@@ -56,6 +56,6 @@ template <class P> struct S2 {
void test_15() {
S2<S> X = {&S::foo};
- // expected-error-re@-1{{cannot initialize a member subobject of type 'void (S::*)(const int &){{( __attribute__\(\(thiscall\)\))?}}' with an rvalue of type 'void (S::*)(int){{( __attribute__\(\(thiscall\)\))?}}': type mismatch at 1st parameter ('const int &' vs 'int')}}
+ // expected-error-re@-1{{cannot initialize a member subobject of type 'void (template_test::S::*)(const int &){{( __attribute__\(\(thiscall\)\))?}}' with an rvalue of type 'void (S::*)(int){{( __attribute__\(\(thiscall\)\))?}}': type mismatch at 1st parameter ('const int &' vs 'int')}}
}
}
diff --git a/clang/test/SemaCXX/gh102293.cpp b/clang/test/SemaCXX/gh102293.cpp
index fe417e6..37b674d 100644
--- a/clang/test/SemaCXX/gh102293.cpp
+++ b/clang/test/SemaCXX/gh102293.cpp
@@ -35,7 +35,7 @@ class bar { // expected-note {{definition of 'GH104802::bar' is not comple
class baz { // expected-note {{definition of 'GH104802::baz' is not complete until the closing '}'}}
typedef class baz blech;
- blech a; // expected-error {{field has incomplete type 'blech' (aka 'GH104802::baz')}}
+ blech a; // expected-error {{field has incomplete type 'blech' (aka 'class baz')}}
virtual int c();
};
diff --git a/clang/test/SemaCXX/gh113323.cpp b/clang/test/SemaCXX/gh113323.cpp
new file mode 100644
index 0000000..b1f9c5b
--- /dev/null
+++ b/clang/test/SemaCXX/gh113323.cpp
@@ -0,0 +1,6 @@
+// RUN: %clang_cc1 -fsyntax-only -verify %s
+// RUN: %clang_cc1 -fsyntax-only -verify=expected,no-recovery -fno-recovery-ast %s
+
+int a() {} // expected-warning {{non-void function does not return a value}}
+constexpr void (*d)() = a; // expected-error {{cannot initialize a variable of type}}
+const void *f = __builtin_function_start(d); // no-recovery-error {{argument must be a function}}
diff --git a/clang/test/SemaCXX/incomplete-call.cpp b/clang/test/SemaCXX/incomplete-call.cpp
index 0846c88..8f24d45 100644
--- a/clang/test/SemaCXX/incomplete-call.cpp
+++ b/clang/test/SemaCXX/incomplete-call.cpp
@@ -59,7 +59,7 @@ namespace pr18542 {
int count;
template<typename CharT> class basic_istream;
template<typename CharT>
- void basic_istream<CharT>::read() { // expected-error{{out-of-line definition of 'read' from class 'basic_istream<CharT>' without definition}}
+ void basic_istream<CharT>::read() { // expected-error{{out-of-line definition of 'read' from class 'pr18542::X::basic_istream<CharT>' without definition}}
count = 0;
}
};
diff --git a/clang/test/SemaCXX/lambda-expressions.cpp b/clang/test/SemaCXX/lambda-expressions.cpp
index 2d2dde8..8ea8e32 100644
--- a/clang/test/SemaCXX/lambda-expressions.cpp
+++ b/clang/test/SemaCXX/lambda-expressions.cpp
@@ -194,6 +194,11 @@ namespace ModifyingCapture {
[=] {
n = 1; // expected-error {{cannot assign to a variable captured by copy in a non-mutable lambda}}
};
+ const int cn = 0;
+ // cxx03-cxx11-warning@+1 {{initialized lambda captures are a C++14 extension}}
+ [&cnr = cn]{ // expected-note {{variable 'cnr' declared const here}}
+ cnr = 1; // expected-error {{cannot assign to variable 'cnr' with const-qualified type 'const int &'}}
+ };
}
}
diff --git a/clang/test/SemaCXX/matrix-casts.cpp b/clang/test/SemaCXX/matrix-casts.cpp
index c0f3df1..708b3fa 100644
--- a/clang/test/SemaCXX/matrix-casts.cpp
+++ b/clang/test/SemaCXX/matrix-casts.cpp
@@ -35,7 +35,7 @@ void f1() {
(matrix_4_4<char>)v; // expected-error {{C-style cast from 'vec' (vector of 1 'int' value) to 'matrix_4_4<char>' (aka 'char __attribute__((matrix_type(4, 4)))') is not allowed}}
(test_struct *)m1; // expected-error {{cannot cast from type 'matrix_4_4<char>' (aka 'char __attribute__((matrix_type(4, 4)))') to pointer type 'test_struct *'}}
- (matrix_5_5<float>)s; // expected-error {{C-style cast from 'test_struct *' to 'matrix_5_5<float>' (aka 'float __attribute__((matrix_type(5, 5)))') is not allowed}}
+ (matrix_5_5<float>)s; // expected-error {{C-style cast from 'test_struct *' (aka 'struct test_struct *') to 'matrix_5_5<float>' (aka 'float __attribute__((matrix_type(5, 5)))') is not allowed}}
}
void f2() {
@@ -59,7 +59,7 @@ void f2() {
static_cast<matrix_4_4<char>>(v); // expected-error {{static_cast from 'vec' (vector of 1 'int' value) to 'matrix_4_4<char>' (aka 'char __attribute__((matrix_type(4, 4)))') is not allowed}}
static_cast<test_struct *>(m1); // expected-error {{cannot cast from type 'matrix_4_4<char>' (aka 'char __attribute__((matrix_type(4, 4)))') to pointer type 'test_struct *'}}
- static_cast<matrix_5_5<float>>(s); // expected-error {{static_cast from 'test_struct *' to 'matrix_5_5<float>' (aka 'float __attribute__((matrix_type(5, 5)))') is not allowed}}
+ static_cast<matrix_5_5<float>>(s); // expected-error {{static_cast from 'test_struct *' (aka 'struct test_struct *') to 'matrix_5_5<float>' (aka 'float __attribute__((matrix_type(5, 5)))') is not allowed}}
}
void f3() {
diff --git a/clang/test/SemaCXX/nested-name-spec.cpp b/clang/test/SemaCXX/nested-name-spec.cpp
index fedbb30..c60275b 100644
--- a/clang/test/SemaCXX/nested-name-spec.cpp
+++ b/clang/test/SemaCXX/nested-name-spec.cpp
@@ -15,7 +15,9 @@ namespace A {
static int Ag1();
static int Ag2();
};
- int ax; // expected-note {{'ax' declared here}}
+ int ax;
+ // expected-note@-1 {{'ax' declared here}}
+ // expected-note@-2 {{'::A::ax' declared here}}
void Af();
}
@@ -100,7 +102,7 @@ void f3() {
N::x = 0; // expected-error {{'N' is not a class, namespace, or enumeration}}
{ int A; A::ax = 0; }
{ typedef int A; A::ax = 0; } // expected-error{{'A' (aka 'int') is not a class, namespace, or enumeration}}
- { typedef A::C A; A::ax = 0; } // expected-error {{no member named 'ax'}}
+ { typedef A::C A; A::ax = 0; } // expected-error {{no member named 'ax' in 'A::C'; did you mean '::A::ax'?}}
{ typedef A::C A; A::cx = 0; }
}
@@ -474,7 +476,7 @@ namespace A {
class B {
typedef C D; // expected-error{{unknown type name 'C'}}
A::D::F;
- // expected-error@-1{{'PR30619::A::B::D' (aka 'int') is not a class, namespace, or enumeration}}
+ // expected-error@-1{{'A::D' (aka 'int') is not a class, namespace, or enumeration}}
};
}
}
diff --git a/clang/test/SemaCXX/new-delete.cpp b/clang/test/SemaCXX/new-delete.cpp
index c05130b..1adb993 100644
--- a/clang/test/SemaCXX/new-delete.cpp
+++ b/clang/test/SemaCXX/new-delete.cpp
@@ -721,19 +721,7 @@ int (*const_fold)[12] = new int[3][&const_fold + 12 - &const_fold];
#if __cplusplus >= 201402L && !defined(NEW_INTERP)
// expected-error@-2 {{array size is not a constant expression}}
// expected-note@-3 {{cannot refer to element 12 of non-array}}
-#elif __cplusplus < 201103L && !defined(NEW_INTERP)
+#elif __cplusplus < 201103L
// expected-error@-5 {{cannot allocate object of variably modified type}}
// expected-warning@-6 {{variable length arrays in C++ are a Clang extension}}
#endif
-#ifdef NEW_INTERP
-#if __cplusplus >= 201402L
-// expected-error@-10 {{array size is not a constant expression}}
-// expected-note@-11 {{cannot refer to element 12 of non-array}}
-#elif __cplusplus >= 201103L
-// expected-error@-13 {{only the first dimension of an allocated array may have dynamic size}}
-// expected-note@-14 {{cannot refer to element 12 of non-array}}
-#else
-// expected-error@-16 {{only the first dimension of an allocated array may have dynamic size}}
-// expected-note@-17 {{cannot refer to element 12 of non-array}}
-#endif
-#endif
diff --git a/clang/test/SemaCXX/opaque-enum-declaration-in-class-template.cpp b/clang/test/SemaCXX/opaque-enum-declaration-in-class-template.cpp
index 7101a15..4257e53 100644
--- a/clang/test/SemaCXX/opaque-enum-declaration-in-class-template.cpp
+++ b/clang/test/SemaCXX/opaque-enum-declaration-in-class-template.cpp
@@ -94,7 +94,9 @@ struct S5 {
};
int X5 = S5<char>::E1{} + '\0'; // expected-error{{invalid operands to binary expression}}
+ // expected-note@-1{{no implicit conversion for scoped enum; consider casting to underlying type}}
int Y5 = S5<char>::E2{} + '\0'; // expected-error{{invalid operands to binary expression}}
+ // expected-note@-1{{no implicit conversion for scoped enum; consider casting to underlying type}}
template <typename T>
diff --git a/clang/test/SemaCXX/pointer-forward-declared-class-conversion.cpp b/clang/test/SemaCXX/pointer-forward-declared-class-conversion.cpp
index b0a101e..8d0cdcd 100644
--- a/clang/test/SemaCXX/pointer-forward-declared-class-conversion.cpp
+++ b/clang/test/SemaCXX/pointer-forward-declared-class-conversion.cpp
@@ -12,6 +12,6 @@ A2<int> *a2 = b2; // expected-error{{cannot initialize a variable of type 'A2<in
typedef struct S s;
const s *f();
-s *g1() { return f(); } // expected-error{{cannot initialize return object of type 's *' (aka 'S *') with an rvalue of type 'const s *' (aka 'const S *')}}
+s *g1() { return f(); } // expected-error{{cannot initialize return object of type 's *' (aka 'struct S *') with an rvalue of type 'const s *' (aka 'const struct S *')}}
-B1 *g2() { return f(); } // expected-error{{cannot initialize return object of type 'B1 *' with an rvalue of type 'const s *' (aka 'const S *')}}
+B1 *g2() { return f(); } // expected-error{{cannot initialize return object of type 'B1 *' with an rvalue of type 'const s *' (aka 'const struct S *')}}
diff --git a/clang/test/SemaCXX/pr100095.cpp b/clang/test/SemaCXX/pr100095.cpp
index 15913fe..9b8c09c 100644
--- a/clang/test/SemaCXX/pr100095.cpp
+++ b/clang/test/SemaCXX/pr100095.cpp
@@ -1,5 +1,4 @@
// RUN: %clang_cc1 -fsyntax-only -std=c++11 %s
-// XFAIL: asserts
template <class> struct Pair;
template <class...> struct Tuple {
diff --git a/clang/test/SemaCXX/pseudo-destructors.cpp b/clang/test/SemaCXX/pseudo-destructors.cpp
index 8120292..55849d6 100644
--- a/clang/test/SemaCXX/pseudo-destructors.cpp
+++ b/clang/test/SemaCXX/pseudo-destructors.cpp
@@ -116,7 +116,7 @@ typedef Derived *Foo;
void test2(Foo d) {
d.~Foo(); // This is ok
- d.~Derived(); // expected-error {{member reference type 'Foo' (aka 'dotPointerAccess::Derived *') is a pointer; did you mean to use '->'}}
+ d.~Derived(); // expected-error {{member reference type 'Foo' (aka 'Derived *') is a pointer; did you mean to use '->'}}
}
}
diff --git a/clang/test/SemaCXX/ptrauth-triviality.cpp b/clang/test/SemaCXX/ptrauth-triviality.cpp
index ba8a827..b1b334b 100644
--- a/clang/test/SemaCXX/ptrauth-triviality.cpp
+++ b/clang/test/SemaCXX/ptrauth-triviality.cpp
@@ -74,7 +74,7 @@ static_assert(__is_trivially_destructible(S3));
static_assert(!__is_trivially_copyable(S3));
static_assert(!__is_trivially_relocatable(S3)); // expected-warning{{deprecated}}
//FIXME
-static_assert(__builtin_is_cpp_trivially_relocatable(S3));
+static_assert(!__builtin_is_cpp_trivially_relocatable(S3));
static_assert(!__is_trivially_equality_comparable(S3));
@@ -84,7 +84,7 @@ static_assert(!__is_trivially_assignable(Holder<S3>, const Holder<S3>&));
static_assert(__is_trivially_destructible(Holder<S3>));
static_assert(!__is_trivially_copyable(Holder<S3>));
static_assert(!__is_trivially_relocatable(Holder<S3>)); // expected-warning{{deprecated}}
-static_assert(__builtin_is_cpp_trivially_relocatable(Holder<S3>));
+static_assert(!__builtin_is_cpp_trivially_relocatable(Holder<S3>));
static_assert(!__is_trivially_equality_comparable(Holder<S3>));
struct IA S4 {
@@ -207,7 +207,7 @@ template <class T> struct UnionWrapper trivially_relocatable_if_eligible {
} u;
};
-static_assert(test_is_trivially_relocatable_v<AddressDiscriminatedPolymorphicBase>);
+static_assert(!test_is_trivially_relocatable_v<AddressDiscriminatedPolymorphicBase>);
static_assert(test_is_trivially_relocatable_v<NoAddressDiscriminatedPolymorphicBase>);
static_assert(inheritance_relocatability_matches_bases_v<AddressDiscriminatedPolymorphicBase, NoAddressDiscriminatedPolymorphicBase>);
static_assert(inheritance_relocatability_matches_bases_v<NoAddressDiscriminatedPolymorphicBase, AddressDiscriminatedPolymorphicBase>);
diff --git a/clang/test/SemaCXX/ptrauth-type-traits.cpp b/clang/test/SemaCXX/ptrauth-type-traits.cpp
new file mode 100644
index 0000000..aefbd63
--- /dev/null
+++ b/clang/test/SemaCXX/ptrauth-type-traits.cpp
@@ -0,0 +1,401 @@
+// RUN: %clang_cc1 -triple arm64 -std=c++26 -Wno-deprecated-builtins \
+// RUN: -fsyntax-only -verify %s
+// RUN: %clang_cc1 -triple arm64-apple-darwin -fptrauth-calls -fptrauth-intrinsics \
+// RUN: -fptrauth-vtable-pointer-address-discrimination \
+// RUN: -std=c++26 -Wno-deprecated-builtins \
+// RUN: -fsyntax-only -verify %s
+
+// expected-no-diagnostics
+
+#ifdef __PTRAUTH__
+
+#define NonAddressDiscriminatedVTablePtrAttr \
+ [[clang::ptrauth_vtable_pointer(process_independent, no_address_discrimination, no_extra_discrimination)]]
+#define AddressDiscriminatedVTablePtrAttr \
+ [[clang::ptrauth_vtable_pointer(process_independent, address_discrimination, no_extra_discrimination)]]
+#define ADDR_DISC_ENABLED true
+#else
+#define NonAddressDiscriminatedVTablePtrAttr
+#define AddressDiscriminatedVTablePtrAttr
+#define ADDR_DISC_ENABLED false
+#define __ptrauth(...)
+#endif
+
+
+typedef int* __ptrauth(1,1,1) AddressDiscriminatedPtr;
+typedef __UINT64_TYPE__ __ptrauth(1,1,1) AddressDiscriminatedInt64;
+struct AddressDiscriminatedFields {
+ AddressDiscriminatedPtr ptr;
+};
+struct RelocatableAddressDiscriminatedFields trivially_relocatable_if_eligible {
+ AddressDiscriminatedPtr ptr;
+};
+struct AddressDiscriminatedFieldInBaseClass : AddressDiscriminatedFields {
+ void *newfield;
+};
+
+struct NonAddressDiscriminatedVTablePtrAttr NonAddressDiscriminatedVTablePtr {
+ virtual ~NonAddressDiscriminatedVTablePtr();
+ void *i;
+};
+
+struct NonAddressDiscriminatedVTablePtrAttr NonAddressDiscriminatedVTablePtr2 {
+ virtual ~NonAddressDiscriminatedVTablePtr2();
+ void *j;
+};
+
+struct NonAddressDiscriminatedVTablePtrAttr RelocatableNonAddressDiscriminatedVTablePtr trivially_relocatable_if_eligible {
+ virtual ~RelocatableNonAddressDiscriminatedVTablePtr();
+ void *i;
+};
+
+struct NonAddressDiscriminatedVTablePtrAttr RelocatableNonAddressDiscriminatedVTablePtr2 trivially_relocatable_if_eligible {
+ virtual ~RelocatableNonAddressDiscriminatedVTablePtr2();
+ void *j;
+};
+
+struct AddressDiscriminatedVTablePtrAttr AddressDiscriminatedVTablePtr {
+ virtual ~AddressDiscriminatedVTablePtr();
+ void *k;
+};
+
+struct AddressDiscriminatedVTablePtrAttr RelocatableAddressDiscriminatedVTablePtr trivially_relocatable_if_eligible {
+ virtual ~RelocatableAddressDiscriminatedVTablePtr();
+ void *k;
+};
+
+struct NoAddressDiscriminatedBaseClasses : NonAddressDiscriminatedVTablePtr,
+ NonAddressDiscriminatedVTablePtr2 {
+ void *l;
+};
+
+struct RelocatableNoAddressDiscriminatedBaseClasses trivially_relocatable_if_eligible :
+ NonAddressDiscriminatedVTablePtr,
+ NonAddressDiscriminatedVTablePtr2 {
+ void *l;
+};
+
+struct AddressDiscriminatedPrimaryBase : AddressDiscriminatedVTablePtr,
+ NonAddressDiscriminatedVTablePtr {
+ void *l;
+};
+struct AddressDiscriminatedSecondaryBase : NonAddressDiscriminatedVTablePtr,
+ AddressDiscriminatedVTablePtr {
+ void *l;
+};
+
+struct RelocatableAddressDiscriminatedPrimaryBase : RelocatableAddressDiscriminatedVTablePtr,
+ RelocatableNonAddressDiscriminatedVTablePtr {
+ void *l;
+};
+struct RelocatableAddressDiscriminatedSecondaryBase : RelocatableNonAddressDiscriminatedVTablePtr,
+ RelocatableAddressDiscriminatedVTablePtr {
+ void *l;
+};
+struct EmbeddedAddressDiscriminatedPolymorphicClass {
+ AddressDiscriminatedVTablePtr field;
+};
+struct RelocatableEmbeddedAddressDiscriminatedPolymorphicClass trivially_relocatable_if_eligible {
+ AddressDiscriminatedVTablePtr field;
+};
+
+static_assert( __is_pod(AddressDiscriminatedPtr) == !ADDR_DISC_ENABLED);
+static_assert( __is_pod(AddressDiscriminatedInt64) == !ADDR_DISC_ENABLED);
+static_assert( __is_pod(AddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __is_pod(RelocatableAddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert(!__is_pod(AddressDiscriminatedFieldInBaseClass));
+static_assert(!__is_pod(NonAddressDiscriminatedVTablePtr));
+static_assert(!__is_pod(NonAddressDiscriminatedVTablePtr2));
+static_assert(!__is_pod(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert(!__is_pod(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!__is_pod(AddressDiscriminatedVTablePtr));
+static_assert(!__is_pod(RelocatableAddressDiscriminatedVTablePtr));
+static_assert(!__is_pod(NoAddressDiscriminatedBaseClasses));
+static_assert(!__is_pod(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!__is_pod(AddressDiscriminatedPrimaryBase));
+static_assert(!__is_pod(AddressDiscriminatedSecondaryBase));
+static_assert(!__is_pod(RelocatableAddressDiscriminatedPrimaryBase));
+static_assert(!__is_pod(RelocatableAddressDiscriminatedSecondaryBase));
+static_assert(!__is_pod(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!__is_pod(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
+
+static_assert( __is_standard_layout(AddressDiscriminatedPtr));
+static_assert( __is_standard_layout(AddressDiscriminatedInt64));
+static_assert( __is_standard_layout(AddressDiscriminatedFields));
+static_assert( __is_standard_layout(RelocatableAddressDiscriminatedFields));
+static_assert(!__is_standard_layout(AddressDiscriminatedFieldInBaseClass));
+static_assert(!__is_standard_layout(NonAddressDiscriminatedVTablePtr));
+static_assert(!__is_standard_layout(NonAddressDiscriminatedVTablePtr2));
+static_assert(!__is_standard_layout(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert(!__is_standard_layout(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!__is_standard_layout(AddressDiscriminatedVTablePtr));
+static_assert(!__is_standard_layout(RelocatableAddressDiscriminatedVTablePtr));
+static_assert(!__is_standard_layout(NoAddressDiscriminatedBaseClasses));
+static_assert(!__is_standard_layout(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!__is_standard_layout(AddressDiscriminatedPrimaryBase));
+static_assert(!__is_standard_layout(AddressDiscriminatedSecondaryBase));
+static_assert(!__is_standard_layout(RelocatableAddressDiscriminatedPrimaryBase));
+static_assert(!__is_standard_layout(RelocatableAddressDiscriminatedSecondaryBase));
+static_assert(!__is_standard_layout(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!__is_standard_layout(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
+
+static_assert( __has_trivial_move_constructor(AddressDiscriminatedPtr) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_move_constructor(AddressDiscriminatedInt64) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_move_constructor(AddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_move_constructor(RelocatableAddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_move_constructor(AddressDiscriminatedFieldInBaseClass) == !ADDR_DISC_ENABLED);
+static_assert(!__has_trivial_move_constructor(NonAddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_move_constructor(NonAddressDiscriminatedVTablePtr2));
+static_assert(!__has_trivial_move_constructor(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_move_constructor(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!__has_trivial_move_constructor(AddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_move_constructor(RelocatableAddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_move_constructor(NoAddressDiscriminatedBaseClasses));
+static_assert(!__has_trivial_move_constructor(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!__has_trivial_move_constructor(AddressDiscriminatedPrimaryBase));
+static_assert(!__has_trivial_move_constructor(AddressDiscriminatedSecondaryBase));
+static_assert(!__has_trivial_move_constructor(RelocatableAddressDiscriminatedPrimaryBase));
+static_assert(!__has_trivial_move_constructor(RelocatableAddressDiscriminatedSecondaryBase));
+static_assert(!__has_trivial_move_constructor(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!__has_trivial_move_constructor(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
+
+static_assert( __has_trivial_copy(AddressDiscriminatedPtr) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_copy(AddressDiscriminatedInt64) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_copy(AddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_copy(RelocatableAddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_copy(AddressDiscriminatedFieldInBaseClass) == !ADDR_DISC_ENABLED);
+static_assert(!__has_trivial_copy(NonAddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_copy(NonAddressDiscriminatedVTablePtr2));
+static_assert(!__has_trivial_copy(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_copy(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!__has_trivial_copy(AddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_copy(RelocatableAddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_copy(NoAddressDiscriminatedBaseClasses));
+static_assert(!__has_trivial_copy(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!__has_trivial_copy(AddressDiscriminatedPrimaryBase));
+static_assert(!__has_trivial_copy(AddressDiscriminatedSecondaryBase));
+static_assert(!__has_trivial_copy(RelocatableAddressDiscriminatedPrimaryBase));
+static_assert(!__has_trivial_copy(RelocatableAddressDiscriminatedSecondaryBase));
+static_assert(!__has_trivial_copy(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!__has_trivial_copy(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
+
+static_assert( __has_trivial_assign(AddressDiscriminatedPtr) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_assign(AddressDiscriminatedInt64) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_assign(AddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_assign(RelocatableAddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_assign(AddressDiscriminatedFieldInBaseClass) == !ADDR_DISC_ENABLED);
+static_assert(!__has_trivial_assign(NonAddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_assign(NonAddressDiscriminatedVTablePtr2));
+static_assert(!__has_trivial_assign(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_assign(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!__has_trivial_assign(AddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_assign(RelocatableAddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_assign(NoAddressDiscriminatedBaseClasses));
+static_assert(!__has_trivial_assign(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!__has_trivial_assign(AddressDiscriminatedPrimaryBase));
+static_assert(!__has_trivial_assign(AddressDiscriminatedSecondaryBase));
+static_assert(!__has_trivial_assign(RelocatableAddressDiscriminatedPrimaryBase));
+static_assert(!__has_trivial_assign(RelocatableAddressDiscriminatedSecondaryBase));
+static_assert(!__has_trivial_assign(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!__has_trivial_assign(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
+
+static_assert( __has_trivial_move_assign(AddressDiscriminatedPtr) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_move_assign(AddressDiscriminatedInt64) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_move_assign(AddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_move_assign(RelocatableAddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __has_trivial_move_assign(AddressDiscriminatedFieldInBaseClass) == !ADDR_DISC_ENABLED);
+static_assert(!__has_trivial_move_assign(NonAddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_move_assign(NonAddressDiscriminatedVTablePtr2));
+static_assert(!__has_trivial_move_assign(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_move_assign(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!__has_trivial_move_assign(AddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_move_assign(RelocatableAddressDiscriminatedVTablePtr));
+static_assert(!__has_trivial_move_assign(NoAddressDiscriminatedBaseClasses));
+static_assert(!__has_trivial_move_assign(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!__has_trivial_move_assign(AddressDiscriminatedPrimaryBase));
+static_assert(!__has_trivial_move_assign(AddressDiscriminatedSecondaryBase));
+static_assert(!__has_trivial_move_assign(RelocatableAddressDiscriminatedPrimaryBase));
+static_assert(!__has_trivial_move_assign(RelocatableAddressDiscriminatedSecondaryBase));
+static_assert(!__has_trivial_move_assign(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!__has_trivial_move_assign(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
+
+static_assert( __is_trivial(AddressDiscriminatedPtr) == !ADDR_DISC_ENABLED);
+static_assert( __is_trivial(AddressDiscriminatedInt64) == !ADDR_DISC_ENABLED);
+static_assert( __is_trivial(AddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __is_trivial(RelocatableAddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __is_trivial(AddressDiscriminatedFieldInBaseClass) == !ADDR_DISC_ENABLED);
+static_assert(!__is_trivial(NonAddressDiscriminatedVTablePtr));
+static_assert(!__is_trivial(NonAddressDiscriminatedVTablePtr2));
+static_assert(!__is_trivial(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert(!__is_trivial(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!__is_trivial(AddressDiscriminatedVTablePtr));
+static_assert(!__is_trivial(RelocatableAddressDiscriminatedVTablePtr));
+static_assert(!__is_trivial(NoAddressDiscriminatedBaseClasses));
+static_assert(!__is_trivial(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!__is_trivial(AddressDiscriminatedPrimaryBase));
+static_assert(!__is_trivial(AddressDiscriminatedSecondaryBase));
+static_assert(!__is_trivial(RelocatableAddressDiscriminatedPrimaryBase));
+static_assert(!__is_trivial(RelocatableAddressDiscriminatedSecondaryBase));
+static_assert(!__is_trivial(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!__is_trivial(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
+
+static_assert( __is_trivially_copyable(AddressDiscriminatedPtr) == !ADDR_DISC_ENABLED);
+static_assert( __is_trivially_copyable(AddressDiscriminatedInt64) == !ADDR_DISC_ENABLED);
+static_assert( __is_trivially_copyable(AddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __is_trivially_copyable(RelocatableAddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __is_trivially_copyable(AddressDiscriminatedFieldInBaseClass) == !ADDR_DISC_ENABLED);
+static_assert(!__is_trivially_copyable(NonAddressDiscriminatedVTablePtr));
+static_assert(!__is_trivially_copyable(NonAddressDiscriminatedVTablePtr2));
+static_assert(!__is_trivially_copyable(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert(!__is_trivially_copyable(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!__is_trivially_copyable(AddressDiscriminatedVTablePtr));
+static_assert(!__is_trivially_copyable(RelocatableAddressDiscriminatedVTablePtr));
+static_assert(!__is_trivially_copyable(NoAddressDiscriminatedBaseClasses));
+static_assert(!__is_trivially_copyable(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!__is_trivially_copyable(AddressDiscriminatedPrimaryBase));
+static_assert(!__is_trivially_copyable(AddressDiscriminatedSecondaryBase));
+static_assert(!__is_trivially_copyable(RelocatableAddressDiscriminatedPrimaryBase));
+static_assert(!__is_trivially_copyable(RelocatableAddressDiscriminatedSecondaryBase));
+static_assert(!__is_trivially_copyable(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!__is_trivially_copyable(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
+
+static_assert( __is_trivially_equality_comparable(AddressDiscriminatedPtr) == !ADDR_DISC_ENABLED);
+static_assert( __is_trivially_equality_comparable(AddressDiscriminatedInt64) == !ADDR_DISC_ENABLED);
+static_assert(!__is_trivially_equality_comparable(AddressDiscriminatedFields));
+static_assert(!__is_trivially_equality_comparable(RelocatableAddressDiscriminatedFields));
+static_assert(!__is_trivially_equality_comparable(AddressDiscriminatedFieldInBaseClass));
+static_assert(!__is_trivially_equality_comparable(NonAddressDiscriminatedVTablePtr));
+static_assert(!__is_trivially_equality_comparable(NonAddressDiscriminatedVTablePtr2));
+static_assert(!__is_trivially_equality_comparable(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert(!__is_trivially_equality_comparable(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!__is_trivially_equality_comparable(AddressDiscriminatedVTablePtr));
+static_assert(!__is_trivially_equality_comparable(RelocatableAddressDiscriminatedVTablePtr));
+static_assert(!__is_trivially_equality_comparable(NoAddressDiscriminatedBaseClasses));
+static_assert(!__is_trivially_equality_comparable(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!__is_trivially_equality_comparable(AddressDiscriminatedPrimaryBase));
+static_assert(!__is_trivially_equality_comparable(AddressDiscriminatedSecondaryBase));
+static_assert(!__is_trivially_equality_comparable(RelocatableAddressDiscriminatedPrimaryBase));
+static_assert(!__is_trivially_equality_comparable(RelocatableAddressDiscriminatedSecondaryBase));
+static_assert(!__is_trivially_equality_comparable(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!__is_trivially_equality_comparable(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
+
+static_assert( __is_trivially_relocatable(AddressDiscriminatedPtr) == !ADDR_DISC_ENABLED);
+static_assert( __is_trivially_relocatable(AddressDiscriminatedInt64) == !ADDR_DISC_ENABLED);
+static_assert( __is_trivially_relocatable(AddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __is_trivially_relocatable(RelocatableAddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __is_trivially_relocatable(AddressDiscriminatedFieldInBaseClass) == !ADDR_DISC_ENABLED);
+static_assert(!__is_trivially_relocatable(NonAddressDiscriminatedVTablePtr));
+static_assert(!__is_trivially_relocatable(NonAddressDiscriminatedVTablePtr2));
+static_assert(!__is_trivially_relocatable(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert(!__is_trivially_relocatable(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!__is_trivially_relocatable(AddressDiscriminatedVTablePtr));
+static_assert(!__is_trivially_relocatable(RelocatableAddressDiscriminatedVTablePtr));
+static_assert(!__is_trivially_relocatable(NoAddressDiscriminatedBaseClasses));
+static_assert(!__is_trivially_relocatable(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!__is_trivially_relocatable(AddressDiscriminatedPrimaryBase));
+static_assert(!__is_trivially_relocatable(AddressDiscriminatedSecondaryBase));
+static_assert(!__is_trivially_relocatable(RelocatableAddressDiscriminatedPrimaryBase));
+static_assert(!__is_trivially_relocatable(RelocatableAddressDiscriminatedSecondaryBase));
+static_assert(!__is_trivially_relocatable(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!__is_trivially_relocatable(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
+
+static_assert( __builtin_is_cpp_trivially_relocatable(AddressDiscriminatedPtr) == !ADDR_DISC_ENABLED);
+static_assert( __builtin_is_cpp_trivially_relocatable(AddressDiscriminatedInt64) == !ADDR_DISC_ENABLED);
+static_assert( __builtin_is_cpp_trivially_relocatable(AddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __builtin_is_cpp_trivially_relocatable(RelocatableAddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __builtin_is_cpp_trivially_relocatable(AddressDiscriminatedFieldInBaseClass) == !ADDR_DISC_ENABLED);
+static_assert(!__builtin_is_cpp_trivially_relocatable(NonAddressDiscriminatedVTablePtr));
+static_assert(!__builtin_is_cpp_trivially_relocatable(NonAddressDiscriminatedVTablePtr2));
+static_assert( __builtin_is_cpp_trivially_relocatable(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert( __builtin_is_cpp_trivially_relocatable(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!__builtin_is_cpp_trivially_relocatable(AddressDiscriminatedVTablePtr));
+static_assert( __builtin_is_cpp_trivially_relocatable(RelocatableAddressDiscriminatedVTablePtr) == !ADDR_DISC_ENABLED);
+static_assert(!__builtin_is_cpp_trivially_relocatable(NoAddressDiscriminatedBaseClasses));
+static_assert(!__builtin_is_cpp_trivially_relocatable(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!__builtin_is_cpp_trivially_relocatable(AddressDiscriminatedPrimaryBase));
+static_assert(!__builtin_is_cpp_trivially_relocatable(AddressDiscriminatedSecondaryBase));
+static_assert( __builtin_is_cpp_trivially_relocatable(RelocatableAddressDiscriminatedPrimaryBase) == !ADDR_DISC_ENABLED);
+static_assert( __builtin_is_cpp_trivially_relocatable(RelocatableAddressDiscriminatedSecondaryBase) == !ADDR_DISC_ENABLED);
+static_assert(!__builtin_is_cpp_trivially_relocatable(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!__builtin_is_cpp_trivially_relocatable(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
+
+static_assert( __builtin_is_replaceable(AddressDiscriminatedPtr));
+static_assert( __builtin_is_replaceable(AddressDiscriminatedInt64));
+static_assert( __builtin_is_replaceable(AddressDiscriminatedFields));
+static_assert( __builtin_is_replaceable(RelocatableAddressDiscriminatedFields));
+static_assert( __builtin_is_replaceable(AddressDiscriminatedFieldInBaseClass));
+static_assert(!__builtin_is_replaceable(NonAddressDiscriminatedVTablePtr));
+static_assert(!__builtin_is_replaceable(NonAddressDiscriminatedVTablePtr2));
+static_assert(!__builtin_is_replaceable(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert(!__builtin_is_replaceable(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!__builtin_is_replaceable(AddressDiscriminatedVTablePtr));
+static_assert(!__builtin_is_replaceable(RelocatableAddressDiscriminatedVTablePtr));
+static_assert(!__builtin_is_replaceable(NoAddressDiscriminatedBaseClasses));
+static_assert(!__builtin_is_replaceable(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!__builtin_is_replaceable(AddressDiscriminatedPrimaryBase));
+static_assert(!__builtin_is_replaceable(AddressDiscriminatedSecondaryBase));
+static_assert(!__builtin_is_replaceable(RelocatableAddressDiscriminatedPrimaryBase));
+static_assert(!__builtin_is_replaceable(RelocatableAddressDiscriminatedSecondaryBase));
+static_assert(!__builtin_is_replaceable(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!__builtin_is_replaceable(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
+
+static_assert( __is_bitwise_cloneable(AddressDiscriminatedPtr) == !ADDR_DISC_ENABLED);
+static_assert( __is_bitwise_cloneable(AddressDiscriminatedInt64) == !ADDR_DISC_ENABLED);
+static_assert( __is_bitwise_cloneable(AddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __is_bitwise_cloneable(RelocatableAddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __is_bitwise_cloneable(AddressDiscriminatedFieldInBaseClass) == !ADDR_DISC_ENABLED);
+static_assert( __is_bitwise_cloneable(NonAddressDiscriminatedVTablePtr));
+static_assert( __is_bitwise_cloneable(NonAddressDiscriminatedVTablePtr2));
+static_assert( __is_bitwise_cloneable(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert( __is_bitwise_cloneable(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert( __is_bitwise_cloneable(AddressDiscriminatedVTablePtr) == !ADDR_DISC_ENABLED);
+static_assert( __is_bitwise_cloneable(RelocatableAddressDiscriminatedVTablePtr) == !ADDR_DISC_ENABLED);
+static_assert( __is_bitwise_cloneable(NoAddressDiscriminatedBaseClasses));
+static_assert( __is_bitwise_cloneable(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert( __is_bitwise_cloneable(AddressDiscriminatedPrimaryBase) == !ADDR_DISC_ENABLED);
+static_assert( __is_bitwise_cloneable(AddressDiscriminatedSecondaryBase) == !ADDR_DISC_ENABLED);
+static_assert( __is_bitwise_cloneable(RelocatableAddressDiscriminatedPrimaryBase) == !ADDR_DISC_ENABLED);
+static_assert( __is_bitwise_cloneable(RelocatableAddressDiscriminatedSecondaryBase) == !ADDR_DISC_ENABLED);
+static_assert( __is_bitwise_cloneable(EmbeddedAddressDiscriminatedPolymorphicClass) == !ADDR_DISC_ENABLED);
+static_assert( __is_bitwise_cloneable(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass) == !ADDR_DISC_ENABLED);
+
+static_assert( __has_unique_object_representations(AddressDiscriminatedPtr) == !ADDR_DISC_ENABLED);
+static_assert( __has_unique_object_representations(AddressDiscriminatedInt64) == !ADDR_DISC_ENABLED);
+static_assert( __has_unique_object_representations(AddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __has_unique_object_representations(RelocatableAddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( __has_unique_object_representations(AddressDiscriminatedFieldInBaseClass) == !ADDR_DISC_ENABLED);
+static_assert(!__has_unique_object_representations(NonAddressDiscriminatedVTablePtr));
+static_assert(!__has_unique_object_representations(NonAddressDiscriminatedVTablePtr2));
+static_assert(!__has_unique_object_representations(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert(!__has_unique_object_representations(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!__has_unique_object_representations(AddressDiscriminatedVTablePtr));
+static_assert(!__has_unique_object_representations(RelocatableAddressDiscriminatedVTablePtr));
+static_assert(!__has_unique_object_representations(NoAddressDiscriminatedBaseClasses));
+static_assert(!__has_unique_object_representations(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!__has_unique_object_representations(AddressDiscriminatedPrimaryBase));
+static_assert(!__has_unique_object_representations(AddressDiscriminatedSecondaryBase));
+static_assert(!__has_unique_object_representations(RelocatableAddressDiscriminatedPrimaryBase));
+static_assert(!__has_unique_object_representations(RelocatableAddressDiscriminatedSecondaryBase));
+static_assert(!__has_unique_object_representations(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!__has_unique_object_representations(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
+
+#define ASSIGNABLE_WRAPPER(Type) __is_trivially_assignable(Type&, Type)
+static_assert( ASSIGNABLE_WRAPPER(AddressDiscriminatedPtr) == !ADDR_DISC_ENABLED);
+static_assert( ASSIGNABLE_WRAPPER(AddressDiscriminatedInt64) == !ADDR_DISC_ENABLED);
+static_assert( ASSIGNABLE_WRAPPER(AddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( ASSIGNABLE_WRAPPER(RelocatableAddressDiscriminatedFields) == !ADDR_DISC_ENABLED);
+static_assert( ASSIGNABLE_WRAPPER(AddressDiscriminatedFieldInBaseClass) == !ADDR_DISC_ENABLED);
+static_assert(!ASSIGNABLE_WRAPPER(NonAddressDiscriminatedVTablePtr));
+static_assert(!ASSIGNABLE_WRAPPER(NonAddressDiscriminatedVTablePtr2));
+static_assert(!ASSIGNABLE_WRAPPER(RelocatableNonAddressDiscriminatedVTablePtr));
+static_assert(!ASSIGNABLE_WRAPPER(RelocatableNonAddressDiscriminatedVTablePtr2));
+static_assert(!ASSIGNABLE_WRAPPER(AddressDiscriminatedVTablePtr));
+static_assert(!ASSIGNABLE_WRAPPER(RelocatableAddressDiscriminatedVTablePtr));
+static_assert(!ASSIGNABLE_WRAPPER(NoAddressDiscriminatedBaseClasses));
+static_assert(!ASSIGNABLE_WRAPPER(RelocatableNoAddressDiscriminatedBaseClasses));
+static_assert(!ASSIGNABLE_WRAPPER(AddressDiscriminatedPrimaryBase));
+static_assert(!ASSIGNABLE_WRAPPER(AddressDiscriminatedSecondaryBase));
+static_assert(!ASSIGNABLE_WRAPPER(RelocatableAddressDiscriminatedPrimaryBase));
+static_assert(!ASSIGNABLE_WRAPPER(RelocatableAddressDiscriminatedSecondaryBase));
+static_assert(!ASSIGNABLE_WRAPPER(EmbeddedAddressDiscriminatedPolymorphicClass));
+static_assert(!ASSIGNABLE_WRAPPER(RelocatableEmbeddedAddressDiscriminatedPolymorphicClass));
diff --git a/clang/test/SemaCXX/static-assert.cpp b/clang/test/SemaCXX/static-assert.cpp
index bf6a2ee..354016d 100644
--- a/clang/test/SemaCXX/static-assert.cpp
+++ b/clang/test/SemaCXX/static-assert.cpp
@@ -199,7 +199,7 @@ void foo2() {
// FIXME: Here the template keyword is dropped because the failed condition
// for a static assert is always printed with canonical types.
static_assert(::ns::NestedTemplates1<T, a>::NestedTemplates2::template NestedTemplates3<U>::value, "message");
- // expected-error@-1{{static assertion failed due to requirement '::ns::NestedTemplates1<int, 3>::NestedTemplates2::NestedTemplates3<float>::value': message}}
+ // expected-error@-1{{static assertion failed due to requirement 'ns::NestedTemplates1<int, 3>::NestedTemplates2::NestedTemplates3<float>::value': message}}
}
template void foo2<int, float, 3>();
// expected-note@-1{{in instantiation of function template specialization 'foo2<int, float, 3>' requested here}}
diff --git a/clang/test/SemaCXX/sugar-common-types.cpp b/clang/test/SemaCXX/sugar-common-types.cpp
index d58f6cd..dd5fc4a 100644
--- a/clang/test/SemaCXX/sugar-common-types.cpp
+++ b/clang/test/SemaCXX/sugar-common-types.cpp
@@ -44,7 +44,8 @@ template <class T> struct S1 {
};
N t10 = 0 ? S1<X1>() : S1<Y1>(); // expected-error {{from 'S1<B1>' (aka 'S1<int>')}}
-N t11 = 0 ? S1<X1>::S2<X2>() : S1<Y1>::S2<Y2>(); // expected-error {{from 'S1<B1>::S2<B2>' (aka 'S2<void>')}}
+// FIXME: needs to compute common sugar for qualified template names
+N t11 = 0 ? S1<X1>::S2<X2>() : S1<Y1>::S2<Y2>(); // expected-error {{from 'S1<int>::S2<B2>' (aka 'S1<int>::S2<void>')}}
template <class T> using Al = S1<T>;
@@ -200,5 +201,5 @@ namespace member_pointers {
// FIXME: adjusted MemberPointer does not preserve qualifier
N t3 = 0 ? &W1::a : &W2::b;
- // expected-error@-1 {{rvalue of type 'B1 W<void>::*'}}
+ // expected-error@-1 {{rvalue of type 'B1 member_pointers::W<void>::*'}}
} // namespace member_pointers
diff --git a/clang/test/SemaCXX/sugared-auto.cpp b/clang/test/SemaCXX/sugared-auto.cpp
index b5bb4f0..cf879ef 100644
--- a/clang/test/SemaCXX/sugared-auto.cpp
+++ b/clang/test/SemaCXX/sugared-auto.cpp
@@ -54,7 +54,7 @@ N t4 = x4; // expected-error {{lvalue of type 'Man' (aka 'int')}}
N t5 = x5; // expected-error {{lvalue of type 'Dog' (aka 'int')}}
auto x6 = { Man(), Dog() };
-N t6 = x6; // expected-error {{from 'std::initializer_list<Animal>' (aka 'initializer_list<int>')}}
+N t6 = x6; // expected-error {{from 'std::initializer_list<Animal>' (aka 'std::initializer_list<int>')}}
} // namespace variable
diff --git a/clang/test/SemaCXX/trivially-relocatable-ptrauth.cpp b/clang/test/SemaCXX/trivially-relocatable-ptrauth.cpp
index b38499a..4a907b8 100644
--- a/clang/test/SemaCXX/trivially-relocatable-ptrauth.cpp
+++ b/clang/test/SemaCXX/trivially-relocatable-ptrauth.cpp
@@ -57,7 +57,7 @@ struct Foo : Polymorphic {
};
-static_assert(__builtin_is_cpp_trivially_relocatable(Polymorphic));
+static_assert(!__builtin_is_cpp_trivially_relocatable(Polymorphic));
struct [[clang::ptrauth_vtable_pointer(process_independent,no_address_discrimination,no_extra_discrimination)]] NonAddressDiscriminatedPolymorphic trivially_relocatable_if_eligible {
virtual ~NonAddressDiscriminatedPolymorphic();
@@ -70,7 +70,7 @@ struct PolymorphicMembers {
Polymorphic field;
};
-static_assert(__builtin_is_cpp_trivially_relocatable(PolymorphicMembers));
+static_assert(!__builtin_is_cpp_trivially_relocatable(PolymorphicMembers));
struct UnionOfPolymorphic {
union trivially_relocatable_if_eligible {
diff --git a/clang/test/SemaCXX/type-aware-coroutines.cpp b/clang/test/SemaCXX/type-aware-coroutines.cpp
index 742e5f0..e41d07b 100644
--- a/clang/test/SemaCXX/type-aware-coroutines.cpp
+++ b/clang/test/SemaCXX/type-aware-coroutines.cpp
@@ -93,7 +93,7 @@ struct resumable5 {
};
resumable f1(int) {
- // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable, int>::promise_type' (aka 'resumable::promise_type') is not usable with the function signature of 'f1'}}
+ // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable, int>::promise_type' (aka 'typename resumable::promise_type') is not usable with the function signature of 'f1'}}
// expected-note@-2 {{type aware 'operator new' will not be used for coroutine allocation}}
// expected-note@#resumable_tan1 {{type aware 'operator new' declared here}}
// expected-note@#resumable_tan2 {{type aware 'operator new' declared here}}
@@ -101,7 +101,7 @@ resumable f1(int) {
}
resumable f2(float) {
- // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable, float>::promise_type' (aka 'resumable::promise_type') is not usable with the function signature of 'f2'}}
+ // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable, float>::promise_type' (aka 'typename resumable::promise_type') is not usable with the function signature of 'f2'}}
// expected-note@-2 {{type aware 'operator new' will not be used for coroutine allocation}}
// expected-note@#resumable_tan1 {{type aware 'operator new' declared here}}
// expected-note@#resumable_tan2 {{type aware 'operator new' declared here}}
@@ -109,7 +109,7 @@ resumable f2(float) {
}
resumable2 f3(int, float, const char*, Allocator) {
- // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable2, int, float, const char *, Allocator>::promise_type' (aka 'resumable2::promise_type') is not usable with the function signature of 'f3'}}
+ // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable2, int, float, const char *, Allocator>::promise_type' (aka 'typename resumable2::promise_type') is not usable with the function signature of 'f3'}}
// expected-note@-2 {{type aware 'operator new' will not be used for coroutine allocation}}
// expected-note@#resumable2_tan1 {{type aware 'operator new' declared here}}
co_yield 1;
@@ -117,7 +117,7 @@ resumable2 f3(int, float, const char*, Allocator) {
}
resumable f4(int n = 10) {
- // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable, int>::promise_type' (aka 'resumable::promise_type') is not usable with the function signature of 'f4'}}
+ // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable, int>::promise_type' (aka 'typename resumable::promise_type') is not usable with the function signature of 'f4'}}
// expected-note@-2 {{type aware 'operator new' will not be used for coroutine allocation}}
// expected-note@#resumable_tan1 {{type aware 'operator new' declared here}}
// expected-note@#resumable_tan2 {{type aware 'operator new' declared here}}
diff --git a/clang/test/SemaCXX/type-trait-synthesises-from-spaceship.cpp b/clang/test/SemaCXX/type-trait-synthesises-from-spaceship.cpp
new file mode 100644
index 0000000..ba58147
--- /dev/null
+++ b/clang/test/SemaCXX/type-trait-synthesises-from-spaceship.cpp
@@ -0,0 +1,212 @@
+// RUN: %clang_cc1 -fsyntax-only -verify -std=c++20 %s
+
+static_assert(!__builtin_lt_synthesises_from_spaceship()); // expected-error {{expected a type}}
+static_assert(!__builtin_lt_synthesises_from_spaceship(int)); // expected-error {{type trait requires 2 arguments; have 1 argument}}
+static_assert(!__builtin_lt_synthesises_from_spaceship(int, int, int)); // expected-error {{type trait requires 2 arguments; have 3 arguments}}
+static_assert(!__builtin_lt_synthesises_from_spaceship(int, 0)); // expected-error {{expected a type}}
+
+static_assert(!__builtin_le_synthesises_from_spaceship()); // expected-error {{expected a type}}
+static_assert(!__builtin_le_synthesises_from_spaceship(int)); // expected-error {{type trait requires 2 arguments; have 1 argument}}
+static_assert(!__builtin_le_synthesises_from_spaceship(int, int, int)); // expected-error {{type trait requires 2 arguments; have 3 arguments}}
+static_assert(!__builtin_le_synthesises_from_spaceship(int, 0)); // expected-error {{expected a type}}
+
+static_assert(!__builtin_gt_synthesises_from_spaceship()); // expected-error {{expected a type}}
+static_assert(!__builtin_gt_synthesises_from_spaceship(int)); // expected-error {{type trait requires 2 arguments; have 1 argument}}
+static_assert(!__builtin_gt_synthesises_from_spaceship(int, int, int)); // expected-error {{type trait requires 2 arguments; have 3 arguments}}
+static_assert(!__builtin_gt_synthesises_from_spaceship(int, 0)); // expected-error {{expected a type}}
+
+static_assert(!__builtin_ge_synthesises_from_spaceship()); // expected-error {{expected a type}}
+static_assert(!__builtin_ge_synthesises_from_spaceship(int)); // expected-error {{type trait requires 2 arguments; have 1 argument}}
+static_assert(!__builtin_ge_synthesises_from_spaceship(int, int, int)); // expected-error {{type trait requires 2 arguments; have 3 arguments}}
+static_assert(!__builtin_ge_synthesises_from_spaceship(int, 0)); // expected-error {{expected a type}}
+
+namespace std {
+ struct strong_ordering {
+ int n;
+ constexpr operator int() const { return n; }
+ static const strong_ordering less, equal, greater;
+ };
+ constexpr strong_ordering strong_ordering::less = {-1};
+ constexpr strong_ordering strong_ordering::equal = {0};
+ constexpr strong_ordering strong_ordering::greater = {1};
+}
+
+struct DefaultSpaceship {
+ friend auto operator<=>(DefaultSpaceship, DefaultSpaceship) = default;
+};
+
+static_assert(__builtin_lt_synthesises_from_spaceship(const DefaultSpaceship&, const DefaultSpaceship&));
+static_assert(__builtin_le_synthesises_from_spaceship(const DefaultSpaceship&, const DefaultSpaceship&));
+static_assert(__builtin_gt_synthesises_from_spaceship(const DefaultSpaceship&, const DefaultSpaceship&));
+static_assert(__builtin_ge_synthesises_from_spaceship(const DefaultSpaceship&, const DefaultSpaceship&));
+
+struct CustomSpaceship {
+ int i;
+
+ friend auto operator<=>(CustomSpaceship lhs, CustomSpaceship rhs) {
+ return rhs.i <=> lhs.i;
+ }
+};
+
+static_assert(__builtin_lt_synthesises_from_spaceship(const CustomSpaceship&, const CustomSpaceship&));
+static_assert(__builtin_le_synthesises_from_spaceship(const CustomSpaceship&, const CustomSpaceship&));
+static_assert(__builtin_gt_synthesises_from_spaceship(const CustomSpaceship&, const CustomSpaceship&));
+static_assert(__builtin_ge_synthesises_from_spaceship(const CustomSpaceship&, const CustomSpaceship&));
+
+struct CustomLT {
+ int i;
+
+ friend auto operator<(CustomLT lhs, CustomLT rhs) {
+ return rhs.i < lhs.i;
+ }
+};
+
+static_assert(!__builtin_lt_synthesises_from_spaceship(const CustomLT&, const CustomLT&));
+static_assert(!__builtin_le_synthesises_from_spaceship(const CustomLT&, const CustomLT&));
+static_assert(!__builtin_gt_synthesises_from_spaceship(const CustomLT&, const CustomLT&));
+static_assert(!__builtin_ge_synthesises_from_spaceship(const CustomLT&, const CustomLT&));
+
+struct CustomLE {
+ int i;
+
+ friend auto operator<=(CustomLE lhs, CustomLE rhs) {
+ return rhs.i < lhs.i;
+ }
+};
+
+static_assert(!__builtin_lt_synthesises_from_spaceship(const CustomLE&, const CustomLE&));
+static_assert(!__builtin_le_synthesises_from_spaceship(const CustomLE&, const CustomLE&));
+static_assert(!__builtin_gt_synthesises_from_spaceship(const CustomLE&, const CustomLE&));
+static_assert(!__builtin_ge_synthesises_from_spaceship(const CustomLE&, const CustomLE&));
+
+struct CustomGT {
+ int i;
+
+ friend auto operator>(CustomGT lhs, CustomGT rhs) {
+ return rhs.i < lhs.i;
+ }
+};
+
+static_assert(!__builtin_lt_synthesises_from_spaceship(const CustomGT&, const CustomGT&));
+static_assert(!__builtin_le_synthesises_from_spaceship(const CustomGT&, const CustomGT&));
+static_assert(!__builtin_gt_synthesises_from_spaceship(const CustomGT&, const CustomGT&));
+static_assert(!__builtin_ge_synthesises_from_spaceship(const CustomGT&, const CustomGT&));
+
+struct CustomGE {
+ int i;
+
+ friend auto operator>=(CustomGE lhs, CustomGE rhs) {
+ return rhs.i < lhs.i;
+ }
+};
+
+static_assert(!__builtin_lt_synthesises_from_spaceship(const CustomGE&, const CustomGE&));
+static_assert(!__builtin_le_synthesises_from_spaceship(const CustomGE&, const CustomGE&));
+static_assert(!__builtin_gt_synthesises_from_spaceship(const CustomGE&, const CustomGE&));
+static_assert(!__builtin_ge_synthesises_from_spaceship(const CustomGE&, const CustomGE&));
+
+struct CustomLTAndSpaceship {
+ int i;
+
+ friend auto operator<=>(CustomLTAndSpaceship lhs, CustomLTAndSpaceship rhs) {
+ return rhs.i <=> lhs.i;
+ }
+
+ friend auto operator<(CustomLTAndSpaceship lhs, CustomLTAndSpaceship rhs) {
+ return rhs.i < lhs.i;
+ }
+};
+
+static_assert(!__builtin_lt_synthesises_from_spaceship(const CustomLTAndSpaceship&, const CustomLTAndSpaceship&));
+static_assert(__builtin_le_synthesises_from_spaceship(const CustomLTAndSpaceship&, const CustomLTAndSpaceship&));
+static_assert(__builtin_gt_synthesises_from_spaceship(const CustomLTAndSpaceship&, const CustomLTAndSpaceship&));
+static_assert(__builtin_ge_synthesises_from_spaceship(const CustomLTAndSpaceship&, const CustomLTAndSpaceship&));
+
+struct CustomLEAndSpaceship {
+ int i;
+
+ friend auto operator<=>(CustomLEAndSpaceship lhs, CustomLEAndSpaceship rhs) {
+ return rhs.i <=> lhs.i;
+ }
+
+ friend auto operator<=(CustomLEAndSpaceship lhs, CustomLEAndSpaceship rhs) {
+ return rhs.i < lhs.i;
+ }
+};
+
+static_assert(__builtin_lt_synthesises_from_spaceship(const CustomLEAndSpaceship&, const CustomLEAndSpaceship&));
+static_assert(!__builtin_le_synthesises_from_spaceship(const CustomLEAndSpaceship&, const CustomLEAndSpaceship&));
+static_assert(__builtin_gt_synthesises_from_spaceship(const CustomLEAndSpaceship&, const CustomLEAndSpaceship&));
+static_assert(__builtin_ge_synthesises_from_spaceship(const CustomLEAndSpaceship&, const CustomLEAndSpaceship&));
+
+struct CustomGTAndSpaceship {
+ int i;
+
+ friend auto operator<=>(CustomGTAndSpaceship lhs, CustomGTAndSpaceship rhs) {
+ return rhs.i <=> lhs.i;
+ }
+
+ friend auto operator>(CustomGTAndSpaceship lhs, CustomGTAndSpaceship rhs) {
+ return rhs.i < lhs.i;
+ }
+};
+
+static_assert(__builtin_lt_synthesises_from_spaceship(const CustomGTAndSpaceship&, const CustomGTAndSpaceship&));
+static_assert(__builtin_le_synthesises_from_spaceship(const CustomGTAndSpaceship&, const CustomGTAndSpaceship&));
+static_assert(!__builtin_gt_synthesises_from_spaceship(const CustomGTAndSpaceship&, const CustomGTAndSpaceship&));
+static_assert(__builtin_ge_synthesises_from_spaceship(const CustomGTAndSpaceship&, const CustomGTAndSpaceship&));
+
+struct CustomGEAndSpaceship {
+ int i;
+
+ friend auto operator<=>(CustomGEAndSpaceship lhs, CustomGEAndSpaceship rhs) {
+ return rhs.i <=> lhs.i;
+ }
+
+ friend auto operator>=(CustomGEAndSpaceship lhs, CustomGEAndSpaceship rhs) {
+ return rhs.i < lhs.i;
+ }
+};
+
+static_assert(__builtin_lt_synthesises_from_spaceship(const CustomGEAndSpaceship&, const CustomGEAndSpaceship&));
+static_assert(__builtin_le_synthesises_from_spaceship(const CustomGEAndSpaceship&, const CustomGEAndSpaceship&));
+static_assert(__builtin_gt_synthesises_from_spaceship(const CustomGEAndSpaceship&, const CustomGEAndSpaceship&));
+static_assert(!__builtin_ge_synthesises_from_spaceship(const CustomGEAndSpaceship&, const CustomGEAndSpaceship&));
+
+struct DefaultedCmpAndSpaceship {
+ int i;
+
+ friend auto operator<=>(DefaultedCmpAndSpaceship lhs, DefaultedCmpAndSpaceship rhs) {
+ return rhs.i <=> lhs.i;
+ }
+
+ friend bool operator<(DefaultedCmpAndSpaceship lhs, DefaultedCmpAndSpaceship rhs) = default;
+ friend bool operator<=(DefaultedCmpAndSpaceship lhs, DefaultedCmpAndSpaceship rhs) = default;
+ friend bool operator>(DefaultedCmpAndSpaceship lhs, DefaultedCmpAndSpaceship rhs) = default;
+ friend bool operator>=(DefaultedCmpAndSpaceship lhs, DefaultedCmpAndSpaceship rhs) = default;
+};
+
+// TODO: These should probably return true, since defaulted relational operators are specified to compare via operator<=>.
+static_assert(!__builtin_lt_synthesises_from_spaceship(const DefaultedCmpAndSpaceship&, const DefaultedCmpAndSpaceship&));
+static_assert(!__builtin_le_synthesises_from_spaceship(const DefaultedCmpAndSpaceship&, const DefaultedCmpAndSpaceship&));
+static_assert(!__builtin_gt_synthesises_from_spaceship(const DefaultedCmpAndSpaceship&, const DefaultedCmpAndSpaceship&));
+static_assert(!__builtin_ge_synthesises_from_spaceship(const DefaultedCmpAndSpaceship&, const DefaultedCmpAndSpaceship&));
+
+struct DifferentTypes {
+ int i;
+
+ friend auto operator<=>(DifferentTypes lhs, int rhs) {
+ return rhs <=> lhs.i;
+ }
+};
+
+static_assert(__builtin_lt_synthesises_from_spaceship(const DifferentTypes&, const int&));
+static_assert(__builtin_le_synthesises_from_spaceship(const DifferentTypes&, const int&));
+static_assert(__builtin_gt_synthesises_from_spaceship(const DifferentTypes&, const int&));
+static_assert(__builtin_ge_synthesises_from_spaceship(const DifferentTypes&, const int&));
+
+// TODO: Should this return true? It's technically not synthesized from spaceship, but it behaves exactly as if it were.
+static_assert(!__builtin_lt_synthesises_from_spaceship(int, int));
+static_assert(!__builtin_le_synthesises_from_spaceship(int, int));
+static_assert(!__builtin_gt_synthesises_from_spaceship(int, int));
+static_assert(!__builtin_ge_synthesises_from_spaceship(int, int));
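+
+// Illustrative sketch (an assumption, not part of the checked behaviour
+// above): a library could branch on the builtin to call operator<=> directly
+// whenever the relational operator would only be a rewrite of it anyway.
+template <class T>
+bool illustrative_less(const T& a, const T& b) {
+ if constexpr (__builtin_lt_synthesises_from_spaceship(const T&, const T&))
+ return (a <=> b) < 0; // same result the rewritten 'a < b' would produce
+ else
+ return a < b; // user-provided or built-in operator<
+}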
diff --git a/clang/test/SemaCXX/type-traits-unsatisfied-diags-std.cpp b/clang/test/SemaCXX/type-traits-unsatisfied-diags-std.cpp
index f3ddbbf..7c6c9ea 100644
--- a/clang/test/SemaCXX/type-traits-unsatisfied-diags-std.cpp
+++ b/clang/test/SemaCXX/type-traits-unsatisfied-diags-std.cpp
@@ -50,6 +50,14 @@ struct is_constructible {
template <typename... Args>
constexpr bool is_constructible_v = __is_constructible(Args...);
+
+template <typename T>
+struct is_final {
+ static constexpr bool value = __is_final(T);
+};
+template <typename T>
+constexpr bool is_final_v = __is_final(T);
+
#endif
#ifdef STD2
@@ -116,6 +124,16 @@ using is_constructible = __details_is_constructible<Args...>;
template <typename... Args>
constexpr bool is_constructible_v = __is_constructible(Args...);
+
+template <typename T>
+struct __details_is_final {
+ static constexpr bool value = __is_final(T);
+};
+template <typename T>
+using is_final = __details_is_final<T>;
+template <typename T>
+constexpr bool is_final_v = __is_final(T);
+
#endif
@@ -177,6 +195,14 @@ using is_constructible = __details_is_constructible<Args...>;
template <typename... Args>
constexpr bool is_constructible_v = is_constructible<Args...>::value;
+
+template <typename T>
+struct __details_is_final : bool_constant<__is_final(T)> {};
+template <typename T>
+using is_final = __details_is_final<T>;
+template <typename T>
+constexpr bool is_final_v = is_final<T>::value;
+
#endif
}
@@ -248,6 +274,31 @@ static_assert(std::is_constructible_v<void>);
// expected-error@-1 {{static assertion failed due to requirement 'std::is_constructible_v<void>'}} \
// expected-note@-1 {{because it is a cv void type}}
+static_assert(!std::is_final<int>::value);
+
+static_assert(std::is_final<int&>::value);
+// expected-error-re@-1 {{static assertion failed due to requirement 'std::{{.*}}is_final<int &>::value'}} \
+// expected-note@-1 {{'int &' is not final}} \
+// expected-note@-1 {{because it is a reference type}} \
+// expected-note@-1 {{because it is not a class or union type}}
+
+static_assert(std::is_final_v<int&>);
+// expected-error@-1 {{static assertion failed due to requirement 'std::is_final_v<int &>'}} \
+// expected-note@-1 {{'int &' is not final}} \
+// expected-note@-1 {{because it is a reference type}} \
+// expected-note@-1 {{because it is not a class or union type}}
+
+using Arr = int[3];
+static_assert(std::is_final<Arr>::value);
+// expected-error-re@-1 {{static assertion failed due to requirement 'std::{{.*}}is_final<int[3]>::value'}} \
+// expected-note@-1 {{'Arr' (aka 'int[3]') is not final}} \
+// expected-note@-1 {{because it is not a class or union type}}
+
+static_assert(std::is_final_v<Arr>);
+// expected-error@-1 {{static assertion failed due to requirement 'std::is_final_v<int[3]>'}} \
+// expected-note@-1 {{'int[3]' is not final}} \
+// expected-note@-1 {{because it is not a class or union type}}
+
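+// Illustrative satisfied case (an assumption, mirroring the failures above):
+// a class actually marked 'final' passes the trait, so no diagnostic fires.
+struct FinalClass final {};
+static_assert(std::is_final<FinalClass>::value);
+static_assert(std::is_final_v<FinalClass>);
+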
namespace test_namespace {
using namespace std;
static_assert(is_trivially_relocatable<int&>::value);
@@ -300,6 +351,31 @@ namespace test_namespace {
static_assert(is_constructible_v<void>);
// expected-error@-1 {{static assertion failed due to requirement 'is_constructible_v<void>'}} \
// expected-note@-1 {{because it is a cv void type}}
+
+ static_assert(is_final<int&>::value);
+ // expected-error-re@-1 {{static assertion failed due to requirement '{{.*}}is_final<int &>::value'}} \
+ // expected-note@-1 {{'int &' is not final}} \
+ // expected-note@-1 {{because it is a reference type}} \
+ // expected-note@-1 {{because it is not a class or union type}}
+
+ static_assert(is_final_v<int&>);
+ // expected-error@-1 {{static assertion failed due to requirement 'is_final_v<int &>'}} \
+ // expected-note@-1 {{'int &' is not final}} \
+ // expected-note@-1 {{because it is a reference type}} \
+ // expected-note@-1 {{because it is not a class or union type}}
+
+ using A = int[2];
+ static_assert(is_final<A>::value);
+ // expected-error-re@-1 {{static assertion failed due to requirement '{{.*}}is_final<int[2]>::value'}} \
+ // expected-note@-1 {{'A' (aka 'int[2]') is not final}} \
+ // expected-note@-1 {{because it is not a class or union type}}
+
+ using Fn = void();
+ static_assert(is_final<Fn>::value);
+ // expected-error-re@-1 {{static assertion failed due to requirement '{{.*}}is_final<void ()>::value'}} \
+ // expected-note@-1 {{'Fn' (aka 'void ()') is not final}} \
+ // expected-note@-1 {{because it is a function type}} \
+ // expected-note@-1 {{because it is not a class or union type}}
}
diff --git a/clang/test/SemaCXX/type-traits-unsatisfied-diags.cpp b/clang/test/SemaCXX/type-traits-unsatisfied-diags.cpp
index 54806a9..1619b0b 100644
--- a/clang/test/SemaCXX/type-traits-unsatisfied-diags.cpp
+++ b/clang/test/SemaCXX/type-traits-unsatisfied-diags.cpp
@@ -829,3 +829,46 @@ static_assert(__is_standard_layout(H)); // no diagnostics
static_assert(__is_standard_layout(I)); // no diagnostics
}
+namespace is_final_tests {
+ struct C {}; // #e-C
+ static_assert(__is_final(C));
+ // expected-error@-1 {{static assertion failed due to requirement '__is_final(is_final_tests::C)'}} \
+ // expected-note@-1 {{'C' is not final}} \
+ // expected-note@-1 {{because it is not marked 'final'}} \
+ // expected-note@#e-C {{'C' defined here}}
+
+ union U {}; // #e-U
+ static_assert(__is_final(U));
+ // expected-error@-1 {{static assertion failed due to requirement '__is_final(is_final_tests::U)'}} \
+ // expected-note@-1 {{'U' is not final}} \
+ // expected-note@-1 {{because it is not marked 'final'}} \
+ // expected-note@#e-U {{'U' defined here}}
+
+ // ----- non-class/union types -----
+ using I = int;
+ static_assert(__is_final(I));
+ // expected-error@-1 {{static assertion failed due to requirement '__is_final(int)'}} \
+ // expected-note@-1 {{'I' (aka 'int') is not final}} \
+ // expected-note@-1 {{because it is not a class or union type}}
+
+ using Fty = void(); // function type
+ static_assert(__is_final(Fty));
+ // expected-error@-1 {{static assertion failed due to requirement '__is_final(void ())'}} \
+ // expected-note@-1 {{'Fty' (aka 'void ()') is not final}} \
+ // expected-note@-1 {{because it is a function type}} \
+ // expected-note@-1 {{because it is not a class or union type}}
+
+ using Arr = int[3];
+ static_assert(__is_final(Arr));
+ // expected-error@-1 {{static assertion failed due to requirement '__is_final(int[3])'}} \
+ // expected-note@-1 {{'Arr' (aka 'int[3]') is not final}} \
+ // expected-note@-1 {{because it is not a class or union type}}
+
+ using Ref = int&;
+ static_assert(__is_final(Ref));
+ // expected-error@-1 {{static assertion failed due to requirement '__is_final(int &)'}} \
+ // expected-note@-1 {{'Ref' (aka 'int &') is not final}} \
+ // expected-note@-1 {{because it is a reference type}} \
+ // expected-note@-1 {{because it is not a class or union type}}
+
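+ // Illustrative satisfied case (an assumption): a type marked 'final'
+ // makes __is_final succeed, so no diagnostics are expected.
+ struct Sealed final {};
+ static_assert(__is_final(Sealed)); // no diagnostics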
+}
diff --git a/clang/test/SemaCXX/undefined-partial-specialization.cpp b/clang/test/SemaCXX/undefined-partial-specialization.cpp
index 0f776a6..3992d02 100644
--- a/clang/test/SemaCXX/undefined-partial-specialization.cpp
+++ b/clang/test/SemaCXX/undefined-partial-specialization.cpp
@@ -10,6 +10,6 @@ template <typename T>
class boo<T, true>;
template<typename T>
-void boo<T, true>::foo(){} // expected-error{{out-of-line definition of 'foo' from class 'boo<T, true>' without definition}}
+void boo<T, true>::foo(){} // expected-error{{out-of-line definition of 'foo' from class 'GH61356::boo<T, true>' without definition}}
}
diff --git a/clang/test/SemaCXX/using-decl-templates.cpp b/clang/test/SemaCXX/using-decl-templates.cpp
index c96c487..58b3059 100644
--- a/clang/test/SemaCXX/using-decl-templates.cpp
+++ b/clang/test/SemaCXX/using-decl-templates.cpp
@@ -153,3 +153,11 @@ T foo(T t) { // OK
}
} // namespace sss
} // namespace func_templ
+
+namespace DependentName {
+ template <typename T> struct S {
+ using typename T::Ty;
+ static Ty Val;
+ };
+ template <typename T> typename S<T>::Ty S<T>::Val;
+} // namespace DependentName
diff --git a/clang/test/SemaCXX/vector-bool.cpp b/clang/test/SemaCXX/vector-bool.cpp
index cd63805..0db04dc 100644
--- a/clang/test/SemaCXX/vector-bool.cpp
+++ b/clang/test/SemaCXX/vector-bool.cpp
@@ -112,3 +112,7 @@ void Sizeof() {
static_assert(sizeof(Bool195) == 32);
static_assert(sizeof(Bool257) == 64);
}
+
+#if !__has_feature(ext_vector_type_boolean)
+#error "FAIL"
+#endif
diff --git a/clang/test/SemaCXX/warn-unused-result.cpp b/clang/test/SemaCXX/warn-unused-result.cpp
index 447654e..0988177 100644
--- a/clang/test/SemaCXX/warn-unused-result.cpp
+++ b/clang/test/SemaCXX/warn-unused-result.cpp
@@ -309,7 +309,7 @@ void use() {
S<double>(2); // no warning
S<int>(2); // expected-warning {{ignoring temporary of type 'S<int>' declared with 'nodiscard'}}
- S<const char>(2); // no warning (warn_unused_result does not diagnose constructor temporaries)
+ S<const char>(2); // expected-warning {{ignoring temporary of type 'S<const char>' declared with 'clang::warn_unused_result' attribute}}
// function should take precedence over type
obtain2(1.0); // expected-warning {{ignoring return value of function declared with 'nodiscard'}}
@@ -336,7 +336,7 @@ struct [[nodiscard]] G {
void use2() {
H{2}; // no warning
H(2.0); // expected-warning {{ignoring temporary created by a constructor declared with 'nodiscard'}}
- H("Hello"); // no warning (warn_unused_result does not diagnose constructor temporaries)
+ H("Hello"); // expected-warning {{ignoring temporary created by a constructor declared with 'warn_unused_result' attribute}}
// no warning for explicit cast to void
(void)H(2);
@@ -407,3 +407,88 @@ void doGccThings() {
}
} // namespace BuildStringOnClangScope
+
+namespace candiscard {
+
+struct [[nodiscard]] NoDiscard {
+ [[nodiscard]] NoDiscard(int);
+ NoDiscard(const char *);
+};
+
+struct [[gnu::warn_unused]] WarnUnused {
+ [[gnu::warn_unused]] WarnUnused(int); // expected-warning {{'gnu::warn_unused' attribute only applies to structs, unions, and classes}}
+ WarnUnused(const char*);
+};
+
+struct [[gnu::warn_unused_result]] WarnUnusedResult {
+ [[gnu::warn_unused_result]] WarnUnusedResult(int);
+ WarnUnusedResult(const char*);
+};
+
+NoDiscard return_nodiscard();
+WarnUnused return_warnunused();
+WarnUnusedResult return_warnunusedresult();
+
+NoDiscard (*p_return_nodiscard)();
+WarnUnused (*p_return_warnunused)();
+WarnUnusedResult (*p_return_warnunusedresult)();
+
+NoDiscard (*(*pp_return_nodiscard)())();
+WarnUnused (*(*pp_return_warnunused)())();
+WarnUnusedResult (*(*pp_return_warnunusedresult)())();
+
+template <class T> T from_a_template();
+
+void test() {
+ // Unused but named variables
+ NoDiscard unused_variable1(1); // no warning
+ NoDiscard unused_variable2(""); // no warning
+ WarnUnused unused_variable3(1); // no warning
+ WarnUnused unused_variable4(""); // no warning
+ WarnUnusedResult unused_variable5(1); // no warning
+ WarnUnusedResult unused_variable6(""); // no warning
+
+ // Constructor return values
+ NoDiscard(1); // expected-warning {{ignoring temporary created by a constructor declared with 'nodiscard' attribute}}
+ NoDiscard(""); // expected-warning {{ignoring temporary of type 'NoDiscard' declared with 'nodiscard' attribute}}
+ WarnUnused(1); // expected-warning {{expression result unused}}
+ WarnUnused(""); // expected-warning {{expression result unused}}
+ WarnUnusedResult(1); // expected-warning {{ignoring temporary created by a constructor declared with 'gnu::warn_unused_result' attribute}}
+ WarnUnusedResult(""); // expected-warning {{ignoring temporary of type 'WarnUnusedResult' declared with 'gnu::warn_unused_result' attribute}}
+
+ NoDiscard{1}; // expected-warning {{ignoring temporary created by a constructor declared with 'nodiscard' attribute}}
+ NoDiscard{""}; // expected-warning {{ignoring temporary of type 'NoDiscard' declared with 'nodiscard' attribute}}
+ WarnUnused{1}; // expected-warning {{expression result unused}}
+ WarnUnused{""}; // expected-warning {{expression result unused}}
+ WarnUnusedResult{1}; // expected-warning {{ignoring temporary created by a constructor declared with 'gnu::warn_unused_result' attribute}}
+ WarnUnusedResult{""}; // expected-warning {{ignoring temporary of type 'WarnUnusedResult' declared with 'gnu::warn_unused_result' attribute}}
+
+ static_cast<NoDiscard>(1); // expected-warning {{ignoring temporary created by a constructor declared with 'nodiscard' attribute}}
+ static_cast<NoDiscard>(""); // expected-warning {{ignoring temporary of type 'NoDiscard' declared with 'nodiscard' attribute}}
+ static_cast<WarnUnused>(1); // expected-warning {{expression result unused}}
+ static_cast<WarnUnused>(""); // expected-warning {{expression result unused}}
+ static_cast<WarnUnusedResult>(1); // expected-warning {{ignoring temporary created by a constructor declared with 'gnu::warn_unused_result' attribute}}
+ static_cast<WarnUnusedResult>(""); // expected-warning {{ignoring temporary of type 'WarnUnusedResult' declared with 'gnu::warn_unused_result' attribute}}
+
+ // Function return values
+ return_nodiscard(); // expected-warning {{ignoring return value of type 'NoDiscard' declared with 'nodiscard' attribute}}
+ return_warnunused(); // no warning
+ return_warnunusedresult(); // expected-warning {{ignoring return value of type 'WarnUnusedResult' declared with 'gnu::warn_unused_result' attribute}}
+
+ // Function pointer return values
+ p_return_nodiscard(); // expected-warning {{ignoring return value of type 'NoDiscard' declared with 'nodiscard' attribute}}
+ p_return_warnunused(); // no warning
+ p_return_warnunusedresult(); // expected-warning {{ignoring return value of type 'WarnUnusedResult' declared with 'gnu::warn_unused_result' attribute}}
+
+ // Function pointer expression return values
+ pp_return_nodiscard()(); // expected-warning {{ignoring return value of type 'NoDiscard' declared with 'nodiscard' attribute}}
+ pp_return_warnunused()(); // no warning
+ pp_return_warnunusedresult()(); // expected-warning {{ignoring return value of type 'WarnUnusedResult' declared with 'gnu::warn_unused_result' attribute}}
+
+ // From a template
+ from_a_template<NoDiscard>(); // expected-warning {{ignoring return value of type 'NoDiscard' declared with 'nodiscard' attribute}}
+ from_a_template<WarnUnused>(); // no warning
+ from_a_template<WarnUnusedResult>(); // expected-warning {{ignoring return value of type 'WarnUnusedResult' declared with 'gnu::warn_unused_result' attribute}}
+}
+
+} // namespace candiscard
diff --git a/clang/test/SemaCXX/wmissing-noreturn-suggestion.cpp b/clang/test/SemaCXX/wmissing-noreturn-suggestion.cpp
index 8beffcd..06db972 100644
--- a/clang/test/SemaCXX/wmissing-noreturn-suggestion.cpp
+++ b/clang/test/SemaCXX/wmissing-noreturn-suggestion.cpp
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -fsyntax-only -fcxx-exceptions -fexceptions -Wreturn-type -Wmissing-noreturn -verify %s
+// RUN: %clang_cc1 -fsyntax-only -fcxx-exceptions -fexceptions -Wreturn-type -Wmissing-noreturn -verify=expected,cxx17 -std=c++17 %s
+// RUN: %clang_cc1 -fsyntax-only -fcxx-exceptions -fexceptions -Wreturn-type -Wmissing-noreturn -verify=expected,cxx23 -std=c++23 %s
namespace std {
class string {
@@ -16,6 +17,15 @@ void throwError(const std::string& msg) { // expected-warning {{function 'throwE
throw std::runtime_error(msg);
}
+// The [[noreturn]] attribute is not available on lambdas until C++23, so the
+// -Wmissing-noreturn warning should not be emitted for earlier standards.
+// Clang does accept the attribute on earlier standards as an extension, and
+// diagnoses it with the c++23-lambda-attributes warning.
+void lambda() {
+ auto l1 = [] () { throw std::runtime_error("ERROR"); }; // cxx23-warning {{function 'operator()' could be declared with attribute 'noreturn'}}
+ auto l2 = [] [[noreturn]] () { throw std::runtime_error("ERROR"); }; // cxx17-warning {{an attribute specifier sequence in this position is a C++23 extension}}
+}
+
// The non-void caller should not warn about missing return.
int ensureZero(int i) {
if (i == 0) return 0;
diff --git a/clang/test/SemaHIP/amdgcnspirv-implicit-alloc-function-calling-conv.hip b/clang/test/SemaHIP/amdgcnspirv-implicit-alloc-function-calling-conv.hip
new file mode 100644
index 0000000..c3e7e1a
--- /dev/null
+++ b/clang/test/SemaHIP/amdgcnspirv-implicit-alloc-function-calling-conv.hip
@@ -0,0 +1,32 @@
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv32 -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv64 -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv64-amd-amdhsa -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv32 -aux-triple x86_64-unknown-linux-gnu -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv64 -aux-triple x86_64-unknown-linux-gnu -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv64-amd-amdhsa -aux-triple x86_64-unknown-linux-gnu -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv32 -aux-triple x86_64-pc-windows-msvc -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv64 -aux-triple x86_64-pc-windows-msvc -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv64-amd-amdhsa -aux-triple x86_64-pc-windows-msvc -verify
+
+// expected-no-diagnostics
+
+namespace std
+{
+ enum class align_val_t : __SIZE_TYPE__ {};
+ struct nothrow_t { explicit nothrow_t() = default; };
+ extern nothrow_t const nothrow;
+}
+
+void* __attribute__((cdecl)) operator new(__SIZE_TYPE__);
+void* __attribute__((cdecl)) operator new[](__SIZE_TYPE__);
+void* __attribute__((cdecl)) operator new(__SIZE_TYPE__, ::std::align_val_t);
+void* __attribute__((cdecl)) operator new[](__SIZE_TYPE__, ::std::align_val_t);
+
+void __attribute__((cdecl)) operator delete(void*) noexcept;
+void __attribute__((cdecl)) operator delete[](void*) noexcept;
+void __attribute__((cdecl)) operator delete(void*, __SIZE_TYPE__) noexcept;
+void __attribute__((cdecl)) operator delete[](void*, __SIZE_TYPE__) noexcept;
+void __attribute__((cdecl)) operator delete(void*, ::std::align_val_t) noexcept;
+void __attribute__((cdecl)) operator delete[](void*, ::std::align_val_t) noexcept;
+void __attribute__((cdecl)) operator delete(void*, __SIZE_TYPE__, ::std::align_val_t) noexcept;
+void __attribute__((cdecl)) operator delete[](void*, __SIZE_TYPE__, ::std::align_val_t) noexcept;
diff --git a/clang/test/SemaObjC/attr-nodiscard.m b/clang/test/SemaObjC/attr-nodiscard.m
index 6d04665..26bbd24 100644
--- a/clang/test/SemaObjC/attr-nodiscard.m
+++ b/clang/test/SemaObjC/attr-nodiscard.m
@@ -4,6 +4,9 @@ struct [[nodiscard]] expected {};
typedef struct expected E;
+[[nodiscard]] typedef int NI; // expected-warning {{'[[nodiscard]]' attribute ignored when applied to a typedef}}
+typedef __attribute__((warn_unused_result)) int WUR;
+
@interface INTF
- (int) a [[nodiscard]];
+ (int) b [[nodiscard]];
@@ -12,6 +15,8 @@ typedef struct expected E;
- (E) e;
+ (E) f;
- (void) g [[nodiscard]]; // expected-warning {{attribute 'nodiscard' cannot be applied to Objective-C method without return value}}
+- (NI) h;
+- (WUR) i;
@end
void foo(INTF *a) {
@@ -21,5 +26,7 @@ void foo(INTF *a) {
[INTF d]; // expected-warning {{ignoring return value of type 'expected' declared with 'nodiscard' attribute}}
[a e]; // expected-warning {{ignoring return value of type 'expected' declared with 'nodiscard' attribute}}
[INTF f]; // expected-warning {{ignoring return value of type 'expected' declared with 'nodiscard' attribute}}
- [a g];
+ [a g]; // no warning because g returns void
+ [a h]; // no warning because attribute is ignored when applied to a typedef
+ [a i]; // expected-warning {{ignoring return value of type 'WUR' declared with 'warn_unused_result' attribute}}
}
diff --git a/clang/test/SemaObjC/exprs.m b/clang/test/SemaObjC/exprs.m
index dcf46d3..c42d270 100644
--- a/clang/test/SemaObjC/exprs.m
+++ b/clang/test/SemaObjC/exprs.m
@@ -36,3 +36,10 @@ void test_encode(void) {
(void)@encode(Incomplete_ObjC_class*);
(void)@encode(id);
}
+
+void gh154046(void) {
+ (void)(const char[]) {
+ [0] = @encode(int), // expected-error {{incompatible pointer to integer conversion initializing 'const char' with an expression of type 'char[2]'}}
+ [1] = @encode(float) // expected-error {{incompatible pointer to integer conversion initializing 'const char' with an expression of type 'char[2]'}}
+ }[1];
+}
diff --git a/clang/test/SemaObjC/ptrauth-qualifier.m b/clang/test/SemaObjC/ptrauth-qualifier.m
index 74bbe6f0..67a73bb 100644
--- a/clang/test/SemaObjC/ptrauth-qualifier.m
+++ b/clang/test/SemaObjC/ptrauth-qualifier.m
@@ -1,13 +1,25 @@
-// RUN: %clang_cc1 -triple arm64-apple-ios -fsyntax-only -verify -fptrauth-intrinsics %s
+// RUN: %clang_cc1 -triple arm64-apple-ios -DIS_DARWIN -fsyntax-only -verify -fptrauth-intrinsics %s
// RUN: %clang_cc1 -triple aarch64-linux-gnu -fsyntax-only -verify -fptrauth-intrinsics %s
-#if !__has_extension(ptrauth_qualifier)
+#if defined(IS_DARWIN) && !__has_extension(ptrauth_qualifier)
// This error means that the __ptrauth qualifier availability test says that it
// is not available. This error is not expected in the output; if it is seen,
// there is a feature detection regression.
#error __ptrauth qualifier not enabled
#endif
+#if defined(IS_DARWIN) && !__has_feature(ptrauth_qualifier)
+// This error means that the __has_feature test for ptrauth_qualifier has
+// failed, despite being expected to succeed on Darwin.
+#error __ptrauth qualifier not enabled
+#elif !defined(IS_DARWIN) && (__has_feature(ptrauth_qualifier) || __has_extension(ptrauth_qualifier))
+#error ptrauth_qualifier labeled a feature on a non-darwin platform
+#endif
+
+#if !defined (__PTRAUTH__)
+#error __PTRAUTH__ test macro not defined when ptrauth is enabled
+#endif
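+
+// Illustrative use of the qualifier (an assumption, mirroring the example in
+// clang's PointerAuthentication docs): a data pointer signed with key 1,
+// address discrimination enabled, and extra discriminator 50.
+int * __ptrauth(1, 1, 50) illustrative_ptr;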
+
@interface Foo
// expected-warning@-1 {{class 'Foo' defined without specifying a base class}}
// expected-note@-2 {{add a super class to fix this problem}}
diff --git a/clang/test/SemaObjCXX/Inputs/nullability-pragmas-1.h b/clang/test/SemaObjCXX/Inputs/nullability-pragmas-1.h
index 91753ca..9d5329c 100644
--- a/clang/test/SemaObjCXX/Inputs/nullability-pragmas-1.h
+++ b/clang/test/SemaObjCXX/Inputs/nullability-pragmas-1.h
@@ -39,7 +39,7 @@ A *f14(void);
int * _Null_unspecified f15(void);
A * _Null_unspecified f16(void);
-void f17(CFErrorRef *error); // expected-note{{no known conversion from 'A * _Nonnull' to 'CFErrorRef _Nullable * _Nullable' (aka '__CFError **') for 1st argument}}
+void f17(CFErrorRef *error); // expected-note{{no known conversion from 'A * _Nonnull' to 'CFErrorRef _Nullable * _Nullable' (aka 'struct __CFError **') for 1st argument}}
void f18(A **); // expected-warning 2{{pointer is missing a nullability type specifier}}
// expected-note@-1 2 {{insert '_Nullable' if the pointer may be null}}
// expected-note@-2 2 {{insert '_Nonnull' if the pointer should never be null}}
diff --git a/clang/test/SemaObjCXX/arc-bridged-cast.mm b/clang/test/SemaObjCXX/arc-bridged-cast.mm
index 1f68897..67769eb 100644
--- a/clang/test/SemaObjCXX/arc-bridged-cast.mm
+++ b/clang/test/SemaObjCXX/arc-bridged-cast.mm
@@ -63,7 +63,7 @@ extern "C" const CFAnnotatedObjectRef r3 = &cf0;
void testExternC() {
id obj;
obj = (id)r0;
- obj = (id)r1; // expected-error{{cast of C pointer type 'CFAnnotatedObjectRef' (aka 'const __CFAnnotatedObject *') to Objective-C pointer type 'id' requires a bridged cast}} expected-note{{use __bridge to convert directly}} expected-note{{use __bridge_transfer to transfer ownership of a +1 'CFAnnotatedObjectRef'}}
+ obj = (id)r1; // expected-error{{cast of C pointer type 'CFAnnotatedObjectRef' (aka 'const struct __CFAnnotatedObject *') to Objective-C pointer type 'id' requires a bridged cast}} expected-note{{use __bridge to convert directly}} expected-note{{use __bridge_transfer to transfer ownership of a +1 'CFAnnotatedObjectRef'}}
obj = (id)r2;
- obj = (id)r3; // expected-error{{cast of C pointer type 'CFAnnotatedObjectRef' (aka 'const __CFAnnotatedObject *') to Objective-C pointer type 'id' requires a bridged cast}} expected-note{{use __bridge to convert directly}} expected-note{{use __bridge_transfer to transfer ownership of a +1 'CFAnnotatedObjectRef'}}
+ obj = (id)r3; // expected-error{{cast of C pointer type 'CFAnnotatedObjectRef' (aka 'const struct __CFAnnotatedObject *') to Objective-C pointer type 'id' requires a bridged cast}} expected-note{{use __bridge to convert directly}} expected-note{{use __bridge_transfer to transfer ownership of a +1 'CFAnnotatedObjectRef'}}
}
diff --git a/clang/test/SemaObjCXX/attr-nodiscard.mm b/clang/test/SemaObjCXX/attr-nodiscard.mm
index e1eefb7..18d8296 100644
--- a/clang/test/SemaObjCXX/attr-nodiscard.mm
+++ b/clang/test/SemaObjCXX/attr-nodiscard.mm
@@ -5,6 +5,9 @@ struct [[nodiscard]] expected {};
using E = expected<int>;
+using NI [[nodiscard]] = int; // expected-warning {{'[[nodiscard]]' attribute ignored when applied to a typedef}}
+using WURI [[clang::warn_unused_result]] = int;
+
@interface INTF
- (int) a [[nodiscard]];
+ (int) b [[nodiscard]];
@@ -13,6 +16,8 @@ using E = expected<int>;
- (E) e;
+ (E) f;
- (void) g [[nodiscard]]; // expected-warning {{attribute 'nodiscard' cannot be applied to Objective-C method without return value}}
+- (NI) h;
+- (WURI) i;
@end
void foo(INTF *a) {
@@ -22,5 +27,7 @@ void foo(INTF *a) {
[INTF d]; // expected-warning {{ignoring return value of type 'expected<int>' declared with 'nodiscard' attribute}}
[a e]; // expected-warning {{ignoring return value of type 'expected<int>' declared with 'nodiscard' attribute}}
[INTF f]; // expected-warning {{ignoring return value of type 'expected<int>' declared with 'nodiscard' attribute}}
- [a g];
+ [a g]; // no warning because g returns void
+ [a h]; // no warning because the attribute is ignored when applied to a typedef
+ [a i]; // expected-warning {{ignoring return value of type 'WURI' declared with 'clang::warn_unused_result' attribute}}
}
diff --git a/clang/test/SemaObjCXX/discarded-block-type-inference.mm b/clang/test/SemaObjCXX/discarded-block-type-inference.mm
new file mode 100644
index 0000000..8e25877
--- /dev/null
+++ b/clang/test/SemaObjCXX/discarded-block-type-inference.mm
@@ -0,0 +1,15 @@
+// RUN: %clang_cc1 -std=c++23 -fsyntax-only -fobjc-arc -fblocks %s
+
+void block_receiver(int (^)() );
+
+int f1() {
+ if constexpr (0)
+ (block_receiver)(^{ return 2; });
+ return 1;
+}
+
+int f2() {
+ if constexpr (0)
+ return (^{ return 2; })();
+ return 1;
+}
diff --git a/clang/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm b/clang/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm
index 3b590c4..57aaa62 100644
--- a/clang/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm
+++ b/clang/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm
@@ -81,7 +81,7 @@ int main() {
id edge_insets_object = @(edge_insets);
SomeStruct s;
- id err = @(s); // expected-error{{illegal type 'SomeStruct' (aka '_SomeStruct') used in a boxed expression}}
+ id err = @(s); // expected-error{{illegal type 'SomeStruct' (aka 'struct _SomeStruct') used in a boxed expression}}
NonTriviallyCopyable ntc;
id ntcErr = @(ntc); // expected-error{{non-trivially copyable type 'NonTriviallyCopyable' cannot be used in a boxed expression}}
@@ -99,7 +99,7 @@ SomeStruct getSomeStruct() {
void rvalue() {
id rv_rect = @(getRect());
- id rv_some_struct = @(getSomeStruct()); // expected-error {{illegal type 'SomeStruct' (aka '_SomeStruct') used in a boxed expression}}
+ id rv_some_struct = @(getSomeStruct()); // expected-error {{illegal type 'SomeStruct' (aka 'struct _SomeStruct') used in a boxed expression}}
}
template <class T> id box(T value) { return @(value); } // expected-error{{non-trivially copyable type 'NonTriviallyCopyable' cannot be used in a boxed expression}}
diff --git a/clang/test/SemaObjCXX/objcbridge-attribute-arc.mm b/clang/test/SemaObjCXX/objcbridge-attribute-arc.mm
index 0a86ee9..0d3751f 100644
--- a/clang/test/SemaObjCXX/objcbridge-attribute-arc.mm
+++ b/clang/test/SemaObjCXX/objcbridge-attribute-arc.mm
@@ -26,10 +26,10 @@ typedef XXX *CFUColor2Ref;
typedef struct __attribute__((objc_bridge(NSTesting))) __CFError *CFTestingRef; // expected-note {{declared here}}
id Test1(CFTestingRef cf) {
- return (NSString *)cf; // expected-error {{CF object of type 'CFTestingRef' (aka '__CFError *') is bridged to 'NSTesting', which is not an Objective-C class}} \
- // expected-error {{cast of C pointer type 'CFTestingRef' (aka '__CFError *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
+ return (NSString *)cf; // expected-error {{CF object of type 'CFTestingRef' (aka 'struct __CFError *') is bridged to 'NSTesting', which is not an Objective-C class}} \
+ // expected-error {{cast of C pointer type 'CFTestingRef' (aka 'struct __CFError *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFTestingRef' (aka '__CFError *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFTestingRef' (aka 'struct __CFError *') into ARC}}
}
typedef CFErrorRef CFErrorRef1;
@@ -52,105 +52,105 @@ typedef CFErrorRef1 CFErrorRef2; // expected-note 2 {{declared here}}
@class NSString;
void Test2(CFErrorRef2 cf, NSError *ns, NSString *str, Class c, CFUColor2Ref cf2) {
- (void)(NSString *)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'NSString'}} \
- // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
+ (void)(NSString *)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'NSString'}} \
+ // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
// expected-note {{__bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)(NSError *)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'NSError *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(NSError *)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'NSError *' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)(MyError*)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'MyError *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(MyError*)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'MyError *' requires a bridged cast}} \
// expected-note {{__bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}} \
- // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'MyError'}}
- (void)(NSUColor *)cf2; // expected-error {{cast of C pointer type 'CFUColor2Ref' (aka '__CFUPrimeColor *') to Objective-C pointer type 'NSUColor *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}} \
+ // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'MyError'}}
+ (void)(NSUColor *)cf2; // expected-error {{cast of C pointer type 'CFUColor2Ref' (aka 'union __CFUPrimeColor *') to Objective-C pointer type 'NSUColor *' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFUColor2Ref' (aka '__CFUPrimeColor *') into ARC}}
- (void)(CFErrorRef)ns; // expected-error {{cast of Objective-C pointer type 'NSError *' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFUColor2Ref' (aka 'union __CFUPrimeColor *') into ARC}}
+ (void)(CFErrorRef)ns; // expected-error {{cast of Objective-C pointer type 'NSError *' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}} \\
- // expected-error {{cast of Objective-C pointer type 'NSString *' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}} \\
+ // expected-error {{cast of Objective-C pointer type 'NSString *' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(Class)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'Class'}} \\
- // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'Class' requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(Class)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'Class'}} \\
+ // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'Class' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)(CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *}} \\
- // expected-error {{cast of Objective-C pointer type 'Class' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *'}} \\
+ // expected-error {{cast of Objective-C pointer type 'Class' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
void Test3(CFErrorRef cf, NSError *ns) {
- (void)(id)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)(id)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
- (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
- (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFErrorRef' (aka '__CFErrorRef *') bridges to NSError, not 'id<P1,P2,P4>'}} \
- // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFErrorRef' (aka 'struct __CFErrorRef *') bridges to NSError, not 'id<P1,P2,P4>'}} \
+ // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
}
void Test4(CFMyErrorRef cf) {
- (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFMyErrorRef' (aka '__CFMyErrorRef *') bridges to MyError, not 'id<P1,P2,P4>'}} \
- // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') bridges to MyError, not 'id<P1,P2,P4>'}} \
+ // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
}
void Test5(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)(CFErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ (void)(CFErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
void Test6(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)(CFMyErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ (void)(CFMyErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
}
typedef struct __attribute__ ((objc_bridge(MyPersonalError))) __CFMyPersonalErrorRef * CFMyPersonalErrorRef; // expected-note 1 {{declared here}}
@@ -159,51 +159,51 @@ typedef struct __attribute__ ((objc_bridge(MyPersonalError))) __CFMyPersonalErro
@end
void Test7(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)(CFMyPersonalErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ (void)(CFMyPersonalErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
}
void Test8(CFMyPersonalErrorRef cf) {
- (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2, P3, P4>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3, P4>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2, P3, P4, P5>)cf; // expected-warning {{'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') bridges to MyPersonalError, not 'id<P1,P2,P3,P4,P5>'}} \
- // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4,P5>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3, P4, P5>)cf; // expected-warning {{'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') bridges to MyPersonalError, not 'id<P1,P2,P3,P4,P5>'}} \
+ // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4,P5>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
}
void Test9(CFErrorRef2 cf, NSError *ns, NSString *str, Class c, CFUColor2Ref cf2) {
- (void)(__bridge NSString *)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'NSString'}}
+ (void)(__bridge NSString *)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'NSString'}}
(void)(__bridge NSError *)cf; // okay
(void)(__bridge NSUColor *)cf2; // okay
(void)(__bridge CFErrorRef)ns; // okay
- (void)(__bridge CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(__bridge Class)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'Class'}}
- (void)(__bridge CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}}
+ (void)(__bridge CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(__bridge Class)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'Class'}}
+ (void)(__bridge CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
diff --git a/clang/test/SemaObjCXX/objcbridge-attribute.mm b/clang/test/SemaObjCXX/objcbridge-attribute.mm
index 907163b..c04fba1 100644
--- a/clang/test/SemaObjCXX/objcbridge-attribute.mm
+++ b/clang/test/SemaObjCXX/objcbridge-attribute.mm
@@ -26,10 +26,10 @@ typedef XXX *CFUColor2Ref;
typedef struct __attribute__((objc_bridge(NSTesting))) __CFError *CFTestingRef; // expected-note {{declared here}}
id Test1(CFTestingRef cf) {
- return (NSString *)cf; // expected-error {{CF object of type 'CFTestingRef' (aka '__CFError *') is bridged to 'NSTesting', which is not an Objective-C class}} \
- // expected-error {{cast of C pointer type 'CFTestingRef' (aka '__CFError *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
+ return (NSString *)cf; // expected-error {{CF object of type 'CFTestingRef' (aka 'struct __CFError *') is bridged to 'NSTesting', which is not an Objective-C class}} \
+ // expected-error {{cast of C pointer type 'CFTestingRef' (aka 'struct __CFError *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFTestingRef' (aka '__CFError *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFTestingRef' (aka 'struct __CFError *') into ARC}}
}
typedef CFErrorRef CFErrorRef1;
@@ -52,105 +52,105 @@ typedef CFErrorRef1 CFErrorRef2; // expected-note 2 {{declared here}}
@class NSString;
void Test2(CFErrorRef2 cf, NSError *ns, NSString *str, Class c, CFUColor2Ref cf2) {
- (void)(NSString *)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'NSString'}} \
- // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
+ (void)(NSString *)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'NSString'}} \
+ // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
// expected-note {{__bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)(NSError *)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'NSError *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(NSError *)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'NSError *' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)(MyError*)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'MyError *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(MyError*)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'MyError *' requires a bridged cast}} \
// expected-note {{__bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}} \
- // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'MyError'}}
- (void)(NSUColor *)cf2; // expected-error {{cast of C pointer type 'CFUColor2Ref' (aka '__CFUPrimeColor *') to Objective-C pointer type 'NSUColor *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}} \
+ // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'MyError'}}
+ (void)(NSUColor *)cf2; // expected-error {{cast of C pointer type 'CFUColor2Ref' (aka 'union __CFUPrimeColor *') to Objective-C pointer type 'NSUColor *' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFUColor2Ref' (aka '__CFUPrimeColor *') into ARC}}
- (void)(CFErrorRef)ns; // expected-error {{cast of Objective-C pointer type 'NSError *' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFUColor2Ref' (aka 'union __CFUPrimeColor *') into ARC}}
+ (void)(CFErrorRef)ns; // expected-error {{cast of Objective-C pointer type 'NSError *' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}} \\
- // expected-error {{cast of Objective-C pointer type 'NSString *' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}} \\
+ // expected-error {{cast of Objective-C pointer type 'NSString *' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(Class)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'Class'}} \\
- // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'Class' requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(Class)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'Class'}} \\
+ // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'Class' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)(CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}} \
- // expected-error {{cast of Objective-C pointer type 'Class' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}} \
+ // expected-error {{cast of Objective-C pointer type 'Class' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
void Test3(CFErrorRef cf, NSError *ns) {
- (void)(id)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)(id)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
- (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
- (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFErrorRef' (aka '__CFErrorRef *') bridges to NSError, not 'id<P1,P2,P4>'}} \
- // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFErrorRef' (aka 'struct __CFErrorRef *') bridges to NSError, not 'id<P1,P2,P4>'}} \
+ // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
}
void Test4(CFMyErrorRef cf) {
- (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFMyErrorRef' (aka '__CFMyErrorRef *') bridges to MyError, not 'id<P1,P2,P4>'}} \
- // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') bridges to MyError, not 'id<P1,P2,P4>'}} \
+ // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
}
void Test5(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)(CFErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ (void)(CFErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
void Test6(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)(CFMyErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ (void)(CFMyErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
}
typedef struct __attribute__ ((objc_bridge(MyPersonalError))) __CFMyPersonalErrorRef * CFMyPersonalErrorRef; // expected-note 1 {{declared here}}
@@ -159,52 +159,52 @@ typedef struct __attribute__ ((objc_bridge(MyPersonalError))) __CFMyPersonalErro
@end
void Test7(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)(CFMyPersonalErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ (void)(CFMyPersonalErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
}
void Test8(CFMyPersonalErrorRef cf) {
- (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2, P3, P4>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3, P4>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2, P3, P4, P5>)cf; // expected-warning {{'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') bridges to MyPersonalError, not 'id<P1,P2,P3,P4,P5>'}} \
- // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4,P5>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3, P4, P5>)cf; // expected-warning {{'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') bridges to MyPersonalError, not 'id<P1,P2,P3,P4,P5>'}} \
+ // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4,P5>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
}
void Test9(CFErrorRef2 cf, NSError *ns, NSString *str, Class c, CFUColor2Ref cf2) {
- (void)(__bridge NSString *)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'NSString'}}
+ (void)(__bridge NSString *)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'NSString'}}
(void)(__bridge NSError *)cf; // okay
- (void)(__bridge MyError*)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'MyError'}}
+ (void)(__bridge MyError*)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'MyError'}}
(void)(__bridge NSUColor *)cf2; // okay
(void)(__bridge CFErrorRef)ns; // okay
- (void)(__bridge CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(__bridge Class)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'Class'}}
- (void)(__bridge CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}}
+ (void)(__bridge CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(__bridge Class)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'Class'}}
+ (void)(__bridge CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
diff --git a/clang/test/SemaObjCXX/objcbridge-related-attribute.mm b/clang/test/SemaObjCXX/objcbridge-related-attribute.mm
index 66755f3..b835a10 100644
--- a/clang/test/SemaObjCXX/objcbridge-related-attribute.mm
+++ b/clang/test/SemaObjCXX/objcbridge-related-attribute.mm
@@ -13,14 +13,14 @@ typedef struct __attribute__((objc_bridge_related(NSColor,colorWithCGColor:,CGCo
NSColor *Test1(NSColor *nsColor, CGColorRef newColor) {
- nsColor = newColor; // expected-error {{'CGColorRef' (aka 'CGColor *') must be explicitly converted to 'NSColor *'; use '+colorWithCGColor:' method for this conversion}}
- NSColor *ns = newColor; // expected-error {{'CGColorRef' (aka 'CGColor *') must be explicitly converted to 'NSColor *'; use '+colorWithCGColor:' method for this conversion}}
- return newColor; // expected-error {{'CGColorRef' (aka 'CGColor *') must be explicitly converted to 'NSColor *'; use '+colorWithCGColor:' method for this conversion}}
+ nsColor = newColor; // expected-error {{'CGColorRef' (aka 'struct CGColor *') must be explicitly converted to 'NSColor *'; use '+colorWithCGColor:' method for this conversion}}
+ NSColor *ns = newColor; // expected-error {{'CGColorRef' (aka 'struct CGColor *') must be explicitly converted to 'NSColor *'; use '+colorWithCGColor:' method for this conversion}}
+ return newColor; // expected-error {{'CGColorRef' (aka 'struct CGColor *') must be explicitly converted to 'NSColor *'; use '+colorWithCGColor:' method for this conversion}}
}
CGColorRef Test2(NSColor *newColor, CGColorRef cgColor) {
- cgColor = newColor; // expected-error {{'NSColor *' must be explicitly converted to 'CGColorRef' (aka 'CGColor *'); use '-CGColor' method for this conversion}}
- CGColorRef cg = newColor; // expected-error {{'NSColor *' must be explicitly converted to 'CGColorRef' (aka 'CGColor *'); use '-CGColor' method for this conversion}}
- return newColor; // expected-error {{'NSColor *' must be explicitly converted to 'CGColorRef' (aka 'CGColor *'); use '-CGColor' method for this conversion}}
+ cgColor = newColor; // expected-error {{'NSColor *' must be explicitly converted to 'CGColorRef' (aka 'struct CGColor *'); use '-CGColor' method for this conversion}}
+ CGColorRef cg = newColor; // expected-error {{'NSColor *' must be explicitly converted to 'CGColorRef' (aka 'struct CGColor *'); use '-CGColor' method for this conversion}}
+ return newColor; // expected-error {{'NSColor *' must be explicitly converted to 'CGColorRef' (aka 'struct CGColor *'); use '-CGColor' method for this conversion}}
}
diff --git a/clang/test/SemaObjCXX/objcbridge-static-cast.mm b/clang/test/SemaObjCXX/objcbridge-static-cast.mm
index 6cb9137..ad939da 100644
--- a/clang/test/SemaObjCXX/objcbridge-static-cast.mm
+++ b/clang/test/SemaObjCXX/objcbridge-static-cast.mm
@@ -26,10 +26,10 @@ typedef XXX *CFUColor2Ref;
typedef struct __attribute__((objc_bridge(NSTesting))) __CFError *CFTestingRef; // expected-note {{declared here}}
id Test1(CFTestingRef cf) {
- return static_cast<NSString *>(cf); // expected-error {{CF object of type 'CFTestingRef' (aka '__CFError *') is bridged to 'NSTesting', which is not an Objective-C class}} \
- // expected-error {{cast of C pointer type 'CFTestingRef' (aka '__CFError *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
+ return static_cast<NSString *>(cf); // expected-error {{CF object of type 'CFTestingRef' (aka 'struct __CFError *') is bridged to 'NSTesting', which is not an Objective-C class}} \
+ // expected-error {{cast of C pointer type 'CFTestingRef' (aka 'struct __CFError *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFTestingRef' (aka '__CFError *') into ARC}}
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFTestingRef' (aka 'struct __CFError *') into ARC}}
}
typedef CFErrorRef CFErrorRef1;
@@ -52,84 +52,84 @@ typedef CFErrorRef1 CFErrorRef2; // expected-note 1 {{declared here}}
@class NSString;
void Test2(CFErrorRef2 cf, NSError *ns, NSString *str, Class c, CFUColor2Ref cf2) {
- (void)static_cast<NSString *>(cf); // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'NSString'}} \
- // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
+ (void)static_cast<NSString *>(cf); // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'NSString'}} \
+ // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
// expected-note {{__bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)static_cast<NSError *>(cf); // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'NSError *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)static_cast<NSError *>(cf); // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'NSError *' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)static_cast<MyError*>(cf); // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'MyError *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)static_cast<MyError*>(cf); // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'MyError *' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}} \
- // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'MyError'}}
- (void)static_cast<NSUColor *>(cf2); // expected-error {{cast of C pointer type 'CFUColor2Ref' (aka '__CFUPrimeColor *') to Objective-C pointer type 'NSUColor *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}} \
+ // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'MyError'}}
+ (void)static_cast<NSUColor *>(cf2); // expected-error {{cast of C pointer type 'CFUColor2Ref' (aka 'union __CFUPrimeColor *') to Objective-C pointer type 'NSUColor *' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFUColor2Ref' (aka '__CFUPrimeColor *') into ARC}}
- (void)static_cast<CFErrorRef>(ns); // expected-error {{cast of Objective-C pointer type 'NSError *' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFUColor2Ref' (aka 'union __CFUPrimeColor *') into ARC}}
+ (void)static_cast<CFErrorRef>(ns); // expected-error {{cast of Objective-C pointer type 'NSError *' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)static_cast<CFErrorRef>(str); // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}} \\
- // expected-error {{cast of Objective-C pointer type 'NSString *' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)static_cast<CFErrorRef>(str); // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}} \\
+ // expected-error {{cast of Objective-C pointer type 'NSString *' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)static_cast<Class>(cf); // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'Class'}} \\
- // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'Class' requires a bridged cast}} \
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)static_cast<Class>(cf); // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'Class'}} \\
+ // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'Class' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)static_cast<CFErrorRef>(c); // expected-warning {{'Class' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *}} \\
- // expected-error {{cast of Objective-C pointer type 'Class' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)static_cast<CFErrorRef>(c); // expected-warning {{'Class' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *'}} \\
+ // expected-error {{cast of Objective-C pointer type 'Class' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
void Test3(CFErrorRef cf, NSError *ns) {
- (void)static_cast<id>(cf); // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)static_cast<id>(cf); // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
- (void)static_cast< id<P1, P2> >(cf); // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)static_cast< id<P1, P2> >(cf); // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
- (void)static_cast< id<P1, P2, P4> >(cf); // expected-warning {{'CFErrorRef' (aka '__CFErrorRef *') bridges to NSError, not 'id<P1,P2,P4>'}} \
- // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)static_cast< id<P1, P2, P4> >(cf); // expected-warning {{'CFErrorRef' (aka 'struct __CFErrorRef *') bridges to NSError, not 'id<P1,P2,P4>'}} \
+ // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
}
void Test4(CFMyErrorRef cf) {
- (void)static_cast<id>(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)static_cast<id>(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)static_cast< id<P1, P2> >(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)static_cast< id<P1, P2> >(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)static_cast< id<P1, P2, P3> >(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)static_cast< id<P1, P2, P3> >(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)static_cast< id<P2, P3> >(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)static_cast< id<P2, P3> >(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)static_cast< id<P1, P2, P4> >(cf); // expected-warning {{'CFMyErrorRef' (aka '__CFMyErrorRef *') bridges to MyError, not 'id<P1,P2,P4>'}} \
- // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)static_cast< id<P1, P2, P4> >(cf); // expected-warning {{'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') bridges to MyError, not 'id<P1,P2,P4>'}} \
+ // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
}
void Test5(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)static_cast<CFErrorRef>(ID); // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ (void)static_cast<CFErrorRef>(ID); // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)static_cast<CFErrorRef>(P123); // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)static_cast<CFErrorRef>(P123); // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)static_cast<CFErrorRef>(P1234); // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)static_cast<CFErrorRef>(P1234); // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)static_cast<CFErrorRef>(P12); // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)static_cast<CFErrorRef>(P12); // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)static_cast<CFErrorRef>(P23); // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)static_cast<CFErrorRef>(P23); // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
diff --git a/clang/test/SemaOpenACC/combined-construct-num_workers-ast.cpp b/clang/test/SemaOpenACC/combined-construct-num_workers-ast.cpp
index a04fcdd..cdd6bc7 100644
--- a/clang/test/SemaOpenACC/combined-construct-num_workers-ast.cpp
+++ b/clang/test/SemaOpenACC/combined-construct-num_workers-ast.cpp
@@ -66,7 +66,7 @@ void NormalUses() {
// CHECK-NEXT: ImplicitCastExpr{{.*}} 'int' <UserDefinedConversion>
// CHECK-NEXT: CXXMemberCallExpr{{.*}}'int'
// CHECK-NEXT: MemberExpr{{.*}} '<bound member function type>' .operator int
- // CHECK-NEXT: DeclRefExpr{{.*}} 'struct CorrectConvert':'CorrectConvert' lvalue Var
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'struct CorrectConvert' lvalue Var
// CHECK-NEXT: ForStmt
// CHECK: NullStmt
}
diff --git a/clang/test/SemaOpenACC/combined-construct-reduction-clause.cpp b/clang/test/SemaOpenACC/combined-construct-reduction-clause.cpp
index 7b1a61e..1a54907 100644
--- a/clang/test/SemaOpenACC/combined-construct-reduction-clause.cpp
+++ b/clang/test/SemaOpenACC/combined-construct-reduction-clause.cpp
@@ -39,8 +39,9 @@ void uses(unsigned Parm) {
#pragma acc kernels loop reduction(min: CoS, Array[I], Array[0:I])
for(int i = 0; i < 5; ++i);
- // expected-error@+2{{OpenACC 'reduction' composite variable must not have non-scalar field}}
- // expected-note@#COS_FIELD{{invalid field is here}}
+ // expected-error@+3{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
#pragma acc parallel loop reduction(&: ChC)
for(int i = 0; i < 5; ++i);
@@ -166,4 +167,24 @@ void uses(unsigned Parm) {
#pragma acc parallel loop reduction(&:I)
for(int i = 0; i < 5; ++i);
}
+
+ CompositeHasComposite CoCArr[5];
+ // expected-error@+4{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@+3{{used as element type of array type 'CompositeHasComposite'}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+#pragma acc parallel loop reduction(+:CoCArr)
+ for(int i = 0; i < 5; ++i);
+ // expected-error@+4{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@+3{{used as element type of array type 'CompositeHasComposite[5]'}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+#pragma acc parallel loop reduction(+:CoCArr[3])
+ for(int i = 0; i < 5; ++i);
+ // expected-error@+4{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@+3{{used as element type of sub-array type 'CompositeHasComposite'}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+#pragma acc parallel loop reduction(+:CoCArr[1:1])
+ for(int i = 0; i < 5; ++i);
}
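The reworked diagnostics above walk the offending type chain (array element, sub-array element, composite field) down to the first non-scalar type. A minimal sketch of the two shapes they distinguish, assuming -fopenacc and names mirroring the test's fixtures:

struct CompositeOfScalars { int I; float F; };  // every field scalar: valid reduction variable
struct CompositeHasComposite {
  int I;
  CompositeOfScalars COS;                       // non-scalar field: rejected, with a note chain
};

void sketch() {
  CompositeOfScalars CoS;
  CompositeHasComposite ChC;
#pragma acc parallel loop reduction(+: CoS)     // accepted: composite of scalars
  for (int i = 0; i < 5; ++i);
#pragma acc parallel loop reduction(+: ChC)     // rejected: field 'COS' is not a scalar
  for (int i = 0; i < 5; ++i);
}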
diff --git a/clang/test/SemaOpenACC/compute-construct-intexpr-clause-ast.cpp b/clang/test/SemaOpenACC/compute-construct-intexpr-clause-ast.cpp
index 58476df..63c5cde 100644
--- a/clang/test/SemaOpenACC/compute-construct-intexpr-clause-ast.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-intexpr-clause-ast.cpp
@@ -73,7 +73,7 @@ void NormalUses() {
// CHECK-NEXT: ImplicitCastExpr{{.*}} 'int' <UserDefinedConversion>
// CHECK-NEXT: CXXMemberCallExpr{{.*}}'int'
// CHECK-NEXT: MemberExpr{{.*}} '<bound member function type>' .operator int
- // CHECK-NEXT: DeclRefExpr{{.*}} 'struct CorrectConvert':'CorrectConvert' lvalue Var
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'struct CorrectConvert' lvalue Var
// CHECK-NEXT: WhileStmt
// CHECK-NEXT: CXXBoolLiteralExpr
// CHECK-NEXT: CompoundStmt
diff --git a/clang/test/SemaOpenACC/compute-construct-reduction-clause.c b/clang/test/SemaOpenACC/compute-construct-reduction-clause.c
index 995b6d3..ebced2b 100644
--- a/clang/test/SemaOpenACC/compute-construct-reduction-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-reduction-clause.c
@@ -63,8 +63,9 @@ void uses(unsigned Parm) {
// Vars in a reduction must be a scalar or a composite of scalars.
#pragma acc parallel reduction(&: CoS, I, F)
while (1);
- // expected-error@+2{{OpenACC 'reduction' composite variable must not have non-scalar field}}
- // expected-note@#COS_FIELD{{invalid field is here}}
+ // expected-error@+3{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
#pragma acc parallel reduction(&: ChC)
while (1);
@@ -75,7 +76,10 @@ void uses(unsigned Parm) {
while (1);
struct CompositeHasComposite ChCArray[5];
- // expected-error@+1{{OpenACC 'reduction' variable must be of scalar type, aggregate, sub-array, or a composite of scalar types; sub-array base type is 'struct CompositeHasComposite'}}
+ // expected-error@+4{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@+3{{used as element type of sub-array type 'struct CompositeHasComposite'}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
#pragma acc parallel reduction(&: CoS, Array[I], ChCArray[0:I])
while (1);
diff --git a/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp b/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp
index b40268c..2e1f180 100644
--- a/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp
@@ -66,8 +66,9 @@ void uses(unsigned Parm) {
// Vars in a reduction must be a scalar or a composite of scalars.
#pragma acc parallel reduction(&: CoS, I, F)
while (1);
- // expected-error@+2{{OpenACC 'reduction' composite variable must not have non-scalar field}}
- // expected-note@#COS_FIELD{{invalid field is here}}
+ // expected-error@+3{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
#pragma acc parallel reduction(&: ChC)
while (1);
#pragma acc parallel reduction(&: Array)
@@ -91,6 +92,59 @@ void uses(unsigned Parm) {
// expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
#pragma acc parallel reduction(&: ChCPtr->COS)
while (1);
+
+ CompositeHasComposite CoCArr[5];
+ // expected-error@+4{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@+3{{used as element type of array type 'CompositeHasComposite'}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+#pragma acc parallel reduction(+:CoCArr)
+ while (1);
+ // expected-error@+4{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@+3{{used as element type of array type 'CompositeHasComposite[5]'}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+#pragma acc parallel reduction(+:CoCArr[3])
+ while (1);
+ // expected-error@+4{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@+3{{used as element type of sub-array type 'CompositeHasComposite'}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+#pragma acc parallel reduction(+:CoCArr[1:1])
+ while (1);
+
+ int *IPtr;
+ // expected-error@+2{{invalid type 'int *' used in OpenACC 'reduction' variable reference; type is not a scalar value, or array of scalars, or composite of scalars}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+#pragma acc parallel reduction(+:IPtr)
+ while (1);
+#pragma acc parallel reduction(+:IPtr[1])
+ while (1);
+#pragma acc parallel reduction(+:IPtr[1:1])
+ while (1);
+
+ int *IPtrArr[5];
+ // expected-error@+3{{invalid type 'int *' used in OpenACC 'reduction' variable reference; type is not a scalar value, or array of scalars, or composite of scalars}}
+ // expected-note@+2{{used as element type of array type 'int *'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+#pragma acc parallel reduction(+:IPtrArr)
+ while (1);
+
+ struct HasPtr { int *I; }; // #HASPTR
+ HasPtr HP;
+ // expected-error@+3{{invalid type 'int *' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@#HASPTR{{used as field 'I' of composite 'HasPtr'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+#pragma acc parallel reduction(+:HP)
+ while (1);
+
+ HasPtr HPArr[5];
+ // expected-error@+4{{invalid type 'int *' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@+3{{used as element type of array type 'HasPtr'}}
+ // expected-note@#HASPTR{{used as field 'I' of composite 'HasPtr'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+#pragma acc parallel reduction(+:HPArr)
+ while (1);
}
template<typename T, typename U, typename V>
@@ -135,8 +189,9 @@ void TemplUses(T Parm, U CoS, V ChC) {
// Vars in a reduction must be a scalar or a composite of scalars.
#pragma acc parallel reduction(&: CoS, Var, Parm)
while (1);
- // expected-error@+2{{OpenACC 'reduction' composite variable must not have non-scalar field}}
- // expected-note@#COS_FIELD{{invalid field is here}}
+ // expected-error@+3{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
#pragma acc parallel reduction(&: ChC)
while (1);
#pragma acc parallel reduction(&: Array)
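For the pointer cases added in this file, a hedged summary: a raw pointer is itself rejected as a reduction variable, but a scalar element or sub-array of scalars reached through it is accepted. Sketch, assuming -fopenacc:

void ptr_sketch(int *IPtr) {
  // reduction(+: IPtr) would be rejected: 'int *' is not a scalar value
#pragma acc parallel reduction(+: IPtr[1])      // accepted: scalar element
  while (1);
#pragma acc parallel reduction(+: IPtr[1:1])    // accepted: sub-array of scalars
  while (1);
}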
diff --git a/clang/test/SemaOpenACC/compute-construct-varlist-ast.cpp b/clang/test/SemaOpenACC/compute-construct-varlist-ast.cpp
index e3c4184..2d2b20f 100644
--- a/clang/test/SemaOpenACC/compute-construct-varlist-ast.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-varlist-ast.cpp
@@ -102,7 +102,7 @@ void NormalUses(float *PointerParam) {
#pragma acc parallel copy(GlobalArray) pcopy(always: PointerParam[Global]) present_or_copy(alwaysin, alwaysout: Global)
while(true);
// CHECK-NEXT: OpenACCComputeConstruct{{.*}} parallel
- // CHECK-NEXT: copy clause
+ // CHECK-NEXT: copy clause
// CHECK-NEXT: DeclRefExpr{{.*}}'short[5]' lvalue Var{{.*}}'GlobalArray' 'short[5]'
// CHECK-NEXT: pcopy clause modifiers: always
// CHECK-NEXT: ArraySubscriptExpr{{.*}}'float' lvalue
@@ -649,7 +649,7 @@ void TemplUses(T t, U u, T*PointerParam) {
struct S {
// CHECK-NEXT: CXXRecordDecl{{.*}} struct S definition
- // CHECK: CXXRecordDecl{{.*}} implicit struct S
+ // CHECK: CXXRecordDecl{{.*}} implicit {{.*}}struct S{{$}}
int ThisMember;
// CHECK-NEXT: FieldDecl{{.*}} ThisMember 'int'
int *ThisMemberPtr;
diff --git a/clang/test/SemaOpenACC/gh154008.cpp b/clang/test/SemaOpenACC/gh154008.cpp
new file mode 100644
index 0000000..1ec114c
--- /dev/null
+++ b/clang/test/SemaOpenACC/gh154008.cpp
@@ -0,0 +1,5 @@
+// RUN: %clang_cc1 %s -fopenacc -verify
+
+// expected-error@+2{{expected ';'}}
+// expected-error@+1{{blocks support disabled}}
+void *a = ^ { static int b };
diff --git a/clang/test/SemaOpenACC/init-construct.cpp b/clang/test/SemaOpenACC/init-construct.cpp
index abc7f74..d553589 100644
--- a/clang/test/SemaOpenACC/init-construct.cpp
+++ b/clang/test/SemaOpenACC/init-construct.cpp
@@ -34,6 +34,12 @@ void uses() {
// expected-error@+2{{OpenACC integer expression requires explicit conversion from 'struct ExplicitConvertOnly' to 'int'}}
// expected-note@#EXPL_CONV{{conversion to integral type 'int'}}
#pragma acc init device_num(Explicit)
+
+ // expected-error@+1{{OpenACC 'device_type' clause on a 'init' construct only permits one architecture}}
+#pragma acc init device_type(nvidia, radeon)
+
+ // expected-error@+1{{OpenACC 'device_type' clause on a 'init' construct only permits one architecture}}
+#pragma acc init device_type(nonsense, nvidia, radeon)
}
template<typename T>
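The new single-architecture restriction applies to both 'init' and, in the mirrored hunk below, 'shutdown'. A sketch of the accepted form, assuming -fopenacc:

void single_arch() {
#pragma acc init device_type(nvidia)            // accepted: exactly one architecture
#pragma acc shutdown device_type(radeon)        // accepted: exactly one architecture
}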
diff --git a/clang/test/SemaOpenACC/loop-construct-reduction-clause.cpp b/clang/test/SemaOpenACC/loop-construct-reduction-clause.cpp
index 00bcd74..4e88516 100644
--- a/clang/test/SemaOpenACC/loop-construct-reduction-clause.cpp
+++ b/clang/test/SemaOpenACC/loop-construct-reduction-clause.cpp
@@ -42,8 +42,9 @@ void uses() {
#pragma acc serial
{
- // expected-error@+2{{OpenACC 'reduction' composite variable must not have non-scalar field}}
- // expected-note@#COS_FIELD{{invalid field is here}}
+ // expected-error@+3{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
#pragma acc loop reduction(+:ChC)
for(int i = 0; i < 5; ++i){}
}
@@ -153,6 +154,26 @@ void uses() {
for(int i = 0; i < 5; ++i) {
}
}
+
+ CompositeHasComposite CoCArr[5];
+ // expected-error@+4{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@+3{{used as element type of array type 'CompositeHasComposite'}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+#pragma acc loop reduction(+:CoCArr)
+ for(int i = 0; i < 5; ++i);
+ // expected-error@+4{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@+3{{used as element type of array type 'CompositeHasComposite[5]'}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+#pragma acc loop reduction(+:CoCArr[3])
+ for(int i = 0; i < 5; ++i);
+ // expected-error@+4{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@+3{{used as element type of sub-array type 'CompositeHasComposite'}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+#pragma acc loop reduction(+:CoCArr[1:1])
+ for(int i = 0; i < 5; ++i);
}
template<typename IntTy, typename CoSTy, typename ChCTy, unsigned One,
@@ -177,8 +198,9 @@ void templ_uses() {
#pragma acc serial
{
- // expected-error@+2{{OpenACC 'reduction' composite variable must not have non-scalar field}}
- // expected-note@#COS_FIELD{{invalid field is here}}
+ // expected-error@+3{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
+ // expected-note@+1{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
#pragma acc loop reduction(+:ChC)
for(int i = 0; i < 5; ++i){}
}
diff --git a/clang/test/SemaOpenACC/shutdown-construct.cpp b/clang/test/SemaOpenACC/shutdown-construct.cpp
index 95cea90..e08a968 100644
--- a/clang/test/SemaOpenACC/shutdown-construct.cpp
+++ b/clang/test/SemaOpenACC/shutdown-construct.cpp
@@ -34,6 +34,12 @@ void uses() {
// expected-error@+2{{OpenACC integer expression requires explicit conversion from 'struct ExplicitConvertOnly' to 'int'}}
// expected-note@#EXPL_CONV{{conversion to integral type 'int'}}
#pragma acc shutdown device_num(Explicit)
+
+ // expected-error@+1{{OpenACC 'device_type' clause on a 'shutdown' construct only permits one architecture}}
+#pragma acc shutdown device_type(nvidia, radeon)
+
+ // expected-error@+1{{OpenACC 'device_type' clause on a 'shutdown' construct only permits one architecture}}
+#pragma acc shutdown device_type(nonsense, nvidia, radeon)
}
template<typename T>
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
index 8f34ccc..4a28f9a 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
@@ -23,6 +23,11 @@ void test_setprio_inc_wg(short a) {
__builtin_amdgcn_s_setprio_inc_wg(a); // expected-error {{'__builtin_amdgcn_s_setprio_inc_wg' must be a constant integer}}
}
+void test_bitop3_args(global uint* out, uint a, uint b, uint c) {
+ *out = __builtin_amdgcn_bitop3_b32(a, b, c, a); // expected-error {{argument to '__builtin_amdgcn_bitop3_b32' must be a constant integer}}
+ *out = __builtin_amdgcn_bitop3_b16((ushort)a, (ushort)b, (ushort)c, a); // expected-error {{argument to '__builtin_amdgcn_bitop3_b16' must be a constant integer}}
+}
+
void test_s_monitor_sleep(short a) {
__builtin_amdgcn_s_monitor_sleep(a); // expected-error {{'__builtin_amdgcn_s_monitor_sleep' must be a constant integer}}
}
@@ -43,6 +48,12 @@ void test__builtin_amdgcn_cvt_f16_bf8(int a, int b) {
__builtin_amdgcn_cvt_f16_bf8(a, b); // expected-error {{'__builtin_amdgcn_cvt_f16_bf8' must be a constant integer}}
}
+void test_cvt_sr_f8_f16(global int* out, uint sr, int old, int sel)
+{
+ *out = __builtin_amdgcn_cvt_sr_bf8_f16(1.0, sr, old, sel); // expected-error {{'__builtin_amdgcn_cvt_sr_bf8_f16' must be a constant integer}}
+ *out = __builtin_amdgcn_cvt_sr_fp8_f16(1.0, sr, old, sel); // expected-error {{'__builtin_amdgcn_cvt_sr_fp8_f16' must be a constant integer}}
+}
+
void test_cvt_scale_pk(global half8 *outh8, global bfloat8 *outy8, uint2 src2,
global float32 *outf32, global half16 *outh16, global bfloat16 *outy16,
global float16 *outf16, uint3 src3,
@@ -92,6 +103,34 @@ void test_amdgcn_load_monitor(global int* b32gaddr, global v2i* b64gaddr, global
*b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, cpol); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b128' must be a constant integer}}
}
+void test_amdgcn_async_load_store_lds_offset(global char* gaddr8, global int *gaddr32, global v2i* gaddr64, global v4i* gaddr128, local char* laddr8,
+ local int *laddr32, local v2i* laddr64, local v4i* laddr128, int offset, int mask)
+{
+ __builtin_amdgcn_global_store_async_from_lds_b8(gaddr8, laddr8, offset, 0); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b8' must be a constant integer}}
+ __builtin_amdgcn_global_store_async_from_lds_b32(gaddr32, laddr32, offset, 0); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b32' must be a constant integer}}
+ __builtin_amdgcn_global_store_async_from_lds_b64(gaddr64, laddr64, offset, 0); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b64' must be a constant integer}}
+ __builtin_amdgcn_global_store_async_from_lds_b128(gaddr128, laddr128, offset, 0); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b128' must be a constant integer}}
+
+ __builtin_amdgcn_global_store_async_from_lds_b8(gaddr8, laddr8, offset, 0); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b8' must be a constant integer}}
+ __builtin_amdgcn_global_store_async_from_lds_b32(gaddr32, laddr32, offset, 0); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b32' must be a constant integer}}
+ __builtin_amdgcn_global_store_async_from_lds_b64(gaddr64, laddr64, offset, 0); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b64' must be a constant integer}}
+ __builtin_amdgcn_global_store_async_from_lds_b128(gaddr128, laddr128, offset, 0); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b128' must be a constant integer}}
+}
+
+void test_amdgcn_async_load_store_lds_cpol(global char* gaddr8, global int *gaddr32, global v2i* gaddr64, global v4i* gaddr128, local char* laddr8,
+ local int *laddr32, local v2i* laddr64, local v4i* laddr128, int cpol, int mask)
+{
+ __builtin_amdgcn_global_store_async_from_lds_b8(gaddr8, laddr8, 16, cpol); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b8' must be a constant integer}}
+ __builtin_amdgcn_global_store_async_from_lds_b32(gaddr32, laddr32, 16, cpol); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b32' must be a constant integer}}
+ __builtin_amdgcn_global_store_async_from_lds_b64(gaddr64, laddr64, 16, cpol); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b64' must be a constant integer}}
+ __builtin_amdgcn_global_store_async_from_lds_b128(gaddr128, laddr128, 16, cpol); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b128' must be a constant integer}}
+
+ __builtin_amdgcn_global_store_async_from_lds_b8(gaddr8, laddr8, 16, cpol); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b8' must be a constant integer}}
+ __builtin_amdgcn_global_store_async_from_lds_b32(gaddr32, laddr32, 16, cpol); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b32' must be a constant integer}}
+ __builtin_amdgcn_global_store_async_from_lds_b64(gaddr64, laddr64, 16, cpol); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b64' must be a constant integer}}
+ __builtin_amdgcn_global_store_async_from_lds_b128(gaddr128, laddr128, 16, cpol); // expected-error {{'__builtin_amdgcn_global_store_async_from_lds_b128' must be a constant integer}}
+}
+
void test_amdgcn_tensor_load_store(v4i sg0, v8i sg1, v4i sg2, v4i sg3, int cpol)
{
__builtin_amdgcn_tensor_load_to_lds(sg0, sg1, sg2, sg3, cpol); // expected-error {{'__builtin_amdgcn_tensor_load_to_lds' must be a constant integer}}
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250.cl
index c5440ed..d7045cd 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250.cl
@@ -1,6 +1,11 @@
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-- -target-cpu gfx1200 -verify -S -o - %s
-void test() {
+typedef unsigned int uint;
+typedef unsigned short int ushort;
+
+void test(global uint* out, uint a, uint b, uint c) {
__builtin_amdgcn_s_setprio_inc_wg(1); // expected-error {{'__builtin_amdgcn_s_setprio_inc_wg' needs target feature setprio-inc-wg-inst}}
+ *out = __builtin_amdgcn_bitop3_b32(a, b, c, 1); // expected-error {{'__builtin_amdgcn_bitop3_b32' needs target feature bitop3-insts}}
+ *out = __builtin_amdgcn_bitop3_b16((ushort)a, (ushort)b, (ushort)c, 1); // expected-error {{'__builtin_amdgcn_bitop3_b16' needs target feature bitop3-insts}}
}
diff --git a/clang/test/SemaSYCL/sycl-external-attr-appertainment.cpp b/clang/test/SemaSYCL/sycl-external-attr-appertainment.cpp
new file mode 100644
index 0000000..d06c9c9
--- /dev/null
+++ b/clang/test/SemaSYCL/sycl-external-attr-appertainment.cpp
@@ -0,0 +1,36 @@
+// RUN: %clang_cc1 -fsycl-is-host -fsyntax-only -std=c++17 -verify %s
+// RUN: %clang_cc1 -fsycl-is-device -fsyntax-only -std=c++17 -verify %s
+// RUN: %clang_cc1 -fsycl-is-host -fsyntax-only -std=c++20 -verify %s
+// RUN: %clang_cc1 -fsycl-is-device -fsyntax-only -std=c++20 -verify %s
+// RUN: %clang_cc1 -fsycl-is-host -fsyntax-only -std=c++23 -verify %s
+// RUN: %clang_cc1 -fsycl-is-device -fsyntax-only -std=c++23 -verify %s
+
+// expected-error@+1{{'clang::sycl_external' attribute only applies to functions}}
+[[clang::sycl_external]] int bad1;
+
+
+// expected-error@+2{{'clang::sycl_external' attribute only applies to functions}}
+struct s {
+[[clang::sycl_external]] int bad2;
+};
+
+// expected-error@+1{{'clang::sycl_external' attribute only applies to functions}}
+namespace [[clang::sycl_external]] bad3 {}
+
+// expected-error@+1{{'clang::sycl_external' attribute only applies to functions}}
+struct [[clang::sycl_external]] bad4;
+
+// expected-error@+1{{'clang::sycl_external' attribute only applies to functions}}
+enum [[clang::sycl_external]] bad5 {};
+
+// expected-error@+1{{'clang::sycl_external' attribute only applies to functions}}
+int bad6(void (fp [[clang::sycl_external]])());
+
+// expected-error@+1{{'clang::sycl_external' attribute only applies to functions}}
+[[clang::sycl_external]];
+
+#if __cplusplus >= 202002L
+// expected-error@+2{{'clang::sycl_external' attribute only applies to functions}}
+template<typename>
+concept bad8 [[clang::sycl_external]] = true;
+#endif
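For contrast with bad1 through bad8 above, a sketch (not part of the test) of where the attribute does appertain, assuming a SYCL compilation mode:

[[clang::sycl_external]] void good1();          // accepted: function declaration
[[clang::sycl_external]] int good2(int x) { return x; }  // accepted: function definition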
diff --git a/clang/test/SemaSYCL/sycl-external-attr-grammar.cpp b/clang/test/SemaSYCL/sycl-external-attr-grammar.cpp
new file mode 100644
index 0000000..a016985
--- /dev/null
+++ b/clang/test/SemaSYCL/sycl-external-attr-grammar.cpp
@@ -0,0 +1,14 @@
+// RUN: %clang_cc1 -fsycl-is-host -fsyntax-only -verify %s
+// RUN: %clang_cc1 -fsycl-is-device -fsyntax-only -verify %s
+
+// FIXME-expected-error@+1{{'clang::sycl_external' attribute takes no arguments}}
+[[clang::sycl_external()]] void bad1();
+
+// expected-error@+1{{expected expression}}
+[[clang::sycl_external(,)]] void bad2();
+
+// expected-error@+1{{'clang::sycl_external' attribute takes no arguments}}
+[[clang::sycl_external(3)]] void bad3();
+
+// expected-error@+1{{expected expression}}
+[[clang::sycl_external(4,)]] void bad4();
diff --git a/clang/test/SemaSYCL/sycl-external-attr-ignored.cpp b/clang/test/SemaSYCL/sycl-external-attr-ignored.cpp
new file mode 100644
index 0000000..211b3f1
--- /dev/null
+++ b/clang/test/SemaSYCL/sycl-external-attr-ignored.cpp
@@ -0,0 +1,15 @@
+// RUN: %clang_cc1 -fsyntax-only -verify %s
+
+// These tests validate that the sycl_external attribute is ignored when SYCL
+// support is not enabled.
+
+// expected-warning@+1{{'clang::sycl_external' attribute ignored}}
+[[clang::sycl_external]] void bar() {}
+
+// expected-warning@+1{{'clang::sycl_external' attribute ignored}}
+[[clang::sycl_external]] int a;
+
+// expected-warning@+2{{'clang::sycl_external' attribute ignored}}
+template<typename T>
+[[clang::sycl_external]] void ft(T) {}
+template void ft(int);
diff --git a/clang/test/SemaSYCL/sycl-external-attr.cpp b/clang/test/SemaSYCL/sycl-external-attr.cpp
new file mode 100644
index 0000000..ebda94e
--- /dev/null
+++ b/clang/test/SemaSYCL/sycl-external-attr.cpp
@@ -0,0 +1,154 @@
+// RUN: %clang_cc1 -fsycl-is-host -std=c++17 -fsyntax-only -verify %s
+// RUN: %clang_cc1 -fsycl-is-device -std=c++17 -fsyntax-only -verify %s
+// RUN: %clang_cc1 -fsycl-is-host -std=c++20 -fsyntax-only -verify -DCPP20 %s
+// RUN: %clang_cc1 -fsycl-is-device -std=c++20 -fsyntax-only -verify -DCPP20 %s
+
+// Semantic tests for the sycl_external attribute.
+
+// expected-error@+1{{'clang::sycl_external' can only be applied to functions with external linkage}}
+[[clang::sycl_external]]
+static void func1() {}
+
+// expected-error@+2{{'clang::sycl_external' can only be applied to functions with external linkage}}
+namespace {
+ [[clang::sycl_external]]
+ void func2() {}
+}
+
+// expected-error@+2{{'clang::sycl_external' can only be applied to functions with external linkage}}
+namespace { struct S4 {}; }
+[[clang::sycl_external]] void func4(S4) {}
+
+// expected-error@+3{{'clang::sycl_external' can only be applied to functions with external linkage}}
+namespace { struct S5 {}; }
+template<typename> [[clang::sycl_external]] void func5();
+template<> [[clang::sycl_external]] void func5<S5>() {}
+
+namespace { struct S6 {}; }
+template<typename>
+[[clang::sycl_external]] void func6() {}
+template void func6<S6>();
+
+// FIXME: C++23 [temp.expl.spec]p12 states:
+// ... Similarly, attributes appearing in the declaration of a template
+// have no effect on an explicit specialization of that template.
+// Clang currently instantiates and propagates attributes from a function
+// template to its explicit specializations, resulting in the following
+// spurious error.
+// expected-error@+3{{'clang::sycl_external' can only be applied to functions with external linkage}}
+namespace { struct S7 {}; }
+template<typename>
+[[clang::sycl_external]] void func7();
+template<> void func7<S7>() {}
+
+// FIXME: The explicit function template specialization appears to trigger
+// instantiation of a declaration from the primary template without the
+// attribute, leading to a spurious diagnostic that the sycl_external
+// attribute is not present on the first declaration.
+namespace { struct S8 {}; }
+template<typename>
+void func8();
+template<> [[clang::sycl_external]] void func8<S8>() {}
+// expected-warning@-1{{'clang::sycl_external' attribute does not appear on the first declaration}}
+// expected-error@-2{{'clang::sycl_external' can only be applied to functions with external linkage}}
+// expected-note@-3{{previous declaration is here}}
+
+namespace { struct S9 {}; }
+struct T9 {
+ using type = S9;
+};
+template<typename>
+[[clang::sycl_external]] void func9() {}
+template<typename T>
+[[clang::sycl_external]] void test_func9() {
+ func9<typename T::type>();
+}
+template void test_func9<T9>();
+
+// The first declaration of a SYCL external function is required to have this attribute.
+// expected-note@+1{{previous declaration is here}}
+int foo();
+// expected-warning@+1{{'clang::sycl_external' attribute does not appear on the first declaration}}
+[[clang::sycl_external]] int foo();
+
+// expected-note@+1{{previous declaration is here}}
+void goo();
+// expected-warning@+1{{'clang::sycl_external' attribute does not appear on the first declaration}}
+[[clang::sycl_external]] void goo();
+void goo() {}
+
+// expected-note@+1{{previous declaration is here}}
+void hoo() {}
+// expected-warning@+1{{'clang::sycl_external' attribute does not appear on the first declaration}}
+[[clang::sycl_external]] void hoo();
+
+// expected-note@+1{{previous declaration is here}}
+void joo();
+void use_joo() {
+ joo();
+}
+// expected-warning@+1{{'clang::sycl_external' attribute does not appear on the first declaration}}
+[[clang::sycl_external]] void joo();
+
+// Subsequent declarations of a SYCL external function may optionally specify this attribute.
+[[clang::sycl_external]] int boo();
+[[clang::sycl_external]] int boo(); // OK
+int boo(); // OK
+
+class C {
+ [[clang::sycl_external]] void member();
+};
+
+// expected-error@+1{{'clang::sycl_external' cannot be applied to the 'main' function}}
+[[clang::sycl_external]] int main()
+{
+ return 0;
+}
+
+// expected-error@+2{{'clang::sycl_external' cannot be applied to an explicitly deleted function}}
+class D {
+ [[clang::sycl_external]] void mdel() = delete;
+};
+
+// expected-error@+1{{'clang::sycl_external' cannot be applied to an explicitly deleted function}}
+[[clang::sycl_external]] void del() = delete;
+
+struct NonCopyable {
+ ~NonCopyable() = delete;
+ [[clang::sycl_external]] NonCopyable(const NonCopyable&) = default;
+};
+
+class A {
+ [[clang::sycl_external]]
+ A() {}
+
+ [[clang::sycl_external]] void mf() {}
+ [[clang::sycl_external]] static void smf();
+};
+
+class B {
+public:
+ [[clang::sycl_external]] virtual void foo() {}
+
+ [[clang::sycl_external]] virtual void bar() = 0;
+};
+[[clang::sycl_external]] void B::bar() {}
+
+[[clang::sycl_external]] constexpr int square(int x);
+
+// For devices that do not support the generic address space, a SYCL external
+// function shall not specify a raw pointer or reference type as the return
+// type or as a parameter type.
+[[clang::sycl_external]] int *fun0();
+[[clang::sycl_external]] int &fun1();
+[[clang::sycl_external]] int &&fun2();
+[[clang::sycl_external]] void fun3(int *);
+[[clang::sycl_external]] void fun4(int &);
+[[clang::sycl_external]] void fun5(int &&);
+template<typename T>
+[[clang::sycl_external]] void fun6(T) {}
+template void fun6(int *);
+template<> [[clang::sycl_external]] void fun6<long*>(long *) {}
+
+#if CPP20
+[[clang::sycl_external]] consteval int func();
+#endif
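A compact restatement of the two rules most of this file exercises, as a sketch under the same RUN lines:

[[clang::sycl_external]] void entry();  // first declaration carries the attribute
void entry();                           // OK: a redeclaration may omit it
void entry() {}                         // OK: the definition may omit it too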
diff --git a/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-appertainment.cpp b/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-appertainment.cpp
index 4774c8e..9aba284 100644
--- a/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-appertainment.cpp
+++ b/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-appertainment.cpp
@@ -360,3 +360,16 @@ void bad38() try {} catch(...) {}
template<typename>
[[clang::sycl_kernel_entry_point(BADKN<39>)]]
void bad39() try {} catch(...) {}
+
+// expected-error@+1 {{'clang::sycl_kernel_entry_point' attribute only applies to functions}}
+[[clang::sycl_kernel_entry_point(BADKN<40>)]];
+
+void bad41() {
+ // expected-error@+1 {{'clang::sycl_kernel_entry_point' attribute cannot be applied to a statement}}
+ [[clang::sycl_kernel_entry_point(BADKN<41>)]];
+}
+
+struct B42 {
+ // expected-warning@+1 {{declaration does not declare anything}}
+ [[clang::sycl_kernel_entry_point(BADKN<42>)]];
+};
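For contrast with bad40 through bad42, a hedged sketch of a placement the attribute accepts; 'GoodKN' is a hypothetical kernel-name type, not from the test:

struct GoodKN {};
[[clang::sycl_kernel_entry_point(GoodKN)]] void ok_entry() {}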
diff --git a/clang/test/SemaTemplate/aggregate-deduction-candidate.cpp b/clang/test/SemaTemplate/aggregate-deduction-candidate.cpp
index 2ecd269..bf153b2 100644
--- a/clang/test/SemaTemplate/aggregate-deduction-candidate.cpp
+++ b/clang/test/SemaTemplate/aggregate-deduction-candidate.cpp
@@ -15,7 +15,7 @@ namespace Basic {
// CHECK-LABEL: Dumping Basic::<deduction guide for A>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for A>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced class depth 0 index 0 T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T, T) -> A<T>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T, T) -> Basic::A<T>'
// CHECK: | |-ParmVarDecl {{.*}} 'T'
// CHECK: | `-ParmVarDecl {{.*}} 'T'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for A> 'auto (double, double) -> Basic::A<double>'
@@ -23,8 +23,8 @@ namespace Basic {
// CHECK: | `-BuiltinType {{.*}} 'double'
// CHECK: |-ParmVarDecl {{.*}} 'double'
// CHECK: `-ParmVarDecl {{.*}} 'double'
- // CHECK: FunctionProtoType {{.*}} 'auto (T, T) -> A<T>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'A<T>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (T, T) -> Basic::A<T>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Basic::A<T>' dependent
// CHECK: | `-CXXRecord {{.*}} 'A'
// CHECK: |-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
// CHECK: | `-TemplateTypeParm {{.*}} 'T'
@@ -66,7 +66,7 @@ namespace Basic {
// CHECK-LABEL: Dumping Basic::<deduction guide for C>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for C>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for C> 'auto (S<T>, T) -> C<T>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for C> 'auto (S<T>, T) -> Basic::C<T>'
// CHECK: | |-ParmVarDecl {{.*}} 'S<T>'
// CHECK: | `-ParmVarDecl {{.*}} 'T'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for C> 'auto (S<int>, int) -> Basic::C<int>'
@@ -74,25 +74,24 @@ namespace Basic {
// CHECK: | `-BuiltinType {{.*}} 'int'
// CHECK: |-ParmVarDecl {{.*}} 'S<int>':'Basic::S<int>'
// CHECK: `-ParmVarDecl {{.*}} 'int'
- // CHECK: FunctionProtoType {{.*}} 'auto (S<T>, T) -> C<T>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'C<T>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (S<T>, T) -> Basic::C<T>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Basic::C<T>' dependent
// CHECK: | `-CXXRecord {{.*}} 'C'
- // CHECK: |-ElaboratedType {{.*}} 'S<T>' sugar dependent
- // CHECK: | `-TemplateSpecializationType {{.*}} 'S<T>' dependent
- // CHECK: | `-TemplateArgument type 'T'
- // CHECK: | `-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
- // CHECK: | `-TemplateTypeParm {{.*}} 'T'
+ // CHECK: |-TemplateSpecializationType {{.*}} 'S<T>' dependent
+ // CHECK: | `-TemplateArgument type 'T'
+ // CHECK: | `-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
+ // CHECK: | `-TemplateTypeParm {{.*}} 'T'
// CHECK: `-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
// CHECK: `-TemplateTypeParm {{.*}} 'T'
// CHECK-LABEL: Dumping Basic::<deduction guide for D>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for D>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
- // CHECK: `-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for D> 'auto (int, int) -> D<T>'
+ // CHECK: `-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for D> 'auto (int, int) -> Basic::D<T>'
// CHECK: |-ParmVarDecl {{.*}} 'int'
// CHECK: `-ParmVarDecl {{.*}} 'int'
- // CHECK: FunctionProtoType {{.*}} 'auto (int, int) -> D<T>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'D<T>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (int, int) -> Basic::D<T>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Basic::D<T>' dependent
// CHECK: | `-CXXRecord {{.*}} 'D'
// CHECK: |-SubstTemplateTypeParmType {{.*}} 'int' sugar typename depth 0 index 0 T
// CHECK: | |-ClassTemplateSpecialization {{.*}} 'S'
@@ -113,7 +112,7 @@ namespace Basic {
// CHECK-LABEL: Dumping Basic::<deduction guide for E>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for E>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for E> 'auto (T, decltype(t)) -> E<T>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for E> 'auto (T, decltype(t)) -> Basic::E<T>'
// CHECK: | |-ParmVarDecl {{.*}} 'T'
// CHECK: | `-ParmVarDecl {{.*}} 'decltype(t)'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for E> 'auto (int, decltype(t)) -> Basic::E<int>'
@@ -121,8 +120,8 @@ namespace Basic {
// CHECK: | `-BuiltinType {{.*}} 'int'
// CHECK: |-ParmVarDecl {{.*}} 'int'
// CHECK: `-ParmVarDecl {{.*}} 'decltype(t)':'int'
- // CHECK: FunctionProtoType {{.*}} 'auto (T, decltype(t)) -> E<T>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'E<T>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (T, decltype(t)) -> Basic::E<T>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Basic::E<T>' dependent
// CHECK: | `-CXXRecord {{.*}} 'E'
// CHECK: |-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
// CHECK: | `-TemplateTypeParm {{.*}} 'T'
@@ -145,7 +144,7 @@ namespace Basic {
// CHECK-LABEL: Dumping Basic::<deduction guide for F>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for F>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for F> 'auto (typename I<T>::type, T) -> F<T>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for F> 'auto (typename I<T>::type, T) -> Basic::F<T>'
// CHECK: | |-ParmVarDecl {{.*}} 'typename I<T>::type'
// CHECK: | `-ParmVarDecl {{.*}} 'T'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for F> 'auto (typename I<int>::type, int) -> Basic::F<int>'
@@ -153,8 +152,8 @@ namespace Basic {
// CHECK: | `-BuiltinType {{.*}} 'int'
// CHECK: |-ParmVarDecl {{.*}} 'typename I<int>::type':'int'
// CHECK: `-ParmVarDecl {{.*}} 'int'
- // CHECK: FunctionProtoType {{.*}} 'auto (typename I<T>::type, T) -> F<T>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'F<T>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (typename I<T>::type, T) -> Basic::F<T>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Basic::F<T>' dependent
// CHECK: | `-CXXRecord {{.*}} 'F'
// CHECK: |-DependentNameType {{.*}} 'typename I<T>::type' dependent
// CHECK: `-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
@@ -181,15 +180,15 @@ namespace Array {
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for A>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
// CHECK: |-NonTypeTemplateParmDecl {{.*}} 'size_t':'unsigned {{.*}}' depth 0 index 1 N
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T (&&)[N]) -> A<T, N>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T (&&)[N]) -> Array::A<T, N>'
// CHECK: | `-ParmVarDecl {{.*}} 'T (&&)[N]'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for A> 'auto (int (&&)[3]) -> Array::A<int, 3>'
// CHECK: |-TemplateArgument type 'int'
// CHECK: | `-BuiltinType {{.*}} 'int'
// CHECK: |-TemplateArgument integral '3UL'
// CHECK: `-ParmVarDecl {{.*}} 'int (&&)[3]'
- // CHECK: FunctionProtoType {{.*}} 'auto (T (&&)[N]) -> A<T, N>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'A<T, N>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (T (&&)[N]) -> Array::A<T, N>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Array::A<T, N>' dependent
// CHECK: | `-CXXRecord {{.*}} 'A'
// CHECK: `-RValueReferenceType {{.*}} 'T (&&)[N]' dependent
// CHECK: `-DependentSizedArrayType {{.*}} 'T[N]' dependent
@@ -201,15 +200,15 @@ namespace Array {
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for A>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
// CHECK: |-NonTypeTemplateParmDecl {{.*}} 'size_t':'unsigned {{.*}}' depth 0 index 1 N
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (const T (&)[N]) -> A<T, N>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (const T (&)[N]) -> Array::A<T, N>'
// CHECK: | `-ParmVarDecl {{.*}} 'const T (&)[N]'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for A> 'auto (const char (&)[5]) -> Array::A<char, 5>'
// CHECK: |-TemplateArgument type 'char'
// CHECK: | `-BuiltinType {{.*}} 'char'
// CHECK: |-TemplateArgument integral '5UL'
// CHECK: `-ParmVarDecl {{.*}} 'const char (&)[5]'
- // CHECK: FunctionProtoType {{.*}} 'auto (const T (&)[N]) -> A<T, N>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'A<T, N>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (const T (&)[N]) -> Array::A<T, N>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Array::A<T, N>' dependent
// CHECK: | `-CXXRecord {{.*}} 'A'
// CHECK: `-LValueReferenceType {{.*}} 'const T (&)[N]' dependent
// CHECK: `-QualType {{.*}} 'const T[N]' const
@@ -232,7 +231,7 @@ namespace BraceElision {
// CHECK-LABEL: Dumping BraceElision::<deduction guide for A>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for A>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T, T) -> A<T>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T, T) -> BraceElision::A<T>'
// CHECK: | |-ParmVarDecl {{.*}} 'T'
// CHECK: | `-ParmVarDecl {{.*}} 'T'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for A> 'auto (int, int) -> BraceElision::A<int>'
@@ -240,8 +239,8 @@ namespace BraceElision {
// CHECK: | `-BuiltinType {{.*}} 'int'
// CHECK: |-ParmVarDecl {{.*}} 'int'
// CHECK: `-ParmVarDecl {{.*}} 'int'
- // CHECK: FunctionProtoType {{.*}} 'auto (T, T) -> A<T>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'A<T>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (T, T) -> BraceElision::A<T>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'BraceElision::A<T>' dependent
// CHECK: | `-CXXRecord {{.*}} 'A'
// CHECK: |-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
// CHECK: | `-TemplateTypeParm {{.*}} 'T'
@@ -266,7 +265,7 @@ namespace TrailingPack {
// CHECK-LABEL: Dumping TrailingPack::<deduction guide for A>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for A>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 ... T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T...) -> A<T...>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T...) -> TrailingPack::A<T...>'
// CHECK: | `-ParmVarDecl {{.*}} 'T...' pack
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for A>
// CHECK-SAME: 'auto (TrailingPack::(lambda at {{.*}}), TrailingPack::(lambda at {{.*}})) ->
@@ -274,14 +273,14 @@ namespace TrailingPack {
// CHECK: |-TemplateArgument pack
// CHECK: | |-TemplateArgument type 'TrailingPack::(lambda at {{.*}})'
// CHECK: | | `-RecordType {{.*}} 'TrailingPack::(lambda at {{.*}})'
- // CHECK: | | `-CXXRecord {{.*}} <line:262:5>
+ // CHECK: | | `-CXXRecord {{.*}} <line:261:5>
// CHECK: | `-TemplateArgument type 'TrailingPack::(lambda at {{.*}})'
// CHECK: | `-RecordType {{.*}} 'TrailingPack::(lambda at {{.*}})'
- // CHECK: | `-CXXRecord {{.*}} <line:263:5>
+ // CHECK: | `-CXXRecord {{.*}} <line:262:5>
// CHECK: |-ParmVarDecl {{.*}} 'TrailingPack::(lambda at {{.*}})'
// CHECK: `-ParmVarDecl {{.*}} 'TrailingPack::(lambda at {{.*}})'
- // CHECK: FunctionProtoType {{.*}} 'auto (T...) -> A<T...>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'A<T...>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (T...) -> TrailingPack::A<T...>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'TrailingPack::A<T...>' dependent
// CHECK: | `-CXXRecord {{.*}} 'A'
// CHECK: `-PackExpansionType {{.*}} 'T...' dependent
// CHECK: `-TemplateTypeParmType {{.*}} 'T' dependent contains_unexpanded_pack depth 0 index 0 pack
@@ -327,7 +326,7 @@ namespace DeduceArity {
// CHECK-LABEL: Dumping DeduceArity::<deduction guide for F>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for F>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 ... T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for F> 'auto (Types<T...>, T...) -> F<T...>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for F> 'auto (Types<T...>, T...) -> DeduceArity::F<T...>'
// CHECK: | |-ParmVarDecl {{.*}} 'Types<T...>'
// CHECK: | `-ParmVarDecl {{.*}} 'T...' pack
// CHECK: |-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for F>
@@ -354,15 +353,14 @@ namespace DeduceArity {
// CHECK: | `-CXXRecord {{.*}} 'X'
// CHECK: |-ParmVarDecl {{.*}} 'Types<DeduceArity::X>':'DeduceArity::Types<DeduceArity::X>'
// CHECK: `-ParmVarDecl {{.*}} 'DeduceArity::X'
- // CHECK: FunctionProtoType {{.*}} 'auto (Types<T...>, T...) -> F<T...>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'F<T...>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (Types<T...>, T...) -> DeduceArity::F<T...>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'DeduceArity::F<T...>' dependent
// CHECK: | `-CXXRecord {{.*}} 'F'
- // CHECK: |-ElaboratedType {{.*}} 'Types<T...>' sugar dependent
- // CHECK: | `-TemplateSpecializationType {{.*}} 'Types<T...>' dependent
- // CHECK: | `-TemplateArgument type 'T...'
- // CHECK: | `-PackExpansionType {{.*}} 'T...' dependent
- // CHECK: | `-TemplateTypeParmType {{.*}} 'T' dependent contains_unexpanded_pack depth 0 index 0 pack
- // CHECK: | `-TemplateTypeParm {{.*}} 'T'
+ // CHECK: |-TemplateSpecializationType {{.*}} 'Types<T...>' dependent
+ // CHECK: | `-TemplateArgument type 'T...'
+ // CHECK: | `-PackExpansionType {{.*}} 'T...' dependent
+ // CHECK: | `-TemplateTypeParmType {{.*}} 'T' dependent contains_unexpanded_pack depth 0 index 0 pack
+ // CHECK: | `-TemplateTypeParm {{.*}} 'T'
// CHECK: `-PackExpansionType {{.*}} 'T...' dependent
// CHECK: `-TemplateTypeParmType {{.*}} 'T' dependent contains_unexpanded_pack depth 0 index 0 pack
// CHECK: `-TemplateTypeParm {{.*}} 'T'
diff --git a/clang/test/SemaTemplate/class-template-ctor-initializer.cpp b/clang/test/SemaTemplate/class-template-ctor-initializer.cpp
index 6dae207..43a3986 100644
--- a/clang/test/SemaTemplate/class-template-ctor-initializer.cpp
+++ b/clang/test/SemaTemplate/class-template-ctor-initializer.cpp
@@ -4,8 +4,8 @@
template<class X> struct A {};
-template<class X> struct B : A<X> {
- B() : A<X>() {}
+template<class X> struct B : A<X> {
+ B() : A<X>() {}
};
B<int> x;
@@ -76,3 +76,12 @@ namespace NonDependentError {
Derived1<void> d1;
Derived2<void> d2;
}
+
+namespace UnresolvedUsing {
+ template <class T> class A {
+ using typename T::B;
+ struct C : B {
+ C() : B() {}
+ };
+ };
+} // namespace UnresolvedUsing
diff --git a/clang/test/SemaTemplate/ctad.cpp b/clang/test/SemaTemplate/ctad.cpp
index 1bf605f..1a575ea 100644
--- a/clang/test/SemaTemplate/ctad.cpp
+++ b/clang/test/SemaTemplate/ctad.cpp
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -std=c++17 -verify %s
+// RUN: %clang_cc1 -std=c++17 -verify=expected,cxx17 %s
+// RUN: %clang_cc1 -std=c++20 -verify=expected,cxx20 %s
namespace pr41427 {
template <typename T> class A {
@@ -21,9 +22,9 @@ namespace Access {
struct type {};
};
template<typename T> struct D : B { // expected-note {{not viable}} \
- expected-note {{implicit deduction guide declared as 'template <typename T> D(D<T>) -> D<T>'}}
+ expected-note {{implicit deduction guide declared as 'template <typename T> D(Access::D<T>) -> Access::D<T>'}}
D(T, typename T::type); // expected-note {{private member}} \
- // expected-note {{implicit deduction guide declared as 'template <typename T> D(T, typename T::type) -> D<T>'}}
+ // expected-note {{implicit deduction guide declared as 'template <typename T> D(T, typename T::type) -> Access::D<T>'}}
};
D b = {B(), {}};
@@ -61,15 +62,45 @@ namespace NoCrashOnGettingDefaultArgLoc {
template <typename>
class A {
A(int = 1); // expected-note {{candidate template ignored: couldn't infer template argumen}} \
- // expected-note {{implicit deduction guide declared as 'template <typename> D(int = <null expr>) -> D<type-parameter-0-0>'}}
+ // expected-note {{implicit deduction guide declared as 'template <typename> D(int = <null expr>) -> NoCrashOnGettingDefaultArgLoc::D<type-parameter-0-0>'}}
};
class C : A<int> {
using A::A;
};
template <typename>
class D : C { // expected-note {{candidate function template not viable: requires 1 argument}} \
- expected-note {{implicit deduction guide declared as 'template <typename> D(D<type-parameter-0-0>) -> D<type-parameter-0-0>'}}
+ expected-note {{implicit deduction guide declared as 'template <typename> D(NoCrashOnGettingDefaultArgLoc::D<type-parameter-0-0>) -> NoCrashOnGettingDefaultArgLoc::D<type-parameter-0-0>'}}
using C::C;
};
D abc; // expected-error {{no viable constructor or deduction guide}}
}
+
+namespace AsValueParameter {
+ namespace foo {
+ // cxx17-note@+2 {{template is declared here}}
+ // cxx20-note@+1 {{'A<int>' is not literal because it is not an aggregate and has no constexpr constructors other than copy or move constructors}}
+ template <class> struct A {
+ A();
+ };
+ }
+ template <foo::A> struct B {}; // expected-note {{template parameter is declared here}}
+ // cxx17-error@-1 {{use of class template 'foo::A' requires template arguments; argument deduction not allowed in template parameter}}
+
+ template struct B<foo::A<int>{}>;
+ // cxx17-error@-1 {{value of type 'foo::A<int>' is not implicitly convertible to 'int'}}
+ // cxx20-error@-2 {{non-type template parameter has non-literal type 'foo::A<int>' (aka 'AsValueParameter::foo::A<int>')}}
+} // namespace AsValueParameter
+
+namespace ConvertDeducedTemplateArgument {
+ namespace A {
+ template <class> struct B {};
+ }
+
+ template <template <class> class TT1> struct C {
+ C(TT1<int>);
+ };
+
+ template <template <class> class TT2> using D = TT2<int>;
+
+ auto x = C(D<A::B>());
+}
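A hedged restatement of what the new ConvertDeducedTemplateArgument case checks: the alias D maps a template template argument to a concrete specialization, and class template argument deduction still recovers the underlying template. Sketch, with the static_assert as our illustration only:

namespace sketch {
  template <class> struct B {};
  template <template <class> class TT> struct C { C(TT<int>); };
  template <template <class> class TT> using D = TT<int>;
  auto x = C(D<B>());                             // deduces TT = B, so x is C<B>
  static_assert(__is_same(decltype(x), C<B>), "illustration only");
}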
diff --git a/clang/test/SemaTemplate/current-instantiation.cpp b/clang/test/SemaTemplate/current-instantiation.cpp
index 9030768..9214bbe 100644
--- a/clang/test/SemaTemplate/current-instantiation.cpp
+++ b/clang/test/SemaTemplate/current-instantiation.cpp
@@ -245,5 +245,5 @@ namespace RebuildDependentScopeDeclRefExpr {
};
template<typename T> N<X<T>::thing> X<T>::data() {}
// FIXME: We should issue a typo-correction here.
- template<typename T> N<X<T>::think> X<T>::foo() {} // expected-error {{no member named 'think' in 'X<T>'}}
+ template<typename T> N<X<T>::think> X<T>::foo() {} // expected-error {{no member named 'think' in 'RebuildDependentScopeDeclRefExpr::X<T>'}}
}
diff --git a/clang/test/SemaTemplate/deduction-crash.cpp b/clang/test/SemaTemplate/deduction-crash.cpp
index 287c61a..99ca0b3 100644
--- a/clang/test/SemaTemplate/deduction-crash.cpp
+++ b/clang/test/SemaTemplate/deduction-crash.cpp
@@ -166,9 +166,9 @@ namespace PR51872_part1 {
template<int> class T1 { template <struct U1> T1(); };
// expected-error@-1 {{non-type template parameter has incomplete type 'struct U1'}}
// expected-note@-2 {{forward declaration of 'PR51872_part1::U1'}}
- // expected-note@-3 {{implicit deduction guide declared as 'template <int> T1(T1<value-parameter-0-0>) -> T1<value-parameter-0-0>'}}
+ // expected-note@-3 {{implicit deduction guide declared as 'template <int> T1(PR51872_part1::T1<value-parameter-0-0>) -> PR51872_part1::T1<value-parameter-0-0>'}}
T1 t1 = 0;
// expected-error@-1 {{no viable constructor or deduction guide for deduction of template arguments of 'T1'}}
- // expected-note@-7 {{candidate template ignored: could not match 'T1<value-parameter-0-0>' against 'int'}}
+ // expected-note@-7 {{candidate template ignored: could not match 'PR51872_part1::T1<value-parameter-0-0>' against 'int'}}
}
diff --git a/clang/test/SemaTemplate/deduction-guide.cpp b/clang/test/SemaTemplate/deduction-guide.cpp
index f6bc6ee..e41ba7b 100644
--- a/clang/test/SemaTemplate/deduction-guide.cpp
+++ b/clang/test/SemaTemplate/deduction-guide.cpp
@@ -43,11 +43,10 @@ using AT = A<int[3], int, int, short>;
// CHECK: `-ParmVarDecl {{.*}} 'short (*)[4]'
// CHECK: FunctionProtoType {{.*}} 'auto (X<Ps...>, Ts (*)[Ns]...) -> A<T, Ts...>' dependent trailing_return
// CHECK: |-InjectedClassNameType {{.*}} 'A<T, Ts...>' dependent
-// CHECK: |-ElaboratedType {{.*}} 'X<Ps...>' sugar dependent
-// CHECK: | `-TemplateSpecializationType {{.*}} 'X<Ps...>' dependent
-// CHECK: | `-TemplateArgument expr
-// CHECK: | `-PackExpansionExpr {{.*}} 'T *'
-// CHECK: | `-DeclRefExpr {{.*}} 'T *' NonTypeTemplateParm {{.*}} 'Ps' 'T *'
+// CHECK: |-TemplateSpecializationType {{.*}} 'X<Ps...>' dependent
+// CHECK: | `-TemplateArgument expr
+// CHECK: | `-PackExpansionExpr {{.*}} 'T *'
+// CHECK: | `-DeclRefExpr {{.*}} 'T *' NonTypeTemplateParm {{.*}} 'Ps' 'T *'
// CHECK: `-PackExpansionType {{.*}} 'Ts (*)[Ns]...' dependent
// CHECK: `-PointerType {{.*}} 'Ts (*)[Ns]' dependent contains_unexpanded_pack
// CHECK: `-ParenType {{.*}} 'Ts[Ns]' sugar dependent contains_unexpanded_pack
@@ -118,9 +117,8 @@ using CT = C<int>;
// CHECK: |-InjectedClassNameType {{.*}} 'C<A>' dependent
// CHECK: |-TemplateTypeParmType {{.*}} 'A' dependent depth 0 index 0
// CHECK: | `-TemplateTypeParm {{.*}} 'A'
-// CHECK: |-ElaboratedType {{.*}} 'Y<T>' sugar dependent
-// CHECK: | `-TemplateSpecializationType {{.*}} 'Y<T>' dependent
-// CHECK: | `-TemplateArgument template
+// CHECK: |-TemplateSpecializationType {{.*}} 'Y<T>' dependent
+// CHECK: | `-TemplateArgument template
// CHECK: `-TemplateTypeParmType {{.*}} 'U' dependent depth 0 index 2
template<typename ...T> struct D { // expected-note {{candidate}} \
@@ -321,24 +319,23 @@ namespace TTP {
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} class depth 0 index 0 T{{$}}
// CHECK-NEXT: |-TemplateTemplateParmDecl {{.+}} depth 0 index 1 TT{{$}}
// CHECK-NEXT: | `-TemplateTypeParmDecl {{.+}} class depth 1 index 0{{$}}
-// CHECK-NEXT: |-CXXDeductionGuideDecl {{.+}} 'auto (TT<T>) -> B<T>'{{$}}
+// CHECK-NEXT: |-CXXDeductionGuideDecl {{.+}} 'auto (TT<T>) -> TTP::B<T>'{{$}}
// CHECK-NEXT: | `-ParmVarDecl {{.+}} 'TT<T>'{{$}}
-// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} 'auto (A<int>) -> TTP::B<int>'
+// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} 'auto (TTP::A<int>) -> TTP::B<int>'
// CHECK-NEXT: |-TemplateArgument type 'int'
// CHECK-NEXT: | `-BuiltinType {{.+}} 'int'{{$}}
// CHECK-NEXT: |-TemplateArgument template 'TTP::A'{{$}}
// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A{{$}}
-// CHECK-NEXT: `-ParmVarDecl {{.+}} 'A<int>':'TTP::A<int>'{{$}}
-// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (TT<T>) -> B<T>' dependent trailing_return cdecl{{$}}
-// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'B<T>' dependent{{$}}
+// CHECK-NEXT: `-ParmVarDecl {{.+}} 'TTP::A<int>'{{$}}
+// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (TT<T>) -> TTP::B<T>' dependent trailing_return cdecl{{$}}
+// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'TTP::B<T>' dependent{{$}}
// CHECK-NEXT: | `-CXXRecord {{.+}} 'B'{{$}}
-// CHECK-NEXT: `-ElaboratedType {{.+}} 'TT<T>' sugar dependent{{$}}
-// CHECK-NEXT: `-TemplateSpecializationType {{.+}} 'TT<T>' dependent{{$}}
-// CHECK-NEXT: |-name: 'TT':'template-parameter-0-1' qualified
-// CHECK-NEXT: | `-TemplateTemplateParmDecl {{.+}} depth 0 index 1
-// CHECK-NEXT: `-TemplateArgument type 'T':'type-parameter-0-0'{{$}}
-// CHECK-NEXT: `-TemplateTypeParmType {{.+}} 'T' dependent depth 0 index 0{{$}}
-// CHECK-NEXT: `-TemplateTypeParm {{.+}} 'T'{{$}}
+// CHECK-NEXT: `-TemplateSpecializationType {{.+}} 'TT<T>' dependent{{$}}
+// CHECK-NEXT: |-name: 'TT':'template-parameter-0-1'
+// CHECK-NEXT: | `-TemplateTemplateParmDecl {{.+}} depth 0 index 1
+// CHECK-NEXT: `-TemplateArgument type 'T':'type-parameter-0-0'{{$}}
+// CHECK-NEXT: `-TemplateTypeParmType {{.+}} 'T' dependent depth 0 index 0{{$}}
+// CHECK-NEXT: `-TemplateTypeParm {{.+}} 'T'{{$}}
namespace GH64625 {
@@ -351,14 +348,14 @@ X x = {{1, 2}};
// CHECK-LABEL: Dumping GH64625::<deduction guide for X>:
// CHECK-NEXT: FunctionTemplateDecl {{.+}} <{{.+}}:[[#@LINE - 7]]:1, col:27> col:27 implicit <deduction guide for X>
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} <col:11, col:17> col:17 referenced class depth 0 index 0 T
-// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:27> col:27 implicit <deduction guide for X> 'auto (T (&&)[2]) -> X<T>' aggregate
+// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:27> col:27 implicit <deduction guide for X> 'auto (T (&&)[2]) -> GH64625::X<T>' aggregate
// CHECK-NEXT: | `-ParmVarDecl {{.+}} <col:27> col:27 'T (&&)[2]'
// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} <col:27> col:27 implicit used <deduction guide for X> 'auto (int (&&)[2]) -> GH64625::X<int>' implicit_instantiation aggregate
// CHECK-NEXT: |-TemplateArgument type 'int'
// CHECK-NEXT: | `-BuiltinType {{.+}} 'int'
// CHECK-NEXT: `-ParmVarDecl {{.+}} <col:27> col:27 'int (&&)[2]'
-// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T (&&)[2]) -> X<T>' dependent trailing_return
-// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'X<T>' dependent
+// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T (&&)[2]) -> GH64625::X<T>' dependent trailing_return
+// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'GH64625::X<T>' dependent
// CHECK-NEXT: | `-CXXRecord {{.+}} 'X'
// CHECK-NEXT: `-RValueReferenceType {{.+}} 'T (&&)[2]' dependent
// CHECK-NEXT: `-ConstantArrayType {{.+}} 'T[2]' dependent 2
@@ -375,7 +372,7 @@ TwoArrays ta = {{1, 2}, {3, 4, 5}};
// CHECK-NEXT: FunctionTemplateDecl {{.+}} <{{.+}}:[[#@LINE - 7]]:1, col:36> col:36 implicit <deduction guide for TwoArrays>
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} <col:11, col:17> col:17 referenced class depth 0 index 0 T
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} <col:20, col:26> col:26 referenced class depth 0 index 1 U
-// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit <deduction guide for TwoArrays> 'auto (T (&&)[2], U (&&)[3]) -> TwoArrays<T, U>' aggregate
+// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit <deduction guide for TwoArrays> 'auto (T (&&)[2], U (&&)[3]) -> GH64625::TwoArrays<T, U>' aggregate
// CHECK-NEXT: | |-ParmVarDecl {{.+}} <col:36> col:36 'T (&&)[2]'
// CHECK-NEXT: | `-ParmVarDecl {{.+}} <col:36> col:36 'U (&&)[3]'
// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit used <deduction guide for TwoArrays> 'auto (int (&&)[2], int (&&)[3]) -> GH64625::TwoArrays<int, int>' implicit_instantiation aggregate
@@ -385,8 +382,8 @@ TwoArrays ta = {{1, 2}, {3, 4, 5}};
// CHECK-NEXT: | `-BuiltinType {{.+}} 'int'
// CHECK-NEXT: |-ParmVarDecl {{.+}} <col:36> col:36 'int (&&)[2]'
// CHECK-NEXT: `-ParmVarDecl {{.+}} <col:36> col:36 'int (&&)[3]'
-// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T (&&)[2], U (&&)[3]) -> TwoArrays<T, U>' dependent trailing_return
-// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'TwoArrays<T, U>' dependent
+// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T (&&)[2], U (&&)[3]) -> GH64625::TwoArrays<T, U>' dependent trailing_return
+// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'GH64625::TwoArrays<T, U>' dependent
// CHECK-NEXT: | `-CXXRecord {{.+}} 'TwoArrays'
// CHECK-NEXT: |-RValueReferenceType {{.+}} 'T (&&)[2]' dependent
// CHECK-NEXT: | `-ConstantArrayType {{.+}} 'T[2]' dependent 2
@@ -398,7 +395,7 @@ TwoArrays ta = {{1, 2}, {3, 4, 5}};
// CHECK-NEXT: `-TemplateTypeParm {{.+}} 'U'
TwoArrays tb = {1, 2, {3, 4, 5}};
-// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit <deduction guide for TwoArrays> 'auto (T, T, U (&&)[3]) -> TwoArrays<T, U>' aggregate
+// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit <deduction guide for TwoArrays> 'auto (T, T, U (&&)[3]) -> GH64625::TwoArrays<T, U>' aggregate
// CHECK-NEXT: | |-ParmVarDecl {{.+}} <col:36> col:36 'T'
// CHECK-NEXT: | |-ParmVarDecl {{.+}} <col:36> col:36 'T'
// CHECK-NEXT: | `-ParmVarDecl {{.+}} <col:36> col:36 'U (&&)[3]'
@@ -410,8 +407,8 @@ TwoArrays tb = {1, 2, {3, 4, 5}};
// CHECK-NEXT: |-ParmVarDecl {{.+}} <col:36> col:36 'int'
// CHECK-NEXT: |-ParmVarDecl {{.+}} <col:36> col:36 'int'
// CHECK-NEXT: `-ParmVarDecl {{.+}} <col:36> col:36 'int (&&)[3]'
-// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T, T, U (&&)[3]) -> TwoArrays<T, U>' dependent trailing_return
-// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'TwoArrays<T, U>' dependent
+// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T, T, U (&&)[3]) -> GH64625::TwoArrays<T, U>' dependent trailing_return
+// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'GH64625::TwoArrays<T, U>' dependent
// CHECK-NEXT: | `-CXXRecord {{.+}} 'TwoArrays'
// CHECK-NEXT: |-TemplateTypeParmType {{.+}} 'T' dependent depth 0 index 0
// CHECK-NEXT: | `-TemplateTypeParm {{.+}} 'T'
@@ -423,7 +420,7 @@ TwoArrays tb = {1, 2, {3, 4, 5}};
// CHECK-NEXT: `-TemplateTypeParm {{.+}} 'U'
TwoArrays tc = {{1, 2}, 3, 4, 5};
-// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit <deduction guide for TwoArrays> 'auto (T (&&)[2], U, U, U) -> TwoArrays<T, U>' aggregate
+// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit <deduction guide for TwoArrays> 'auto (T (&&)[2], U, U, U) -> GH64625::TwoArrays<T, U>' aggregate
// CHECK-NEXT: | |-ParmVarDecl {{.+}} <col:36> col:36 'T (&&)[2]'
// CHECK-NEXT: | |-ParmVarDecl {{.+}} <col:36> col:36 'U'
// CHECK-NEXT: | |-ParmVarDecl {{.+}} <col:36> col:36 'U'
@@ -437,8 +434,8 @@ TwoArrays tc = {{1, 2}, 3, 4, 5};
// CHECK-NEXT: |-ParmVarDecl {{.+}} <col:36> col:36 'int'
// CHECK-NEXT: |-ParmVarDecl {{.+}} <col:36> col:36 'int'
// CHECK-NEXT: `-ParmVarDecl {{.+}} <col:36> col:36 'int'
-// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T (&&)[2], U, U, U) -> TwoArrays<T, U>' dependent trailing_return
-// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'TwoArrays<T, U>' dependent
+// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T (&&)[2], U, U, U) -> GH64625::TwoArrays<T, U>' dependent trailing_return
+// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'GH64625::TwoArrays<T, U>' dependent
// CHECK-NEXT: | `-CXXRecord {{.+}} 'TwoArrays'
// CHECK-NEXT: |-RValueReferenceType {{.+}} 'T (&&)[2]' dependent
// CHECK-NEXT: | `-ConstantArrayType {{.+}} 'T[2]' dependent 2
@@ -464,13 +461,13 @@ A a{.f1 = {1}};
// CHECK-LABEL: Dumping GH83368::<deduction guide for A>:
// CHECK-NEXT: FunctionTemplateDecl 0x{{.+}} <{{.+}}:[[#@LINE - 7]]:1, col:25> col:25 implicit <deduction guide for A>
// CHECK-NEXT: |-NonTypeTemplateParmDecl {{.+}} <col:11, col:15> col:15 referenced 'int' depth 0 index 0 N
-// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:25> col:25 implicit <deduction guide for A> 'auto (int (&&)[N]) -> A<N>' aggregate
+// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:25> col:25 implicit <deduction guide for A> 'auto (int (&&)[N]) -> GH83368::A<N>' aggregate
// CHECK-NEXT: | `-ParmVarDecl {{.+}} <col:25> col:25 'int (&&)[N]'
// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} <col:25> col:25 implicit used <deduction guide for A> 'auto (int (&&)[1]) -> GH83368::A<1>' implicit_instantiation aggregate
// CHECK-NEXT: |-TemplateArgument integral '1'
// CHECK-NEXT: `-ParmVarDecl {{.+}} <col:25> col:25 'int (&&)[1]'
-// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (int (&&)[N]) -> A<N>' dependent trailing_return
-// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'A<N>' dependent
+// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (int (&&)[N]) -> GH83368::A<N>' dependent trailing_return
+// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'GH83368::A<N>' dependent
// CHECK-NEXT: | `-CXXRecord {{.+}} 'A'
// CHECK-NEXT: `-RValueReferenceType {{.+}} 'int (&&)[N]' dependent
// CHECK-NEXT: `-DependentSizedArrayType {{.+}} 'int[N]' dependent
@@ -512,7 +509,7 @@ A a(42);
// CHECK-NEXT: | | `-TemplateTypeParm 0x{{.+}} 'Ts'
// CHECK-NEXT: | `-ImplicitCastExpr {{.+}} <IntegralCast>
// CHECK-NEXT: | `-IntegerLiteral 0x{{.+}} <{{.+}}> 'int' 0
-// CHECK-NEXT: |-CXXDeductionGuideDecl 0x{{.+}} <{{.+}}> line:{{.+}} implicit <deduction guide for A> 'auto (T, Ts...) -> A<T>'
+// CHECK-NEXT: |-CXXDeductionGuideDecl 0x{{.+}} <{{.+}}> line:{{.+}} implicit <deduction guide for A> 'auto (T, Ts...) -> GH60777::A<T>'
// CHECK-NEXT: | |-ParmVarDecl 0x{{.+}} <{{.+}}> col:{{.+}} val 'T'
// CHECK-NEXT: | |-ParmVarDecl 0x{{.+}} <{{.+}}> col:{{.+}} tail 'Ts...' pack
// CHECK-NEXT: | `-BinaryOperator 0x{{.+}} <{{.+}}> 'bool' '&&'
@@ -541,9 +538,9 @@ B b(42, 43);
// expected-error@-1 {{no viable constructor}} \
// expected-note@-6 {{constraints not satisfied}} \
// expected-note@-5 {{because substituted constraint expression is ill-formed}} \
-// expected-note@-6 {{implicit deduction guide declared as 'template <typename T, typename ...Ts> B(T val, Ts ...tail) -> B<T> requires (True<tail...>())'}} \
+// expected-note@-6 {{implicit deduction guide declared as 'template <typename T, typename ...Ts> B(T val, Ts ...tail) -> GH60777::B<T> requires (True<tail...>())'}} \
// expected-note@-8 {{function template not viable}} \
-// expected-note@-8 {{implicit deduction guide declared as 'template <typename T> B(B<T>) -> B<T>'}}
+// expected-note@-8 {{implicit deduction guide declared as 'template <typename T> B(GH60777::B<T>) -> GH60777::B<T>'}}
} // namespace GH60777
@@ -572,7 +569,7 @@ static_assert(x.size == 4);
// CHECK-NEXT: FunctionTemplateDecl 0x{{.+}} <{{.+}}> col:13 implicit <deduction guide for X>
// CHECK-NEXT: |-TemplateTypeParmDecl 0x{{.+}} <{{.+}}> col:17 referenced class depth 0 index 0 T
// CHECK-NEXT: |-TemplateTypeParmDecl 0x{{.+}} <{{.+}}> col:19 class depth 0 index 1 U
-// CHECK-NEXT: |-CXXDeductionGuideDecl 0x{{.+}} <{{.+}}> col:13 implicit <deduction guide for X> 'auto (T, U (&)[3]) -> X<T>'
+// CHECK-NEXT: |-CXXDeductionGuideDecl 0x{{.+}} <{{.+}}> col:13 implicit <deduction guide for X> 'auto (T, U (&)[3]) -> GH98592::X<T>'
// CHECK-NEXT: | |-ParmVarDecl 0x{{.+}} <col:15> col:16 'T'
// CHECK-NEXT: | |-ParmVarDecl 0x{{.+}} <col:18, col:24> col:21 'U (&)[3]'
// CHECK-NEXT: | `-ConceptSpecializationExpr 0x{{.+}} <col:36, col:42> 'bool' Concept 0x{{.+}} 'True'
@@ -676,8 +673,8 @@ Test test(42);
// CHECK-NEXT: | |-DeducedTemplateSpecializationType {{.*}} 'GH122134::Test' dependent
// CHECK-NEXT: | | `-name: 'GH122134::Test'
// CHECK-NEXT: | | `-TypeAliasTemplateDecl {{.*}} Test
-// CHECK-NEXT: | `-TemplateSpecializationType {{.*}} 'Struct<int, N>' dependent
-// CHECK-NEXT: | |-name: 'Struct':'GH122134::Struct' qualified
+// CHECK-NEXT: | `-TemplateSpecializationType {{.*}} 'GH122134::Struct<int, N>' dependent
+// CHECK-NEXT: | |-name: 'GH122134::Struct'
// CHECK-NEXT: | | `-ClassTemplateDecl {{.*}} Struct
// CHECK-NEXT: | |-TemplateArgument type 'int'
// CHECK-NEXT: | | `-SubstTemplateTypeParmType {{.*}} 'int' sugar class depth 0 index 0 T
@@ -687,7 +684,7 @@ Test test(42);
// CHECK-NEXT: | `-SubstNonTypeTemplateParmExpr {{.*}} 'int'
// CHECK-NEXT: | |-NonTypeTemplateParmDecl {{.*}} 'int' depth 0 index 1
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int' NonTypeTemplateParm {{.*}} 'N' 'int'
-// CHECK-NEXT: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Test> 'auto (auto:1) -> Struct<int, N>'
+// CHECK-NEXT: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Test> 'auto (auto:1) -> GH122134::Struct<int, N>'
// CHECK-NEXT: | `-ParmVarDecl {{.*}} 'auto:1'
} // namespace GH122134
@@ -719,7 +716,7 @@ void test() { NewDeleteAllocator abc(42); } // expected-error {{no viable constr
// CHECK-NEXT: | |-inherited from TemplateTypeParm {{.+}} depth 0 index 0
// CHECK-NEXT: | `-BuiltinType {{.+}} 'void'
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} typename depth 0 index 1 T
-// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} <deduction guide for NewDeleteAllocator> 'auto (T) -> NewDeleteAllocator<type-parameter-0-0>'
+// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} <deduction guide for NewDeleteAllocator> 'auto (T) -> GH128691::NewDeleteAllocator<type-parameter-0-0>'
// CHECK-NEXT: `-ParmVarDecl {{.+}} 'T'
} // namespace GH128691
@@ -745,7 +742,7 @@ B b(24);
// CHECK-NEXT: FunctionTemplateDecl {{.+}} implicit <deduction guide for B>
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} typename depth 0 index 0
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} class depth 0 index 1 U
-// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for B> 'auto (U) -> B<type-parameter-0-0>'
+// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for B> 'auto (U) -> GH132616_DeductionGuide::B<type-parameter-0-0>'
// CHECK-NEXT: `-ParmVarDecl {{.+}} 'U'
struct C {
@@ -767,7 +764,7 @@ D d(24);
// CHECK-NEXT: FunctionTemplateDecl {{.+}} implicit <deduction guide for D>
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} typename depth 0 index 0
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} class depth 0 index 1 U
-// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for D> 'auto (U) -> D<type-parameter-0-0>'
+// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for D> 'auto (U) -> GH132616_DeductionGuide::D<type-parameter-0-0>'
// CHECK-NEXT: `-ParmVarDecl {{.+}} 'U'
} // namespace GH132616_DeductionGuide
@@ -795,16 +792,16 @@ AA a{};
// CHECK-NEXT: | |-DeducedTemplateSpecializationType {{.+}} 'GH133132::AA' dependent
// CHECK-NEXT: | | `-name: 'GH133132::AA'
// CHECK-NEXT: | | `-TypeAliasTemplateDecl {{.+}} AA
-// CHECK-NEXT: | `-TemplateSpecializationType {{.+}} 'A<U>' dependent
-// CHECK-NEXT: | |-name: 'A':'GH133132::A' qualified
+// CHECK-NEXT: | `-TemplateSpecializationType {{.+}} 'GH133132::A<U>' dependent
+// CHECK-NEXT: | |-name: 'GH133132::A'
// CHECK-NEXT: | | `-ClassTemplateDecl {{.+}} A
// CHECK-NEXT: | `-TemplateArgument type 'U':'type-parameter-0-1'
// CHECK-NEXT: | `-SubstTemplateTypeParmType {{.+}} 'U' sugar dependent class depth 0 index 0 _Ty
// CHECK-NEXT: | |-FunctionTemplate {{.+}} '<deduction guide for A>'
// CHECK-NEXT: | `-TemplateTypeParmType {{.+}} 'U' dependent depth 0 index 1
// CHECK-NEXT: | `-TemplateTypeParm {{.+}} 'U'
-// CHECK-NEXT: |-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for AA> 'auto () -> A<U>'
-// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit used <deduction guide for AA> 'auto () -> A<int>' implicit_instantiation
+// CHECK-NEXT: |-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for AA> 'auto () -> GH133132::A<U>'
+// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit used <deduction guide for AA> 'auto () -> GH133132::A<int>' implicit_instantiation
// CHECK-NEXT: |-TemplateArgument type 'int'
// CHECK-NEXT: | `-BuiltinType {{.+}} 'int'
// CHECK-NEXT: `-TemplateArgument type 'int'
@@ -826,22 +823,22 @@ BB b{};
// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A
// CHECK-NEXT: |-TemplateTemplateParmDecl {{.+}} depth 0 index 1 _Y
// CHECK-NEXT: | |-TemplateTypeParmDecl {{.+}} class depth 0 index 0
-// CHECK-NEXT: | `-TemplateArgument {{.+}} template '_X':'template-parameter-0-0' qualified
+// CHECK-NEXT: | `-TemplateArgument {{.+}} template '_X':'template-parameter-0-0'
// CHECK-NEXT: | `-TemplateTemplateParmDecl {{.+}} depth 0 index 0 _X
// CHECK-NEXT: |-TypeTraitExpr {{.+}} 'bool' __is_deducible
// CHECK-NEXT: | |-DeducedTemplateSpecializationType {{.+}} 'GH133132::BB' dependent
// CHECK-NEXT: | | `-name: 'GH133132::BB'
// CHECK-NEXT: | | `-TypeAliasTemplateDecl {{.+}} BB
-// CHECK-NEXT: | `-TemplateSpecializationType {{.+}} 'B<_Y>' dependent
-// CHECK-NEXT: | |-name: 'B':'GH133132::B' qualified
+// CHECK-NEXT: | `-TemplateSpecializationType {{.+}} 'GH133132::B<_Y>' dependent
+// CHECK-NEXT: | |-name: 'GH133132::B'
// CHECK-NEXT: | | `-ClassTemplateDecl {{.+}} B
// CHECK-NEXT: | `-TemplateArgument template '_Y':'template-parameter-0-1' subst index 0
// CHECK-NEXT: | |-parameter: TemplateTemplateParmDecl {{.+}} depth 0 index 0 _X
// CHECK-NEXT: | |-associated FunctionTemplate {{.+}} '<deduction guide for B>'
-// CHECK-NEXT: | `-replacement: '_Y':'template-parameter-0-1' qualified
+// CHECK-NEXT: | `-replacement: '_Y':'template-parameter-0-1'
// CHECK-NEXT: | `-TemplateTemplateParmDecl {{.+}} depth 0 index 1 _Y
-// CHECK-NEXT: |-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for BB> 'auto () -> B<_Y>'
-// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit used <deduction guide for BB> 'auto () -> B<GH133132::A>' implicit_instantiation
+// CHECK-NEXT: |-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for BB> 'auto () -> GH133132::B<_Y>'
+// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit used <deduction guide for BB> 'auto () -> GH133132::B<GH133132::A>' implicit_instantiation
// CHECK-NEXT: |-TemplateArgument template 'GH133132::A'
// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A
// CHECK-NEXT: `-TemplateArgument template 'GH133132::A'
@@ -859,27 +856,26 @@ CC c{};
// CHECK-NEXT: | `-IntegerLiteral {{.+}} 'int' 42
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} class depth 0 index 1 U
// CHECK-NEXT: | `-TemplateArgument type 'A<decltype(N)>'
-// CHECK-NEXT: | `-ElaboratedType {{.+}} 'A<decltype(N)>' sugar dependent
-// CHECK-NEXT: | `-TemplateSpecializationType {{.+}} 'A<decltype(N)>' dependent
-// CHECK-NEXT: | |-name: 'A':'GH133132::A' qualified
-// CHECK-NEXT: | | `-ClassTemplateDecl {{.+}} A
-// CHECK-NEXT: | `-TemplateArgument type 'decltype(N)'
-// CHECK-NEXT: | `-DecltypeType {{.+}} 'decltype(N)' dependent
-// CHECK-NEXT: | `-DeclRefExpr {{.+}} 'int' NonTypeTemplateParm {{.+}} 'N' 'int'
+// CHECK-NEXT: | `-TemplateSpecializationType {{.+}} 'A<decltype(N)>' dependent
+// CHECK-NEXT: | |-name: 'A':'GH133132::A' qualified
+// CHECK-NEXT: | | `-ClassTemplateDecl {{.+}} A
+// CHECK-NEXT: | `-TemplateArgument type 'decltype(N)'
+// CHECK-NEXT: | `-DecltypeType {{.+}} 'decltype(N)' dependent
+// CHECK-NEXT: | `-DeclRefExpr {{.+}} 'int' NonTypeTemplateParm {{.+}} 'N' 'int'
// CHECK-NEXT: |-TypeTraitExpr {{.+}} 'bool' __is_deducible
// CHECK-NEXT: | |-DeducedTemplateSpecializationType {{.+}} 'GH133132::CC' dependent
// CHECK-NEXT: | | `-name: 'GH133132::CC'
// CHECK-NEXT: | | `-TypeAliasTemplateDecl {{.+}} CC
-// CHECK-NEXT: | `-TemplateSpecializationType {{.+}} 'A<U>' dependent
-// CHECK-NEXT: | |-name: 'A':'GH133132::A' qualified
+// CHECK-NEXT: | `-TemplateSpecializationType {{.+}} 'GH133132::A<U>' dependent
+// CHECK-NEXT: | |-name: 'GH133132::A'
// CHECK-NEXT: | | `-ClassTemplateDecl {{.+}} A
// CHECK-NEXT: | `-TemplateArgument type 'U':'type-parameter-0-1'
// CHECK-NEXT: | `-SubstTemplateTypeParmType {{.+}} 'U' sugar dependent class depth 0 index 0 _Ty
// CHECK-NEXT: | |-FunctionTemplate {{.+}} '<deduction guide for A>'
// CHECK-NEXT: | `-TemplateTypeParmType {{.+}} 'U' dependent depth 0 index 1
// CHECK-NEXT: | `-TemplateTypeParm {{.+}} 'U'
-// CHECK-NEXT: |-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for CC> 'auto () -> A<U>'
-// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit used <deduction guide for CC> 'auto () -> A<GH133132::A<int>>' implicit_instantiation
+// CHECK-NEXT: |-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for CC> 'auto () -> GH133132::A<U>'
+// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit used <deduction guide for CC> 'auto () -> GH133132::A<GH133132::A<int>>' implicit_instantiation
// CHECK-NEXT: |-TemplateArgument integral '42'
// CHECK-NEXT: `-TemplateArgument type 'GH133132::A<int>'
// CHECK-NEXT: `-RecordType {{.+}} 'GH133132::A<int>'
@@ -903,7 +899,7 @@ void f() {
// CHECK-LABEL: Dumping GH67173::<deduction guide for Vec2d>:
// CHECK-NEXT: FunctionTemplateDecl {{.+}} implicit <deduction guide for Vec2d>
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} referenced class depth 0 index 0 T
-// CHECK: |-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for Vec2d> 'auto (T, T) -> Vec2d<T>' aggregate
+// CHECK: |-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for Vec2d> 'auto (T, T) -> GH67173::Vec2d<T>' aggregate
// CHECK-NEXT: | |-ParmVarDecl {{.+}} col:27 'T'
// CHECK-NEXT: | `-ParmVarDecl {{.+}} col:27 'T'
// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit used <deduction guide for Vec2d> 'auto (int, int) -> GH67173::Vec2d<int>' implicit_instantiation aggregate
@@ -953,8 +949,8 @@ Expand<Type, Invocable<>> _{};
// CHECK-NEXT: | |-DeducedTemplateSpecializationType {{.+}} 'GH141425::Alias' dependent
// CHECK-NEXT: | | `-name: 'GH141425::Alias'
// CHECK-NEXT: | | `-TypeAliasTemplateDecl {{.+}} Alias
-// CHECK-NEXT: | `-TemplateSpecializationType {{.+}} 'Container<T...>' dependent
-// CHECK-NEXT: | |-name: 'Container':'GH141425::Container' qualified
+// CHECK-NEXT: | `-TemplateSpecializationType {{.+}} 'GH141425::Container<T...>' dependent
+// CHECK-NEXT: | |-name: 'GH141425::Container'
// CHECK-NEXT: | | `-ClassTemplateDecl {{.+}} Container
// CHECK-NEXT: | `-TemplateArgument type 'T...':'type-parameter-0-0...'
// CHECK-NEXT: | `-PackExpansionType {{.+}} 'T...' dependent
@@ -962,7 +958,7 @@ Expand<Type, Invocable<>> _{};
// CHECK-NEXT: | |-FunctionTemplate {{.+}} '<deduction guide for Container>'
// CHECK-NEXT: | `-TemplateTypeParmType {{.+}} 'T' dependent contains_unexpanded_pack depth 0 index 0 pack
// CHECK-NEXT: | `-TemplateTypeParm {{.+}} 'T'
-// CHECK-NEXT: |-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for Alias> 'auto (T...) -> Container<T...>'
+// CHECK-NEXT: |-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for Alias> 'auto (T...) -> GH141425::Container<T...>'
// CHECK-NEXT: | `-ParmVarDecl {{.+}} 'T...' pack
}
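
The mechanical churn in this file reflects one user-visible change: implicit deduction guides and AST dumps now print the deduced class template with its full qualification. A hedged before/after sketch (the namespace and class names are illustrative, not taken from the patch):

    namespace N { template <class T> struct S { S(T); }; }
    N::S s = 0;  // deduces N::S<int>
    // note before the patch: '... S(T) -> S<T>'
    // note after the patch:  '... S(T) -> N::S<T>'
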
diff --git a/clang/test/SemaTemplate/dedup-types-builtin.cpp b/clang/test/SemaTemplate/dedup-types-builtin.cpp
new file mode 100644
index 0000000..fe6efd7
--- /dev/null
+++ b/clang/test/SemaTemplate/dedup-types-builtin.cpp
@@ -0,0 +1,225 @@
+// RUN: %clang_cc1 %s -verify -Wno-c++20-extensions
+template <typename...> struct TypeList;
+
+// === Check results of the builtin.
+template <class>
+struct TemplateWrapper {
+ static_assert(__is_same( // expected-error {{static assertion contains an unexpanded parameter pack}}
+ TypeList<__builtin_dedup_pack<int, int*, int, double, float>>,
+ TypeList<int, int*, double, float>));
+};
+
+template <template<typename ...> typename Templ, typename ...Types>
+struct Dependent {
+ using empty_list = Templ<__builtin_dedup_pack<>...>;
+ using same = Templ<__builtin_dedup_pack<Types...>...>;
+ using twice = Templ<__builtin_dedup_pack<Types..., Types...>...>;
+ using dep_only_types = TypeList<__builtin_dedup_pack<Types...>...>;
+ using dep_only_template = Templ<__builtin_dedup_pack<int, double, int>...>;
+};
+
+// Check the reverse condition to make sure we see an error and have not accidentally produced a dependent expression.
+static_assert(!__is_same(Dependent<TypeList>::empty_list, TypeList<>)); // expected-error {{static assertion failed}}
+static_assert(!__is_same(Dependent<TypeList>::same, TypeList<>)); // expected-error {{static assertion failed}}
+static_assert(!__is_same(Dependent<TypeList>::twice, TypeList<>)); // expected-error {{static assertion failed}}
+static_assert(!__is_same(Dependent<TypeList>::dep_only_types, TypeList<>)); // expected-error {{static assertion failed}}
+static_assert(!__is_same(Dependent<TypeList>::dep_only_template, TypeList<int, double>)); // expected-error {{static assertion failed}}
+static_assert(!__is_same(Dependent<TypeList, int*, double*, int*>::empty_list, TypeList<>)); // expected-error {{static assertion failed}}
+static_assert(!__is_same(Dependent<TypeList, int*, double*, int*>::same, TypeList<int*, double*>)); // expected-error {{static assertion failed}}
+static_assert(!__is_same(Dependent<TypeList, int*, double*, int*>::twice, TypeList<int*, double*>)); // expected-error {{static assertion failed}}
+static_assert(!__is_same(Dependent<TypeList, int*, double*, int*>::dep_only_types, TypeList<int*, double*>)); // expected-error {{static assertion failed}}
+static_assert(!__is_same(Dependent<TypeList, int*, double*, int*>::dep_only_template, TypeList<int, double>)); // expected-error {{static assertion failed}}
+
+
+template <class ...T>
+using Twice = TypeList<T..., T...>;
+
+template <class>
+struct TwiceTemplateWrapper {
+ static_assert(!__is_same(Twice<__builtin_dedup_pack<int, double, int>...>, TypeList<int, double, int, double>)); // expected-error {{static assertion failed}}
+
+};
+template struct TwiceTemplateWrapper<int>; // expected-note {{in instantiation of template class 'TwiceTemplateWrapper<int>' requested here}}
+
+template <int...> struct IntList;
+// Wrong kinds of template arguments.
+template <class> struct IntListTemplateWrapper {
+ IntList<__builtin_dedup_pack<int>...>* wrong_template; // expected-error {{template argument for non-type template parameter must be an expression}}
+ // expected-note@-4 {{template parameter is declared here}}
+ TypeList<__builtin_dedup_pack<1, 2, 3>...>* wrong_template_args; // expected-error {{template argument for template type parameter must be a type}}
+ // expected-note@* {{template parameter from hidden source}}
+ __builtin_dedup_pack<> not_enough_args; // expected-error {{data member type contains an unexpanded parameter pack}}
+ // expected-note@* {{template declaration from hidden source}}
+ __builtin_dedup_pack missing_template_args; // expected-error {{use of template '__builtin_dedup_pack' requires template arguments}}
+};
+
+// Make sure various canonical / non-canonical type representations do not affect the results
+// of deduplication, and that qualifiers still produce distinct types when C++ requires it.
+using Int = int;
+using CInt = const Int;
+using IntArray = Int[10];
+using CIntArray = const Int[10];
+using IntPtr = int*;
+using CIntPtr = const int*;
+
+template <class>
+struct Foo {
+ static_assert(
+ !__is_same( // expected-error {{static assertion failed}}
+ // expected-note@* {{in instantiation of template class 'Foo<int>'}}
+ TypeList<__builtin_dedup_pack<
+ Int, int,
+ const int, const Int, CInt, const CInt,
+ IntArray, Int[10], int[10],
+ const IntArray, const int[10], CIntArray, const CIntArray,
+ IntPtr, int*,
+ const IntPtr, int* const,
+ CIntPtr, const int*,
+ const IntPtr*, int*const*,
+ CIntPtr*, const int**,
+ const CIntPtr*, const int* const*
+ >...>,
+ TypeList<int, const int, int[10], const int [10], int*, int* const, const int*, int*const *, const int**, const int*const*>),
+ "");
+};
+
+template struct Foo<int>;
+
+// === Show an error when packs are used in non-template contexts.
+static_assert(!__is_same(TypeList<__builtin_dedup_pack<int>...>, TypeList<int>)); // expected-error {{outside}}
+// Non-dependent uses in a template are fine, though.
+template <class T>
+struct NonDepInTemplate {
+ static_assert(!__is_same(TypeList<__builtin_dedup_pack<int>...>, TypeList<int>)); // expected-error {{static assertion failed}}
+};
+template struct NonDepInTemplate<int>; // expected-note {{requested here}}
+
+template <template<class...> class T = __builtin_dedup_pack> // expected-error {{use of template '__builtin_dedup_pack' requires template arguments}}
+ // expected-note@* {{template declaration from hidden source}}
+struct UseAsTemplate;
+template <template<class...> class>
+struct AcceptsTemplateArg;
+template <class>
+struct UseAsTemplateWrapper {
+ AcceptsTemplateArg<__builtin_dedup_pack>* a; // expected-error {{use of template '__builtin_dedup_pack' requires template arguments}}
+ // expected-note@* {{template declaration from hidden source}}
+};
+
+// === Check how expansions in various contexts behave.
+// The following cases are not supported yet and should produce an error.
+template <class... T>
+struct DedupBases : __builtin_dedup_pack<T...>... {};
+struct Base1 {
+ int a1;
+};
+struct Base2 {
+ int a2;
+};
+static_assert(DedupBases<Base1, Base1, Base2, Base1, Base2, Base2>{1, 2}.a1 != 1); // expected-error {{static assertion failed}} \
+ // expected-note {{}}
+static_assert(DedupBases<Base1, Base1, Base2, Base1, Base2, Base2>{1, 2}.a2 != 2); // expected-error {{static assertion failed}} \
+ // expected-note {{}}
+
+template <class ...T>
+constexpr int dedup_params(__builtin_dedup_pack<T...>... as) {
+ return (as + ...);
+}
+static_assert(dedup_params<int, int, short, int, short, short>(1, 2)); // expected-error {{no matching function}} \
+ // expected-note@-3 {{expansions of '__builtin_dedup_pack' are not supported here}}
+
+template <class ...T>
+constexpr int dedup_params_into_type_list(TypeList<__builtin_dedup_pack<T...>...> *, T... as) {
+ return (as + ...);
+}
+static_assert(dedup_params_into_type_list(static_cast<TypeList<int,short,long>*>(nullptr), 1, short(1), 1, 1l, 1l) != 5); // expected-error {{static assertion failed}} \
+ // expected-note {{expression evaluates}}
+
+template <class T, __builtin_dedup_pack<T, int>...> // expected-error 2{{expansions of '__builtin_dedup_pack' are not supported here}}
+struct InTemplateParams {};
+InTemplateParams<int> itp1;
+InTemplateParams<int, 1, 2, 3, 4, 5> itp2;
+
+template <class T>
+struct DeepTemplateParams {
+ template <__builtin_dedup_pack<T, int>...> // expected-error {{expansions of '__builtin_dedup_pack' are not supported here}}
+ struct Templ {};
+};
+DeepTemplateParams<int>::Templ<> dtp1; // expected-note {{requested here}} \
+ // expected-error {{no template named 'Templ'}}
+
+
+template <class ...T>
+struct MemInitializers : T... {
+ MemInitializers() : __builtin_dedup_pack<T...>()... {} // expected-error 2{{expansions of '__builtin_dedup_pack' are not supported here.}}
+};
+MemInitializers<> mi1; // expected-note {{in instantiation of member function}}
+MemInitializers<Base1, Base2> mi2; // expected-note {{in instantiation of member function}}
+
+template <class ...T>
+constexpr int dedup_in_expressions() {
+ // counts the number of unique Ts.
+ return ((1 + __builtin_dedup_pack<T...>()) + ...); // expected-error {{expansions of '__builtin_dedup_pack' are not supported here.}} \
+ // expected-note@+3 {{in instantiation of function template specialization}}
+}
+static_assert(dedup_in_expressions<int, int, short, double, int, short, double, int>() == 3); // expected-error {{not an integral constant expression}}
+
+template <class ...T>
+void in_exception_spec() throw(__builtin_dedup_pack<T...>...); // expected-error{{C++17 does not allow dynamic exception specifications}} \
+ // expected-note {{use 'noexcept}} \
+ // expected-error{{expansions of '__builtin_dedup_pack' are not supported here.}}
+
+void test_in_exception_spec() {
+ in_exception_spec<int, double, int>(); // expected-note {{instantiation of exception specification}}
+}
+
+template <class ...T>
+constexpr bool in_type_trait = __is_trivially_constructible(int, __builtin_dedup_pack<T...>...); // expected-error{{expansions of '__builtin_dedup_pack' are not supported here.}}
+
+static_assert(in_type_trait<int, int, int>); // expected-note{{in instantiation of variable template specialization}}
+
+template <class ...T>
+struct InFriends {
+ friend __builtin_dedup_pack<T>...; // expected-warning {{variadic 'friend' declarations are a C++2c extension}} \
+ // expected-error 2 {{expansions of '__builtin_dedup_pack' are not supported here.}} \
+ // expected-note@* 2 {{in instantiation of template class}}
+
+};
+struct Friend1 {};
+struct Friend2 {};
+InFriends<> if1;
+InFriends<Friend1, Friend2> if2;
+
+template <class ...T>
+struct InUsingDecl {
+ using __builtin_dedup_pack<T...>::func...; // expected-error 2 {{expansions of '__builtin_dedup_pack' are not supported here.}}
+};
+struct WithFunc1 { void func(); };
+struct WithFunc2 { void func(int); };
+InUsingDecl<> iu1; // expected-note {{in instantiation of template class}}
+InUsingDecl<WithFunc1, WithFunc2> iu2; // expected-note {{in instantiation of template class}}
+
+// Note: produces parsing errors and does not construct pack indexing.
+// Keep this commented out until the parser supports this.
+//
+// template <class ...T>
+// struct InPackIndexing {
+//
+// using type = __builtin_dedup_pack<T...>...[0];
+// };
+// static_assert(__is_same(InPackIndexing<int, int>, int));
+
+template <class ...T>
+struct LambdaInitCaptures {
+ static constexpr int test() {
+ [...foos=__builtin_dedup_pack<T...>()]{}; // expected-error 2{{expansions of '__builtin_dedup_pack' are not supported here.}}
+ return 3;
+ }
+};
+static_assert(LambdaInitCaptures<>::test() == 3); // expected-note {{in instantiation of member function}}
+static_assert(LambdaInitCaptures<int, int, int>::test() == 3); // expected-note {{in instantiation of member function}}
+
+template <class ...T>
+struct alignas(__builtin_dedup_pack<T...>...) AlignAs {}; // expected-error 2{{expansions of '__builtin_dedup_pack' are not supported here.}}
+AlignAs<> aa1; // expected-note {{in instantiation of template class}}
+AlignAs<int, double> aa2; // expected-note {{in instantiation of template class}}
+
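
Taken together, the new test pins down the contract of __builtin_dedup_pack: it is only meaningful inside a template, must be expanded with '...', and yields the argument types with duplicates removed while preserving the first occurrence of each. A minimal usage sketch based on the behavior exercised above (the Unique helper is hypothetical):

    template <typename...> struct TypeList;

    // The expansion happens at instantiation time, inside a template.
    template <class... Ts>
    struct Unique {
      using type = TypeList<__builtin_dedup_pack<Ts...>...>;
    };

    static_assert(__is_same(Unique<int, int*, int, double, float>::type,
                            TypeList<int, int*, double, float>));
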
diff --git a/clang/test/SemaTemplate/dependent-base-classes.cpp b/clang/test/SemaTemplate/dependent-base-classes.cpp
index b4b2669..b1c6102 100644
--- a/clang/test/SemaTemplate/dependent-base-classes.cpp
+++ b/clang/test/SemaTemplate/dependent-base-classes.cpp
@@ -32,7 +32,7 @@ namespace PR6031 {
template <class TT>
struct FI2
{
- C<typename FI2::type> a; // expected-error{{no type named 'type' in 'FI2<TT>'}}
+ C<typename FI2::type> a; // expected-error{{no type named 'type' in 'PR6031::FI2<TT>'}}
};
template<typename T>
@@ -54,9 +54,9 @@ namespace PR6031 {
template<typename T>
struct NoDepBase {
int foo() {
- class NoDepBase::Nested nested; // expected-error{{no class named 'Nested' in 'NoDepBase<T>'}}
- typedef typename NoDepBase::template MemberTemplate<T>::type type; // expected-error{{no member named 'MemberTemplate' in 'NoDepBase<T>'}}
- return NoDepBase::a; // expected-error{{no member named 'a' in 'NoDepBase<T>'}}
+ class NoDepBase::Nested nested; // expected-error{{no class named 'Nested' in 'PR6031::NoDepBase<T>'}}
+ typedef typename NoDepBase::template MemberTemplate<T>::type type; // expected-error{{no member named 'MemberTemplate' in 'PR6031::NoDepBase<T>'}}
+ return NoDepBase::a; // expected-error{{no member named 'a' in 'PR6031::NoDepBase<T>'}}
}
};
}
@@ -102,7 +102,7 @@ namespace PR6081 {
template< class X >
void f0(const X & k)
{
- this->template f1<int>()(k); // expected-error{{no member named 'f1' in 'C<T>'}}
+ this->template f1<int>()(k); // expected-error{{no member named 'f1' in 'PR6081::C<T>'}}
}
};
}
diff --git a/clang/test/SemaTemplate/dependent-names.cpp b/clang/test/SemaTemplate/dependent-names.cpp
index 54ce376..d6bd670 100644
--- a/clang/test/SemaTemplate/dependent-names.cpp
+++ b/clang/test/SemaTemplate/dependent-names.cpp
@@ -336,7 +336,7 @@ template < unsigned > struct X {
static const unsigned dimension = 3;
template<unsigned dim=dimension>
struct Y: Y<dim> { }; // expected-error{{base class has incomplete type}}
- // expected-note@-1{{definition of 'Y<dim>' is not complete until the closing '}'}}
+ // expected-note@-1{{definition of 'PR11421::X::Y<dim>' is not complete until the closing '}'}}
};
typedef X<3> X3;
X3::Y<>::iterator it; // expected-error {{no type named 'iterator' in 'PR11421::X<3>::Y<>'}}
diff --git a/clang/test/SemaTemplate/elaborated-type-specifier.cpp b/clang/test/SemaTemplate/elaborated-type-specifier.cpp
index 95c2aa9..ce6e258 100644
--- a/clang/test/SemaTemplate/elaborated-type-specifier.cpp
+++ b/clang/test/SemaTemplate/elaborated-type-specifier.cpp
@@ -79,21 +79,18 @@ namespace canon {
// expected-note@#canon-t3-3 {{candidate function}}
template <class T> constexpr int t4(typename T::template X<int>* = 0) { return 0; }
- // expected-note@-1 3{{candidate function}}
+ // expected-note@-1 2{{candidate function}}
template <class T> constexpr int t4(struct T::template X<int>* = 0) { return 1; }
- // expected-note@-1 3{{candidate function}}
+ // expected-note@-1 2{{candidate function}}
template <class T> constexpr int t4(union T::template X<int>* = 0) { return 2; }
- // expected-note@-1 3{{candidate function}}
+ // expected-note@-1 2{{candidate function}}
- // FIXME: This should work.
struct E { template <class T> using X = T; };
- static_assert(t4<E>() == 0); // expected-error {{call to 't4' is ambiguous}}
+ static_assert(t4<E>() == 0);
- // FIXME: Should not match the union overload.
struct F { template <class> struct X {}; };
static_assert(t4<F>() == 1); // expected-error {{call to 't4' is ambiguous}}
- // FIXME: Should not match the struct overload.
struct G { template <class> union X {}; };
static_assert(t4<G>() == 2); // expected-error {{call to 't4' is ambiguous}}
} // namespace canon
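
The updated expectations above capture the semantic fix: a member alias template (E) now matches only the 'typename T::template X<int>' overload, so t4<E>() is unambiguous, while class template members (F, G) still match the typename, struct, and union parameter forms alike and remain ambiguous. A compact restatement of the distinction (illustrative):

    struct E { template <class T> using X = T; };  // alias template
    struct F { template <class> struct X {}; };    // class template
    // t4<E>(): only 'typename T::template X<int>' matches -> returns 0.
    // t4<F>(): typename/struct/union parameter forms all match -> ambiguous.
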
diff --git a/clang/test/SemaTemplate/instantiate-requires-expr.cpp b/clang/test/SemaTemplate/instantiate-requires-expr.cpp
index 47689b9..e60f792 100644
--- a/clang/test/SemaTemplate/instantiate-requires-expr.cpp
+++ b/clang/test/SemaTemplate/instantiate-requires-expr.cpp
@@ -72,8 +72,8 @@ namespace type_requirement {
template<typename T> requires
false_v<requires { typename T::template temp<T>; }>
- // expected-note@-1 {{because 'false_v<requires { typename contains_template<int>::template temp<type_requirement::contains_template<int>>; }>' evaluated to false}}
- // expected-note@-2 {{because 'false_v<requires { typename contains_template<short>::template temp<type_requirement::contains_template<short>>; }>' evaluated to false}}
+ // expected-note@-1 {{because 'false_v<requires { typename type_requirement::contains_template<int>::template temp<type_requirement::contains_template<int>>; }>' evaluated to false}}
+ // expected-note@-2 {{because 'false_v<requires { typename type_requirement::contains_template<short>::template temp<type_requirement::contains_template<short>>; }>' evaluated to false}}
struct r2 {};
using r2i1 = r2<contains_template<int>>; // expected-error{{constraints not satisfied for class template 'r2' [with T = type_requirement::contains_template<int>]}}
diff --git a/clang/test/SemaTemplate/make_integer_seq.cpp b/clang/test/SemaTemplate/make_integer_seq.cpp
index cd36d1e..1203a58 100644
--- a/clang/test/SemaTemplate/make_integer_seq.cpp
+++ b/clang/test/SemaTemplate/make_integer_seq.cpp
@@ -4,116 +4,110 @@ template <class A1, A1... A2> struct A {};
using test1 = __make_integer_seq<A, int, 1>;
// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:5:1, col:43> col:7 test1 '__make_integer_seq<A, int, 1>':'A<int, 0>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, 1>' sugar
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, 1>' sugar alias
+// CHECK-NEXT: |-name: '__make_integer_seq' qualified
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
+// CHECK-NEXT: |-TemplateArgument template 'A'
+// CHECK-NEXT: | `-ClassTemplateDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:41> col:38 A
+// CHECK-NEXT: |-TemplateArgument type 'int'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: |-TemplateArgument expr '1'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'int'
+// CHECK-NEXT: | |-value: Int 1
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:42> 'int' 1
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} 'A<int, 0>' sugar
+// CHECK-NEXT: |-name: 'A' qualified
+// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A
+// CHECK-NEXT: |-TemplateArgument type 'int'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: |-TemplateArgument expr '0'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'int'
+// CHECK-NEXT: | |-value: Int 0
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:42> 'int' 0
+// CHECK-NEXT: `-RecordType 0x{{[0-9A-Fa-f]+}} 'A<int, 0>'
+// CHECK-NEXT: `-ClassTemplateSpecialization 0x{{[0-9A-Fa-f]+}} 'A'
+
+template <class B1, B1 B2> using B = __make_integer_seq<A, B1, B2>;
+using test2 = B<int, 1>;
+// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:23> col:7 test2 'B<int, 1>':'A<int, 0>'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} 'B<int, 1>' sugar alias
+// CHECK-NEXT: |-name: 'B' qualified
+// CHECK-NEXT: | `-TypeAliasTemplateDecl {{.+}} B
+// CHECK-NEXT: |-TemplateArgument type 'int'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: |-TemplateArgument expr '1'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:22> 'int'
+// CHECK-NEXT: | |-value: Int 1
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:22> 'int' 1
// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, 1>' sugar alias
// CHECK-NEXT: |-name: '__make_integer_seq' qualified
// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
// CHECK-NEXT: |-TemplateArgument template 'A'
// CHECK-NEXT: | `-ClassTemplateDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:41> col:38 A
// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: | `-SubstTemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'int' sugar class depth 0 index 0 B1 final
+// CHECK-NEXT: | |-TypeAliasTemplate 0x{{[0-9A-Fa-f]+}} 'B'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
// CHECK-NEXT: |-TemplateArgument expr '1'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'int'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:64> 'int'
// CHECK-NEXT: | |-value: Int 1
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:42> 'int' 1
+// CHECK-NEXT: | `-SubstNonTypeTemplateParmExpr 0x{{[0-9A-Fa-f]+}} <col:64> 'int'
+// CHECK-NEXT: | |-NonTypeTemplateParmDecl 0x{{[0-9A-Fa-f]+}} <col:21, col:24> col:24 referenced 'B1' depth 0 index 1 B2
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:64> 'int' 1
// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} 'A<int, 0>' sugar
// CHECK-NEXT: |-name: 'A' qualified
// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A
// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: | `-SubstTemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'int' sugar class depth 0 index 0 B1 final
+// CHECK-NEXT: | |-TypeAliasTemplate 0x{{[0-9A-Fa-f]+}} 'B'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
// CHECK-NEXT: |-TemplateArgument expr '0'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'int'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:64> 'int'
// CHECK-NEXT: | |-value: Int 0
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:42> 'int' 0
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:64> 'int' 0
// CHECK-NEXT: `-RecordType 0x{{[0-9A-Fa-f]+}} 'A<int, 0>'
// CHECK-NEXT: `-ClassTemplateSpecialization 0x{{[0-9A-Fa-f]+}} 'A'
-template <class B1, B1 B2> using B = __make_integer_seq<A, B1, B2>;
-using test2 = B<int, 1>;
-// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:23> col:7 test2 'B<int, 1>':'A<int, 0>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} 'B<int, 1>' sugar
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} 'B<int, 1>' sugar alias
-// CHECK-NEXT: |-name: 'B' qualified
-// CHECK-NEXT: | `-TypeAliasTemplateDecl {{.+}} B
-// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
-// CHECK-NEXT: |-TemplateArgument expr '1'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:22> 'int'
-// CHECK-NEXT: | |-value: Int 1
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:22> 'int' 1
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, 1>' sugar
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, 1>' sugar alias
-// CHECK-NEXT: |-name: '__make_integer_seq' qualified
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
-// CHECK-NEXT: |-TemplateArgument template 'A'
-// CHECK-NEXT: | `-ClassTemplateDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:41> col:38 A
-// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-SubstTemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'int' sugar class depth 0 index 0 B1 final
-// CHECK-NEXT: | |-TypeAliasTemplate 0x{{[0-9A-Fa-f]+}} 'B'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
-// CHECK-NEXT: |-TemplateArgument expr '1'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:64> 'int'
-// CHECK-NEXT: | |-value: Int 1
-// CHECK-NEXT: | `-SubstNonTypeTemplateParmExpr 0x{{[0-9A-Fa-f]+}} <col:64> 'int'
-// CHECK-NEXT: | |-NonTypeTemplateParmDecl 0x{{[0-9A-Fa-f]+}} <col:21, col:24> col:24 referenced 'B1' depth 0 index 1 B2
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:64> 'int' 1
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} 'A<int, 0>' sugar
-// CHECK-NEXT: |-name: 'A' qualified
-// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A
-// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-SubstTemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'int' sugar class depth 0 index 0 B1 final
-// CHECK-NEXT: | |-TypeAliasTemplate 0x{{[0-9A-Fa-f]+}} 'B'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
-// CHECK-NEXT: |-TemplateArgument expr '0'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:64> 'int'
-// CHECK-NEXT: | |-value: Int 0
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:64> 'int' 0
-// CHECK-NEXT: `-RecordType 0x{{[0-9A-Fa-f]+}} 'A<int, 0>'
-// CHECK-NEXT: `-ClassTemplateSpecialization 0x{{[0-9A-Fa-f]+}} 'A'
-
template <template <class T, T...> class S, class T, int N> struct C {
using test3 = __make_integer_seq<S, T, N>;
// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:3, col:43> col:9 test3 '__make_integer_seq<S, T, N>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<S, T, N>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<S, T, N>' dependent
-// CHECK-NEXT: |-name: '__make_integer_seq'
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
-// CHECK-NEXT: |-TemplateArgument template 'S'
-// CHECK-NEXT: | | `-TemplateTemplateParmDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:11, col:42> col:42 depth 0 index 0 S
-// CHECK-NEXT: |-TemplateArgument type 'T'
-// CHECK-NEXT: | `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'T' dependent depth 0 index 1
-// CHECK-NEXT: | `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'T'
-// CHECK-NEXT: `-TemplateArgument expr 'N'
-// CHECK-NEXT: `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'T' <Dependent>
-// CHECK-NEXT: `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <col:42> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<S, T, N>' dependent
+// CHECK-NEXT: |-name: '__make_integer_seq'
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
+// CHECK-NEXT: |-TemplateArgument template 'S'
+// CHECK-NEXT: | | `-TemplateTemplateParmDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:11, col:42> col:42 depth 0 index 0 S
+// CHECK-NEXT: |-TemplateArgument type 'T'
+// CHECK-NEXT: | `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'T' dependent depth 0 index 1
+// CHECK-NEXT: | `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'T'
+// CHECK-NEXT: `-TemplateArgument expr 'N'
+// CHECK-NEXT: `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'T' <Dependent>
+// CHECK-NEXT: `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <col:42> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
using test4 = __make_integer_seq<A, T, 1>;
// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:3, col:43> col:9 test4 '__make_integer_seq<A, T, 1>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, T, 1>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, T, 1>' dependent
-// CHECK-NEXT: |-name: '__make_integer_seq'
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
-// CHECK-NEXT: |-TemplateArgument template 'A'
-// CHECK-NEXT: | `-ClassTemplateDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:41> col:38 A
-// CHECK-NEXT: |-TemplateArgument type 'T'
-// CHECK-NEXT: | `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'T' dependent depth 0 index 1
-// CHECK-NEXT: | `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'T'
-// CHECK-NEXT: `-TemplateArgument expr '1'
-// CHECK-NEXT: `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'T' <Dependent>
-// CHECK-NEXT: `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:42> 'int' 1
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, T, 1>' dependent
+// CHECK-NEXT: |-name: '__make_integer_seq'
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
+// CHECK-NEXT: |-TemplateArgument template 'A'
+// CHECK-NEXT: | `-ClassTemplateDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:41> col:38 A
+// CHECK-NEXT: |-TemplateArgument type 'T'
+// CHECK-NEXT: | `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'T' dependent depth 0 index 1
+// CHECK-NEXT: | `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'T'
+// CHECK-NEXT: `-TemplateArgument expr '1'
+// CHECK-NEXT: `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'T' <Dependent>
+// CHECK-NEXT: `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:42> 'int' 1
using test5 = __make_integer_seq<A, int, N>;
// CHECK: `-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:3, col:45> col:9 test5 '__make_integer_seq<A, int, N>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, N>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, N>' dependent
-// CHECK-NEXT: |-name: '__make_integer_seq'
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
-// CHECK-NEXT: |-TemplateArgument template 'A'
-// CHECK-NEXT: | `-ClassTemplateDecl 0x{{.+}} <line:{{.+}}:1, col:41> col:38 A
-// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
-// CHECK-NEXT: `-TemplateArgument expr 'N'
-// CHECK-NEXT: `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:44> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, N>' dependent
+// CHECK-NEXT: |-name: '__make_integer_seq'
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
+// CHECK-NEXT: |-TemplateArgument template 'A'
+// CHECK-NEXT: | `-ClassTemplateDecl 0x{{.+}} <line:{{.+}}:1, col:41> col:38 A
+// CHECK-NEXT: |-TemplateArgument type 'int'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: `-TemplateArgument expr 'N'
+// CHECK-NEXT: `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:44> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
};
// expected-no-diagnostics
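
For reference, __make_integer_seq<S, T, N> names S<T, 0, 1, ..., N-1>; test1 above shows __make_integer_seq<A, int, 1> collapsing to A<int, 0>, and the CHECK churn comes purely from dropping the ElaboratedType node that used to wrap the sugar. A minimal usage sketch (the Seq name is illustrative):

    template <class T, T... Is> struct Seq {};

    using S3 = __make_integer_seq<Seq, int, 3>;
    static_assert(__is_same(S3, Seq<int, 0, 1, 2>));
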
diff --git a/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp b/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp
index 547e594..16fe8d3 100644
--- a/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp
+++ b/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp
@@ -76,7 +76,7 @@ template<typename T> struct C;
// Test lookup with incomplete lookup context
template<typename T>
auto C<T>::f() -> decltype(x) { } // expected-error {{use of undeclared identifier 'x'}}
- // expected-error@-1 {{out-of-line definition of 'f' from class 'C<T>' without definition}}
+ // expected-error@-1 {{out-of-line definition of 'f' from class 'lookup_dependent_bases_id_expr::C<T>' without definition}}
}
diff --git a/clang/test/SemaTemplate/ms-sizeof-missing-typename.cpp b/clang/test/SemaTemplate/ms-sizeof-missing-typename.cpp
index 463d86f..ea72332 100644
--- a/clang/test/SemaTemplate/ms-sizeof-missing-typename.cpp
+++ b/clang/test/SemaTemplate/ms-sizeof-missing-typename.cpp
@@ -4,9 +4,9 @@
// get the size of this type, so they don't get errors after inserting typename.
namespace basic {
-template <typename T> int type_f() { return sizeof T::type; } // expected-error {{missing 'typename' prior to dependent type name 'X::type'}}
-template <typename T> int type_g() { return sizeof(T::type); } // expected-warning {{missing 'typename' prior to dependent type name 'X::type'}}
-template <typename T> int type_h() { return sizeof((T::type)); } // expected-error {{missing 'typename' prior to dependent type name 'X::type'}}
+template <typename T> int type_f() { return sizeof T::type; } // expected-error {{missing 'typename' prior to dependent type name 'basic::X::type'}}
+template <typename T> int type_g() { return sizeof(T::type); } // expected-warning {{missing 'typename' prior to dependent type name 'basic::X::type'}}
+template <typename T> int type_h() { return sizeof((T::type)); } // expected-error {{missing 'typename' prior to dependent type name 'basic::X::type'}}
template <typename T> int value_f() { return sizeof T::not_a_type; }
template <typename T> int value_g() { return sizeof(T::not_a_type); }
template <typename T> int value_h() { return sizeof((T::not_a_type)); }
@@ -30,9 +30,9 @@ template <typename T>
struct Foo {
enum {
// expected-warning@+2 {{use 'template' keyword to treat 'InnerTemplate' as a dependent template name}}
- // expected-warning@+1 {{missing 'typename' prior to dependent type name 'Bar::InnerType'}}
+ // expected-warning@+1 {{missing 'typename' prior to dependent type name 'nested_sizeof::Bar::InnerType'}}
x1 = sizeof(typename T::/*template*/ InnerTemplate<sizeof(/*typename*/ T::InnerType)>),
- // expected-warning@+1 {{missing 'typename' prior to dependent type name 'Bar::InnerType'}}
+ // expected-warning@+1 {{missing 'typename' prior to dependent type name 'nested_sizeof::Bar::InnerType'}}
x2 = sizeof(typename T::template InnerTemplate<sizeof(/*typename*/ T::InnerType)>),
// expected-warning@+1 {{use 'template' keyword to treat 'InnerTemplate' as a dependent template name}}
y1 = sizeof(typename T::/*template*/ InnerTemplate<sizeof(T::InnerVar)>),
@@ -50,7 +50,7 @@ template struct Foo<Bar>; // expected-note-re {{in instantiation {{.*}} requeste
}
namespace ambiguous_missing_parens {
-// expected-error@+1 {{'Q::template U' is expected to be a non-type template, but instantiated to a class template}}
+// expected-error@+1 {{'ambiguous_missing_parens::Q::template U' is expected to be a non-type template, but instantiated to a class template}}
template <typename T> void f() { int a = sizeof T::template U<0> + 4; }
struct Q {
// expected-note@+1 {{class template declared here}}
diff --git a/clang/test/SemaTemplate/nested-implicit-deduction-guides.cpp b/clang/test/SemaTemplate/nested-implicit-deduction-guides.cpp
index 5c7a902..8e3b1b6 100644
--- a/clang/test/SemaTemplate/nested-implicit-deduction-guides.cpp
+++ b/clang/test/SemaTemplate/nested-implicit-deduction-guides.cpp
@@ -84,11 +84,11 @@ nested_init_list<int>::concept_fail nil_invalid{1, ""};
// expected-note@#INIT_LIST_INNER_INVALID {{candidate template ignored: constraints not satisfied [with F = const char *]}}
// expected-note@#INIT_LIST_INNER_INVALID_HEADER {{because 'const char *' does not satisfy 'False'}}
// expected-note@#False {{because 'false' evaluated to false}}
-// expected-note@#INIT_LIST_INNER_INVALID {{implicit deduction guide declared as 'template <False F> concept_fail(int, F) -> concept_fail<F>'}}
+// expected-note@#INIT_LIST_INNER_INVALID {{implicit deduction guide declared as 'template <False F> concept_fail(int, F) -> nested_init_list<int>::concept_fail<F>'}}
// expected-note@#INIT_LIST_INNER_INVALID {{candidate function template not viable: requires 1 argument, but 2 were provided}}
-// expected-note@#INIT_LIST_INNER_INVALID {{implicit deduction guide declared as 'template <False F> concept_fail(concept_fail<F>) -> concept_fail<F>'}}
+// expected-note@#INIT_LIST_INNER_INVALID {{implicit deduction guide declared as 'template <False F> concept_fail(nested_init_list<int>::concept_fail<F>) -> nested_init_list<int>::concept_fail<F>'}}
// expected-note@#INIT_LIST_INNER_INVALID {{candidate function template not viable: requires 0 arguments, but 2 were provided}}
-// expected-note@#INIT_LIST_INNER_INVALID {{implicit deduction guide declared as 'template <False F> concept_fail() -> concept_fail<F>'}}
+// expected-note@#INIT_LIST_INNER_INVALID {{implicit deduction guide declared as 'template <False F> concept_fail() -> nested_init_list<int>::concept_fail<F>'}}
namespace GH88142 {
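
A hypothetical repro (not part of this patch) showing the shape of code the updated notes above cover: implicit deduction guides for a class template nested inside another template are now spelled with a fully qualified return type, such as 'demo::Outer<int>::Inner<U>' instead of the bare 'Inner<U>'. The 'demo' namespace and all names below are illustrative only.

namespace demo {
template <class T> struct Outer {
  template <class U> struct Inner {
    Inner(U);
  };
};
} // namespace demo

// CTAD on the nested template; when deduction fails, candidate notes are
// expected to cite guides along the lines of
//   'template <class U> Inner(U) -> demo::Outer<int>::Inner<U>'
demo::Outer<int>::Inner deduced{42};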
diff --git a/clang/test/SemaTemplate/nested-name-spec-template.cpp b/clang/test/SemaTemplate/nested-name-spec-template.cpp
index d366925..f99fa4b 100644
--- a/clang/test/SemaTemplate/nested-name-spec-template.cpp
+++ b/clang/test/SemaTemplate/nested-name-spec-template.cpp
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -fsyntax-only -verify %s
-// RUN: %clang_cc1 -fsyntax-only -verify -std=c++98 %s
+// RUN: %clang_cc1 -fsyntax-only -verify %s -Wno-c++20-extensions
+// RUN: %clang_cc1 -fsyntax-only -verify -Wno-c++11-extensions -std=c++98 %s
// RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s
namespace N {
@@ -24,14 +24,7 @@ namespace N {
M::Promote<int>::type *ret_intptr3(int* ip) { return ip; }
M::template Promote<int>::type *ret_intptr4(int* ip) { return ip; }
-#if __cplusplus <= 199711L
- // expected-warning@-2 {{'template' keyword outside of a template}}
-#endif
-
M::template Promote<int> pi;
-#if __cplusplus <= 199711L
- // expected-warning@-2 {{'template' keyword outside of a template}}
-#endif
}
N::M::Promote<int>::type *ret_intptr5(int* ip) { return ip; }
@@ -130,7 +123,7 @@ namespace PR9226 {
template<typename T, typename U>
struct Y {
- typedef typename T::template f<U> type; // expected-error{{template name refers to non-type template 'X::template f'}}
+ typedef typename T::template f<U> type; // expected-error{{template name refers to non-type template 'PR9226::X::template f'}}
};
Y<X, int> yxi; // expected-note{{in instantiation of template class 'PR9226::Y<PR9226::X, int>' requested here}}
@@ -154,3 +147,66 @@ namespace sugared_template_instantiation {
struct B { typedef int type1; };
typedef A<const B> type2;
} // namespace sugared_template_instantiation
+
+namespace unresolved_using {
+ template <class> struct A {
+ struct B {
+ typedef int X;
+ };
+ };
+ template <class T> struct C : A<T> {
+ using typename A<T>::B;
+ typedef typename B::X Y;
+ };
+ template struct C<int>;
+} // namespace unresolved_using
+
+#if __cplusplus >= 201703L
+namespace SubstTemplateTypeParmPackType {
+ template <int...> struct A {};
+
+ template <class... Ts> void f() {
+ []<int ... Is>(A<Is...>) { (Ts::g(Is) && ...); }(A<0>{});
+ };
+
+ struct B { static void g(int); };
+
+ template void f<B>();
+} // namespace SubstTemplateTypeParmPackType
+#endif
+
+namespace DependentUnaryTransform {
+ template <class T> using decay_t = __decay(T);
+ template <class, class> struct A;
+ template <class T> struct A<T, typename decay_t<T>::X>;
+} // namespace DependentUnaryTransform
+
+namespace DependentSizedArray {
+ template <int V> using Z = int[V];
+ template <class, class> struct A;
+ template <class T> struct A<T, typename Z<T(0)>::X>;
+} // namespace DependentSizedArray
+
+namespace GH155281 {
+ template <bool> struct enable_if;
+ template <class _Tp, _Tp> struct integral_constant;
+ template <typename> struct conjunction;
+ template <typename T> using value_type_t = T;
+ template <class Check> using require_t = typename enable_if<Check::value>::type;
+ template <template <class> class, template <class> class,
+ template <class> class, class... Check>
+ using container_type_check_base =
+ integral_constant<bool, conjunction<Check...>::value>;
+ template <typename> struct is_std_vector;
+ template <template <class> class TypeCheck, class... Check>
+ using require_std_vector_vt =
+ require_t<container_type_check_base<is_std_vector, value_type_t, TypeCheck,
+ Check...> >;
+ template <typename, typename> class vector_seq_view;
+ namespace internal {
+ template <typename> using is_matrix_or_std_vector = int;
+ }
+ template <typename T>
+ class vector_seq_view<
+ T, require_std_vector_vt<internal::is_matrix_or_std_vector, T> >;
+} // namespace GH155281
diff --git a/clang/test/SemaTemplate/nested-template.cpp b/clang/test/SemaTemplate/nested-template.cpp
index 7911cf5..b5da1b1 100644
--- a/clang/test/SemaTemplate/nested-template.cpp
+++ b/clang/test/SemaTemplate/nested-template.cpp
@@ -152,14 +152,14 @@ namespace PR10924 {
template< class Topology, class ctype >
template< int codim >
- class ReferenceElement< Topology, ctype > :: BaryCenterArray // expected-error{{out-of-line definition of 'BaryCenterArray' does not match any declaration in 'ReferenceElement<Topology, ctype>'}}
+ class ReferenceElement< Topology, ctype > :: BaryCenterArray // expected-error{{out-of-line definition of 'BaryCenterArray' does not match any declaration in 'PR10924::ReferenceElement<Topology, ctype>'}}
{
};
}
class Outer1 {
template <typename T> struct X;
- template <typename T> int X<T>::func() {} // expected-error{{out-of-line definition of 'func' from class 'X<T>' without definition}}
+ template <typename T> int X<T>::func() {} // expected-error{{out-of-line definition of 'func' from class 'Outer1::X<T>' without definition}}
};
namespace RefPack {
diff --git a/clang/test/SemaTemplate/overload-candidates.cpp b/clang/test/SemaTemplate/overload-candidates.cpp
index de998d7..a9c86b2 100644
--- a/clang/test/SemaTemplate/overload-candidates.cpp
+++ b/clang/test/SemaTemplate/overload-candidates.cpp
@@ -16,9 +16,9 @@ void test_dyn_cast(int* ptr) {
(void)dyn_cast(ptr); // expected-error{{no matching function for call to 'dyn_cast'}}
}
-template<int I, typename T>
+template<int I, typename T>
void get(const T&); // expected-note{{candidate template ignored: invalid explicitly-specified argument for template parameter 'I'}}
-template<template<class T> class, typename T>
+template<template<class T> class, typename T>
void get(const T&); // expected-note{{candidate template ignored: invalid explicitly-specified argument for 1st template parameter}}
void test_get(void *ptr) {
@@ -100,7 +100,7 @@ namespace PR15673 {
#if __cplusplus <= 199711L
// expected-warning@-2 {{default template arguments for a function template are a C++11 extension}}
#endif
- // expected-note@+1 {{candidate template ignored: requirement 'a_trait<int>::value' was not satisfied [with T = int]}}
+ // expected-note@+1 {{candidate template ignored: requirement 'PR15673::a_trait<int>::value' was not satisfied [with T = int]}}
void foo() {}
void bar() { foo<int>(); } // expected-error {{no matching function for call to 'foo'}}
@@ -128,7 +128,7 @@ namespace PR15673 {
#if __cplusplus <= 199711L
// expected-warning@-2 {{alias declarations are a C++11 extension}}
#endif
- // expected-note@+7 {{candidate template ignored: requirement 'some_trait<int>::value' was not satisfied [with T = int]}}
+ // expected-note@+7 {{candidate template ignored: requirement 'PR15673::some_trait<int>::value' was not satisfied [with T = int]}}
template<typename T,
typename Requires = unicorns<T> >
@@ -148,7 +148,7 @@ namespace PR15673 {
template<typename T,
int n = 42,
typename std::enable_if<n == 43 || (some_passing_trait<T>::value && some_trait<T>::value), int>::type = 0>
- void almost_rangesv3(); // expected-note{{candidate template ignored: requirement '42 == 43 || (some_passing_trait<int>::value && some_trait<int>::value)' was not satisfied}}
+ void almost_rangesv3(); // expected-note{{candidate template ignored: requirement '42 == 43 || (PR15673::some_passing_trait<int>::value && PR15673::some_trait<int>::value)' was not satisfied}}
void test_almost_rangesv3() { almost_rangesv3<int>(); } // expected-error{{no matching function for call to 'almost_rangesv3'}}
#define CONCEPT_REQUIRES_(...) \
@@ -161,6 +161,6 @@ namespace PR15673 {
#endif
template<typename T,
CONCEPT_REQUIRES_(some_passing_trait<T>::value && some_trait<T>::value)>
- void rangesv3(); // expected-note{{candidate template ignored: requirement 'some_trait<int>::value' was not satisfied [with T = int, x = 42]}}
+ void rangesv3(); // expected-note{{candidate template ignored: requirement 'PR15673::some_trait<int>::value' was not satisfied [with T = int, x = 42]}}
void test_rangesv3() { rangesv3<int>(); } // expected-error{{no matching function for call to 'rangesv3'}}
}
diff --git a/clang/test/SemaTemplate/temp_arg_nontype.cpp b/clang/test/SemaTemplate/temp_arg_nontype.cpp
index 9363e74..7d2a010 100644
--- a/clang/test/SemaTemplate/temp_arg_nontype.cpp
+++ b/clang/test/SemaTemplate/temp_arg_nontype.cpp
@@ -438,7 +438,7 @@ namespace dependent_nested_partial_specialization {
template<template<typename> class X> struct A {
template<typename T, X<T> N> struct B; // expected-note 2{{here}}
- template <typename T> struct B<T, 0> {}; // expected-error {{non-type template argument specializes a template parameter with dependent type 'Y<T>' (aka 'T *')}}
+ template <typename T> struct B<T, 0> {}; // expected-error {{non-type template argument specializes a template parameter with dependent type 'dependent_nested_partial_specialization::Y<T>' (aka 'T *')}}
};
A<X>::B<int, 0> ax;
A<Y>::B<int, &n> ay; // expected-error {{undefined}} expected-note {{instantiation of}}
diff --git a/clang/test/SemaTemplate/temp_arg_nontype_cxx1z.cpp b/clang/test/SemaTemplate/temp_arg_nontype_cxx1z.cpp
index c35743b..9c25e26 100644
--- a/clang/test/SemaTemplate/temp_arg_nontype_cxx1z.cpp
+++ b/clang/test/SemaTemplate/temp_arg_nontype_cxx1z.cpp
@@ -621,3 +621,8 @@ namespace GH73460 {
int j;
template struct A<int&, j, j>;
} // namespace GH73460
+
+namespace GH118190 {
+ template <auto> int x;
+ template <int i> int x<i>;
+}
diff --git a/clang/test/SemaTemplate/template-id-expr.cpp b/clang/test/SemaTemplate/template-id-expr.cpp
index a13013a..0527af4 100644
--- a/clang/test/SemaTemplate/template-id-expr.cpp
+++ b/clang/test/SemaTemplate/template-id-expr.cpp
@@ -202,7 +202,7 @@ struct P {
struct Q {
template <typename T> int foo() {
return T::template I<int>;
- // expected-error@-1 {{'P::template I' is expected to be a non-type template, but instantiated to a type alias template}}
+ // expected-error@-1 {{'non_functions::PR88832::P::template I' is expected to be a non-type template, but instantiated to a type alias template}}
// expected-note@#TypeAlias {{type alias template declared here}}
}
};
diff --git a/clang/test/SemaTemplate/type_pack_element.cpp b/clang/test/SemaTemplate/type_pack_element.cpp
index 5ff010c..e077638 100644
--- a/clang/test/SemaTemplate/type_pack_element.cpp
+++ b/clang/test/SemaTemplate/type_pack_element.cpp
@@ -2,61 +2,57 @@
using test1 = __type_pack_element<0, int>;
// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <<stdin>:3:1, col:41> col:7 test1 '__type_pack_element<0, int>':'int'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<0, int>' sugar
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<0, int>' sugar alias
-// CHECK-NEXT: |-name: '__type_pack_element' qualified
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
-// CHECK-NEXT: |-TemplateArgument expr '0'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <col:35> '__size_t':'unsigned long'
-// CHECK-NEXT: | |-value: Int 0
-// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:35> '__size_t':'unsigned long' <IntegralCast>
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:35> 'int' 0
-// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
-// CHECK-NEXT: `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<0, int>' sugar alias
+// CHECK-NEXT: |-name: '__type_pack_element' qualified
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
+// CHECK-NEXT: |-TemplateArgument expr '0'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <col:35> '__size_t':'unsigned long'
+// CHECK-NEXT: | |-value: Int 0
+// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:35> '__size_t':'unsigned long' <IntegralCast>
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:35> 'int' 0
+// CHECK-NEXT: |-TemplateArgument type 'int'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
template<int N, class ...Ts> struct A {
using test2 = __type_pack_element<N, Ts...>;
// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:3, col:45> col:9 test2 '__type_pack_element<N, Ts...>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<N, Ts...>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<N, Ts...>' dependent
-// CHECK-NEXT: |-name: '__type_pack_element' qualified
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
-// CHECK-NEXT: |-TemplateArgument expr 'N'
-// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long' <IntegralCast>
-// CHECK-NEXT: | `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <col:37> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
-// CHECK-NEXT: `-TemplateArgument type 'Ts...'
-// CHECK-NEXT: `-PackExpansionType 0x{{[0-9A-Fa-f]+}} 'Ts...' dependent
-// CHECK-NEXT: `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'Ts' dependent contains_unexpanded_pack depth 0 index 1 pack
-// CHECK-NEXT: `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'Ts'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<N, Ts...>' dependent
+// CHECK-NEXT: |-name: '__type_pack_element' qualified
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
+// CHECK-NEXT: |-TemplateArgument expr 'N'
+// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long' <IntegralCast>
+// CHECK-NEXT: | `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <col:37> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
+// CHECK-NEXT: `-TemplateArgument type 'Ts...'
+// CHECK-NEXT: `-PackExpansionType 0x{{[0-9A-Fa-f]+}} 'Ts...' dependent
+// CHECK-NEXT: `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'Ts' dependent contains_unexpanded_pack depth 0 index 1 pack
+// CHECK-NEXT: `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'Ts'
using test3 = __type_pack_element<0, Ts...>;
// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:3, col:45> col:9 test3 '__type_pack_element<0, Ts...>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<0, Ts...>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<0, Ts...>' dependent
-// CHECK-NEXT: |-name: '__type_pack_element' qualified
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
-// CHECK-NEXT: |-TemplateArgument expr '0'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long'
-// CHECK-NEXT: | |-value: Int 0
-// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long' <IntegralCast>
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:37> 'int' 0
-// CHECK-NEXT: `-TemplateArgument type 'Ts...'
-// CHECK-NEXT: `-PackExpansionType 0x{{[0-9A-Fa-f]+}} 'Ts...' dependent
-// CHECK-NEXT: `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'Ts' dependent contains_unexpanded_pack depth 0 index 1 pack
-// CHECK-NEXT: `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'Ts'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<0, Ts...>' dependent
+// CHECK-NEXT: |-name: '__type_pack_element' qualified
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
+// CHECK-NEXT: |-TemplateArgument expr '0'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long'
+// CHECK-NEXT: | |-value: Int 0
+// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long' <IntegralCast>
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:37> 'int' 0
+// CHECK-NEXT: `-TemplateArgument type 'Ts...'
+// CHECK-NEXT: `-PackExpansionType 0x{{[0-9A-Fa-f]+}} 'Ts...' dependent
+// CHECK-NEXT: `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'Ts' dependent contains_unexpanded_pack depth 0 index 1 pack
+// CHECK-NEXT: `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'Ts'
using test4 = __type_pack_element<N, int>;
// CHECK: `-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:3, col:43> col:9 test4 '__type_pack_element<N, int>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<N, int>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<N, int>' dependent
-// CHECK-NEXT: |-name: '__type_pack_element' qualified
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
-// CHECK-NEXT: |-TemplateArgument expr 'N'
-// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long' <IntegralCast>
-// CHECK-NEXT: | `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <col:37> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
-// CHECK-NEXT: `-TemplateArgument type 'int'
-// CHECK-NEXT: `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<N, int>' dependent
+// CHECK-NEXT: |-name: '__type_pack_element' qualified
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
+// CHECK-NEXT: |-TemplateArgument expr 'N'
+// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long' <IntegralCast>
+// CHECK-NEXT: | `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <col:37> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
+// CHECK-NEXT: `-TemplateArgument type 'int'
+// CHECK-NEXT: `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
};
// expected-no-diagnostics
diff --git a/clang/test/SemaTemplate/typename-specifier-4.cpp b/clang/test/SemaTemplate/typename-specifier-4.cpp
index 8d9962e..8f9dce6 100644
--- a/clang/test/SemaTemplate/typename-specifier-4.cpp
+++ b/clang/test/SemaTemplate/typename-specifier-4.cpp
@@ -147,7 +147,7 @@ namespace rdar8740998 {
// expected-error{{dependent using declaration resolved to type without 'typename'}}
void f() {
- typename X<T>::iterator i; // expected-error{{typename specifier refers to a dependent using declaration for a value 'iterator' in 'X<T>'}}
+ typename X<T>::iterator i; // expected-error{{typename specifier refers to a dependent using declaration for a value 'iterator' in 'rdar8740998::X<T>'}}
}
};
diff --git a/clang/test/SemaTemplate/typename-specifier.cpp b/clang/test/SemaTemplate/typename-specifier.cpp
index 01acc34..27175c5 100644
--- a/clang/test/SemaTemplate/typename-specifier.cpp
+++ b/clang/test/SemaTemplate/typename-specifier.cpp
@@ -40,7 +40,7 @@ void test(double d) {
// expected-warning@-3 2{{'typename' outside of a template is a C++11 extension}}
#endif
int five = f(5);
-
+
using namespace N;
for (typename A::type i = 0; i < 10; ++i)
#if __cplusplus <= 199711L
@@ -102,7 +102,7 @@ D<long> struct_D; // expected-note {{in instantiation of template class 'D<long
template<typename T> struct E {
typedef typename T::foo foo;
- typedef typename foo::bar bar; // expected-error {{type 'E<F>::foo' (aka 'double') cannot be used prior to '::' because it has no members}}
+ typedef typename foo::bar bar; // expected-error {{type 'foo' (aka 'double') cannot be used prior to '::' because it has no members}}
};
struct F {
@@ -245,7 +245,7 @@ void j() {
namespace pointer_vs_multiply {
int x;
-// expected-error@+1 {{missing 'typename' prior to dependent type name 'B::type_or_int'}}
+// expected-error@+1 {{missing 'typename' prior to dependent type name 'pointer_vs_multiply::B::type_or_int'}}
template <typename T> void g() { T::type_or_int * x; }
// expected-error@+1 {{typename specifier refers to non-type member 'type_or_int' in 'pointer_vs_multiply::A'}}
template <typename T> void h() { typename T::type_or_int * x; }
diff --git a/clang/test/SemaTemplate/using-decl.cpp b/clang/test/SemaTemplate/using-decl.cpp
index 1ef2a2d..d54d3a3 100644
--- a/clang/test/SemaTemplate/using-decl.cpp
+++ b/clang/test/SemaTemplate/using-decl.cpp
@@ -14,3 +14,15 @@ namespace UsingInGenericLambda {
}
void e() { c<int>(); }
}
+
+namespace UsingUsingEnum {
+ namespace foo {
+ enum class EnumOne {};
+ }
+ using foo::EnumOne;
+
+ template <class> void t() {
+ using enum EnumOne;
+ }
+ template void t<void>();
+} // namespace UsingUsingEnum
diff --git a/clang/test/lit.cfg.py b/clang/test/lit.cfg.py
index 1957bb1..d343191 100644
--- a/clang/test/lit.cfg.py
+++ b/clang/test/lit.cfg.py
@@ -316,7 +316,7 @@ if re.match(r".*-(windows-msvc)$", config.target_triple):
# [PR8833] LLP64-incompatible tests
if not re.match(
- r"^(aarch64|x86_64).*-(windows-msvc|windows-gnu)$", config.target_triple
+ r"^(aarch64|arm64ec|x86_64).*-(windows-msvc|windows-gnu)$", config.target_triple
):
config.available_features.add("LP64")
@@ -410,3 +410,13 @@ if "system-aix" in config.available_features:
# possibly be present in system and user configuration files, so disable
# default configs for the test runs.
config.environment["CLANG_NO_DEFAULT_CONFIG"] = "1"
+
+if lit_config.update_tests:
+ import sys
+ import os
+
+ utilspath = os.path.join(config.llvm_src_root, "utils")
+ sys.path.append(utilspath)
+ from update_any_test_checks import utc_lit_plugin
+
+ lit_config.test_updaters.append(utc_lit_plugin)
diff --git a/clang/test/lit.site.cfg.py.in b/clang/test/lit.site.cfg.py.in
index 176cf64..f50953a 100644
--- a/clang/test/lit.site.cfg.py.in
+++ b/clang/test/lit.site.cfg.py.in
@@ -46,7 +46,6 @@ config.ppc_linux_default_ieeelongdouble = @PPC_LINUX_DEFAULT_IEEELONGDOUBLE@
config.have_llvm_driver = @LLVM_TOOL_LLVM_DRIVER_BUILD@
config.spirv_tools_tests = @LLVM_INCLUDE_SPIRV_TOOLS_TESTS@
config.substitutions.append(("%llvm-version-major", "@LLVM_VERSION_MAJOR@"))
-config.has_key_instructions = @LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS@
import lit.llvm
lit.llvm.initialize(lit_config, config)
diff --git a/clang/test/utils/update_cc_test_checks/lit.local.cfg b/clang/test/utils/update_cc_test_checks/lit.local.cfg
index f2810fa..d7cc78d 100644
--- a/clang/test/utils/update_cc_test_checks/lit.local.cfg
+++ b/clang/test/utils/update_cc_test_checks/lit.local.cfg
@@ -39,7 +39,7 @@ else:
lit = config.llvm_external_lit
else:
lit = shell_quote(
- glob.glob(os.path.join(config.llvm_tools_dir, "llvm-lit*"))[0]
+ os.path.join(config.llvm_tools_dir, "llvm-lit.py" if os.name == "nt" else "llvm-lit")
)
python = shell_quote(config.python_executable)
config.substitutions.append(
diff --git a/clang/tools/clang-format/git-clang-format b/clang/tools/clang-format/git-clang-format
index e709803..fe2dd28 100755
--- a/clang/tools/clang-format/git-clang-format
+++ b/clang/tools/clang-format/git-clang-format
@@ -419,7 +419,7 @@ def compute_diff(commits, files, staged, diff_common_commit):
if len(commits) == 2:
git_tool = "diff-tree"
if diff_common_commit:
- commits = [f"{commits[0]}...{commits[1]}"]
+ extra_args += ["--merge-base"]
elif staged:
extra_args += ["--cached"]
diff --git a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
index 1d91f5f2..a56e758 100644
--- a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
+++ b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
@@ -911,9 +911,9 @@ Error handleOverrideImages(
/// Transforms all the extracted offloading input files into an image that can
/// be registered by the runtime.
-Expected<SmallVector<StringRef>> linkAndWrapDeviceFiles(
- SmallVectorImpl<SmallVector<OffloadFile>> &LinkerInputFiles,
- const InputArgList &Args, char **Argv, int Argc) {
+Expected<SmallVector<StringRef>>
+linkAndWrapDeviceFiles(ArrayRef<SmallVector<OffloadFile>> LinkerInputFiles,
+ const InputArgList &Args, char **Argv, int Argc) {
llvm::TimeTraceScope TimeScope("Handle all device input");
std::mutex ImageMtx;
@@ -1069,147 +1069,6 @@ std::optional<std::string> searchLibrary(StringRef Input, StringRef Root,
return searchLibraryBaseName(Input, Root, SearchPaths);
}
-/// Common redeclaration of needed symbol flags.
-enum Symbol : uint32_t {
- Sym_None = 0,
- Sym_Undefined = 1U << 1,
- Sym_Weak = 1U << 2,
-};
-
-/// Scan the symbols from a BitcodeFile \p Buffer and record if we need to
-/// extract any symbols from it.
-Expected<bool> getSymbolsFromBitcode(MemoryBufferRef Buffer, OffloadKind Kind,
- bool IsArchive, StringSaver &Saver,
- DenseMap<StringRef, Symbol> &Syms) {
- Expected<IRSymtabFile> IRSymtabOrErr = readIRSymtab(Buffer);
- if (!IRSymtabOrErr)
- return IRSymtabOrErr.takeError();
-
- bool ShouldExtract = !IsArchive;
- DenseMap<StringRef, Symbol> TmpSyms;
- for (unsigned I = 0; I != IRSymtabOrErr->Mods.size(); ++I) {
- for (const auto &Sym : IRSymtabOrErr->TheReader.module_symbols(I)) {
- if (Sym.isFormatSpecific() || !Sym.isGlobal())
- continue;
-
- auto It = Syms.find(Sym.getName());
- bool NewSymbol = It == Syms.end();
- auto OldSym = NewSymbol ? Sym_None : It->second;
-
- // We will extract if it defines a currently undefined non-weak
- // symbol.
- bool ResolvesStrongReference =
- ((OldSym & Sym_Undefined && !(OldSym & Sym_Weak)) &&
- !Sym.isUndefined());
- // We will extract if it defines a new global symbol visible to the
- // host. This is only necessary for code targeting an offloading
- // language.
- bool NewGlobalSymbol =
- ((NewSymbol || (OldSym & Sym_Undefined)) && !Sym.isUndefined() &&
- !Sym.canBeOmittedFromSymbolTable() && Kind != object::OFK_None &&
- (Sym.getVisibility() != GlobalValue::HiddenVisibility));
- ShouldExtract |= ResolvesStrongReference | NewGlobalSymbol;
-
- // Update this symbol in the "table" with the new information.
- if (OldSym & Sym_Undefined && !Sym.isUndefined())
- TmpSyms[Saver.save(Sym.getName())] =
- static_cast<Symbol>(OldSym & ~Sym_Undefined);
- if (Sym.isUndefined() && NewSymbol)
- TmpSyms[Saver.save(Sym.getName())] =
- static_cast<Symbol>(OldSym | Sym_Undefined);
- if (Sym.isWeak())
- TmpSyms[Saver.save(Sym.getName())] =
- static_cast<Symbol>(OldSym | Sym_Weak);
- }
- }
-
- // If the file gets extracted we update the table with the new symbols.
- if (ShouldExtract)
- Syms.insert_range(TmpSyms);
-
- return ShouldExtract;
-}
-
-/// Scan the symbols from an ObjectFile \p Obj and record if we need to extract
-/// any symbols from it.
-Expected<bool> getSymbolsFromObject(const ObjectFile &Obj, OffloadKind Kind,
- bool IsArchive, StringSaver &Saver,
- DenseMap<StringRef, Symbol> &Syms) {
- bool ShouldExtract = !IsArchive;
- DenseMap<StringRef, Symbol> TmpSyms;
- for (SymbolRef Sym : Obj.symbols()) {
- auto FlagsOrErr = Sym.getFlags();
- if (!FlagsOrErr)
- return FlagsOrErr.takeError();
-
- if (!(*FlagsOrErr & SymbolRef::SF_Global) ||
- (*FlagsOrErr & SymbolRef::SF_FormatSpecific))
- continue;
-
- auto NameOrErr = Sym.getName();
- if (!NameOrErr)
- return NameOrErr.takeError();
-
- bool NewSymbol = Syms.count(*NameOrErr) == 0;
- auto OldSym = NewSymbol ? Sym_None : Syms[*NameOrErr];
-
- // We will extract if it defines a currently undefined non-weak symbol.
- bool ResolvesStrongReference = (OldSym & Sym_Undefined) &&
- !(OldSym & Sym_Weak) &&
- !(*FlagsOrErr & SymbolRef::SF_Undefined);
-
- // We will extract if it defines a new global symbol visible to the
- // host. This is only necessary for code targeting an offloading
- // language.
- bool NewGlobalSymbol =
- ((NewSymbol || (OldSym & Sym_Undefined)) &&
- !(*FlagsOrErr & SymbolRef::SF_Undefined) && Kind != object::OFK_None &&
- !(*FlagsOrErr & SymbolRef::SF_Hidden));
- ShouldExtract |= ResolvesStrongReference | NewGlobalSymbol;
-
- // Update this symbol in the "table" with the new information.
- if (OldSym & Sym_Undefined && !(*FlagsOrErr & SymbolRef::SF_Undefined))
- TmpSyms[Saver.save(*NameOrErr)] =
- static_cast<Symbol>(OldSym & ~Sym_Undefined);
- if (*FlagsOrErr & SymbolRef::SF_Undefined && NewSymbol)
- TmpSyms[Saver.save(*NameOrErr)] =
- static_cast<Symbol>(OldSym | Sym_Undefined);
- if (*FlagsOrErr & SymbolRef::SF_Weak)
- TmpSyms[Saver.save(*NameOrErr)] = static_cast<Symbol>(OldSym | Sym_Weak);
- }
-
- // If the file gets extracted we update the table with the new symbols.
- if (ShouldExtract)
- Syms.insert_range(TmpSyms);
-
- return ShouldExtract;
-}
-
-/// Attempt to 'resolve' symbols found in input files. We use this to
-/// determine if an archive member needs to be extracted. An archive member
-/// will be extracted if any of the following is true.
- /// 1) It defines an undefined symbol in a regular object file.
-/// 2) It defines a global symbol without hidden visibility that has not
-/// yet been defined.
-Expected<bool> getSymbols(StringRef Image, OffloadKind Kind, bool IsArchive,
- StringSaver &Saver,
- DenseMap<StringRef, Symbol> &Syms) {
- MemoryBufferRef Buffer = MemoryBufferRef(Image, "");
- switch (identify_magic(Image)) {
- case file_magic::bitcode:
- return getSymbolsFromBitcode(Buffer, Kind, IsArchive, Saver, Syms);
- case file_magic::elf_relocatable: {
- Expected<std::unique_ptr<ObjectFile>> ObjFile =
- ObjectFile::createObjectFile(Buffer);
- if (!ObjFile)
- return ObjFile.takeError();
- return getSymbolsFromObject(**ObjFile, Kind, IsArchive, Saver, Syms);
- }
- default:
- return false;
- }
-}
-
/// Search the input files and libraries for embedded device offloading code
/// and add it to the list of files to be linked. Files coming from static
/// libraries are only added to the input if they are used by an existing
@@ -1279,7 +1138,6 @@ getDeviceInput(const ArgList &Args) {
// Link all standard input files and update the list of symbols.
MapVector<OffloadFile::TargetID, SmallVector<OffloadFile, 0>> InputFiles;
- DenseMap<OffloadFile::TargetID, DenseMap<StringRef, Symbol>> Syms;
for (OffloadFile &Binary : ObjectFilesToExtract) {
if (!Binary.getBinary())
continue;
@@ -1290,12 +1148,6 @@ getDeviceInput(const ArgList &Args) {
CompatibleTargets.emplace_back(ID);
for (const auto &[Index, ID] : llvm::enumerate(CompatibleTargets)) {
- Expected<bool> ExtractOrErr = getSymbols(
- Binary.getBinary()->getImage(), Binary.getBinary()->getOffloadKind(),
- /*IsArchive=*/false, Saver, Syms[ID]);
- if (!ExtractOrErr)
- return ExtractOrErr.takeError();
-
// If another target needs this binary it must be copied instead.
if (Index == CompatibleTargets.size() - 1)
InputFiles[ID].emplace_back(std::move(Binary));
@@ -1304,55 +1156,33 @@ getDeviceInput(const ArgList &Args) {
}
}
- // Archive members are extracted only if they define needed symbols. We do this
- // after every regular input file so that libraries may be included out of
- // order. This follows 'ld.lld' semantics which are more lenient.
- bool Extracted = true;
llvm::DenseSet<StringRef> ShouldExtract;
for (auto &Arg : Args.getAllArgValues(OPT_should_extract))
ShouldExtract.insert(Arg);
- while (Extracted) {
- Extracted = false;
- for (OffloadFile &Binary : ArchiveFilesToExtract) {
- // If the binary was previously extracted it will be set to null.
- if (!Binary.getBinary())
- continue;
-
- SmallVector<OffloadFile::TargetID> CompatibleTargets = {Binary};
- for (const auto &[ID, Input] : InputFiles)
- if (object::areTargetsCompatible(Binary, ID))
- CompatibleTargets.emplace_back(ID);
-
- for (const auto &[Index, ID] : llvm::enumerate(CompatibleTargets)) {
- // Only extract if we have an object matching this target or it
- // was specifically requested.
- if (!InputFiles.count(ID) && !ShouldExtract.contains(ID.second))
- continue;
-
- Expected<bool> ExtractOrErr =
- getSymbols(Binary.getBinary()->getImage(),
- Binary.getBinary()->getOffloadKind(),
- /*IsArchive=*/true, Saver, Syms[ID]);
- if (!ExtractOrErr)
- return ExtractOrErr.takeError();
- Extracted = *ExtractOrErr;
+ // We only extract archive members from the fat binary if we find a used or
+ // requested target. Unlike normal static archive handling, we just extract
+ // every object file contained in the archive.
+ for (OffloadFile &Binary : ArchiveFilesToExtract) {
+ if (!Binary.getBinary())
+ continue;
- // Skip including the file if it is an archive that does not resolve
- // any symbols.
- if (!Extracted && !ShouldExtract.contains(ID.second))
- continue;
+ SmallVector<OffloadFile::TargetID> CompatibleTargets = {Binary};
+ for (const auto &[ID, Input] : InputFiles)
+ if (object::areTargetsCompatible(Binary, ID))
+ CompatibleTargets.emplace_back(ID);
- // If another target needs this binary it must be copied instead.
- if (Index == CompatibleTargets.size() - 1)
- InputFiles[ID].emplace_back(std::move(Binary));
- else
- InputFiles[ID].emplace_back(Binary.copy());
- }
+ for (const auto &[Index, ID] : llvm::enumerate(CompatibleTargets)) {
+ // Only extract if we have an object matching this target or it
+ // was specifically requested.
+ if (!InputFiles.count(ID) && !ShouldExtract.contains(ID.second))
+ continue;
- // If we extracted any files we need to check all the symbols again.
- if (Extracted)
- break;
+ // If another target needs this binary it must be copied instead.
+ if (Index == CompatibleTargets.size() - 1)
+ InputFiles[ID].emplace_back(std::move(Binary));
+ else
+ InputFiles[ID].emplace_back(Binary.copy());
}
}
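
The hunks above drop the per-symbol resolution pass: an archive member is now extracted whenever its offload target already has regular inputs or was named via the should-extract option (OPT_should_extract), with no symbol table consulted. Below is a minimal standalone sketch of that policy, with a hypothetical Member struct and plain string targets standing in for OffloadFile/TargetID, and with the target-compatibility and copy-vs-move handling elided:

#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

struct Member { std::string Target; /* object payload elided */ };

// Extract every member whose target is already in use or was explicitly
// requested; unlike classic static-archive semantics, no symbols are
// resolved to decide extraction.
std::vector<Member>
extractMembers(std::vector<Member> Archive,
               const std::map<std::string, int> &InputFiles,
               const std::set<std::string> &ShouldExtract) {
  std::vector<Member> Extracted;
  for (Member &M : Archive)
    if (InputFiles.count(M.Target) || ShouldExtract.count(M.Target))
      Extracted.push_back(std::move(M));
  return Extracted;
}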
diff --git a/clang/tools/clang-nvlink-wrapper/ClangNVLinkWrapper.cpp b/clang/tools/clang-nvlink-wrapper/ClangNVLinkWrapper.cpp
index 4b639712..58eb671 100644
--- a/clang/tools/clang-nvlink-wrapper/ClangNVLinkWrapper.cpp
+++ b/clang/tools/clang-nvlink-wrapper/ClangNVLinkWrapper.cpp
@@ -286,12 +286,16 @@ struct Symbol {
};
Expected<StringRef> runPTXAs(StringRef File, const ArgList &Args) {
- std::string CudaPath = Args.getLastArgValue(OPT_cuda_path_EQ).str();
- std::string GivenPath = Args.getLastArgValue(OPT_ptxas_path_EQ).str();
- Expected<std::string> PTXAsPath =
- findProgram(Args, "ptxas", {CudaPath + "/bin", GivenPath});
+ SmallVector<StringRef, 1> SearchPaths;
+ if (Arg *A = Args.getLastArg(OPT_cuda_path_EQ))
+ SearchPaths.push_back(Args.MakeArgString(A->getValue() + Twine("/bin")));
+ if (Arg *A = Args.getLastArg(OPT_ptxas_path_EQ))
+ SearchPaths.push_back(Args.MakeArgString(A->getValue()));
+
+ Expected<std::string> PTXAsPath = findProgram(Args, "ptxas", SearchPaths);
if (!PTXAsPath)
return PTXAsPath.takeError();
+
if (!Args.hasArg(OPT_arch))
return createStringError(
"must pass in an explicit nvptx64 gpu architecture to 'ptxas'");
@@ -691,9 +695,11 @@ Error runNVLink(ArrayRef<StringRef> Files, const ArgList &Args) {
if (Args.hasArg(OPT_lto_emit_asm) || Args.hasArg(OPT_lto_emit_llvm))
return Error::success();
- std::string CudaPath = Args.getLastArgValue(OPT_cuda_path_EQ).str();
- Expected<std::string> NVLinkPath =
- findProgram(Args, "nvlink", {CudaPath + "/bin"});
+ SmallVector<StringRef, 1> SearchPaths;
+ if (Arg *A = Args.getLastArg(OPT_cuda_path_EQ))
+ SearchPaths.push_back(Args.MakeArgString(A->getValue() + Twine("/bin")));
+
+ Expected<std::string> NVLinkPath = findProgram(Args, "nvlink", SearchPaths);
if (!NVLinkPath)
return NVLinkPath.takeError();
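
Both hunks in this file apply the same pattern: a candidate directory is appended only when the corresponding flag was actually given, so an absent --cuda-path no longer contributes a meaningless "/bin" entry to the program search. A small sketch of the pattern, with std::optional standing in for the ArgList lookups (names are illustrative):

#include <optional>
#include <string>
#include <vector>

// Build the list handed to findProgram(); missing options add nothing.
std::vector<std::string>
buildSearchPaths(const std::optional<std::string> &CudaPath,
                 const std::optional<std::string> &ToolPath) {
  std::vector<std::string> SearchPaths;
  if (CudaPath)
    SearchPaths.push_back(*CudaPath + "/bin");
  if (ToolPath)
    SearchPaths.push_back(*ToolPath);
  return SearchPaths;
}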
diff --git a/clang/tools/diagtool/ListWarnings.cpp b/clang/tools/diagtool/ListWarnings.cpp
index 9f96471..ce24f11 100644
--- a/clang/tools/diagtool/ListWarnings.cpp
+++ b/clang/tools/diagtool/ListWarnings.cpp
@@ -56,6 +56,9 @@ int ListWarnings::run(unsigned int argc, char **argv, llvm::raw_ostream &out) {
if (DiagnosticIDs{}.isNote(diagID))
continue;
+ if (DiagnosticIDs{}.isTrapDiag(diagID))
+ continue;
+
if (!DiagnosticIDs{}.isWarningOrExtension(diagID))
continue;
diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp
index 9493edf..858423a 100644
--- a/clang/tools/libclang/CIndex.cpp
+++ b/clang/tools/libclang/CIndex.cpp
@@ -1420,79 +1420,25 @@ bool CursorVisitor::VisitDeclarationNameInfo(DeclarationNameInfo Name) {
llvm_unreachable("Invalid DeclarationName::Kind!");
}
-bool CursorVisitor::VisitNestedNameSpecifier(NestedNameSpecifier *NNS,
- SourceRange Range) {
- // FIXME: This whole routine is a hack to work around the lack of proper
- // source information in nested-name-specifiers (PR5791). Since we do have
- // a beginning source location, we can visit the first component of the
- // nested-name-specifier, if it's a single-token component.
- if (!NNS)
- return false;
-
- // Get the first component in the nested-name-specifier.
- while (NestedNameSpecifier *Prefix = NNS->getPrefix())
- NNS = Prefix;
-
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Namespace:
- return Visit(
- MakeCursorNamespaceRef(NNS->getAsNamespace(), Range.getBegin(), TU));
-
- case NestedNameSpecifier::TypeSpec: {
- // If the type has a form where we know that the beginning of the source
- // range matches up with a reference cursor, visit the appropriate cursor.
- const Type *T = NNS->getAsType();
- if (const TypedefType *Typedef = dyn_cast<TypedefType>(T))
- return Visit(MakeCursorTypeRef(Typedef->getDecl(), Range.getBegin(), TU));
- if (const TagType *Tag = dyn_cast<TagType>(T))
- return Visit(MakeCursorTypeRef(Tag->getDecl(), Range.getBegin(), TU));
- if (const TemplateSpecializationType *TST =
- dyn_cast<TemplateSpecializationType>(T))
- return VisitTemplateName(TST->getTemplateName(), Range.getBegin());
- break;
- }
-
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Super:
- break;
- }
-
- return false;
-}
-
bool CursorVisitor::VisitNestedNameSpecifierLoc(
NestedNameSpecifierLoc Qualifier) {
- SmallVector<NestedNameSpecifierLoc, 4> Qualifiers;
- for (; Qualifier; Qualifier = Qualifier.getPrefix())
- Qualifiers.push_back(Qualifier);
-
- while (!Qualifiers.empty()) {
- NestedNameSpecifierLoc Q = Qualifiers.pop_back_val();
- NestedNameSpecifier *NNS = Q.getNestedNameSpecifier();
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Namespace:
- if (Visit(MakeCursorNamespaceRef(NNS->getAsNamespace(),
- Q.getLocalBeginLoc(), TU)))
- return true;
-
- break;
-
- case NestedNameSpecifier::TypeSpec:
- if (Visit(Q.getTypeLoc()))
- return true;
-
- break;
-
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Super:
- break;
- }
+ NestedNameSpecifier NNS = Qualifier.getNestedNameSpecifier();
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = Qualifier.castAsNamespaceAndPrefix();
+ if (VisitNestedNameSpecifierLoc(Prefix))
+ return true;
+ return Visit(
+ MakeCursorNamespaceRef(Namespace, Qualifier.getLocalBeginLoc(), TU));
}
-
- return false;
+ case NestedNameSpecifier::Kind::Type:
+ return Visit(Qualifier.castAsTypeLoc());
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ return false;
+ }
+ llvm_unreachable("unexpected nested name specifier kind");
}
bool CursorVisitor::VisitTemplateParameters(
@@ -1515,16 +1461,23 @@ bool CursorVisitor::VisitTemplateParameters(
return false;
}
-bool CursorVisitor::VisitTemplateName(TemplateName Name, SourceLocation Loc) {
+bool CursorVisitor::VisitTemplateName(TemplateName Name, SourceLocation NameLoc,
+ NestedNameSpecifierLoc NNS) {
switch (Name.getKind()) {
+ case TemplateName::QualifiedTemplate: {
+ const QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName();
+ assert(QTN->getQualifier() == NNS.getNestedNameSpecifier());
+ if (VisitNestedNameSpecifierLoc(NNS))
+ return true;
+ return VisitTemplateName(QTN->getUnderlyingTemplate(), NameLoc, /*NNS=*/{});
+ }
case TemplateName::Template:
case TemplateName::UsingTemplate:
- case TemplateName::QualifiedTemplate: // FIXME: Visit nested-name-specifier.
- return Visit(MakeCursorTemplateRef(Name.getAsTemplateDecl(), Loc, TU));
+ return Visit(MakeCursorTemplateRef(Name.getAsTemplateDecl(), NameLoc, TU));
case TemplateName::OverloadedTemplate:
// Visit the overloaded template set.
- if (Visit(MakeCursorOverloadedDeclRef(Name, Loc, TU)))
+ if (Visit(MakeCursorOverloadedDeclRef(Name, NameLoc, TU)))
return true;
return false;
@@ -1533,17 +1486,19 @@ bool CursorVisitor::VisitTemplateName(TemplateName Name, SourceLocation Loc) {
// FIXME: Visit DeclarationName?
return false;
- case TemplateName::DependentTemplate:
- // FIXME: Visit nested-name-specifier.
- return false;
+ case TemplateName::DependentTemplate: {
+ assert(Name.getAsDependentTemplateName()->getQualifier() ==
+ NNS.getNestedNameSpecifier());
+ return VisitNestedNameSpecifierLoc(NNS);
+ }
case TemplateName::SubstTemplateTemplateParm:
return Visit(MakeCursorTemplateRef(
- Name.getAsSubstTemplateTemplateParm()->getParameter(), Loc, TU));
+ Name.getAsSubstTemplateTemplateParm()->getParameter(), NameLoc, TU));
case TemplateName::SubstTemplateTemplateParmPack:
return Visit(MakeCursorTemplateRef(
- Name.getAsSubstTemplateTemplateParmPack()->getParameterPack(), Loc,
+ Name.getAsSubstTemplateTemplateParmPack()->getParameterPack(), NameLoc,
TU));
case TemplateName::DeducedTemplate:
@@ -1587,11 +1542,9 @@ bool CursorVisitor::VisitTemplateArgumentLoc(const TemplateArgumentLoc &TAL) {
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
- if (VisitNestedNameSpecifierLoc(TAL.getTemplateQualifierLoc()))
- return true;
-
return VisitTemplateName(TAL.getArgument().getAsTemplateOrTemplatePattern(),
- TAL.getTemplateNameLoc());
+ TAL.getTemplateNameLoc(),
+ TAL.getTemplateQualifierLoc());
}
llvm_unreachable("Invalid TemplateArgument::Kind!");
@@ -1669,7 +1622,10 @@ bool CursorVisitor::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
}
bool CursorVisitor::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
- return Visit(MakeCursorTypeRef(TL.getTypedefNameDecl(), TL.getNameLoc(), TU));
+ if (VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
+ return true;
+
+ return Visit(MakeCursorTypeRef(TL.getDecl(), TL.getNameLoc(), TU));
}
bool CursorVisitor::VisitPredefinedSugarTypeLoc(PredefinedSugarTypeLoc TL) {
@@ -1677,14 +1633,20 @@ bool CursorVisitor::VisitPredefinedSugarTypeLoc(PredefinedSugarTypeLoc TL) {
}
bool CursorVisitor::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
+ if (VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
+ return true;
+
return Visit(MakeCursorTypeRef(TL.getDecl(), TL.getNameLoc(), TU));
}
bool CursorVisitor::VisitTagTypeLoc(TagTypeLoc TL) {
+ if (VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
+ return true;
+
if (TL.isDefinition())
- return Visit(MakeCXCursor(TL.getDecl(), TU, RegionOfInterest));
+ return Visit(MakeCXCursor(TL.getOriginalDecl(), TU, RegionOfInterest));
- return Visit(MakeCursorTypeRef(TL.getDecl(), TL.getNameLoc(), TU));
+ return Visit(MakeCursorTypeRef(TL.getOriginalDecl(), TL.getNameLoc(), TU));
}
bool CursorVisitor::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
@@ -1763,7 +1725,10 @@ bool CursorVisitor::VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
}
bool CursorVisitor::VisitUsingTypeLoc(UsingTypeLoc TL) {
- auto *underlyingDecl = TL.getUnderlyingType()->getAsTagDecl();
+ if (VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
+ return true;
+
+ auto *underlyingDecl = TL.getTypePtr()->getAsTagDecl();
if (underlyingDecl) {
return Visit(MakeCursorTypeRef(underlyingDecl, TL.getNameLoc(), TU));
}
@@ -1826,7 +1791,7 @@ bool CursorVisitor::VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
bool CursorVisitor::VisitDeducedTemplateSpecializationTypeLoc(
DeducedTemplateSpecializationTypeLoc TL) {
if (VisitTemplateName(TL.getTypePtr()->getTemplateName(),
- TL.getTemplateNameLoc()))
+ TL.getTemplateNameLoc(), TL.getQualifierLoc()))
return true;
return false;
@@ -1836,7 +1801,7 @@ bool CursorVisitor::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
// Visit the template name.
if (VisitTemplateName(TL.getTypePtr()->getTemplateName(),
- TL.getTemplateNameLoc()))
+ TL.getTemplateNameLoc(), TL.getQualifierLoc()))
return true;
// Visit the template arguments.
@@ -1871,8 +1836,7 @@ bool CursorVisitor::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
bool CursorVisitor::VisitDependentTemplateSpecializationTypeLoc(
DependentTemplateSpecializationTypeLoc TL) {
- // Visit the nested-name-specifier, if there is one.
- if (TL.getQualifierLoc() && VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
+ if (VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
return true;
// Visit the template arguments.
@@ -1883,13 +1847,6 @@ bool CursorVisitor::VisitDependentTemplateSpecializationTypeLoc(
return false;
}
-bool CursorVisitor::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- if (VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
- return true;
-
- return Visit(TL.getNamedTypeLoc());
-}
-
bool CursorVisitor::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
return Visit(TL.getPatternLoc());
}
@@ -1908,7 +1865,7 @@ bool CursorVisitor::VisitPackIndexingTypeLoc(PackIndexingTypeLoc TL) {
}
bool CursorVisitor::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
- return Visit(MakeCursorTypeRef(TL.getDecl(), TL.getNameLoc(), TU));
+ return Visit(MakeCursorTypeRef(TL.getOriginalDecl(), TL.getNameLoc(), TU));
}
bool CursorVisitor::VisitAtomicTypeLoc(AtomicTypeLoc TL) {
@@ -1943,6 +1900,7 @@ DEFAULT_TYPELOC_IMPL(Record, TagType)
DEFAULT_TYPELOC_IMPL(Enum, TagType)
DEFAULT_TYPELOC_IMPL(SubstTemplateTypeParm, Type)
DEFAULT_TYPELOC_IMPL(SubstTemplateTypeParmPack, Type)
+DEFAULT_TYPELOC_IMPL(SubstBuiltinTemplatePack, Type)
DEFAULT_TYPELOC_IMPL(Auto, Type)
DEFAULT_TYPELOC_IMPL(BitInt, Type)
DEFAULT_TYPELOC_IMPL(DependentBitInt, Type)
@@ -2065,7 +2023,7 @@ class NestedNameSpecifierLocVisit : public VisitorJob {
public:
NestedNameSpecifierLocVisit(NestedNameSpecifierLoc Qualifier, CXCursor parent)
: VisitorJob(parent, VisitorJob::NestedNameSpecifierLocVisitKind,
- Qualifier.getNestedNameSpecifier(),
+ Qualifier.getNestedNameSpecifier().getAsVoidPointer(),
Qualifier.getOpaqueData()) {}
static bool classof(const VisitorJob *VJ) {
@@ -2074,8 +2032,7 @@ public:
NestedNameSpecifierLoc get() const {
return NestedNameSpecifierLoc(
- const_cast<NestedNameSpecifier *>(
- static_cast<const NestedNameSpecifier *>(data[0])),
+ NestedNameSpecifier::getFromVoidPointer(data[0]),
const_cast<void *>(data[1]));
}
};
@@ -2972,6 +2929,10 @@ void OpenACCClauseEnqueue::VisitDeviceTypeClause(
void OpenACCClauseEnqueue::VisitReductionClause(
const OpenACCReductionClause &C) {
VisitVarList(C);
+ for (const OpenACCReductionRecipe &R : C.getRecipes()) {
+ static_assert(sizeof(OpenACCReductionRecipe) == sizeof(int *));
+ Visitor.AddDecl(R.RecipeDecl);
+ }
}
void OpenACCClauseEnqueue::VisitAutoClause(const OpenACCAutoClause &C) {}
void OpenACCClauseEnqueue::VisitIndependentClause(
@@ -5363,9 +5324,13 @@ CXString clang_getCursorSpelling(CXCursor C) {
case CXCursor_TypeRef: {
const TypeDecl *Type = getCursorTypeRef(C).first;
assert(Type && "Missing type decl");
+ const ASTContext &Ctx = getCursorContext(C);
+ QualType T = Ctx.getTypeDeclType(Type);
- return cxstring::createDup(
- getCursorContext(C).getTypeDeclType(Type).getAsString());
+ PrintingPolicy Policy = Ctx.getPrintingPolicy();
+ Policy.FullyQualifiedName = true;
+ Policy.SuppressTagKeyword = false;
+ return cxstring::createDup(T.getAsString(Policy));
}
case CXCursor_TemplateRef: {
const TemplateDecl *Template = getCursorTemplateRef(C).first;
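
The CIndex.cpp changes above move libclang onto the value-typed NestedNameSpecifier: prefixes are visited by recursing on the qualifier itself rather than collecting them in a worklist, and a type qualifier is visited through its TypeLoc, which carries any further prefix internally. A condensed sketch of that control flow follows; visitNamespaceRef and visitTypeLoc are hypothetical stand-ins for the MakeCursor*/Visit helpers in the real code:

#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TypeLoc.h"

static bool visitTypeLoc(clang::TypeLoc) { return false; } // stub
template <class NamespaceDeclT>
static bool visitNamespaceRef(const NamespaceDeclT *, clang::SourceLocation) {
  return false; // stub
}

static bool visitQualifier(clang::NestedNameSpecifierLoc Q) {
  using clang::NestedNameSpecifier;
  switch (Q.getNestedNameSpecifier().getKind()) {
  case NestedNameSpecifier::Kind::Namespace: {
    // Peel off one namespace component; visiting the prefix first keeps
    // the old left-to-right reporting order.
    auto [Namespace, Prefix] = Q.castAsNamespaceAndPrefix();
    if (visitQualifier(Prefix))
      return true;
    return visitNamespaceRef(Namespace, Q.getLocalBeginLoc());
  }
  case NestedNameSpecifier::Kind::Type:
    return visitTypeLoc(Q.castAsTypeLoc()); // prefix lives in the TypeLoc
  case NestedNameSpecifier::Kind::Null:
  case NestedNameSpecifier::Kind::Global:
  case NestedNameSpecifier::Kind::MicrosoftSuper:
    return false;
  }
  return false; // not reached; kept so the sketch compiles standalone
}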
diff --git a/clang/tools/libclang/CIndexCodeCompletion.cpp b/clang/tools/libclang/CIndexCodeCompletion.cpp
index 81448b4..6d14f28 100644
--- a/clang/tools/libclang/CIndexCodeCompletion.cpp
+++ b/clang/tools/libclang/CIndexCodeCompletion.cpp
@@ -617,7 +617,7 @@ namespace {
if (!baseType.isNull()) {
// Get the declaration for a class/struct/union/enum type
if (const TagType *Tag = baseType->getAs<TagType>())
- D = Tag->getDecl();
+ D = Tag->getOriginalDecl();
// Get the @interface declaration for a (possibly-qualified) Objective-C
// object pointer type, e.g., NSString*
else if (const ObjCObjectPointerType *ObjPtr =
@@ -629,7 +629,7 @@ namespace {
// Get the class for a C++ injected-class-name
else if (const InjectedClassNameType *Injected =
baseType->getAs<InjectedClassNameType>())
- D = Injected->getDecl();
+ D = Injected->getOriginalDecl();
}
if (D != nullptr) {
diff --git a/clang/tools/libclang/CMakeLists.txt b/clang/tools/libclang/CMakeLists.txt
index 2b1e266..e0ff760 100644
--- a/clang/tools/libclang/CMakeLists.txt
+++ b/clang/tools/libclang/CMakeLists.txt
@@ -93,7 +93,7 @@ if(MSVC)
set(LLVM_EXPORTED_SYMBOL_FILE)
endif()
-if (UNIX AND NOT APPLE AND NOT ${CMAKE_SYSTEM_NAME} MATCHES "AIX" AND NOT CYGWIN)
+if (UNIX AND NOT APPLE AND NOT "${CMAKE_SYSTEM_NAME}" MATCHES "AIX" AND NOT CYGWIN)
set(LLVM_EXPORTED_SYMBOL_FILE)
set(USE_VERSION_SCRIPT ${LLVM_HAVE_LINK_VERSION_SCRIPT})
endif()
@@ -125,7 +125,7 @@ else()
set(output_name "clang")
endif()
-if (UNIX AND ${CMAKE_SYSTEM_NAME} MATCHES "AIX")
+if (UNIX AND "${CMAKE_SYSTEM_NAME}" MATCHES "AIX")
set(CMAKE_AIX_EXPORT_ALL_SYMBOLS OFF)
# libclang requires headers which need _ALL_SOURCE to build on AIX
remove_definitions("-D_XOPEN_SOURCE=700")
@@ -186,7 +186,7 @@ if(ENABLE_SHARED)
endif()
endif()
if (USE_VERSION_SCRIPT)
- if (${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
+ if ("${CMAKE_SYSTEM_NAME}" MATCHES "SunOS")
include(CheckLinkerFlag)
# The Solaris 11.4 linker supports a subset of GNU ld version scripts,
# but requires a special option to enable it.
diff --git a/clang/tools/libclang/CXCursor.cpp b/clang/tools/libclang/CXCursor.cpp
index a6301da..3c40624 100644
--- a/clang/tools/libclang/CXCursor.cpp
+++ b/clang/tools/libclang/CXCursor.cpp
@@ -1332,16 +1332,10 @@ CXCursor cxcursor::getTypeRefCursor(CXCursor cursor) {
TypeLoc TL = Type->getTypeLoc();
SourceLocation Loc = TL.getBeginLoc();
- if (const ElaboratedType *ElabT = Ty->getAs<ElaboratedType>()) {
- Ty = ElabT->getNamedType();
- ElaboratedTypeLoc ElabTL = TL.castAs<ElaboratedTypeLoc>();
- Loc = ElabTL.getNamedTypeLoc().getBeginLoc();
- }
-
if (const TypedefType *Typedef = Ty->getAs<TypedefType>())
return MakeCursorTypeRef(Typedef->getDecl(), Loc, TU);
if (const TagType *Tag = Ty->getAs<TagType>())
- return MakeCursorTypeRef(Tag->getDecl(), Loc, TU);
+ return MakeCursorTypeRef(Tag->getOriginalDecl(), Loc, TU);
if (const TemplateTypeParmType *TemplP = Ty->getAs<TemplateTypeParmType>())
return MakeCursorTypeRef(TemplP->getDecl(), Loc, TU);
diff --git a/clang/tools/libclang/CXIndexDataConsumer.cpp b/clang/tools/libclang/CXIndexDataConsumer.cpp
index 73d04b8..423dd1b 100644
--- a/clang/tools/libclang/CXIndexDataConsumer.cpp
+++ b/clang/tools/libclang/CXIndexDataConsumer.cpp
@@ -357,7 +357,7 @@ CXIndexDataConsumer::CXXBasesListInfo::CXXBasesListInfo(const CXXRecordDecl *D,
TST = T->getAs<TemplateSpecializationType>()) {
BaseD = TST->getTemplateName().getAsTemplateDecl();
} else if (const RecordType *RT = T->getAs<RecordType>()) {
- BaseD = RT->getDecl();
+ BaseD = RT->getOriginalDecl();
}
if (BaseD)
@@ -389,13 +389,22 @@ SourceLocation CXIndexDataConsumer::CXXBasesListInfo::getBaseLoc(
if (QualifiedTypeLoc QL = TL.getAs<QualifiedTypeLoc>())
TL = QL.getUnqualifiedLoc();
- if (ElaboratedTypeLoc EL = TL.getAs<ElaboratedTypeLoc>())
- return EL.getNamedTypeLoc().getBeginLoc();
- if (DependentNameTypeLoc DL = TL.getAs<DependentNameTypeLoc>())
- return DL.getNameLoc();
- if (DependentTemplateSpecializationTypeLoc DTL =
- TL.getAs<DependentTemplateSpecializationTypeLoc>())
- return DTL.getTemplateNameLoc();
+ // FIXME: Factor this out; a lot of TypeLoc users seem to need a generic
+ // TypeLoc::getNameLoc()
+ if (auto TTL = TL.getAs<DependentNameTypeLoc>())
+ return TTL.getNameLoc();
+ if (auto TTL = TL.getAs<DependentTemplateSpecializationTypeLoc>())
+ return TTL.getTemplateNameLoc();
+ if (auto TTL = TL.getAs<TemplateSpecializationTypeLoc>())
+ return TTL.getTemplateNameLoc();
+ if (auto TTL = TL.getAs<TagTypeLoc>())
+ return TTL.getNameLoc();
+ if (auto TTL = TL.getAs<TypedefTypeLoc>())
+ return TTL.getNameLoc();
+ if (auto TTL = TL.getAs<UnresolvedUsingTypeLoc>())
+ return TTL.getNameLoc();
+ if (auto TTL = TL.getAs<UsingTypeLoc>())
+ return TTL.getNameLoc();
return Loc;
}
@@ -1232,6 +1241,7 @@ static CXIdxEntityKind getEntityKindFromSymbolKind(SymbolKind K, SymbolLanguage
case SymbolKind::TemplateTypeParm:
case SymbolKind::TemplateTemplateParm:
case SymbolKind::NonTypeTemplateParm:
+ case SymbolKind::IncludeDirective:
return CXIdxEntity_Unexposed;
case SymbolKind::Enum: return CXIdxEntity_Enum;
diff --git a/clang/tools/libclang/CXType.cpp b/clang/tools/libclang/CXType.cpp
index e7864e6..d21ac7c 100644
--- a/clang/tools/libclang/CXType.cpp
+++ b/clang/tools/libclang/CXType.cpp
@@ -118,7 +118,6 @@ static CXTypeKind GetTypeKind(QualType T) {
TKCASE(ExtVector);
TKCASE(MemberPointer);
TKCASE(Auto);
- TKCASE(Elaborated);
TKCASE(Pipe);
TKCASE(Attributed);
TKCASE(BTFTagAttributed);
@@ -225,6 +224,11 @@ FindTemplateArgumentTypeAt(ArrayRef<TemplateArgument> TA, unsigned index) {
return std::nullopt;
}
+static CXType getTypeDeclType(const ASTContext &Context, CXTranslationUnit TU,
+ const TypeDecl *TD) {
+ return MakeCXType(Context.getTypeDeclType(TD), TU);
+}
+
CXType clang_getCursorType(CXCursor C) {
using namespace cxcursor;
@@ -244,7 +248,7 @@ CXType clang_getCursorType(CXCursor C) {
return MakeCXType(QualType(), TU);
if (const TypeDecl *TD = dyn_cast<TypeDecl>(D))
- return MakeCXType(Context.getTypeDeclType(TD), TU);
+ return getTypeDeclType(Context, TU, TD);
if (const ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D))
return MakeCXType(Context.getObjCInterfaceType(ID), TU);
if (const DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D))
@@ -271,11 +275,8 @@ CXType clang_getCursorType(CXCursor C) {
return MakeCXType(T, TU);
}
- case CXCursor_TypeRef: {
- QualType T = Context.getTypeDeclType(getCursorTypeRef(C).first);
- return MakeCXType(T, TU);
-
- }
+ case CXCursor_TypeRef:
+ return getTypeDeclType(Context, TU, getCursorTypeRef(C).first);
case CXCursor_CXXBaseSpecifier:
return cxtype::MakeCXType(getCursorCXXBaseSpecifier(C)->getType(), TU);
@@ -545,11 +546,11 @@ try_again:
break;
case Type::Record:
case Type::Enum:
- D = cast<TagType>(TP)->getDecl();
+ D = cast<TagType>(TP)->getOriginalDecl();
break;
case Type::TemplateSpecialization:
if (const RecordType *Record = TP->getAs<RecordType>())
- D = Record->getDecl();
+ D = Record->getOriginalDecl();
else
D = cast<TemplateSpecializationType>(TP)->getTemplateName()
.getAsTemplateDecl();
@@ -563,14 +564,10 @@ try_again:
break;
case Type::InjectedClassName:
- D = cast<InjectedClassNameType>(TP)->getDecl();
+ D = cast<InjectedClassNameType>(TP)->getOriginalDecl();
break;
- // FIXME: Template type parameters!
-
- case Type::Elaborated:
- TP = cast<ElaboratedType>(TP)->getNamedType().getTypePtrOrNull();
- goto try_again;
+ // FIXME: Template type parameters!
default:
break;
@@ -990,7 +987,7 @@ CXType clang_Type_getClassType(CXType CT) {
const Type *TP = T.getTypePtrOrNull();
if (TP && TP->getTypeClass() == Type::MemberPointer) {
- ET = Ctx.getTypeDeclType(
+ ET = Ctx.getCanonicalTagType(
cast<MemberPointerType>(TP)->getMostRecentCXXRecordDecl());
}
return MakeCXType(ET, GetTU(CT));
@@ -1040,7 +1037,7 @@ static long long visitRecordForValidation(const RecordDecl *RD) {
return CXTypeLayoutError_Dependent;
// recurse
if (const RecordType *ChildType = I->getType()->getAs<RecordType>()) {
- if (const RecordDecl *Child = ChildType->getDecl()) {
+ if (const RecordDecl *Child = ChildType->getOriginalDecl()) {
long long ret = visitRecordForValidation(Child);
if (ret < 0)
return ret;
@@ -1390,10 +1387,9 @@ unsigned clang_Cursor_isInlineNamespace(CXCursor C) {
CXType clang_Type_getNamedType(CXType CT){
QualType T = GetQualType(CT);
- const Type *TP = T.getTypePtrOrNull();
- if (TP && TP->getTypeClass() == Type::Elaborated)
- return MakeCXType(cast<ElaboratedType>(TP)->getNamedType(), GetTU(CT));
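+ // With ElaboratedType removed, any remaining (non-canonical) sugar plays
+ // the role of the previously wrapped named type.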
+ if (!T.isNull() && !T.isCanonical())
+ return MakeCXType(T, GetTU(CT));
return MakeCXType(QualType(), GetTU(CT));
}
diff --git a/clang/tools/libclang/CursorVisitor.h b/clang/tools/libclang/CursorVisitor.h
index 949b739..d5ab699 100644
--- a/clang/tools/libclang/CursorVisitor.h
+++ b/clang/tools/libclang/CursorVisitor.h
@@ -255,12 +255,12 @@ public:
// Name visitor
bool VisitDeclarationNameInfo(DeclarationNameInfo Name);
- bool VisitNestedNameSpecifier(NestedNameSpecifier *NNS, SourceRange Range);
bool VisitNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS);
// Template visitors
bool VisitTemplateParameters(const TemplateParameterList *Params);
- bool VisitTemplateName(TemplateName Name, SourceLocation Loc);
+ bool VisitTemplateName(TemplateName Name, SourceLocation NameLoc,
+ NestedNameSpecifierLoc NNS);
bool VisitTemplateArgumentLoc(const TemplateArgumentLoc &TAL);
// Type visitors
diff --git a/clang/unittests/AST/ASTContextParentMapTest.cpp b/clang/unittests/AST/ASTContextParentMapTest.cpp
index 9af0a46..4a8aa48 100644
--- a/clang/unittests/AST/ASTContextParentMapTest.cpp
+++ b/clang/unittests/AST/ASTContextParentMapTest.cpp
@@ -131,10 +131,10 @@ TEST(GetParents, FriendTypeLoc) {
auto &FrB = *cast<FriendDecl>(*++(cast<CXXRecordDecl>(B).decls_begin()));
TypeLoc FrALoc = FrA.getFriendType()->getTypeLoc();
TypeLoc FrBLoc = FrB.getFriendType()->getTypeLoc();
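+ // isTagOwned() reports whether the friend type declaration itself owns the
+ // tag definition, replacing the old ElaboratedType::getOwnedTagDecl() check.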
+ bool FrAOwnsTag = FrALoc.getTypePtr()->getAs<TagType>()->isTagOwned();
TagDecl *FrATagDecl =
- FrALoc.getTypePtr()->getAs<ElaboratedType>()->getOwnedTagDecl();
- TagDecl *FrBTagDecl =
- FrBLoc.getTypePtr()->getAs<ElaboratedType>()->getOwnedTagDecl();
+ FrALoc.getTypePtr()->getAs<TagType>()->getOriginalDecl();
+ bool FrBOwnsTag = FrBLoc.getTypePtr()->getAs<TagType>()->isTagOwned();
EXPECT_THAT(Ctx.getParents(A), ElementsAre(DynTypedNode::create(TU)));
EXPECT_THAT(Ctx.getParents(B), ElementsAre(DynTypedNode::create(TU)));
@@ -142,8 +142,8 @@ TEST(GetParents, FriendTypeLoc) {
EXPECT_THAT(Ctx.getParents(FrB), ElementsAre(DynTypedNode::create(B)));
EXPECT_THAT(Ctx.getParents(FrALoc), ElementsAre(DynTypedNode::create(FrA)));
EXPECT_THAT(Ctx.getParents(FrBLoc), ElementsAre(DynTypedNode::create(FrB)));
- EXPECT_TRUE(FrATagDecl);
- EXPECT_FALSE(FrBTagDecl);
+ EXPECT_TRUE(FrAOwnsTag);
+ EXPECT_FALSE(FrBOwnsTag);
EXPECT_THAT(Ctx.getParents(*FrATagDecl),
ElementsAre(DynTypedNode::create(FrA)));
}
diff --git a/clang/unittests/AST/ASTExprTest.cpp b/clang/unittests/AST/ASTExprTest.cpp
index 5ec6aea..adaceb7 100644
--- a/clang/unittests/AST/ASTExprTest.cpp
+++ b/clang/unittests/AST/ASTExprTest.cpp
@@ -89,14 +89,14 @@ TEST(ASTExpr, InitListIsConstantInitialized) {
SourceLocation Loc{};
InitListExpr *BaseInit = new (Ctx) InitListExpr(Ctx, Loc, {}, Loc);
- BaseInit->setType(Ctx.getRecordType(Empty));
+ BaseInit->setType(Ctx.getCanonicalTagType(Empty));
Expr *Exprs[3] = {
BaseInit,
createIntLiteral(Ctx, 13),
createIntLiteral(Ctx, 42),
};
InitListExpr *FooInit = new (Ctx) InitListExpr(Ctx, Loc, Exprs, Loc);
- FooInit->setType(Ctx.getRecordType(Foo));
+ FooInit->setType(Ctx.getCanonicalTagType(Foo));
EXPECT_TRUE(FooInit->isConstantInitializer(Ctx, false));
// Replace the last initializer with something non-constant and make sure
diff --git a/clang/unittests/AST/ASTImporterFixtures.h b/clang/unittests/AST/ASTImporterFixtures.h
index 87e62cb..2af69c62 100644
--- a/clang/unittests/AST/ASTImporterFixtures.h
+++ b/clang/unittests/AST/ASTImporterFixtures.h
@@ -425,8 +425,7 @@ public:
};
template <typename T> RecordDecl *getRecordDecl(T *D) {
- auto *ET = cast<ElaboratedType>(D->getType().getTypePtr());
- return cast<RecordType>(ET->getNamedType().getTypePtr())->getDecl();
+ return D->getType()->getAsRecordDecl();
}
template <class T>
diff --git a/clang/unittests/AST/ASTImporterTest.cpp b/clang/unittests/AST/ASTImporterTest.cpp
index ac38300..5badbd7 100644
--- a/clang/unittests/AST/ASTImporterTest.cpp
+++ b/clang/unittests/AST/ASTImporterTest.cpp
@@ -29,7 +29,7 @@ using internal::Matcher;
static const RecordDecl *getRecordDeclOfFriend(FriendDecl *FD) {
QualType Ty = FD->getFriendType()->getType().getCanonicalType();
- return cast<RecordType>(Ty)->getDecl();
+ return cast<RecordType>(Ty)->getOriginalDecl();
}
struct ImportExpr : TestImportBase {};
@@ -427,15 +427,14 @@ TEST_P(ImportExpr, ImportParenListExpr) {
"typedef dummy<int> declToImport;"
"template class dummy<int>;",
Lang_CXX03, "", Lang_CXX03, Verifier,
- typedefDecl(hasType(elaboratedType(namesType(templateSpecializationType(
+ typedefDecl(hasType(templateSpecializationType(
hasDeclaration(classTemplateSpecializationDecl(hasSpecializedTemplate(
- classTemplateDecl(hasTemplateDecl(cxxRecordDecl(hasMethod(
- allOf(hasName("f"),
- hasBody(compoundStmt(has(declStmt(hasSingleDecl(varDecl(
- hasInitializer(parenListExpr(has(unaryOperator(
- hasOperatorName("*"),
- hasUnaryOperand(
- cxxThisExpr())))))))))))))))))))))))));
+ classTemplateDecl(hasTemplateDecl(cxxRecordDecl(hasMethod(allOf(
+ hasName("f"),
+ hasBody(compoundStmt(has(declStmt(hasSingleDecl(
+ varDecl(hasInitializer(parenListExpr(has(unaryOperator(
+ hasOperatorName("*"),
+ hasUnaryOperand(cxxThisExpr())))))))))))))))))))))));
}
TEST_P(ImportExpr, ImportSwitch) {
@@ -691,8 +690,8 @@ TEST_P(ImportType, ImportUsingType) {
testImport("struct C {};"
"void declToImport() { using ::C; new C{}; }",
Lang_CXX11, "", Lang_CXX11, Verifier,
- functionDecl(hasDescendant(cxxNewExpr(hasType(pointerType(
- pointee(elaboratedType(namesType(usingType())))))))));
+ functionDecl(hasDescendant(
+ cxxNewExpr(hasType(pointerType(pointee(usingType())))))));
}
TEST_P(ImportDecl, ImportFunctionTemplateDecl) {
@@ -785,8 +784,7 @@ TEST_P(ImportType, ImportDeducedTemplateSpecialization) {
"class C { public: C(T); };"
"C declToImport(123);",
Lang_CXX17, "", Lang_CXX17, Verifier,
- varDecl(hasType(elaboratedType(
- namesType(deducedTemplateSpecializationType())))));
+ varDecl(hasType(deducedTemplateSpecializationType())));
}
const internal::VariadicDynCastAllOfMatcher<Stmt, SizeOfPackExpr>
@@ -996,9 +994,9 @@ TEST_P(ImportDecl, ImportUsingTemplate) {
"void declToImport() {"
"using ns::S; X<S> xi; }",
Lang_CXX11, "", Lang_CXX11, Verifier,
- functionDecl(hasDescendant(varDecl(hasTypeLoc(elaboratedTypeLoc(
- hasNamedTypeLoc(templateSpecializationTypeLoc(
- hasAnyTemplateArgumentLoc(templateArgumentLoc())))))))));
+ functionDecl(
+ hasDescendant(varDecl(hasTypeLoc(templateSpecializationTypeLoc(
+ hasAnyTemplateArgumentLoc(templateArgumentLoc())))))));
}
TEST_P(ImportDecl, ImportUsingEnumDecl) {
@@ -1019,24 +1017,10 @@ TEST_P(ImportDecl, ImportUsingPackDecl) {
"template<typename ...T> struct C : T... { using T::operator()...; };"
"C<A, B> declToImport;",
Lang_CXX20, "", Lang_CXX20, Verifier,
- varDecl(hasType(elaboratedType(namesType(templateSpecializationType(
- hasDeclaration(classTemplateSpecializationDecl(
- hasDescendant(usingPackDecl())))))))));
+ varDecl(hasType(templateSpecializationType(hasDeclaration(
+ classTemplateSpecializationDecl(hasDescendant(usingPackDecl())))))));
}
-/// \brief Matches shadow declarations introduced into a scope by a
-/// (resolved) using declaration.
-///
-/// Given
-/// \code
-/// namespace n { int f; }
-/// namespace declToImport { using n::f; }
-/// \endcode
-/// usingShadowDecl()
-/// matches \code f \endcode
-const internal::VariadicDynCastAllOfMatcher<Decl,
- UsingShadowDecl> usingShadowDecl;
-
TEST_P(ImportDecl, ImportUsingShadowDecl) {
MatchVerifier<Decl> Verifier;
// from using-decl
@@ -2855,8 +2839,9 @@ TEST_P(ImportFriendFunctions, ImportFriendFunctionRedeclChainDefWithClass) {
EXPECT_FALSE(InClassFD->doesThisDeclarationHaveABody());
EXPECT_EQ(InClassFD->getPreviousDecl(), ImportedD);
// The parameters must refer to the same type
- EXPECT_EQ((*InClassFD->param_begin())->getOriginalType(),
- (*ImportedD->param_begin())->getOriginalType());
+ EXPECT_TRUE(ToTU->getASTContext().hasSameType(
+ (*InClassFD->param_begin())->getOriginalType(),
+ (*ImportedD->param_begin())->getOriginalType()));
}
TEST_P(ImportFriendFunctions,
@@ -2884,8 +2869,9 @@ TEST_P(ImportFriendFunctions,
EXPECT_TRUE(OutOfClassFD->doesThisDeclarationHaveABody());
EXPECT_EQ(ImportedD->getPreviousDecl(), OutOfClassFD);
// The parameters must refer to the same type
- EXPECT_EQ((*OutOfClassFD->param_begin())->getOriginalType(),
- (*ImportedD->param_begin())->getOriginalType());
+ EXPECT_TRUE(ToTU->getASTContext().hasSameType(
+ (*OutOfClassFD->param_begin())->getOriginalType(),
+ (*ImportedD->param_begin())->getOriginalType()));
}
TEST_P(ImportFriendFunctions, ImportFriendFunctionFromMultipleTU) {
@@ -4486,8 +4472,7 @@ TEST_P(ImportFriendClasses, TypeForDeclShouldBeSetInTemplated) {
auto *Definition = FirstDeclMatcher<ClassTemplateDecl>().match(
FromTU1, classTemplateDecl(hasName("F")));
auto *Imported1 = cast<ClassTemplateDecl>(Import(Definition, Lang_CXX03));
- EXPECT_EQ(Imported0->getTemplatedDecl()->getTypeForDecl(),
- Imported1->getTemplatedDecl()->getTypeForDecl());
+ EXPECT_TRUE(declaresSameEntity(Imported0, Imported1));
}
TEST_P(ImportFriendClasses, DeclsFromFriendsShouldBeInRedeclChains) {
@@ -4548,8 +4533,10 @@ TEST_P(ImportFriendClasses, SkipComparingFriendTemplateDepth) {
classTemplateDecl(has(cxxRecordDecl(hasDefinition(), hasName("A")))));
auto *ToA = Import(FromA, Lang_CXX11);
EXPECT_TRUE(ToA);
- EXPECT_EQ(Fwd->getTemplatedDecl()->getTypeForDecl(),
- ToA->getTemplatedDecl()->getTypeForDecl());
+ const ASTContext &Ctx = ToTU->getASTContext();
+ EXPECT_TRUE(
+ Ctx.hasSameType(Ctx.getCanonicalTagType(Fwd->getTemplatedDecl()),
+ Ctx.getCanonicalTagType(ToA->getTemplatedDecl())));
}
TEST_P(ImportFriendClasses,
@@ -4627,7 +4614,7 @@ TEST_P(ImportFriendClasses, ImportOfClassDefinitionAndFwdFriendShouldBeLinked) {
auto *Friend = FirstDeclMatcher<FriendDecl>().match(FromTU0, friendDecl());
QualType FT = Friend->getFriendType()->getType();
FT = FromTU0->getASTContext().getCanonicalType(FT);
- auto *Fwd = cast<TagType>(FT)->getDecl();
+ auto *Fwd = cast<TagType>(FT)->getOriginalDecl();
auto *ImportedFwd = Import(Fwd, Lang_CXX03);
Decl *FromTU1 = getTuDecl(
R"(
@@ -7543,7 +7530,8 @@ TEST_P(ImportAutoFunctions, ReturnWithTypedefDeclaredInside) {
ASTContext &Ctx = From->getASTContext();
TypeAliasDecl *FromTA =
FirstDeclMatcher<TypeAliasDecl>().match(FromTU, typeAliasDecl());
- QualType TT = Ctx.getTypedefType(FromTA);
+ QualType TT = Ctx.getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, FromTA);
const FunctionProtoType *FPT = cast<FunctionProtoType>(From->getType());
QualType NewFunType =
Ctx.getFunctionType(TT, FPT->getParamTypes(), FPT->getExtProtoInfo());
@@ -9193,41 +9181,37 @@ TEST_P(ASTImporterOptionSpecificTestBase, isNewDecl) {
struct ImportInjectedClassNameType : public ASTImporterOptionSpecificTestBase {
protected:
- const CXXRecordDecl *findInjected(const CXXRecordDecl *Parent) {
- for (Decl *Found : Parent->decls()) {
- const auto *Record = dyn_cast<CXXRecordDecl>(Found);
- if (Record && Record->isInjectedClassName())
- return Record;
- }
- return nullptr;
- }
-
- void checkInjType(const CXXRecordDecl *D) {
- // The whole redecl chain should have the same InjectedClassNameType
- // instance. The injected record declaration is a separate chain, this
- // should contain the same type too.
- const Type *Ty = nullptr;
- for (const Decl *ReD : D->redecls()) {
- const auto *ReRD = cast<CXXRecordDecl>(ReD);
- EXPECT_TRUE(ReRD->getTypeForDecl());
- EXPECT_TRUE(!Ty || Ty == ReRD->getTypeForDecl());
- Ty = ReRD->getTypeForDecl();
- }
+ void checkInjType(const ASTContext &Ctx, const CXXRecordDecl *D) {
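+ // Every declaration in the redecl chain now carries its own non-canonical
+ // TagType node; the nodes are distinct but must all denote the same type.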
+ ASSERT_TRUE(D->hasInjectedClassType());
+ const Type *Ty = Ctx.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, D,
+ /*OwnsTag=*/false)
+ .getTypePtr();
ASSERT_TRUE(Ty);
- const auto *InjTy = Ty->castAs<InjectedClassNameType>();
+ EXPECT_FALSE(Ty->isCanonicalUnqualified());
+ const auto *InjTy = dyn_cast<InjectedClassNameType>(Ty);
EXPECT_TRUE(InjTy);
- if (CXXRecordDecl *Def = D->getDefinition()) {
- const CXXRecordDecl *InjRD = findInjected(Def);
- EXPECT_TRUE(InjRD);
- EXPECT_EQ(InjRD->getTypeForDecl(), InjTy);
+ for (const Decl *ReD : D->redecls()) {
+ if (ReD == D)
+ continue;
+ const auto *ReRD = cast<CXXRecordDecl>(ReD);
+ ASSERT_TRUE(ReRD->hasInjectedClassType());
+ const Type *ReTy = Ctx.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, ReRD,
+ /*OwnsTag=*/false)
+ .getTypePtr();
+ ASSERT_TRUE(ReTy);
+ EXPECT_FALSE(ReTy->isCanonicalUnqualified());
+ EXPECT_NE(ReTy, Ty);
+ EXPECT_TRUE(Ctx.hasSameType(ReTy, Ty));
}
}
void testImport(Decl *ToTU, Decl *FromTU, Decl *FromD) {
- checkInjType(cast<CXXRecordDecl>(FromD));
+ checkInjType(FromTU->getASTContext(), cast<CXXRecordDecl>(FromD));
Decl *ToD = Import(FromD, Lang_CXX11);
if (auto *ToRD = dyn_cast<CXXRecordDecl>(ToD))
- checkInjType(ToRD);
+ checkInjType(ToTU->getASTContext(), ToRD);
}
const char *ToCodeA =
@@ -9679,8 +9663,7 @@ TEST_P(ASTImporterOptionSpecificTestBase,
auto *ToX = Import(FromX, Lang_CXX11);
auto *ToXType = ToX->getType()->getAs<TypedefType>();
- // FIXME: This should be false.
- EXPECT_TRUE(ToXType->typeMatchesDecl());
+ EXPECT_FALSE(ToXType->typeMatchesDecl());
}
TEST_P(ASTImporterOptionSpecificTestBase,
@@ -10038,7 +10021,12 @@ protected:
.getInheritedFrom(),
GetTemplateParm(ToD));
- EXPECT_EQ(ToD->getPreviousDecl(), ToDInherited);
+ if (FromD->getPreviousDecl() == FromDInherited) {
+ EXPECT_EQ(ToD->getPreviousDecl(), ToDInherited);
+ } else {
+ EXPECT_EQ(FromD, FromDInherited->getPreviousDecl());
+ EXPECT_EQ(ToD, ToDInherited->getPreviousDecl());
+ }
}
const char *CodeFunction =
diff --git a/clang/unittests/AST/ByteCode/Descriptor.cpp b/clang/unittests/AST/ByteCode/Descriptor.cpp
index b3517d8..37e6f24 100644
--- a/clang/unittests/AST/ByteCode/Descriptor.cpp
+++ b/clang/unittests/AST/ByteCode/Descriptor.cpp
@@ -399,7 +399,12 @@ TEST(Descriptor, Primitives) {
const Pointer &PF5 = GlobalPtr.atField(F5->Offset);
ASSERT_TRUE(PF5.isZeroSizeArray());
- ASSERT_FALSE(PF5.isOnePastEnd());
+ ASSERT_TRUE(PF5.isOnePastEnd());
+ ASSERT_FALSE(PF5.isElementPastEnd());
+
+ const Pointer &E1 = PF5.atIndex(0);
+ (void)E1;
+ ASSERT_TRUE(PF5.isOnePastEnd());
ASSERT_FALSE(PF5.isElementPastEnd());
}
}
diff --git a/clang/unittests/AST/DeclPrinterTest.cpp b/clang/unittests/AST/DeclPrinterTest.cpp
index 124b1a1..28750c4 100644
--- a/clang/unittests/AST/DeclPrinterTest.cpp
+++ b/clang/unittests/AST/DeclPrinterTest.cpp
@@ -356,42 +356,40 @@ TEST(DeclPrinter, TestCXXRecordDecl11) {
}
TEST(DeclPrinter, TestCXXRecordDecl12) {
- ASSERT_TRUE(
- PrintedDeclCXX98Matches("struct S { int x; };"
- "namespace NS { class C {};}"
- "void foo() {using namespace NS; C c;}",
- "foo",
- "void foo() {\nusing namespace NS;\nclass "
- "NS::C c;\n}\n",
- [](PrintingPolicy &Policy) {
- Policy.SuppressTagKeyword = false;
- Policy.SuppressScope = true;
- Policy.TerseOutput = false;
- }));
+ ASSERT_TRUE(PrintedDeclCXX98Matches("struct S { int x; };"
+ "namespace NS { class C {};}"
+ "void foo() {using namespace NS; C c;}",
+ "foo",
+ "void foo() {\nusing namespace NS;\n"
+ "C c;\n}\n",
+ [](PrintingPolicy &Policy) {
+ Policy.SuppressTagKeyword = false;
+ Policy.SuppressScope = true;
+ Policy.TerseOutput = false;
+ }));
}
TEST(DeclPrinter, TestCXXRecordDecl13) {
- ASSERT_TRUE(PrintedDeclCXX98Matches(
- "struct S { int x; };"
- "S s1;"
- "S foo() {return s1;}",
- "foo", "struct S foo() {\nreturn s1;\n}\n", [](PrintingPolicy &Policy) {
- Policy.SuppressTagKeyword = false;
- Policy.SuppressScope = true;
- Policy.TerseOutput = false;
- }));
+ ASSERT_TRUE(PrintedDeclCXX98Matches("struct S { int x; };"
+ "S s1;"
+ "S foo() {return s1;}",
+ "foo", "S foo() {\nreturn s1;\n}\n",
+ [](PrintingPolicy &Policy) {
+ Policy.SuppressTagKeyword = false;
+ Policy.SuppressScope = true;
+ Policy.TerseOutput = false;
+ }));
}
TEST(DeclPrinter, TestCXXRecordDecl14) {
- ASSERT_TRUE(PrintedDeclCXX98Matches(
- "struct S { int x; };"
- "S foo(S s1) {return s1;}",
- "foo", "struct S foo(struct S s1) {\nreturn s1;\n}\n",
- [](PrintingPolicy &Policy) {
- Policy.SuppressTagKeyword = false;
- Policy.SuppressScope = true;
- Policy.TerseOutput = false;
- }));
+ ASSERT_TRUE(PrintedDeclCXX98Matches("struct S { int x; };"
+ "S foo(S s1) {return s1;}",
+ "foo", "S foo(S s1) {\nreturn s1;\n}\n",
+ [](PrintingPolicy &Policy) {
+ Policy.SuppressTagKeyword = false;
+ Policy.SuppressScope = true;
+ Policy.TerseOutput = false;
+ }));
}
TEST(DeclPrinter, TestCXXRecordDecl15) {
ASSERT_TRUE(PrintedDeclCXX98Matches(
@@ -399,8 +397,8 @@ TEST(DeclPrinter, TestCXXRecordDecl15) {
"namespace NS { class C {};}"
"S foo(S s1, NS::C c1) {using namespace NS; C c; return s1;}",
"foo",
- "struct S foo(struct S s1, class NS::C c1) {\nusing namespace NS;\nclass "
- "NS::C c;\nreturn s1;\n}\n",
+ "S foo(S s1, NS::C c1) {\nusing namespace NS;\n"
+ "C c;\nreturn s1;\n}\n",
[](PrintingPolicy &Policy) {
Policy.SuppressTagKeyword = false;
Policy.SuppressScope = true;
diff --git a/clang/unittests/AST/DeclTest.cpp b/clang/unittests/AST/DeclTest.cpp
index afaf413..4c83ff9 100644
--- a/clang/unittests/AST/DeclTest.cpp
+++ b/clang/unittests/AST/DeclTest.cpp
@@ -90,7 +90,7 @@ TEST(Decl, AsmLabelAttr) {
DeclF->addAttr(AsmLabelAttr::Create(Ctx, "foo"));
// Mangle the decl names.
- std::string MangleF, MangleG;
+ std::string MangleF;
std::unique_ptr<ItaniumMangleContext> MC(
ItaniumMangleContext::create(Ctx, Diags));
{
@@ -570,3 +570,19 @@ void instantiate_template() {
EXPECT_EQ(GetNameInfoRange(Matches[1]), "<input.cc:6:14, col:15>");
EXPECT_EQ(GetNameInfoRange(Matches[2]), "<input.cc:6:14, col:15>");
}
+
+TEST(Decl, getQualifiedNameAsString) {
+ llvm::Annotations Code(R"cpp(
+namespace x::y {
+ template <class T> class Foo { Foo() {} };
+}
+)cpp");
+
+ auto AST = tooling::buildASTFromCode(Code.code());
+ ASTContext &Ctx = AST->getASTContext();
+
+ auto const *FD = selectFirst<CXXConstructorDecl>(
+ "ctor", match(cxxConstructorDecl().bind("ctor"), Ctx));
+ ASSERT_NE(FD, nullptr);
+ ASSERT_EQ(FD->getQualifiedNameAsString(), "x::y::Foo::Foo<T>");
+}
diff --git a/clang/unittests/AST/ProfilingTest.cpp b/clang/unittests/AST/ProfilingTest.cpp
index 27a4a19..b46bade 100644
--- a/clang/unittests/AST/ProfilingTest.cpp
+++ b/clang/unittests/AST/ProfilingTest.cpp
@@ -63,11 +63,11 @@ TEST(Profiling, DeducedTemplateSpecializationType_Name) {
ASTContext &Ctx = AST->getASTContext();
auto *T1 = cast<DeducedTemplateSpecializationType>(
- Ctx.getDeducedTemplateSpecializationType(TemplateName(CTD1), QualType(),
- false));
+ Ctx.getDeducedTemplateSpecializationType(
+ ElaboratedTypeKeyword::None, TemplateName(CTD1), QualType(), false));
auto *T2 = cast<DeducedTemplateSpecializationType>(
- Ctx.getDeducedTemplateSpecializationType(TemplateName(CTD2), QualType(),
- false));
+ Ctx.getDeducedTemplateSpecializationType(
+ ElaboratedTypeKeyword::None, TemplateName(CTD2), QualType(), false));
testTypeNode(T1, T2);
}
diff --git a/clang/unittests/AST/RandstructTest.cpp b/clang/unittests/AST/RandstructTest.cpp
index c22d866..a90665b 100644
--- a/clang/unittests/AST/RandstructTest.cpp
+++ b/clang/unittests/AST/RandstructTest.cpp
@@ -531,8 +531,7 @@ TEST(RANDSTRUCT_TEST, AnonymousStructsAndUnionsRetainFieldOrder) {
for (const Decl *D : RD->decls())
if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
- if (const auto *Record = FD->getType()->getAs<RecordType>()) {
- RD = Record->getDecl();
+ if (const auto *RD = FD->getType()->getAsRecordDecl()) {
if (RD->isAnonymousStructOrUnion()) {
// These field orders shouldn't change.
if (RD->isUnion()) {
diff --git a/clang/unittests/AST/RecursiveASTVisitorTest.cpp b/clang/unittests/AST/RecursiveASTVisitorTest.cpp
index 9d7ff59..c5ad29a 100644
--- a/clang/unittests/AST/RecursiveASTVisitorTest.cpp
+++ b/clang/unittests/AST/RecursiveASTVisitorTest.cpp
@@ -95,9 +95,10 @@ public:
return Ret;
}
- bool TraverseTypedefTypeLoc(TypedefTypeLoc TL) {
+ bool TraverseTypedefTypeLoc(TypedefTypeLoc TL, bool TraverseQualifier) {
Events.push_back(VisitEvent::StartTraverseTypedefType);
- bool Ret = RecursiveASTVisitor::TraverseTypedefTypeLoc(TL);
+ bool Ret =
+ RecursiveASTVisitor::TraverseTypedefTypeLoc(TL, TraverseQualifier);
Events.push_back(VisitEvent::EndTraverseTypedefType);
return Ret;
diff --git a/clang/unittests/AST/SizelessTypesTest.cpp b/clang/unittests/AST/SizelessTypesTest.cpp
index 0b984b6..4a945a4 100644
--- a/clang/unittests/AST/SizelessTypesTest.cpp
+++ b/clang/unittests/AST/SizelessTypesTest.cpp
@@ -24,7 +24,7 @@ struct SizelessTypeTester : public ::testing::Test {
ASTContext &Ctx = AST->getASTContext();
TranslationUnitDecl &TU = *Ctx.getTranslationUnitDecl();
TypeDecl *Foo = cast<TypeDecl>(TU.lookup(&Ctx.Idents.get("foo")).front());
- const Type *FooTy = Foo->getTypeForDecl();
+ const Type *FooTy = Ctx.getTypeDeclType(Foo).getTypePtr();
};
TEST_F(SizelessTypeTester, TestSizelessBuiltin) {
diff --git a/clang/unittests/AST/StructuralEquivalenceTest.cpp b/clang/unittests/AST/StructuralEquivalenceTest.cpp
index ef82afa..bee288d 100644
--- a/clang/unittests/AST/StructuralEquivalenceTest.cpp
+++ b/clang/unittests/AST/StructuralEquivalenceTest.cpp
@@ -617,8 +617,7 @@ TEST_F(StructuralEquivalenceCXXMethodTest, OutOfClass2) {
struct StructuralEquivalenceRecordTest : StructuralEquivalenceTest {
// FIXME Use a common getRecordDecl with ASTImporterTest.cpp!
RecordDecl *getRecordDecl(FieldDecl *FD) {
- auto *ET = cast<ElaboratedType>(FD->getType().getTypePtr());
- return cast<RecordType>(ET->getNamedType().getTypePtr())->getDecl();
+ return FD->getType()->getAsRecordDecl();
};
};
@@ -720,11 +719,13 @@ TEST_F(StructuralEquivalenceRecordTest, AnonymousRecordsShouldBeInequivalent) {
auto *A = FirstDeclMatcher<IndirectFieldDecl>().match(
TU, indirectFieldDecl(hasName("a")));
auto *FA = cast<FieldDecl>(A->chain().front());
- RecordDecl *RA = cast<RecordType>(FA->getType().getTypePtr())->getDecl();
+ RecordDecl *RA =
+ cast<RecordType>(FA->getType().getTypePtr())->getOriginalDecl();
auto *B = FirstDeclMatcher<IndirectFieldDecl>().match(
TU, indirectFieldDecl(hasName("b")));
auto *FB = cast<FieldDecl>(B->chain().front());
- RecordDecl *RB = cast<RecordType>(FB->getType().getTypePtr())->getDecl();
+ RecordDecl *RB =
+ cast<RecordType>(FB->getType().getTypePtr())->getOriginalDecl();
ASSERT_NE(RA, RB);
EXPECT_TRUE(testStructuralMatch(RA, RA));
@@ -753,13 +754,15 @@ TEST_F(StructuralEquivalenceRecordTest,
auto *A = FirstDeclMatcher<IndirectFieldDecl>().match(
TU, indirectFieldDecl(hasName("a")));
auto *FA = cast<FieldDecl>(A->chain().front());
- RecordDecl *RA = cast<RecordType>(FA->getType().getTypePtr())->getDecl();
+ RecordDecl *RA =
+ cast<RecordType>(FA->getType().getTypePtr())->getOriginalDecl();
auto *TU1 = get<1>(t);
auto *A1 = FirstDeclMatcher<IndirectFieldDecl>().match(
TU1, indirectFieldDecl(hasName("a")));
auto *FA1 = cast<FieldDecl>(A1->chain().front());
- RecordDecl *RA1 = cast<RecordType>(FA1->getType().getTypePtr())->getDecl();
+ RecordDecl *RA1 =
+ cast<RecordType>(FA1->getType().getTypePtr())->getOriginalDecl();
RecordDecl *X =
FirstDeclMatcher<RecordDecl>().match(TU, recordDecl(hasName("X")));
diff --git a/clang/unittests/AST/TemplateNameTest.cpp b/clang/unittests/AST/TemplateNameTest.cpp
index 2eac5c5..31655e2 100644
--- a/clang/unittests/AST/TemplateNameTest.cpp
+++ b/clang/unittests/AST/TemplateNameTest.cpp
@@ -120,14 +120,14 @@ TEST(TemplateName, UsingTemplate) {
// are rather part of the ElaboratedType)!
absl::vector<int> v(123);
)cpp");
- auto Matcher = elaboratedTypeLoc(
- hasNamedTypeLoc(loc(templateSpecializationType().bind("id"))));
+ auto Matcher = templateSpecializationTypeLoc().bind("id");
auto MatchResults = match(Matcher, AST->getASTContext());
const auto *TST =
- MatchResults.front().getNodeAs<TemplateSpecializationType>("id");
+ MatchResults.front().getNodeAs<TemplateSpecializationTypeLoc>("id");
ASSERT_TRUE(TST);
- EXPECT_EQ(TST->getTemplateName().getKind(), TemplateName::QualifiedTemplate);
- EXPECT_TRUE(TST->getTemplateName().getAsUsingShadowDecl() != nullptr);
+ TemplateName TN = TST->getTypePtr()->getTemplateName();
+ EXPECT_EQ(TN.getKind(), TemplateName::QualifiedTemplate);
+ EXPECT_TRUE(TN.getAsUsingShadowDecl() != nullptr);
AST = tooling::buildASTFromCodeWithArgs(R"cpp(
namespace std {
@@ -139,8 +139,7 @@ TEST(TemplateName, UsingTemplate) {
absl::vector DTST(123);
)cpp",
{"-std=c++17"});
- Matcher = elaboratedTypeLoc(
- hasNamedTypeLoc(loc(deducedTemplateSpecializationType().bind("id"))));
+ Matcher = loc(deducedTemplateSpecializationType().bind("id"));
MatchResults = match(Matcher, AST->getASTContext());
const auto *DTST =
MatchResults.front().getNodeAs<DeducedTemplateSpecializationType>("id");
diff --git a/clang/unittests/AST/TypePrinterTest.cpp b/clang/unittests/AST/TypePrinterTest.cpp
index 2b37add..ca0380b 100644
--- a/clang/unittests/AST/TypePrinterTest.cpp
+++ b/clang/unittests/AST/TypePrinterTest.cpp
@@ -60,7 +60,7 @@ TEST(TypePrinter, TemplateId) {
[](PrintingPolicy &Policy) { Policy.FullyQualifiedName = false; }));
ASSERT_TRUE(PrintedTypeMatches(
- Code, {}, Matcher, "const Type<T> &",
+ Code, {}, Matcher, "const N::Type<T> &",
[](PrintingPolicy &Policy) { Policy.FullyQualifiedName = true; }));
}
@@ -97,7 +97,7 @@ TEST(TypePrinter, ParamsUglified) {
"const f<Tp &> *", Clean));
}
-TEST(TypePrinter, SuppressElaboration) {
+TEST(TypePrinter, TemplateSpecializationFullyQualified) {
llvm::StringLiteral Code = R"cpp(
namespace shared {
namespace a {
@@ -115,13 +115,10 @@ TEST(TypePrinter, SuppressElaboration) {
hasType(qualType().bind("id")));
ASSERT_TRUE(PrintedTypeMatches(
Code, {}, Matcher, "a::S<b::Foo>",
+ [](PrintingPolicy &Policy) { Policy.FullyQualifiedName = false; }));
+ ASSERT_TRUE(PrintedTypeMatches(
+ Code, {}, Matcher, "shared::a::S<shared::b::Foo>",
[](PrintingPolicy &Policy) { Policy.FullyQualifiedName = true; }));
- ASSERT_TRUE(PrintedTypeMatches(Code, {}, Matcher,
- "shared::a::S<shared::b::Foo>",
- [](PrintingPolicy &Policy) {
- Policy.SuppressElaboration = true;
- Policy.FullyQualifiedName = true;
- }));
}
TEST(TypePrinter, TemplateIdWithNTTP) {
diff --git a/clang/unittests/ASTMatchers/ASTMatchersNarrowingTest.cpp b/clang/unittests/ASTMatchers/ASTMatchersNarrowingTest.cpp
index 2871223..8a957864 100644
--- a/clang/unittests/ASTMatchers/ASTMatchersNarrowingTest.cpp
+++ b/clang/unittests/ASTMatchers/ASTMatchersNarrowingTest.cpp
@@ -1172,8 +1172,8 @@ TEST_P(ASTMatchersTest, IsDerivedFrom_ElaboratedType) {
return;
}
- DeclarationMatcher IsDerivenFromBase =
- cxxRecordDecl(isDerivedFrom(decl().bind("typedef")));
+ DeclarationMatcher IsDerivenFromBase = cxxRecordDecl(
+ isDerivedFrom(decl().bind("typedef")), unless(isImplicit()));
EXPECT_TRUE(matchAndVerifyResultTrue(
"struct AnInterface {};"
@@ -2302,10 +2302,9 @@ TEST(ASTMatchersTest, NamesMember_CXXDependentScopeMemberExpr) {
EXPECT_TRUE(matches(
Code,
cxxDependentScopeMemberExpr(
- hasObjectExpression(declRefExpr(hasType(elaboratedType(namesType(
- templateSpecializationType(hasDeclaration(classTemplateDecl(
- has(cxxRecordDecl(has(cxxMethodDecl(hasName("mem"))
- .bind("templMem")))))))))))),
+ hasObjectExpression(declRefExpr(hasType(templateSpecializationType(
+ hasDeclaration(classTemplateDecl(has(cxxRecordDecl(
+ has(cxxMethodDecl(hasName("mem")).bind("templMem")))))))))),
memberHasSameNameAsBoundNode("templMem"))));
EXPECT_TRUE(
@@ -2323,10 +2322,9 @@ TEST(ASTMatchersTest, NamesMember_CXXDependentScopeMemberExpr) {
EXPECT_TRUE(matches(
Code,
cxxDependentScopeMemberExpr(
- hasObjectExpression(declRefExpr(
- hasType(elaboratedType(namesType(templateSpecializationType(
- hasDeclaration(classTemplateDecl(has(cxxRecordDecl(has(
- fieldDecl(hasName("mem")).bind("templMem")))))))))))),
+ hasObjectExpression(declRefExpr(hasType(templateSpecializationType(
+ hasDeclaration(classTemplateDecl(has(cxxRecordDecl(
+ has(fieldDecl(hasName("mem")).bind("templMem")))))))))),
memberHasSameNameAsBoundNode("templMem"))));
}
@@ -2341,10 +2339,9 @@ TEST(ASTMatchersTest, NamesMember_CXXDependentScopeMemberExpr) {
EXPECT_TRUE(matches(
Code,
cxxDependentScopeMemberExpr(
- hasObjectExpression(declRefExpr(
- hasType(elaboratedType(namesType(templateSpecializationType(
- hasDeclaration(classTemplateDecl(has(cxxRecordDecl(
- has(varDecl(hasName("mem")).bind("templMem")))))))))))),
+ hasObjectExpression(declRefExpr(hasType(templateSpecializationType(
+ hasDeclaration(classTemplateDecl(has(cxxRecordDecl(
+ has(varDecl(hasName("mem")).bind("templMem")))))))))),
memberHasSameNameAsBoundNode("templMem"))));
}
{
diff --git a/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp b/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp
index 07450a0..d7df9ca 100644
--- a/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp
+++ b/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp
@@ -1183,6 +1183,39 @@ TEST_P(ASTMatchersTest, AsmStatement) {
EXPECT_TRUE(matches("void foo() { __asm(\"mov al, 2\"); }", asmStmt()));
}
+TEST_P(ASTMatchersTest, HasConditionVariableStatement) {
+ if (!GetParam().isCXX()) {
+ // FIXME: Add a test for `hasConditionVariableStatement()` that does not
+ // depend on C++.
+ return;
+ }
+
+ StatementMatcher IfCondition =
+ ifStmt(hasConditionVariableStatement(declStmt()));
+
+ EXPECT_TRUE(matches("void x() { if (int* a = 0) {} }", IfCondition));
+ EXPECT_TRUE(notMatches("void x() { if (true) {} }", IfCondition));
+ EXPECT_TRUE(notMatches("void x() { int x; if ((x = 42)) {} }", IfCondition));
+
+ StatementMatcher SwitchCondition =
+ switchStmt(hasConditionVariableStatement(declStmt()));
+
+ EXPECT_TRUE(matches("void x() { switch (int a = 0) {} }", SwitchCondition));
+ if (GetParam().isCXX17OrLater()) {
+ EXPECT_TRUE(
+ notMatches("void x() { switch (int a = 0; a) {} }", SwitchCondition));
+ }
+
+ StatementMatcher ForCondition =
+ forStmt(hasConditionVariableStatement(declStmt()));
+
+ EXPECT_TRUE(matches("void x() { for (; int a = 0; ) {} }", ForCondition));
+ EXPECT_TRUE(notMatches("void x() { for (int a = 0; ; ) {} }", ForCondition));
+
+ EXPECT_TRUE(matches("void x() { while (int a = 0) {} }",
+ whileStmt(hasConditionVariableStatement(declStmt()))));
+}
+
TEST_P(ASTMatchersTest, HasCondition) {
if (!GetParam().isCXX()) {
// FIXME: Add a test for `hasCondition()` that does not depend on C++.
@@ -1938,8 +1971,7 @@ TEST_P(ASTMatchersTest, PointerType_MatchesPointersToConstTypes) {
TEST_P(ASTMatchersTest, TypedefType) {
EXPECT_TRUE(matches("typedef int X; X a;",
- varDecl(hasName("a"), hasType(elaboratedType(
- namesType(typedefType()))))));
+ varDecl(hasName("a"), hasType(typedefType()))));
}
TEST_P(ASTMatchersTest, MacroQualifiedType) {
@@ -2018,22 +2050,6 @@ TEST_P(ASTMatchersTest, RecordType_CXX) {
recordType(hasDeclaration(recordDecl(hasName("S"))))));
}
-TEST_P(ASTMatchersTest, ElaboratedType) {
- if (!GetParam().isCXX()) {
- // FIXME: Add a test for `elaboratedType()` that does not depend on C++.
- return;
- }
- EXPECT_TRUE(matches("namespace N {"
- " namespace M {"
- " class D {};"
- " }"
- "}"
- "N::M::D d;",
- elaboratedType()));
- EXPECT_TRUE(matches("class C {} c;", elaboratedType()));
- EXPECT_TRUE(matches("class C {}; C c;", elaboratedType()));
-}
-
TEST_P(ASTMatchersTest, SubstTemplateTypeParmType) {
if (!GetParam().isCXX()) {
return;
@@ -2133,16 +2149,16 @@ TEST_P(ASTMatchersTest,
if (!GetParam().isCXX()) {
return;
}
- EXPECT_TRUE(matches(
- "struct A { struct B { struct C {}; }; }; A::B::C c;",
- nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A"))))));
+ EXPECT_TRUE(
+ matches("struct A { struct B { struct C {}; }; }; A::B::C c;",
+ nestedNameSpecifier(hasPrefix(specifiesType(asString("A"))))));
EXPECT_TRUE(matches("struct A { struct B { struct C {}; }; }; A::B::C c;",
- nestedNameSpecifierLoc(hasPrefix(specifiesTypeLoc(
- loc(qualType(asString("struct A"))))))));
+ nestedNameSpecifierLoc(hasPrefix(
+ specifiesTypeLoc(loc(qualType(asString("A"))))))));
EXPECT_TRUE(matches(
"namespace N { struct A { struct B { struct C {}; }; }; } N::A::B::C c;",
- nestedNameSpecifierLoc(hasPrefix(
- specifiesTypeLoc(loc(qualType(asString("struct N::A"))))))));
+ nestedNameSpecifierLoc(
+ hasPrefix(specifiesTypeLoc(loc(qualType(asString("N::A"))))))));
}
template <typename T>
@@ -2338,8 +2354,7 @@ TEST_P(ASTMatchersTest,
}
EXPECT_TRUE(matches(
"template <typename T> class C {}; C<char> var;",
- varDecl(hasName("var"), hasTypeLoc(elaboratedTypeLoc(hasNamedTypeLoc(
- templateSpecializationTypeLoc()))))));
+ varDecl(hasName("var"), hasTypeLoc(templateSpecializationTypeLoc()))));
}
TEST_P(
@@ -2353,58 +2368,6 @@ TEST_P(
varDecl(hasName("var"), hasTypeLoc(templateSpecializationTypeLoc()))));
}
-TEST_P(ASTMatchersTest,
- ElaboratedTypeLocTest_BindsToElaboratedObjectDeclaration) {
- if (!GetParam().isCXX()) {
- return;
- }
- EXPECT_TRUE(matches("class C {}; class C c;",
- varDecl(hasName("c"), hasTypeLoc(elaboratedTypeLoc()))));
-}
-
-TEST_P(ASTMatchersTest,
- ElaboratedTypeLocTest_BindsToNamespaceElaboratedObjectDeclaration) {
- if (!GetParam().isCXX()) {
- return;
- }
- EXPECT_TRUE(matches("namespace N { class D {}; } N::D d;",
- varDecl(hasName("d"), hasTypeLoc(elaboratedTypeLoc()))));
-}
-
-TEST_P(ASTMatchersTest,
- ElaboratedTypeLocTest_BindsToElaboratedStructDeclaration) {
- EXPECT_TRUE(matches("struct s {}; struct s ss;",
- varDecl(hasName("ss"), hasTypeLoc(elaboratedTypeLoc()))));
-}
-
-TEST_P(ASTMatchersTest,
- ElaboratedTypeLocTest_BindsToBareElaboratedObjectDeclaration) {
- if (!GetParam().isCXX()) {
- return;
- }
- EXPECT_TRUE(matches("class C {}; C c;",
- varDecl(hasName("c"), hasTypeLoc(elaboratedTypeLoc()))));
-}
-
-TEST_P(
- ASTMatchersTest,
- ElaboratedTypeLocTest_DoesNotBindToNamespaceNonElaboratedObjectDeclaration) {
- if (!GetParam().isCXX()) {
- return;
- }
- EXPECT_TRUE(matches("namespace N { class D {}; } using N::D; D d;",
- varDecl(hasName("d"), hasTypeLoc(elaboratedTypeLoc()))));
-}
-
-TEST_P(ASTMatchersTest,
- ElaboratedTypeLocTest_BindsToBareElaboratedStructDeclaration) {
- if (!GetParam().isCXX()) {
- return;
- }
- EXPECT_TRUE(matches("struct s {}; s ss;",
- varDecl(hasName("ss"), hasTypeLoc(elaboratedTypeLoc()))));
-}
-
TEST_P(ASTMatchersTest, LambdaCaptureTest) {
if (!GetParam().isCXX11OrLater()) {
return;
diff --git a/clang/unittests/ASTMatchers/ASTMatchersTraversalTest.cpp b/clang/unittests/ASTMatchers/ASTMatchersTraversalTest.cpp
index 8ddae4e..c0a03de 100644
--- a/clang/unittests/ASTMatchers/ASTMatchersTraversalTest.cpp
+++ b/clang/unittests/ASTMatchers/ASTMatchersTraversalTest.cpp
@@ -190,13 +190,11 @@ TEST(TypeMatcher, MatchesDeclTypes) {
EXPECT_TRUE(matches("template <typename T> struct S {"
" void f(S s);"
"};",
- parmVarDecl(hasType(elaboratedType(
- namesType(injectedClassNameType()))))));
+ parmVarDecl(hasType(injectedClassNameType()))));
EXPECT_TRUE(notMatches("template <typename T> struct S {"
" void g(S<T> s);"
"};",
- parmVarDecl(hasType(elaboratedType(
- namesType(injectedClassNameType()))))));
+ parmVarDecl(hasType(injectedClassNameType()))));
// InjectedClassNameType -> CXXRecordDecl
EXPECT_TRUE(matches("template <typename T> struct S {"
" void f(S s);"
@@ -228,46 +226,39 @@ TEST(HasDeclaration, HasDeclarationOfEnumType) {
TEST(HasDeclaration, HasGetDeclTraitTest) {
static_assert(internal::has_getDecl<TypedefType>,
"Expected TypedefType to have a getDecl.");
- static_assert(internal::has_getDecl<RecordType>,
- "Expected RecordType to have a getDecl.");
static_assert(!internal::has_getDecl<TemplateSpecializationType>,
"Expected TemplateSpecializationType to *not* have a getDecl.");
}
TEST(HasDeclaration, ElaboratedType) {
- EXPECT_TRUE(matches(
- "namespace n { template <typename T> struct X {}; }"
- "void f(n::X<int>);",
- parmVarDecl(hasType(qualType(hasDeclaration(cxxRecordDecl()))))));
- EXPECT_TRUE(matches(
- "namespace n { template <typename T> struct X {}; }"
- "void f(n::X<int>);",
- parmVarDecl(hasType(elaboratedType(hasDeclaration(cxxRecordDecl()))))));
+ static const char Elaborated[] = "namespace n { struct X {}; }"
+ "void f(n::X);";
+ EXPECT_TRUE(
+ matches(Elaborated,
+ parmVarDecl(hasType(qualType(hasDeclaration(cxxRecordDecl()))))));
+ EXPECT_TRUE(matches(Elaborated, parmVarDecl(hasType(recordType(
+ hasDeclaration(cxxRecordDecl()))))));
}
TEST(HasDeclaration, HasDeclarationOfTypeWithDecl) {
EXPECT_TRUE(matches(
"typedef int X; X a;",
- varDecl(hasName("a"), hasType(elaboratedType(namesType(
- typedefType(hasDeclaration(decl()))))))));
+ varDecl(hasName("a"), hasType(typedefType(hasDeclaration(decl()))))));
// FIXME: Add tests for other types with getDecl() (e.g. RecordType)
}
TEST(HasDeclaration, HasDeclarationOfTemplateSpecializationType) {
- EXPECT_TRUE(matches(
- "template <typename T> class A {}; A<int> a;",
- varDecl(hasType(elaboratedType(namesType(templateSpecializationType(
- hasDeclaration(namedDecl(hasName("A"))))))))));
- EXPECT_TRUE(matches(
- "template <typename T> class A {};"
- "template <typename T> class B { A<T> a; };",
- fieldDecl(hasType(elaboratedType(namesType(templateSpecializationType(
- hasDeclaration(namedDecl(hasName("A"))))))))));
- EXPECT_TRUE(matches(
- "template <typename T> class A {}; A<int> a;",
- varDecl(hasType(elaboratedType(namesType(
- templateSpecializationType(hasDeclaration(cxxRecordDecl()))))))));
+ EXPECT_TRUE(matches("template <typename T> class A {}; A<int> a;",
+ varDecl(hasType(templateSpecializationType(
+ hasDeclaration(namedDecl(hasName("A"))))))));
+ EXPECT_TRUE(matches("template <typename T> class A {};"
+ "template <typename T> class B { A<T> a; };",
+ fieldDecl(hasType(templateSpecializationType(
+ hasDeclaration(namedDecl(hasName("A"))))))));
+ EXPECT_TRUE(matches("template <typename T> class A {}; A<int> a;",
+ varDecl(hasType(templateSpecializationType(
+ hasDeclaration(cxxRecordDecl()))))));
}
TEST(HasDeclaration, HasDeclarationOfCXXNewExpr) {
@@ -277,10 +268,9 @@ TEST(HasDeclaration, HasDeclarationOfCXXNewExpr) {
}
TEST(HasDeclaration, HasDeclarationOfTypeAlias) {
- EXPECT_TRUE(matches(
- "template <typename T> using C = T; C<int> c;",
- varDecl(hasType(elaboratedType(namesType(templateSpecializationType(
- hasDeclaration(typeAliasTemplateDecl()))))))));
+ EXPECT_TRUE(matches("template <typename T> using C = T; C<int> c;",
+ varDecl(hasType(templateSpecializationType(
+ hasDeclaration(typeAliasTemplateDecl()))))));
}
TEST(HasDeclaration, HasDeclarationOfObjCInterface) {
@@ -5152,21 +5142,6 @@ TEST(ForEachLambdaCapture, MatchExplicitCapturesOnly) {
matcher, std::make_unique<VerifyIdIsBoundTo<LambdaCapture>>("LC", 1)));
}
-TEST(HasConditionVariableStatement, DoesNotMatchCondition) {
- EXPECT_TRUE(notMatches(
- "void x() { if(true) {} }",
- ifStmt(hasConditionVariableStatement(declStmt()))));
- EXPECT_TRUE(notMatches(
- "void x() { int x; if((x = 42)) {} }",
- ifStmt(hasConditionVariableStatement(declStmt()))));
-}
-
-TEST(HasConditionVariableStatement, MatchesConditionVariables) {
- EXPECT_TRUE(matches(
- "void x() { if(int* a = 0) {} }",
- ifStmt(hasConditionVariableStatement(declStmt()))));
-}
-
TEST(ForEach, BindsOneNode) {
EXPECT_TRUE(matchAndVerifyResultTrue("class C { int x; };",
recordDecl(hasName("C"), forEach(fieldDecl(hasName("x")).bind("x"))),
@@ -5410,14 +5385,15 @@ TEST(LoopingMatchers, DoNotOverwritePreviousMatchResultOnFailure) {
functionDecl(parameterCountIs(1))))),
std::make_unique<VerifyIdIsBoundTo<Decl>>("x", 1)));
EXPECT_TRUE(matchAndVerifyResultTrue(
- "class A{}; class B{}; class C : B, A {};",
- cxxRecordDecl(decl().bind("x"), isDerivedFrom("::A")),
- std::make_unique<VerifyIdIsBoundTo<Decl>>("x", 1)));
+ "class A{}; class B{}; class C : B, A {};",
+ cxxRecordDecl(decl().bind("x"), isDerivedFrom("::A"),
+ unless(isImplicit())),
+ std::make_unique<VerifyIdIsBoundTo<Decl>>("x", 1)));
EXPECT_TRUE(matchAndVerifyResultTrue(
- "class A{}; typedef A B; typedef A C; typedef A D;"
+ "class A{}; typedef A B; typedef A C; typedef A D;"
"class E : A {};",
- cxxRecordDecl(decl().bind("x"), isDerivedFrom("C")),
- std::make_unique<VerifyIdIsBoundTo<Decl>>("x", 1)));
+ cxxRecordDecl(decl().bind("x"), isDerivedFrom("C"), unless(isImplicit())),
+ std::make_unique<VerifyIdIsBoundTo<Decl>>("x", 1)));
EXPECT_TRUE(matchAndVerifyResultTrue(
"class A { class B { void f() {} }; };",
functionDecl(decl().bind("x"), hasAncestor(recordDecl(hasName("::A")))),
@@ -5710,7 +5686,7 @@ TEST(HasAnyBase, BindsInnerBoundNodes) {
EXPECT_TRUE(matchAndVerifyResultTrue(
"struct Inner {}; struct Proxy : Inner {}; struct Main : public "
"Proxy {};",
- cxxRecordDecl(hasName("Main"),
+ cxxRecordDecl(hasName("Main"), unless(isImplicit()),
hasAnyBase(cxxBaseSpecifier(hasType(
cxxRecordDecl(hasName("Inner")).bind("base-class")))))
.bind("class"),
@@ -5736,47 +5712,28 @@ TEST(TypeMatching, PointeeTypes) {
TEST(ElaboratedTypeNarrowing, hasQualifier) {
EXPECT_TRUE(matches(
- "namespace N {"
- " namespace M {"
- " class D {};"
- " }"
- "}"
- "N::M::D d;",
- elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))))));
- EXPECT_TRUE(notMatches(
- "namespace M {"
- " class D {};"
- "}"
- "M::D d;",
- elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))))));
- EXPECT_TRUE(notMatches(
- "struct D {"
- "} d;",
- elaboratedType(hasQualifier(nestedNameSpecifier()))));
-}
-
-TEST(ElaboratedTypeNarrowing, namesType) {
- EXPECT_TRUE(matches(
- "namespace N {"
+ "namespace N {"
" namespace M {"
" class D {};"
" }"
"}"
"N::M::D d;",
- elaboratedType(elaboratedType(namesType(recordType(
- hasDeclaration(namedDecl(hasName("D")))))))));
+ recordType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))))));
EXPECT_TRUE(notMatches(
- "namespace M {"
+ "namespace M {"
" class D {};"
"}"
"M::D d;",
- elaboratedType(elaboratedType(namesType(typedefType())))));
+ recordType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))))));
+ EXPECT_TRUE(notMatches("struct D {"
+ "} d;",
+ recordType(hasQualifier(nestedNameSpecifier()))));
}
TEST(NNS, BindsNestedNameSpecifiers) {
EXPECT_TRUE(matchAndVerifyResultTrue(
"namespace ns { struct E { struct B {}; }; } ns::E::B b;",
- nestedNameSpecifier(specifiesType(asString("struct ns::E"))).bind("nns"),
+ nestedNameSpecifier(specifiesType(asString("ns::E"))).bind("nns"),
std::make_unique<VerifyIdIsBoundTo<NestedNameSpecifier>>("nns",
"ns::E::")));
}
@@ -5793,38 +5750,33 @@ TEST(NNS, DescendantsOfNestedNameSpecifiers) {
"namespace a { struct A { struct B { struct C {}; }; }; };"
"void f() { a::A::B::C c; }";
EXPECT_TRUE(matches(
- Fragment,
- nestedNameSpecifier(specifiesType(asString("struct a::A::B")),
- hasDescendant(nestedNameSpecifier(
- specifiesNamespace(hasName("a")))))));
+ Fragment, nestedNameSpecifier(specifiesType(asString("a::A::B")),
+ hasDescendant(nestedNameSpecifier(
+ specifiesNamespace(hasName("a")))))));
EXPECT_TRUE(notMatches(
- Fragment,
- nestedNameSpecifier(specifiesType(asString("struct a::A::B")),
- has(nestedNameSpecifier(
- specifiesNamespace(hasName("a")))))));
+ Fragment, nestedNameSpecifier(specifiesType(asString("a::A::B")),
+ has(nestedNameSpecifier(
+ specifiesNamespace(hasName("a")))))));
EXPECT_TRUE(matches(
- Fragment,
- nestedNameSpecifier(specifiesType(asString("struct a::A")),
- has(nestedNameSpecifier(
- specifiesNamespace(hasName("a")))))));
+ Fragment, nestedNameSpecifier(specifiesType(asString("a::A")),
+ has(nestedNameSpecifier(
+ specifiesNamespace(hasName("a")))))));
// Not really useful because a NestedNameSpecifier can have at most one child,
// but provided to complete the interface.
EXPECT_TRUE(matchAndVerifyResultTrue(
- Fragment,
- nestedNameSpecifier(specifiesType(asString("struct a::A::B")),
- forEach(nestedNameSpecifier().bind("x"))),
- std::make_unique<VerifyIdIsBoundTo<NestedNameSpecifier>>("x", 1)));
+ Fragment,
+ nestedNameSpecifier(specifiesType(asString("a::A::B")),
+ forEach(nestedNameSpecifier().bind("x"))),
+ std::make_unique<VerifyIdIsBoundTo<NestedNameSpecifier>>("x", 1)));
}
TEST(NNS, NestedNameSpecifiersAsDescendants) {
StringRef Fragment =
"namespace a { struct A { struct B { struct C {}; }; }; };"
"void f() { a::A::B::C c; }";
- EXPECT_TRUE(matches(
- Fragment,
- decl(hasDescendant(nestedNameSpecifier(specifiesType(
- asString("struct a::A")))))));
+ EXPECT_TRUE(matches(Fragment, decl(hasDescendant(nestedNameSpecifier(
+ specifiesType(asString("a::A")))))));
EXPECT_TRUE(matchAndVerifyResultTrue(
Fragment,
functionDecl(hasName("f"),
@@ -5837,37 +5789,34 @@ TEST(NNSLoc, DescendantsOfNestedNameSpecifierLocs) {
StringRef Fragment =
"namespace a { struct A { struct B { struct C {}; }; }; };"
"void f() { a::A::B::C c; }";
- EXPECT_TRUE(matches(
- Fragment,
- nestedNameSpecifierLoc(loc(specifiesType(asString("struct a::A::B"))),
- hasDescendant(loc(nestedNameSpecifier(
- specifiesNamespace(hasName("a"))))))));
+ EXPECT_TRUE(matches(Fragment, nestedNameSpecifierLoc(
+ loc(specifiesType(asString("a::A::B"))),
+ hasDescendant(loc(nestedNameSpecifier(
+ specifiesNamespace(hasName("a"))))))));
EXPECT_TRUE(notMatches(
- Fragment,
- nestedNameSpecifierLoc(loc(specifiesType(asString("struct a::A::B"))),
- has(loc(nestedNameSpecifier(
- specifiesNamespace(hasName("a"))))))));
+ Fragment,
+ nestedNameSpecifierLoc(
+ loc(specifiesType(asString("a::A::B"))),
+ has(loc(nestedNameSpecifier(specifiesNamespace(hasName("a"))))))));
EXPECT_TRUE(matches(
- Fragment,
- nestedNameSpecifierLoc(loc(specifiesType(asString("struct a::A"))),
- has(loc(nestedNameSpecifier(
- specifiesNamespace(hasName("a"))))))));
+ Fragment,
+ nestedNameSpecifierLoc(
+ loc(specifiesType(asString("a::A"))),
+ has(loc(nestedNameSpecifier(specifiesNamespace(hasName("a"))))))));
EXPECT_TRUE(matchAndVerifyResultTrue(
- Fragment,
- nestedNameSpecifierLoc(loc(specifiesType(asString("struct a::A::B"))),
- forEach(nestedNameSpecifierLoc().bind("x"))),
- std::make_unique<VerifyIdIsBoundTo<NestedNameSpecifierLoc>>("x", 1)));
+ Fragment,
+ nestedNameSpecifierLoc(loc(specifiesType(asString("a::A::B"))),
+ forEach(nestedNameSpecifierLoc().bind("x"))),
+ std::make_unique<VerifyIdIsBoundTo<NestedNameSpecifierLoc>>("x", 1)));
}
TEST(NNSLoc, NestedNameSpecifierLocsAsDescendants) {
StringRef Fragment =
"namespace a { struct A { struct B { struct C {}; }; }; };"
"void f() { a::A::B::C c; }";
- EXPECT_TRUE(matches(
- Fragment,
- decl(hasDescendant(loc(nestedNameSpecifier(specifiesType(
- asString("struct a::A"))))))));
+ EXPECT_TRUE(matches(Fragment, decl(hasDescendant(loc(nestedNameSpecifier(
+ specifiesType(asString("a::A"))))))));
EXPECT_TRUE(matchAndVerifyResultTrue(
Fragment,
functionDecl(hasName("f"),
@@ -5883,7 +5832,9 @@ TEST(Attr, AttrsAsDescendants) {
EXPECT_TRUE(matchAndVerifyResultTrue(
Fragment,
namespaceDecl(hasName("a"),
- forEachDescendant(attr(unless(isImplicit())).bind("x"))),
+ forEachDescendant(decl(
+ hasDescendant(attr(unless(isImplicit())).bind("x")),
+ unless(isImplicit())))),
std::make_unique<VerifyIdIsBoundTo<Attr>>("x", 2)));
}
@@ -6531,21 +6482,19 @@ TEST(HasReferentLoc, DoesNotBindToParameterWithoutIntReferenceTypeLoc) {
}
TEST(HasAnyTemplateArgumentLoc, BindsToSpecializationWithIntArgument) {
- EXPECT_TRUE(matches(
- "template<typename T> class A {}; A<int> a;",
- varDecl(hasName("a"),
- hasTypeLoc(elaboratedTypeLoc(hasNamedTypeLoc(
- templateSpecializationTypeLoc(hasAnyTemplateArgumentLoc(
- hasTypeLoc(loc(asString("int")))))))))));
+ EXPECT_TRUE(
+ matches("template<typename T> class A {}; A<int> a;",
+ varDecl(hasName("a"), hasTypeLoc(templateSpecializationTypeLoc(
+ hasAnyTemplateArgumentLoc(hasTypeLoc(
+ loc(asString("int")))))))));
}
TEST(HasAnyTemplateArgumentLoc, BindsToSpecializationWithDoubleArgument) {
- EXPECT_TRUE(matches(
- "template<typename T> class A {}; A<double> a;",
- varDecl(hasName("a"),
- hasTypeLoc(elaboratedTypeLoc(hasNamedTypeLoc(
- templateSpecializationTypeLoc(hasAnyTemplateArgumentLoc(
- hasTypeLoc(loc(asString("double")))))))))));
+ EXPECT_TRUE(
+ matches("template<typename T> class A {}; A<double> a;",
+ varDecl(hasName("a"), hasTypeLoc(templateSpecializationTypeLoc(
+ hasAnyTemplateArgumentLoc(hasTypeLoc(
+ loc(asString("double")))))))));
}
TEST(HasAnyTemplateArgumentLoc, BindsToExplicitSpecializationWithIntArgument) {
@@ -6598,30 +6547,27 @@ TEST(HasAnyTemplateArgumentLoc,
}
TEST(HasTemplateArgumentLoc, BindsToSpecializationWithIntArgument) {
- EXPECT_TRUE(
- matches("template<typename T> class A {}; A<int> a;",
- varDecl(hasName("a"),
- hasTypeLoc(elaboratedTypeLoc(hasNamedTypeLoc(
- templateSpecializationTypeLoc(hasTemplateArgumentLoc(
- 0, hasTypeLoc(loc(asString("int")))))))))));
+ EXPECT_TRUE(matches(
+ "template<typename T> class A {}; A<int> a;",
+ varDecl(hasName("a"),
+ hasTypeLoc(templateSpecializationTypeLoc(hasTemplateArgumentLoc(
+ 0, hasTypeLoc(loc(asString("int")))))))));
}
TEST(HasTemplateArgumentLoc, BindsToSpecializationWithDoubleArgument) {
- EXPECT_TRUE(
- matches("template<typename T> class A {}; A<double> a;",
- varDecl(hasName("a"),
- hasTypeLoc(elaboratedTypeLoc(hasNamedTypeLoc(
- templateSpecializationTypeLoc(hasTemplateArgumentLoc(
- 0, hasTypeLoc(loc(asString("double")))))))))));
+ EXPECT_TRUE(matches(
+ "template<typename T> class A {}; A<double> a;",
+ varDecl(hasName("a"),
+ hasTypeLoc(templateSpecializationTypeLoc(hasTemplateArgumentLoc(
+ 0, hasTypeLoc(loc(asString("double")))))))));
}
TEST(HasTemplateArgumentLoc, DoesNotBindToSpecializationWithIntArgument) {
EXPECT_TRUE(notMatches(
"template<typename T> class A {}; A<int> a;",
varDecl(hasName("a"),
- hasTypeLoc(elaboratedTypeLoc(hasNamedTypeLoc(
- templateSpecializationTypeLoc(hasTemplateArgumentLoc(
- 0, hasTypeLoc(loc(asString("double")))))))))));
+ hasTypeLoc(templateSpecializationTypeLoc(hasTemplateArgumentLoc(
+ 0, hasTypeLoc(loc(asString("double")))))))));
}
TEST(HasTemplateArgumentLoc, BindsToExplicitSpecializationWithIntArgument) {
@@ -6735,12 +6681,11 @@ TEST(HasNamedTypeLoc, BindsToElaboratedObjectDeclaration) {
class C<int> c;
)",
varDecl(hasName("c"),
- hasTypeLoc(elaboratedTypeLoc(
- hasNamedTypeLoc(templateSpecializationTypeLoc(
- hasAnyTemplateArgumentLoc(templateArgumentLoc()))))))));
+ hasTypeLoc(templateSpecializationTypeLoc(
+ hasAnyTemplateArgumentLoc(templateArgumentLoc()))))));
}
-TEST(HasNamedTypeLoc, DoesNotBindToNonElaboratedObjectDeclaration) {
+TEST(HasNamedTypeLoc, BindsToNonElaboratedObjectDeclaration) {
EXPECT_TRUE(matches(
R"(
template <typename T>
@@ -6748,9 +6693,8 @@ TEST(HasNamedTypeLoc, DoesNotBindToNonElaboratedObjectDeclaration) {
C<int> c;
)",
varDecl(hasName("c"),
- hasTypeLoc(elaboratedTypeLoc(
- hasNamedTypeLoc(templateSpecializationTypeLoc(
- hasAnyTemplateArgumentLoc(templateArgumentLoc()))))))));
+ hasTypeLoc(templateSpecializationTypeLoc(
+ hasAnyTemplateArgumentLoc(templateArgumentLoc()))))));
}
} // namespace ast_matchers
diff --git a/clang/unittests/Analysis/FlowSensitive/CMakeLists.txt b/clang/unittests/Analysis/FlowSensitive/CMakeLists.txt
index 4ac5631..3bd4a6e 100644
--- a/clang/unittests/Analysis/FlowSensitive/CMakeLists.txt
+++ b/clang/unittests/Analysis/FlowSensitive/CMakeLists.txt
@@ -8,6 +8,7 @@ add_clang_unittest(ClangAnalysisFlowSensitiveTests
DataflowEnvironmentTest.cpp
DebugSupportTest.cpp
DeterminismTest.cpp
+ FormulaTest.cpp
LoggerTest.cpp
MapLatticeTest.cpp
MatchSwitchTest.cpp
diff --git a/clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp b/clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp
index ffc50fb..fb3ab7c 100644
--- a/clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp
@@ -80,7 +80,7 @@ struct CommonTestInputs {
)cpp") {
auto *SDecl = cast<CXXRecordDecl>(
lookup("S", *AST.context().getTranslationUnitDecl()));
- SType = AST.context().getRecordType(SDecl);
+ SType = AST.context().getCanonicalTagType(SDecl);
CallVal = selectFirst<CallExpr>(
"call",
match(cxxMemberCallExpr(callee(functionDecl(hasName("valProperty"))))
@@ -163,7 +163,7 @@ TEST_F(CachedConstAccessorsLatticeTest,
)cpp");
auto *SDecl =
cast<CXXRecordDecl>(lookup("S", *AST.context().getTranslationUnitDecl()));
- QualType SType = AST.context().getRecordType(SDecl);
+ CanQualType SType = AST.context().getCanonicalTagType(SDecl);
const CallExpr *CE = selectFirst<CallExpr>(
"call", match(cxxMemberCallExpr(
callee(functionDecl(hasName("structValProperty"))))
@@ -237,7 +237,7 @@ TEST_F(CachedConstAccessorsLatticeTest, DifferentValsFromDifferentLocs) {
)cpp");
auto *SDecl =
cast<CXXRecordDecl>(lookup("S", *AST.context().getTranslationUnitDecl()));
- QualType SType = AST.context().getRecordType(SDecl);
+ CanQualType SType = AST.context().getCanonicalTagType(SDecl);
SmallVector<BoundNodes, 1> valPropertyCalls =
match(cxxMemberCallExpr(callee(functionDecl(hasName("valProperty"))))
.bind("call"),
diff --git a/clang/unittests/Analysis/FlowSensitive/DataflowAnalysisContextTest.cpp b/clang/unittests/Analysis/FlowSensitive/DataflowAnalysisContextTest.cpp
index 4f7a72c..92b687a 100644
--- a/clang/unittests/Analysis/FlowSensitive/DataflowAnalysisContextTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/DataflowAnalysisContextTest.cpp
@@ -17,6 +17,9 @@ namespace {
using namespace clang;
using namespace dataflow;
+using ::testing::IsEmpty;
+using ::testing::UnorderedElementsAre;
+
class DataflowAnalysisContextTest : public ::testing::Test {
protected:
DataflowAnalysisContextTest()
@@ -171,4 +174,97 @@ TEST_F(DataflowAnalysisContextTest, EquivBoolVals) {
A.makeAnd(X, A.makeAnd(Y, Z))));
}
+using ExportLogicalContextTest = DataflowAnalysisContextTest;
+
+TEST_F(ExportLogicalContextTest, EmptySet) {
+ EXPECT_THAT(Context.exportLogicalContext({}).TokenDefs, IsEmpty());
+}
+
+// Only constrained tokens are included in the output.
+TEST_F(ExportLogicalContextTest, UnconstrainedIgnored) {
+ Atom FC1 = A.makeFlowConditionToken();
+ EXPECT_THAT(Context.exportLogicalContext({FC1}).TokenDefs, IsEmpty());
+}
+
+TEST_F(ExportLogicalContextTest, SingletonSet) {
+ Atom FC1 = A.makeFlowConditionToken();
+ auto &C1 = A.makeAtomRef(A.makeAtom());
+ Context.addFlowConditionConstraint(FC1, C1);
+ EXPECT_THAT(Context.exportLogicalContext({FC1}).TokenDefs.keys(),
+ UnorderedElementsAre(FC1));
+}
+
+TEST_F(ExportLogicalContextTest, NoDependency) {
+ Atom FC1 = A.makeFlowConditionToken();
+ Atom FC2 = A.makeFlowConditionToken();
+ Atom FC3 = A.makeFlowConditionToken();
+ auto &C1 = A.makeAtomRef(A.makeAtom());
+ auto &C2 = A.makeAtomRef(A.makeAtom());
+ auto &C3 = A.makeAtomRef(A.makeAtom());
+
+ Context.addFlowConditionConstraint(FC1, C1);
+ Context.addFlowConditionConstraint(FC2, C2);
+ Context.addFlowConditionConstraint(FC3, C3);
+
+ // The FCs are independent, so each export should contain only its own token.
+ EXPECT_THAT(Context.exportLogicalContext({FC1}).TokenDefs.keys(),
+ UnorderedElementsAre(FC1));
+ EXPECT_THAT(Context.exportLogicalContext({FC2}).TokenDefs.keys(),
+ UnorderedElementsAre(FC2));
+ EXPECT_THAT(Context.exportLogicalContext({FC3}).TokenDefs.keys(),
+ UnorderedElementsAre(FC3));
+}
+
+TEST_F(ExportLogicalContextTest, SimpleDependencyChain) {
+ Atom FC1 = A.makeFlowConditionToken();
+ const Formula &C = A.makeAtomRef(A.makeAtom());
+ Context.addFlowConditionConstraint(FC1, C);
+ Atom FC2 = Context.forkFlowCondition(FC1);
+ Atom FC3 = Context.forkFlowCondition(FC2);
+
+ EXPECT_THAT(Context.exportLogicalContext({FC3}).TokenDefs.keys(),
+ UnorderedElementsAre(FC1, FC2, FC3));
+}
+
+TEST_F(ExportLogicalContextTest, DependencyTree) {
+ Atom FC1 = A.makeFlowConditionToken();
+ const Formula &C = A.makeAtomRef(A.makeAtom());
+ Context.addFlowConditionConstraint(FC1, C);
+ Atom FC2 = Context.forkFlowCondition(FC1);
+ Atom FC3 = A.makeFlowConditionToken();
+ Context.addFlowConditionConstraint(FC3, C);
+ Atom FC4 = Context.joinFlowConditions(FC2, FC3);
+
+ EXPECT_THAT(Context.exportLogicalContext({FC4}).TokenDefs.keys(),
+ UnorderedElementsAre(FC1, FC2, FC3, FC4));
+}
+
+TEST_F(ExportLogicalContextTest, DependencyDAG) {
+ Atom FC1 = A.makeFlowConditionToken();
+ const Formula &C = A.makeAtomRef(A.makeAtom());
+ Context.addFlowConditionConstraint(FC1, C);
+
+ Atom FC2 = Context.forkFlowCondition(FC1);
+ Atom FC3 = Context.forkFlowCondition(FC1);
+ Atom FC4 = Context.joinFlowConditions(FC2, FC3);
+
+ EXPECT_THAT(Context.exportLogicalContext({FC4}).TokenDefs.keys(),
+ UnorderedElementsAre(FC1, FC2, FC3, FC4));
+}
+
+TEST_F(ExportLogicalContextTest, MixedDependencies) {
+ Atom FC1 = A.makeFlowConditionToken();
+ const Formula &C = A.makeAtomRef(A.makeAtom());
+ Context.addFlowConditionConstraint(FC1, C);
+
+ Atom FC2 = Context.forkFlowCondition(FC1);
+ Atom FC3 = Context.forkFlowCondition(FC2);
+ (void)FC3; // unused, and we test below that it is not included.
+
+ Atom FC4 = A.makeFlowConditionToken();
+ Context.addFlowConditionConstraint(FC4, C);
+
+ EXPECT_THAT(Context.exportLogicalContext({FC2, FC4}).TokenDefs.keys(),
+ UnorderedElementsAre(FC1, FC2, FC4));
+}
} // namespace
diff --git a/clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp b/clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp
index 737277e..0780db9 100644
--- a/clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp
@@ -402,7 +402,7 @@ TEST_F(EnvironmentTest,
// `Target<&S::accessor>`.
Environment Env(DAContext, *Fun);
Env.initialize();
- EXPECT_THAT(DAContext.getModeledFields(QualType(Struct->getTypeForDecl(), 0)),
+ EXPECT_THAT(DAContext.getModeledFields(Context.getCanonicalTagType(Struct)),
Contains(Member));
}
diff --git a/clang/unittests/Analysis/FlowSensitive/FormulaTest.cpp b/clang/unittests/Analysis/FlowSensitive/FormulaTest.cpp
new file mode 100644
index 0000000..db9c028
--- /dev/null
+++ b/clang/unittests/Analysis/FlowSensitive/FormulaTest.cpp
@@ -0,0 +1,199 @@
+//===- unittests/Analysis/FlowSensitive/FormulaTest.cpp -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Analysis/FlowSensitive/Arena.h"
+#include "clang/Analysis/FlowSensitive/FormulaSerialization.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Testing/Support/Error.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+using namespace clang;
+using namespace dataflow;
+
+using ::llvm::Failed;
+using ::llvm::HasValue;
+using ::llvm::Succeeded;
+
+class SerializeFormulaTest : public ::testing::Test {
+protected:
+ Arena A;
+ std::string Out;
+ llvm::raw_string_ostream OS{Out};
+
+ const Formula &A1 = A.makeAtomRef(A.makeAtom());
+ const Formula &A2 = A.makeAtomRef(A.makeAtom());
+};
+
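+// The expected strings below use a prefix (Polish) encoding: atoms serialize
+// as "V<id>", literals as "T"/"F", and each operator ("!", "&", "|", ">",
+// "=") precedes its operands, so no parentheses are needed.
+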
+TEST_F(SerializeFormulaTest, Atom) {
+ serializeFormula(A1, OS);
+ EXPECT_EQ(Out, "V0");
+ Out = "";
+
+ serializeFormula(A2, OS);
+ EXPECT_EQ(Out, "V1");
+}
+
+TEST_F(SerializeFormulaTest, LiteralTrue) {
+ serializeFormula(A.makeLiteral(true), OS);
+ EXPECT_EQ(Out, "T");
+}
+
+TEST_F(SerializeFormulaTest, LiteralFalse) {
+ serializeFormula(A.makeLiteral(false), OS);
+ EXPECT_EQ(Out, "F");
+}
+
+TEST_F(SerializeFormulaTest, Not) {
+ serializeFormula(A.makeNot(A1), OS);
+ EXPECT_EQ(Out, "!V0");
+}
+
+TEST_F(SerializeFormulaTest, Or) {
+ serializeFormula(A.makeOr(A1, A2), OS);
+ EXPECT_EQ(Out, "|V0V1");
+}
+
+TEST_F(SerializeFormulaTest, And) {
+ serializeFormula(A.makeAnd(A1, A2), OS);
+ EXPECT_EQ(Out, "&V0V1");
+}
+
+TEST_F(SerializeFormulaTest, Implies) {
+ serializeFormula(A.makeImplies(A1, A2), OS);
+ EXPECT_EQ(Out, ">V0V1");
+}
+
+TEST_F(SerializeFormulaTest, Equal) {
+ serializeFormula(A.makeEquals(A1, A2), OS);
+ EXPECT_EQ(Out, "=V0V1");
+}
+
+TEST_F(SerializeFormulaTest, NestedBinaryUnary) {
+ serializeFormula(A.makeEquals(A.makeOr(A1, A2), A2), OS);
+ EXPECT_EQ(Out, "=|V0V1V1");
+}
+
+TEST_F(SerializeFormulaTest, NestedBinaryBinary) {
+ serializeFormula(A.makeEquals(A.makeOr(A1, A2), A.makeAnd(A1, A2)), OS);
+ EXPECT_EQ(Out, "=|V0V1&V0V1");
+}
+
+class ParseFormulaTest : public ::testing::Test {
+protected:
+ void SetUp() override {
+ AtomMap[0] = Atom1;
+ AtomMap[1] = Atom2;
+ }
+
+ // Convenience wrapper around `parseFormula` using the fixture's arena and map.
+ llvm::Expected<const Formula *> testParseFormula(llvm::StringRef Str) {
+ return parseFormula(Str, A, AtomMap);
+ }
+
+ Arena A;
+ std::string Out;
+ llvm::raw_string_ostream OS{Out};
+
+ Atom Atom1 = A.makeAtom();
+ Atom Atom2 = A.makeAtom();
+ const Formula &A1 = A.makeAtomRef(Atom1);
+ const Formula &A2 = A.makeAtomRef(Atom2);
+ llvm::DenseMap<unsigned, Atom> AtomMap;
+};
+
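+// `AtomMap` pre-binds indices 0 and 1 to `Atom1` and `Atom2`, so "V0" and
+// "V1" should parse back to the existing atoms; the fresh-atom path is
+// covered separately by `GeneratesAtoms` below.
+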
+TEST_F(ParseFormulaTest, Atom) {
+ EXPECT_THAT_EXPECTED(testParseFormula("V0"), HasValue(&A1));
+ EXPECT_THAT_EXPECTED(testParseFormula("V1"), HasValue(&A2));
+}
+
+TEST_F(ParseFormulaTest, LiteralTrue) {
+ EXPECT_THAT_EXPECTED(testParseFormula("T"), HasValue(&A.makeLiteral(true)));
+}
+
+TEST_F(ParseFormulaTest, LiteralFalse) {
+ EXPECT_THAT_EXPECTED(testParseFormula("F"), HasValue(&A.makeLiteral(false)));
+}
+
+TEST_F(ParseFormulaTest, Not) {
+ EXPECT_THAT_EXPECTED(testParseFormula("!V0"), HasValue(&A.makeNot(A1)));
+}
+
+TEST_F(ParseFormulaTest, Or) {
+ EXPECT_THAT_EXPECTED(testParseFormula("|V0V1"), HasValue(&A.makeOr(A1, A2)));
+}
+
+TEST_F(ParseFormulaTest, And) {
+ EXPECT_THAT_EXPECTED(testParseFormula("&V0V1"), HasValue(&A.makeAnd(A1, A2)));
+}
+
+TEST_F(ParseFormulaTest, OutOfNumericOrder) {
+ EXPECT_THAT_EXPECTED(testParseFormula("&V1V0"), HasValue(&A.makeAnd(A2, A1)));
+}
+
+TEST_F(ParseFormulaTest, Implies) {
+ EXPECT_THAT_EXPECTED(testParseFormula(">V0V1"),
+ HasValue(&A.makeImplies(A1, A2)));
+}
+
+TEST_F(ParseFormulaTest, Equal) {
+ EXPECT_THAT_EXPECTED(testParseFormula("=V0V1"),
+ HasValue(&A.makeEquals(A1, A2)));
+}
+
+TEST_F(ParseFormulaTest, NestedBinaryUnary) {
+ EXPECT_THAT_EXPECTED(testParseFormula("=|V0V1V1"),
+ HasValue(&A.makeEquals(A.makeOr(A1, A2), A2)));
+}
+
+TEST_F(ParseFormulaTest, NestedBinaryBinary) {
+ EXPECT_THAT_EXPECTED(
+ testParseFormula("=|V0V1&V0V1"),
+ HasValue(&A.makeEquals(A.makeOr(A1, A2), A.makeAnd(A1, A2))));
+}
+
+// Verifies that parsing generates fresh atoms if they are not already in the
+// map.
+TEST_F(ParseFormulaTest, GeneratesAtoms) {
+ llvm::DenseMap<unsigned, Atom> FreshAtomMap;
+ ASSERT_THAT_EXPECTED(parseFormula("=V0V1", A, FreshAtomMap), Succeeded());
+ // The map contains exactly two distinct atoms.
+ ASSERT_EQ(FreshAtomMap.size(), 2U);
+ EXPECT_NE(FreshAtomMap[0], FreshAtomMap[1]);
+}
+
+TEST_F(ParseFormulaTest, MalformedFormulaFails) {
+ // Arbitrary string.
+ EXPECT_THAT_EXPECTED(testParseFormula("Hello"), Failed());
+ // Empty string.
+ EXPECT_THAT_EXPECTED(testParseFormula(""), Failed());
+ // Malformed atom.
+ EXPECT_THAT_EXPECTED(testParseFormula("Vabc"), Failed());
+ // Irrelevant suffix.
+ EXPECT_THAT_EXPECTED(testParseFormula("V0Hello"), Failed());
+ EXPECT_THAT_EXPECTED(testParseFormula("=V0V1Hello"), Failed());
+ // Sequence without operator.
+ EXPECT_THAT_EXPECTED(testParseFormula("TF"), Failed());
+ // Bad subformula.
+ EXPECT_THAT_EXPECTED(testParseFormula("!G"), Failed());
+ // Incomplete formulas.
+ EXPECT_THAT_EXPECTED(testParseFormula("V"), Failed());
+ EXPECT_THAT_EXPECTED(testParseFormula("&"), Failed());
+ EXPECT_THAT_EXPECTED(testParseFormula("|"), Failed());
+ EXPECT_THAT_EXPECTED(testParseFormula(">"), Failed());
+ EXPECT_THAT_EXPECTED(testParseFormula("="), Failed());
+ EXPECT_THAT_EXPECTED(testParseFormula("&V0"), Failed());
+ EXPECT_THAT_EXPECTED(testParseFormula("|V0"), Failed());
+ EXPECT_THAT_EXPECTED(testParseFormula(">V0"), Failed());
+ EXPECT_THAT_EXPECTED(testParseFormula("=V0"), Failed());
+}
+
+} // namespace
diff --git a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
index 214aaee..96e759e 100644
--- a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
@@ -9,17 +9,25 @@
#include "TestingSupport.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"
#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/NoopAnalysis.h"
+#include "clang/Analysis/FlowSensitive/NoopLattice.h"
#include "clang/Analysis/FlowSensitive/RecordOps.h"
#include "clang/Analysis/FlowSensitive/StorageLocation.h"
#include "clang/Analysis/FlowSensitive/Value.h"
#include "clang/Basic/LangStandard.h"
#include "clang/Testing/TestAST.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Testing/Support/Error.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
@@ -27,6 +35,7 @@
#include <string>
#include <string_view>
#include <utility>
+#include <vector>
namespace clang {
namespace dataflow {
@@ -3541,7 +3550,7 @@ TEST(TransferTest, ResultObjectLocationDontVisitUnevaluatedContexts) {
testFunction(Code, "noexceptTarget");
}
-TEST(TransferTest, StaticCast) {
+TEST(TransferTest, StaticCastNoOp) {
std::string Code = R"(
void target(int Foo) {
int Bar = static_cast<int>(Foo);
@@ -3561,6 +3570,13 @@ TEST(TransferTest, StaticCast) {
const ValueDecl *BarDecl = findValueDecl(ASTCtx, "Bar");
ASSERT_THAT(BarDecl, NotNull());
+ const auto *Cast = ast_matchers::selectFirst<CXXStaticCastExpr>(
+ "cast",
+ ast_matchers::match(ast_matchers::cxxStaticCastExpr().bind("cast"),
+ ASTCtx));
+ ASSERT_THAT(Cast, NotNull());
+ ASSERT_EQ(Cast->getCastKind(), CK_NoOp);
+
const auto *FooVal = Env.getValue(*FooDecl);
const auto *BarVal = Env.getValue(*BarDecl);
EXPECT_TRUE(isa<IntegerValue>(FooVal));
@@ -3569,6 +3585,268 @@ TEST(TransferTest, StaticCast) {
});
}
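+// The cast tests below check how each cast kind is modeled: base-to-derived
+// casts are expected to preserve the operand's value and storage location,
+// while constructor and user-defined conversions yield distinct locations.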
+TEST(TransferTest, StaticCastBaseToDerived) {
+ std::string Code = R"cc(
+ struct Base {
+ char C;
+ };
+ struct Intermediate : public Base {
+ bool B;
+ };
+ struct Derived : public Intermediate {
+ int I;
+ };
+ Base& getBaseRef();
+ void target(Base* BPtr) {
+ Derived* DPtr = static_cast<Derived*>(BPtr);
+ DPtr->C;
+ DPtr->B;
+ DPtr->I;
+ Derived& DRef = static_cast<Derived&>(*BPtr);
+ DRef.C;
+ DRef.B;
+ DRef.I;
+ Derived& DRefFromFunc = static_cast<Derived&>(getBaseRef());
+ DRefFromFunc.C;
+ DRefFromFunc.B;
+ DRefFromFunc.I;
+ // [[p]]
+ }
+ )cc";
+ runDataflow(
+ Code,
+ [](const llvm::StringMap<DataflowAnalysisState<NoopLattice>> &Results,
+ ASTContext &ASTCtx) {
+ ASSERT_THAT(Results.keys(), UnorderedElementsAre("p"));
+ const Environment &Env = getEnvironmentAtAnnotation(Results, "p");
+
+ const ValueDecl *BPtrDecl = findValueDecl(ASTCtx, "BPtr");
+ ASSERT_THAT(BPtrDecl, NotNull());
+
+ const ValueDecl *DPtrDecl = findValueDecl(ASTCtx, "DPtr");
+ ASSERT_THAT(DPtrDecl, NotNull());
+
+ const ValueDecl *DRefDecl = findValueDecl(ASTCtx, "DRef");
+ ASSERT_THAT(DRefDecl, NotNull());
+
+ const ValueDecl *DRefFromFuncDecl =
+ findValueDecl(ASTCtx, "DRefFromFunc");
+ ASSERT_THAT(DRefFromFuncDecl, NotNull());
+
+ const auto *Cast = ast_matchers::selectFirst<CXXStaticCastExpr>(
+ "cast",
+ ast_matchers::match(ast_matchers::cxxStaticCastExpr().bind("cast"),
+ ASTCtx));
+ ASSERT_THAT(Cast, NotNull());
+ ASSERT_EQ(Cast->getCastKind(), CK_BaseToDerived);
+
+ EXPECT_EQ(Env.getValue(*BPtrDecl), Env.getValue(*DPtrDecl));
+ EXPECT_EQ(&Env.get<PointerValue>(*BPtrDecl)->getPointeeLoc(),
+ Env.getStorageLocation(*DRefDecl));
+ // For DRefFromFunc, it is enough that analyzing the field accesses
+ // does not crash.
+ });
+}
+
+TEST(TransferTest, ExplicitDerivedToBaseCast) {
+ std::string Code = R"cc(
+ struct Base {};
+ struct Derived : public Base {};
+ void target(Derived D) {
+ (Base*)&D;
+ // [[p]]
+ }
+)cc";
+ runDataflow(
+ Code,
+ [](const llvm::StringMap<DataflowAnalysisState<NoopLattice>> &Results,
+ ASTContext &ASTCtx) {
+ ASSERT_THAT(Results.keys(), UnorderedElementsAre("p"));
+ const Environment &Env = getEnvironmentAtAnnotation(Results, "p");
+
+ auto *Cast = ast_matchers::selectFirst<ImplicitCastExpr>(
+ "cast", ast_matchers::match(
+ ast_matchers::implicitCastExpr().bind("cast"), ASTCtx));
+ ASSERT_THAT(Cast, NotNull());
+ ASSERT_EQ(Cast->getCastKind(), CK_DerivedToBase);
+
+ auto *AddressOf = ast_matchers::selectFirst<UnaryOperator>(
+ "addressof",
+ ast_matchers::match(ast_matchers::unaryOperator().bind("addressof"),
+ ASTCtx));
+ ASSERT_THAT(AddressOf, NotNull());
+ ASSERT_EQ(AddressOf->getOpcode(), UO_AddrOf);
+
+ EXPECT_EQ(Env.getValue(*Cast), Env.getValue(*AddressOf));
+ });
+}
+
+TEST(TransferTest, ConstructorConversion) {
+ std::string Code = R"cc(
+ struct Base {};
+ struct Derived : public Base {};
+ void target(Derived D) {
+ Base B = (Base)D;
+ // [[p]]
+ }
+)cc";
+ runDataflow(
+ Code,
+ [](const llvm::StringMap<DataflowAnalysisState<NoopLattice>> &Results,
+ ASTContext &ASTCtx) {
+ ASSERT_THAT(Results.keys(), UnorderedElementsAre("p"));
+ const Environment &Env = getEnvironmentAtAnnotation(Results, "p");
+
+ auto *Cast = ast_matchers::selectFirst<CStyleCastExpr>(
+ "cast", ast_matchers::match(
+ ast_matchers::cStyleCastExpr().bind("cast"), ASTCtx));
+ ASSERT_THAT(Cast, NotNull());
+ ASSERT_EQ(Cast->getCastKind(), CK_ConstructorConversion);
+
+ auto &DLoc = getLocForDecl<StorageLocation>(ASTCtx, Env, "D");
+ auto &BLoc = getLocForDecl<StorageLocation>(ASTCtx, Env, "B");
+ EXPECT_NE(&BLoc, &DLoc);
+ });
+}
+
+TEST(TransferTest, UserDefinedConversion) {
+ std::string Code = R"cc(
+ struct To {};
+ struct From {
+ operator To();
+ };
+ void target(From F) {
+ To T = (To)F;
+ // [[p]]
+ }
+)cc";
+ runDataflow(
+ Code,
+ [](const llvm::StringMap<DataflowAnalysisState<NoopLattice>> &Results,
+ ASTContext &ASTCtx) {
+ ASSERT_THAT(Results.keys(), UnorderedElementsAre("p"));
+ const Environment &Env = getEnvironmentAtAnnotation(Results, "p");
+
+ auto *Cast = ast_matchers::selectFirst<ImplicitCastExpr>(
+ "cast", ast_matchers::match(
+ ast_matchers::implicitCastExpr().bind("cast"), ASTCtx));
+ ASSERT_THAT(Cast, NotNull());
+ ASSERT_EQ(Cast->getCastKind(), CK_UserDefinedConversion);
+
+ auto &FLoc = getLocForDecl<StorageLocation>(ASTCtx, Env, "F");
+ auto &TLoc = getLocForDecl<StorageLocation>(ASTCtx, Env, "T");
+ EXPECT_NE(&TLoc, &FLoc);
+ });
+}
+
+TEST(TransferTest, ImplicitUncheckedDerivedToBaseCast) {
+ std::string Code = R"cc(
+ struct Base {
+ void method();
+ };
+ struct Derived : public Base {};
+ void target(Derived D) {
+ D.method();
+ // [[p]]
+ }
+)cc";
+ runDataflow(
+ Code,
+ [](const llvm::StringMap<DataflowAnalysisState<NoopLattice>> &Results,
+ ASTContext &ASTCtx) {
+ ASSERT_THAT(Results.keys(), UnorderedElementsAre("p"));
+ const Environment &Env = getEnvironmentAtAnnotation(Results, "p");
+
+ auto *Cast = ast_matchers::selectFirst<ImplicitCastExpr>(
+ "cast", ast_matchers::match(
+ ast_matchers::implicitCastExpr().bind("cast"), ASTCtx));
+ ASSERT_THAT(Cast, NotNull());
+ ASSERT_EQ(Cast->getCastKind(), CK_UncheckedDerivedToBase);
+
+ auto &DLoc = getLocForDecl<StorageLocation>(ASTCtx, Env, "D");
+ EXPECT_EQ(Env.getStorageLocation(*Cast), &DLoc);
+ });
+}
+
+TEST(TransferTest, ImplicitDerivedToBaseCast) {
+ std::string Code = R"cc(
+ struct Base {};
+ struct Derived : public Base {};
+ void target() {
+ Base* B = new Derived();
+ // [[p]]
+ }
+)cc";
+ runDataflow(
+ Code,
+ [](const llvm::StringMap<DataflowAnalysisState<NoopLattice>> &Results,
+ ASTContext &ASTCtx) {
+ ASSERT_THAT(Results.keys(), UnorderedElementsAre("p"));
+ const Environment &Env = getEnvironmentAtAnnotation(Results, "p");
+
+ auto *Cast = ast_matchers::selectFirst<ImplicitCastExpr>(
+ "cast", ast_matchers::match(
+ ast_matchers::implicitCastExpr().bind("cast"), ASTCtx));
+ ASSERT_THAT(Cast, NotNull());
+ ASSERT_EQ(Cast->getCastKind(), CK_DerivedToBase);
+
+ auto *New = ast_matchers::selectFirst<CXXNewExpr>(
+ "new", ast_matchers::match(ast_matchers::cxxNewExpr().bind("new"),
+ ASTCtx));
+ ASSERT_THAT(New, NotNull());
+
+ EXPECT_EQ(Env.getValue(*Cast), Env.getValue(*New));
+ });
+}
+
+TEST(TransferTest, ReinterpretCast) {
+ std::string Code = R"cc(
+ struct S {
+ int I;
+ };
+
+ void target(unsigned char* Bytes) {
+ S& SRef = reinterpret_cast<S&>(Bytes);
+ SRef.I;
+ S* SPtr = reinterpret_cast<S*>(Bytes);
+ SPtr->I;
+ // [[p]]
+ }
+ )cc";
+ runDataflow(Code, [](const llvm::StringMap<DataflowAnalysisState<NoopLattice>>
+ &Results,
+ ASTContext &ASTCtx) {
+ ASSERT_THAT(Results.keys(), UnorderedElementsAre("p"));
+ const Environment &Env = getEnvironmentAtAnnotation(Results, "p");
+ const ValueDecl *I = findValueDecl(ASTCtx, "I");
+ ASSERT_THAT(I, NotNull());
+
+ // No particular value is modeled for I, but for both casts the fields of
+ // S are modeled.
+
+ {
+ auto &Loc = getLocForDecl<RecordStorageLocation>(ASTCtx, Env, "SRef");
+ std::vector<const ValueDecl *> Children;
+ for (const auto &Entry : Loc.children()) {
+ Children.push_back(Entry.getFirst());
+ }
+
+ EXPECT_THAT(Children, UnorderedElementsAre(I));
+ }
+
+ {
+ auto &Loc = cast<RecordStorageLocation>(
+ getValueForDecl<PointerValue>(ASTCtx, Env, "SPtr").getPointeeLoc());
+ std::vector<const ValueDecl *> Children;
+ for (const auto &Entry : Loc.children()) {
+ Children.push_back(Entry.getFirst());
+ }
+
+ EXPECT_THAT(Children, UnorderedElementsAre(I));
+ }
+ });
+}
+
TEST(TransferTest, IntegralCast) {
std::string Code = R"(
void target(int Foo) {
diff --git a/clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp b/clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp
index 9fb7beb..d1dd4ff 100644
--- a/clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp
@@ -693,6 +693,80 @@ TEST_F(NoreturnDestructorTest, ConditionalOperatorNestedBranchReturns) {
// FIXME: Called functions at point `p` should contain only "foo".
}
+class AnalyzerNoreturnTest : public Test {
+protected:
+ template <typename Matcher>
+ void runDataflow(llvm::StringRef Code, Matcher Expectations) {
+ tooling::FileContentMappings FilesContents;
+ FilesContents.push_back(
+ std::make_pair<std::string, std::string>("noreturn_test_defs.h", R"(
+ void assertionHandler() __attribute__((analyzer_noreturn));
+
+ void trap() {}
+ )"));
+
+ ASSERT_THAT_ERROR(
+ test::checkDataflow<FunctionCallAnalysis>(
+ AnalysisInputs<FunctionCallAnalysis>(
+ Code, ast_matchers::hasName("target"),
+ [](ASTContext &C, Environment &) {
+ return FunctionCallAnalysis(C);
+ })
+ .withASTBuildArgs({"-fsyntax-only", "-std=c++17"})
+ .withASTBuildVirtualMappedFiles(std::move(FilesContents)),
+ /*VerifyResults=*/
+ [&Expectations](
+ const llvm::StringMap<
+ DataflowAnalysisState<FunctionCallLattice>> &Results,
+ const AnalysisOutputs &) {
+ EXPECT_THAT(Results, Expectations);
+ }),
+ llvm::Succeeded());
+ }
+};
+
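+// A call to a function annotated `analyzer_noreturn` should terminate the
+// analyzed path, so program points after it are unreachable and produce no
+// results. `CanonicalDeclCallCheck` exercises the case where the attribute
+// appears only on another redeclaration of the callee.
+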
+TEST_F(AnalyzerNoreturnTest, Breathing) {
+ std::string Code = R"(
+ #include "noreturn_test_defs.h"
+
+ void target() {
+ trap();
+ // [[p]]
+ }
+ )";
+ runDataflow(Code, UnorderedElementsAre(IsStringMapEntry(
+ "p", HoldsFunctionCallLattice(HasCalledFunctions(
+ UnorderedElementsAre("trap"))))));
+}
+
+TEST_F(AnalyzerNoreturnTest, DirectNoReturnCall) {
+ std::string Code = R"(
+ #include "noreturn_test_defs.h"
+
+ void target() {
+ assertionHandler();
+ trap();
+ // [[p]]
+ }
+ )";
+ runDataflow(Code, IsEmpty());
+}
+
+TEST_F(AnalyzerNoreturnTest, CanonicalDeclCallCheck) {
+ std::string Code = R"(
+ #include "noreturn_test_defs.h"
+
+ extern void assertionHandler();
+
+ void target() {
+ assertionHandler();
+ trap();
+ // [[p]]
+ }
+ )";
+ runDataflow(Code, IsEmpty());
+}
+
// Models an analysis that uses flow conditions.
class SpecialBoolAnalysis final
: public DataflowAnalysis<SpecialBoolAnalysis, NoopLattice> {
diff --git a/clang/unittests/Analysis/LifetimeSafetyTest.cpp b/clang/unittests/Analysis/LifetimeSafetyTest.cpp
index 7cd679e..c8d88b4 100644
--- a/clang/unittests/Analysis/LifetimeSafetyTest.cpp
+++ b/clang/unittests/Analysis/LifetimeSafetyTest.cpp
@@ -33,7 +33,9 @@ public:
)";
FullCode += Code.str();
- AST = std::make_unique<clang::TestAST>(FullCode);
+ Inputs = TestInputs(FullCode);
+ Inputs.Language = TestLanguage::Lang_CXX20;
+ AST = std::make_unique<clang::TestAST>(Inputs);
ASTCtx = &AST->context();
// Find the target function using AST matchers.
@@ -51,7 +53,7 @@ public:
BuildOptions.AddTemporaryDtors = true;
// Run the main analysis.
- Analysis = std::make_unique<LifetimeSafetyAnalysis>(*AnalysisCtx);
+ Analysis = std::make_unique<LifetimeSafetyAnalysis>(*AnalysisCtx, nullptr);
Analysis->run();
AnnotationToPointMap = Analysis->getTestPoints();
@@ -70,6 +72,7 @@ public:
}
private:
+ TestInputs Inputs;
std::unique_ptr<TestAST> AST;
ASTContext *ASTCtx = nullptr;
std::unique_ptr<AnalysisDeclContext> AnalysisCtx;
@@ -118,11 +121,13 @@ public:
return Analysis.getLoansAtPoint(OID, PP);
}
- std::optional<LoanSet> getExpiredLoansAtPoint(llvm::StringRef Annotation) {
+ std::optional<llvm::DenseSet<LoanID>>
+ getExpiredLoansAtPoint(llvm::StringRef Annotation) {
ProgramPoint PP = Runner.getProgramPoint(Annotation);
if (!PP)
return std::nullopt;
- return Analysis.getExpiredLoansAtPoint(PP);
+ auto Expired = Analysis.getExpiredLoansAtPoint(PP);
+ return llvm::DenseSet<LoanID>{Expired.begin(), Expired.end()};
}
private:
diff --git a/clang/unittests/CodeGen/CodeGenExternalTest.cpp b/clang/unittests/CodeGen/CodeGenExternalTest.cpp
index 0081588..be3be1474 100644
--- a/clang/unittests/CodeGen/CodeGenExternalTest.cpp
+++ b/clang/unittests/CodeGen/CodeGenExternalTest.cpp
@@ -172,6 +172,7 @@ static void test_codegen_fns(MyASTConsumer *my) {
bool mytest_struct_ok = false;
CodeGen::CodeGenModule &CGM = my->Builder->CGM();
+ const ASTContext &Ctx = my->toplevel_decls.front()->getASTContext();
for (auto decl : my->toplevel_decls ) {
if (FunctionDecl *fd = dyn_cast<FunctionDecl>(decl)) {
@@ -189,9 +190,7 @@ static void test_codegen_fns(MyASTConsumer *my) {
if (rd->getName() == "mytest_struct") {
RecordDecl *def = rd->getDefinition();
ASSERT_TRUE(def != NULL);
- const clang::Type *clangTy = rd->getCanonicalDecl()->getTypeForDecl();
- ASSERT_TRUE(clangTy != NULL);
- QualType qType = clangTy->getCanonicalTypeInternal();
+ CanQualType qType = Ctx.getCanonicalTagType(rd);
// Check convertTypeForMemory
llvm::Type *llvmTy = CodeGen::convertTypeForMemory(CGM, qType);
diff --git a/clang/unittests/Format/AlignBracketsTest.cpp b/clang/unittests/Format/AlignBracketsTest.cpp
new file mode 100644
index 0000000..c4380ae
--- /dev/null
+++ b/clang/unittests/Format/AlignBracketsTest.cpp
@@ -0,0 +1,784 @@
+//===- unittest/Format/AlignBracketsTest.cpp ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FormatTestBase.h"
+
+#define DEBUG_TYPE "align-brackets-test"
+
+namespace clang {
+namespace format {
+namespace test {
+namespace {
+
+class AlignBracketsTest : public FormatTestBase {};
+
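+// Tests for `AlignAfterOpenBracket` and the options that interact with it
+// (`AllowAllArgumentsOnNextLine`, `BinPackParameters`, `BinPackArguments`,
+// and `AlignOperands`).
+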
+TEST_F(AlignBracketsTest, AlignsAfterOpenBracket) {
+ verifyFormat(
+ "void aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaa aaaaaaaa,\n"
+ " aaaaaaaaa aaaaaaa) {}");
+ verifyFormat(
+ "SomeLongVariableName->someVeryLongFunctionName(aaaaaaaaaaa aaaaaaaaa,\n"
+ " aaaaaaaaaaa aaaaaaaaa);");
+ verifyFormat(
+ "SomeLongVariableName->someFunction(foooooooo(aaaaaaaaaaaaaaa,\n"
+ " aaaaaaaaaaaaaaaaaaaaa));");
+ FormatStyle Style = getLLVMStyle();
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
+ verifyFormat("void aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(\n"
+ " aaaaaaaaaaa aaaaaaaa, aaaaaaaaa aaaaaaa) {}",
+ Style);
+ verifyFormat("SomeLongVariableName->someVeryLongFunctionName(\n"
+ " aaaaaaaaaaa aaaaaaaaa, aaaaaaaaaaa aaaaaaaaa);",
+ Style);
+ verifyFormat("SomeLongVariableName->someFunction(\n"
+ " foooooooo(aaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaaaaaaa));",
+ Style);
+ verifyFormat(
+ "void aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaa aaaaaaaa,\n"
+ " aaaaaaaaa aaaaaaa, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa) {}",
+ Style);
+ verifyFormat(
+ "SomeLongVariableName->someVeryLongFunctionName(aaaaaaaaaaa aaaaaaaaa,\n"
+ " aaaaaaaaaaa aaaaaaaaa, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa);",
+ Style);
+ verifyFormat(
+ "SomeLongVariableName->someFunction(foooooooo(aaaaaaaaaaaaaaa,\n"
+ " aaaaaaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa));",
+ Style);
+
+ verifyFormat("bbbbbbbbbbbb(aaaaaaaaaaaaaaaaaaaaaaaa, //\n"
+ " ccccccc(aaaaaaaaaaaaaaaaa, //\n"
+ " b));",
+ Style);
+
+ Style.ColumnLimit = 30;
+ verifyFormat("for (int foo = 0; foo < FOO;\n"
+ " ++foo) {\n"
+ " bar(foo);\n"
+ "}",
+ Style);
+ Style.ColumnLimit = 80;
+
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
+ Style.BinPackArguments = false;
+ Style.BinPackParameters = FormatStyle::BPPS_OnePerLine;
+ verifyFormat("void aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(\n"
+ " aaaaaaaaaaa aaaaaaaa,\n"
+ " aaaaaaaaa aaaaaaa,\n"
+ " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa) {}",
+ Style);
+ verifyFormat("SomeLongVariableName->someVeryLongFunctionName(\n"
+ " aaaaaaaaaaa aaaaaaaaa,\n"
+ " aaaaaaaaaaa aaaaaaaaa,\n"
+ " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa);",
+ Style);
+ verifyFormat("SomeLongVariableName->someFunction(foooooooo(\n"
+ " aaaaaaaaaaaaaaa,\n"
+ " aaaaaaaaaaaaaaaaaaaaa,\n"
+ " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa));",
+ Style);
+ verifyFormat(
+ "aaaaaaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaaaaaa(\n"
+ " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)));",
+ Style);
+ verifyFormat(
+ "aaaaaaaaaaaaaaaaaaaaaaaa(aaaaaaaaaa.aaaaaaaaaa(\n"
+ " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)));",
+ Style);
+ verifyFormat(
+ "aaaaaaaaaaaaaaaaaaaaaaaa(\n"
+ " aaaaaaaaaaaaaaaaaaaaa(\n"
+ " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)),\n"
+ " aaaaaaaaaaaaaaaa);",
+ Style);
+ verifyFormat(
+ "aaaaaaaaaaaaaaaaaaaaaaaa(\n"
+ " aaaaaaaaaaaaaaaaaaaaa(\n"
+ " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)) &&\n"
+ " aaaaaaaaaaaaaaaa);",
+ Style);
+ verifyFormat(
+ "fooooooooooo(new BARRRRRRRRR(\n"
+ " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXZZZZZZZZZZZZZZZZZZZZZZZZZ()));",
+ Style);
+ verifyFormat(
+ "fooooooooooo(::new BARRRRRRRRR(\n"
+ " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXZZZZZZZZZZZZZZZZZZZZZZZZZ()));",
+ Style);
+ verifyFormat(
+ "fooooooooooo(new FOO::BARRRR(\n"
+ " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXZZZZZZZZZZZZZZZZZZZZZZZZZ()));",
+ Style);
+
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
+ Style.BinPackArguments = false;
+ Style.BinPackParameters = FormatStyle::BPPS_OnePerLine;
+ verifyFormat("void aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(\n"
+ " aaaaaaaaaaa aaaaaaaa,\n"
+ " aaaaaaaaa aaaaaaa,\n"
+ " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n"
+ ") {}",
+ Style);
+ verifyFormat("SomeLongVariableName->someVeryLongFunctionName(\n"
+ " aaaaaaaaaaa aaaaaaaaa,\n"
+ " aaaaaaaaaaa aaaaaaaaa,\n"
+ " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n"
+ ");",
+ Style);
+ verifyFormat("SomeLongVariableName->someFunction(foooooooo(\n"
+ " aaaaaaaaaaaaaaa,\n"
+ " aaaaaaaaaaaaaaaaaaaaa,\n"
+ " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n"
+ "));",
+ Style);
+ verifyFormat("aaaaaaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaaaaaa(\n"
+ " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)\n"
+ "));",
+ Style);
+ verifyFormat("aaaaaaaaaaaaaaaaaaaaaaaa(aaaaaaaaaa.aaaaaaaaaa(\n"
+ " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)\n"
+ "));",
+ Style);
+ verifyFormat(
+ "aaaaaaaaaaaaaaaaaaaaaaaa(\n"
+ " aaaaaaaaaaaaaaaaaaaaa(\n"
+ " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)\n"
+ " ),\n"
+ " aaaaaaaaaaaaaaaa\n"
+ ");",
+ Style);
+ verifyFormat(
+ "aaaaaaaaaaaaaaaaaaaaaaaa(\n"
+ " aaaaaaaaaaaaaaaaaaaaa(\n"
+ " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)\n"
+ " ) &&\n"
+ " aaaaaaaaaaaaaaaa\n"
+ ");",
+ Style);
+ verifyFormat("void foo(\n"
+ " void (*foobarpntr)(\n"
+ " aaaaaaaaaaaaaaaaaa *,\n"
+ " bbbbbbbbbbbbbb *,\n"
+ " cccccccccccccccccccc *,\n"
+ " dddddddddddddddddd *\n"
+ " )\n"
+ ");",
+ Style);
+ verifyFormat("aaaaaaa<bbbbbbbb> const aaaaaaaaaa{\n"
+ " aaaaaaaaaaaaa(aaaaaaaaaaa, aaaaaaaaaaaaaaaa)\n"
+ "};",
+ Style);
+
+ verifyFormat("bool aaaaaaaaaaaaaaaaaaaaaaaaaaa(\n"
+ " const bool &aaaaaaaaa, const void *aaaaaaaaaa\n"
+ ") const {\n"
+ " return true;\n"
+ "}",
+ Style);
+ verifyFormat("bool aaaaaaaaaaaaaaaaaaaaaaaa(\n"
+ " const bool &aaaaaaaaaa, const void *aaaaaaaaaa\n"
+ ") const;",
+ Style);
+ verifyFormat("void aaaaaaaaa(\n"
+ " int aaaaaa, int bbbbbb, int cccccc, int dddddddddd\n"
+ ") const noexcept -> std::vector<of_very_long_type>;",
+ Style);
+ verifyFormat(
+ "x = aaaaaaaaaaaaaaa(\n"
+ " \"a aaaaaaa aaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaa aaaaaaaaaaaaa\"\n"
+ ");",
+ Style);
+ Style.ColumnLimit = 60;
+ verifyFormat("auto lambda =\n"
+ " [&b](\n"
+ " auto aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n"
+ " ) {};",
+ Style);
+ verifyFormat("aaaaaaaaaaaaaaaaaaaaaaaa(\n"
+ " &bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n"
+ ");",
+ Style);
+}
+
+TEST_F(AlignBracketsTest, AlignAfterOpenBracketBlockIndent) {
+ auto Style = getLLVMStyle();
+
+ StringRef Short = "functionCall(paramA, paramB, paramC);\n"
+ "void functionDecl(int a, int b, int c);";
+
+ StringRef Medium = "functionCall(paramA, paramB, paramC, paramD, paramE, "
+ "paramF, paramG, paramH, paramI);\n"
+ "void functionDecl(int argumentA, int argumentB, int "
+ "argumentC, int argumentD, int argumentE);";
+
+ verifyFormat(Short, Style);
+
+ StringRef NoBreak = "functionCall(paramA, paramB, paramC, paramD, paramE, "
+ "paramF, paramG, paramH,\n"
+ " paramI);\n"
+ "void functionDecl(int argumentA, int argumentB, int "
+ "argumentC, int argumentD,\n"
+ " int argumentE);";
+
+ verifyFormat(NoBreak, Medium, Style);
+ verifyFormat(NoBreak,
+ "functionCall(\n"
+ " paramA,\n"
+ " paramB,\n"
+ " paramC,\n"
+ " paramD,\n"
+ " paramE,\n"
+ " paramF,\n"
+ " paramG,\n"
+ " paramH,\n"
+ " paramI\n"
+ ");\n"
+ "void functionDecl(\n"
+ " int argumentA,\n"
+ " int argumentB,\n"
+ " int argumentC,\n"
+ " int argumentD,\n"
+ " int argumentE\n"
+ ");",
+ Style);
+
+ verifyFormat("outerFunctionCall(nestedFunctionCall(argument1),\n"
+ " nestedLongFunctionCall(argument1, "
+ "argument2, argument3,\n"
+ " argument4, "
+ "argument5));",
+ Style);
+
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
+
+ verifyFormat(Short, Style);
+ verifyFormat(
+ "functionCall(\n"
+ " paramA, paramB, paramC, paramD, paramE, paramF, paramG, paramH, "
+ "paramI\n"
+ ");\n"
+ "void functionDecl(\n"
+ " int argumentA, int argumentB, int argumentC, int argumentD, int "
+ "argumentE\n"
+ ");",
+ Medium, Style);
+
+ Style.AllowAllArgumentsOnNextLine = false;
+ Style.AllowAllParametersOfDeclarationOnNextLine = false;
+
+ verifyFormat(Short, Style);
+ verifyFormat(
+ "functionCall(\n"
+ " paramA, paramB, paramC, paramD, paramE, paramF, paramG, paramH, "
+ "paramI\n"
+ ");\n"
+ "void functionDecl(\n"
+ " int argumentA, int argumentB, int argumentC, int argumentD, int "
+ "argumentE\n"
+ ");",
+ Medium, Style);
+
+ Style.BinPackArguments = false;
+ Style.BinPackParameters = FormatStyle::BPPS_OnePerLine;
+
+ verifyFormat(Short, Style);
+
+ verifyFormat("functionCall(\n"
+ " paramA,\n"
+ " paramB,\n"
+ " paramC,\n"
+ " paramD,\n"
+ " paramE,\n"
+ " paramF,\n"
+ " paramG,\n"
+ " paramH,\n"
+ " paramI\n"
+ ");\n"
+ "void functionDecl(\n"
+ " int argumentA,\n"
+ " int argumentB,\n"
+ " int argumentC,\n"
+ " int argumentD,\n"
+ " int argumentE\n"
+ ");",
+ Medium, Style);
+
+ verifyFormat("outerFunctionCall(\n"
+ " nestedFunctionCall(argument1),\n"
+ " nestedLongFunctionCall(\n"
+ " argument1,\n"
+ " argument2,\n"
+ " argument3,\n"
+ " argument4,\n"
+ " argument5\n"
+ " )\n"
+ ");",
+ Style);
+
+ verifyFormat("int a = (int)b;", Style);
+ verifyFormat("int a = (int)b;",
+ "int a = (\n"
+ " int\n"
+ ") b;",
+ Style);
+
+ verifyFormat("return (true);", Style);
+ verifyFormat("return (true);",
+ "return (\n"
+ " true\n"
+ ");",
+ Style);
+
+ verifyFormat("void foo();", Style);
+ verifyFormat("void foo();",
+ "void foo(\n"
+ ");",
+ Style);
+
+ verifyFormat("void foo() {}", Style);
+ verifyFormat("void foo() {}",
+ "void foo(\n"
+ ") {\n"
+ "}",
+ Style);
+
+ verifyFormat("auto string = std::string();", Style);
+ verifyFormat("auto string = std::string();",
+ "auto string = std::string(\n"
+ ");",
+ Style);
+
+ verifyFormat("void (*functionPointer)() = nullptr;", Style);
+ verifyFormat("void (*functionPointer)() = nullptr;",
+ "void (\n"
+ " *functionPointer\n"
+ ")\n"
+ "(\n"
+ ") = nullptr;",
+ Style);
+}
+
+TEST_F(AlignBracketsTest, AlignAfterOpenBracketBlockIndentIfStatement) {
+ auto Style = getLLVMStyle();
+
+ verifyFormat("if (foo()) {\n"
+ " return;\n"
+ "}",
+ Style);
+
+ verifyFormat("if (quiteLongArg !=\n"
+ " (alsoLongArg - 1)) { // ABC is a very longgggggggggggg "
+ "comment\n"
+ " return;\n"
+ "}",
+ Style);
+
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
+
+ verifyFormat("if (foo()) {\n"
+ " return;\n"
+ "}",
+ Style);
+
+ verifyFormat("if (quiteLongArg !=\n"
+ " (alsoLongArg - 1)) { // ABC is a very longgggggggggggg "
+ "comment\n"
+ " return;\n"
+ "}",
+ Style);
+
+ verifyFormat("void foo() {\n"
+ " if (camelCaseName < alsoLongName ||\n"
+ " anotherEvenLongerName <=\n"
+ " thisReallyReallyReallyReallyReallyReallyLongerName ||"
+ "\n"
+ " otherName < thisLastName) {\n"
+ " return;\n"
+ " } else if (quiteLongName < alsoLongName ||\n"
+ " anotherEvenLongerName <=\n"
+ " thisReallyReallyReallyReallyReallyReallyLonger"
+ "Name ||\n"
+ " otherName < thisLastName) {\n"
+ " return;\n"
+ " }\n"
+ "}",
+ Style);
+
+ Style.ContinuationIndentWidth = 2;
+ verifyFormat("void foo() {\n"
+ " if (ThisIsRatherALongIfClause && thatIExpectToBeBroken ||\n"
+ " ontoMultipleLines && whenFormattedCorrectly) {\n"
+ " if (false) {\n"
+ " return;\n"
+ " } else if (thisIsRatherALongIfClause && "
+ "thatIExpectToBeBroken ||\n"
+ " ontoMultipleLines && whenFormattedCorrectly) {\n"
+ " return;\n"
+ " }\n"
+ " }\n"
+ "}",
+ Style);
+}
+
+TEST_F(AlignBracketsTest, AlignAfterOpenBracketBlockIndentForStatement) {
+ auto Style = getLLVMStyle();
+
+ verifyFormat("for (int i = 0; i < 5; ++i) {\n"
+ " doSomething();\n"
+ "}",
+ Style);
+
+ verifyFormat("for (int myReallyLongCountVariable = 0; "
+ "myReallyLongCountVariable < count;\n"
+ " myReallyLongCountVariable++) {\n"
+ " doSomething();\n"
+ "}",
+ Style);
+
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
+
+ verifyFormat("for (int i = 0; i < 5; ++i) {\n"
+ " doSomething();\n"
+ "}",
+ Style);
+
+ verifyFormat("for (int myReallyLongCountVariable = 0; "
+ "myReallyLongCountVariable < count;\n"
+ " myReallyLongCountVariable++) {\n"
+ " doSomething();\n"
+ "}",
+ Style);
+}
+
+TEST_F(AlignBracketsTest, AlignAfterOpenBracketBlockIndentInitializers) {
+ auto Style = getLLVMStyleWithColumns(60);
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
+ // Aggregate initialization.
+ verifyFormat("int LooooooooooooooooooooooooongVariable[2] = {\n"
+ " 10000000, 20000000\n"
+ "};",
+ Style);
+ verifyFormat("SomeStruct s{\n"
+ " \"xxxxxxxxxxxxxxxx\", \"yyyyyyyyyyyyyyyy\",\n"
+ " \"zzzzzzzzzzzzzzzz\"\n"
+ "};",
+ Style);
+ // Designated initializers.
+ verifyFormat("int LooooooooooooooooooooooooongVariable[2] = {\n"
+ " [0] = 10000000, [1] = 20000000\n"
+ "};",
+ Style);
+ verifyFormat("SomeStruct s{\n"
+ " .foo = \"xxxxxxxxxxxxx\",\n"
+ " .bar = \"yyyyyyyyyyyyy\",\n"
+ " .baz = \"zzzzzzzzzzzzz\"\n"
+ "};",
+ Style);
+ // List initialization.
+ verifyFormat("SomeStruct s{\n"
+ " \"xxxxxxxxxxxxx\",\n"
+ " \"yyyyyyyyyyyyy\",\n"
+ " \"zzzzzzzzzzzzz\",\n"
+ "};",
+ Style);
+ verifyFormat("SomeStruct{\n"
+ " \"xxxxxxxxxxxxx\",\n"
+ " \"yyyyyyyyyyyyy\",\n"
+ " \"zzzzzzzzzzzzz\",\n"
+ "};",
+ Style);
+ verifyFormat("new SomeStruct{\n"
+ " \"xxxxxxxxxxxxx\",\n"
+ " \"yyyyyyyyyyyyy\",\n"
+ " \"zzzzzzzzzzzzz\",\n"
+ "};",
+ Style);
+ // Member initializer.
+ verifyFormat("class SomeClass {\n"
+ " SomeStruct s{\n"
+ " \"xxxxxxxxxxxxx\",\n"
+ " \"yyyyyyyyyyyyy\",\n"
+ " \"zzzzzzzzzzzzz\",\n"
+ " };\n"
+ "};",
+ Style);
+ // Constructor member initializer.
+ verifyFormat("SomeClass::SomeClass : strct{\n"
+ " \"xxxxxxxxxxxxx\",\n"
+ " \"yyyyyyyyyyyyy\",\n"
+ " \"zzzzzzzzzzzzz\",\n"
+ " } {}",
+ Style);
+ // Copy initialization.
+ verifyFormat("SomeStruct s = SomeStruct{\n"
+ " \"xxxxxxxxxxxxx\",\n"
+ " \"yyyyyyyyyyyyy\",\n"
+ " \"zzzzzzzzzzzzz\",\n"
+ "};",
+ Style);
+ // Copy list initialization.
+ verifyFormat("SomeStruct s = {\n"
+ " \"xxxxxxxxxxxxx\",\n"
+ " \"yyyyyyyyyyyyy\",\n"
+ " \"zzzzzzzzzzzzz\",\n"
+ "};",
+ Style);
+ // Assignment operand initialization.
+ verifyFormat("s = {\n"
+ " \"xxxxxxxxxxxxx\",\n"
+ " \"yyyyyyyyyyyyy\",\n"
+ " \"zzzzzzzzzzzzz\",\n"
+ "};",
+ Style);
+ // Returned object initialization.
+ verifyFormat("return {\n"
+ " \"xxxxxxxxxxxxx\",\n"
+ " \"yyyyyyyyyyyyy\",\n"
+ " \"zzzzzzzzzzzzz\",\n"
+ "};",
+ Style);
+ // Initializer list.
+ verifyFormat("auto initializerList = {\n"
+ " \"xxxxxxxxxxxxx\",\n"
+ " \"yyyyyyyyyyyyy\",\n"
+ " \"zzzzzzzzzzzzz\",\n"
+ "};",
+ Style);
+ // Function parameter initialization.
+ verifyFormat("func({\n"
+ " \"xxxxxxxxxxxxx\",\n"
+ " \"yyyyyyyyyyyyy\",\n"
+ " \"zzzzzzzzzzzzz\",\n"
+ "});",
+ Style);
+ // Nested init lists.
+ verifyFormat("SomeStruct s = {\n"
+ " {{init1, init2, init3, init4, init5},\n"
+ " {init1, init2, init3, init4, init5}}\n"
+ "};",
+ Style);
+ verifyFormat("SomeStruct s = {\n"
+ " {{\n"
+ " .init1 = 1,\n"
+ " .init2 = 2,\n"
+ " .init3 = 3,\n"
+ " .init4 = 4,\n"
+ " .init5 = 5,\n"
+ " },\n"
+ " {init1, init2, init3, init4, init5}}\n"
+ "};",
+ Style);
+ verifyFormat("SomeArrayT a[3] = {\n"
+ " {\n"
+ " foo,\n"
+ " bar,\n"
+ " },\n"
+ " {\n"
+ " foo,\n"
+ " bar,\n"
+ " },\n"
+ " SomeArrayT{},\n"
+ "};",
+ Style);
+ verifyFormat("SomeArrayT a[3] = {\n"
+ " {foo},\n"
+ " {\n"
+ " {\n"
+ " init1,\n"
+ " init2,\n"
+ " init3,\n"
+ " },\n"
+ " {\n"
+ " init1,\n"
+ " init2,\n"
+ " init3,\n"
+ " },\n"
+ " },\n"
+ " {baz},\n"
+ "};",
+ Style);
+}
+
+TEST_F(AlignBracketsTest, AllowAllArgumentsOnNextLineDontAlign) {
+ // Check that AllowAllArgumentsOnNextLine is respected for both BAS_DontAlign
+ // and BAS_Align.
+ FormatStyle Style = getLLVMStyleWithColumns(35);
+ StringRef Input = "functionCall(paramA, paramB, paramC);\n"
+ "void functionDecl(int A, int B, int C);";
+ Style.AllowAllArgumentsOnNextLine = false;
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
+ verifyFormat(StringRef("functionCall(paramA, paramB,\n"
+ " paramC);\n"
+ "void functionDecl(int A, int B,\n"
+ " int C);"),
+ Input, Style);
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_Align;
+ verifyFormat(StringRef("functionCall(paramA, paramB,\n"
+ " paramC);\n"
+ "void functionDecl(int A, int B,\n"
+ " int C);"),
+ Input, Style);
+ // However, BAS_AlwaysBreak and BAS_BlockIndent should take precedence over
+ // AllowAllArgumentsOnNextLine.
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
+ verifyFormat(StringRef("functionCall(\n"
+ " paramA, paramB, paramC);\n"
+ "void functionDecl(\n"
+ " int A, int B, int C);"),
+ Input, Style);
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
+ verifyFormat("functionCall(\n"
+ " paramA, paramB, paramC\n"
+ ");\n"
+ "void functionDecl(\n"
+ " int A, int B, int C\n"
+ ");",
+ Input, Style);
+
+ // When AllowAllArgumentsOnNextLine is set, we prefer breaking before the
+ // first argument.
+ Style.AllowAllArgumentsOnNextLine = true;
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
+ verifyFormat(StringRef("functionCall(\n"
+ " paramA, paramB, paramC);\n"
+ "void functionDecl(\n"
+ " int A, int B, int C);"),
+ Input, Style);
+ // It wouldn't fit on one line with aligned parameters, so this setting
+ // doesn't change anything for BAS_Align.
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_Align;
+ verifyFormat(StringRef("functionCall(paramA, paramB,\n"
+ " paramC);\n"
+ "void functionDecl(int A, int B,\n"
+ " int C);"),
+ Input, Style);
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
+ verifyFormat(StringRef("functionCall(\n"
+ " paramA, paramB, paramC);\n"
+ "void functionDecl(\n"
+ " int A, int B, int C);"),
+ Input, Style);
+}
+
+TEST_F(AlignBracketsTest, FormatsDeclarationBreakAlways) {
+ FormatStyle BreakAlways = getGoogleStyle();
+ BreakAlways.BinPackParameters = FormatStyle::BPPS_AlwaysOnePerLine;
+ verifyFormat("void f(int a,\n"
+ " int b);",
+ BreakAlways);
+ verifyFormat("void f(int aaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
+ " int bbbbbbbbbbbbbbbbbbbbbbbbb,\n"
+ " int cccccccccccccccccccccccc);",
+ BreakAlways);
+
+ // Ensure AlignAfterOpenBracket interacts correctly with BinPackParameters set
+ // to BPPS_AlwaysOnePerLine.
+ BreakAlways.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
+ verifyFormat(
+ "void someLongFunctionName(\n"
+ " int aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
+ " int b);",
+ BreakAlways);
+ BreakAlways.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
+ verifyFormat(
+ "void someLongFunctionName(\n"
+ " int aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
+ " int b\n"
+ ");",
+ BreakAlways);
+}
+
+TEST_F(AlignBracketsTest, FormatsDefinitionBreakAlways) {
+ FormatStyle BreakAlways = getGoogleStyle();
+ BreakAlways.BinPackParameters = FormatStyle::BPPS_AlwaysOnePerLine;
+ verifyFormat("void f(int a,\n"
+ " int b) {\n"
+ " f(a, b);\n"
+ "}",
+ BreakAlways);
+
+ // Ensure BinPackArguments interacts correctly when BinPackParameters is set to
+ // BPPS_AlwaysOnePerLine.
+ verifyFormat("void f(int aaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
+ " int bbbbbbbbbbbbbbbbbbbbbbbbb,\n"
+ " int cccccccccccccccccccccccc) {\n"
+ " f(aaaaaaaaaaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbbbbbbbbbbb,\n"
+ " cccccccccccccccccccccccc);\n"
+ "}",
+ BreakAlways);
+ BreakAlways.BinPackArguments = false;
+ verifyFormat("void f(int aaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
+ " int bbbbbbbbbbbbbbbbbbbbbbbbb,\n"
+ " int cccccccccccccccccccccccc) {\n"
+ " f(aaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
+ " bbbbbbbbbbbbbbbbbbbbbbbbb,\n"
+ " cccccccccccccccccccccccc);\n"
+ "}",
+ BreakAlways);
+
+ // Ensure BreakFunctionDefinitionParameters interacts correctly when
+ // BinPackParameters is set to BPPS_AlwaysOnePerLine.
+ BreakAlways.BreakFunctionDefinitionParameters = true;
+ verifyFormat("void f(\n"
+ " int a,\n"
+ " int b) {\n"
+ " f(a, b);\n"
+ "}",
+ BreakAlways);
+ BreakAlways.BreakFunctionDefinitionParameters = false;
+
+ // Ensure AlignAfterOpenBracket interacts correctly with BinPackParameters set
+ // to BPPS_AlwaysOnePerLine.
+ BreakAlways.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
+ verifyFormat(
+ "void someLongFunctionName(\n"
+ " int aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
+ " int b) {\n"
+ " someLongFunctionName(\n"
+ " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, b);\n"
+ "}",
+ BreakAlways);
+ BreakAlways.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
+ verifyFormat(
+ "void someLongFunctionName(\n"
+ " int aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
+ " int b\n"
+ ") {\n"
+ " someLongFunctionName(\n"
+ " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, b\n"
+ " );\n"
+ "}",
+ BreakAlways);
+}
+
+TEST_F(AlignBracketsTest, ParenthesesAndOperandAlignment) {
+ FormatStyle Style = getLLVMStyleWithColumns(40);
+ verifyFormat("int a = f(aaaaaaaaaaaaaaaaaaaaaa &&\n"
+ " bbbbbbbbbbbbbbbbbbbbbb);",
+ Style);
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_Align;
+ Style.AlignOperands = FormatStyle::OAS_DontAlign;
+ verifyFormat("int a = f(aaaaaaaaaaaaaaaaaaaaaa &&\n"
+ " bbbbbbbbbbbbbbbbbbbbbb);",
+ Style);
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
+ Style.AlignOperands = FormatStyle::OAS_Align;
+ verifyFormat("int a = f(aaaaaaaaaaaaaaaaaaaaaa &&\n"
+ " bbbbbbbbbbbbbbbbbbbbbb);",
+ Style);
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
+ Style.AlignOperands = FormatStyle::OAS_DontAlign;
+ verifyFormat("int a = f(aaaaaaaaaaaaaaaaaaaaaa &&\n"
+ " bbbbbbbbbbbbbbbbbbbbbb);",
+ Style);
+}
+
+} // namespace
+} // namespace test
+} // namespace format
+} // namespace clang
diff --git a/clang/unittests/Format/CMakeLists.txt b/clang/unittests/Format/CMakeLists.txt
index edfc8d7a..5e5a7a0 100644
--- a/clang/unittests/Format/CMakeLists.txt
+++ b/clang/unittests/Format/CMakeLists.txt
@@ -1,6 +1,7 @@
# Format tests have few LLVM and Clang dependencies, so linking it as a
# distinct target enables faster iteration times at low cost.
add_distinct_clang_unittest(FormatTests
+ AlignBracketsTest.cpp
BracesInserterTest.cpp
BracesRemoverTest.cpp
CleanupTest.cpp
@@ -27,6 +28,7 @@ add_distinct_clang_unittest(FormatTests
MacroExpanderTest.cpp
MatchFilePathTest.cpp
NamespaceEndCommentsFixerTest.cpp
+ NumericLiteralInfoTest.cpp
ObjCPropertyAttributeOrderFixerTest.cpp
QualifierFixerTest.cpp
SortImportsTestJS.cpp
diff --git a/clang/unittests/Format/ConfigParseTest.cpp b/clang/unittests/Format/ConfigParseTest.cpp
index 9de3cca..7c993c0 100644
--- a/clang/unittests/Format/ConfigParseTest.cpp
+++ b/clang/unittests/Format/ConfigParseTest.cpp
@@ -200,7 +200,6 @@ TEST(ConfigParseTest, ParsesConfigurationBools) {
CHECK_PARSE_BOOL(RemoveSemicolon);
CHECK_PARSE_BOOL(SkipMacroDefinitionBody);
CHECK_PARSE_BOOL(SpacesInSquareBrackets);
- CHECK_PARSE_BOOL(SpaceInEmptyBlock);
CHECK_PARSE_BOOL(SpacesInContainerLiterals);
CHECK_PARSE_BOOL(SpaceAfterCStyleCast);
CHECK_PARSE_BOOL(SpaceAfterTemplateKeyword);
@@ -688,6 +687,17 @@ TEST(ConfigParseTest, ParsesConfiguration) {
SpaceBeforeParens,
FormatStyle::SBPO_ControlStatementsExceptControlMacros);
+ Style.SpaceInEmptyBraces = FormatStyle::SIEB_Never;
+ CHECK_PARSE("SpaceInEmptyBraces: Always", SpaceInEmptyBraces,
+ FormatStyle::SIEB_Always);
+ CHECK_PARSE("SpaceInEmptyBraces: Block", SpaceInEmptyBraces,
+ FormatStyle::SIEB_Block);
+ CHECK_PARSE("SpaceInEmptyBraces: Never", SpaceInEmptyBraces,
+ FormatStyle::SIEB_Never);
+ // For backward compatibility:
+ CHECK_PARSE("SpaceInEmptyBlock: true", SpaceInEmptyBraces,
+ FormatStyle::SIEB_Block);
+
// For backward compatibility:
Style.SpacesInParens = FormatStyle::SIPO_Never;
Style.SpacesInParensOptions = {};
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index 96cc650..4e9d318 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -4050,6 +4050,10 @@ TEST_F(FormatTest, FormatsBitfields) {
" uchar : 8;\n"
" uchar other;\n"
"};");
+ verifyFormat("struct foo {\n"
+ " uint8_t i_am_a_bit_field_this_long\n"
+ " : struct_with_constexpr::i_am_a_constexpr_lengthhhhh;\n"
+ "};");
FormatStyle Style = getLLVMStyle();
Style.BitFieldColonSpacing = FormatStyle::BFCS_None;
verifyFormat("struct Bitfields {\n"
@@ -7055,7 +7059,7 @@ TEST_F(FormatTest, PutEmptyBlocksIntoOneLine) {
verifyFormat("enum E {};");
verifyFormat("enum E {}");
FormatStyle Style = getLLVMStyle();
- Style.SpaceInEmptyBlock = true;
+ Style.SpaceInEmptyBraces = FormatStyle::SIEB_Block;
verifyFormat("void f() { }", "void f() {}", Style);
Style.AllowShortBlocksOnASingleLine = FormatStyle::SBS_Empty;
verifyFormat("{ }", Style);
@@ -7083,7 +7087,7 @@ TEST_F(FormatTest, PutEmptyBlocksIntoOneLine) {
Style);
Style = getLLVMStyle(FormatStyle::LK_CSharp);
- Style.SpaceInEmptyBlock = true;
+ Style.SpaceInEmptyBraces = FormatStyle::SIEB_Block;
verifyFormat("Event += () => { };", Style);
}
@@ -8047,67 +8051,6 @@ TEST_F(FormatTest, AllowAllArgumentsOnNextLine) {
Style);
}
-TEST_F(FormatTest, AllowAllArgumentsOnNextLineDontAlign) {
- // Check that AllowAllArgumentsOnNextLine is respected for both BAS_DontAlign
- // and BAS_Align.
- FormatStyle Style = getLLVMStyleWithColumns(35);
- StringRef Input = "functionCall(paramA, paramB, paramC);\n"
- "void functionDecl(int A, int B, int C);";
- Style.AllowAllArgumentsOnNextLine = false;
- Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
- verifyFormat(StringRef("functionCall(paramA, paramB,\n"
- " paramC);\n"
- "void functionDecl(int A, int B,\n"
- " int C);"),
- Input, Style);
- Style.AlignAfterOpenBracket = FormatStyle::BAS_Align;
- verifyFormat(StringRef("functionCall(paramA, paramB,\n"
- " paramC);\n"
- "void functionDecl(int A, int B,\n"
- " int C);"),
- Input, Style);
- // However, BAS_AlwaysBreak and BAS_BlockIndent should take precedence over
- // AllowAllArgumentsOnNextLine.
- Style.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
- verifyFormat(StringRef("functionCall(\n"
- " paramA, paramB, paramC);\n"
- "void functionDecl(\n"
- " int A, int B, int C);"),
- Input, Style);
- Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
- verifyFormat("functionCall(\n"
- " paramA, paramB, paramC\n"
- ");\n"
- "void functionDecl(\n"
- " int A, int B, int C\n"
- ");",
- Input, Style);
-
- // When AllowAllArgumentsOnNextLine is set, we prefer breaking before the
- // first argument.
- Style.AllowAllArgumentsOnNextLine = true;
- Style.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
- verifyFormat(StringRef("functionCall(\n"
- " paramA, paramB, paramC);\n"
- "void functionDecl(\n"
- " int A, int B, int C);"),
- Input, Style);
- // It wouldn't fit on one line with aligned parameters so this setting
- // doesn't change anything for BAS_Align.
- Style.AlignAfterOpenBracket = FormatStyle::BAS_Align;
- verifyFormat(StringRef("functionCall(paramA, paramB,\n"
- " paramC);\n"
- "void functionDecl(int A, int B,\n"
- " int C);"),
- Input, Style);
- Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
- verifyFormat(StringRef("functionCall(\n"
- " paramA, paramB, paramC);\n"
- "void functionDecl(\n"
- " int A, int B, int C);"),
- Input, Style);
-}
-
TEST_F(FormatTest, BreakFunctionDefinitionParameters) {
StringRef Input = "void functionDecl(paramA, paramB, paramC);\n"
"void emptyFunctionDefinition() {}\n"
@@ -8614,7 +8557,7 @@ TEST_F(FormatTest, BreaksFunctionDeclarations) {
verifyFormat("extern \"C\" //\n"
" void f();");
- FormatStyle Style = getLLVMStyle();
+ auto Style = getLLVMStyle();
Style.PointerAlignment = FormatStyle::PAS_Left;
verifyFormat("void aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(\n"
" aaaaaaaaaaaaaaaaaaaaaaaaa* const aaaaaaaaaaaa) {}",
@@ -8622,6 +8565,14 @@ TEST_F(FormatTest, BreaksFunctionDeclarations) {
verifyFormat("void aaaaaaa(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa*\n"
" aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa) {}",
Style);
+
+ Style = getLLVMStyleWithColumns(45);
+ Style.PenaltyReturnTypeOnItsOwnLine = 400;
+ verifyFormat("template <bool abool, // a comment\n"
+ " bool anotherbool>\n"
+ "static inline std::pair<size_t, MyCustomType>\n"
+ "myfunc(const char *buf, const char *&err);",
+ Style);
}
TEST_F(FormatTest, DontBreakBeforeQualifiedOperator) {
@@ -9018,97 +8969,6 @@ TEST_F(FormatTest, FormatsOneParameterPerLineIfNecessary) {
NoBinPacking);
}
-TEST_F(FormatTest, FormatsDeclarationBreakAlways) {
- FormatStyle BreakAlways = getGoogleStyle();
- BreakAlways.BinPackParameters = FormatStyle::BPPS_AlwaysOnePerLine;
- verifyFormat("void f(int a,\n"
- " int b);",
- BreakAlways);
- verifyFormat("void f(int aaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
- " int bbbbbbbbbbbbbbbbbbbbbbbbb,\n"
- " int cccccccccccccccccccccccc);",
- BreakAlways);
-
- // Ensure AlignAfterOpenBracket interacts correctly with BinPackParameters set
- // to BPPS_AlwaysOnePerLine.
- BreakAlways.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
- verifyFormat(
- "void someLongFunctionName(\n"
- " int aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
- " int b);",
- BreakAlways);
- BreakAlways.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
- verifyFormat(
- "void someLongFunctionName(\n"
- " int aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
- " int b\n"
- ");",
- BreakAlways);
-}
-
-TEST_F(FormatTest, FormatsDefinitionBreakAlways) {
- FormatStyle BreakAlways = getGoogleStyle();
- BreakAlways.BinPackParameters = FormatStyle::BPPS_AlwaysOnePerLine;
- verifyFormat("void f(int a,\n"
- " int b) {\n"
- " f(a, b);\n"
- "}",
- BreakAlways);
-
- // Ensure BinPackArguments interact correctly when BinPackParameters is set to
- // BPPS_AlwaysOnePerLine.
- verifyFormat("void f(int aaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
- " int bbbbbbbbbbbbbbbbbbbbbbbbb,\n"
- " int cccccccccccccccccccccccc) {\n"
- " f(aaaaaaaaaaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbbbbbbbbbbb,\n"
- " cccccccccccccccccccccccc);\n"
- "}",
- BreakAlways);
- BreakAlways.BinPackArguments = false;
- verifyFormat("void f(int aaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
- " int bbbbbbbbbbbbbbbbbbbbbbbbb,\n"
- " int cccccccccccccccccccccccc) {\n"
- " f(aaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
- " bbbbbbbbbbbbbbbbbbbbbbbbb,\n"
- " cccccccccccccccccccccccc);\n"
- "}",
- BreakAlways);
-
- // Ensure BreakFunctionDefinitionParameters interacts correctly when
- // BinPackParameters is set to BPPS_AlwaysOnePerLine.
- BreakAlways.BreakFunctionDefinitionParameters = true;
- verifyFormat("void f(\n"
- " int a,\n"
- " int b) {\n"
- " f(a, b);\n"
- "}",
- BreakAlways);
- BreakAlways.BreakFunctionDefinitionParameters = false;
-
- // Ensure AlignAfterOpenBracket interacts correctly with BinPackParameters set
- // to BPPS_AlwaysOnePerLine.
- BreakAlways.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
- verifyFormat(
- "void someLongFunctionName(\n"
- " int aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
- " int b) {\n"
- " someLongFunctionName(\n"
- " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, b);\n"
- "}",
- BreakAlways);
- BreakAlways.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
- verifyFormat(
- "void someLongFunctionName(\n"
- " int aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,\n"
- " int b\n"
- ") {\n"
- " someLongFunctionName(\n"
- " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, b\n"
- " );\n"
- "}",
- BreakAlways);
-}
-
TEST_F(FormatTest, AdaptiveOnePerLineFormatting) {
FormatStyle Style = getLLVMStyleWithColumns(15);
Style.ExperimentalAutoDetectBinPacking = true;
@@ -9506,216 +9366,6 @@ TEST_F(FormatTest, AlignsAfterReturn) {
" code == a || code == b;");
}
-TEST_F(FormatTest, AlignsAfterOpenBracket) {
- verifyFormat(
- "void aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaa aaaaaaaa,\n"
- " aaaaaaaaa aaaaaaa) {}");
- verifyFormat(
- "SomeLongVariableName->someVeryLongFunctionName(aaaaaaaaaaa aaaaaaaaa,\n"
- " aaaaaaaaaaa aaaaaaaaa);");
- verifyFormat(
- "SomeLongVariableName->someFunction(foooooooo(aaaaaaaaaaaaaaa,\n"
- " aaaaaaaaaaaaaaaaaaaaa));");
- FormatStyle Style = getLLVMStyle();
- Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
- verifyFormat("void aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(\n"
- " aaaaaaaaaaa aaaaaaaa, aaaaaaaaa aaaaaaa) {}",
- Style);
- verifyFormat("SomeLongVariableName->someVeryLongFunctionName(\n"
- " aaaaaaaaaaa aaaaaaaaa, aaaaaaaaaaa aaaaaaaaa);",
- Style);
- verifyFormat("SomeLongVariableName->someFunction(\n"
- " foooooooo(aaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaaaaaaa));",
- Style);
- verifyFormat(
- "void aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaa aaaaaaaa,\n"
- " aaaaaaaaa aaaaaaa, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa) {}",
- Style);
- verifyFormat(
- "SomeLongVariableName->someVeryLongFunctionName(aaaaaaaaaaa aaaaaaaaa,\n"
- " aaaaaaaaaaa aaaaaaaaa, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa);",
- Style);
- verifyFormat(
- "SomeLongVariableName->someFunction(foooooooo(aaaaaaaaaaaaaaa,\n"
- " aaaaaaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa));",
- Style);
-
- verifyFormat("bbbbbbbbbbbb(aaaaaaaaaaaaaaaaaaaaaaaa, //\n"
- " ccccccc(aaaaaaaaaaaaaaaaa, //\n"
- " b));",
- Style);
-
- Style.ColumnLimit = 30;
- verifyFormat("for (int foo = 0; foo < FOO;\n"
- " ++foo) {\n"
- " bar(foo);\n"
- "}",
- Style);
- Style.ColumnLimit = 80;
-
- Style.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
- Style.BinPackArguments = false;
- Style.BinPackParameters = FormatStyle::BPPS_OnePerLine;
- verifyFormat("void aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(\n"
- " aaaaaaaaaaa aaaaaaaa,\n"
- " aaaaaaaaa aaaaaaa,\n"
- " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa) {}",
- Style);
- verifyFormat("SomeLongVariableName->someVeryLongFunctionName(\n"
- " aaaaaaaaaaa aaaaaaaaa,\n"
- " aaaaaaaaaaa aaaaaaaaa,\n"
- " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa);",
- Style);
- verifyFormat("SomeLongVariableName->someFunction(foooooooo(\n"
- " aaaaaaaaaaaaaaa,\n"
- " aaaaaaaaaaaaaaaaaaaaa,\n"
- " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa));",
- Style);
- verifyFormat(
- "aaaaaaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaaaaaa(\n"
- " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)));",
- Style);
- verifyFormat(
- "aaaaaaaaaaaaaaaaaaaaaaaa(aaaaaaaaaa.aaaaaaaaaa(\n"
- " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)));",
- Style);
- verifyFormat(
- "aaaaaaaaaaaaaaaaaaaaaaaa(\n"
- " aaaaaaaaaaaaaaaaaaaaa(\n"
- " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)),\n"
- " aaaaaaaaaaaaaaaa);",
- Style);
- verifyFormat(
- "aaaaaaaaaaaaaaaaaaaaaaaa(\n"
- " aaaaaaaaaaaaaaaaaaaaa(\n"
- " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)) &&\n"
- " aaaaaaaaaaaaaaaa);",
- Style);
- verifyFormat(
- "fooooooooooo(new BARRRRRRRRR(\n"
- " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXZZZZZZZZZZZZZZZZZZZZZZZZZ()));",
- Style);
- verifyFormat(
- "fooooooooooo(::new BARRRRRRRRR(\n"
- " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXZZZZZZZZZZZZZZZZZZZZZZZZZ()));",
- Style);
- verifyFormat(
- "fooooooooooo(new FOO::BARRRR(\n"
- " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXZZZZZZZZZZZZZZZZZZZZZZZZZ()));",
- Style);
-
- Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
- Style.BinPackArguments = false;
- Style.BinPackParameters = FormatStyle::BPPS_OnePerLine;
- verifyFormat("void aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(\n"
- " aaaaaaaaaaa aaaaaaaa,\n"
- " aaaaaaaaa aaaaaaa,\n"
- " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n"
- ") {}",
- Style);
- verifyFormat("SomeLongVariableName->someVeryLongFunctionName(\n"
- " aaaaaaaaaaa aaaaaaaaa,\n"
- " aaaaaaaaaaa aaaaaaaaa,\n"
- " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n"
- ");",
- Style);
- verifyFormat("SomeLongVariableName->someFunction(foooooooo(\n"
- " aaaaaaaaaaaaaaa,\n"
- " aaaaaaaaaaaaaaaaaaaaa,\n"
- " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n"
- "));",
- Style);
- verifyFormat("aaaaaaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaaaaaa(\n"
- " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)\n"
- "));",
- Style);
- verifyFormat("aaaaaaaaaaaaaaaaaaaaaaaa(aaaaaaaaaa.aaaaaaaaaa(\n"
- " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)\n"
- "));",
- Style);
- verifyFormat(
- "aaaaaaaaaaaaaaaaaaaaaaaa(\n"
- " aaaaaaaaaaaaaaaaaaaaa(\n"
- " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)\n"
- " ),\n"
- " aaaaaaaaaaaaaaaa\n"
- ");",
- Style);
- verifyFormat(
- "aaaaaaaaaaaaaaaaaaaaaaaa(\n"
- " aaaaaaaaaaaaaaaaaaaaa(\n"
- " aaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaa)\n"
- " ) &&\n"
- " aaaaaaaaaaaaaaaa\n"
- ");",
- Style);
- verifyFormat("void foo(\n"
- " void (*foobarpntr)(\n"
- " aaaaaaaaaaaaaaaaaa *,\n"
- " bbbbbbbbbbbbbb *,\n"
- " cccccccccccccccccccc *,\n"
- " dddddddddddddddddd *\n"
- " )\n"
- ");",
- Style);
- verifyFormat("aaaaaaa<bbbbbbbb> const aaaaaaaaaa{\n"
- " aaaaaaaaaaaaa(aaaaaaaaaaa, aaaaaaaaaaaaaaaa)\n"
- "};",
- Style);
-
- verifyFormat("bool aaaaaaaaaaaaaaaaaaaaaaaaaaa(\n"
- " const bool &aaaaaaaaa, const void *aaaaaaaaaa\n"
- ") const {\n"
- " return true;\n"
- "}",
- Style);
- verifyFormat("bool aaaaaaaaaaaaaaaaaaaaaaaa(\n"
- " const bool &aaaaaaaaaa, const void *aaaaaaaaaa\n"
- ") const;",
- Style);
- verifyFormat("void aaaaaaaaa(\n"
- " int aaaaaa, int bbbbbb, int cccccc, int dddddddddd\n"
- ") const noexcept -> std::vector<of_very_long_type>;",
- Style);
- verifyFormat(
- "x = aaaaaaaaaaaaaaa(\n"
- " \"a aaaaaaa aaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaa aaaaaaaaaaaaa\"\n"
- ");",
- Style);
- Style.ColumnLimit = 60;
- verifyFormat("auto lambda =\n"
- " [&b](\n"
- " auto aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n"
- " ) {};",
- Style);
- verifyFormat("aaaaaaaaaaaaaaaaaaaaaaaa(\n"
- " &bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n"
- ");",
- Style);
-}
-
-TEST_F(FormatTest, ParenthesesAndOperandAlignment) {
- FormatStyle Style = getLLVMStyleWithColumns(40);
- verifyFormat("int a = f(aaaaaaaaaaaaaaaaaaaaaa &&\n"
- " bbbbbbbbbbbbbbbbbbbbbb);",
- Style);
- Style.AlignAfterOpenBracket = FormatStyle::BAS_Align;
- Style.AlignOperands = FormatStyle::OAS_DontAlign;
- verifyFormat("int a = f(aaaaaaaaaaaaaaaaaaaaaa &&\n"
- " bbbbbbbbbbbbbbbbbbbbbb);",
- Style);
- Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
- Style.AlignOperands = FormatStyle::OAS_Align;
- verifyFormat("int a = f(aaaaaaaaaaaaaaaaaaaaaa &&\n"
- " bbbbbbbbbbbbbbbbbbbbbb);",
- Style);
- Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
- Style.AlignOperands = FormatStyle::OAS_DontAlign;
- verifyFormat("int a = f(aaaaaaaaaaaaaaaaaaaaaa &&\n"
- " bbbbbbbbbbbbbbbbbbbbbb);",
- Style);
-}
-
TEST_F(FormatTest, BreaksConditionalExpressions) {
verifyFormat(
"aaaa(aaaaaaaaaaaaaaaaaaaa, aaaaaaaaaaaaaaaaaaaaaaaaaa\n"
@@ -15346,6 +14996,18 @@ TEST_F(FormatTest, SplitEmptyFunctionButNotRecord) {
Style);
}
+TEST_F(FormatTest, MergeShortFunctionBody) {
+ auto Style = getLLVMStyle();
+ Style.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_None;
+ Style.AllowShortBlocksOnASingleLine = FormatStyle::SBS_Always;
+ Style.BreakBeforeBraces = FormatStyle::BS_Custom;
+ Style.BraceWrapping.AfterFunction = true;
+
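+  // The opening brace is still wrapped onto its own line, but the short body
+  // may be merged onto the brace's line.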
+ verifyFormat("int foo()\n"
+ "{ return 1; }",
+ Style);
+}
+
TEST_F(FormatTest, KeepShortFunctionAfterPPElse) {
FormatStyle Style = getLLVMStyle();
Style.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_All;
@@ -25584,6 +25246,30 @@ TEST_F(FormatTest, SpacesInConditionalStatement) {
verifyFormat("MYIF( a )\n return;\nelse\n return;", Spaces);
}
+TEST_F(FormatTest, SpaceInEmptyBraces) {
+ constexpr StringRef Code("void f() {}\n"
+ "class Unit {};\n"
+ "auto a = [] {};\n"
+ "int x{};");
+ verifyFormat(Code);
+
+ auto Style = getWebKitStyle();
+ EXPECT_EQ(Style.SpaceInEmptyBraces, FormatStyle::SIEB_Always);
+
+ verifyFormat("void f() { }\n"
+ "class Unit { };\n"
+ "auto a = [] { };\n"
+ "int x { };",
+ Code, Style);
+
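+  // In SIEB_Block mode, only empty block-like braces keep the space; the
+  // empty braced initializer goes back to {}.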
+ Style.SpaceInEmptyBraces = FormatStyle::SIEB_Block;
+ verifyFormat("void f() { }\n"
+ "class Unit { };\n"
+ "auto a = [] { };\n"
+ "int x {};",
+ Code, Style);
+}
+
TEST_F(FormatTest, AlternativeOperators) {
// Test case for ensuring alternate operators are not
// combined with their right most neighbour.
@@ -25986,6 +25672,11 @@ TEST_F(FormatTest, SkipMacroDefinitionBody) {
" A a \\\n "
" A a",
Style);
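+  // Comments inside a skipped macro body must be passed through unchanged.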
+ verifyNoChange("#define MY_MACRO \\\n"
+ " /*foo*//*bar*/ \\\n"
+ " /* comment */ \\\n"
+ " 1",
+ Style);
}
TEST_F(FormatTest, VeryLongNamespaceCommentSplit) {
@@ -27536,405 +27227,6 @@ TEST_F(FormatTest, MultilineLambdaInConditional) {
Style);
}
-TEST_F(FormatTest, AlignAfterOpenBracketBlockIndent) {
- auto Style = getLLVMStyle();
-
- StringRef Short = "functionCall(paramA, paramB, paramC);\n"
- "void functionDecl(int a, int b, int c);";
-
- StringRef Medium = "functionCall(paramA, paramB, paramC, paramD, paramE, "
- "paramF, paramG, paramH, paramI);\n"
- "void functionDecl(int argumentA, int argumentB, int "
- "argumentC, int argumentD, int argumentE);";
-
- verifyFormat(Short, Style);
-
- StringRef NoBreak = "functionCall(paramA, paramB, paramC, paramD, paramE, "
- "paramF, paramG, paramH,\n"
- " paramI);\n"
- "void functionDecl(int argumentA, int argumentB, int "
- "argumentC, int argumentD,\n"
- " int argumentE);";
-
- verifyFormat(NoBreak, Medium, Style);
- verifyFormat(NoBreak,
- "functionCall(\n"
- " paramA,\n"
- " paramB,\n"
- " paramC,\n"
- " paramD,\n"
- " paramE,\n"
- " paramF,\n"
- " paramG,\n"
- " paramH,\n"
- " paramI\n"
- ");\n"
- "void functionDecl(\n"
- " int argumentA,\n"
- " int argumentB,\n"
- " int argumentC,\n"
- " int argumentD,\n"
- " int argumentE\n"
- ");",
- Style);
-
- verifyFormat("outerFunctionCall(nestedFunctionCall(argument1),\n"
- " nestedLongFunctionCall(argument1, "
- "argument2, argument3,\n"
- " argument4, "
- "argument5));",
- Style);
-
- Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
-
- verifyFormat(Short, Style);
- verifyFormat(
- "functionCall(\n"
- " paramA, paramB, paramC, paramD, paramE, paramF, paramG, paramH, "
- "paramI\n"
- ");\n"
- "void functionDecl(\n"
- " int argumentA, int argumentB, int argumentC, int argumentD, int "
- "argumentE\n"
- ");",
- Medium, Style);
-
- Style.AllowAllArgumentsOnNextLine = false;
- Style.AllowAllParametersOfDeclarationOnNextLine = false;
-
- verifyFormat(Short, Style);
- verifyFormat(
- "functionCall(\n"
- " paramA, paramB, paramC, paramD, paramE, paramF, paramG, paramH, "
- "paramI\n"
- ");\n"
- "void functionDecl(\n"
- " int argumentA, int argumentB, int argumentC, int argumentD, int "
- "argumentE\n"
- ");",
- Medium, Style);
-
- Style.BinPackArguments = false;
- Style.BinPackParameters = FormatStyle::BPPS_OnePerLine;
-
- verifyFormat(Short, Style);
-
- verifyFormat("functionCall(\n"
- " paramA,\n"
- " paramB,\n"
- " paramC,\n"
- " paramD,\n"
- " paramE,\n"
- " paramF,\n"
- " paramG,\n"
- " paramH,\n"
- " paramI\n"
- ");\n"
- "void functionDecl(\n"
- " int argumentA,\n"
- " int argumentB,\n"
- " int argumentC,\n"
- " int argumentD,\n"
- " int argumentE\n"
- ");",
- Medium, Style);
-
- verifyFormat("outerFunctionCall(\n"
- " nestedFunctionCall(argument1),\n"
- " nestedLongFunctionCall(\n"
- " argument1,\n"
- " argument2,\n"
- " argument3,\n"
- " argument4,\n"
- " argument5\n"
- " )\n"
- ");",
- Style);
-
- verifyFormat("int a = (int)b;", Style);
- verifyFormat("int a = (int)b;",
- "int a = (\n"
- " int\n"
- ") b;",
- Style);
-
- verifyFormat("return (true);", Style);
- verifyFormat("return (true);",
- "return (\n"
- " true\n"
- ");",
- Style);
-
- verifyFormat("void foo();", Style);
- verifyFormat("void foo();",
- "void foo(\n"
- ");",
- Style);
-
- verifyFormat("void foo() {}", Style);
- verifyFormat("void foo() {}",
- "void foo(\n"
- ") {\n"
- "}",
- Style);
-
- verifyFormat("auto string = std::string();", Style);
- verifyFormat("auto string = std::string();",
- "auto string = std::string(\n"
- ");",
- Style);
-
- verifyFormat("void (*functionPointer)() = nullptr;", Style);
- verifyFormat("void (*functionPointer)() = nullptr;",
- "void (\n"
- " *functionPointer\n"
- ")\n"
- "(\n"
- ") = nullptr;",
- Style);
-}
-
-TEST_F(FormatTest, AlignAfterOpenBracketBlockIndentIfStatement) {
- auto Style = getLLVMStyle();
-
- verifyFormat("if (foo()) {\n"
- " return;\n"
- "}",
- Style);
-
- verifyFormat("if (quiteLongArg !=\n"
- " (alsoLongArg - 1)) { // ABC is a very longgggggggggggg "
- "comment\n"
- " return;\n"
- "}",
- Style);
-
- Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
-
- verifyFormat("if (foo()) {\n"
- " return;\n"
- "}",
- Style);
-
- verifyFormat("if (quiteLongArg !=\n"
- " (alsoLongArg - 1)) { // ABC is a very longgggggggggggg "
- "comment\n"
- " return;\n"
- "}",
- Style);
-
- verifyFormat("void foo() {\n"
- " if (camelCaseName < alsoLongName ||\n"
- " anotherEvenLongerName <=\n"
- " thisReallyReallyReallyReallyReallyReallyLongerName ||"
- "\n"
- " otherName < thisLastName) {\n"
- " return;\n"
- " } else if (quiteLongName < alsoLongName ||\n"
- " anotherEvenLongerName <=\n"
- " thisReallyReallyReallyReallyReallyReallyLonger"
- "Name ||\n"
- " otherName < thisLastName) {\n"
- " return;\n"
- " }\n"
- "}",
- Style);
-
- Style.ContinuationIndentWidth = 2;
- verifyFormat("void foo() {\n"
- " if (ThisIsRatherALongIfClause && thatIExpectToBeBroken ||\n"
- " ontoMultipleLines && whenFormattedCorrectly) {\n"
- " if (false) {\n"
- " return;\n"
- " } else if (thisIsRatherALongIfClause && "
- "thatIExpectToBeBroken ||\n"
- " ontoMultipleLines && whenFormattedCorrectly) {\n"
- " return;\n"
- " }\n"
- " }\n"
- "}",
- Style);
-}
-
-TEST_F(FormatTest, AlignAfterOpenBracketBlockIndentForStatement) {
- auto Style = getLLVMStyle();
-
- verifyFormat("for (int i = 0; i < 5; ++i) {\n"
- " doSomething();\n"
- "}",
- Style);
-
- verifyFormat("for (int myReallyLongCountVariable = 0; "
- "myReallyLongCountVariable < count;\n"
- " myReallyLongCountVariable++) {\n"
- " doSomething();\n"
- "}",
- Style);
-
- Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
-
- verifyFormat("for (int i = 0; i < 5; ++i) {\n"
- " doSomething();\n"
- "}",
- Style);
-
- verifyFormat("for (int myReallyLongCountVariable = 0; "
- "myReallyLongCountVariable < count;\n"
- " myReallyLongCountVariable++) {\n"
- " doSomething();\n"
- "}",
- Style);
-}
-
-TEST_F(FormatTest, AlignAfterOpenBracketBlockIndentInitializers) {
- auto Style = getLLVMStyleWithColumns(60);
- Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
- // Aggregate initialization.
- verifyFormat("int LooooooooooooooooooooooooongVariable[2] = {\n"
- " 10000000, 20000000\n"
- "};",
- Style);
- verifyFormat("SomeStruct s{\n"
- " \"xxxxxxxxxxxxxxxx\", \"yyyyyyyyyyyyyyyy\",\n"
- " \"zzzzzzzzzzzzzzzz\"\n"
- "};",
- Style);
- // Designated initializers.
- verifyFormat("int LooooooooooooooooooooooooongVariable[2] = {\n"
- " [0] = 10000000, [1] = 20000000\n"
- "};",
- Style);
- verifyFormat("SomeStruct s{\n"
- " .foo = \"xxxxxxxxxxxxx\",\n"
- " .bar = \"yyyyyyyyyyyyy\",\n"
- " .baz = \"zzzzzzzzzzzzz\"\n"
- "};",
- Style);
- // List initialization.
- verifyFormat("SomeStruct s{\n"
- " \"xxxxxxxxxxxxx\",\n"
- " \"yyyyyyyyyyyyy\",\n"
- " \"zzzzzzzzzzzzz\",\n"
- "};",
- Style);
- verifyFormat("SomeStruct{\n"
- " \"xxxxxxxxxxxxx\",\n"
- " \"yyyyyyyyyyyyy\",\n"
- " \"zzzzzzzzzzzzz\",\n"
- "};",
- Style);
- verifyFormat("new SomeStruct{\n"
- " \"xxxxxxxxxxxxx\",\n"
- " \"yyyyyyyyyyyyy\",\n"
- " \"zzzzzzzzzzzzz\",\n"
- "};",
- Style);
- // Member initializer.
- verifyFormat("class SomeClass {\n"
- " SomeStruct s{\n"
- " \"xxxxxxxxxxxxx\",\n"
- " \"yyyyyyyyyyyyy\",\n"
- " \"zzzzzzzzzzzzz\",\n"
- " };\n"
- "};",
- Style);
- // Constructor member initializer.
- verifyFormat("SomeClass::SomeClass : strct{\n"
- " \"xxxxxxxxxxxxx\",\n"
- " \"yyyyyyyyyyyyy\",\n"
- " \"zzzzzzzzzzzzz\",\n"
- " } {}",
- Style);
- // Copy initialization.
- verifyFormat("SomeStruct s = SomeStruct{\n"
- " \"xxxxxxxxxxxxx\",\n"
- " \"yyyyyyyyyyyyy\",\n"
- " \"zzzzzzzzzzzzz\",\n"
- "};",
- Style);
- // Copy list initialization.
- verifyFormat("SomeStruct s = {\n"
- " \"xxxxxxxxxxxxx\",\n"
- " \"yyyyyyyyyyyyy\",\n"
- " \"zzzzzzzzzzzzz\",\n"
- "};",
- Style);
- // Assignment operand initialization.
- verifyFormat("s = {\n"
- " \"xxxxxxxxxxxxx\",\n"
- " \"yyyyyyyyyyyyy\",\n"
- " \"zzzzzzzzzzzzz\",\n"
- "};",
- Style);
- // Returned object initialization.
- verifyFormat("return {\n"
- " \"xxxxxxxxxxxxx\",\n"
- " \"yyyyyyyyyyyyy\",\n"
- " \"zzzzzzzzzzzzz\",\n"
- "};",
- Style);
- // Initializer list.
- verifyFormat("auto initializerList = {\n"
- " \"xxxxxxxxxxxxx\",\n"
- " \"yyyyyyyyyyyyy\",\n"
- " \"zzzzzzzzzzzzz\",\n"
- "};",
- Style);
- // Function parameter initialization.
- verifyFormat("func({\n"
- " \"xxxxxxxxxxxxx\",\n"
- " \"yyyyyyyyyyyyy\",\n"
- " \"zzzzzzzzzzzzz\",\n"
- "});",
- Style);
- // Nested init lists.
- verifyFormat("SomeStruct s = {\n"
- " {{init1, init2, init3, init4, init5},\n"
- " {init1, init2, init3, init4, init5}}\n"
- "};",
- Style);
- verifyFormat("SomeStruct s = {\n"
- " {{\n"
- " .init1 = 1,\n"
- " .init2 = 2,\n"
- " .init3 = 3,\n"
- " .init4 = 4,\n"
- " .init5 = 5,\n"
- " },\n"
- " {init1, init2, init3, init4, init5}}\n"
- "};",
- Style);
- verifyFormat("SomeArrayT a[3] = {\n"
- " {\n"
- " foo,\n"
- " bar,\n"
- " },\n"
- " {\n"
- " foo,\n"
- " bar,\n"
- " },\n"
- " SomeArrayT{},\n"
- "};",
- Style);
- verifyFormat("SomeArrayT a[3] = {\n"
- " {foo},\n"
- " {\n"
- " {\n"
- " init1,\n"
- " init2,\n"
- " init3,\n"
- " },\n"
- " {\n"
- " init1,\n"
- " init2,\n"
- " init3,\n"
- " },\n"
- " },\n"
- " {baz},\n"
- "};",
- Style);
-}
-
TEST_F(FormatTest, UnderstandsDigraphs) {
verifyFormat("int arr<:5:> = {};");
verifyFormat("int arr[5] = <%%>;");
diff --git a/clang/unittests/Format/FormatTestTableGen.cpp b/clang/unittests/Format/FormatTestTableGen.cpp
index 1c3d187d..df20cc2 100644
--- a/clang/unittests/Format/FormatTestTableGen.cpp
+++ b/clang/unittests/Format/FormatTestTableGen.cpp
@@ -187,9 +187,16 @@ TEST_F(FormatTestTableGen, SimpleValue6) {
" );\n"
" let DAGArgBang = (!cast<SomeType>(\"Some\") i32:$src1,\n"
" i32:$src2);\n"
+ " let NestedDAGArg = ((DAGArg1 (v111 v112, v113), v12) v2,\n"
+ " (DAGArg3 (v31 v32)));\n"
"}");
}
+TEST_F(FormatTestTableGen, SimpleValue6_NestedInPat) {
+ verifyFormat("def : Pat<(vec.vt (avg (vec.vt V128:$l), (vec.vt V128:$r))),\n"
+ " (inst $l, $r)>;");
+}
+
TEST_F(FormatTestTableGen, SimpleValue7) {
verifyFormat("def SimpleValue7 { let Identifier = SimpleValue; }");
}
diff --git a/clang/unittests/Format/NumericLiteralInfoTest.cpp b/clang/unittests/Format/NumericLiteralInfoTest.cpp
new file mode 100644
index 0000000..a892cff
--- /dev/null
+++ b/clang/unittests/Format/NumericLiteralInfoTest.cpp
@@ -0,0 +1,71 @@
+//===- unittest/Format/NumericLiteralInfoTest.cpp -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../../lib/Format/NumericLiteralInfo.h"
+#include "gtest/gtest.h"
+
+namespace clang {
+namespace format {
+namespace {
+
+static constexpr auto npos = llvm::StringRef::npos;
+
+class NumericLiteralInfoTest : public testing::Test {
+protected:
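+  // Compares the positions recorded by NumericLiteralInfo with the expected
+  // ones; npos marks a component that is absent from the literal.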
+ bool verifyInfo(const NumericLiteralInfo &Info, size_t BaseLetterPos = npos,
+ size_t DotPos = npos, size_t ExponentLetterPos = npos,
+ size_t SuffixPos = npos) {
+ return Info.BaseLetterPos == BaseLetterPos && Info.DotPos == DotPos &&
+ Info.ExponentLetterPos == ExponentLetterPos &&
+ Info.SuffixPos == SuffixPos;
+ }
+};
+
+TEST_F(NumericLiteralInfoTest, IntegerLiteral) {
+ // Decimal.
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("90")));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("9L"), npos, npos, npos, 1));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("9'0U"), npos, npos, npos, 3));
+
+ // Octal.
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("0")));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("07")));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("0z"), npos, npos, npos, 1));
+ // JavaScript.
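+  // (The second constructor argument is the digit-separator character.)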
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("0o7"), 1));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("0O7_0", '_'), 1));
+
+ // Binary.
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("0b1"), 1));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("0B1ul"), 1, npos, npos, 3));
+
+ // Hexadecimal.
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("0xF"), 1));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("0XfZ"), 1, npos, npos, 3));
+}
+
+TEST_F(NumericLiteralInfoTest, FloatingPointLiteral) {
+ // Decimal.
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo(".9"), npos, 0));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("9."), npos, 1));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("9.F"), npos, 1, npos, 2));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("9e9"), npos, npos, 1));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("9E-9f"), npos, npos, 1, 4));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("9.9e+9bf16"), npos, 1, 3, 6));
+
+ // Hexadecimal.
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("0X.Fp9"), 1, 2, 4));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("0xF.P9"), 1, 3, 4));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("0xFp9"), 1, npos, 3));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("0xFp+9F128"), 1, npos, 3, 6));
+ EXPECT_TRUE(verifyInfo(NumericLiteralInfo("0xF.Fp-9_Pa"), 1, 3, 5, 8));
+}
+
+} // namespace
+} // namespace format
+} // namespace clang
diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp
index 7f99655..141b000 100644
--- a/clang/unittests/Format/TokenAnnotatorTest.cpp
+++ b/clang/unittests/Format/TokenAnnotatorTest.cpp
@@ -618,6 +618,13 @@ TEST_F(TokenAnnotatorTest, UnderstandsStructs) {
EXPECT_TOKEN(Tokens[19], tok::l_brace, TT_StructLBrace);
EXPECT_TOKEN(Tokens[20], tok::r_brace, TT_StructRBrace);
+ Tokens = annotate("class Outer {\n"
+ " struct Inner final : Base {};\n"
+ "};");
+ ASSERT_EQ(Tokens.size(), 14u) << Tokens;
+ EXPECT_TOKEN(Tokens[5], tok::identifier, TT_Unknown); // Not TT_StartOfName
+ EXPECT_TOKEN(Tokens[6], tok::colon, TT_InheritanceColon);
+
constexpr StringRef Code("struct EXPORT StructName {};");
Tokens = annotate(Code);
@@ -1342,6 +1349,14 @@ TEST_F(TokenAnnotatorTest, UnderstandsRequiresClausesAndConcepts) {
EXPECT_EQ(Tokens[21]->MatchingParen, Tokens[15]);
EXPECT_TRUE(Tokens[21]->ClosesRequiresClause);
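+  // The l_brace opening a requires-expression nested inside a trailing
+  // requires-clause must be annotated as TT_RequiresExpressionLBrace.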
+ Tokens = annotate("template <typename Foo>\n"
+ "void Fun(const Foo &F)\n"
+ " requires requires(Foo F) {\n"
+ " { F.Bar() } -> std::same_as<int>;\n"
+ " };");
+ ASSERT_EQ(Tokens.size(), 38u) << Tokens;
+ EXPECT_TOKEN(Tokens[19], tok::l_brace, TT_RequiresExpressionLBrace);
+
Tokens =
annotate("template <class A, class B> concept C ="
"std::same_as<std::iter_value_t<A>, std::iter_value_t<B>>;");
diff --git a/clang/unittests/Index/IndexTests.cpp b/clang/unittests/Index/IndexTests.cpp
index 05ce448..6df4b57 100644
--- a/clang/unittests/Index/IndexTests.cpp
+++ b/clang/unittests/Index/IndexTests.cpp
@@ -347,7 +347,7 @@ TEST(IndexTest, Constructors) {
WrittenAt(Position(4, 8)))));
}
-TEST(IndexTest, InjecatedNameClass) {
+TEST(IndexTest, InjectedNameClass) {
std::string Code = R"cpp(
template <typename T>
class Foo {
diff --git a/clang/unittests/Interpreter/CMakeLists.txt b/clang/unittests/Interpreter/CMakeLists.txt
index 1dda902..db9f80d 100644
--- a/clang/unittests/Interpreter/CMakeLists.txt
+++ b/clang/unittests/Interpreter/CMakeLists.txt
@@ -1,3 +1,34 @@
+if(EMSCRIPTEN)
+  set(LLVM_COMPONENTS_TO_LINK
+    ""
+  )
+  set(LLVM_LIBS_TO_LINK
+    ""
+  )
+  set(CLANG_LIBS_TO_LINK
+    clangInterpreter
+  )
+else()
+  set(LLVM_COMPONENTS_TO_LINK
+    ${LLVM_TARGETS_TO_BUILD}
+    Core
+    MC
+    OrcJIT
+    Support
+    TargetParser
+  )
+  set(LLVM_LIBS_TO_LINK
+    LLVMTestingSupport
+  )
+  set(CLANG_LIBS_TO_LINK
+    clangAST
+    clangBasic
+    clangInterpreter
+    clangFrontend
+    clangSema
+  )
+endif()
+
add_distinct_clang_unittest(ClangReplInterpreterTests
IncrementalCompilerBuilderTest.cpp
IncrementalProcessingTest.cpp
@@ -8,24 +39,38 @@ add_distinct_clang_unittest(ClangReplInterpreterTests
EXPORT_SYMBOLS
CLANG_LIBS
- clangAST
- clangBasic
- clangInterpreter
- clangFrontend
- clangSema
+ ${CLANG_LIBS_TO_LINK}
LINK_LIBS
- LLVMTestingSupport
+ ${LLVM_LIBS_TO_LINK}
LLVM_COMPONENTS
- ${LLVM_TARGETS_TO_BUILD}
- Core
- MC
- OrcJIT
- Support
- TargetParser
+ ${LLVM_COMPONENTS_TO_LINK}
)
+if(EMSCRIPTEN)
+  # Avoid linking LLVMSupport twice, which would cause a duplicate symbol
+  # error when creating the main module.
+  get_target_property(LINKED_LIBS ClangReplInterpreterTests LINK_LIBRARIES)
+  list(REMOVE_ITEM LINKED_LIBS LLVMSupport)
+  set_target_properties(ClangReplInterpreterTests PROPERTIES
+    LINK_LIBRARIES "${LINKED_LIBS}")
+  target_link_options(ClangReplInterpreterTests
+    PUBLIC "SHELL: -s MAIN_MODULE=1"
+    PUBLIC "SHELL: -s ALLOW_MEMORY_GROWTH=1"
+    PUBLIC "SHELL: -s STACK_SIZE=32mb"
+    PUBLIC "SHELL: -s INITIAL_MEMORY=128mb"
+    PUBLIC "SHELL: --emrun"
+    PUBLIC "SHELL: -Wl,--export=__clang_Interpreter_SetValueWithAlloc"
+    PUBLIC "SHELL: -Wl,--export=__clang_Interpreter_SetValueNoAlloc"
+    PUBLIC "SHELL: -Wl,--export=_ZnwmPv26__clang_Interpreter_NewTag"
+    PUBLIC "SHELL: -Wl,--export=_Z9getGlobalv"
+    PUBLIC "SHELL: -Wl,--export=_Z9setGlobali"
+  )
+  set_target_properties(ClangReplInterpreterTests PROPERTIES
+    SUFFIX ".html"
+  )
+endif()
+
# Exceptions on Windows are not yet supported.
if(NOT WIN32)
add_subdirectory(ExceptionTests)
diff --git a/clang/unittests/Interpreter/CodeCompletionTest.cpp b/clang/unittests/Interpreter/CodeCompletionTest.cpp
index 23cfc46..ceb6834 100644
--- a/clang/unittests/Interpreter/CodeCompletionTest.cpp
+++ b/clang/unittests/Interpreter/CodeCompletionTest.cpp
@@ -29,8 +29,14 @@ public:
std::unique_ptr<clang::Interpreter> Interp;
void SetUp() override {
+// FIXME: WebAssembly doesn't currently support JIT (see
+// https://github.com/llvm/llvm-project/pull/150977#discussion_r2237521095),
+// so this HostSupportsJIT check is skipped until support is added and
+// HostSupportsJIT can return true.
+#ifndef __EMSCRIPTEN__
if (!HostSupportsJIT())
GTEST_SKIP();
+#endif
std::unique_ptr<CompilerInstance> CI = cantFail(CB.CreateCpp());
this->Interp = cantFail(clang::Interpreter::create(std::move(CI)));
}
diff --git a/clang/unittests/Interpreter/IncrementalCompilerBuilderTest.cpp b/clang/unittests/Interpreter/IncrementalCompilerBuilderTest.cpp
index c4a4007..7b4633b 100644
--- a/clang/unittests/Interpreter/IncrementalCompilerBuilderTest.cpp
+++ b/clang/unittests/Interpreter/IncrementalCompilerBuilderTest.cpp
@@ -37,6 +37,14 @@ TEST(IncrementalCompilerBuilder, SetCompilerArgs) {
}
TEST(IncrementalCompilerBuilder, SetTargetTriple) {
+// FIXME: This test doesn't currently work for Emscripten builds. It should
+// be possible to make it work; for details on how it fails and the current
+// progress on enabling it, see
+// https://github.com/llvm/llvm-project/issues/153461
+#ifdef __EMSCRIPTEN__
+  GTEST_SKIP() << "Test fails for Emscripten builds";
+#endif
auto CB = clang::IncrementalCompilerBuilder();
CB.SetTargetTriple("armv6-none-eabi");
auto CI = cantFail(CB.CreateCpp());
diff --git a/clang/unittests/Interpreter/InterpreterExtensionsTest.cpp b/clang/unittests/Interpreter/InterpreterExtensionsTest.cpp
index 1c27cfb..f50f6e3 100644
--- a/clang/unittests/Interpreter/InterpreterExtensionsTest.cpp
+++ b/clang/unittests/Interpreter/InterpreterExtensionsTest.cpp
@@ -75,9 +75,14 @@ struct OutOfProcInterpreter : public Interpreter {
};
TEST_F(InterpreterExtensionsTest, FindRuntimeInterface) {
+// FIXME: WebAssembly doesn't currently support JIT (see
+// https://github.com/llvm/llvm-project/pull/150977#discussion_r2237521095),
+// so this HostSupportsJIT check is skipped until support is added and
+// HostSupportsJIT can return true.
+#ifndef __EMSCRIPTEN__
if (!HostSupportsJIT())
GTEST_SKIP();
-
+#endif
clang::IncrementalCompilerBuilder CB;
llvm::Error ErrOut = llvm::Error::success();
auto CI = cantFail(CB.CreateCpp());
diff --git a/clang/unittests/Interpreter/InterpreterTest.cpp b/clang/unittests/Interpreter/InterpreterTest.cpp
index 768058b..9ff9092 100644
--- a/clang/unittests/Interpreter/InterpreterTest.cpp
+++ b/clang/unittests/Interpreter/InterpreterTest.cpp
@@ -147,6 +147,14 @@ TEST_F(InterpreterTest, DeclsAndStatements) {
}
TEST_F(InterpreterTest, UndoCommand) {
+// FIXME: This test doesn't currently work for Emscripten builds. It should
+// be possible to make it work; for details on how it fails and the current
+// progress on enabling it, see
+// https://github.com/llvm/llvm-project/issues/153461
+#ifdef __EMSCRIPTEN__
+  GTEST_SKIP() << "Test fails for Emscripten builds";
+#endif
Args ExtraArgs = {"-Xclang", "-diagnostic-log-file", "-Xclang", "-"};
// Create the diagnostic engine with unowned consumer.
diff --git a/clang/unittests/Interpreter/InterpreterTestFixture.h b/clang/unittests/Interpreter/InterpreterTestFixture.h
index 113599f..b088fa4 100644
--- a/clang/unittests/Interpreter/InterpreterTestFixture.h
+++ b/clang/unittests/Interpreter/InterpreterTestFixture.h
@@ -38,8 +38,14 @@ protected:
}
void SetUp() override {
+// FIXME: WebAssembly doesn't currently support JIT (see
+// https://github.com/llvm/llvm-project/pull/150977#discussion_r2237521095),
+// so this HostSupportsJIT check is skipped until support is added and
+// HostSupportsJIT can return true.
+#ifndef __EMSCRIPTEN__
if (!HostSupportsJIT())
GTEST_SKIP();
+#endif
}
void TearDown() override {}
diff --git a/clang/unittests/Lex/CMakeLists.txt b/clang/unittests/Lex/CMakeLists.txt
index 96ca6dd..fa5e58f 100644
--- a/clang/unittests/Lex/CMakeLists.txt
+++ b/clang/unittests/Lex/CMakeLists.txt
@@ -5,6 +5,7 @@ add_clang_unittest(LexTests
LexerTest.cpp
LexHLSLRootSignatureTest.cpp
ModuleDeclStateTest.cpp
+ NoTrivialPPDirectiveTracerTest.cpp
PPCallbacksTest.cpp
PPConditionalDirectiveRecordTest.cpp
PPDependencyDirectivesTest.cpp
diff --git a/clang/unittests/Lex/LexerTest.cpp b/clang/unittests/Lex/LexerTest.cpp
index 56d73ce..c51cd0d 100644
--- a/clang/unittests/Lex/LexerTest.cpp
+++ b/clang/unittests/Lex/LexerTest.cpp
@@ -795,7 +795,7 @@ TEST_F(LexerTest, CheckFirstPPToken) {
EXPECT_FALSE(Lexer::getRawToken(PP->getMainFileFirstPPTokenLoc(), Tok,
PP->getSourceManager(), PP->getLangOpts(),
/*IgnoreWhiteSpace=*/false));
- EXPECT_TRUE(Tok.isFirstPPToken());
+ EXPECT_TRUE(PP->getMainFileFirstPPTokenLoc() == Tok.getLocation());
EXPECT_TRUE(Tok.is(tok::hash));
}
@@ -811,7 +811,7 @@ TEST_F(LexerTest, CheckFirstPPToken) {
EXPECT_FALSE(Lexer::getRawToken(PP->getMainFileFirstPPTokenLoc(), Tok,
PP->getSourceManager(), PP->getLangOpts(),
/*IgnoreWhiteSpace=*/false));
- EXPECT_TRUE(Tok.isFirstPPToken());
+ EXPECT_TRUE(PP->getMainFileFirstPPTokenLoc() == Tok.getLocation());
EXPECT_TRUE(Tok.is(tok::raw_identifier));
EXPECT_TRUE(Tok.getRawIdentifier() == "FOO");
}
diff --git a/clang/unittests/Lex/ModuleDeclStateTest.cpp b/clang/unittests/Lex/ModuleDeclStateTest.cpp
index adc6cf1..ac2ddfa 100644
--- a/clang/unittests/Lex/ModuleDeclStateTest.cpp
+++ b/clang/unittests/Lex/ModuleDeclStateTest.cpp
@@ -61,14 +61,15 @@ protected:
Target = TargetInfo::CreateTargetInfo(Diags, *TargetOpts);
}
- std::unique_ptr<Preprocessor>
- getPreprocessor(const char *source, Language Lang) {
+ std::unique_ptr<Preprocessor> getPreprocessor(const char *source,
+ Language Lang) {
std::unique_ptr<llvm::MemoryBuffer> Buf =
llvm::MemoryBuffer::getMemBuffer(source);
SourceMgr.setMainFileID(SourceMgr.createFileID(std::move(Buf)));
std::vector<std::string> Includes;
- LangOptions::setLangDefaults(LangOpts, Lang, Target->getTriple(), Includes, LangStandard::lang_cxx20);
+ LangOptions::setLangDefaults(LangOpts, Lang, Target->getTriple(), Includes,
+ LangStandard::lang_cxx20);
LangOpts.CPlusPlusModules = true;
if (Lang != Language::CXX) {
LangOpts.Modules = true;
@@ -112,12 +113,11 @@ export module foo;
std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
std::initializer_list<bool> ImportKinds = {};
- preprocess(*PP,
- std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds));
-
- auto *Callback =
- static_cast<CheckNamedModuleImportingCB *>(PP->getPPCallbacks());
- EXPECT_EQ(Callback->importNamedModuleNum(), (size_t)0);
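+  // Keep a raw pointer before moving ownership of the callback into the
+  // preprocessor, instead of casting PP->getPPCallbacks() afterwards.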
+ auto Callback =
+ std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds);
+ CheckNamedModuleImportingCB *CallbackPtr = Callback.get();
+ preprocess(*PP, std::move(Callback));
+ EXPECT_EQ(CallbackPtr->importNamedModuleNum(), (size_t)0);
EXPECT_TRUE(PP->isInNamedModule());
EXPECT_TRUE(PP->isInNamedInterfaceUnit());
EXPECT_FALSE(PP->isInImplementationUnit());
@@ -131,12 +131,11 @@ module foo;
std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
std::initializer_list<bool> ImportKinds = {};
- preprocess(*PP,
- std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds));
-
- auto *Callback =
- static_cast<CheckNamedModuleImportingCB *>(PP->getPPCallbacks());
- EXPECT_EQ(Callback->importNamedModuleNum(), (size_t)0);
+ auto Callback =
+ std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds);
+ CheckNamedModuleImportingCB *CallbackPtr = Callback.get();
+ preprocess(*PP, std::move(Callback));
+ EXPECT_EQ(CallbackPtr->importNamedModuleNum(), (size_t)0);
EXPECT_TRUE(PP->isInNamedModule());
EXPECT_FALSE(PP->isInNamedInterfaceUnit());
EXPECT_TRUE(PP->isInImplementationUnit());
@@ -150,12 +149,11 @@ module foo:part;
std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
std::initializer_list<bool> ImportKinds = {};
- preprocess(*PP,
- std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds));
-
- auto *Callback =
- static_cast<CheckNamedModuleImportingCB *>(PP->getPPCallbacks());
- EXPECT_EQ(Callback->importNamedModuleNum(), (size_t)0);
+ auto Callback =
+ std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds);
+ CheckNamedModuleImportingCB *CallbackPtr = Callback.get();
+ preprocess(*PP, std::move(Callback));
+ EXPECT_EQ(CallbackPtr->importNamedModuleNum(), (size_t)0);
EXPECT_TRUE(PP->isInNamedModule());
EXPECT_FALSE(PP->isInNamedInterfaceUnit());
EXPECT_FALSE(PP->isInImplementationUnit());
@@ -169,12 +167,11 @@ export module foo:part;
std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
std::initializer_list<bool> ImportKinds = {};
- preprocess(*PP,
- std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds));
-
- auto *Callback =
- static_cast<CheckNamedModuleImportingCB *>(PP->getPPCallbacks());
- EXPECT_EQ(Callback->importNamedModuleNum(), (size_t)0);
+ auto Callback =
+ std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds);
+ CheckNamedModuleImportingCB *CallbackPtr = Callback.get();
+ preprocess(*PP, std::move(Callback));
+ EXPECT_EQ(CallbackPtr->importNamedModuleNum(), (size_t)0);
EXPECT_TRUE(PP->isInNamedModule());
EXPECT_TRUE(PP->isInNamedInterfaceUnit());
EXPECT_FALSE(PP->isInImplementationUnit());
@@ -188,12 +185,11 @@ export module foo.dot:part.dot;
std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
std::initializer_list<bool> ImportKinds = {};
- preprocess(*PP,
- std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds));
-
- auto *Callback =
- static_cast<CheckNamedModuleImportingCB *>(PP->getPPCallbacks());
- EXPECT_EQ(Callback->importNamedModuleNum(), (size_t)0);
+ auto Callback =
+ std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds);
+ CheckNamedModuleImportingCB *CallbackPtr = Callback.get();
+ preprocess(*PP, std::move(Callback));
+ EXPECT_EQ(CallbackPtr->importNamedModuleNum(), (size_t)0);
EXPECT_TRUE(PP->isInNamedModule());
EXPECT_TRUE(PP->isInNamedInterfaceUnit());
EXPECT_FALSE(PP->isInImplementationUnit());
@@ -207,12 +203,11 @@ TEST_F(ModuleDeclStateTest, NotModule) {
std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
std::initializer_list<bool> ImportKinds = {};
- preprocess(*PP,
- std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds));
-
- auto *Callback =
- static_cast<CheckNamedModuleImportingCB *>(PP->getPPCallbacks());
- EXPECT_EQ(Callback->importNamedModuleNum(), (size_t)0);
+ auto Callback =
+ std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds);
+ CheckNamedModuleImportingCB *CallbackPtr = Callback.get();
+ preprocess(*PP, std::move(Callback));
+ EXPECT_EQ(CallbackPtr->importNamedModuleNum(), (size_t)0);
EXPECT_FALSE(PP->isInNamedModule());
EXPECT_FALSE(PP->isInNamedInterfaceUnit());
EXPECT_FALSE(PP->isInImplementationUnit());
@@ -233,12 +228,11 @@ import :another;
std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
std::initializer_list<bool> ImportKinds = {true, true};
- preprocess(*PP,
- std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds));
-
- auto *Callback =
- static_cast<CheckNamedModuleImportingCB *>(PP->getPPCallbacks());
- EXPECT_EQ(Callback->importNamedModuleNum(), (size_t)2);
+ auto Callback =
+ std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds);
+ CheckNamedModuleImportingCB *CallbackPtr = Callback.get();
+ preprocess(*PP, std::move(Callback));
+ EXPECT_EQ(CallbackPtr->importNamedModuleNum(), (size_t)2);
EXPECT_TRUE(PP->isInNamedModule());
EXPECT_TRUE(PP->isInNamedInterfaceUnit());
EXPECT_FALSE(PP->isInImplementationUnit());
@@ -260,12 +254,11 @@ import :another;
std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
std::initializer_list<bool> ImportKinds = {true, true};
- preprocess(*PP,
- std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds));
-
- auto *Callback =
- static_cast<CheckNamedModuleImportingCB *>(PP->getPPCallbacks());
- EXPECT_EQ(Callback->importNamedModuleNum(), (size_t)2);
+ auto Callback =
+ std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds);
+ CheckNamedModuleImportingCB *CallbackPtr = Callback.get();
+ preprocess(*PP, std::move(Callback));
+ EXPECT_EQ(CallbackPtr->importNamedModuleNum(), (size_t)2);
EXPECT_TRUE(PP->isInNamedModule());
EXPECT_TRUE(PP->isInNamedInterfaceUnit());
EXPECT_FALSE(PP->isInImplementationUnit());
@@ -286,12 +279,11 @@ import :another;
std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
std::initializer_list<bool> ImportKinds = {true};
- preprocess(*PP,
- std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds));
-
- auto *Callback =
- static_cast<CheckNamedModuleImportingCB *>(PP->getPPCallbacks());
- EXPECT_EQ(Callback->importNamedModuleNum(), (size_t)1);
+ auto Callback =
+ std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds);
+ CheckNamedModuleImportingCB *CallbackPtr = Callback.get();
+ preprocess(*PP, std::move(Callback));
+ EXPECT_EQ(CallbackPtr->importNamedModuleNum(), (size_t)1);
EXPECT_FALSE(PP->isInNamedModule());
EXPECT_FALSE(PP->isInNamedInterfaceUnit());
EXPECT_FALSE(PP->isInImplementationUnit());
@@ -304,12 +296,11 @@ TEST_F(ModuleDeclStateTest, ImportAClangNamedModule) {
std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::ObjCXX);
std::initializer_list<bool> ImportKinds = {false};
- preprocess(*PP,
- std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds));
-
- auto *Callback =
- static_cast<CheckNamedModuleImportingCB *>(PP->getPPCallbacks());
- EXPECT_EQ(Callback->importNamedModuleNum(), (size_t)1);
+ auto Callback =
+ std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds);
+ CheckNamedModuleImportingCB *CallbackPtr = Callback.get();
+ preprocess(*PP, std::move(Callback));
+ EXPECT_EQ(CallbackPtr->importNamedModuleNum(), (size_t)1);
EXPECT_FALSE(PP->isInNamedModule());
EXPECT_FALSE(PP->isInNamedInterfaceUnit());
EXPECT_FALSE(PP->isInImplementationUnit());
@@ -326,12 +317,11 @@ import M2;
std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::ObjCXX);
std::initializer_list<bool> ImportKinds = {false, true, false, true};
- preprocess(*PP,
- std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds));
-
- auto *Callback =
- static_cast<CheckNamedModuleImportingCB *>(PP->getPPCallbacks());
- EXPECT_EQ(Callback->importNamedModuleNum(), (size_t)4);
+ auto Callback =
+ std::make_unique<CheckNamedModuleImportingCB>(*PP, ImportKinds);
+ CheckNamedModuleImportingCB *CallbackPtr = Callback.get();
+ preprocess(*PP, std::move(Callback));
+ EXPECT_EQ(CallbackPtr->importNamedModuleNum(), (size_t)4);
EXPECT_FALSE(PP->isInNamedModule());
EXPECT_FALSE(PP->isInNamedInterfaceUnit());
EXPECT_FALSE(PP->isInImplementationUnit());
diff --git a/clang/unittests/Lex/NoTrivialPPDirectiveTracerTest.cpp b/clang/unittests/Lex/NoTrivialPPDirectiveTracerTest.cpp
new file mode 100644
index 0000000..d79c142
--- /dev/null
+++ b/clang/unittests/Lex/NoTrivialPPDirectiveTracerTest.cpp
@@ -0,0 +1,182 @@
+//===- unittests/Lex/NoTrivialPPDirectiveTracerTest.cpp -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/ModuleLoader.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "gtest/gtest.h"
+#include <cstddef>
+#include <initializer_list>
+
+using namespace clang;
+
+namespace {
+class NoTrivialPPDirectiveTracerTest : public ::testing::Test {
+protected:
+ NoTrivialPPDirectiveTracerTest()
+ : VFS(llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>()),
+ FileMgr(FileMgrOpts, VFS),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
+ SourceMgr(Diags, FileMgr), TargetOpts(new TargetOptions) {
+ TargetOpts->Triple = "x86_64-unknown-linux-gnu";
+ Target = TargetInfo::CreateTargetInfo(Diags, *TargetOpts);
+ }
+
+ void addFile(const char *source, StringRef Filename) {
+ VFS->addFile(Filename, 0, llvm::MemoryBuffer::getMemBuffer(source),
+ /*User=*/std::nullopt,
+ /*Group=*/std::nullopt,
+ llvm::sys::fs::file_type::regular_file);
+ }
+
+ std::unique_ptr<Preprocessor> getPreprocessor(const char *source,
+ Language Lang) {
+ std::unique_ptr<llvm::MemoryBuffer> Buf =
+ llvm::MemoryBuffer::getMemBuffer(source);
+ SourceMgr.setMainFileID(SourceMgr.createFileID(std::move(Buf)));
+
+ std::vector<std::string> Includes;
+ LangOptions::setLangDefaults(LangOpts, Lang, Target->getTriple(), Includes,
+ LangStandard::lang_cxx20);
+ LangOpts.CPlusPlusModules = true;
+ if (Lang != Language::CXX) {
+ LangOpts.Modules = true;
+ LangOpts.ImplicitModules = true;
+ }
+
+ HeaderInfo.emplace(HSOpts, SourceMgr, Diags, LangOpts, Target.get());
+
+ auto DE = FileMgr.getOptionalDirectoryRef(".");
+ assert(DE);
+ auto DL = DirectoryLookup(*DE, SrcMgr::C_User, /*isFramework=*/false);
+ HeaderInfo->AddSearchPath(DL, /*isAngled=*/false);
+
+ return std::make_unique<Preprocessor>(PPOpts, Diags, LangOpts, SourceMgr,
+ *HeaderInfo, ModLoader,
+ /*IILookup=*/nullptr,
+ /*OwnsHeaderSearch=*/false);
+ }
+
+ IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> VFS;
+ FileSystemOptions FileMgrOpts;
+ FileManager FileMgr;
+ DiagnosticOptions DiagOpts;
+ DiagnosticsEngine Diags;
+ SourceManager SourceMgr;
+ std::shared_ptr<TargetOptions> TargetOpts;
+ IntrusiveRefCntPtr<TargetInfo> Target;
+ LangOptions LangOpts;
+ TrivialModuleLoader ModLoader;
+ HeaderSearchOptions HSOpts;
+ std::optional<HeaderSearch> HeaderInfo;
+ PreprocessorOptions PPOpts;
+};
+
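+// Directives such as #line, #ident, and most #pragmas don't change the
+// meaning of the translation unit, so the tracer should treat them as
+// trivial.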
+TEST_F(NoTrivialPPDirectiveTracerTest, TrivialDirective) {
+ const char *source = R"(
+ #line 7
+ # 1 __FILE__ 1 3
+ #ident "$Header:$"
+ #pragma comment(lib, "msvcrt.lib")
+ #pragma mark LLVM's world
+ #pragma detect_mismatch("test", "1")
+ #pragma clang __debug dump Test
+ #pragma message "test"
+ #pragma GCC warning "Foo"
+ #pragma GCC error "Foo"
+ #pragma gcc diagnostic push
+ #pragma gcc diagnostic pop
+ #pragma GCC diagnostic ignored "-Wframe-larger-than"
+ #pragma OPENCL EXTENSION __cl_clang_variadic_functions : enable
+ #pragma warning(push)
+ #pragma warning(pop)
+ #pragma execution_character_set(push, "UTF-8")
+ #pragma execution_character_set(pop)
+ #pragma clang assume_nonnull begin
+ #pragma clang assume_nonnull end
+ int foo;
+ )";
+ std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
+ PP->Initialize(*Target);
+ PP->EnterMainSourceFile();
+ Token Tok;
+ PP->Lex(Tok);
+ EXPECT_FALSE(PP->hasSeenNoTrivialPPDirective());
+}
+
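+// Directives that can alter the translation unit (#include, #define, #undef,
+// conditional inclusion) are non-trivial and must set the flag.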
+TEST_F(NoTrivialPPDirectiveTracerTest, IncludeDirective) {
+ const char *source = R"(
+ #include "header.h"
+ int foo;
+ )";
+ const char *header = R"(
+ #ifndef HEADER_H
+ #define HEADER_H
+ #endif // HEADER_H
+ )";
+ std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
+ addFile(header, "header.h");
+ PP->Initialize(*Target);
+ PP->EnterMainSourceFile();
+ Token Tok;
+ PP->Lex(Tok);
+ EXPECT_TRUE(PP->hasSeenNoTrivialPPDirective());
+}
+
+TEST_F(NoTrivialPPDirectiveTracerTest, DefineDirective) {
+ const char *source = R"(
+ #define FOO
+ int foo;
+ )";
+ std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
+ PP->Initialize(*Target);
+ PP->EnterMainSourceFile();
+ Token Tok;
+ PP->Lex(Tok);
+ EXPECT_TRUE(PP->hasSeenNoTrivialPPDirective());
+}
+
+TEST_F(NoTrivialPPDirectiveTracerTest, UnDefineDirective) {
+ const char *source = R"(
+ #undef FOO
+ int foo;
+ )";
+ std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
+ PP->Initialize(*Target);
+ PP->setPredefines("#define FOO");
+ PP->EnterMainSourceFile();
+ Token Tok;
+ PP->Lex(Tok);
+ EXPECT_TRUE(PP->hasSeenNoTrivialPPDirective());
+}
+
+TEST_F(NoTrivialPPDirectiveTracerTest, IfDefinedDirective) {
+ const char *source = R"(
+ #if defined(FOO)
+ #endif
+ int foo;
+ )";
+ std::unique_ptr<Preprocessor> PP = getPreprocessor(source, Language::CXX);
+ PP->Initialize(*Target);
+ PP->setPredefines("#define FOO");
+ PP->EnterMainSourceFile();
+ Token Tok;
+ PP->Lex(Tok);
+ EXPECT_TRUE(PP->hasSeenNoTrivialPPDirective());
+}
+
+} // namespace
diff --git a/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp b/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp
index 44f6b04..44c0978 100644
--- a/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp
+++ b/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp
@@ -180,7 +180,7 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseDTClausesTest) {
// First Descriptor Table with 4 elements
RootElement Elem = Elements[0].getElement();
ASSERT_TRUE(std::holds_alternative<DescriptorTableClause>(Elem));
- ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ClauseType::CBuffer);
+ ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ResourceClass::CBuffer);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Reg.ViewType,
RegisterType::BReg);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Reg.Number, 0u);
@@ -193,7 +193,7 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseDTClausesTest) {
Elem = Elements[1].getElement();
ASSERT_TRUE(std::holds_alternative<DescriptorTableClause>(Elem));
- ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ClauseType::SRV);
+ ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ResourceClass::SRV);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Reg.ViewType,
RegisterType::TReg);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Reg.Number, 42u);
@@ -205,7 +205,7 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseDTClausesTest) {
Elem = Elements[2].getElement();
ASSERT_TRUE(std::holds_alternative<DescriptorTableClause>(Elem));
- ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ClauseType::Sampler);
+ ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ResourceClass::Sampler);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Reg.ViewType,
RegisterType::SReg);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Reg.Number, 987u);
@@ -218,7 +218,7 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseDTClausesTest) {
Elem = Elements[3].getElement();
ASSERT_TRUE(std::holds_alternative<DescriptorTableClause>(Elem));
- ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ClauseType::UAV);
+ ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ResourceClass::UAV);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Reg.ViewType,
RegisterType::UReg);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Reg.Number, 4294967294u);
@@ -445,7 +445,7 @@ TEST_F(ParseHLSLRootSignatureTest, ValidSamplerFlagsTest) {
auto Elements = Parser.getElements();
RootElement Elem = Elements[0].getElement();
ASSERT_TRUE(std::holds_alternative<DescriptorTableClause>(Elem));
- ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ClauseType::Sampler);
+ ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ResourceClass::Sampler);
auto ValidSamplerFlags =
llvm::dxbc::DescriptorRangeFlags::DescriptorsVolatile;
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Flags, ValidSamplerFlags);
@@ -591,7 +591,7 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseRootDescriptorsTest) {
RootElement Elem = Elements[0].getElement();
ASSERT_TRUE(std::holds_alternative<RootDescriptor>(Elem));
- ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, DescriptorType::CBuffer);
+ ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, ResourceClass::CBuffer);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Reg.ViewType, RegisterType::BReg);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Reg.Number, 0u);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Space, 0u);
@@ -602,7 +602,7 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseRootDescriptorsTest) {
Elem = Elements[1].getElement();
ASSERT_TRUE(std::holds_alternative<RootDescriptor>(Elem));
- ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, DescriptorType::SRV);
+ ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, ResourceClass::SRV);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Reg.ViewType, RegisterType::TReg);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Reg.Number, 42u);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Space, 4u);
@@ -616,7 +616,7 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseRootDescriptorsTest) {
Elem = Elements[2].getElement();
ASSERT_TRUE(std::holds_alternative<RootDescriptor>(Elem));
- ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, DescriptorType::UAV);
+ ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, ResourceClass::UAV);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Reg.ViewType, RegisterType::UReg);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Reg.Number, 34893247u);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Space, 0u);
@@ -628,7 +628,7 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseRootDescriptorsTest) {
RootDescriptorFlags::DataVolatile);
Elem = Elements[3].getElement();
- ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, DescriptorType::CBuffer);
+ ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, ResourceClass::CBuffer);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Reg.ViewType, RegisterType::BReg);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Reg.Number, 0u);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Space, 0u);
@@ -696,17 +696,17 @@ TEST_F(ParseHLSLRootSignatureTest, ValidVersion10Test) {
auto DefRootDescriptorFlag = llvm::dxbc::RootDescriptorFlags::DataVolatile;
RootElement Elem = Elements[0].getElement();
ASSERT_TRUE(std::holds_alternative<RootDescriptor>(Elem));
- ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, DescriptorType::CBuffer);
+ ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, ResourceClass::CBuffer);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Flags, DefRootDescriptorFlag);
Elem = Elements[1].getElement();
ASSERT_TRUE(std::holds_alternative<RootDescriptor>(Elem));
- ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, DescriptorType::SRV);
+ ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, ResourceClass::SRV);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Flags, DefRootDescriptorFlag);
Elem = Elements[2].getElement();
ASSERT_TRUE(std::holds_alternative<RootDescriptor>(Elem));
- ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, DescriptorType::UAV);
+ ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, ResourceClass::UAV);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Flags, DefRootDescriptorFlag);
auto ValidNonSamplerFlags =
@@ -714,22 +714,22 @@ TEST_F(ParseHLSLRootSignatureTest, ValidVersion10Test) {
llvm::dxbc::DescriptorRangeFlags::DataVolatile;
Elem = Elements[3].getElement();
ASSERT_TRUE(std::holds_alternative<DescriptorTableClause>(Elem));
- ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ClauseType::CBuffer);
+ ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ResourceClass::CBuffer);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Flags, ValidNonSamplerFlags);
Elem = Elements[4].getElement();
ASSERT_TRUE(std::holds_alternative<DescriptorTableClause>(Elem));
- ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ClauseType::SRV);
+ ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ResourceClass::SRV);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Flags, ValidNonSamplerFlags);
Elem = Elements[5].getElement();
ASSERT_TRUE(std::holds_alternative<DescriptorTableClause>(Elem));
- ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ClauseType::UAV);
+ ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ResourceClass::UAV);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Flags, ValidNonSamplerFlags);
Elem = Elements[6].getElement();
ASSERT_TRUE(std::holds_alternative<DescriptorTableClause>(Elem));
- ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ClauseType::Sampler);
+ ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ResourceClass::Sampler);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Flags,
llvm::dxbc::DescriptorRangeFlags::DescriptorsVolatile);
@@ -767,43 +767,43 @@ TEST_F(ParseHLSLRootSignatureTest, ValidVersion11Test) {
auto Elements = Parser.getElements();
RootElement Elem = Elements[0].getElement();
ASSERT_TRUE(std::holds_alternative<RootDescriptor>(Elem));
- ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, DescriptorType::CBuffer);
+ ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, ResourceClass::CBuffer);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Flags,
llvm::dxbc::RootDescriptorFlags::DataStaticWhileSetAtExecute);
Elem = Elements[1].getElement();
ASSERT_TRUE(std::holds_alternative<RootDescriptor>(Elem));
- ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, DescriptorType::SRV);
+ ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, ResourceClass::SRV);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Flags,
llvm::dxbc::RootDescriptorFlags::DataStaticWhileSetAtExecute);
Elem = Elements[2].getElement();
ASSERT_TRUE(std::holds_alternative<RootDescriptor>(Elem));
- ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, DescriptorType::UAV);
+ ASSERT_EQ(std::get<RootDescriptor>(Elem).Type, ResourceClass::UAV);
ASSERT_EQ(std::get<RootDescriptor>(Elem).Flags,
llvm::dxbc::RootDescriptorFlags::DataVolatile);
Elem = Elements[3].getElement();
ASSERT_TRUE(std::holds_alternative<DescriptorTableClause>(Elem));
- ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ClauseType::CBuffer);
+ ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ResourceClass::CBuffer);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Flags,
llvm::dxbc::DescriptorRangeFlags::DataStaticWhileSetAtExecute);
Elem = Elements[4].getElement();
ASSERT_TRUE(std::holds_alternative<DescriptorTableClause>(Elem));
- ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ClauseType::SRV);
+ ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ResourceClass::SRV);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Flags,
llvm::dxbc::DescriptorRangeFlags::DataStaticWhileSetAtExecute);
Elem = Elements[5].getElement();
ASSERT_TRUE(std::holds_alternative<DescriptorTableClause>(Elem));
- ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ClauseType::UAV);
+ ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ResourceClass::UAV);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Flags,
llvm::dxbc::DescriptorRangeFlags::DataVolatile);
Elem = Elements[6].getElement();
ASSERT_TRUE(std::holds_alternative<DescriptorTableClause>(Elem));
- ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ClauseType::Sampler);
+ ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Type, ResourceClass::Sampler);
ASSERT_EQ(std::get<DescriptorTableClause>(Elem).Flags,
llvm::dxbc::DescriptorRangeFlags::None);
diff --git a/clang/unittests/Sema/CMakeLists.txt b/clang/unittests/Sema/CMakeLists.txt
index acc76c9..b61ed8c 100644
--- a/clang/unittests/Sema/CMakeLists.txt
+++ b/clang/unittests/Sema/CMakeLists.txt
@@ -1,4 +1,8 @@
-add_clang_unittest(SemaTests
+# SemaTests is a distinct binary to enable fast incremental builds.
+# Merging it into AllClangTests would result in one less large,
+# statically linked binary, but separating it out is the right
+# tradeoff today.
+add_distinct_clang_unittest(SemaTests
ExternalSemaSourceTest.cpp
CodeCompleteTest.cpp
HeuristicResolverTest.cpp
diff --git a/clang/unittests/Sema/HeuristicResolverTest.cpp b/clang/unittests/Sema/HeuristicResolverTest.cpp
index ee434f7..883a4e2 100644
--- a/clang/unittests/Sema/HeuristicResolverTest.cpp
+++ b/clang/unittests/Sema/HeuristicResolverTest.cpp
@@ -8,6 +8,7 @@
#include "clang/Sema/HeuristicResolver.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Tooling/Tooling.h"
#include "gmock/gmock-matchers.h"
#include "gtest/gtest.h"
@@ -29,7 +30,7 @@ MATCHER_P2(matchAdapter, MatcherForElement, MatchFunction, "matchAdapter") {
template <typename InputNode>
using ResolveFnT = std::function<std::vector<const NamedDecl *>(
- const HeuristicResolver *, const InputNode *)>;
+ const HeuristicResolver *, InputNode)>;
// Test heuristic resolution on `Code` using the resolution procedure
// `ResolveFn`, which takes a `HeuristicResolver` and an input AST node of type
@@ -37,10 +38,23 @@ using ResolveFnT = std::function<std::vector<const NamedDecl *>(
// `InputMatcher` should be an AST matcher that matches a single node to pass as
// input to `ResolveFn`, bound to the ID "input". `OutputMatchers` should be AST
// matchers that each match a single node, bound to the ID "output".
-template <typename InputNode, typename InputMatcher, typename... OutputMatchers>
-void expectResolution(llvm::StringRef Code, ResolveFnT<InputNode> ResolveFn,
+template <typename InputNode, typename ParamT, typename InputMatcher,
+ typename... OutputMatchers>
+void expectResolution(llvm::StringRef Code, ResolveFnT<ParamT> ResolveFn,
const InputMatcher &IM, const OutputMatchers &...OMS) {
- auto TU = tooling::buildASTFromCodeWithArgs(Code, {"-std=c++20"});
+ auto TU = tooling::buildASTFromCodeWithArgs(
+ Code, {"-std=c++23"}, "input.cc", "clang-tool",
+ std::make_shared<PCHContainerOperations>(),
+ tooling::getClangStripDependencyFileAdjuster(),
+ tooling::FileContentMappings(), nullptr, llvm::vfs::getRealFileSystem(),
+ CaptureDiagsKind::All);
+
+ for (const auto &D : TU->storedDiagnostics()) {
+ EXPECT_TRUE(D.getLevel() < DiagnosticsEngine::Error)
+ << "Unexpected error diagnostic while building AST for test code: "
+ << D.getMessage();
+ }
+
auto &Ctx = TU->getASTContext();
auto InputMatches = match(IM, Ctx);
ASSERT_EQ(1u, InputMatches.size());
@@ -59,7 +73,11 @@ void expectResolution(llvm::StringRef Code, ResolveFnT<InputNode> ResolveFn,
};
HeuristicResolver H(Ctx);
- auto Results = ResolveFn(&H, Input);
+ std::vector<const NamedDecl *> Results;
+ if constexpr (std::is_pointer_v<ParamT>)
+ Results = ResolveFn(&H, Input);
+ else
+ Results = ResolveFn(&H, *Input);
EXPECT_THAT(Results, ElementsAre(matchAdapter(OMS, OutputNodeMatches)...));
}
@@ -71,8 +89,8 @@ void expectResolution(llvm::StringRef Code,
HeuristicResolver::*ResolveFn)(const InputNode *)
const,
const InputMatcher &IM, const OutputMatchers &...OMS) {
- expectResolution(Code, ResolveFnT<InputNode>(std::mem_fn(ResolveFn)), IM,
- OMS...);
+ expectResolution<InputNode>(
+ Code, ResolveFnT<const InputNode *>(std::mem_fn(ResolveFn)), IM, OMS...);
}
TEST(HeuristicResolver, MemberExpr) {
@@ -444,6 +462,23 @@ TEST(HeuristicResolver, MemberExpr_DefaultTemplateArgument_Recursive) {
cxxMethodDecl(hasName("foo")).bind("output"));
}
+TEST(HeuristicResolver, MemberExpr_ExplicitObjectParameter) {
+ std::string Code = R"cpp(
+ struct Foo {
+ int m_int;
+
+ int bar(this auto&& self) {
+ return self.m_int;
+ }
+ };
+ )cpp";
+  // Test resolution of "m_int" in "self.m_int".
+ expectResolution(
+ Code, &HeuristicResolver::resolveMemberExpr,
+ cxxDependentScopeMemberExpr(hasMemberName("m_int")).bind("input"),
+ fieldDecl(hasName("m_int")).bind("output"));
+}
+
TEST(HeuristicResolver, DeclRefExpr_StaticMethod) {
std::string Code = R"cpp(
template <typename T>
@@ -643,15 +678,16 @@ TEST(HeuristicResolver, NestedNameSpecifier) {
// expected by expectResolution() (returning a vector of decls).
ResolveFnT<NestedNameSpecifier> ResolveFn =
[](const HeuristicResolver *H,
- const NestedNameSpecifier *NNS) -> std::vector<const NamedDecl *> {
+ NestedNameSpecifier NNS) -> std::vector<const NamedDecl *> {
return {H->resolveNestedNameSpecifierToType(NNS)->getAsCXXRecordDecl()};
};
- expectResolution(Code, ResolveFn,
- nestedNameSpecifier(hasPrefix(specifiesType(hasDeclaration(
- classTemplateDecl(hasName("A"))))))
- .bind("input"),
- classTemplateDecl(has(cxxRecordDecl(
- has(cxxRecordDecl(hasName("B")).bind("output"))))));
+ expectResolution<NestedNameSpecifier>(
+ Code, ResolveFn,
+ nestedNameSpecifier(hasPrefix(specifiesType(
+ hasDeclaration(classTemplateDecl(hasName("A"))))))
+ .bind("input"),
+ classTemplateDecl(
+ has(cxxRecordDecl(has(cxxRecordDecl(hasName("B")).bind("output"))))));
}
TEST(HeuristicResolver, TemplateSpecializationType) {
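
The generalization of expectResolution above hinges on one trick: the helper now dispatches at compile time on whether the resolution callback takes its input node by pointer (the pre-existing resolve* members) or by value (the new value-semantics NestedNameSpecifier overload). A minimal, self-contained sketch of that `if constexpr` dispatch, using a hypothetical Node type rather than the real resolver API:

    #include <functional>
    #include <type_traits>

    struct Node { int Id = 0; }; // stand-in for an AST node type

    template <typename ParamT>
    using CallbackT = std::function<int(ParamT)>;

    // Pointer-taking callbacks receive the node as-is; value-taking
    // callbacks receive a dereferenced copy, mirroring the updated test.
    template <typename ParamT>
    int dispatch(CallbackT<ParamT> Fn, Node *Input) {
      if constexpr (std::is_pointer_v<ParamT>)
        return Fn(Input);
      else
        return Fn(*Input);
    }

    int main() {
      Node N{42};
      dispatch<Node *>([](Node *P) { return P->Id; }, &N); // pointer form
      dispatch<Node>([](Node V) { return V.Id; }, &N);     // value form
    }
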
diff --git a/clang/unittests/StaticAnalyzer/ExprEngineVisitTest.cpp b/clang/unittests/StaticAnalyzer/ExprEngineVisitTest.cpp
index 12be228..ab4b8c7 100644
--- a/clang/unittests/StaticAnalyzer/ExprEngineVisitTest.cpp
+++ b/clang/unittests/StaticAnalyzer/ExprEngineVisitTest.cpp
@@ -55,11 +55,13 @@ public:
", Stmt = " + S->getStmtClassName());
}
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const {
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const {
emitErrorReport(C, Bug,
"checkBind: Loc = " + dumpToString(Loc) +
", Val = " + dumpToString(Val) +
- ", Stmt = " + S->getStmtClassName());
+ ", Stmt = " + S->getStmtClassName() +
+ ", AtDeclInit = " + (AtDeclInit ? "true" : "false"));
}
private:
@@ -140,7 +142,7 @@ TEST(ExprEngineVisitTest, checkLocationAndBind) {
"Stmt = ImplicitCastExpr";
std::string BindMsg =
"checkBind: Loc = &MyClassWrite, Val = lazyCompoundVal{0x0,MyClassRead}, "
- "Stmt = CXXOperatorCallExpr";
+ "Stmt = CXXOperatorCallExpr, AtDeclInit = false";
std::size_t LocPos = Diags.find(LocMsg);
std::size_t BindPos = Diags.find(BindMsg);
EXPECT_NE(LocPos, std::string::npos);
@@ -150,4 +152,20 @@ TEST(ExprEngineVisitTest, checkLocationAndBind) {
EXPECT_TRUE(LocPos > BindPos);
}
+TEST(ExprEngineVisitTest, checkLocationAndBindInitialization) {
+ std::string Diags;
+ EXPECT_TRUE(runCheckerOnCode<addMemAccessChecker>(R"(
+ class MyClass{
+ public:
+ int Value;
+ };
+ void top(MyClass param) {
+ MyClass MyClassWrite = param;
+ }
+ )",
+ Diags));
+
+ EXPECT_TRUE(StringRef(Diags).contains("AtDeclInit = true"));
+}
+
} // namespace
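
Both checkers in this file adopt the extended checkBind signature, whose new AtDeclInit flag tells a checker whether the bind comes from a declaration's initializer rather than an ordinary store. A hedged fragment (checker boilerplate omitted; this illustrates the flag, not a complete checker) of how a callback might use it:

    // Assumes the usual Checker<check::Bind> scaffolding seen above.
    void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
                   CheckerContext &C) const {
      // React only to `T x = init;`-style binds; skip plain assignments.
      if (!AtDeclInit)
        return;
      // ... inspect Loc/Val for the property under test ...
    }
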
diff --git a/clang/unittests/StaticAnalyzer/SValTest.cpp b/clang/unittests/StaticAnalyzer/SValTest.cpp
index 58e9a8d..1f4a18b 100644
--- a/clang/unittests/StaticAnalyzer/SValTest.cpp
+++ b/clang/unittests/StaticAnalyzer/SValTest.cpp
@@ -61,7 +61,8 @@ using SVals = llvm::StringMap<SVal>;
/// can test whatever we gathered.
class SValCollector : public Checker<check::Bind, check::EndAnalysis> {
public:
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const {
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const {
// Skip instantly if we finished testing.
// Also, we care only for binds happening in variable initializations.
if (Tested || !isa<DeclStmt>(S))
@@ -301,13 +302,13 @@ void foo(int x) {
ASSERT_FALSE(B.getType(Context).isNull());
const auto *BRecordType = dyn_cast<RecordType>(B.getType(Context));
ASSERT_NE(BRecordType, nullptr);
- EXPECT_EQ("TestStruct", BRecordType->getDecl()->getName());
+ EXPECT_EQ("TestStruct", BRecordType->getOriginalDecl()->getName());
SVal C = getByName("c");
ASSERT_FALSE(C.getType(Context).isNull());
const auto *CRecordType = dyn_cast<RecordType>(C.getType(Context));
ASSERT_NE(CRecordType, nullptr);
- EXPECT_EQ("TestUnion", CRecordType->getDecl()->getName());
+ EXPECT_EQ("TestUnion", CRecordType->getOriginalDecl()->getName());
auto D = getByName("d").getAs<nonloc::CompoundVal>();
ASSERT_TRUE(D.has_value());
@@ -319,12 +320,9 @@ void foo(int x) {
ASSERT_TRUE(LD.has_value());
auto LDT = LD->getType(Context);
ASSERT_FALSE(LDT.isNull());
- const auto *DElaboratedType = dyn_cast<ElaboratedType>(LDT);
- ASSERT_NE(DElaboratedType, nullptr);
- const auto *DRecordType =
- dyn_cast<RecordType>(DElaboratedType->getNamedType());
+ const auto *DRecordType = dyn_cast<RecordType>(LDT);
ASSERT_NE(DRecordType, nullptr);
- EXPECT_EQ("TestStruct", DRecordType->getDecl()->getName());
+ EXPECT_EQ("TestStruct", DRecordType->getOriginalDecl()->getName());
}
SVAL_TEST(GetStringType, R"(
@@ -353,7 +351,7 @@ void TestClass::foo() {
ASSERT_NE(APtrTy, nullptr);
const auto *ARecordType = dyn_cast<RecordType>(APtrTy->getPointeeType());
ASSERT_NE(ARecordType, nullptr);
- EXPECT_EQ("TestClass", ARecordType->getDecl()->getName());
+ EXPECT_EQ("TestClass", ARecordType->getOriginalDecl()->getName());
}
SVAL_TEST(GetFunctionPtrType, R"(
diff --git a/clang/unittests/Tooling/LookupTest.cpp b/clang/unittests/Tooling/LookupTest.cpp
index acd1714..4c49ebe 100644
--- a/clang/unittests/Tooling/LookupTest.cpp
+++ b/clang/unittests/Tooling/LookupTest.cpp
@@ -193,16 +193,16 @@ TEST(LookupTest, replaceNestedClassName) {
auto replaceTypeLoc = [&](const NamedDecl *ND, SourceLocation Loc,
StringRef ReplacementString) {
return tooling::replaceNestedName(
- nullptr, Loc, Visitor.DeclStack.back()->getDeclContext(), ND,
- ReplacementString);
+ /*Use=*/std::nullopt, Loc, Visitor.DeclStack.back()->getDeclContext(),
+ ND, ReplacementString);
};
Visitor.OnRecordTypeLoc = [&](RecordTypeLoc Type) {
// Filter Types by name since there are other `RecordTypeLoc` in the test
// file.
- if (Type.getDecl()->getQualifiedNameAsString() == "a::b::Foo") {
- EXPECT_EQ("x::Bar", replaceTypeLoc(Type.getDecl(), Type.getBeginLoc(),
- "::a::x::Bar"));
+ if (Type.getOriginalDecl()->getQualifiedNameAsString() == "a::b::Foo") {
+ EXPECT_EQ("x::Bar", replaceTypeLoc(Type.getOriginalDecl(),
+ Type.getBeginLoc(), "::a::x::Bar"));
}
};
Visitor.runOver("namespace a { namespace b {\n"
@@ -214,7 +214,7 @@ TEST(LookupTest, replaceNestedClassName) {
// Filter Types by name since there are other `RecordTypeLoc` in the test
// file.
// `a::b::Foo` in using shadow decl is not `TypeLoc`.
- auto *TD = Type.getFoundDecl()->getTargetDecl();
+ auto *TD = Type.getDecl()->getTargetDecl();
if (TD->getQualifiedNameAsString() == "a::b::Foo") {
EXPECT_EQ("Bar", replaceTypeLoc(TD, Type.getBeginLoc(), "::a::x::Bar"));
}
@@ -227,9 +227,9 @@ TEST(LookupTest, replaceNestedClassName) {
// `x::y::Foo` in c.cc [1], it should not make "Foo" at [0] ambiguous because
// it's not visible at [0].
Visitor.OnRecordTypeLoc = [&](RecordTypeLoc Type) {
- if (Type.getDecl()->getQualifiedNameAsString() == "x::y::Old") {
- EXPECT_EQ("Foo",
- replaceTypeLoc(Type.getDecl(), Type.getBeginLoc(), "::x::Foo"));
+ if (Type.getOriginalDecl()->getQualifiedNameAsString() == "x::y::Old") {
+ EXPECT_EQ("Foo", replaceTypeLoc(Type.getOriginalDecl(),
+ Type.getBeginLoc(), "::x::Foo"));
}
};
Visitor.runOver(R"(
diff --git a/clang/unittests/Tooling/QualTypeNamesTest.cpp b/clang/unittests/Tooling/QualTypeNamesTest.cpp
index bcf7d89..1139392 100644
--- a/clang/unittests/Tooling/QualTypeNamesTest.cpp
+++ b/clang/unittests/Tooling/QualTypeNamesTest.cpp
@@ -295,9 +295,9 @@ TEST(QualTypeNameTest, TemplatedClass) {
auto *A2 = *ASpec;
// Their type names follow the records.
- QualType A1RecordTy = Context.getRecordType(A1);
+ CanQualType A1RecordTy = Context.getCanonicalTagType(A1);
EXPECT_EQ(getFullyQualifiedName(A1RecordTy), "A<1>");
- QualType A2RecordTy = Context.getRecordType(A2);
+ CanQualType A2RecordTy = Context.getCanonicalTagType(A2);
EXPECT_EQ(getFullyQualifiedName(A2RecordTy), "A<2U>");
// getTemplateSpecializationType() gives types that print the integral
@@ -305,13 +305,13 @@ TEST(QualTypeNameTest, TemplatedClass) {
TemplateArgument Args1[] = {
{Context, llvm::APSInt::getUnsigned(1u), Context.UnsignedIntTy}};
QualType A1TemplateSpecTy = Context.getTemplateSpecializationType(
- TemplateName(A), Args1, Args1, A1RecordTy);
+ ElaboratedTypeKeyword::None, TemplateName(A), Args1, Args1, A1RecordTy);
EXPECT_EQ(A1TemplateSpecTy.getAsString(), "A<1>");
TemplateArgument Args2[] = {
{Context, llvm::APSInt::getUnsigned(2u), Context.UnsignedIntTy}};
QualType A2TemplateSpecTy = Context.getTemplateSpecializationType(
- TemplateName(A), Args2, Args2, A2RecordTy);
+ ElaboratedTypeKeyword::None, TemplateName(A), Args2, Args2, A2RecordTy);
EXPECT_EQ(A2TemplateSpecTy.getAsString(), "A<2>");
// Find A<1>::B and its specialization B<3>.
@@ -321,21 +321,19 @@ TEST(QualTypeNameTest, TemplatedClass) {
auto A1BSpec = A1B->spec_begin();
ASSERT_NE(A1BSpec, A1B->spec_end());
auto *A1B3 = *A1BSpec;
- QualType A1B3RecordTy = Context.getRecordType(A1B3);
+ CanQualType A1B3RecordTy = Context.getCanonicalTagType(A1B3);
EXPECT_EQ(getFullyQualifiedName(A1B3RecordTy), "A<1>::B<3>");
// Construct A<1>::B<3> and check name.
+ NestedNameSpecifier A1Nested(A1TemplateSpecTy.getTypePtr());
+ TemplateName A1B3Name = Context.getQualifiedTemplateName(
+ A1Nested, /*TemplateKeyword=*/false, TemplateName(A1B));
+
TemplateArgument Args3[] = {
{Context, llvm::APSInt::getUnsigned(3u), Context.UnsignedIntTy}};
QualType A1B3TemplateSpecTy = Context.getTemplateSpecializationType(
- TemplateName(A1B), Args3, Args3, A1B3RecordTy);
- EXPECT_EQ(A1B3TemplateSpecTy.getAsString(), "B<3>");
-
- NestedNameSpecifier *A1Nested = NestedNameSpecifier::Create(
- Context, nullptr, A1TemplateSpecTy.getTypePtr());
- QualType A1B3ElaboratedTy = Context.getElaboratedType(
- ElaboratedTypeKeyword::None, A1Nested, A1B3TemplateSpecTy);
- EXPECT_EQ(A1B3ElaboratedTy.getAsString(), "A<1>::B<3>");
+ ElaboratedTypeKeyword::None, A1B3Name, Args3, Args3, A1B3RecordTy);
+ EXPECT_EQ(A1B3TemplateSpecTy.getAsString(), "A<1>::B<3>");
// Find A<2u>::B and its specialization B<4u>.
auto *A2B =
@@ -344,21 +342,19 @@ TEST(QualTypeNameTest, TemplatedClass) {
auto A2BSpec = A2B->spec_begin();
ASSERT_NE(A2BSpec, A2B->spec_end());
auto *A2B4 = *A2BSpec;
- QualType A2B4RecordTy = Context.getRecordType(A2B4);
+ CanQualType A2B4RecordTy = Context.getCanonicalTagType(A2B4);
EXPECT_EQ(getFullyQualifiedName(A2B4RecordTy), "A<2U>::B<4U>");
// Construct A<2>::B<4> and check name.
+ NestedNameSpecifier A2Nested(A2TemplateSpecTy.getTypePtr());
+ TemplateName A2B4Name = Context.getQualifiedTemplateName(
+ A2Nested, /*TemplateKeyword=*/false, TemplateName(A2B));
+
TemplateArgument Args4[] = {
{Context, llvm::APSInt::getUnsigned(4u), Context.UnsignedIntTy}};
QualType A2B4TemplateSpecTy = Context.getTemplateSpecializationType(
- TemplateName(A2B), Args4, Args4, A2B4RecordTy);
- EXPECT_EQ(A2B4TemplateSpecTy.getAsString(), "B<4>");
-
- NestedNameSpecifier *A2Nested = NestedNameSpecifier::Create(
- Context, nullptr, A2TemplateSpecTy.getTypePtr());
- QualType A2B4ElaboratedTy = Context.getElaboratedType(
- ElaboratedTypeKeyword::None, A2Nested, A2B4TemplateSpecTy);
- EXPECT_EQ(A2B4ElaboratedTy.getAsString(), "A<2>::B<4>");
+ ElaboratedTypeKeyword::None, A2B4Name, Args4, Args4, A2B4RecordTy);
+ EXPECT_EQ(A2B4TemplateSpecTy.getAsString(), "A<2>::B<4>");
}
TEST(QualTypeNameTest, AnonStrucs) {
diff --git a/clang/unittests/Tooling/RangeSelectorTest.cpp b/clang/unittests/Tooling/RangeSelectorTest.cpp
index 12f7a8c..adf5e74 100644
--- a/clang/unittests/Tooling/RangeSelectorTest.cpp
+++ b/clang/unittests/Tooling/RangeSelectorTest.cpp
@@ -474,15 +474,15 @@ TEST(RangeSelectorTest, NameOpTypeLoc) {
// Matches declaration of `a`
TestMatch MatchA = matchCode(
Code, varDecl(hasName("a"), hasTypeLoc(typeLoc().bind(CtorTy))));
- EXPECT_THAT_EXPECTED(select(name(CtorTy), MatchA), HasValue("Foo"));
+ EXPECT_THAT_EXPECTED(select(name(CtorTy), MatchA), HasValue("ns::Foo"));
// Matches call of Foo(int)
TestMatch MatchB = matchCode(
Code, cxxFunctionalCastExpr(hasTypeLoc(typeLoc().bind(CtorTy))));
- EXPECT_THAT_EXPECTED(select(name(CtorTy), MatchB), HasValue("Foo"));
+ EXPECT_THAT_EXPECTED(select(name(CtorTy), MatchB), HasValue("ns::Foo"));
// Matches call of Foo(int, int)
TestMatch MatchC = matchCode(
Code, cxxTemporaryObjectExpr(hasTypeLoc(typeLoc().bind(CtorTy))));
- EXPECT_THAT_EXPECTED(select(name(CtorTy), MatchC), HasValue("Foo"));
+ EXPECT_THAT_EXPECTED(select(name(CtorTy), MatchC), HasValue("ns::Foo"));
}
TEST(RangeSelectorTest, NameOpTemplateSpecializationTypeLoc) {
diff --git a/clang/unittests/Tooling/RecursiveASTVisitorTestTypeLocVisitor.cpp b/clang/unittests/Tooling/RecursiveASTVisitorTestTypeLocVisitor.cpp
index eec628c..7a35661 100644
--- a/clang/unittests/Tooling/RecursiveASTVisitorTestTypeLocVisitor.cpp
+++ b/clang/unittests/Tooling/RecursiveASTVisitorTestTypeLocVisitor.cpp
@@ -22,13 +22,13 @@ public:
TEST(RecursiveASTVisitor, VisitsBaseClassDeclarations) {
TypeLocVisitor Visitor;
- Visitor.ExpectMatch("class X", 1, 30);
+ Visitor.ExpectMatch("X", 1, 30);
EXPECT_TRUE(Visitor.runOver("class X {}; class Y : public X {};"));
}
TEST(RecursiveASTVisitor, VisitsCXXBaseSpecifiersOfForwardDeclaredClass) {
TypeLocVisitor Visitor;
- Visitor.ExpectMatch("class X", 3, 18);
+ Visitor.ExpectMatch("X", 3, 18);
EXPECT_TRUE(Visitor.runOver(
"class Y;\n"
"class X {};\n"
@@ -37,7 +37,7 @@ TEST(RecursiveASTVisitor, VisitsCXXBaseSpecifiersOfForwardDeclaredClass) {
TEST(RecursiveASTVisitor, VisitsCXXBaseSpecifiersWithIncompleteInnerClass) {
TypeLocVisitor Visitor;
- Visitor.ExpectMatch("class X", 2, 18);
+ Visitor.ExpectMatch("X", 2, 18);
EXPECT_TRUE(Visitor.runOver(
"class X {};\n"
"class Y : public X { class Z; };"));
@@ -45,7 +45,7 @@ TEST(RecursiveASTVisitor, VisitsCXXBaseSpecifiersWithIncompleteInnerClass) {
TEST(RecursiveASTVisitor, VisitsCXXBaseSpecifiersOfSelfReferentialType) {
TypeLocVisitor Visitor;
- Visitor.ExpectMatch("X<Y>", 2, 18, 2);
+ Visitor.ExpectMatch("X<Y>", 2, 18);
EXPECT_TRUE(Visitor.runOver(
"template<typename T> class X {};\n"
"class Y : public X<Y> {};"));
@@ -53,7 +53,7 @@ TEST(RecursiveASTVisitor, VisitsCXXBaseSpecifiersOfSelfReferentialType) {
TEST(RecursiveASTVisitor, VisitsClassTemplateTypeParmDefaultArgument) {
TypeLocVisitor Visitor;
- Visitor.ExpectMatch("class X", 2, 23);
+ Visitor.ExpectMatch("X", 2, 23);
EXPECT_TRUE(Visitor.runOver(
"class X;\n"
"template<typename T = X> class Y;\n"
@@ -62,7 +62,7 @@ TEST(RecursiveASTVisitor, VisitsClassTemplateTypeParmDefaultArgument) {
TEST(RecursiveASTVisitor, VisitsCompoundLiteralType) {
TypeLocVisitor Visitor;
- Visitor.ExpectMatch("struct S", 1, 26);
+ Visitor.ExpectMatch("struct S", 1, 19);
EXPECT_TRUE(Visitor.runOver(
"int f() { return (struct S { int a; }){.a = 0}.a; }",
TypeLocVisitor::Lang_C));
diff --git a/clang/unittests/Tooling/RecursiveASTVisitorTests/MemberPointerTypeLoc.cpp b/clang/unittests/Tooling/RecursiveASTVisitorTests/MemberPointerTypeLoc.cpp
index 587a00d..88cebb7 100644
--- a/clang/unittests/Tooling/RecursiveASTVisitorTests/MemberPointerTypeLoc.cpp
+++ b/clang/unittests/Tooling/RecursiveASTVisitorTests/MemberPointerTypeLoc.cpp
@@ -24,7 +24,7 @@ public:
bool VisitRecordTypeLoc(RecordTypeLoc RTL) override {
if (!RTL)
return true;
- Match(RTL.getDecl()->getName(), RTL.getNameLoc());
+ Match(RTL.getOriginalDecl()->getName(), RTL.getNameLoc());
return true;
}
};
diff --git a/clang/unittests/Tooling/RecursiveASTVisitorTests/NestedNameSpecifiers.cpp b/clang/unittests/Tooling/RecursiveASTVisitorTests/NestedNameSpecifiers.cpp
index 23a2df4..4181cd2 100644
--- a/clang/unittests/Tooling/RecursiveASTVisitorTests/NestedNameSpecifiers.cpp
+++ b/clang/unittests/Tooling/RecursiveASTVisitorTests/NestedNameSpecifiers.cpp
@@ -18,17 +18,20 @@ public:
bool VisitRecordTypeLoc(RecordTypeLoc RTL) override {
if (!RTL)
return true;
- Match(RTL.getDecl()->getName(), RTL.getNameLoc());
+ Match(RTL.getOriginalDecl()->getName(), RTL.getNameLoc());
return true;
}
- bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) override {
- if (!NNS)
- return true;
- if (const auto *ND = dyn_cast_if_present<NamespaceDecl>(
- NNS.getNestedNameSpecifier()->getAsNamespace()))
- Match(ND->getName(), NNS.getLocalBeginLoc());
- return ExpectedLocationVisitor::TraverseNestedNameSpecifierLoc(NNS);
+ bool
+ TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc QualifierLoc) override {
+ NestedNameSpecifier Qualifier = QualifierLoc.getNestedNameSpecifier();
+ if (Qualifier.getKind() == NestedNameSpecifier::Kind::Namespace) {
+ if (const auto *ND = dyn_cast<NamespaceDecl>(
+ Qualifier.getAsNamespaceAndPrefix().Namespace))
+ Match(ND->getName(), QualifierLoc.getLocalBeginLoc());
+ }
+ return ExpectedLocationVisitor::TraverseNestedNameSpecifierLoc(
+ QualifierLoc);
}
};
diff --git a/clang/unittests/Tooling/RefactoringTest.cpp b/clang/unittests/Tooling/RefactoringTest.cpp
index aff7523e..171dc6d 100644
--- a/clang/unittests/Tooling/RefactoringTest.cpp
+++ b/clang/unittests/Tooling/RefactoringTest.cpp
@@ -747,9 +747,12 @@ TEST(Replacement, TemplatedFunctionCall) {
class NestedNameSpecifierAVisitor : public TestVisitor {
public:
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLoc) override {
- if (NNSLoc.getNestedNameSpecifier()) {
- if (const auto *NS = dyn_cast_if_present<NamespaceDecl>(
- NNSLoc.getNestedNameSpecifier()->getAsNamespace())) {
+ if (NestedNameSpecifier NNS = NNSLoc.getNestedNameSpecifier();
+ NNS.getKind() == NestedNameSpecifier::Kind::Namespace) {
+ if (const auto *NS =
+ dyn_cast<NamespaceDecl>(NNSLoc.getNestedNameSpecifier()
+ .getAsNamespaceAndPrefix()
+ .Namespace)) {
if (NS->getName() == "a") {
Replace = Replacement(*SM, &NNSLoc, "", Context->getLangOpts());
}
diff --git a/clang/utils/TableGen/ASTTableGen.h b/clang/utils/TableGen/ASTTableGen.h
index 02b9763..e9a86f3 100644
--- a/clang/utils/TableGen/ASTTableGen.h
+++ b/clang/utils/TableGen/ASTTableGen.h
@@ -38,7 +38,7 @@
#define AlwaysDependentClassName "AlwaysDependent"
#define NeverCanonicalClassName "NeverCanonical"
#define NeverCanonicalUnlessDependentClassName "NeverCanonicalUnlessDependent"
-#define LeafTypeClassName "LeafType"
+#define AlwaysCanonicalTypeClassName "AlwaysCanonical"
// Cases of various non-ASTNode structured types like DeclarationName.
#define TypeKindClassName "PropertyTypeKind"
diff --git a/clang/utils/TableGen/ClangTypeNodesEmitter.cpp b/clang/utils/TableGen/ClangTypeNodesEmitter.cpp
index 3703936..2e1eaef 100644
--- a/clang/utils/TableGen/ClangTypeNodesEmitter.cpp
+++ b/clang/utils/TableGen/ClangTypeNodesEmitter.cpp
@@ -40,8 +40,9 @@
// There is a sixth macro, independent of the others. Most clients
// will not need to use it.
//
-// LEAF_TYPE(Class) - A type that never has inner types. Clients
-// which can operate on such types more efficiently may wish to do so.
+// ALWAYS_CANONICAL_TYPE(Class) - A type which is always identical to its
+// canonical type. Clients which can operate on such types more efficiently
+// may wish to do so.
//
//===----------------------------------------------------------------------===//
@@ -66,7 +67,7 @@ using namespace clang::tblgen;
#define NonCanonicalUnlessDependentTypeMacroName "NON_CANONICAL_UNLESS_DEPENDENT_TYPE"
#define TypeMacroArgs "(Class, Base)"
#define LastTypeMacroName "LAST_TYPE"
-#define LeafTypeMacroName "LEAF_TYPE"
+#define AlwaysCanonicalTypeMacroName "ALWAYS_CANONICAL_TYPE"
#define TypeClassName "Type"
@@ -90,7 +91,7 @@ private:
void emitNodeInvocations();
void emitLastNodeInvocation(TypeNode lastType);
- void emitLeafNodeInvocations();
+ void emitAlwaysCanonicalNodeInvocations();
void addMacroToUndef(StringRef macroName);
void emitUndefs();
@@ -109,12 +110,12 @@ void TypeNodeEmitter::emit() {
emitFallbackDefine(AbstractTypeMacroName, TypeMacroName, TypeMacroArgs);
emitFallbackDefine(NonCanonicalTypeMacroName, TypeMacroName, TypeMacroArgs);
emitFallbackDefine(DependentTypeMacroName, TypeMacroName, TypeMacroArgs);
- emitFallbackDefine(NonCanonicalUnlessDependentTypeMacroName, TypeMacroName,
+ emitFallbackDefine(NonCanonicalUnlessDependentTypeMacroName, TypeMacroName,
TypeMacroArgs);
// Invocations.
emitNodeInvocations();
- emitLeafNodeInvocations();
+ emitAlwaysCanonicalNodeInvocations();
// Postmatter
emitUndefs();
@@ -178,15 +179,16 @@ void TypeNodeEmitter::emitLastNodeInvocation(TypeNode type) {
"#endif\n";
}
-void TypeNodeEmitter::emitLeafNodeInvocations() {
- Out << "#ifdef " LeafTypeMacroName "\n";
+void TypeNodeEmitter::emitAlwaysCanonicalNodeInvocations() {
+ Out << "#ifdef " AlwaysCanonicalTypeMacroName "\n";
for (TypeNode type : Types) {
- if (!type.isSubClassOf(LeafTypeClassName)) continue;
- Out << LeafTypeMacroName "(" << type.getId() << ")\n";
+ if (!type.isSubClassOf(AlwaysCanonicalTypeClassName))
+ continue;
+ Out << AlwaysCanonicalTypeMacroName "(" << type.getId() << ")\n";
}
- Out << "#undef " LeafTypeMacroName "\n"
+ Out << "#undef " AlwaysCanonicalTypeMacroName "\n"
"#endif\n";
}
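
As the renamed comment explains, the emitter appends a block of ALWAYS_CANONICAL_TYPE invocations, guarded by #ifdef and followed by an #undef emitted by the generated file itself. Clients consume it with the usual X-macro pattern; a sketch of a hypothetical consumer, assuming the generated header is reachable at its conventional path clang/AST/TypeNodes.inc:

    // Hypothetical client: count the type nodes that are always canonical.
    #define TYPE(Class, Base)            // ignore the general invocations
    #define ALWAYS_CANONICAL_TYPE(Class) +1
    constexpr int NumAlwaysCanonicalTypes = 0
    #include "clang/AST/TypeNodes.inc"
        ;

Each generated ALWAYS_CANONICAL_TYPE(Class) line expands to "+1", so the constant counts the always-canonical nodes; the generated file undefines the macro afterwards, per emitAlwaysCanonicalNodeInvocations above.
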
diff --git a/clang/utils/TableGen/SveEmitter.cpp b/clang/utils/TableGen/SveEmitter.cpp
index e2da20e..af2dcf6 100644
--- a/clang/utils/TableGen/SveEmitter.cpp
+++ b/clang/utils/TableGen/SveEmitter.cpp
@@ -972,10 +972,10 @@ Intrinsic::Intrinsic(StringRef Name, StringRef Proto, uint64_t MergeTy,
BaseType(BT, 'd'), Flags(Flags), ImmChecks(Checks) {
auto FormatGuard = [](StringRef Guard, StringRef Base) -> std::string {
+ if (Guard.empty() || Guard == Base)
+ return Guard.str();
if (Guard.contains('|'))
return Base.str() + ",(" + Guard.str() + ")";
- if (Guard.empty() || Guard == Base || Guard.starts_with(Base.str() + ","))
- return Guard.str();
return Base.str() + "," + Guard.str();
};
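
With the reordering above, FormatGuard returns the guard unchanged only when it is empty or exactly equal to the base guard; a guard containing '|' is parenthesized before the base is prepended, and anything else is simply comma-joined. A standalone sketch of the same logic with a few worked cases (StringRef swapped for std::string_view so it compiles outside the tree; the feature names are illustrative):

    #include <cassert>
    #include <string>
    #include <string_view>

    std::string formatGuard(std::string_view Guard, std::string_view Base) {
      if (Guard.empty() || Guard == Base)
        return std::string(Guard);
      if (Guard.find('|') != std::string_view::npos) // alternation: group it
        return std::string(Base) + ",(" + std::string(Guard) + ")";
      return std::string(Base) + "," + std::string(Guard);
    }

    int main() {
      assert(formatGuard("", "sve").empty());
      assert(formatGuard("sve", "sve") == "sve");
      assert(formatGuard("sve2", "sve") == "sve,sve2");
      assert(formatGuard("sve2|sme", "sve") == "sve,(sve2|sme)");
    }
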
diff --git a/clang/www/c_status.html b/clang/www/c_status.html
index dcff2fc..5b31f97 100644
--- a/clang/www/c_status.html
+++ b/clang/www/c_status.html
@@ -1302,16 +1302,11 @@ conforms by not defining the <code>__STDC_IEC_559_COMPLEX__</code> macro.
<td class="full" align="center">Yes</td>
</tr>
<tr>
- <td>mixed declarations and code</td>
+    <td>mixed declarations and code<br/>new block scopes for selection and iteration statements</td>
<td><a href="https://www.open-std.org/jtc1/sc22/wg14/www/docs/n740.htm">N740</a></td>
<td class="full" align="center">Yes</td>
</tr>
<tr>
- <td>new block scopes for selection and iteration statements</td>
- <td>Unknown</td>
- <td class="full" align="center">Yes</td>
- </tr>
- <tr>
<td>integer constant type rules</td>
<td><a href="https://www.open-std.org/jtc1/sc22/wg14/www/docs/n629.htm">N629</a></td>
<td class="full" align="center">Yes</td>